diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp index 0b84f0d95..a7c1415eb 100644 --- a/src/avalanche/processor.cpp +++ b/src/avalanche/processor.cpp @@ -1,557 +1,591 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include // For DecodeSecret #include // For Misbehaving #include #include #include #include #include #include #include /** * Run the avalanche event loop every 10ms. */ static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10}; // Unfortunately, the bitcoind codebase is full of global and we are kinda // forced into it here. std::unique_ptr g_avalanche; namespace avalanche { bool VoteRecord::registerVote(NodeId nodeid, uint32_t error) { // We just got a new vote, so there is one less inflight request. clearInflightRequest(); // We want to avoid having the same node voting twice in a quorum. if (!addNodeToQuorum(nodeid)) { return false; } /** * The result of the vote is determined from the error code. If the error * code is 0, there is no error and therefore the vote is yes. If there is * an error, we check the most significant bit to decide if the vote is a no * (for instance, the block is invalid) or is the vote inconclusive (for * instance, the queried node does not have the block yet). */ votes = (votes << 1) | (error == 0); consider = (consider << 1) | (int32_t(error) >= 0); /** * We compute the number of yes and/or no votes as follow: * * votes: 1010 * consider: 1100 * * yes votes: 1000 using votes & consider * no votes: 0100 using ~votes & consider */ bool yes = countBits(votes & consider & 0xff) > 6; if (!yes) { bool no = countBits(~votes & consider & 0xff) > 6; if (!no) { // The round is inconclusive. return false; } } // If the round is in agreement with previous rounds, increase confidence. if (isAccepted() == yes) { confidence += 2; return getConfidence() == AVALANCHE_FINALIZATION_SCORE; } // The round changed our state. We reset the confidence. confidence = yes; return true; } bool VoteRecord::addNodeToQuorum(NodeId nodeid) { if (nodeid == NO_NODE) { // Helpful for testing. return true; } // MMIX Linear Congruent Generator. const uint64_t r1 = 6364136223846793005 * uint64_t(nodeid) + 1442695040888963407; // Fibonacci hashing. const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed); // Combine and extract hash. const uint16_t h = (r1 + r2) >> 48; /** * Check if the node is in the filter. */ for (size_t i = 1; i < nodeFilter.size(); i++) { if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) { return false; } } /** * Add the node which just voted to the filter. */ nodeFilter[successfulVotes % nodeFilter.size()] = h; successfulVotes++; return true; } bool VoteRecord::registerPoll() const { uint8_t count = inflight.load(); while (count < AVALANCHE_MAX_INFLIGHT_POLL) { if (inflight.compare_exchange_weak(count, count + 1)) { return true; } } return false; } static bool IsWorthPolling(const CBlockIndex *pindex) { AssertLockHeld(cs_main); if (pindex->nStatus.isInvalid()) { // No point polling invalid blocks. return false; } if (::ChainstateActive().IsBlockFinalized(pindex)) { // There is no point polling finalized block. 
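// ("Finalized" here refers to the node's own block-finalization mechanism
// (the rolling-checkpoint style finalization used by the node), not to
// avalanche finalization: once a block can no longer be reorged away there
// is nothing left to vote on.)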
return false; } return true; } struct Processor::PeerData { Proof proof; Delegation delegation; }; class Processor::NotificationsHandler : public interfaces::Chain::Notifications { Processor *m_processor; public: NotificationsHandler(Processor *p) : m_processor(p) {} void updatedBlockTip() override { LOCK(m_processor->cs_peerManager); m_processor->peerManager->updatedBlockTip(); } }; Processor::Processor(interfaces::Chain &chain, CConnman *connmanIn) : connman(connmanIn), queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT), round(0), peerManager(std::make_unique()) { if (gArgs.IsArgSet("-avasessionkey")) { sessionKey = DecodeSecret(gArgs.GetArg("-avasessionkey", "")); } else { // Pick a random key for the session. sessionKey.MakeNewKey(true); } if (gArgs.IsArgSet("-avaproof")) { peerData = std::make_unique(); { // The proof. CDataStream stream(ParseHex(gArgs.GetArg("-avaproof", "")), SER_NETWORK, 0); stream >> peerData->proof; // Ensure the peer manager knows about it. - LOCK(cs_peerManager); - peerManager->getPeerId(peerData->proof); + // FIXME: There is no way to register the proof at this time because + // we might not have the proper chainstate at the moment. We need to + // find a way to delay the registration of the proof until after IBD + // has finished and the chain state is settled. + // LOCK(cs_peerManager); + // peerManager->getPeerId(peerData->proof); } // Generate the delegation to the session key. DelegationBuilder dgb(peerData->proof); if (sessionKey.GetPubKey() != peerData->proof.getMaster()) { dgb.addLevel(DecodeSecret(gArgs.GetArg("-avamasterkey", "")), sessionKey.GetPubKey()); } peerData->delegation = dgb.build(); } // Make sure we get notified of chain state changes. chainNotificationsHandler = chain.handleNotifications(std::make_shared(this)); } Processor::~Processor() { chainNotificationsHandler.reset(); stopEventLoop(); } bool Processor::addBlockToReconcile(const CBlockIndex *pindex) { bool isAccepted; { LOCK(cs_main); if (!IsWorthPolling(pindex)) { // There is no point polling this block. return false; } isAccepted = ::ChainActive().Contains(pindex); } return vote_records.getWriteView() ->insert(std::make_pair(pindex, VoteRecord(isAccepted))) .second; } bool Processor::isAccepted(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return false; } return it->second.isAccepted(); } int Processor::getConfidence(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return -1; } return it->second.getConfidence(); } namespace { /** * When using TCP, we need to sign all messages as the transport layer is * not secure. */ class TCPResponse { Response response; std::array sig; public: TCPResponse(Response responseIn, const CKey &key) : response(std::move(responseIn)) { CHashWriter hasher(SER_GETHASH, 0); hasher << response; const uint256 hash = hasher.GetHash(); // Now let's sign! 
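// The serialized response is hashed with CHashWriter (double SHA-256) and
// Schnorr-signed with the avalanche session key below; if signing fails the
// signature is zeroed rather than aborting, so the message still serializes,
// just with an invalid signature.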
if (!key.SignSchnorr(hash, sig)) { sig.fill(0); } } // serialization support ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(response); READWRITE(sig); } }; } // namespace void Processor::sendResponse(CNode *pfrom, Response response) const { connman->PushMessage( pfrom, CNetMsgMaker(pfrom->GetSendVersion()) .Make(NetMsgType::AVARESPONSE, TCPResponse(std::move(response), sessionKey))); } bool Processor::registerVotes(NodeId nodeid, const Response &response, std::vector &updates) { { // Save the time at which we can query again. LOCK(cs_peerManager); // FIXME: This will override the time even when we received an old stale // message. This should check that the message is indeed the most up to // date one before updating the time. peerManager->updateNextRequestTime( nodeid, std::chrono::steady_clock::now() + std::chrono::milliseconds(response.getCooldown())); } std::vector invs; { // Check that the query exists. auto w = queries.getWriteView(); auto it = w->find(std::make_tuple(nodeid, response.getRound())); if (it == w.end()) { Misbehaving(nodeid, 2, "unexpcted-ava-response"); return false; } invs = std::move(it->invs); w->erase(it); } // Verify that the request and the vote are consistent. const std::vector &votes = response.GetVotes(); size_t size = invs.size(); if (votes.size() != size) { Misbehaving(nodeid, 100, "invalid-ava-response-size"); return false; } for (size_t i = 0; i < size; i++) { if (invs[i].hash != votes[i].GetHash()) { Misbehaving(nodeid, 100, "invalid-ava-response-content"); return false; } } std::map responseIndex; { LOCK(cs_main); for (const auto &v : votes) { auto pindex = LookupBlockIndex(BlockHash(v.GetHash())); if (!pindex) { // This should not happen, but just in case... continue; } if (!IsWorthPolling(pindex)) { // There is no point polling this block. continue; } responseIndex.insert(std::make_pair(pindex, v)); } } { // Register votes. auto w = vote_records.getWriteView(); for (const auto &p : responseIndex) { CBlockIndex *pindex = p.first; const Vote &v = p.second; auto it = w->find(pindex); if (it == w.end()) { // We are not voting on that item anymore. continue; } auto &vr = it->second; if (!vr.registerVote(nodeid, v.GetError())) { // This vote did not provide any extra information, move on. continue; } if (!vr.hasFinalized()) { // This item has note been finalized, so we have nothing more to // do. updates.emplace_back( pindex, vr.isAccepted() ? BlockUpdate::Status::Accepted : BlockUpdate::Status::Rejected); continue; } // We just finalized a vote. If it is valid, then let the caller // know. Either way, remove the item from the map. updates.emplace_back(pindex, vr.isAccepted() ? BlockUpdate::Status::Finalized : BlockUpdate::Status::Invalid); w->erase(it); } } return true; } bool Processor::addNode(NodeId nodeid, const Proof &proof, const Delegation &delegation) { LOCK(cs_peerManager); return peerManager->addNode(nodeid, proof, delegation); } bool Processor::forNode(NodeId nodeid, std::function func) const { LOCK(cs_peerManager); return peerManager->forNode(nodeid, std::move(func)); } CPubKey Processor::getSessionPubKey() const { return sessionKey.GetPubKey(); } +bool Processor::sendHello(CNode *pfrom) const { + if (!peerData) { + // We do not have a delegation to advertise. + return false; + } + + // Now let's sign! 
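// Note: the hello signature built below commits to the delegation ID plus
// both sides' connection nonces and extra entropy from the version
// handshake, which ties the advertised delegation to this particular
// connection so the hello cannot simply be replayed to another peer.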
+ std::array sig; + + { + CHashWriter hasher(SER_GETHASH, 0); + hasher << peerData->delegation.getId(); + hasher << pfrom->GetLocalNonce(); + hasher << pfrom->nRemoteHostNonce; + hasher << pfrom->GetLocalExtraEntropy(); + hasher << pfrom->nRemoteExtraEntropy; + const uint256 hash = hasher.GetHash(); + + if (!sessionKey.SignSchnorr(hash, sig)) { + return false; + } + } + + connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()) + .Make(NetMsgType::AVAHELLO, + Hello(peerData->delegation, sig))); + + return true; +} + bool Processor::startEventLoop(CScheduler &scheduler) { return eventLoop.startEventLoop( scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP); } bool Processor::stopEventLoop() { return eventLoop.stopEventLoop(); } std::vector Processor::getInvsForNextPoll(bool forPoll) { std::vector invs; // First remove all blocks that are not worth polling. { LOCK(cs_main); auto w = vote_records.getWriteView(); for (auto it = w->begin(); it != w->end();) { const CBlockIndex *pindex = it->first; if (!IsWorthPolling(pindex)) { w->erase(it++); } else { ++it; } } } auto r = vote_records.getReadView(); for (const std::pair &p : reverse_iterate(r)) { // Check if we can run poll. const bool shouldPoll = forPoll ? p.second.registerPoll() : p.second.shouldPoll(); if (!shouldPoll) { continue; } // We don't have a decision, we need more votes. invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash()); if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) { // Make sure we do not produce more invs than specified by the // protocol. return invs; } } return invs; } NodeId Processor::getSuitableNodeToQuery() { LOCK(cs_peerManager); return peerManager->selectNode(); } void Processor::clearTimedoutRequests() { auto now = std::chrono::steady_clock::now(); std::map timedout_items{}; { // Clear expired requests. auto w = queries.getWriteView(); auto it = w->get().begin(); while (it != w->get().end() && it->timeout < now) { for (const auto &i : it->invs) { timedout_items[i]++; } w->get().erase(it++); } } if (timedout_items.empty()) { return; } // In flight request accounting. for (const auto &p : timedout_items) { const CInv &inv = p.first; assert(inv.type == MSG_BLOCK); CBlockIndex *pindex; { LOCK(cs_main); pindex = LookupBlockIndex(BlockHash(inv.hash)); if (!pindex) { continue; } } auto w = vote_records.getWriteView(); auto it = w->find(pindex); if (it == w.end()) { continue; } it->second.clearInflightRequest(p.second); } } void Processor::runEventLoop() { // First things first, check if we have requests that timed out and clear // them. clearTimedoutRequests(); // Make sure there is at least one suitable node to query before gathering // invs. NodeId nodeid = getSuitableNodeToQuery(); if (nodeid == NO_NODE) { return; } std::vector invs = getInvsForNextPoll(); if (invs.empty()) { return; } do { /** * If we lost contact to that node, then we remove it from nodeids, but * never add the request to queries, which ensures bad nodes get cleaned * up over time. */ bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) { uint64_t current_round = round++; { // Compute the time at which this requests times out. auto timeout = std::chrono::steady_clock::now() + queryTimeoutDuration; // Register the query. queries.getWriteView()->insert( {pnode->GetId(), current_round, timeout, invs}); // Set the timeout. LOCK(cs_peerManager); peerManager->updateNextRequestTime(pnode->GetId(), timeout); } // Send the query to the node. 
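// (The query was registered above under the current round, and the peer's
// next-request time was pushed out to the poll timeout, so a poll that never
// gets answered is eventually reclaimed by clearTimedoutRequests().)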
connman->PushMessage( pnode, CNetMsgMaker(pnode->GetSendVersion()) .Make(NetMsgType::AVAPOLL, Poll(current_round, std::move(invs)))); return true; }); // Success! if (hasSent) { return; } { // This node is obsolete, delete it. LOCK(cs_peerManager); peerManager->removeNode(nodeid); } // Get next suitable node to try again nodeid = getSuitableNodeToQuery(); } while (nodeid != NO_NODE); } } // namespace avalanche diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h index f248735b0..8fb0fd3bf 100644 --- a/src/avalanche/processor.h +++ b/src/avalanche/processor.h @@ -1,296 +1,297 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PROCESSOR_H #define BITCOIN_AVALANCHE_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class Config; class CBlockIndex; class CScheduler; /** * Is avalanche enabled by default. */ static constexpr bool AVALANCHE_DEFAULT_ENABLED = false; /** * Finalization score. */ static constexpr int AVALANCHE_FINALIZATION_SCORE = 128; /** * Maximum item that can be polled at once. */ static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16; /** * Avalanche default cooldown in milliseconds. */ static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN = 100; /** * How long before we consider that a query timed out. */ static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{ 10000}; /** * How many inflight requests can exist for one item. */ static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10; namespace avalanche { class Delegation; class PeerManager; class Proof; /** * Vote history. */ struct VoteRecord { private: // confidence's LSB bit is the result. Higher bits are actual confidence // score. uint16_t confidence = 0; // Historical record of votes. uint8_t votes = 0; // Each bit indicate if the vote is to be considered. uint8_t consider = 0; // How many in flight requests exists for this element. mutable std::atomic inflight{0}; // Seed for pseudorandom operations. const uint32_t seed = 0; // Track how many successful votes occured. uint32_t successfulVotes = 0; // Track the nodes which are part of the quorum. std::array nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}}; public: explicit VoteRecord(bool accepted) : confidence(accepted) {} /** * Copy semantic */ VoteRecord(const VoteRecord &other) : confidence(other.confidence), votes(other.votes), consider(other.consider), inflight(other.inflight.load()), successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) { } /** * Vote accounting facilities. */ bool isAccepted() const { return confidence & 0x01; } uint16_t getConfidence() const { return confidence >> 1; } bool hasFinalized() const { return getConfidence() >= AVALANCHE_FINALIZATION_SCORE; } /** * Register a new vote for an item and update confidence accordingly. * Returns true if the acceptance or finalization state changed. */ bool registerVote(NodeId nodeid, uint32_t error); /** * Register that a request is being made regarding that item. * The method is made const so that it can be accessed via a read only view * of vote_records. It's not a problem as it is made thread safe. */ bool registerPoll() const; /** * Return if this item is in condition to be polled at the moment. 
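     * (That is, fewer than AVALANCHE_MAX_INFLIGHT_POLL requests are
     * currently in flight for this item.)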
*/ bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; } /** * Clear `count` inflight requests. */ void clearInflightRequest(uint8_t count = 1) { inflight -= count; } private: /** * Add the node to the quorum. * Returns true if the node was added, false if the node already was in the * quorum. */ bool addNodeToQuorum(NodeId nodeid); }; class BlockUpdate { union { CBlockIndex *pindex; uintptr_t raw; }; static const size_t STATUS_BITS = 2; static const uintptr_t MASK = (1 << STATUS_BITS) - 1; static_assert( alignof(CBlockIndex) >= (1 << STATUS_BITS), "CBlockIndex alignement doesn't allow for Status to be stored."); public: enum Status : uint8_t { Invalid, Rejected, Accepted, Finalized, }; BlockUpdate(CBlockIndex *pindexIn, Status statusIn) : pindex(pindexIn) { raw |= statusIn; } Status getStatus() const { return Status(raw & MASK); } CBlockIndex *getBlockIndex() { return reinterpret_cast(raw & ~MASK); } const CBlockIndex *getBlockIndex() const { return const_cast(this)->getBlockIndex(); } }; using BlockVoteMap = std::map; struct query_timeout {}; namespace { struct AvalancheTest; } class Processor { CConnman *connman; std::chrono::milliseconds queryTimeoutDuration; /** * Blocks to run avalanche on. */ RWCollection vote_records; /** * Keep track of peers and queries sent. */ std::atomic round; /** * Keep track of the peers and associated infos. */ mutable Mutex cs_peerManager; std::unique_ptr peerManager GUARDED_BY(cs_peerManager); struct Query { NodeId nodeid; uint64_t round; TimePoint timeout; /** * We declare this as mutable so it can be modified in the multi_index. * This is ok because we do not use this field to index in anyway. * * /!\ Do not use any mutable field as index. */ mutable std::vector invs; }; using QuerySet = boost::multi_index_container< Query, boost::multi_index::indexed_by< // index by nodeid/round boost::multi_index::hashed_unique, boost::multi_index::member>>, // sorted by timeout boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::member>>>; RWCollection queries; /** Data required to participate. */ struct PeerData; std::unique_ptr peerData; CKey sessionKey; /** Event loop machinery. */ EventLoop eventLoop; /** Registered interfaces::Chain::Notifications handler. */ class NotificationsHandler; std::unique_ptr chainNotificationsHandler; public: Processor(interfaces::Chain &chain, CConnman *connmanIn); ~Processor(); void setQueryTimeoutDuration(std::chrono::milliseconds d) { queryTimeoutDuration = d; } bool addBlockToReconcile(const CBlockIndex *pindex); bool isAccepted(const CBlockIndex *pindex) const; int getConfidence(const CBlockIndex *pindex) const; // TDOD: Refactor the API to remove the dependency on avalanche/protocol.h void sendResponse(CNode *pfrom, Response response) const; bool registerVotes(NodeId nodeid, const Response &response, std::vector &updates); bool addNode(NodeId nodeid, const Proof &proof, const Delegation &delegation); bool forNode(NodeId nodeid, std::function func) const; CPubKey getSessionPubKey() const; + bool sendHello(CNode *pfrom) const; bool startEventLoop(CScheduler &scheduler); bool stopEventLoop(); private: void runEventLoop(); void clearTimedoutRequests(); std::vector getInvsForNextPoll(bool forPoll = true); NodeId getSuitableNodeToQuery(); friend struct ::avalanche::AvalancheTest; }; } // namespace avalanche /** * Global avalanche instance. 
*/ extern std::unique_ptr g_avalanche; #endif // BITCOIN_AVALANCHE_PROCESSOR_H diff --git a/src/avalanche/protocol.h b/src/avalanche/protocol.h index fe6214973..d66acbb98 100644 --- a/src/avalanche/protocol.h +++ b/src/avalanche/protocol.h @@ -1,85 +1,104 @@ // Copyright (c) 2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PROTOCOL_H #define BITCOIN_AVALANCHE_PROTOCOL_H +#include #include // for CInv #include #include #include #include namespace avalanche { class Vote { uint32_t error; uint256 hash; public: Vote() : error(-1), hash() {} Vote(uint32_t errorIn, uint256 hashIn) : error(errorIn), hash(hashIn) {} const uint256 &GetHash() const { return hash; } uint32_t GetError() const { return error; } // serialization support ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(error); READWRITE(hash); } }; class Response { uint64_t round; uint32_t cooldown; std::vector votes; public: Response() : round(-1), cooldown(-1) {} Response(uint64_t roundIn, uint32_t cooldownIn, std::vector votesIn) : round(roundIn), cooldown(cooldownIn), votes(votesIn) {} uint64_t getRound() const { return round; } uint32_t getCooldown() const { return cooldown; } const std::vector &GetVotes() const { return votes; } // serialization support ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(round); READWRITE(cooldown); READWRITE(votes); } }; class Poll { uint64_t round; std::vector invs; public: Poll(uint64_t roundIn, std::vector invsIn) : round(roundIn), invs(invsIn) {} const std::vector &GetInvs() const { return invs; } // serialization support ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(round); READWRITE(invs); } }; +class Hello { + Delegation delegation; + std::array sig; + +public: + Hello(Delegation delegationIn, std::array sigIn) + : delegation(std::move(delegationIn)), sig(sigIn) {} + + // serialization support + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream &s, Operation ser_action) { + READWRITE(delegation); + READWRITE(sig); + } +}; + } // namespace avalanche #endif // BITCOIN_AVALANCHE_PROTOCOL_H diff --git a/src/net.h b/src/net.h index 0c647ea54..1a8df2fe3 100644 --- a/src/net.h +++ b/src/net.h @@ -1,1173 +1,1183 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NET_H #define BITCOIN_NET_H #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // For cs_main #include #include #include #include #include #include #ifndef WIN32 #include #endif class BanMan; class Config; class CNode; class CScheduler; struct bilingual_str; /** Default for -whitelistrelay. */ static const bool DEFAULT_WHITELISTRELAY = true; /** Default for -whitelistforcerelay. */ static const bool DEFAULT_WHITELISTFORCERELAY = false; /** * Time between pings automatically sent out for latency probing and keepalive * (in seconds). 
*/ static const int PING_INTERVAL = 2 * 60; /** * Time after which to disconnect, after waiting for a ping response (or * inactivity). */ static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv), "Max protocol message length must be greater than largest " "possible INV message"); /** The maximum number of entries in a locator */ static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of new addresses to accumulate before announcing. */ static const unsigned int MAX_ADDR_TO_SEND = 1000; /** Maximum length of the user agent string in `version` message */ static const unsigned int MAX_SUBVERSION_LENGTH = 256; /** * Maximum number of automatic outgoing nodes over which we'll relay everything * (blocks, tx, addrs, etc) */ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ #ifdef USE_UPNP static const bool DEFAULT_UPNP = USE_UPNP; #else static const bool DEFAULT_UPNP = false; #endif /** * The maximum number of peer connections to maintain. * This quantity might not be reachable on some systems, especially on platforms * that do not provide a working poll() interface. */ static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 4096; /** The default for -maxuploadtarget. 0 = Unlimited */ static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; /** The default timeframe for -maxuploadtarget. 1 day. */ static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24; /** Default for blocks only*/ static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; static const bool DEFAULT_FORCEDNSSEED = false; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; typedef int64_t NodeId; /** * Special NodeId that represent no node. */ static constexpr NodeId NO_NODE = -1; struct AddedNodeInfo { std::string strAddedNode; CService resolvedAddress; bool fConnected; bool fInbound; }; struct CNodeStats; class CClientUIInterface; struct CSerializedNetMsg { CSerializedNetMsg() = default; CSerializedNetMsg(CSerializedNetMsg &&) = default; CSerializedNetMsg &operator=(CSerializedNetMsg &&) = default; // No copying, only moves. CSerializedNetMsg(const CSerializedNetMsg &msg) = delete; CSerializedNetMsg &operator=(const CSerializedNetMsg &) = delete; std::vector data; std::string m_type; }; /** * Different types of connections to a peer. This enum encapsulates the * information we have available at the time of opening or accepting the * connection. Aside from INBOUND, all types are initiated by us. */ enum class ConnectionType { /** Peer initiated connections. */ INBOUND, /** * Full relay connections (blocks, addrs, txns) made automatically. * Addresses selected from AddrMan. */ OUTBOUND, /** * Connections to addresses added via addnode or the connect command line * argument. */ MANUAL, /** Short lived connections used to test address validity. 
*/ FEELER, /** * Only relay blocks to these automatic outbound connections. * Addresses selected from AddrMan. */ BLOCK_RELAY, /** * Short lived connections used to solicit addrs when starting the node * without a populated AddrMan. */ ADDR_FETCH, }; namespace { struct CConnmanTest; } class NetEventsInterface; class CConnman { public: enum NumConnections { CONNECTIONS_NONE = 0, CONNECTIONS_IN = (1U << 0), CONNECTIONS_OUT = (1U << 1), CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT), }; struct Options { ServiceFlags nLocalServices = NODE_NONE; int nMaxConnections = 0; int m_max_outbound_full_relay = 0; int m_max_outbound_block_relay = 0; int nMaxAddnode = 0; int nMaxFeeler = 0; int nBestHeight = 0; CClientUIInterface *uiInterface = nullptr; NetEventsInterface *m_msgproc = nullptr; BanMan *m_banman = nullptr; unsigned int nSendBufferMaxSize = 0; unsigned int nReceiveFloodSize = 0; uint64_t nMaxOutboundTimeframe = 0; uint64_t nMaxOutboundLimit = 0; int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT; std::vector vSeedNodes; std::vector vWhitelistedRange; std::vector vWhiteBinds; std::vector vBinds; bool m_use_addrman_outgoing = true; std::vector m_specified_outgoing; std::vector m_added_nodes; std::vector m_asmap; }; void Init(const Options &connOptions) { nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing; nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; { // Lock cs_main to prevent a potential race with the peer validation // logic thread. LOCK(::cs_main); m_max_outbound_full_relay = std::min(connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections); m_max_outbound_block_relay = connOptions.m_max_outbound_block_relay; m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler; } nBestHeight = connOptions.nBestHeight; clientInterface = connOptions.uiInterface; m_banman = connOptions.m_banman; m_msgproc = connOptions.m_msgproc; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; nReceiveFloodSize = connOptions.nReceiveFloodSize; m_peer_connect_timeout = connOptions.m_peer_connect_timeout; { LOCK(cs_totalBytesSent); nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; } vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(cs_vAddedNodes); vAddedNodes = connOptions.m_added_nodes; } } CConnman(const Config &configIn, uint64_t seed0, uint64_t seed1); ~CConnman(); bool Start(CScheduler &scheduler, const Options &options); void StopThreads(); void StopNodes(); void Stop() { StopThreads(); StopNodes(); }; void Interrupt(); bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); void OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function func); void PushMessage(CNode *pnode, CSerializedNetMsg &&msg); template void ForEachNode(Callable &&func) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNode(Callable &&func) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNodeThen(Callable &&pre, 
CallableAfter &&post) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; // Addrman functions void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress &addr); void AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty = 0); std::vector GetAddresses(); // This allows temporarily exceeding m_max_outbound_full_relay, with the // goal of finding a peer that is better than all our current peers. void SetTryNewOutboundPeer(bool flag); bool GetTryNewOutboundPeer(); // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may // return a value less than (num_outbound_connections - num_outbound_slots) // in cases where some outbound connections are not yet fully connected, or // not yet fully disconnected. int GetExtraOutboundCount(); bool AddNode(const std::string &node); bool RemoveAddedNode(const std::string &node); std::vector GetAddedNodeInfo(); size_t GetNodeCount(NumConnections num); void GetNodeStats(std::vector &vstats); bool DisconnectNode(const std::string &node); bool DisconnectNode(const CSubNet &subnet); bool DisconnectNode(const CNetAddr &addr); bool DisconnectNode(NodeId id); //! Used to convey which local services we are offering peers during node //! connection. //! //! The data returned by this is used in CNode construction, //! which is used to advertise which services we are offering //! that peer during `net_processing.cpp:PushNodeVersion()`. ServiceFlags GetLocalServices() const; //! set the max outbound target in bytes. void SetMaxOutboundTarget(uint64_t limit); uint64_t GetMaxOutboundTarget(); //! set the timeframe for the max outbound target. void SetMaxOutboundTimeframe(uint64_t timeframe); uint64_t GetMaxOutboundTimeframe(); //! check if the outbound target is reached. If param //! historicalBlockServingLimit is set true, the function will response true //! if the limit for serving historical blocks has been reached. bool OutboundTargetReached(bool historicalBlockServingLimit); //! response the bytes left in the current max outbound cycle in case of no //! limit, it will always response 0 uint64_t GetOutboundTargetBytesLeft(); //! response the time in second left in the current max outbound cycle in //! case of no limit, it will always response 0 uint64_t GetMaxOutboundTimeLeftInCycle(); uint64_t GetTotalBytesRecv(); uint64_t GetTotalBytesSent(); void SetBestHeight(int height); int GetBestHeight() const; /** Get a unique deterministic randomizer. */ CSipHasher GetDeterministicRandomizer(uint64_t id) const; unsigned int GetReceiveFloodSize() const; void WakeMessageHandler(); /** * Attempts to obfuscate tx time through exponentially distributed emitting. * Works assuming that a single interval is used. * Variable intervals will result in privacy decrease. 
*/ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); void SetAsmap(std::vector asmap) { addrman.m_asmap = std::move(asmap); } private: struct ListenSocket { public: SOCKET socket; inline void AddSocketPermissionFlags(NetPermissionFlags &flags) const { NetPermissions::AddFlag(flags, m_permissions); } ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {} private: NetPermissionFlags m_permissions; }; bool BindListenPort(const CService &bindAddr, bilingual_str &strError, NetPermissionFlags permissions); bool Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector &binds, const std::vector &whiteBinds); void ThreadOpenAddedConnections(); void AddAddrFetch(const std::string &strDest); void ProcessAddrFetch(); void ThreadOpenConnections(std::vector connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket &hListenSocket); void DisconnectNodes(); void NotifyNumConnectionsChanged(); void InactivityCheck(CNode *pnode); bool GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketHandler(); void ThreadSocketHandler(); void ThreadDNSAddressSeed(); uint64_t CalculateKeyedNetGroup(const CAddress &ad) const; CNode *FindNode(const CNetAddr &ip); CNode *FindNode(const CSubNet &subNet); CNode *FindNode(const std::string &addrName); CNode *FindNode(const CService &addr); bool AttemptToEvictConnection(); CNode *ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type); void AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const; void DeleteNode(CNode *pnode); NodeId GetNewNodeId(); size_t SocketSendData(CNode *pnode) const; void DumpAddresses(); // Network stats void RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode *pnode); const Config *config; // Network usage totals RecursiveMutex cs_totalBytesRecv; RecursiveMutex cs_totalBytesSent; uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv){0}; uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent){0}; // outbound limit & stats uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent); // P2P timeout in seconds int64_t m_peer_connect_timeout; // Whitelisted ranges. Any node connecting from these is automatically // whitelisted (as well as those connecting to whitelisted binds). std::vector vWhitelistedRange; unsigned int nSendBufferMaxSize{0}; unsigned int nReceiveFloodSize{0}; std::vector vhListenSocket; std::atomic fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; std::deque m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); RecursiveMutex m_addr_fetches_mutex; std::vector vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector vNodes GUARDED_BY(cs_vNodes); std::list vNodesDisconnected; mutable RecursiveMutex cs_vNodes; std::atomic nLastNodeId{0}; unsigned int nPrevNodeCount{0}; /** * Services this instance offers. 
* * This data is replicated in each CNode instance we create during peer * connection (in ConnectNode()) under a member also called * nLocalServices. * * This data is not marked const, but after being set it should not * change. See the note in CNode::nLocalServices documentation. * * \sa CNode::nLocalServices */ ServiceFlags nLocalServices; std::unique_ptr semOutbound; std::unique_ptr semAddnode; int nMaxConnections; // How many full-relay (tx, block, addr) outbound peers we want int m_max_outbound_full_relay; // How many block-relay only outbound peers we want // We do not relay tx or addr messages with these peers int m_max_outbound_block_relay; int nMaxAddnode; int nMaxFeeler; int m_max_outbound; bool m_use_addrman_outgoing; std::atomic nBestHeight; CClientUIInterface *clientInterface; NetEventsInterface *m_msgproc; /** * Pointer to this node's banman. May be nullptr - check existence before * dereferencing. */ BanMan *m_banman; /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; /** flag for waking the message processor. */ bool fMsgProcWake GUARDED_BY(mutexMsgProc); std::condition_variable condMsgProc; Mutex mutexMsgProc; std::atomic flagInterruptMsgProc{false}; CThreadInterrupt interruptNet; std::thread threadDNSAddressSeed; std::thread threadSocketHandler; std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; /** * flag for deciding to connect to an extra outbound peer, in excess of * m_max_outbound_full_relay. This takes the place of a feeler connection. */ std::atomic_bool m_try_another_outbound_peer; std::atomic m_next_send_inv_to_incoming{0}; friend struct ::CConnmanTest; friend struct ConnmanTestMsg; }; void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); unsigned short GetListenPort(); /** * Interface for message handling */ class NetEventsInterface { public: virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual bool SendMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual void InitializeNode(const Config &config, CNode *pnode) = 0; virtual void FinalizeNode(const Config &config, NodeId id, bool &update_connection_time) = 0; protected: /** * Protected destructor so that instances can only be deleted by derived * classes. If that restriction is no longer desired, this should be made * public and virtual. 
*/ ~NetEventsInterface() = default; }; enum { // unknown LOCAL_NONE, // address a local interface listens on LOCAL_IF, // address explicit bound to LOCAL_BIND, // address reported by UPnP LOCAL_UPNP, // address explicitly specified (-externalip=) LOCAL_MANUAL, LOCAL_MAX }; bool IsPeerAddrLocalGood(CNode *pnode); void AdvertiseLocal(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) * @note Networks are reachable by default */ void SetReachable(enum Network net, bool reachable); /** @returns true if the network is reachable, false otherwise */ bool IsReachable(enum Network net); /** @returns true if the address is in a reachable network, false otherwise */ bool IsReachable(const CNetAddr &addr); bool AddLocal(const CService &addr, int nScore = LOCAL_NONE); bool AddLocal(const CNetAddr &addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService &addr); bool SeenLocal(const CService &addr); bool IsLocal(const CService &addr); bool GetLocal(CService &addr, const CNetAddr *paddrPeer = nullptr); CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices); extern bool fDiscover; extern bool fListen; extern bool g_relay_txes; struct LocalServiceInfo { int nScore; int nPort; }; extern RecursiveMutex cs_mapLocalHost; extern std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); extern const std::string NET_MESSAGE_COMMAND_OTHER; // Command, total bytes typedef std::map mapMsgCmdSize; /** * POD that contains various stats about a node. * Usually constructed from CConman::GetNodeStats. Stats are filled from the * node using CNode::copyStats. */ struct CNodeStats { NodeId nodeid; ServiceFlags nServices; bool fRelayTxes; int64_t nLastSend; int64_t nLastRecv; int64_t nTimeConnected; int64_t nTimeOffset; std::string addrName; int nVersion; std::string cleanSubVer; bool fInbound; bool m_manual_connection; int nStartingHeight; uint64_t nSendBytes; mapMsgCmdSize mapSendBytesPerMsgCmd; uint64_t nRecvBytes; mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; bool m_legacyWhitelisted; int64_t m_ping_usec; int64_t m_ping_wait_usec; int64_t m_min_ping_usec; Amount minFeeFilter; // Our address, as reported by the peer std::string addrLocal; // Address of this peer CAddress addr; // Bind address of our side of the connection CAddress addrBind; uint32_t m_mapped_as; }; /** * Transport protocol agnostic message container. * Ideally it should only contain receive time, payload, * command and size. */ class CNetMessage { public: // received message data CDataStream m_recv; // time (in microseconds) of message receipt. int64_t m_time = 0; bool m_valid_netmagic = false; bool m_valid_header = false; bool m_valid_checksum = false; // size of the payload uint32_t m_message_size = 0; // used wire size of the message (including header/checksum) uint32_t m_raw_message_size = 0; std::string m_command; CNetMessage(CDataStream &&recv_in) : m_recv(std::move(recv_in)) {} void SetVersion(int nVersionIn) { m_recv.SetVersion(nVersionIn); } }; /** * The TransportDeserializer takes care of holding and deserializing the * network receive buffer. 
It can deserialize the network buffer into a * transport protocol agnostic CNetMessage (command & payload) */ class TransportDeserializer { public: // returns true if the current deserialization is complete virtual bool Complete() const = 0; // set the serialization context version virtual void SetVersion(int version) = 0; // read and deserialize data virtual int Read(const Config &config, const char *data, uint32_t bytes) = 0; // decomposes a message from the context virtual CNetMessage GetMessage(const Config &config, int64_t time) = 0; virtual ~TransportDeserializer() {} }; class V1TransportDeserializer final : public TransportDeserializer { private: mutable CHash256 hasher; mutable uint256 data_hash; // Parsing header (false) or data (true) bool in_data; // Partially received header. CDataStream hdrbuf; // Complete header. CMessageHeader hdr; // Received message data. CDataStream vRecv; uint32_t nHdrPos; uint32_t nDataPos; const uint256 &GetMessageHash() const; int readHeader(const Config &config, const char *pch, uint32_t nBytes); int readData(const char *pch, uint32_t nBytes); void Reset() { vRecv.clear(); hdrbuf.clear(); hdrbuf.resize(24); in_data = false; nHdrPos = 0; nDataPos = 0; data_hash.SetNull(); hasher.Reset(); } public: V1TransportDeserializer( const CMessageHeader::MessageMagic &pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) { Reset(); } bool Complete() const override { if (!in_data) { return false; } return (hdr.nMessageSize == nDataPos); } void SetVersion(int nVersionIn) override { hdrbuf.SetVersion(nVersionIn); vRecv.SetVersion(nVersionIn); } int Read(const Config &config, const char *pch, uint32_t nBytes) override { int ret = in_data ? readData(pch, nBytes) : readHeader(config, pch, nBytes); if (ret < 0) { Reset(); } return ret; } CNetMessage GetMessage(const Config &config, int64_t time) override; }; /** * The TransportSerializer prepares messages for the network transport */ class TransportSerializer { public: // prepare message for transport (header construction, error-correction // computation, payload encryption, etc.) virtual void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) = 0; virtual ~TransportSerializer() {} }; class V1TransportSerializer : public TransportSerializer { public: void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) override; }; /** Information about a peer */ class CNode { friend class CConnman; friend struct ConnmanTestMsg; public: std::unique_ptr m_deserializer; std::unique_ptr m_serializer; // socket std::atomic nServices{NODE_NONE}; SOCKET hSocket GUARDED_BY(cs_hSocket); // Total size of all vSendMsg entries. size_t nSendSize{0}; // Offset inside the first vSendMsg already sent. 
size_t nSendOffset{0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; std::deque> vSendMsg GUARDED_BY(cs_vSend); RecursiveMutex cs_vSend; RecursiveMutex cs_hSocket; RecursiveMutex cs_vRecv; RecursiveMutex cs_vProcessMsg; std::list vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize{0}; RecursiveMutex cs_sendProcessing; std::deque vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic nRecvVersion{INIT_PROTO_VERSION}; std::atomic nLastSend{0}; std::atomic nLastRecv{0}; const int64_t nTimeConnected; std::atomic nTimeOffset{0}; // Address of this peer const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; std::atomic nVersion{0}; // The nonce provided by the remote host. uint64_t nRemoteHostNonce{0}; // The extra entropy provided by the remote host. uint64_t nRemoteExtraEntropy{0}; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ RecursiveMutex cs_SubVer; std::string cleanSubVer GUARDED_BY(cs_SubVer){}; // This peer is preferred for eviction. bool m_prefer_evict{false}; bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); } // This boolean is unusued in actual processing, only present for backward // compatibility at RPC/QT level bool m_legacyWhitelisted{false}; // set by version message bool fClient{false}; // after BIP159, set by version message bool m_limited_node{false}; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs std::atomic_bool fDisconnect{false}; bool fSentAddr{false}; CSemaphoreGrant grantOutbound; std::atomic nRefCount{0}; const uint64_t nKeyedNetGroup; std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; bool IsOutboundOrBlockRelayConn() const { switch (m_conn_type) { case ConnectionType::OUTBOUND: case ConnectionType::BLOCK_RELAY: return true; case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::ADDR_FETCH: case ConnectionType::FEELER: return false; } assert(false); } bool IsFullOutboundConn() const { return m_conn_type == ConnectionType::OUTBOUND; } bool IsManualConn() const { return m_conn_type == ConnectionType::MANUAL; } bool IsBlockOnlyConn() const { return m_conn_type == ConnectionType::BLOCK_RELAY; } bool IsFeelerConn() const { return m_conn_type == ConnectionType::FEELER; } bool IsAddrFetchConn() const { return m_conn_type == ConnectionType::ADDR_FETCH; } bool IsInboundConn() const { return m_conn_type == ConnectionType::INBOUND; } bool ExpectServicesFromConn() const { switch (m_conn_type) { case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::FEELER: return false; case ConnectionType::OUTBOUND: case ConnectionType::BLOCK_RELAY: case ConnectionType::ADDR_FETCH: return true; } assert(false); } protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); public: BlockHash hashContinue; std::atomic nStartingHeight{-1}; // flood relay std::vector vAddrToSend; std::unique_ptr m_addr_known = nullptr; bool fGetAddr{false}; std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0}; std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0}; bool IsAddrRelayPeer() const { return m_addr_known != nullptr; } // List of block ids we still have to announce. 
// There is no final sorting before sending, as they are always sent // immediately and in the order requested. std::vector vInventoryBlockToSend GUARDED_BY(cs_inventory); RecursiveMutex cs_inventory; struct TxRelay { TxRelay() { pfilter = std::make_unique(); } mutable RecursiveMutex cs_filter; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's // version message. // b) the peer may tell us in its version message that we should not // relay tx invs unless it loads a bloom filter. bool fRelayTxes GUARDED_BY(cs_filter){false}; std::unique_ptr pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter); mutable RecursiveMutex cs_tx_inventory; CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){ 50000, 0.000001}; // Set of transaction ids we still have to announce. // They are sorted by the mempool before relay, so the order is not // important. std::set setInventoryTxToSend; // Used for BIP35 mempool sending bool fSendMempool GUARDED_BY(cs_tx_inventory){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{ std::chrono::seconds{0}}; std::chrono::microseconds nNextInvSend{0}; RecursiveMutex cs_feeFilter; // Minimum fee rate with which to filter inv's to this node Amount minFeeFilter GUARDED_BY(cs_feeFilter){Amount::zero()}; Amount lastSentFeeFilter{Amount::zero()}; int64_t nextSendTimeFeeFilter{0}; }; // m_tx_relay == nullptr if we're not relaying transactions with this peer std::unique_ptr m_tx_relay; + struct AvalancheState { + AvalancheState() {} + + avalanche::Delegation delegation; + }; + + // m_avalanche_state == nullptr if we're not using avalanche with this peer + std::unique_ptr m_avalanche_state; + // Used for headers announcements - unfiltered blocks to relay std::vector vBlockHashesToAnnounce GUARDED_BY(cs_inventory); // Block and TXN accept times std::atomic nLastBlockTime{0}; std::atomic nLastTXTime{0}; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic nPingNonceSent{0}; // Time (in usec) the last ping was sent, or 0 if no ping was ever sent. std::atomic nPingUsecStart{0}; // Last measured round-trip time. std::atomic nPingUsecTime{0}; // Best measured round-trip time. std::atomic nMinPingUsecTime{std::numeric_limits::max()}; // Whether a ping is requested. std::atomic fPingQueued{false}; std::set orphan_work_set; CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, uint64_t nLocalExtraEntropyIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in); ~CNode(); CNode(const CNode &) = delete; CNode &operator=(const CNode &) = delete; private: const NodeId id; const uint64_t nLocalHostNonce; const uint64_t nLocalExtraEntropy; const ConnectionType m_conn_type; //! Services offered to this peer. //! //! This is supplied by the parent CConnman during peer connection //! (CConnman::ConnectNode()) from its attribute of the same name. //! //! This is const because there is no protocol defined for renegotiating //! services initially offered to a peer. The set of local services we //! offer should not change after initialization. //! //! An interesting example of this is NODE_NETWORK and initial block //! download: a node which starts up from scratch doesn't have any blocks //! to serve, but still advertises NODE_NETWORK because it will eventually //! fulfill this role after IBD completes. 
P2P code is written in such a //! way that it can gracefully handle peers who don't make good on their //! service advertisements. const ServiceFlags nLocalServices; const int nMyStartingHeight; int nSendVersion{0}; NetPermissionFlags m_permissionFlags{PF_NONE}; // Used only by SocketHandler thread std::list vRecvMsg; mutable RecursiveMutex cs_addrName; std::string addrName GUARDED_BY(cs_addrName); // Our address, as reported by the peer CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; public: NodeId GetId() const { return id; } uint64_t GetLocalNonce() const { return nLocalHostNonce; } uint64_t GetLocalExtraEntropy() const { return nLocalExtraEntropy; } int GetMyStartingHeight() const { return nMyStartingHeight; } int GetRefCount() const { assert(nRefCount >= 0); return nRefCount; } bool ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete); void SetRecvVersion(int nVersionIn) { nRecvVersion = nVersionIn; } int GetRecvVersion() const { return nRecvVersion; } void SetSendVersion(int nVersionIn); int GetSendVersion() const; CService GetAddrLocal() const; //! May not be called more than once void SetAddrLocal(const CService &addrLocalIn); CNode *AddRef() { nRefCount++; return this; } void Release() { nRefCount--; } void AddAddressKnown(const CAddress &_addr) { assert(m_addr_known); m_addr_known->insert(_addr.GetKey()); } void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand) { // Known checking here is only to save space from duplicates. // SendMessages will filter it again for knowns that were added // after addresses were pushed. assert(m_addr_known); if (_addr.IsValid() && !m_addr_known->contains(_addr.GetKey())) { if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) { vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr; } else { vAddrToSend.push_back(_addr); } } } void AddKnownTx(const TxId &txid) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); m_tx_relay->filterInventoryKnown.insert(txid); } } void PushInventory(const CInv &inv) { if (inv.type == MSG_TX && m_tx_relay != nullptr) { const TxId txid(inv.hash); LOCK(m_tx_relay->cs_tx_inventory); if (!m_tx_relay->filterInventoryKnown.contains(txid)) { m_tx_relay->setInventoryTxToSend.insert(txid); } } else if (inv.type == MSG_BLOCK) { const BlockHash hash(inv.hash); LOCK(cs_inventory); vInventoryBlockToSend.push_back(hash); } } void PushBlockHash(const BlockHash &hash) { LOCK(cs_inventory); vBlockHashesToAnnounce.push_back(hash); } void CloseSocketDisconnect(); void copyStats(CNodeStats &stats, const std::vector &m_asmap); ServiceFlags GetLocalServices() const { return nLocalServices; } std::string GetAddrName() const; //! Sets the addrName only if it was not previously set void MaybeSetAddrName(const std::string &addrNameIn); }; /** * Return a timestamp in the future (in microseconds) for exponentially * distributed events. 
*/ int64_t PoissonNextSend(int64_t now, int average_interval_seconds); /** Wrapper to return mockable type */ inline std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval) { return std::chrono::microseconds{ PoissonNextSend(now.count(), average_interval.count())}; } std::string getSubVersionEB(uint64_t MaxBlockSize); std::string userAgent(const Config &config); #endif // BITCOIN_NET_H diff --git a/src/net_processing.cpp b/src/net_processing.cpp index dcd43c29a..04aaf45c5 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1,5416 +1,5451 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include +#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // For NDEBUG compile time check #include #include #include #include #include /** Expiration time for orphan transactions in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60; /** Minimum time between orphan transactions expire time checks in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60; /** How long to cache transactions in mapRelay for normal relay */ static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60}; /** * Headers download timeout expressed in microseconds. * Timeout = base + per_header * (expected number of headers) */ // 15 minutes static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 1ms/header static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; /** * Protect at least this many outbound peers from disconnection due to * slow/behind headers chain. */ static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; /** * Timeout for (unprotected) outbound peers to sync to our chainwork, in * seconds. */ // 20 minutes static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; /** How frequently to check for stale tips, in seconds */ // 10 minutes static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; /** * How frequently to check for extra outbound peers and disconnect, in seconds. */ static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45; /** * Minimum time an outbound-peer-eviction candidate must be connected for, in * order to evict, in seconds. */ static constexpr int64_t MINIMUM_CONNECT_TIME = 30; /** SHA256("main address relay")[0:8] */ static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; /// Age after which a stale block will no longer be served if requested as /// protection against fingerprinting. Set to one month, denominated in seconds. static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; /// Age after which a block is considered historical for purposes of rate /// limiting block relay. Set to one week, denominated in seconds. 
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; /** Maximum number of in-flight transactions from a peer */ static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100; /** Maximum number of announced transactions from a peer */ static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ; /** How many microseconds to delay requesting transactions from inbound peers */ static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{ std::chrono::seconds{2}}; /** * How long to wait (in microseconds) before downloading a transaction from an * additional peer. */ static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{ std::chrono::seconds{60}}; /** * Maximum delay (in microseconds) for transaction requests to avoid biasing * some peers over others. */ static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{ std::chrono::seconds{2}}; /** * How long to wait (in microseconds) before expiring an in-flight getdata * request to a peer. */ static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{ GETDATA_TX_INTERVAL * 10}; static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY, "To preserve security, MAX_GETDATA_RANDOM_DELAY should not " "exceed INBOUND_PEER_DELAY"); /** * Limit to avoid sending big packets. Not used in processing incoming GETDATA * for compatibility. */ static const unsigned int MAX_GETDATA_SZ = 1000; /// How many non standard orphan do we consider from a node before ignoring it. static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE = 5; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. CTransactionRef tx; NodeId fromPeer; int64_t nTimeExpire; size_t list_pos; }; RecursiveMutex g_cs_orphans; std::map mapOrphanTransactions GUARDED_BY(g_cs_orphans); void EraseOrphansFor(NodeId peer); /** * Average delay between local address broadcasts. */ static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24}; /** * Average delay between peer address broadcasts. */ static const std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30}; /** * Average delay between trickled inventory transmissions in seconds. * Blocks and whitelisted receivers bypass this, outbound peers get half this * delay. */ static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5; /** * Maximum number of inventory items to send per transmission. * Limits the impact of low-fee transaction floods. */ static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB = 7 * INVENTORY_BROADCAST_INTERVAL; /** * Average delay between feefilter broadcasts in seconds. */ static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60; /** * Maximum feefilter broadcast delay after significant change. */ static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60; /** * Maximum number of compact filters that may be requested with one * getcfilters. See BIP 157. */ static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; /** * Maximum number of cf hashes that may be requested with one getcfheaders. See * BIP 157. */ static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; // Internal stuff namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; /** * Sources of received blocks, saved to be able to punish them when processing * happens afterwards. * Set mapBlockSource[hash].second to false if the node should not be punished * if the block is invalid. */ std::map> mapBlockSource GUARDED_BY(cs_main); /** * Filter for transactions that were recently rejected by AcceptToMemoryPool. 
* These are not rerequested until the chain tip changes, at which point the * entire filter is reset. * * Without this filter we'd be re-requesting txs from each of our peers, * increasing bandwidth consumption considerably. For instance, with 100 peers, * half of which relay a tx we don't accept, that might be a 50x bandwidth * increase. A flooding attacker attempting to roll-over the filter using * minimum-sized, 60byte, transactions might manage to send 1000/sec if we have * fast peers, so we pick 120,000 to give our peers a two minute window to send * invs to us. * * Decreasing the false positive rate is fairly cheap, so we pick one in a * million to make it highly unlikely for users to have issues with this filter. * * Memory used: 1.3 MB */ std::unique_ptr recentRejects GUARDED_BY(cs_main); uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); /** * Filter for transactions that have been recently confirmed. * We use this to avoid requesting transactions that have already been * confirmed. */ RecursiveMutex g_cs_recent_confirmed_transactions; std::unique_ptr g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions); /** * Blocks that are in flight, and that are in the queue to be downloaded. */ struct QueuedBlock { BlockHash hash; //! Optional. const CBlockIndex *pindex; //! Whether this block has validated headers at the time of request. bool fValidatedHeaders; //! Optional, used for CMPCTBLOCK downloads std::unique_ptr partialBlock; }; std::map::iterator>> mapBlocksInFlight GUARDED_BY(cs_main); /** Stack of nodes which we have set to announce using compact blocks */ std::list lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); /** Number of preferable block download peers. */ int nPreferredDownload GUARDED_BY(cs_main) = 0; /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; /** Number of outbound peers with m_chain_sync.m_protect. */ int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; /** When our tip was last updated. */ std::atomic g_last_tip_update(0); /** Relay map. */ typedef std::map MapRelay; MapRelay mapRelay GUARDED_BY(cs_main); /** * Expiration-time ordered list of (expire time, relay map entry) pairs, * protected by cs_main). */ std::deque> vRelayExpiration GUARDED_BY(cs_main); struct IteratorComparator { template bool operator()(const I &a, const I &b) const { return &(*a) < &(*b); } }; std::map::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans); //! For random eviction std::vector::iterator> g_orphan_list GUARDED_BY(g_cs_orphans); static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; static std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); } // namespace namespace { /** * Maintain validation-specific state about nodes, protected by cs_main, instead * by CNode's own locks. This simplifies asynchronous operation, where * processing of incoming data is done after the ProcessMessage call returns, * and we're no longer holding the node's locks. */ struct CNodeState { //! The peer's address const CService address; //! Whether we have a fully established connection. bool fCurrentlyConnected; //! The best known block we know this peer has announced. const CBlockIndex *pindexBestKnownBlock; //! The hash of the last unknown block this peer has announced. BlockHash hashLastUnknownBlock; //! The last full block we both have. const CBlockIndex *pindexLastCommonBlock; //! The best header we have sent our peer. 
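// The 120,000-entry size of recentRejects follows directly from the flooding
// scenario described above: roughly 1000 minimum-sized transactions per
// second, remembered for a two-minute window. A quick sketch of that
// arithmetic (constant names here are illustrative only):
namespace recent_rejects_sizing_sketch {
constexpr unsigned int kWorstCaseTxPerSecond = 1000; // 60-byte tx flood
constexpr unsigned int kWindowSeconds = 2 * 60;      // two-minute window
constexpr unsigned int kElements = kWorstCaseTxPerSecond * kWindowSeconds;
static_assert(kElements == 120000, "matches the recentRejects element count");
// The 0.000001 false positive rate then keeps accidental filter hits rare
// enough that users are highly unlikely to ever notice them.
} // namespace recent_rejects_sizing_sketch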
const CBlockIndex *pindexBestHeaderSent; //! Length of current-streak of unconnecting headers announcements int nUnconnectingHeaders; //! Whether we've started headers synchronization with this peer. bool fSyncStarted; //! When to potentially disconnect peer for stalling headers download int64_t nHeadersSyncTimeout; //! Since when we're stalling block download progress (in microseconds), or //! 0. int64_t nStallingSince; std::list vBlocksInFlight; //! When the first entry in vBlocksInFlight started downloading. Don't care //! when vBlocksInFlight is empty. int64_t nDownloadingSince; int nBlocksInFlight; int nBlocksInFlightValidHeaders; //! Whether we consider this a preferred download peer. bool fPreferredDownload; //! Whether this peer wants invs or headers (when possible) for block //! announcements. bool fPreferHeaders; //! Whether this peer wants invs or cmpctblocks (when possible) for block //! announcements. bool fPreferHeaderAndIDs; /** * Whether this peer will send us cmpctblocks if we request them. * This is not used to gate request logic, as we really only care about * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the * version of compact blocks we send. */ bool fProvidesHeaderAndIDs; /** * If we've announced NODE_WITNESS to this peer: whether the peer sends * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends * non-witnesses in cmpctblocks/blocktxns. */ bool fSupportsDesiredCmpctVersion; /** * State used to enforce CHAIN_SYNC_TIMEOUT * Only in effect for outbound, non-manual, full-relay connections, with * m_protect == false * Algorithm: if a peer's best known block has less work than our tip, set * a timeout CHAIN_SYNC_TIMEOUT seconds in the future: * - If at timeout their best known block now has more work than our tip * when the timeout was set, then either reset the timeout or clear it * (after comparing against our current tip's work) * - If at timeout their best known block still has less work than our tip * did when the timeout was set, then send a getheaders message, and set a * shorter timeout, HEADERS_RESPONSE_TIME seconds in future. If their best * known block is still behind when that new timeout is reached, disconnect. */ struct ChainSyncTimeoutState { //! A timeout used for checking whether our peer has sufficiently //! synced. int64_t m_timeout; //! A header with the work we require on our peer's chain. const CBlockIndex *m_work_header; //! After timeout is reached, set to true after sending getheaders. bool m_sent_getheaders; //! Whether this peer is protected from disconnection due to a bad/slow //! chain. bool m_protect; }; ChainSyncTimeoutState m_chain_sync; //! Time of last new block announcement int64_t m_last_block_announcement; /* * State associated with transaction download. * * Tx download algorithm: * * When inv comes in, queue up (process_time, txid) inside the peer's * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS). * * The process_time for a transaction is set to nNow for outbound peers, * nNow + 2 seconds for inbound peers. This is the time at which we'll * consider trying to request the transaction from the peer in * SendMessages(). The delay for inbound peers is to allow outbound peers * a chance to announce before we request from inbound peers, to prevent * an adversary from using inbound connections to blind us to a * transaction (InvBlock). 
* * When we call SendMessages() for a given peer, * we will loop over the transactions in m_tx_process_time, looking * at the transactions whose process_time <= nNow. We'll request each * such transaction that we don't have already and that hasn't been * requested from another peer recently, up until we hit the * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update * g_already_asked_for for each requested txid, storing the time of the * GETDATA request. We use g_already_asked_for to coordinate transaction * requests amongst our peers. * * For transactions that we still need but we have already recently * requested from some other peer, we'll reinsert (process_time, txid) * back into the peer's m_tx_process_time at the point in the future at * which the most recent GETDATA request would time out (ie * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for). * We add an additional delay for inbound peers, again to prefer * attempting download from outbound peers first. * We also add an extra small random delay up to 2 seconds * to avoid biasing some peers over others. (e.g., due to fixed ordering * of peer processing in ThreadMessageHandler). * * When we receive a transaction from a peer, we remove the txid from the * peer's m_tx_in_flight set and from their recently announced set * (m_tx_announced). We also clear g_already_asked_for for that entry, so * that if somehow the transaction is not accepted but also not added to * the reject filter, then we will eventually redownload from other * peers. */ struct TxDownloadState { /** * Track when to attempt download of announced transactions (process * time in micros -> txid) */ std::multimap m_tx_process_time; //! Store all the transactions a peer has recently announced std::set m_tx_announced; //! Store transactions which were requested by us, with timestamp std::map m_tx_in_flight; //! Periodically check for stuck getdata requests std::chrono::microseconds m_check_expiry_timer{0}; }; TxDownloadState m_tx_download; struct AvalancheState { std::chrono::time_point last_poll; }; AvalancheState m_avalanche_state; //! Whether this peer is an inbound connection bool m_is_inbound; //! Whether this peer is a manual connection bool m_is_manual_connection; CNodeState(CAddress addrIn, bool is_inbound, bool is_manual) : address(addrIn), m_is_inbound(is_inbound), m_is_manual_connection(is_manual) { fCurrentlyConnected = false; pindexBestKnownBlock = nullptr; hashLastUnknownBlock = BlockHash(); pindexLastCommonBlock = nullptr; pindexBestHeaderSent = nullptr; nUnconnectingHeaders = 0; fSyncStarted = false; nHeadersSyncTimeout = 0; nStallingSince = 0; nDownloadingSince = 0; nBlocksInFlight = 0; nBlocksInFlightValidHeaders = 0; fPreferredDownload = false; fPreferHeaders = false; fPreferHeaderAndIDs = false; fProvidesHeaderAndIDs = false; fSupportsDesiredCmpctVersion = false; m_chain_sync = {0, nullptr, false, false}; m_last_block_announcement = 0; } }; // Keeps track of the time (in microseconds) when transactions were requested // last time limitedmap g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ); /** Map maintaining per-node state. */ static std::map mapNodeState GUARDED_BY(cs_main); static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator it = mapNodeState.find(pnode); if (it == mapNodeState.end()) { return nullptr; } return &it->second; } /** * Data structure for an individual peer. This struct is not protected by * cs_main since it does not contain validation-critical data. 
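// The per-peer download state described above is, in essence, a time-ordered
// queue of announcements plus bounded "announced" and "in flight" sets. A
// stripped-down sketch of that shape (types, names and limits here are
// stand-ins, not the real TxId or CNodeState members):
#include <chrono>
#include <cstddef>
#include <map>
#include <set>
#include <string>
#include <vector>

struct TxRequestSchedulerSketch {
    using Clock = std::chrono::steady_clock;
    using TxIdSketch = std::string; // stand-in for the real TxId type

    static constexpr size_t kMaxAnnouncements = 5000;
    static constexpr size_t kMaxInFlight = 100;

    std::multimap<Clock::time_point, TxIdSketch> process_time; // when to consider a request
    std::set<TxIdSketch> announced;                            // everything the peer inv'ed
    std::set<TxIdSketch> in_flight;                            // getdata sent, not yet answered

    // Queue an announcement; inbound peers get an extra delay so that
    // outbound peers are given the first chance to serve the transaction.
    void Announce(const TxIdSketch &txid, bool inbound) {
        if (announced.size() >= kMaxAnnouncements || announced.count(txid)) {
            return;
        }
        announced.insert(txid);
        Clock::time_point when =
            Clock::now() +
            (inbound ? std::chrono::seconds{2} : std::chrono::seconds{0});
        process_time.emplace(when, txid);
    }

    // Collect transactions whose process time has passed, up to the in-flight
    // cap; the caller would then issue GETDATA for each of them.
    std::vector<TxIdSketch> GetRequestable(Clock::time_point now) {
        std::vector<TxIdSketch> out;
        auto it = process_time.begin();
        while (it != process_time.end() && it->first <= now &&
               in_flight.size() < kMaxInFlight) {
            in_flight.insert(it->second);
            out.push_back(it->second);
            it = process_time.erase(it);
        }
        return out;
    }
};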
* * Memory is owned by shared pointers and this object is destructed when * the refcount drops to zero. * * TODO: move most members from CNodeState to this structure. * TODO: move remaining application-layer data members from CNode to this * structure. */ struct Peer { /** Same id as the CNode object for this peer */ const NodeId m_id{0}; /** Protects misbehavior data members */ Mutex m_misbehavior_mutex; /** Accumulated misbehavior score for this peer */ int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0}; /** Whether this peer should be disconnected and marked as discouraged * (unless it has the noban permission). */ bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; Peer(NodeId id) : m_id(id) {} }; using PeerRef = std::shared_ptr; /** * Map of all Peer objects, keyed by peer id. This map is protected * by the global g_peer_mutex. Once a shared pointer reference is * taken, the lock may be released. Individual fields are protected by * their own locks. */ Mutex g_peer_mutex; static std::map g_peer_map GUARDED_BY(g_peer_mutex); /** * Get a shared pointer to the Peer object. * May return nullptr if the Peer object can't be found. */ static PeerRef GetPeerRef(NodeId id) { LOCK(g_peer_mutex); auto it = g_peer_map.find(id); return it != g_peer_map.end() ? it->second : nullptr; } static void UpdatePreferredDownload(const CNode &node, CNodeState *state) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient; nPreferredDownload += state->fPreferredDownload; } static void PushNodeVersion(const Config &config, CNode &pnode, CConnman &connman, int64_t nTime) { // Note that pnode.GetLocalServices() is a reflection of the local // services we were offering when the CNode object was created for this // peer. ServiceFlags nLocalNodeServices = pnode.GetLocalServices(); uint64_t nonce = pnode.GetLocalNonce(); int nNodeStartingHeight = pnode.GetMyStartingHeight(); NodeId nodeid = pnode.GetId(); CAddress addr = pnode.addr; uint64_t extraEntropy = pnode.GetLocalExtraEntropy(); CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices)); CAddress addrMe = CAddress(CService(), nLocalNodeServices); connman.PushMessage( &pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::VERSION, PROTOCOL_VERSION, uint64_t(nLocalNodeServices), nTime, addrYou, addrMe, nonce, userAgent(config), nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr, extraEntropy)); if (fLogIPs) { LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, " "peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid); } else { LogPrint( BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } // Returns a bool indicating whether we requested this block. // Also used if a block was /not/ received and timed out or started with another // peer. 
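// The Peer map above follows a "lock the map only long enough to copy the
// shared_ptr" pattern: g_peer_mutex guards the map itself, while each Peer
// field is guarded by its own mutex, so the global lock is never held while
// per-peer data is touched. A minimal standalone sketch of the same pattern
// (all names here are hypothetical):
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

namespace peer_map_sketch {

struct PeerSketch {
    std::mutex m_score_mutex;
    int m_score{0}; // guarded by m_score_mutex
};
using PeerSketchRef = std::shared_ptr<PeerSketch>;

std::mutex g_map_mutex;
std::map<int64_t, PeerSketchRef> g_map; // guarded by g_map_mutex

PeerSketchRef Get(int64_t id) {
    std::lock_guard<std::mutex> lock(g_map_mutex);
    auto it = g_map.find(id);
    return it != g_map.end() ? it->second : nullptr;
}

void AddScore(int64_t id, int delta) {
    // The map lock is already released by the time the peer's own data is
    // touched; the shared_ptr keeps the object alive even if it is removed
    // from the map concurrently.
    if (PeerSketchRef peer = Get(id)) {
        std::lock_guard<std::mutex> lock(peer->m_score_mutex);
        peer->m_score += delta;
    }
}

} // namespace peer_map_sketch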
static bool MarkBlockAsReceived(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); assert(state != nullptr); state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { // Last validated block on the queue was received. nPeersWithValidatedDownloads--; } if (state->vBlocksInFlight.begin() == itInFlight->second.second) { // First block on the queue was received, update the start download // time for the next one state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); } state->vBlocksInFlight.erase(itInFlight->second.second); state->nBlocksInFlight--; state->nStallingSince = 0; mapBlocksInFlight.erase(itInFlight); return true; } return false; } // returns false, still setting pit, if the block was already in flight from the // same peer // pit will only be valid as long as the same cs_main lock is being held. static bool MarkBlockAsInFlight(const Config &config, CTxMemPool &mempool, NodeId nodeid, const BlockHash &hash, const Consensus::Params &consensusParams, const CBlockIndex *pindex = nullptr, std::list::iterator **pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); // Short-circuit most stuff in case it is from the same node. std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { if (pit) { *pit = &itInFlight->second.second; } return false; } // Make sure it's not listed somewhere already. MarkBlockAsReceived(hash); std::list::iterator it = state->vBlocksInFlight.insert( state->vBlocksInFlight.end(), {hash, pindex, pindex != nullptr, std::unique_ptr( pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. state->nDownloadingSince = GetTimeMicros(); } if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { nPeersWithValidatedDownloads++; } itInFlight = mapBlocksInFlight .insert(std::make_pair(hash, std::make_pair(nodeid, it))) .first; if (pit) { *pit = &itInFlight->second.second; } return true; } /** Check whether the last unknown block a peer advertised is not yet known. */ static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { const CBlockIndex *pindex = LookupBlockIndex(state->hashLastUnknownBlock); if (pindex && pindex->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } state->hashLastUnknownBlock.SetNull(); } } } /** Update tracking information about which blocks a peer is assumed to have. */ static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); ProcessBlockAvailability(nodeid); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex && pindex->nChainWork > 0) { // An actually better block was announced. 
if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } } else { // An unknown block was announced; just assume that the latest one is // the best one. state->hashLastUnknownBlock = hash; } } /** * When a peer sends us a valid block, instruct it to announce blocks to us * using CMPCTBLOCK if possible by adding its nodeid to the end of * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by * removing the first element if necessary. */ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman &connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); CNodeState *nodestate = State(nodeid); if (!nodestate) { LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid); return; } if (!nodestate->fProvidesHeaderAndIDs) { return; } for (std::list::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { if (*it == nodeid) { lNodesAnnouncingHeaderAndIDs.erase(it); lNodesAnnouncingHeaderAndIDs.push_back(nodeid); return; } } connman.ForNode(nodeid, [&connman](CNode *pfrom) { AssertLockHeld(cs_main); uint64_t nCMPCTBLOCKVersion = 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. connman.ForNode( lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) { AssertLockHeld(cs_main); connman.PushMessage( pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); return true; }); } static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (g_last_tip_update == 0) { g_last_tip_update = GetTime(); } return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); } static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; } static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) { return true; } if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) { return true; } return false; } /** * Update pindexLastCommonBlock and add not-in-flight missing successors to * vBlocks, until it has at most count entries. */ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector &vBlocks, NodeId &nodeStaller, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (count == 0) { return; } vBlocks.reserve(vBlocks.size() + count); CNodeState *state = State(nodeid); assert(state != nullptr); // Make sure pindexBestKnownBlock is up to date, we'll need it. 
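// MaybeSetPeerAsAnnouncingHeaderAndIDs() above keeps at most three peers in
// high-bandwidth compact-block mode (per BIP152), rotating out the oldest
// entry when a new peer qualifies. The bookkeeping on its own looks roughly
// like this sketch, where the callbacks stand in for the SENDCMPCT messages:
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <list>

class CompactAnnouncerRotationSketch {
    static constexpr size_t kMaxAnnouncers = 3;
    std::list<int64_t> m_announcers; // oldest at the front

public:
    // `stop` would send SENDCMPCT(false) to the evicted peer and `start`
    // would send SENDCMPCT(true) to the newly promoted one.
    void Promote(int64_t nodeid, const std::function<void(int64_t)> &stop,
                 const std::function<void(int64_t)> &start) {
        auto it = std::find(m_announcers.begin(), m_announcers.end(), nodeid);
        if (it != m_announcers.end()) {
            // Already announcing: just refresh its position to most recent.
            m_announcers.erase(it);
            m_announcers.push_back(nodeid);
            return;
        }
        if (m_announcers.size() >= kMaxAnnouncers) {
            stop(m_announcers.front());
            m_announcers.pop_front();
        }
        start(nodeid);
        m_announcers.push_back(nodeid);
    }
};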
ProcessBlockAvailability(nodeid); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has nothing interesting. return; } if (state->pindexLastCommonBlock == nullptr) { // Bootstrap quickly by guessing a parent of our best tip is the forking // point. Guessing wrong in either direction is not a problem. state->pindexLastCommonBlock = ::ChainActive()[std::min( state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())]; } // If the peer reorganized, our previous pindexLastCommonBlock may not be an // ancestor of its current tip anymore. Go back enough to fix that. state->pindexLastCommonBlock = LastCommonAncestor( state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) { return; } std::vector vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in // common with this peer. The +1 is so we can detect stalling, namely if we // would be able to download that next block if the window were 1 larger. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; int nMaxHeight = std::min(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { // Read up to 128 (or more, if more blocks than that are needed) // successors of pindexWalk (towards pindexBestKnownBlock) into // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as // expensive as iterating over ~100 CBlockIndex* entries anyway. int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max(count - vBlocks.size(), 128)); vToFetch.resize(nToFetch); pindexWalk = state->pindexBestKnownBlock->GetAncestor( pindexWalk->nHeight + nToFetch); vToFetch[nToFetch - 1] = pindexWalk; for (unsigned int i = nToFetch - 1; i > 0; i--) { vToFetch[i - 1] = vToFetch[i]->pprev; } // Iterate over those blocks in vToFetch (in forward direction), adding // the ones that are not yet downloaded and not in flight to vBlocks. In // the meantime, update pindexLastCommonBlock as long as all ancestors // are already downloaded, or if it's already part of our chain (and // therefore don't need it even if pruned). for (const CBlockIndex *pindex : vToFetch) { if (!pindex->IsValid(BlockValidity::TREE)) { // We consider the chain that this peer is on invalid. return; } if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) { if (pindex->HaveTxsDownloaded()) { state->pindexLastCommonBlock = pindex; } } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != nodeid) { // We aren't able to fetch anything, but we would be if // the download window was one larger. nodeStaller = waitingfor; } return; } vBlocks.push_back(pindex); if (vBlocks.size() == count) { return; } } else if (waitingfor == -1) { // This is the first already-in-flight block. 
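// The download-window logic of FindNextBlocksToDownload() reduces to: only
// request heights up to BLOCK_DOWNLOAD_WINDOW past the last block shared
// with the peer, look one height further so stalling can be detected, and if
// nothing fits in the window report the peer holding the first in-flight
// block as the staller. A simplified height-based sketch of that loop
// (types, names and the window size are stand-ins):
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <set>
#include <vector>

struct DownloadWindowSketch {
    static constexpr int kWindow = 1024; // stand-in for BLOCK_DOWNLOAD_WINDOW

    std::set<int> have;               // heights already downloaded
    std::map<int, int64_t> in_flight; // height -> peer currently fetching it

    std::vector<int> NextToDownload(int last_common, int best_known,
                                    size_t count, int64_t &staller) {
        std::vector<int> result;
        staller = -1;
        const int window_end = last_common + kWindow;
        // The +1 lets us notice that we could make progress if the window
        // were one block larger, i.e. that someone is stalling us.
        const int max_height = std::min(best_known, window_end + 1);
        int64_t waiting_for = -1;
        for (int h = last_common + 1; h <= max_height; ++h) {
            if (have.count(h)) {
                continue;
            }
            auto it = in_flight.find(h);
            if (it == in_flight.end()) {
                if (h > window_end) {
                    if (result.empty()) {
                        staller = waiting_for;
                    }
                    return result;
                }
                result.push_back(h);
                if (result.size() == count) {
                    return result;
                }
            } else if (waiting_for == -1) {
                waiting_for = it->second; // first block blocking the window
            }
        }
        return result;
    }
};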
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; } } } } void EraseTxRequest(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { g_already_asked_for.erase(txid); } std::chrono::microseconds GetTxRequestTime(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { auto it = g_already_asked_for.find(txid); if (it != g_already_asked_for.end()) { return it->second; } return {}; } void UpdateTxRequestTime(const TxId &txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { auto it = g_already_asked_for.find(txid); if (it == g_already_asked_for.end()) { g_already_asked_for.insert(std::make_pair(txid, request_time)); } else { g_already_asked_for.update(it, request_time); } } std::chrono::microseconds CalculateTxGetDataTime(const TxId &txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::chrono::microseconds process_time; const auto last_request_time = GetTxRequestTime(txid); // First time requesting this tx if (last_request_time.count() == 0) { process_time = current_time; } else { // Randomize the delay to avoid biasing some peers over others (such as // due to fixed ordering of peer processing in ThreadMessageHandler) process_time = last_request_time + GETDATA_TX_INTERVAL + GetRandMicros(MAX_GETDATA_RANDOM_DELAY); } // We delay processing announcements from inbound peers if (use_inbound_delay) { process_time += INBOUND_PEER_TX_DELAY; } return process_time; } void RequestTx(CNodeState *state, const TxId &txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState::TxDownloadState &peer_download_state = state->m_tx_download; if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_announced.count(txid)) { // Too many queued announcements from this peer, or we already have // this announcement return; } peer_download_state.m_tx_announced.insert(txid); // Calculate the time to try requesting this transaction. Use // fPreferredDownload as a proxy for outbound peers. 
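// Combining the constants defined earlier with CalculateTxGetDataTime()
// above: a transaction heard about for the first time is requested
// immediately from an outbound peer and after INBOUND_PEER_TX_DELAY (2s)
// from an inbound one; a transaction already requested elsewhere is retried
// GETDATA_TX_INTERVAL (60s) plus a random delay of at most 2s later; and a
// request that is never answered expires after TX_EXPIRY_INTERVAL, i.e.
// 10 * GETDATA_TX_INTERVAL = 10 minutes. A small sketch of that schedule
// (the helper name and parameters are hypothetical):
#include <chrono>

static std::chrono::microseconds
NextRequestTimeSketch(std::chrono::microseconds now,
                      std::chrono::microseconds last_request, bool inbound,
                      std::chrono::microseconds random_delay) {
    using namespace std::chrono_literals;
    std::chrono::microseconds when =
        last_request.count() == 0
            ? now                                // first time we hear of it
            : last_request + 60s + random_delay; // retry after GETDATA_TX_INTERVAL
    if (inbound) {
        when += 2s; // give outbound peers a head start
    }
    return when;
}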
const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload); peer_download_state.m_tx_process_time.emplace(process_time, txid); } } // namespace // This function is used for testing the stale tip eviction logic, see // denialofservice_tests.cpp void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) { LOCK(cs_main); CNodeState *state = State(node); if (state) { state->m_last_block_announcement = time_in_seconds; } } void PeerLogicValidation::InitializeNode(const Config &config, CNode *pnode) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn())); } { PeerRef peer = std::make_shared(nodeid); LOCK(g_peer_mutex); g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer)); } if (!pnode->IsInboundConn()) { PushNodeVersion(config, *pnode, m_connman, GetTime()); } } void PeerLogicValidation::FinalizeNode(const Config &config, NodeId nodeid, bool &fUpdateConnectionTime) { fUpdateConnectionTime = false; LOCK(cs_main); int misbehavior{0}; { PeerRef peer = GetPeerRef(nodeid); assert(peer != nullptr); misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); LOCK(g_peer_mutex); g_peer_map.erase(nodeid); } CNodeState *state = State(nodeid); assert(state != nullptr); if (state->fSyncStarted) { nSyncStarted--; } if (misbehavior == 0 && state->fCurrentlyConnected) { fUpdateConnectionTime = true; } for (const QueuedBlock &entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.hash); } EraseOrphansFor(nodeid); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(g_outbound_peers_with_protect_from_disconnect >= 0); mapNodeState.erase(nodeid); if (mapNodeState.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); assert(g_outbound_peers_with_protect_from_disconnect == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { { LOCK(cs_main); CNodeState *state = State(nodeid); if (state == nullptr) { return false; } stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; stats.nCommonHeight = state->pindexLastCommonBlock ? 
state->pindexLastCommonBlock->nHeight : -1; for (const QueuedBlock &queue : state->vBlocksInFlight) { if (queue.pindex) { stats.vHeightInFlight.push_back(queue.pindex->nHeight); } } } PeerRef peer = GetPeerRef(nodeid); if (peer == nullptr) { return false; } stats.m_misbehavior_score = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); return true; } ////////////////////////////////////////////////////////////////////////////// // // mapOrphanTransactions // static void AddToCompactExtraTransactions(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) { return; } if (!vExtraTxnForCompact.size()) { vExtraTxnForCompact.resize(max_extra_txn); } vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetHash(), tx); vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { const TxId &txid = tx->GetId(); if (mapOrphanTransactions.count(txid)) { return false; } // Ignore big transactions, to avoid a send-big-orphans memory exhaustion // attack. If a peer has a legitimate large transaction with a missing // parent then we assume it will rebroadcast it later, after the parent // transaction(s) have been mined or received. // 100 orphans, each of which is at most 100,000 bytes big is at most 10 // megabytes of orphans and somewhat more byprev index (in the worst case): unsigned int sz = tx->GetTotalSize(); if (sz > MAX_STANDARD_TX_SIZE) { LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, txid.ToString()); return false; } auto ret = mapOrphanTransactions.emplace( txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()}); assert(ret.second); g_orphan_list.push_back(ret.first); for (const CTxIn &txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } AddToCompactExtraTransactions(tx); LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", txid.ToString(), mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); return true; } static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { const auto it = mapOrphanTransactions.find(id); if (it == mapOrphanTransactions.end()) { return 0; } for (const CTxIn &txin : it->second.tx->vin) { const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itPrev == mapOrphanTransactionsByPrev.end()) { continue; } itPrev->second.erase(it); if (itPrev->second.empty()) { mapOrphanTransactionsByPrev.erase(itPrev); } } size_t old_pos = it->second.list_pos; assert(g_orphan_list[old_pos] == it); if (old_pos + 1 != g_orphan_list.size()) { // Unless we're deleting the last entry in g_orphan_list, move the last // entry to the position we're deleting. auto it_last = g_orphan_list.back(); g_orphan_list[old_pos] = it_last; it_last->second.list_pos = old_pos; } g_orphan_list.pop_back(); mapOrphanTransactions.erase(it); return 1; } void EraseOrphansFor(NodeId peer) { LOCK(g_cs_orphans); int nErased = 0; auto iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { // Increment to avoid iterator becoming invalid. 
const auto maybeErase = iter++; if (maybeErase->second.fromPeer == peer) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } } if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); } } unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) { LOCK(g_cs_orphans); unsigned int nEvicted = 0; static int64_t nNextSweep; int64_t nNow = GetTime(); if (nNextSweep <= nNow) { // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; auto iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { const auto maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } } // Sweep again 5 minutes after the next entry that expires in order to // batch the linear scan. nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); } } FastRandomContext rng; while (mapOrphanTransactions.size() > nMaxOrphans) { // Evict a random orphan: size_t randompos = rng.randrange(g_orphan_list.size()); EraseOrphanTx(g_orphan_list[randompos]->first); ++nEvicted; } return nEvicted; } /** * Increment peer's misbehavior score. If the new value >= * DISCOURAGEMENT_THRESHOLD, mark the node to be discouraged, meaning the peer * might be disconnected and added to the discouragement filter. */ void Misbehaving(const NodeId pnode, const int howmuch, const std::string &message) { assert(howmuch > 0); PeerRef peer = GetPeerRef(pnode); if (peer == nullptr) { return; } LOCK(peer->m_misbehavior_mutex); peer->m_misbehavior_score += howmuch; const std::string message_prefixed = message.empty() ? "" : (": " + message); if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) { LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); peer->m_should_discourage = true; } else { LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); } } // overloaded variant of above to operate on CNode*s static void Misbehaving(const CNode &node, int howmuch, const std::string &message) { Misbehaving(node.GetId(), howmuch, message); } /** * Returns true if the given validation state result may result in a peer * banning/disconnecting us. We use this to determine which unaccepted * transactions from a whitelisted peer that we can safely relay. */ static bool TxRelayMayResultInDisconnect(const TxValidationState &state) { return state.GetResult() == TxValidationResult::TX_CONSENSUS; } /** * Potentially ban a node based on the contents of a BlockValidationState object * * @param[in] via_compact_block this bool is passed in because net_processing * should punish peers differently depending on whether the data was provided in * a compact block message or not. If the compact block had a valid header, but * contained invalid txs, the peer should not be punished. See BIP 152. 
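// LimitOrphanTxSize() above evicts a uniformly random orphan once the pool
// is over its cap, and that stays cheap only because g_orphan_list gives
// O(1) access to a random entry while EraseOrphanTx() removes entries with
// the swap-with-last trick. The container shape, reduced to its essentials
// (types and names are stand-ins for the orphan maps):
#include <cstddef>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

class RandomEvictionPoolSketch {
    struct Entry {
        std::string payload;
        size_t list_pos; // index of this entry inside m_list
    };
    using Map = std::unordered_map<std::string, Entry>;
    Map m_map;                         // id -> entry
    std::vector<Map::iterator> m_list; // dense array of live entries

public:
    void Add(const std::string &id, std::string payload) {
        auto [it, inserted] =
            m_map.emplace(id, Entry{std::move(payload), m_list.size()});
        if (inserted) {
            m_list.push_back(it);
        }
    }

    void Erase(const std::string &id) {
        auto it = m_map.find(id);
        if (it == m_map.end()) {
            return;
        }
        const size_t old_pos = it->second.list_pos;
        if (old_pos + 1 != m_list.size()) {
            // Move the last entry into the vacated slot so the vector stays
            // dense and any position can be drawn uniformly at random.
            m_list[old_pos] = m_list.back();
            m_list[old_pos]->second.list_pos = old_pos;
        }
        m_list.pop_back();
        m_map.erase(it);
    }

    // Evict uniformly random entries until the pool fits under max_entries.
    void Limit(size_t max_entries, std::mt19937_64 &rng) {
        while (m_map.size() > max_entries) {
            std::uniform_int_distribution<size_t> dist(0, m_list.size() - 1);
            const std::string victim = m_list[dist(rng)]->first;
            Erase(victim);
        }
    }
};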
* * @return Returns true if the peer was punished (probably disconnected) */ static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message = "") { switch (state.GetResult()) { case BlockValidationResult::BLOCK_RESULT_UNSET: break; // The node is providing invalid data: case BlockValidationResult::BLOCK_CONSENSUS: case BlockValidationResult::BLOCK_MUTATED: if (!via_compact_block) { Misbehaving(nodeid, 100, message); return true; } break; case BlockValidationResult::BLOCK_CACHED_INVALID: { LOCK(cs_main); CNodeState *node_state = State(nodeid); if (node_state == nullptr) { break; } // Ban outbound (but not inbound) peers if on an invalid chain. // Exempt HB compact block peers and manual connections. if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) { Misbehaving(nodeid, 100, message); return true; } break; } case BlockValidationResult::BLOCK_INVALID_HEADER: case BlockValidationResult::BLOCK_CHECKPOINT: case BlockValidationResult::BLOCK_INVALID_PREV: Misbehaving(nodeid, 100, message); return true; case BlockValidationResult::BLOCK_FINALIZATION: // TODO: Use the state object to report this is probably not the // best idea. This is effectively unreachable, unless there is a bug // somewhere. Misbehaving(nodeid, 20, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case BlockValidationResult::BLOCK_MISSING_PREV: // TODO: Handle this much more gracefully (10 DoS points is super // arbitrary) Misbehaving(nodeid, 10, message); return true; case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE: case BlockValidationResult::BLOCK_TIME_FUTURE: break; } if (message != "") { LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message); } return false; } /** * Potentially ban a node based on the contents of a TxValidationState object * * @return Returns true if the peer was punished (probably disconnected) * * Changes here may need to be reflected in TxRelayMayResultInDisconnect(). */ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message = "") { switch (state.GetResult()) { case TxValidationResult::TX_RESULT_UNSET: break; // The node is providing invalid data: case TxValidationResult::TX_CONSENSUS: Misbehaving(nodeid, 100, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: case TxValidationResult::TX_NOT_STANDARD: case TxValidationResult::TX_MISSING_INPUTS: case TxValidationResult::TX_PREMATURE_SPEND: case TxValidationResult::TX_CONFLICT: case TxValidationResult::TX_MEMPOOL_POLICY: break; } if (message != "") { LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message); } return false; } ////////////////////////////////////////////////////////////////////////////// // // blockchain -> download logic notification // // To prevent fingerprinting attacks, only send blocks/headers outside of the // active chain if they are no more than a month older (both in time, and in // best equivalent proof of work) than the best header chain we know about and // we fully-validated them at some point. 
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (::ChainActive().Contains(pindex)) { return true; } return pindex->IsValid(BlockValidity::SCRIPTS) && (pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT); } PeerLogicValidation::PeerLogicValidation(CConnman &connman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool) : m_connman(connman), m_banman(banman), m_chainman(chainman), m_mempool(pool), m_stale_tip_check_time(0) { // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); // Blocks don't typically have more than 4000 transactions, so this should // be at least six blocks (~1 hr) worth of transactions that we can store. // If the number of transactions appearing in a block goes up, or if we are // seeing getdata requests more than an hour after initial announcement, we // can increase this number. // The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). g_recent_confirmed_transactions.reset( new CRollingBloomFilter(24000, 0.000001)); const Consensus::Params &consensusParams = Params().GetConsensus(); // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we // combine them in one function and schedule at the quicker (peer-eviction) // timer. static_assert( EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); scheduler.scheduleEvery( [this, &consensusParams]() { this->CheckForStaleTipAndEvictPeers(consensusParams); return true; }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); } /** * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected * block. Also save the time of the last tip update. */ void PeerLogicValidation::BlockConnected( const std::shared_ptr &pblock, const CBlockIndex *pindex) { { LOCK(g_cs_orphans); std::vector vOrphanErase; for (const CTransactionRef &ptx : pblock->vtx) { const CTransaction &tx = *ptx; // Which orphan pool entries must we evict? 
for (const auto &txin : tx.vin) { auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransaction &orphanTx = *(*mi)->second.tx; const TxId &orphanId = orphanTx.GetId(); vOrphanErase.push_back(orphanId); } } } // Erase orphan transactions included or precluded by this block if (vOrphanErase.size()) { int nErased = 0; for (const auto &orphanId : vOrphanErase) { nErased += EraseOrphanTx(orphanId); } LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } g_last_tip_update = GetTime(); } { LOCK(g_cs_recent_confirmed_transactions); for (const CTransactionRef &ptx : pblock->vtx) { g_recent_confirmed_transactions->insert(ptx->GetId()); } } } void PeerLogicValidation::BlockDisconnected( const std::shared_ptr &block, const CBlockIndex *pindex) { // To avoid relay problems with transactions that were previously // confirmed, clear our filter of recently confirmed transactions whenever // there's a reorg. // This means that in a 1-block reorg (where 1 block is disconnected and // then another block reconnected), our filter will drop to having only one // block's worth of transactions in it, but that should be fine, since // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found. LOCK(g_cs_recent_confirmed_transactions); g_recent_confirmed_transactions->reset(); } // All of the following cache a recent block, and are protected by // cs_most_recent_block static RecursiveMutex cs_most_recent_block; static std::shared_ptr most_recent_block GUARDED_BY(cs_most_recent_block); static std::shared_ptr most_recent_compact_block GUARDED_BY(cs_most_recent_block); static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block); /** * Maintain state about the best-seen block and fast-announce a compact block * to compatible peers. */ void PeerLogicValidation::NewPoWValidBlock( const CBlockIndex *pindex, const std::shared_ptr &pblock) { std::shared_ptr pcmpctblock = std::make_shared(*pblock); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); LOCK(cs_main); static int nHighestFastAnnounce = 0; if (pindex->nHeight <= nHighestFastAnnounce) { return; } nHighestFastAnnounce = pindex->nHeight; uint256 hashBlock(pblock->GetHash()); { LOCK(cs_most_recent_block); most_recent_block_hash = hashBlock; most_recent_block = pblock; most_recent_compact_block = pcmpctblock; } m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, &hashBlock](CNode *pnode) { AssertLockHeld(cs_main); // TODO: Avoid the repeated-serialization here if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) { return; } ProcessBlockAvailability(pnode->GetId()); CNodeState &state = *State(pnode->GetId()); // If the peer has, or we announced to them the previous block already, // but we don't think they have this one, go ahead and announce it. if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock", hashBlock.ToString(), pnode->GetId()); m_connman.PushMessage( pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock)); state.pindexBestHeaderSent = pindex; } }); } /** * Update our best height and announce any block hashes which weren't previously * in ::ChainActive() to our peers. 
*/ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { const int nNewHeight = pindexNew->nHeight; m_connman.SetBestHeight(nNewHeight); SetServiceFlagsIBDCache(!fInitialDownload); if (!fInitialDownload) { // Find the hashes of all blocks that weren't previously in the best // chain. std::vector vHashes; const CBlockIndex *pindexToAnnounce = pindexNew; while (pindexToAnnounce != pindexFork) { vHashes.push_back(pindexToAnnounce->GetBlockHash()); pindexToAnnounce = pindexToAnnounce->pprev; if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { // Limit announcements in case of a huge reorganization. Rely on // the peer's synchronization mechanism in that case. break; } } // Relay inventory, but don't relay old inventory during initial block // download. m_connman.ForEachNode([nNewHeight, &vHashes](CNode *pnode) { if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) { for (const BlockHash &hash : reverse_iterate(vHashes)) { pnode->PushBlockHash(hash); } } }); m_connman.WakeMessageHandler(); } } /** * Handle invalid block rejection and consequent peer banning, maintain which * peers announce compact blocks. */ void PeerLogicValidation::BlockChecked(const CBlock &block, const BlockValidationState &state) { LOCK(cs_main); const BlockHash hash = block.GetHash(); std::map>::iterator it = mapBlockSource.find(hash); // If the block failed validation, we know where it came from and we're // still connected to that peer, maybe punish. if (state.IsInvalid() && it != mapBlockSource.end() && State(it->second.first)) { MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state, /*via_compact_block=*/!it->second.second); } // Check that: // 1. The block is valid // 2. We're not in initial block download // 3. This is currently the best block we're aware of. We haven't updated // the tip yet so we have no way to check this directly here. Instead we // just check that there are currently no other blocks in flight. else if (state.IsValid() && !::ChainstateActive().IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman); } } if (it != mapBlockSource.end()) { mapBlockSource.erase(it); } } ////////////////////////////////////////////////////////////////////////////// // // Messages // static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { switch (inv.type) { case MSG_TX: { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions // might be now valid, e.g. due to a nLockTime'd tx becoming // valid, or a double-spend. Reset the rejects filter and give // those txs a second chance. 
hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash(); recentRejects->reset(); } const TxId txid(inv.hash); { LOCK(g_cs_orphans); if (mapOrphanTransactions.count(txid)) { return true; } } { LOCK(g_cs_recent_confirmed_transactions); if (g_recent_confirmed_transactions->contains(txid)) { return true; } } return recentRejects->contains(txid) || mempool.exists(txid); } case MSG_BLOCK: return LookupBlockIndex(BlockHash(inv.hash)) != nullptr; } // Don't know what it is, just say we already got one return true; } void RelayTransaction(const TxId &txid, const CConnman &connman) { CInv inv(MSG_TX, txid); connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); }); } static void RelayAddress(const CAddress &addr, bool fReachable, const CConnman &connman) { // Limited relaying of addresses outside our network(s) unsigned int nRelayNodes = fReachable ? 2 : 1; // Relay to a limited number of other nodes. // Use deterministic randomness to send to the same nodes for 24 hours at a // time so the m_addr_knowns of the chosen nodes prevent repeats uint64_t hashAddr = addr.GetHash(); const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hashAddr << 32) .Write((GetTime() + hashAddr) / (24 * 60 * 60)); FastRandomContext insecure_rand; std::array, 2> best{ {{0, nullptr}, {0, nullptr}}}; assert(nRelayNodes <= best.size()); auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) { if (pnode->IsAddrRelayPeer()) { uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); best[i] = std::make_pair(hashKey, pnode); break; } } } }; auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] { for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { best[i].second->PushAddress(addr, insecure_rand); } }; connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } static void ProcessGetBlockData(const Config &config, CNode &pfrom, const CInv &inv, CConnman &connman, const std::atomic &interruptMsgProc) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); const BlockHash hash(inv.hash); bool send = false; std::shared_ptr a_recent_block; std::shared_ptr a_recent_compact_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; a_recent_compact_block = most_recent_compact_block; } bool need_activate_chain = false; { LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BlockValidity::SCRIPTS) && pindex->IsValid(BlockValidity::TREE)) { // If we have the block and all of its parents, but have not yet // validated it, we might be in the middle of connecting it (ie // in the unlock of cs_main before ActivateBestChain but after // AcceptBlock). In this case, we need to run ActivateBestChain // prior to checking the relay conditions below. 
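// RelayAddress() above keeps the choice of relay targets stable for a day:
// each candidate peer is scored with a keyed hash of the address and the
// current 24-hour bucket, and the one or two highest-scoring peers win. A
// simplified sketch of that selection using std::hash in place of the keyed
// CSipHasher (the real code also skips peers that do not relay addresses):
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

static std::vector<int64_t>
SelectAddrRelayTargetsSketch(uint64_t addr_hash, int64_t now_seconds,
                             const std::vector<int64_t> &node_ids,
                             size_t relay_count) {
    // Same bucketing as above: the choice only changes once per 24 hours.
    const uint64_t day_bucket =
        (uint64_t(now_seconds) + addr_hash) / (24 * 60 * 60);
    std::vector<std::pair<uint64_t, int64_t>> scored;
    scored.reserve(node_ids.size());
    for (int64_t id : node_ids) {
        const uint64_t score = std::hash<uint64_t>{}(
            addr_hash ^ (day_bucket << 1) ^
            (uint64_t(id) * 0x9e3779b97f4a7c15ULL));
        scored.emplace_back(score, id);
    }
    std::sort(scored.begin(), scored.end(),
              [](const auto &a, const auto &b) { return a.first > b.first; });
    std::vector<int64_t> best;
    for (size_t i = 0; i < scored.size() && i < relay_count; ++i) {
        best.push_back(scored[i].second);
    }
    return best;
}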
need_activate_chain = true; } } } // release cs_main before calling ActivateBestChain if (need_activate_chain) { BlockValidationState state; if (!ActivateBestChain(config, state, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { send = BlockRequestAllowed(pindex, consensusParams); if (!send) { LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old " "block that isn't in the main chain\n", __func__, pfrom.GetId()); } } const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); // Disconnect node in case we have reached the outbound limit for serving // historical blocks. // Never disconnect whitelisted nodes. if (send && connman.OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId()); // disconnect node pfrom.fDisconnect = true; send = false; } // Avoid leaking prune-height by never sending blocks below the // NODE_NETWORK_LIMITED threshold. // Add two blocks buffer extension for possible races if (send && !pfrom.HasPermission(PF_NOBAN) && ((((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) { LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED " "threshold from peer=%d\n", pfrom.GetId()); // disconnect node and prevent it from stalling (would otherwise wait // for the missing block) pfrom.fDisconnect = true; send = false; } // Pruned nodes may have deleted the block, so check whether it's available // before trying to send. if (send && pindex->nStatus.hasData()) { std::shared_ptr pblock; if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { pblock = a_recent_block; } else { // Send block from disk std::shared_ptr pblockRead = std::make_shared(); if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) { assert(!"cannot load block from disk"); } pblock = pblockRead; } if (inv.type == MSG_BLOCK) { connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); } else if (inv.type == MSG_FILTERED_BLOCK) { bool sendMerkleBlock = false; CMerkleBlock merkleBlock; if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.m_tx_relay->pfilter) { sendMerkleBlock = true; merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter); } } if (sendMerkleBlock) { connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); // CMerkleBlock just contains hashes, so also push any // transactions in the block the client did not see. This avoids // hurting performance by pointlessly requiring a round-trip. // Note that there is currently no way for a node to request any // single transactions we didn't send here - they must either // disconnect and retry or request the full block. Thus, the // protocol spec specified allows for us to provide duplicate // txn here, however we MUST always provide at least what the // remote peer needs. 
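// The NODE_NETWORK_LIMITED check above boils down to a depth comparison: a
// node advertising only limited service refuses to serve blocks more than
// NODE_NETWORK_LIMITED_MIN_BLOCKS below the tip, plus a two-block buffer
// against races around a freshly connected tip. In isolation (the 288-block
// figure is the BIP159 minimum; the names here are illustrative):
static bool CanServeLimitedDepthSketch(int tip_height, int block_height,
                                       bool network_limited_only) {
    constexpr int kMinServedBlocks = 288; // NODE_NETWORK_LIMITED_MIN_BLOCKS
    constexpr int kRaceBuffer = 2;
    if (!network_limited_only) {
        return true; // full NODE_NETWORK peers may serve any unpruned block
    }
    return tip_height - block_height <= kMinServedBlocks + kRaceBuffer;
}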
typedef std::pair PairType; for (PairType &pair : merkleBlock.vMatchedTxn) { connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first])); } } // else // no response } else if (inv.type == MSG_CMPCT_BLOCK) { // If a peer is asking for old blocks, we're almost guaranteed they // won't have a useful mempool to match against a compact block, and // we don't feel like constructing the object for them, so instead // we respond with the full, non-compact block. int nSendFlags = 0; if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) { CBlockHeaderAndShortTxIDs cmpctblock(*pblock); connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } else { connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); } } // Trigger the peer node to send a getblocks request for the next batch // of inventory. if (hash == pfrom.hashContinue) { // Bypass PushInventory, this must send even if redundant, and we // want it right after the last block so they don't wait for other // stuff first. std::vector vInv; vInv.push_back( CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash())); connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv)); pfrom.hashContinue = BlockHash(); } } } static void ProcessGetData(const Config &config, CNode &pfrom, CConnman &connman, const CTxMemPool &mempool, const std::atomic &interruptMsgProc) LOCKS_EXCLUDED(cs_main) { AssertLockNotHeld(cs_main); std::deque::iterator it = pfrom.vRecvGetData.begin(); std::vector vNotFound; const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); // Note that if we receive a getdata for a MSG_TX from a block-relay-only // outbound peer, we will stop processing further getdata messages from this // peer (likely resulting in our peer eventually disconnecting us). if (pfrom.m_tx_relay != nullptr) { // mempool entries added before this time have likely expired from // mapRelay const std::chrono::seconds longlived_mempool_time = GetTime() - RELAY_TX_CACHE_TIME; const std::chrono::seconds mempool_req = pfrom.m_tx_relay->m_last_mempool_req.load(); LOCK(cs_main); while (it != pfrom.vRecvGetData.end() && it->type == MSG_TX) { if (interruptMsgProc) { return; } // Don't bother if send buffer is too full to respond anyway. if (pfrom.fPauseSend) { break; } const CInv &inv = *it; it++; // Send stream from relay memory bool push = false; auto mi = mapRelay.find(inv.hash); int nSendFlags = 0; if (mi != mapRelay.end()) { connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); push = true; } else { auto txinfo = mempool.info(TxId(inv.hash)); // To protect privacy, do not answer getdata using the mempool // when that TX couldn't have been INVed in reply to a MEMPOOL // request, or when it's too recent to have expired from // mapRelay. 
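// Sketch of the condition below: serve the transaction from the mempool only
// if (a) the peer issued a MEMPOOL request and the tx was already in the
// mempool at that time (txinfo.m_time <= mempool_req), or (b) the tx entered
// the mempool more than RELAY_TX_CACHE_TIME ago and so has likely already
// expired from mapRelay (txinfo.m_time <= longlived_mempool_time).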
if (txinfo.tx && ((mempool_req.count() && txinfo.m_time <= mempool_req) || (txinfo.m_time <= longlived_mempool_time))) { connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx)); push = true; } } if (!push) { vNotFound.push_back(inv); } } } // release cs_main if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) { const CInv &inv = *it; if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { it++; ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc); } } // Unknown types in the GetData stay in vRecvGetData and block any future // message from this peer, see vRecvGetData check in ProcessMessages(). // Depending on future p2p changes, we might either drop unknown getdata on // the floor or disconnect the peer. pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it); if (!vNotFound.empty()) { // Let the peer know that we didn't find what it asked for, so it // doesn't have to wait around forever. SPV clients care about this // message: it's needed when they are recursively walking the // dependencies of relevant unconfirmed transactions. SPV clients want // to do that because they want to know about (and store and rebroadcast // and risk analyze) the dependencies of transactions relevant to them, // without having to download the entire memory pool. Also, other nodes // can use these messages to automatically request a transaction from // some other peer that annnounced it, and stop waiting for us to // respond. In normal operation, we often send NOTFOUND messages for // parents of transactions that we relay; if a peer is missing a parent, // they may assume we have them and request the parents from us. connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } inline static void SendBlockTransactions(const CBlock &block, const BlockTransactionsRequest &req, CNode &pfrom, CConnman &connman) { BlockTransactions resp(req); for (size_t i = 0; i < req.indices.size(); i++) { if (req.indices[i] >= block.vtx.size()) { Misbehaving(pfrom, 100, "getblocktxn with out-of-bounds tx indices"); return; } resp.txn[i] = block.vtx[req.indices[i]]; } LOCK(cs_main); const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); int nSendFlags = 0; connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); } static void ProcessHeadersMessage(const Config &config, CNode &pfrom, CConnman &connman, CTxMemPool &mempool, ChainstateManager &chainman, const std::vector &headers, bool via_compact_block) { const CChainParams &chainparams = config.GetChainParams(); const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); size_t nCount = headers.size(); if (nCount == 0) { // Nothing interesting. Stop asking this peers for more headers. return; } bool received_new_header = false; const CBlockIndex *pindexLast = nullptr; { LOCK(cs_main); CNodeState *nodestate = State(pfrom.GetId()); // If this looks like it could be a block announcement (nCount < // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that // don't connect: // - Send a getheaders message in response to try to connect the chain. // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that // don't connect before giving DoS points // - Once a headers message is received that is valid and does connect, // nUnconnectingHeaders gets reset back to 0. 
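// Example of the flow below: a peer announces a new tip whose parent we have
// never seen; we reply with a getheaders from pindexBestHeader, bump
// nUnconnectingHeaders, and remember the announced hash via
// UpdateBlockAvailability. Only after MAX_UNCONNECTING_HEADERS such messages
// in a row does the peer accumulate misbehavior points.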
if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); LogPrint( BCLog::NET, "received header %s: missing prev block %s, sending getheaders " "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", headers[0].GetHash().ToString(), headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight, pfrom.GetId(), nodestate->nUnconnectingHeaders); // Set hashLastUnknownBlock for this peer, so that if we eventually // get the headers - even from a different peer - we can use this // peer to download. UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()); if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { // The peer is sending us many headers we can't connect. Misbehaving(pfrom, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders)); } return; } BlockHash hashLastBlock; for (const CBlockHeader &header : headers) { if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { Misbehaving(pfrom, 20, "non-continuous headers sequence"); return; } hashLastBlock = header.GetHash(); } // If we don't have the last header, then they'll have given us // something new (if these headers are valid). if (!LookupBlockIndex(hashLastBlock)) { received_new_header = true; } } BlockValidationState state; if (!chainman.ProcessNewBlockHeaders(config, headers, state, &pindexLast)) { if (state.IsInvalid()) { MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); return; } } { LOCK(cs_main); CNodeState *nodestate = State(pfrom.GetId()); if (nodestate->nUnconnectingHeaders > 0) { LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders); } nodestate->nUnconnectingHeaders = 0; assert(pindexLast); UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash()); // From here, pindexBestKnownBlock should be guaranteed to be non-null, // because it is set in UpdateBlockAvailability. Some nullptr checks are // still present, however, as belt-and-suspenders. if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) { nodestate->m_last_block_announcement = GetTime(); } if (nCount == MAX_HEADERS_RESULTS) { // Headers message had its maximum size; the peer may have more // headers. // TODO: optimize: if pindexLast is an ancestor of // ::ChainActive().Tip or pindexBestHeader, continue from there // instead. LogPrint( BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight); connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256())); } bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); // If this set of headers is valid and ends in a block with at least as // much work as our tip, download as much as possible. if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) { std::vector vToFetch; const CBlockIndex *pindexWalk = pindexLast; // Calculate all the blocks we'd need to switch to pindexLast, up to // a limit. 
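// Sketch of the walk below: starting from pindexLast, follow pprev toward the
// active chain, queueing any block we neither have on disk nor already have
// in flight, and stop once the active chain is reached or roughly
// MAX_BLOCKS_IN_TRANSIT_PER_PEER candidates have been collected.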
while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!pindexWalk->nStatus.hasData() && !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { // We don't have this block, and it's not yet in flight. vToFetch.push_back(pindexWalk); } pindexWalk = pindexWalk->pprev; } // If pindexWalk still isn't on our main chain, we're looking at a // very large reorg at a time we think we're close to caught up to // the main chain -- this shouldn't really happen. Bail out on the // direct fetch and rely on parallel download instead. if (!::ChainActive().Contains(pindexWalk)) { LogPrint( BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } else { std::vector vGetData; // Download as much as possible, from earliest to latest. for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { // Can't download any more from this peer break; } vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(config, mempool, pfrom.GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } if (vGetData.size() > 1) { LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers " "direct fetch\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } if (vGetData.size() > 0) { if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BlockValidity::CHAIN)) { // In any case, we want to download using a compact // block, not a regular one. vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } } // If we're in IBD, we want outbound peers that will serve us a useful // chain. Disconnect peers that are on chains with insufficient work. if (::ChainstateActive().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) { // When nCount < MAX_HEADERS_RESULTS, we know we have no more // headers to fetch from this peer. if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has too little work on their headers chain to help // us sync -- disconnect if using an outbound slot (unless // whitelisted or addnode). // Note: We compare their tip to nMinimumChainWork (rather than // ::ChainActive().Tip()) because we won't start block download // until we have a headers chain that has at least // nMinimumChainWork, even if a peer has a chain past our tip, // as an anti-DoS measure. if (pfrom.IsOutboundOrBlockRelayConn()) { LogPrintf("Disconnecting outbound peer %d -- headers " "chain has insufficient work\n", pfrom.GetId()); pfrom.fDisconnect = true; } } } if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { // If this is an outbound full-relay peer, check to see if we should // protect it from the bad/lagging chain logic. Note that // block-relay-only peers are already implicitly protected, so we // only consider setting m_protect for the full-relay peers. 
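// Illustrative summary of the check below: an outbound full-relay peer is
// protected from the bad/lagging-chain eviction only while fewer than
// MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT peers are protected and its
// best known block has at least as much work as our current tip.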
if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); nodestate->m_chain_sync.m_protect = true; ++g_outbound_peers_with_protect_from_disconnect; } } } } void static ProcessOrphanTx(const Config &config, CConnman &connman, CTxMemPool &mempool, std::set &orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) { AssertLockHeld(cs_main); AssertLockHeld(g_cs_orphans); std::unordered_map rejectCountPerNode; bool done = false; while (!done && !orphan_work_set.empty()) { const TxId orphanTxId = *orphan_work_set.begin(); orphan_work_set.erase(orphan_work_set.begin()); auto orphan_it = mapOrphanTransactions.find(orphanTxId); if (orphan_it == mapOrphanTransactions.end()) { continue; } const CTransactionRef porphanTx = orphan_it->second.tx; const CTransaction &orphanTx = *porphanTx; NodeId fromPeer = orphan_it->second.fromPeer; // Use a new TxValidationState because orphans come from different peers // (and we call MaybePunishNodeForTx based on the source peer from the // orphan map, not based on the peer that relayed the previous // transaction). TxValidationState orphan_state; auto it = rejectCountPerNode.find(fromPeer); if (it != rejectCountPerNode.end() && it->second > MAX_NON_STANDARD_ORPHAN_PER_NODE) { continue; } if (AcceptToMemoryPool(config, mempool, orphan_state, porphanTx, false /* bypass_limits */, Amount::zero() /* nAbsurdFee */)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanTxId.ToString()); RelayTransaction(orphanTxId, connman); for (size_t i = 0; i < orphanTx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto &elem : it_by_prev->second) { orphan_work_set.insert(elem->first); } } } EraseOrphanTx(orphanTxId); done = true; } else if (orphan_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { if (orphan_state.IsInvalid()) { // Punish peer that gave us an invalid orphan tx MaybePunishNodeForTx(fromPeer, orphan_state); LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanTxId.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanTxId.ToString()); assert(recentRejects); recentRejects->insert(orphanTxId); EraseOrphanTx(orphanTxId); done = true; } mempool.check(&::ChainstateActive().CoinsTip()); } } /** * Validation logic for compact filters request handling. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] chain_params Chain parameters * @param[in] filter_type The filter type the request is for. Must be * basic filters. * @param[in] start_height The start height for the request * @param[in] stop_hash The stop_hash for the request * @param[in] max_height_diff The maximum number of items permitted to * request, as specified in BIP 157 * @param[out] stop_index The CBlockIndex for the stop_hash block, if the * request can be serviced. * @param[out] filter_index The filter index, if the request can be * serviced. * @return True if the request can be serviced. 
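 * For example (illustrative): a getcfilters/getcfheaders request whose
 * start_height is above the stop block's height, or which asks for more than
 * max_height_diff items, is rejected and the peer is disconnected.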
*/ static bool PrepareBlockFilterRequest( CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type, uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff, const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) { const bool supported_filter_type = (filter_type == BlockFilterType::BASIC && (peer.GetLocalServices() & NODE_COMPACT_FILTERS)); if (!supported_filter_type) { LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n", peer.GetId(), static_cast(filter_type)); peer.fDisconnect = true; return false; } { LOCK(cs_main); stop_index = LookupBlockIndex(stop_hash); // Check that the stop block exists and the peer would be allowed to // fetch it. if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) { LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n", peer.GetId(), stop_hash.ToString()); peer.fDisconnect = true; return false; } } uint32_t stop_height = stop_index->nHeight; if (start_height > stop_height) { LogPrint( BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */ "start height %d and stop height %d\n", peer.GetId(), start_height, stop_height); peer.fDisconnect = true; return false; } if (stop_height - start_height >= max_height_diff) { LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n", peer.GetId(), stop_height - start_height + 1, max_height_diff); peer.fDisconnect = true; return false; } filter_index = GetBlockFilterIndex(filter_type); if (!filter_index) { LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); return false; } return true; } /** * Handle a cfilters request. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; uint32_t start_height; BlockHash stop_hash; vRecv >> filter_type_ser >> start_height >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { return; } std::vector filters; if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, " "start_height=%d, stop_hash=%s\n", BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); return; } for (const auto &filter : filters) { CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFILTER, filter); connman.PushMessage(&peer, std::move(msg)); } } /** * Handle a cfheaders request. * * May disconnect from the peer in the case of a bad request. 
* * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; uint32_t start_height; BlockHash stop_hash; vRecv >> filter_type_ser >> start_height >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { return; } uint256 prev_header; if (start_height > 0) { const CBlockIndex *const prev_block = stop_index->GetAncestor(static_cast(start_height - 1)); if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { LogPrint(BCLog::NET, "Failed to find block filter header in index: " "filter_type=%s, block_hash=%s\n", BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); return; } } std::vector filter_hashes; if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, " "start_height=%d, stop_hash=%s\n", BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); return; } CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFHEADERS, filter_type_ser, stop_index->GetBlockHash(), prev_header, filter_hashes); connman.PushMessage(&peer, std::move(msg)); } /** * Handle a getcfcheckpt request. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; BlockHash stop_hash; vRecv >> filter_type_ser >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, /*start_height=*/0, stop_hash, /*max_height_diff=*/std::numeric_limits::max(), stop_index, filter_index)) { return; } std::vector headers(stop_index->nHeight / CFCHECKPT_INTERVAL); // Populate headers. 
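// Sketch of the loop below: checkpoints are the filter headers at every
// CFCHECKPT_INTERVAL-th height up to the stop block, so headers[i] is looked
// up at height (i + 1) * CFCHECKPT_INTERVAL by walking GetAncestor() down
// from stop_index.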
const CBlockIndex *block_index = stop_index; for (int i = headers.size() - 1; i >= 0; i--) { int height = (i + 1) * CFCHECKPT_INTERVAL; block_index = block_index->GetAncestor(height); if (!filter_index->LookupFilterHeader(block_index, headers[i])) { LogPrint(BCLog::NET, "Failed to find block filter header in index: " "filter_type=%s, block_hash=%s\n", BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); return; } } CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFCHECKPT, filter_type_ser, stop_index->GetBlockHash(), headers); connman.PushMessage(&peer, std::move(msg)); } void PeerLogicValidation::ProcessMessage( const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, int64_t nTimeReceived, const std::atomic &interruptMsgProc) { const CChainParams &chainparams = config.GetChainParams(); LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) { LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); return; } if (!(pfrom.GetLocalServices() & NODE_BLOOM) && (msg_type == NetMsgType::FILTERLOAD || msg_type == NetMsgType::FILTERADD)) { if (pfrom.nVersion >= NO_BLOOM_VERSION) { Misbehaving(pfrom, 100, "no-bloom-version"); } else { pfrom.fDisconnect = true; } return; } if (msg_type == NetMsgType::VERSION) { // Each connection can only send one version message if (pfrom.nVersion != 0) { Misbehaving(pfrom, 1, "redundant version message"); return; } int64_t nTime; CAddress addrMe; CAddress addrFrom; uint64_t nNonce = 1; uint64_t nServiceInt; ServiceFlags nServices; int nVersion; int nSendVersion; std::string cleanSubVer; int nStartingHeight = -1; bool fRelay = true; uint64_t nExtraEntropy = 1; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nSendVersion = std::min(nVersion, PROTOCOL_VERSION); nServices = ServiceFlags(nServiceInt); if (!pfrom.IsInboundConn()) { m_connman.SetServices(pfrom.addr, nServices); } if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services " "(%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices)); pfrom.fDisconnect = true; return; } if (nVersion < MIN_PEER_PROTO_VERSION) { // disconnect from peers older than this proto version LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion); pfrom.fDisconnect = true; return; } if (!vRecv.empty()) { vRecv >> addrFrom >> nNonce; } if (!vRecv.empty()) { std::string strSubVer; vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); cleanSubVer = SanitizeString(strSubVer); } if (!vRecv.empty()) { vRecv >> nStartingHeight; } if (!vRecv.empty()) { vRecv >> fRelay; } if (!vRecv.empty()) { vRecv >> nExtraEntropy; } // Disconnect if we connected to ourself if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString()); pfrom.fDisconnect = true; return; } if (pfrom.IsInboundConn() && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear if (pfrom.IsInboundConn()) { PushNodeVersion(config, pfrom, m_connman, GetAdjustedTime()); } m_connman.PushMessage( &pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); pfrom.nServices = nServices; pfrom.SetAddrLocal(addrMe); { LOCK(pfrom.cs_SubVer); 
pfrom.cleanSubVer = cleanSubVer; } pfrom.nStartingHeight = nStartingHeight; // set nodes not relaying blocks and tx and not serving (parts) of the // historical blockchain as "clients" pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED)); // set nodes not capable of serving the complete blockchain history as // "limited nodes" pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED)); if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); // set to true after we get the first filter* message pfrom.m_tx_relay->fRelayTxes = fRelay; } // Change version pfrom.SetSendVersion(nSendVersion); pfrom.nVersion = nVersion; pfrom.nRemoteHostNonce = nNonce; pfrom.nRemoteExtraEntropy = nExtraEntropy; // Potentially mark this peer as a preferred download peer. { LOCK(cs_main); UpdatePreferredDownload(pfrom, State(pfrom.GetId())); } if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer()) { // Advertise our address if (fListen && !::ChainstateActive().IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices()); FastRandomContext insecure_rand; if (addr.IsRoutable()) { LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom.PushAddress(addr, insecure_rand); } else if (IsPeerAddrLocalGood(&pfrom)) { addr.SetIP(addrMe); LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom.PushAddress(addr, insecure_rand); } } // Get recent addresses m_connman.PushMessage( &pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); pfrom.fGetAddr = true; m_connman.MarkAddressGood(pfrom.addr); } std::string remoteAddr; if (fLogIPs) { remoteAddr = ", peeraddr=" + pfrom.addr.ToString(); } LogPrint(BCLog::NET, "receive version message: [%s] %s: version %d, blocks=%d, " "us=%s, peer=%d%s\n", pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion, pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(), remoteAddr); // Ignore time offsets that are improbable (before the Genesis block) // and may underflow the nTimeOffset calculation. int64_t currentTime = GetTime(); if (nTime >= int64_t(chainparams.GenesisBlock().nTime)) { int64_t nTimeOffset = nTime - currentTime; pfrom.nTimeOffset = nTimeOffset; AddTimeData(pfrom.addr, nTimeOffset); } else { Misbehaving(pfrom, 20, "Ignoring invalid timestamp in version message"); } // Feeler connections exist only to verify if address is online. if (pfrom.IsFeelerConn()) { pfrom.fDisconnect = true; } return; } if (pfrom.nVersion == 0) { // Must have a version message before anything else Misbehaving(pfrom, 10, "non-version message before version handshake"); return; } // At this point, the outgoing message serialization version can't change. const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); if (msg_type == NetMsgType::VERACK) { pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION)); if (!pfrom.IsInboundConn()) { // Mark this node as currently connected, so we update its timestamp // later. LOCK(cs_main); State(pfrom.GetId())->fCurrentlyConnected = true; LogPrintf( "New outbound peer connected: version: %d, blocks=%d, " "peer=%d%s (%s)\n", pfrom.nVersion.load(), pfrom.nStartingHeight, pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""), pfrom.m_tx_relay == nullptr ? 
"block-relay" : "full-relay"); } if (pfrom.nVersion >= SENDHEADERS_VERSION) { // Tell our peer we prefer to receive headers rather than inv's // We send this to non-NODE NETWORK peers as well, because even // non-NODE NETWORK peers can announce blocks (such as pruning // nodes) m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); } + if (pfrom.nVersion >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 1 or 2 // cmpctblocks. However, we do not request new block announcements // using cmpctblock messages. We send this to non-NODE NETWORK peers // as well, because they may wish to request compact blocks from us. bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); } + + if ((pfrom.nServices & NODE_AVALANCHE) && g_avalanche && + gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { + if (g_avalanche->sendHello(&pfrom)) { + LogPrint(BCLog::NET, "Send avahello to peer %d\n", + pfrom.GetId()); + } + } + pfrom.fSuccessfullyConnected = true; return; } if (!pfrom.fSuccessfullyConnected) { // Must have a verack message before anything else Misbehaving(pfrom, 10, "non-verack message before version handshake"); return; } if (msg_type == NetMsgType::ADDR) { std::vector vAddr; vRecv >> vAddr; if (!pfrom.IsAddrRelayPeer()) { return; } if (vAddr.size() > 1000) { Misbehaving(pfrom, 20, strprintf("oversized-addr: message addr size() = %u", vAddr.size())); return; } // Store the new addresses std::vector vAddrOk; int64_t nNow = GetAdjustedTime(); int64_t nSince = nNow - 10 * 60; for (CAddress &addr : vAddr) { if (interruptMsgProc) { return; } // We only bother storing full nodes, though this may include things // which we would not make an outbound connection to, in part // because we may make feeler connections to them. if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) { continue; } if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) { addr.nTime = nNow - 5 * 24 * 60 * 60; } pfrom.AddAddressKnown(addr); if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { // Do not process banned/discouraged addresses beyond // remembering we received them continue; } bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes RelayAddress(addr, fReachable, m_connman); } // Do not store addresses outside our network if (fReachable) { vAddrOk.push_back(addr); } } m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60); if (vAddr.size() < 1000) { pfrom.fGetAddr = false; } if (pfrom.IsAddrFetchConn()) { pfrom.fDisconnect = true; } return; } if (msg_type == NetMsgType::SENDHEADERS) { LOCK(cs_main); State(pfrom.GetId())->fPreferHeaders = true; return; } if (msg_type == NetMsgType::SENDCMPCT) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 0; vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; if (nCMPCTBLOCKVersion == 1) { LOCK(cs_main); // fProvidesHeaderAndIDs is used to "lock in" version of compact // blocks we send. 
if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) { State(pfrom.GetId())->fProvidesHeaderAndIDs = true; } State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) { State(pfrom.GetId())->fSupportsDesiredCmpctVersion = true; } } return; } if (msg_type == NetMsgType::INV) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom, 20, strprintf("oversized-inv: message inv size() = %u", vInv.size())); return; } // We won't accept tx inv's if we're in blocks-only mode, or this is a // block-relay-only peer bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr); // Allow whitelisted peers to send data other than blocks in blocks only // mode if whitelistrelay is true if (pfrom.HasPermission(PF_RELAY)) { fBlocksOnly = false; } LOCK(cs_main); const auto current_time = GetTime(); for (CInv &inv : vInv) { if (interruptMsgProc) { return; } bool fAlreadyHave = AlreadyHave(inv, m_mempool); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); if (inv.type == MSG_BLOCK) { const BlockHash hash(inv.hash); UpdateBlockAvailability(pfrom.GetId(), hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(hash)) { // We used to request the full block here, but since // headers-announcements are now the primary method of // announcement on the network, and since, in the case that // a node fell back to inv we probably have a reorg which we // should get the headers for first, we now only provide a // getheaders response here. When we receive the headers, we // will then ask for the blocks we need. m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator( pindexBestHeader), hash)); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, hash.ToString(), pfrom.GetId()); } } else { const TxId txid(inv.hash); pfrom.AddKnownTx(txid); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of " "protocol, disconnecting peer=%d\n", txid.ToString(), pfrom.GetId()); pfrom.fDisconnect = true; return; } else if (!fAlreadyHave && !fImporting && !fReindex && !::ChainstateActive().IsInitialBlockDownload()) { RequestTx(State(pfrom.GetId()), txid, current_time); } } } return; } if (msg_type == NetMsgType::GETDATA) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom, 20, strprintf("too-many-inv: message getdata size() = %u", vInv.size())); return; } LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); if (vInv.size() > 0) { LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); } pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end()); ProcessGetData(config, pfrom, m_connman, m_mempool, interruptMsgProc); return; } if (msg_type == NetMsgType::GETBLOCKS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; if (locator.vHave.size() > MAX_LOCATOR_SZ) { LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); pfrom.fDisconnect = true; return; } // We might have announced the currently-being-connected tip using a // compact block, which resulted in the peer sending a getblocks // request, which we would otherwise respond to without the new block. // To avoid this situation we simply verify that we are on our best // known chain now. 
This is super overkill, but we handle it better // for getheaders requests, and there are no known nodes which support // compact blocks but still use getblocks to request blocks. { std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } BlockValidationState state; if (!ActivateBestChain(config, state, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } LOCK(cs_main); // Find the last block the caller has in the main chain const CBlockIndex *pindex = FindForkInGlobalIndex(::ChainActive(), locator); // Send the rest of the chain if (pindex) { pindex = ::ChainActive().Next(pindex); } int nLimit = 500; LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); for (; pindex; pindex = ::ChainActive().Next(pindex)) { if (pindex->GetBlockHash() == hashStop) { LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } // If pruning, don't inv blocks unless we have on disk and are // likely to still have for some reasonable time window (1 hour) // that block relay might require. const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing; if (fPruneMode && (!pindex->nStatus.hasData() || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { LogPrint( BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } pfrom.PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll // trigger the peer to getblocks the next batch of inventory. LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom.hashContinue = pindex->GetBlockHash(); break; } } return; } if (msg_type == NetMsgType::GETBLOCKTXN) { BlockTransactionsRequest req; vRecv >> req; std::shared_ptr recent_block; { LOCK(cs_most_recent_block); if (most_recent_block_hash == req.blockhash) { recent_block = most_recent_block; } // Unlock cs_most_recent_block to avoid cs_main lock inversion } if (recent_block) { SendBlockTransactions(*recent_block, req, pfrom, m_connman); return; } LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(req.blockhash); if (!pindex || !pindex->nStatus.hasData()) { LogPrint( BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); return; } if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) { // If an older block is requested (should never happen in practice, // but can happen in tests) send a block response instead of a // blocktxn response. Sending a full block response instead of a // small blocktxn response is preferable in the case where a peer // might maliciously send lots of getblocktxn requests to trigger // expensive disk reads, because it will require the peer to // actually receive all the data read from disk over the network. 
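// Sketch of what follows: instead of a blocktxn reply, the request is turned
// into a MSG_BLOCK getdata entry on our own vRecvGetData queue, so the next
// pass of the message loop serves the full block without holding cs_main.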
LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); CInv inv; inv.type = MSG_BLOCK; inv.hash = req.blockhash; pfrom.vRecvGetData.push_back(inv); // The message processing loop will go around again (without // pausing) and we'll respond then (without cs_main) return; } CBlock block; bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()); assert(ret); SendBlockTransactions(block, req, pfrom, m_connman); return; } if (msg_type == NetMsgType::GETHEADERS) { CBlockLocator locator; BlockHash hashStop; vRecv >> locator >> hashStop; if (locator.vHave.size() > MAX_LOCATOR_SZ) { LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); pfrom.fDisconnect = true; return; } LOCK(cs_main); if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in " "initial block download\n", pfrom.GetId()); return; } CNodeState *nodestate = State(pfrom.GetId()); const CBlockIndex *pindex = nullptr; if (locator.IsNull()) { // If locator is null, return the hashStop block pindex = LookupBlockIndex(hashStop); if (!pindex) { return; } if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) { LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block " "header that isn't in the main chain\n", __func__, pfrom.GetId()); return; } } else { // Find the last block the caller has in the main chain pindex = FindForkInGlobalIndex(::ChainActive(), locator); if (pindex) { pindex = ::ChainActive().Next(pindex); } } // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx // count at the end std::vector vHeaders; int nLimit = MAX_HEADERS_RESULTS; LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); for (; pindex; pindex = ::ChainActive().Next(pindex)) { vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) { break; } } // pindex can be nullptr either if we sent ::ChainActive().Tip() OR // if our peer has ::ChainActive().Tip() (and thus we are sending an // empty headers message). In both cases it's safe to update // pindexBestHeaderSent to be our tip. // // It is important that we simply reset the BestHeaderSent value here, // and not max(BestHeaderSent, newHeaderSent). We might have announced // the currently-being-connected tip using a compact block, which // resulted in the peer sending a headers request, which we respond to // without the new block. By resetting the BestHeaderSent, we ensure we // will re-announce the new block via headers (or compact blocks again) // in the SendMessages logic. nodestate->pindexBestHeaderSent = pindex ? 
pindex : ::ChainActive().Tip(); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); return; } if (msg_type == NetMsgType::TX) { // Stop processing the transaction early if // We are in blocks only mode and peer is either not whitelisted or // whitelistrelay is off or if this peer is supposed to be a // block-relay-only peer if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr)) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; return; } CTransactionRef ptx; vRecv >> ptx; const CTransaction &tx = *ptx; const TxId &txid = tx.GetId(); pfrom.AddKnownTx(txid); LOCK2(cs_main, g_cs_orphans); TxValidationState state; CNodeState *nodestate = State(pfrom.GetId()); nodestate->m_tx_download.m_tx_announced.erase(txid); nodestate->m_tx_download.m_tx_in_flight.erase(txid); EraseTxRequest(txid); if (!AlreadyHave(CInv(MSG_TX, txid), m_mempool) && AcceptToMemoryPool(config, m_mempool, state, ptx, false /* bypass_limits */, Amount::zero() /* nAbsurdFee */)) { m_mempool.check(&::ChainstateActive().CoinsTip()); RelayTransaction(tx.GetId(), m_connman); for (size_t i = 0; i < tx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto &elem : it_by_prev->second) { pfrom.orphan_work_set.insert(elem->first); } } } pfrom.nLastTXTime = GetTime(); LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s " "(poolsz %u txn, %u kB)\n", pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); // Recursively process any orphan transactions that depended on this // one ProcessOrphanTx(config, m_connman, m_mempool, pfrom.orphan_work_set); } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { // It may be the case that the orphans parents have all been // rejected. bool fRejectedParents = false; for (const CTxIn &txin : tx.vin) { if (recentRejects->contains(txin.prevout.GetTxId())) { fRejectedParents = true; break; } } if (!fRejectedParents) { const auto current_time = GetTime(); for (const CTxIn &txin : tx.vin) { // FIXME: MSG_TX should use a TxHash, not a TxId. const TxId _txid = txin.prevout.GetTxId(); pfrom.AddKnownTx(_txid); if (!AlreadyHave(CInv(MSG_TX, _txid), m_mempool)) { RequestTx(State(pfrom.GetId()), _txid, current_time); } } AddOrphanTx(ptx, pfrom.GetId()); // DoS prevention: do not allow mapOrphanTransactions to grow // unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTx = (unsigned int)std::max( int64_t(0), gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); if (nEvicted > 0) { LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted); } } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n", tx.GetId().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. recentRejects->insert(tx.GetId()); } } else { assert(recentRejects); recentRejects->insert(tx.GetId()); if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } if (pfrom.HasPermission(PF_FORCERELAY)) { // Always relay transactions received from whitelisted peers, // even if they were already in the mempool or rejected from it // due to policy, allowing the node to function as a gateway for // nodes hidden behind it. 
// // Never relay transactions that might result in being // disconnected (or banned). if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) { LogPrintf("Not relaying invalid transaction %s from " "whitelisted peer=%d (%s)\n", tx.GetId().ToString(), pfrom.GetId(), state.ToString()); } else { LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetId().ToString(), pfrom.GetId()); RelayTransaction(tx.GetId(), m_connman); } } } // If a tx has been detected by recentRejects, we will have reached // this point and the tx will have been ignored. Because we haven't run // the tx through AcceptToMemoryPool, we won't have computed a DoS // score for it or determined exactly why we consider it invalid. // // This means we won't penalize any peer subsequently relaying a DoSy // tx (even if we penalized the first peer who gave it to us) because // we have to account for recentRejects showing false positives. In // other words, we shouldn't penalize a peer if we aren't *sure* they // submitted a DoSy tx. // // Note that recentRejects doesn't just record DoSy or invalid // transactions, but any tx not accepted by the mempool, which may be // due to node policy (vs. consensus). So we can't blanket penalize a // peer simply for relaying a tx that our recentRejects has caught, // regardless of false positives. if (state.IsInvalid()) { LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), pfrom.GetId(), state.ToString()); MaybePunishNodeForTx(pfrom.GetId(), state); } return; } if (msg_type == NetMsgType::CMPCTBLOCK) { // Ignore cmpctblock received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); return; } CBlockHeaderAndShortTxIDs cmpctblock; vRecv >> cmpctblock; bool received_new_header = false; { LOCK(cs_main); if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in // AcceptBlockHeader, request deeper headers if (!::ChainstateActive().IsInitialBlockDownload()) { m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator( pindexBestHeader), uint256())); } return; } if (!LookupBlockIndex(cmpctblock.header.GetHash())) { received_new_header = true; } } const CBlockIndex *pindex = nullptr; BlockValidationState state; if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header}, state, &pindex)) { if (state.IsInvalid()) { MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock"); return; } } // When we succeed in decoding a block's txids from a cmpctblock // message we typically jump to the BLOCKTXN handling code, with a // dummy (empty) BLOCKTXN message, to re-use the logic there in // completing processing of the putative block (without cs_main). bool fProcessBLOCKTXN = false; CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); // If we end up treating this as a plain headers message, call that as // well // without cs_main. 
bool fRevertToHeaderProcessing = false; // Keep a CBlock for "optimistic" compactblock reconstructions (see // below) std::shared_ptr pblock = std::make_shared(); bool fBlockReconstructed = false; { LOCK2(cs_main, g_cs_orphans); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); CNodeState *nodestate = State(pfrom.GetId()); // If this was a new header with more work than our tip, update the // peer's last block announcement time if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) { nodestate->m_last_block_announcement = GetTime(); } std::map::iterator>>:: iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); if (pindex->nStatus.hasData()) { // Nothing to do here return; } if (pindex->nChainWork <= ::ChainActive() .Tip() ->nChainWork || // We know something better pindex->nTx != 0) { // We had this block at some point, but pruned it if (fAlreadyInFlight) { // We requested this block for some reason, but our mempool // will probably be useless so we just grab the block via // normal getdata. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return; } // If we're not close to tip yet, give up and let parallel block // fetch work its magic. if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) { return; } // We want to be a bit conservative just to be extra careful about // DoS possibilities in compact block processing... if (pindex->nHeight <= ::ChainActive().Height() + 2) { if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list::iterator *queuedBlockIt = nullptr; if (!MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) { (*queuedBlockIt) ->partialBlock.reset( new PartiallyDownloadedBlock(config, &m_mempool)); } else { // The block was already in flight using compact // blocks from the same peer. LogPrint(BCLog::NET, "Peer sent us compact block " "we were already syncing!\n"); return; } } PartiallyDownloadedBlock &partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist MarkBlockAsReceived(pindex->GetBlockHash()); Misbehaving(pfrom, 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { // Duplicate txindices, the block is now in-flight, so // just request it. 
std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } BlockTransactionsRequest req; for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { if (!partialBlock.IsTxAvailable(i)) { req.indices.push_back(i); } } if (req.indices.empty()) { // Dirty hack to jump to BLOCKTXN code (TODO: move // message handling into their own functions) BlockTransactions txn; txn.blockhash = cmpctblock.header.GetHash(); blockTxnMsg << txn; fProcessBLOCKTXN = true; } else { req.blockhash = pindex->GetBlockHash(); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); } } else { // This block is either already in flight from a different // peer, or this peer has too many blocks outstanding to // download from. Optimistically try to reconstruct anyway // since we might be able to without any round trips. PartiallyDownloadedBlock tempBlock(config, &m_mempool); ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status != READ_STATUS_OK) { // TODO: don't ignore failures return; } std::vector dummy; status = tempBlock.FillBlock(*pblock, dummy); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } } } else { if (fAlreadyInFlight) { // We requested this block, but its far into the future, so // our mempool will probably be useless - request the block // normally. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } else { // If this was an announce-cmpctblock, we want the same // treatment as a header message. fRevertToHeaderProcessing = true; } } } // cs_main if (fProcessBLOCKTXN) { return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, interruptMsgProc); } if (fRevertToHeaderProcessing) { // Headers received from HB compact block peers are permitted to be // relayed before full validation (see BIP 152), so we don't want to // disconnect the peer if the header turns out to be for an invalid // block. Note that if a peer tries to build on an invalid chain, // that will be detected and the peer will be banned. return ProcessHeadersMessage(config, pfrom, m_connman, m_mempool, m_chainman, {cmpctblock.header}, /*via_compact_block=*/true); } if (fBlockReconstructed) { // If we got here, we were able to optimistically reconstruct a // block that is in flight from some other peer. { LOCK(cs_main); mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); } bool fNewBlock = false; // Setting fForceProcessing to true means that we bypass some of // our anti-DoS protections in AcceptBlock, which filters // unrequested blocks that might be trying to waste our resources // (eg disk space). Because we only try to reconstruct blocks when // we're close to caught up (via the CanDirectFetch() requirement // above, combined with the behavior of not requesting blocks until // we have a chain with at least nMinimumChainWork), and we ignore // compact blocks with less work than our tip, it is safe to treat // reconstructed compact blocks as having been requested. 
m_chainman.ProcessNewBlock(config, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(pblock->GetHash()); } // hold cs_main for CBlockIndex::IsValid() LOCK(cs_main); if (pindex->IsValid(BlockValidity::TRANSACTIONS)) { // Clear download state for this block, which is in process from // some other peer. We do this after calling. ProcessNewBlock so // that a malleated cmpctblock announcement can't be used to // interfere with block relay. MarkBlockAsReceived(pblock->GetHash()); } } return; } if (msg_type == NetMsgType::BLOCKTXN) { // Ignore blocktxn received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); return; } BlockTransactions resp; vRecv >> resp; std::shared_ptr pblock = std::make_shared(); bool fBlockRead = false; { LOCK(cs_main); std::map::iterator>>:: iterator it = mapBlocksInFlight.find(resp.blockhash); if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || it->second.first != pfrom.GetId()) { LogPrint(BCLog::NET, "Peer %d sent us block transactions for block " "we weren't expecting\n", pfrom.GetId()); return; } PartiallyDownloadedBlock &partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist. MarkBlockAsReceived(resp.blockhash); Misbehaving( pfrom, 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector invs; invs.push_back(CInv(MSG_BLOCK, resp.blockhash)); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. // Note that CheckBlock can only fail for one of a few reasons: // 1. bad-proof-of-work (impossible here, because we've already // accepted the header) // 2. merkleroot doesn't match the transactions given (already // caught in FillBlock with READ_STATUS_FAILED, so // impossible here) // 3. the block is otherwise invalid (eg invalid coinbase, // block is too big, too many legacy sigops, etc). // So if CheckBlock failed, #3 is the only possibility. // Under BIP 152, we don't DoS-ban unless proof of work is // invalid (we don't require all the stateless checks to have // been run). This is handled below, so just treat this as // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, etc. // it is now an empty pointer MarkBlockAsReceived(resp.blockhash); fBlockRead = true; // mapBlockSource is used for potentially punishing peers and // updating which peers send us compact blocks, so the race // between here and cs_main in ProcessNewBlock is fine. // BIP 152 permits peers to relay compact blocks after // validating the header only; we should not punish peers // if the block turns out to be invalid. 
mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false)); } } // Don't hold cs_main when we call into ProcessNewBlock if (fBlockRead) { bool fNewBlock = false; // Since we requested this block (it was in mapBlocksInFlight), // force it to be processed, even if it would not be a candidate for // new tip (missing previous block, chain not long enough, etc) // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent // disk-space attacks), but this should be safe due to the // protections in the compact block handler -- see related comment // in compact block optimistic reconstruction handling. m_chainman.ProcessNewBlock(config, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(pblock->GetHash()); } } return; } if (msg_type == NetMsgType::HEADERS) { // Ignore headers received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); return; } std::vector headers; // Bypass the normal CBlock deserialization, as we don't want to risk // deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { Misbehaving(pfrom, 20, strprintf("too-many-headers: headers message size = %u", nCount)); return; } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { vRecv >> headers[n]; // Ignore tx count; assume it is 0. ReadCompactSize(vRecv); } return ProcessHeadersMessage(config, pfrom, m_connman, m_mempool, m_chainman, headers, /*via_compact_block=*/false); } if (msg_type == NetMsgType::BLOCK) { // Ignore block received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); return; } std::shared_ptr pblock = std::make_shared(); vRecv >> *pblock; LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); // Process all blocks from whitelisted peers, even if not requested, // unless we're still syncing with the network. Such an unrequested // block may still be processed, subject to the conditions in // AcceptBlock(). bool forceProcessing = pfrom.HasPermission(PF_NOBAN) && !::ChainstateActive().IsInitialBlockDownload(); const BlockHash hash = pblock->GetHash(); { LOCK(cs_main); // Also always process if we requested the block explicitly, as we // may need it even though it is not a candidate for a new best tip. forceProcessing |= MarkBlockAsReceived(hash); // mapBlockSource is only used for punishing peers and setting // which peers send us compact blocks, so the race between here and // cs_main in ProcessNewBlock is fine. 
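// Each entry in the headers payload parsed above is a full 80-byte block
// header followed by a compact-size transaction count that is always zero
// (headers reuse the block serialization with an empty transaction list),
// which is why the loop reads and then discards one compact-size per header.
// A small sanity check of the resulting payload size; the constant names are
// illustrative, with 2000 assumed to be the MAX_HEADERS_RESULTS bound
// enforced above.
#include <cstddef>

constexpr size_t SERIALIZED_HEADER_SIZE = 80; // version, prev hash, merkle root, time, bits, nonce
constexpr size_t MAX_HEADERS_PER_MESSAGE = 2000;
// A 3-byte compact-size prefix for the count, then 81 bytes per entry.
constexpr size_t MAX_HEADERS_PAYLOAD =
    3 + MAX_HEADERS_PER_MESSAGE * (SERIALIZED_HEADER_SIZE + 1);
static_assert(MAX_HEADERS_PAYLOAD == 162003,
              "well under the 2 MB cap on non-block protocol messages");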
mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); } bool fNewBlock = false; m_chainman.ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(hash); } return; } + if (msg_type == NetMsgType::AVAHELLO && g_avalanche && + gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { + if (!pfrom.m_avalanche_state) { + pfrom.m_avalanche_state = std::make_unique(); + } + + CHashVerifier verifier(&vRecv); + avalanche::Delegation &delegation = pfrom.m_avalanche_state->delegation; + verifier >> delegation; + + avalanche::Proof proof; + + avalanche::DelegationState state; + CPubKey pubkey; + if (!delegation.verify(state, proof, pubkey)) { + Misbehaving(pfrom, 100, "invalid-delegation"); + return; + } + + std::array sig; + verifier >> sig; + } + // Ignore avalanche requests while importing if (msg_type == NetMsgType::AVAPOLL && !fImporting && !fReindex && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { auto now = std::chrono::steady_clock::now(); int64_t cooldown = gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN); { LOCK(cs_main); auto &node_state = State(pfrom.GetId())->m_avalanche_state; if (now < node_state.last_poll + std::chrono::milliseconds(cooldown)) { Misbehaving(pfrom, 20, "avapool-cooldown"); } node_state.last_poll = now; } uint64_t round; Unserialize(vRecv, round); unsigned int nCount = ReadCompactSize(vRecv); if (nCount > AVALANCHE_MAX_ELEMENT_POLL) { Misbehaving( pfrom, 20, strprintf("too-many-ava-poll: poll message size = %u", nCount)); return; } std::vector votes; votes.reserve(nCount); LogPrint(BCLog::NET, "received avalanche poll from peer=%d\n", pfrom.GetId()); { LOCK(cs_main); for (unsigned int n = 0; n < nCount; n++) { CInv inv; vRecv >> inv; const auto insertVote = [&](uint32_t e) { votes.emplace_back(e, inv.hash); }; // Not a block. if (inv.type != MSG_BLOCK) { insertVote(-1); continue; } // We have a block. const CBlockIndex *pindex = LookupBlockIndex(BlockHash(inv.hash)); // Unknown block. if (!pindex) { insertVote(-1); continue; } // Invalid block if (pindex->nStatus.isInvalid()) { insertVote(1); continue; } // Parked block if (pindex->nStatus.isOnParkedChain()) { insertVote(2); continue; } const CBlockIndex *pindexTip = ::ChainActive().Tip(); const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip); // Active block. if (pindex == pindexFork) { insertVote(0); continue; } // Fork block. if (pindexFork != pindexTip) { insertVote(3); continue; } // Missing block data. if (!pindex->nStatus.hasData()) { insertVote(-2); continue; } // This block is built on top of the tip, we have the data, it // is pending connection or rejection. insertVote(-3); } } // Send the query to the node. g_avalanche->sendResponse( &pfrom, avalanche::Response(round, cooldown, std::move(votes))); return; } // Ignore avalanche requests while importing if (msg_type == NetMsgType::AVARESPONSE && !fImporting && !fReindex && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { // As long as QUIC is not implemented, we need to sign response and // verify response's signatures in order to avoid any manipulation of // messages at the transport level. 
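// The AVAPOLL handler above answers each polled inventory with a small signed
// code: 0 for a block on the active chain, a positive code for a definitive
// verdict against it (invalid, parked, competing fork), and a negative code
// when no verdict can be given yet (unknown inventory or block, missing data,
// pending connection). A minimal sketch of that convention; the enum and
// helper below are illustrative names, not types from this codebase.
#include <cstdint>

enum class PollVerdict { Accepted, Rejected, Undecided };

static PollVerdict ClassifyPollCode(uint32_t code) {
    if (code == 0) {
        return PollVerdict::Accepted;
    }
    // The sign bit distinguishes "no" from "cannot answer yet": the negative
    // codes (-1, -2, -3) emitted above are the inconclusive cases.
    return int32_t(code) >= 0 ? PollVerdict::Rejected : PollVerdict::Undecided;
}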
CHashVerifier verifier(&vRecv); avalanche::Response response; verifier >> response; if (!g_avalanche->forNode(pfrom.GetId(), [&](const avalanche::Node &n) { std::array sig; vRecv >> sig; return n.pubkey.VerifySchnorr(verifier.GetHash(), sig); })) { Misbehaving(pfrom, 100, "invalid-ava-response-signature"); return; } std::vector updates; if (!g_avalanche->registerVotes(pfrom.GetId(), response, updates)) { return; } if (updates.size()) { for (avalanche::BlockUpdate &u : updates) { CBlockIndex *pindex = u.getBlockIndex(); switch (u.getStatus()) { case avalanche::BlockUpdate::Status::Invalid: case avalanche::BlockUpdate::Status::Rejected: { LogPrintf("Avalanche rejected %s, parking\n", pindex->GetBlockHash().GetHex()); BlockValidationState state; ::ChainstateActive().ParkBlock(config, state, pindex); if (!state.IsValid()) { LogPrintf("ERROR: Database error: %s\n", state.GetRejectReason()); return; } } break; case avalanche::BlockUpdate::Status::Accepted: case avalanche::BlockUpdate::Status::Finalized: { LogPrintf("Avalanche accepted %s\n", pindex->GetBlockHash().GetHex()); LOCK(cs_main); UnparkBlock(pindex); } break; } } BlockValidationState state; if (!ActivateBestChain(config, state)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } return; } if (msg_type == NetMsgType::GETADDR) { // This asymmetric behavior for inbound and outbound connections was // introduced to prevent a fingerprinting attack: an attacker can send // specific fake addresses to users' AddrMan and later request them by // sending getaddr messages. Making nodes which are behind NAT and can // only make outgoing connections ignore the getaddr message mitigates // the attack. if (!pfrom.IsInboundConn()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom.GetId()); return; } if (!pfrom.IsAddrRelayPeer()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from block-relay-only connection. " "peer=%d\n", pfrom.GetId()); return; } // Only send one GetAddr response per connection to reduce resource // waste and discourage addr stamping of INV announcements. if (pfrom.fSentAddr) { LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); return; } pfrom.fSentAddr = true; pfrom.vAddrToSend.clear(); std::vector vAddr = m_connman.GetAddresses(); FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr)); if (!banned_or_discouraged) { pfrom.PushAddress(addr, insecure_rand); } } return; } if (msg_type == NetMsgType::MEMPOOL) { if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL)) { if (!pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "mempool request with bloom filters disabled, " "disconnect peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; } return; } if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL)) { if (!pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, " "disconnect peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; } return; } if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_tx_inventory); pfrom.m_tx_relay->fSendMempool = true; } return; } if (msg_type == NetMsgType::PING) { if (pfrom.nVersion > BIP0031_VERSION) { uint64_t nonce = 0; vRecv >> nonce; // Echo the message back with the nonce. 
This allows for two useful // features: // // 1) A remote node can quickly check if the connection is // operational. // 2) Remote nodes can measure the latency of the network thread. If // this node is overloaded it won't respond to pings quickly and the // remote node can avoid sending us more work, like chain download // requests. // // The nonce stops the remote getting confused between different // pings: without it, if the remote node sends a ping once per // second and this node takes 5 seconds to respond to each, the 5th // ping the remote sends would appear to return very quickly. m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); } return; } if (msg_type == NetMsgType::PONG) { int64_t pingUsecEnd = nTimeReceived; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; std::string sProblem; if (nAvail >= sizeof(nonce)) { vRecv >> nonce; // Only process pong message if there is an outstanding ping (old // ping without nonce should never pong) if (pfrom.nPingNonceSent != 0) { if (nonce == pfrom.nPingNonceSent) { // Matching pong received, this ping is no longer // outstanding bPingFinished = true; int64_t pingUsecTime = pingUsecEnd - pfrom.nPingUsecStart; if (pingUsecTime > 0) { // Successful ping time measurement, replace previous pfrom.nPingUsecTime = pingUsecTime; pfrom.nMinPingUsecTime = std::min( pfrom.nMinPingUsecTime.load(), pingUsecTime); } else { // This should never happen sProblem = "Timing mishap"; } } else { // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { // This is most likely a bug in another implementation // somewhere; cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } } } else { sProblem = "Unsolicited pong without ping"; } } else { // This is most likely a bug in another implementation somewhere; // cancel this ping bPingFinished = true; sProblem = "Short payload"; } if (!(sProblem.empty())) { LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom.GetId(), sProblem, pfrom.nPingNonceSent, nonce, nAvail); } if (bPingFinished) { pfrom.nPingNonceSent = 0; } return; } if (msg_type == NetMsgType::FILTERLOAD) { CBloomFilter filter; vRecv >> filter; if (!filter.IsWithinSizeConstraints()) { // There is no excuse for sending a too-large filter Misbehaving(pfrom, 100, "too-large bloom filter"); } else if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter)); pfrom.m_tx_relay->pfilter->UpdateEmptyFull(); pfrom.m_tx_relay->fRelayTxes = true; } return; } if (msg_type == NetMsgType::FILTERADD) { std::vector vData; vRecv >> vData; // Nodes must NEVER send a data item > 520 bytes (the max size for a // script data object, and thus, the maximum size any matched object can // have) in a filteradd message. bool bad = false; if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { bad = true; } else if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.m_tx_relay->pfilter) { pfrom.m_tx_relay->pfilter->insert(vData); } else { bad = true; } } if (bad) { // The structure of this code doesn't really allow for a good error // code. We'll go generic. 
Misbehaving(pfrom, 100, "bad filteradd message"); } return; } if (msg_type == NetMsgType::FILTERCLEAR) { if (pfrom.m_tx_relay == nullptr) { return; } LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.GetLocalServices() & NODE_BLOOM) { pfrom.m_tx_relay->pfilter.reset(new CBloomFilter()); } pfrom.m_tx_relay->fRelayTxes = true; return; } if (msg_type == NetMsgType::FEEFILTER) { Amount newFeeFilter = Amount::zero(); vRecv >> newFeeFilter; if (MoneyRange(newFeeFilter)) { if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_feeFilter); pfrom.m_tx_relay->minFeeFilter = newFeeFilter; } LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); } return; } if (msg_type == NetMsgType::GETCFILTERS) { ProcessGetCFilters(pfrom, vRecv, chainparams, m_connman); return; } if (msg_type == NetMsgType::GETCFHEADERS) { ProcessGetCFHeaders(pfrom, vRecv, chainparams, m_connman); return; } if (msg_type == NetMsgType::GETCFCHECKPT) { ProcessGetCFCheckPt(pfrom, vRecv, chainparams, m_connman); return; } if (msg_type == NetMsgType::NOTFOUND) { // Remove the NOTFOUND transactions from the peer LOCK(cs_main); CNodeState *state = State(pfrom.GetId()); std::vector vInv; vRecv >> vInv; if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { for (CInv &inv : vInv) { if (inv.type == MSG_TX) { const TxId txid(inv.hash); // If we receive a NOTFOUND message for a txid we requested, // erase it from our data structures for this peer. auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(txid); if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) { // Skip any further work if this is a spurious NOTFOUND // message. continue; } state->m_tx_download.m_tx_in_flight.erase(in_flight_it); state->m_tx_download.m_tx_announced.erase(txid); } } } return; } // Ignore unknown commands for extensibility LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); return; } /** * Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. 
* @return True if the peer was marked for disconnection in this * function */ bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode &pnode) { const NodeId peer_id{pnode.GetId()}; PeerRef peer = GetPeerRef(peer_id); if (peer == nullptr) { return false; } { LOCK(peer->m_misbehavior_mutex); // There's nothing to do if the m_should_discourage flag isn't set if (!peer->m_should_discourage) { return false; } peer->m_should_discourage = false; } // peer.m_misbehavior_mutex if (pnode.HasPermission(PF_NOBAN)) { // We never disconnect or discourage peers for bad behavior if they have // the NOBAN permission flag LogPrintf("Warning: not punishing noban peer %d!\n", peer_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); return false; } if (pnode.addr.IsLocal()) { // We disconnect local peers for bad behavior but don't discourage // (since that would discourage all peers on the same local address) LogPrintf( "Warning: disconnecting but not discouraging local peer %d!\n", peer_id); pnode.fDisconnect = true; return true; } // Normal case: Disconnect the peer and discourage all nodes sharing the // address LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id); if (m_banman) { m_banman->Discourage(pnode.addr); } m_connman.DisconnectNode(pnode.addr); return true; } bool PeerLogicValidation::ProcessMessages(const Config &config, CNode *pfrom, std::atomic &interruptMsgProc) { // // Message format // (4) message start // (12) command // (4) size // (4) checksum // (x) data // bool fMoreWork = false; if (!pfrom->vRecvGetData.empty()) { ProcessGetData(config, *pfrom, m_connman, m_mempool, interruptMsgProc); } if (!pfrom->orphan_work_set.empty()) { LOCK2(cs_main, g_cs_orphans); ProcessOrphanTx(config, m_connman, m_mempool, pfrom->orphan_work_set); } if (pfrom->fDisconnect) { return false; } // this maintains the order of responses and prevents vRecvGetData from // growing unbounded if (!pfrom->vRecvGetData.empty()) { return true; } if (!pfrom->orphan_work_set.empty()) { return true; } // Don't bother if send buffer is too full to respond anyway if (pfrom->fPauseSend) { return false; } std::list msgs; { LOCK(pfrom->cs_vProcessMsg); if (pfrom->vProcessMsg.empty()) { return false; } // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size; pfrom->fPauseRecv = pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize(); fMoreWork = !pfrom->vProcessMsg.empty(); } CNetMessage &msg(msgs.front()); msg.SetVersion(pfrom->GetRecvVersion()); // Check network magic if (!msg.m_valid_netmagic) { LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId()); // Make sure we discourage where that come from for some time. 
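// The message format sketched in the comment above is a fixed 24-byte
// envelope in front of every payload: the per-network magic bytes, a
// NUL-padded command name, the payload length, and a checksum taken from the
// first four bytes of a double SHA-256 of the payload. A minimal layout
// illustration; the real type is CMessageHeader in protocol.h, and this
// struct only sketches the wire offsets.
#include <cstdint>

struct WireMessageHeader {
    uint8_t magic[4];    // network start bytes, offset 0
    char command[12];    // e.g. "headers" padded with NULs, offset 4
    uint32_t length;     // payload size in bytes, little-endian, offset 16
    uint8_t checksum[4]; // first 4 bytes of SHA256(SHA256(payload)), offset 20
};
static_assert(sizeof(WireMessageHeader) == 24, "4 + 12 + 4 + 4 bytes, no padding");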
if (m_banman) { m_banman->Discourage(pfrom->addr); } m_connman.DisconnectNode(pfrom->addr); pfrom->fDisconnect = true; return false; } // Check header if (!msg.m_valid_header) { LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId()); return fMoreWork; } const std::string &msg_type = msg.m_command; // Message size unsigned int nMessageSize = msg.m_message_size; // Checksum CDataStream &vRecv = msg.m_recv; if (!msg.m_valid_checksum) { LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n", __func__, SanitizeString(msg_type), nMessageSize, pfrom->GetId()); if (m_banman) { m_banman->Discourage(pfrom->addr); } m_connman.DisconnectNode(pfrom->addr); return fMoreWork; } try { ProcessMessage(config, *pfrom, msg_type, vRecv, msg.m_time, interruptMsgProc); if (interruptMsgProc) { return false; } if (!pfrom->vRecvGetData.empty()) { fMoreWork = true; } } catch (const std::exception &e) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name()); } catch (...) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize); } return fMoreWork; } void PeerLogicValidation::ConsiderEviction(CNode &pto, int64_t time_in_seconds) { AssertLockHeld(cs_main); CNodeState &state = *State(pto.GetId()); const CNetMsgMaker msgMaker(pto.GetSendVersion()); if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { // This is an outbound peer subject to disconnection if they don't // announce a block with as much work as the current tip within // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their // chain has more work than ours, we should sync to it, unless it's // invalid, in which case we should find that out and disconnect from // them elsewhere). if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) { if (state.m_chain_sync.m_timeout != 0) { state.m_chain_sync.m_timeout = 0; state.m_chain_sync.m_work_header = nullptr; state.m_chain_sync.m_sent_getheaders = false; } } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { // Our best block known by this peer is behind our tip, and we're // either noticing that for the first time, OR this peer was able to // catch up to some earlier point where we checked against our tip. // Either way, set a new timeout based on current tip. state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; state.m_chain_sync.m_work_header = ::ChainActive().Tip(); state.m_chain_sync.m_sent_getheaders = false; } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) { // No evidence yet that our peer has synced to a chain with work // equal to that of our tip, when we first detected it was behind. // Send a single getheaders message to give the peer a chance to // update us. if (state.m_chain_sync.m_sent_getheaders) { // They've run out of time to catch up! LogPrintf( "Disconnecting outbound peer %d for old chain, best known " "block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? 
state.pindexBestKnownBlock->GetBlockHash().ToString() : ""); pto.fDisconnect = true; } else { assert(state.m_chain_sync.m_work_header); LogPrint( BCLog::NET, "sending getheaders to outbound peer=%d to verify chain " "work (current best known block:%s, benchmark blockhash: " "%s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "", state.m_chain_sync.m_work_header->GetBlockHash() .ToString()); m_connman.PushMessage( &pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator( state.m_chain_sync.m_work_header->pprev), uint256())); state.m_chain_sync.m_sent_getheaders = true; // 2 minutes constexpr int64_t HEADERS_RESPONSE_TIME = 120; // Bump the timeout to allow a response, which could clear the // timeout (if the response shows the peer has synced), reset // the timeout (if the peer syncs to the required work but not // to our tip), or result in disconnect (if we advance to the // timeout and pindexBestKnownBlock has not sufficiently // progressed) state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; } } } } void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) { // Check whether we have too many outbound peers int extra_peers = m_connman.GetExtraOutboundCount(); if (extra_peers <= 0) { return; } // If we have more outbound peers than we target, disconnect one. // Pick the outbound peer that least recently announced us a new block, with // ties broken by choosing the more recent connection (higher node id) NodeId worst_peer = -1; int64_t oldest_block_announcement = std::numeric_limits::max(); m_connman.ForEachNode([&](CNode *pnode) { AssertLockHeld(cs_main); // Ignore non-outbound peers, or nodes marked for disconnect already if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) { return; } CNodeState *state = State(pnode->GetId()); if (state == nullptr) { // shouldn't be possible, but just in case return; } // Don't evict our protected peers if (state->m_chain_sync.m_protect) { return; } // Don't evict our block-relay-only peers. if (pnode->m_tx_relay == nullptr) { return; } if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { worst_peer = pnode->GetId(); oldest_block_announcement = state->m_last_block_announcement; } }); if (worst_peer == -1) { return; } bool disconnected = m_connman.ForNode(worst_peer, [&](CNode *pnode) { AssertLockHeld(cs_main); // Only disconnect a peer that has been connected to us for some // reasonable fraction of our check-frequency, to give it time for new // information to have arrived. // Also don't disconnect any peer we're trying to download a block from. CNodeState &state = *State(pnode->GetId()); if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) { LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block " "announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); pnode->fDisconnect = true; return true; } else { LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction " "(connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight); return false; } }); if (disconnected) { // If we disconnected an extra peer, that means we successfully // connected to at least one peer after the last time we detected a // stale tip. 
Don't try any more extra peers until we next detect a // stale tip, to limit the load we put on the network from these extra // connections. m_connman.SetTryNewOutboundPeer(false); } } void PeerLogicValidation::CheckForStaleTipAndEvictPeers( const Consensus::Params &consensusParams) { LOCK(cs_main); int64_t time_in_seconds = GetTime(); EvictExtraOutboundPeers(time_in_seconds); if (time_in_seconds <= m_stale_tip_check_time) { return; } // Check whether our tip is stale, and if so, allow using an extra outbound // peer. if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) { LogPrintf("Potential stale tip detected, will try using extra outbound " "peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update); m_connman.SetTryNewOutboundPeer(true); } else if (m_connman.GetTryNewOutboundPeer()) { m_connman.SetTryNewOutboundPeer(false); } m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL; } namespace { class CompareInvMempoolOrder { CTxMemPool *mp; public: explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; } bool operator()(std::set::iterator a, std::set::iterator b) { /** * As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. */ return mp->CompareDepthAndScore(*b, *a); } }; } // namespace bool PeerLogicValidation::SendMessages(const Config &config, CNode *pto, std::atomic &interruptMsgProc) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll // disconnect misbehaving peers even before the version handshake is // complete. if (MaybeDiscourageAndDisconnect(*pto)) { return true; } // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) { return true; } // If we get here, the outgoing message serialization version is set and // can't change. const CNetMsgMaker msgMaker(pto->GetSendVersion()); // // Message: ping // bool pingSend = false; if (pto->fPingQueued) { // RPC ping request by user pingSend = true; } if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { // Ping automatically sent as a latency probe & keepalive. pingSend = true; } if (pingSend) { uint64_t nonce = 0; while (nonce == 0) { GetRandBytes((uint8_t *)&nonce, sizeof(nonce)); } pto->fPingQueued = false; pto->nPingUsecStart = GetTimeMicros(); if (pto->nVersion > BIP0031_VERSION) { pto->nPingNonceSent = nonce; m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); } else { // Peer is too old to support ping command with nonce, pong will // never arrive. 
pto->nPingNonceSent = 0; m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); } } { LOCK(cs_main); CNodeState &state = *State(pto->GetId()); // Address refresh broadcast int64_t nNow = GetTimeMicros(); auto current_time = GetTime(); if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) { AdvertiseLocal(pto); pto->m_next_local_addr_send = PoissonNextSend( current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // // Message: addr // if (pto->IsAddrRelayPeer() && pto->m_next_addr_send < current_time) { pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); std::vector vAddr; vAddr.reserve(pto->vAddrToSend.size()); assert(pto->m_addr_known); for (const CAddress &addr : pto->vAddrToSend) { if (!pto->m_addr_known->contains(addr.GetKey())) { pto->m_addr_known->insert(addr.GetKey()); vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); vAddr.clear(); } } } pto->vAddrToSend.clear(); if (!vAddr.empty()) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); } // we only send the big addr message once if (pto->vAddrToSend.capacity() > 40) { pto->vAddrToSend.shrink_to_fit(); } } // Start block sync if (pindexBestHeader == nullptr) { pindexBestHeader = ::ChainActive().Tip(); } // Download if this is a nice peer, or we have no nice peers and this // one might do. bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're // close to today. if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / (consensusParams.nPowTargetSpacing); nSyncStarted++; const CBlockIndex *pindexStart = pindexBestHeader; /** * If possible, start at the block preceding the currently best * known header. This ensures that we always get a non-empty * list of headers back as long as the peer is up-to-date. With * a non-empty response, we can initialise the peer's known best * block. This wouldn't be possible if we requested starting at * pindexBestHeader and got back an empty response. */ if (pindexStart->pprev) { pindexStart = pindexStart->pprev; } LogPrint( BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight); m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256())); } } // // Try sending block announcements via headers // { // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block // hashes we're relaying, and our peer wants headers announcements, // then find the first header not yet known to our peer but would // connect, and send. If no header would connect, or if we have too // many blocks, or if the peer doesn't want headers, just add all to // the inv queue. 
LOCK(pto->cs_inventory); std::vector vHeaders; bool fRevertToInv = ((!state.fPreferHeaders && (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); // last header queued for delivery const CBlockIndex *pBestIndex = nullptr; // ensure pindexBestKnownBlock is up-to-date ProcessBlockAvailability(pto->GetId()); if (!fRevertToInv) { bool fFoundStartingHeader = false; // Try to find first header that our peer doesn't have, and then // send all headers past that one. If we come across an headers // that aren't on ::ChainActive(), give up. for (const BlockHash &hash : pto->vBlockHashesToAnnounce) { const CBlockIndex *pindex = LookupBlockIndex(hash); assert(pindex); if (::ChainActive()[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block fRevertToInv = true; break; } if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { // This means that the list of blocks to announce don't // connect to each other. This shouldn't really be // possible to hit during regular operation (because // reorgs should take us to a chain that has some block // not on the prior chain, which should be caught by the // prior check), but one way this could happen is by // using invalidateblock / reconsiderblock repeatedly on // the tip, causing it to be added multiple times to // vBlockHashesToAnnounce. Robustly deal with this rare // situation by reverting to an inv. fRevertToInv = true; break; } pBestIndex = pindex; if (fFoundStartingHeader) { // add this to the headers message vHeaders.push_back(pindex->GetBlockHeader()); } else if (PeerHasHeader(&state, pindex)) { // Keep looking for the first new block. continue; } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { // Peer doesn't have this header but they do have the // prior one. Start sending headers. fFoundStartingHeader = true; vHeaders.push_back(pindex->GetBlockHeader()); } else { // Peer doesn't have this header or the prior one -- // nothing will connect, so bail out. fRevertToInv = true; break; } } } if (!fRevertToInv && !vHeaders.empty()) { if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { // We only send up to 1 block as header-and-ids, as // otherwise probably means we're doing an initial-ish-sync // or they're slow. 
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->GetId()); int nSendFlags = 0; bool fGotBlockFromCache = false; { LOCK(cs_most_recent_block); if (most_recent_block_hash == pBestIndex->GetBlockHash()) { CBlockHeaderAndShortTxIDs cmpctblock( *most_recent_block); m_connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); fGotBlockFromCache = true; } } if (!fGotBlockFromCache) { CBlock block; bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams); assert(ret); CBlockHeaderAndShortTxIDs cmpctblock(block); m_connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } state.pindexBestHeaderSent = pBestIndex; } else if (state.fPreferHeaders) { if (vHeaders.size() > 1) { LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, vHeaders.size(), vHeaders.front().GetHash().ToString(), vHeaders.back().GetHash().ToString(), pto->GetId()); } else { LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->GetId()); } m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); state.pindexBestHeaderSent = pBestIndex; } else { fRevertToInv = true; } } if (fRevertToInv) { // If falling back to using an inv, just try to inv the tip. The // last entry in vBlockHashesToAnnounce was our tip at some // point in the past. if (!pto->vBlockHashesToAnnounce.empty()) { const BlockHash &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); const CBlockIndex *pindex = LookupBlockIndex(hashToAnnounce); assert(pindex); // Warn if we're announcing a block that is not on the main // chain. This should be very rare and could be optimized // out. Just log for now. if (::ChainActive()[pindex->nHeight] != pindex) { LogPrint( BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString()); } // If the peer's chain has this block, don't inv it back. if (!PeerHasHeader(&state, pindex)) { pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, pto->GetId(), hashToAnnounce.ToString()); } } } pto->vBlockHashesToAnnounce.clear(); } // // Message: inventory // std::vector vInv; { LOCK(pto->cs_inventory); vInv.reserve(std::max(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX_PER_MB * config.GetMaxBlockSize() / 1000000)); // Add blocks for (const BlockHash &hash : pto->vInventoryBlockToSend) { vInv.push_back(CInv(MSG_BLOCK, hash)); if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->vInventoryBlockToSend.clear(); if (pto->m_tx_relay != nullptr) { LOCK(pto->m_tx_relay->cs_tx_inventory); // Check whether periodic sends should happen bool fSendTrickle = pto->HasPermission(PF_NOBAN); if (pto->m_tx_relay->nNextInvSend < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{ m_connman.PoissonNextSendInbound( nNow, INVENTORY_BROADCAST_INTERVAL)}; } else { // Skip delay for outbound peers, as there is less // privacy concern for them. pto->m_tx_relay->nNextInvSend = current_time; } } // Time to send but the peer has requested we not relay // transactions. 
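// The nNextInvSend schedule above trickles transaction announcements at
// exponentially distributed intervals (a Poisson process), so the timing of
// any single INV reveals little about when a transaction first arrived at
// this node. A minimal sketch of drawing such an interval by inverse-CDF
// sampling; the real helpers are PoissonNextSend/PoissonNextSendInbound and
// may differ in detail.
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <random>

static std::chrono::microseconds NextTrickleDelay(std::mt19937_64 &rng,
                                                  std::chrono::seconds average_interval) {
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    const double u = std::max(uniform(rng), 1e-12); // avoid log(0)
    // Exponential with the requested mean: delay = -ln(u) * mean.
    const double delay_seconds =
        -std::log(u) * static_cast<double>(average_interval.count());
    return std::chrono::microseconds(static_cast<int64_t>(delay_seconds * 1e6));
}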
if (fSendTrickle) { LOCK(pto->m_tx_relay->cs_filter); if (!pto->m_tx_relay->fRelayTxes) { pto->m_tx_relay->setInventoryTxToSend.clear(); } } // Respond to BIP35 mempool requests if (fSendTrickle && pto->m_tx_relay->fSendMempool) { auto vtxinfo = m_mempool.infoAll(); pto->m_tx_relay->fSendMempool = false; CFeeRate filterrate; { LOCK(pto->m_tx_relay->cs_feeFilter); filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter); } LOCK(pto->m_tx_relay->cs_filter); for (const auto &txinfo : vtxinfo) { const TxId &txid = txinfo.tx->GetId(); CInv inv(MSG_TX, txid); pto->m_tx_relay->setInventoryTxToSend.erase(txid); // Don't send transactions that peers will not put into // their mempool if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate( *txinfo.tx)) { continue; } pto->m_tx_relay->filterInventoryKnown.insert(txid); vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->m_tx_relay->m_last_mempool_req = GetTime(); } // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending std::vector::iterator> vInvTx; vInvTx.reserve( pto->m_tx_relay->setInventoryTxToSend.size()); for (std::set::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) { vInvTx.push_back(it); } CFeeRate filterrate; { LOCK(pto->m_tx_relay->cs_feeFilter); filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter); } // Topologically and fee-rate sort the inventory we send for // privacy and priority reasons. A heap is used so that not // all items need sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's // capacity, especially since we have many peers and some // will draw much shorter delays. unsigned int nRelayedTransactions = 0; LOCK(pto->m_tx_relay->cs_filter); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB * config.GetMaxBlockSize() / 1000000) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); std::set::iterator it = vInvTx.back(); vInvTx.pop_back(); const TxId txid = *it; // Remove it from the to-be-sent set pto->m_tx_relay->setInventoryTxToSend.erase(it); // Check if not in the filter already if (pto->m_tx_relay->filterInventoryKnown.contains( txid)) { continue; } // Not in the mempool anymore? don't bother sending it. auto txinfo = m_mempool.info(txid); if (!txinfo.tx) { continue; } // Peer told you to not send transactions at that // feerate? Don't bother sending it. 
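// The relay loop above uses std::make_heap/std::pop_heap rather than sorting
// setInventoryTxToSend outright: only as many entries as fit under the
// per-message budget are ever extracted, so a heap avoids ordering elements
// that will not be sent this round. A small self-contained illustration of
// the same top-k pattern on plain fee rates; illustrative values, not the
// mempool comparator used above.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<int64_t> TopFeeRates(std::vector<int64_t> feerates, size_t max_to_send) {
    std::vector<int64_t> picked;
    std::make_heap(feerates.begin(), feerates.end()); // max-heap, best fee rate on top
    while (!feerates.empty() && picked.size() < max_to_send) {
        std::pop_heap(feerates.begin(), feerates.end()); // move the best to the back
        picked.push_back(feerates.back());
        feerates.pop_back();
    }
    return picked;
}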
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate( *txinfo.tx)) { continue; } // Send vInv.push_back(CInv(MSG_TX, txid)); nRelayedTransactions++; { // Expire old relay messages while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) { mapRelay.erase(vRelayExpiration.front().second); vRelayExpiration.pop_front(); } auto ret = mapRelay.insert( std::make_pair(txid, std::move(txinfo.tx))); if (ret.second) { vRelayExpiration.push_back(std::make_pair( nNow + std::chrono::microseconds{ RELAY_TX_CACHE_TIME} .count(), ret.first)); } } if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } pto->m_tx_relay->filterInventoryKnown.insert(txid); } } } } if (!vInv.empty()) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); } // Detect whether we're stalling current_time = GetTime(); // nNow is the current system time (GetTimeMicros is not mockable) and // should be replaced by the mockable current_time eventually nNow = GetTimeMicros(); if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot // move. During normal steady state, the download window should be // much larger than the to-be-downloaded set of blocks, so // disconnection should only happen during initial block download. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId()); pto->fDisconnect = true; return true; } // In case there is a block that has been in flight from this peer for 2 // + 0.5 * N times the block interval (with N the number of peers from // which we're downloading validated blocks), disconnect due to timeout. // We compensate for other peers to prevent killing off peers due to our // own downstream link being saturated. We only count validated // in-flight blocks so peers can't advertise non-existing block hashes // to unreasonably increase our timeout. if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { LogPrintf("Timeout downloading block %s from peer=%d, " "disconnecting\n", queuedBlock.hash.ToString(), pto->GetId()); pto->fDisconnect = true; return true; } } // Check for headers sync timeouts if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits::max()) { // Detect whether this is a stalling initial-headers-sync peer if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) { if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) { // Disconnect a (non-whitelisted) peer if it is our only // sync peer, and we have others we could be using instead. // Note: If all our peers are inbound, then we won't // disconnect our sync peer for stalling; we have bigger // problems if we can't get any outbound peers. 
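// Worked example of the in-flight block timeout above: the deadline is the
// block interval multiplied by 2, plus 0.5 for every other peer we are
// already downloading validated blocks from. The helper below is illustrative
// only; the real constants are BLOCK_DOWNLOAD_TIMEOUT_BASE and
// BLOCK_DOWNLOAD_TIMEOUT_PER_PEER.
#include <cstdint>

static int64_t BlockDownloadTimeoutSeconds(int64_t target_spacing_seconds,
                                           int64_t other_validated_downloads) {
    // Two full block intervals, plus half an interval per other downloading peer.
    return 2 * target_spacing_seconds +
           target_spacing_seconds * other_validated_downloads / 2;
}
// With 600-second spacing and two such peers: 1200 + 600 = 1800 seconds (30 minutes).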
if (!pto->HasPermission(PF_NOBAN)) { LogPrintf("Timeout downloading headers from peer=%d, " "disconnecting\n", pto->GetId()); pto->fDisconnect = true; return true; } else { LogPrintf( "Timeout downloading headers from whitelisted " "peer=%d, not disconnecting\n", pto->GetId()); // Reset the headers sync state so that we have a chance // to try downloading from a different peer. Note: this // will also result in at least one more getheaders // message to be sent to this peer (eventually). state.fSyncStarted = false; nSyncStarted--; state.nHeadersSyncTimeout = 0; } } } else { // After we've caught up once, reset the timeout so we can't // trigger disconnect later. state.nHeadersSyncTimeout = std::numeric_limits::max(); } } // Check that outbound peers have reasonable chains GetTime() is used by // this anti-DoS logic so we can test this using mocktime. ConsiderEviction(*pto, GetTime()); // // Message: getdata (blocks) // std::vector vGetData; if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); for (const CBlockIndex *pindex : vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(config, m_mempool, pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } if (state.nBlocksInFlight == 0 && staller != -1) { if (State(staller)->nStallingSince == 0) { State(staller)->nStallingSince = nNow; LogPrint(BCLog::NET, "Stall started peer=%d\n", staller); } } } // // Message: getdata (transactions) // // For robustness, expire old requests after a long timeout, so that we // can resume downloading transactions from a peer even if they were // unresponsive in the past. Eventually we should consider disconnecting // peers, but this is conservative. if (state.m_tx_download.m_check_expiry_timer <= current_time) { for (auto it = state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) { if (it->second <= current_time - TX_EXPIRY_INTERVAL) { LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId()); state.m_tx_download.m_tx_announced.erase(it->first); state.m_tx_download.m_tx_in_flight.erase(it++); } else { ++it; } } // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize // so that we're not doing this for all peers at the same time. state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL); } auto &tx_process_time = state.m_tx_download.m_tx_process_time; while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) { const TxId txid = tx_process_time.begin()->second; // Erase this entry from tx_process_time (it may be added back for // processing at a later time, see below) tx_process_time.erase(tx_process_time.begin()); CInv inv(MSG_TX, txid); if (!AlreadyHave(inv, m_mempool)) { // If this transaction was last requested more than 1 minute // ago, then request. 
const auto last_request_time = GetTxRequestTime(txid); if (last_request_time <= current_time - GETDATA_TX_INTERVAL) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId()); vGetData.push_back(inv); if (vGetData.size() >= MAX_GETDATA_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } UpdateTxRequestTime(txid, current_time); state.m_tx_download.m_tx_in_flight.emplace(txid, current_time); } else { // This transaction is in flight from someone else; queue // up processing to happen after the download times out // (with a slight delay for inbound peers, to prefer // requests to outbound peers). const auto next_process_time = CalculateTxGetDataTime( txid, current_time, !state.fPreferredDownload); tx_process_time.emplace(next_process_time, txid); } } else { // We have already seen this transaction, no need to download. state.m_tx_download.m_tx_announced.erase(txid); state.m_tx_download.m_tx_in_flight.erase(txid); } } if (!vGetData.empty()) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } // // Message: feefilter // // We don't want white listed peers to filter txs to us if we have // -whitelistforcerelay if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && !pto->HasPermission(PF_FORCERELAY)) { Amount currentFilter = m_mempool .GetMinFee( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); int64_t timeNow = GetTimeMicros(); if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) { static CFeeRate default_feerate = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB); static FeeFilterRounder filterRounder(default_feerate); Amount filterToSend = filterRounder.round(currentFilter); filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend)); pto->m_tx_relay->lastSentFeeFilter = filterToSend; } pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more // than MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then // move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter && (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) { pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; } } } // release cs_main return true; } class CNetProcessingCleanup { public: CNetProcessingCleanup() {} ~CNetProcessingCleanup() { // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); } }; static CNetProcessingCleanup instance_of_cnetprocessingcleanup; diff --git a/src/protocol.cpp b/src/protocol.cpp index 833edc323..ea0d6c47c 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -1,303 +1,304 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include #include #include #include #include #ifndef WIN32 #include #endif #include static std::atomic g_initial_block_download_completed(false); namespace NetMsgType { const char *VERSION = "version"; const char *VERACK = "verack"; const char *ADDR = "addr"; const char *INV = "inv"; const char *GETDATA = "getdata"; const char *MERKLEBLOCK = "merkleblock"; const char *GETBLOCKS = "getblocks"; const char *GETHEADERS = "getheaders"; const char *TX = "tx"; const char *HEADERS = "headers"; const char *BLOCK = "block"; const char *GETADDR = "getaddr"; const char *MEMPOOL = "mempool"; const char *PING = "ping"; const char *PONG = "pong"; const char *NOTFOUND = "notfound"; const char *FILTERLOAD = "filterload"; const char *FILTERADD = "filteradd"; const char *FILTERCLEAR = "filterclear"; const char *SENDHEADERS = "sendheaders"; const char *FEEFILTER = "feefilter"; const char *SENDCMPCT = "sendcmpct"; const char *CMPCTBLOCK = "cmpctblock"; const char *GETBLOCKTXN = "getblocktxn"; const char *BLOCKTXN = "blocktxn"; const char *GETCFILTERS = "getcfilters"; const char *CFILTER = "cfilter"; const char *GETCFHEADERS = "getcfheaders"; const char *CFHEADERS = "cfheaders"; const char *GETCFCHECKPT = "getcfcheckpt"; const char *CFCHECKPT = "cfcheckpt"; +const char *AVAHELLO = "avahello"; const char *AVAPOLL = "avapoll"; const char *AVARESPONSE = "avaresponse"; bool IsBlockLike(const std::string &strCommand) { return strCommand == NetMsgType::BLOCK || strCommand == NetMsgType::CMPCTBLOCK || strCommand == NetMsgType::BLOCKTXN; } }; // namespace NetMsgType /** * All known message types. Keep this in the same order as the list of messages * above and in protocol.h. */ static const std::string allNetMessageTypes[] = { NetMsgType::VERSION, NetMsgType::VERACK, NetMsgType::ADDR, NetMsgType::INV, NetMsgType::GETDATA, NetMsgType::MERKLEBLOCK, NetMsgType::GETBLOCKS, NetMsgType::GETHEADERS, NetMsgType::TX, NetMsgType::HEADERS, NetMsgType::BLOCK, NetMsgType::GETADDR, NetMsgType::MEMPOOL, NetMsgType::PING, NetMsgType::PONG, NetMsgType::NOTFOUND, NetMsgType::FILTERLOAD, NetMsgType::FILTERADD, NetMsgType::FILTERCLEAR, NetMsgType::SENDHEADERS, NetMsgType::FEEFILTER, NetMsgType::SENDCMPCT, NetMsgType::CMPCTBLOCK, NetMsgType::GETBLOCKTXN, NetMsgType::BLOCKTXN, NetMsgType::GETCFILTERS, NetMsgType::CFILTER, NetMsgType::GETCFHEADERS, NetMsgType::CFHEADERS, NetMsgType::GETCFCHECKPT, NetMsgType::CFCHECKPT, }; static const std::vector allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes + ARRAYLEN(allNetMessageTypes)); CMessageHeader::CMessageHeader(const MessageMagic &pchMessageStartIn) { memcpy(std::begin(pchMessageStart), std::begin(pchMessageStartIn), MESSAGE_START_SIZE); memset(pchCommand.data(), 0, sizeof(pchCommand)); nMessageSize = -1; memset(pchChecksum, 0, CHECKSUM_SIZE); } CMessageHeader::CMessageHeader(const MessageMagic &pchMessageStartIn, const char *pszCommand, unsigned int nMessageSizeIn) { memcpy(std::begin(pchMessageStart), std::begin(pchMessageStartIn), MESSAGE_START_SIZE); // Copy the command name, zero-padding to COMMAND_SIZE bytes size_t i = 0; for (; i < pchCommand.size() && pszCommand[i] != 0; ++i) { pchCommand[i] = pszCommand[i]; } // Assert that the command name passed in is not longer than COMMAND_SIZE assert(pszCommand[i] == 0); for (; i < pchCommand.size(); ++i) { pchCommand[i] = 0; } nMessageSize = nMessageSizeIn; memset(pchChecksum, 0, CHECKSUM_SIZE); } std::string CMessageHeader::GetCommand() const { // return std::string(pchCommand.begin(), pchCommand.end()); return 
std::string(pchCommand.data(), pchCommand.data() + strnlen(pchCommand.data(), COMMAND_SIZE)); } static bool CheckHeaderMagicAndCommand(const CMessageHeader &header, const CMessageHeader::MessageMagic &magic) { // Check start string if (memcmp(std::begin(header.pchMessageStart), std::begin(magic), CMessageHeader::MESSAGE_START_SIZE) != 0) { return false; } // Check the command string for errors for (const char *p1 = header.pchCommand.data(); p1 < header.pchCommand.data() + CMessageHeader::COMMAND_SIZE; p1++) { if (*p1 == 0) { // Must be all zeros after the first zero for (; p1 < header.pchCommand.data() + CMessageHeader::COMMAND_SIZE; p1++) { if (*p1 != 0) { return false; } } } else if (*p1 < ' ' || *p1 > 0x7E) { return false; } } return true; } bool CMessageHeader::IsValid(const Config &config) const { // Check start string if (!CheckHeaderMagicAndCommand(*this, config.GetChainParams().NetMagic())) { return false; } // Message size if (IsOversized(config)) { LogPrintf("CMessageHeader::IsValid(): (%s, %u bytes) is oversized\n", GetCommand(), nMessageSize); return false; } return true; } /** * This is a transition method in order to stay compatible with older code that * do not use the config. It assumes message will not get too large. This cannot * be used for any piece of code that will download blocks as blocks may be * bigger than the permitted size. Idealy, code that uses this function should * be migrated toward using the config. */ bool CMessageHeader::IsValidWithoutConfig(const MessageMagic &magic) const { // Check start string if (!CheckHeaderMagicAndCommand(*this, magic)) { return false; } // Message size if (nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { LogPrintf( "CMessageHeader::IsValidForSeeder(): (%s, %u bytes) is oversized\n", GetCommand(), nMessageSize); return false; } return true; } bool CMessageHeader::IsOversized(const Config &config) const { // If the message doesn't not contain a block content, check against // MAX_PROTOCOL_MESSAGE_LENGTH. if (nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH && !NetMsgType::IsBlockLike(GetCommand())) { return true; } // Scale the maximum accepted size with the block size. if (nMessageSize > 2 * config.GetMaxBlockSize()) { return true; } return false; } ServiceFlags GetDesirableServiceFlags(ServiceFlags services) { if ((services & NODE_NETWORK_LIMITED) && g_initial_block_download_completed) { return ServiceFlags(NODE_NETWORK_LIMITED); } return ServiceFlags(NODE_NETWORK); } void SetServiceFlagsIBDCache(bool state) { g_initial_block_download_completed = state; } CAddress::CAddress() : CService() { Init(); } CAddress::CAddress(CService ipIn, ServiceFlags nServicesIn) : CService(ipIn) { Init(); nServices = nServicesIn; } void CAddress::Init() { nServices = NODE_NONE; nTime = 100000000; } std::string CInv::GetCommand() const { std::string cmd; switch (GetKind()) { case MSG_TX: return cmd.append(NetMsgType::TX); case MSG_BLOCK: return cmd.append(NetMsgType::BLOCK); case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK); case MSG_CMPCT_BLOCK: return cmd.append(NetMsgType::CMPCTBLOCK); default: throw std::out_of_range( strprintf("CInv::GetCommand(): type=%d unknown type", type)); } } std::string CInv::ToString() const { try { return strprintf("%s %s", GetCommand(), hash.ToString()); } catch (const std::out_of_range &) { return strprintf("0x%08x %s", type, hash.ToString()); } } const std::vector &getAllNetMessageTypes() { return allNetMessageTypesVec; } /** * Convert a service flag (NODE_*) to a human readable string. 
* It supports unknown service flags which will be returned as "UNKNOWN[...]". * @param[in] bit the service flag is calculated as (1 << bit) */ static std::string serviceFlagToStr(const size_t bit) { const uint64_t service_flag = 1ULL << bit; switch (ServiceFlags(service_flag)) { case NODE_NONE: // impossible abort(); case NODE_NETWORK: return "NETWORK"; case NODE_GETUTXO: return "GETUTXO"; case NODE_BLOOM: return "BLOOM"; case NODE_XTHIN: return "XTHIN"; case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED"; case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS"; case NODE_AVALANCHE: return "AVALANCHE"; default: std::ostringstream stream; stream.imbue(std::locale::classic()); stream << "UNKNOWN["; stream << "2^" << bit; stream << "]"; return stream.str(); } } std::vector serviceFlagsToStr(const uint64_t flags) { std::vector str_flags; for (size_t i = 0; i < sizeof(flags) * 8; ++i) { if (flags & (1ULL << i)) { str_flags.emplace_back(serviceFlagToStr(i)); } } return str_flags; } diff --git a/src/protocol.h b/src/protocol.h index b53ef3f6d..f3a9eedc8 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -1,530 +1,534 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef __cplusplus #error This header can only be compiled as C++. #endif #ifndef BITCOIN_PROTOCOL_H #define BITCOIN_PROTOCOL_H #include #include #include #include #include #include #include class Config; /** * Maximum length of incoming protocol messages (Currently 2MB). * NB: Messages propagating block content are not subject to this limit. */ static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024; /** * Message header. * (4) message start. * (12) command. * (4) size. * (4) checksum. */ class CMessageHeader { public: static constexpr size_t MESSAGE_START_SIZE = 4; static constexpr size_t COMMAND_SIZE = 12; static constexpr size_t MESSAGE_SIZE_SIZE = 4; static constexpr size_t CHECKSUM_SIZE = 4; static constexpr size_t MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE; static constexpr size_t CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE; static constexpr size_t HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE; typedef std::array MessageMagic; explicit CMessageHeader(const MessageMagic &pchMessageStartIn); /** * Construct a P2P message header from message-start characters, a command * and the size of the message. * @note Passing in a `pszCommand` longer than COMMAND_SIZE will result in a * run-time assertion error. */ CMessageHeader(const MessageMagic &pchMessageStartIn, const char *pszCommand, unsigned int nMessageSizeIn); std::string GetCommand() const; bool IsValid(const Config &config) const; bool IsValidWithoutConfig(const MessageMagic &magic) const; bool IsOversized(const Config &config) const; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(pchMessageStart); READWRITE(pchCommand); READWRITE(nMessageSize); READWRITE(pchChecksum); } MessageMagic pchMessageStart; std::array pchCommand; uint32_t nMessageSize; uint8_t pchChecksum[CHECKSUM_SIZE]; }; /** * Bitcoin protocol message types. When adding new message types, don't forget * to update allNetMessageTypes in protocol.cpp. 
*/ namespace NetMsgType { /** * The version message provides information about the transmitting node to the * receiving node at the beginning of a connection. * @see https://bitcoin.org/en/developer-reference#version */ extern const char *VERSION; /** * The verack message acknowledges a previously-received version message, * informing the connecting node that it can begin to send other messages. * @see https://bitcoin.org/en/developer-reference#verack */ extern const char *VERACK; /** * The addr (IP address) message relays connection information for peers on the * network. * @see https://bitcoin.org/en/developer-reference#addr */ extern const char *ADDR; /** * The inv message (inventory message) transmits one or more inventories of * objects known to the transmitting peer. * @see https://bitcoin.org/en/developer-reference#inv */ extern const char *INV; /** * The getdata message requests one or more data objects from another node. * @see https://bitcoin.org/en/developer-reference#getdata */ extern const char *GETDATA; /** * The merkleblock message is a reply to a getdata message which requested a * block using the inventory type MSG_MERKLEBLOCK. * @since protocol version 70001 as described by BIP37. * @see https://bitcoin.org/en/developer-reference#merkleblock */ extern const char *MERKLEBLOCK; /** * The getblocks message requests an inv message that provides block header * hashes starting from a particular point in the block chain. * @see https://bitcoin.org/en/developer-reference#getblocks */ extern const char *GETBLOCKS; /** * The getheaders message requests a headers message that provides block * headers starting from a particular point in the block chain. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#getheaders */ extern const char *GETHEADERS; /** * The tx message transmits a single transaction. * @see https://bitcoin.org/en/developer-reference#tx */ extern const char *TX; /** * The headers message sends one or more block headers to a node which * previously requested certain headers with a getheaders message. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#headers */ extern const char *HEADERS; /** * The block message transmits a single serialized block. * @see https://bitcoin.org/en/developer-reference#block */ extern const char *BLOCK; /** * The getaddr message requests an addr message from the receiving node, * preferably one with lots of IP addresses of other receiving nodes. * @see https://bitcoin.org/en/developer-reference#getaddr */ extern const char *GETADDR; /** * The mempool message requests the TXIDs of transactions that the receiving * node has verified as valid but which have not yet appeared in a block. * @since protocol version 60002. * @see https://bitcoin.org/en/developer-reference#mempool */ extern const char *MEMPOOL; /** * The ping message is sent periodically to help confirm that the receiving * peer is still connected. * @see https://bitcoin.org/en/developer-reference#ping */ extern const char *PING; /** * The pong message replies to a ping message, proving to the pinging node that * the ponging node is still alive. * @since protocol version 60001 as described by BIP31. * @see https://bitcoin.org/en/developer-reference#pong */ extern const char *PONG; /** * The notfound message is a reply to a getdata message which requested an * object the receiving node does not have available for relay. * @since protocol version 70001. 
* @see https://bitcoin.org/en/developer-reference#notfound */ extern const char *NOTFOUND; /** * The filterload message tells the receiving peer to filter all relayed * transactions and requested merkle blocks through the provided filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterload */ extern const char *FILTERLOAD; /** * The filteradd message tells the receiving peer to add a single element to a * previously-set bloom filter, such as a new public key. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filteradd */ extern const char *FILTERADD; /** * The filterclear message tells the receiving peer to remove a previously-set * bloom filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterclear */ extern const char *FILTERCLEAR; /** * Indicates that a node prefers to receive new block announcements via a * "headers" message rather than an "inv". * @since protocol version 70012 as described by BIP130. * @see https://bitcoin.org/en/developer-reference#sendheaders */ extern const char *SENDHEADERS; /** * The feefilter message tells the receiving peer not to inv us any txs * which do not meet the specified min fee rate. * @since protocol version 70013 as described by BIP133 */ extern const char *FEEFILTER; /** * Contains a 1-byte bool and 8-byte LE version number. * Indicates that a node is willing to provide blocks via "cmpctblock" messages. * May indicate that a node prefers to receive new block announcements via a * "cmpctblock" message rather than an "inv", depending on message contents. * @since protocol version 70014 as described by BIP 152 */ extern const char *SENDCMPCT; /** * Contains a CBlockHeaderAndShortTxIDs object - providing a header and * list of "short txids". * @since protocol version 70014 as described by BIP 152 */ extern const char *CMPCTBLOCK; /** * Contains a BlockTransactionsRequest * Peer should respond with "blocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *GETBLOCKTXN; /** * Contains a BlockTransactions. * Sent in response to a "getblocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *BLOCKTXN; /** * getcfilters requests compact filters for a range of blocks. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. */ extern const char *GETCFILTERS; /** * cfilter is a response to a getcfilters request containing a single compact * filter. */ extern const char *CFILTER; /** * getcfheaders requests a compact filter header and the filter hashes for a * range of blocks, which can then be used to reconstruct the filter headers * for those blocks. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. */ extern const char *GETCFHEADERS; /** * cfheaders is a response to a getcfheaders request containing a filter header * and a vector of filter hashes for each subsequent block in the requested * range. 
*/ extern const char *CFHEADERS; /** * getcfcheckpt requests evenly spaced compact filter headers, enabling * parallelized download and validation of the headers between them. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. */ extern const char *GETCFCHECKPT; /** * cfcheckpt is a response to a getcfcheckpt request containing a vector of * evenly spaced filter headers for blocks on the requested chain. */ extern const char *CFCHECKPT; +/** + * Contains a delegation and a signature. + */ +extern const char *AVAHELLO; /** * Contains an avalanche::Poll. * Peer should respond with "avaresponse" message. */ extern const char *AVAPOLL; /** * Contains an avalanche::Response. * Sent in response to a "avapoll" message. */ extern const char *AVARESPONSE; /** * Indicate if the message is used to transmit the content of a block. * These messages can be significantly larger than usual messages and therefore * may need to be processed differently. */ bool IsBlockLike(const std::string &strCommand); }; // namespace NetMsgType /** Get a vector of all valid message types (see above) */ const std::vector &getAllNetMessageTypes(); /** * nServices flags. */ enum ServiceFlags : uint64_t { // NOTE: When adding here, be sure to update serviceFlagToStr too // Nothing NODE_NONE = 0, // NODE_NETWORK means that the node is capable of serving the complete block // chain. It is currently set by all Bitcoin ABC non pruned nodes, and is // unset by SPV clients or other light clients. NODE_NETWORK = (1 << 0), // NODE_GETUTXO means the node is capable of responding to the getutxo // protocol request. Bitcoin ABC does not support this but a patch set // called Bitcoin XT does. See BIP 64 for details on how this is // implemented. NODE_GETUTXO = (1 << 1), // NODE_BLOOM means the node is capable and willing to handle bloom-filtered // connections. Bitcoin ABC nodes used to support this by default, without // advertising this bit, but no longer do as of protocol version 70011 (= // NO_BLOOM_VERSION) NODE_BLOOM = (1 << 2), // NODE_XTHIN means the node supports Xtreme Thinblocks. If this is turned // off then the node will not service nor make xthin requests. NODE_XTHIN = (1 << 4), // Bit 5 was NODE_BITCOIN_CASH, removed in v0.22.8 // NODE_COMPACT_FILTERS means the node will service basic block filter // requests. // See BIP157 and BIP158 for details on how this is implemented. NODE_COMPACT_FILTERS = (1 << 6), // NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation // of only serving the last 288 (2 day) blocks // See BIP159 for details on how this is implemented. NODE_NETWORK_LIMITED = (1 << 10), // The last non experimental service bit, helper for looping over the flags NODE_LAST_NON_EXPERIMENTAL_SERVICE_BIT = (1 << 23), // Bits 24-31 are reserved for temporary experiments. Just pick a bit that // isn't getting used, or one not being used much, and notify the // bitcoin-development mailing list. Remember that service bits are just // unauthenticated advertisements, so your code must be robust against // collisions and other cases where nodes may be advertising a service they // do not actually support. Other service bits should be allocated via the // BIP process. // NODE_AVALANCHE means the node supports Bitcoin Cash's avalanche // preconsensus mechanism. NODE_AVALANCHE = (1 << 24), }; /** * Convert service flags (a bitmask of NODE_*) to human readable strings. * It supports unknown service flags which will be returned as "UNKNOWN[...]". 
* @param[in] flags multiple NODE_* bitwise-OR-ed together */ std::vector serviceFlagsToStr(const uint64_t flags); /** * Gets the set of service flags which are "desirable" for a given peer. * * These are the flags which are required for a peer to support for them * to be "interesting" to us, ie for us to wish to use one of our few * outbound connection slots for or for us to wish to prioritize keeping * their connection around. * * Relevant service flags may be peer- and state-specific in that the * version of the peer may determine which flags are required (eg in the * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which * case NODE_NETWORK_LIMITED suffices). * * Thus, generally, avoid calling with peerServices == NODE_NONE, unless * state-specific flags must absolutely be avoided. When called with * peerServices == NODE_NONE, the returned desirable service flags are * guaranteed to not change dependent on state - ie they are suitable for * use when describing peers which we know to be desirable, but for which * we do not have a confirmed set of service flags. * * If the NODE_NONE return value is changed, contrib/seeds/makeseeds.py * should be updated appropriately to filter for the same nodes. */ ServiceFlags GetDesirableServiceFlags(ServiceFlags services); /** * Set the current IBD status in order to figure out the desirable service * flags */ void SetServiceFlagsIBDCache(bool status); /** * A shortcut for (services & GetDesirableServiceFlags(services)) * == GetDesirableServiceFlags(services), ie determines whether the given * set of service flags are sufficient for a peer to be "relevant". */ static inline bool HasAllDesirableServiceFlags(ServiceFlags services) { return !(GetDesirableServiceFlags(services) & (~services)); } /** * Checks if a peer with the given service flags may be capable of having a * robust address-storage DB. */ static inline bool MayHaveUsefulAddressDB(ServiceFlags services) { return (services & NODE_NETWORK) || (services & NODE_NETWORK_LIMITED); } /** * A CService with information about it as peer. */ class CAddress : public CService { public: CAddress(); explicit CAddress(CService ipIn, ServiceFlags nServicesIn); void Init(); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { if (ser_action.ForRead()) { Init(); } int nVersion = s.GetVersion(); if (s.GetType() & SER_DISK) { READWRITE(nVersion); } if ((s.GetType() & SER_DISK) || (nVersion != INIT_PROTO_VERSION && !(s.GetType() & SER_GETHASH))) { // The only time we serialize a CAddress object without nTime is in // the initial VERSION messages which contain two CAddress records. // At that point, the serialization version is INIT_PROTO_VERSION. // After the version handshake, serialization version is >= // MIN_PEER_PROTO_VERSION and all ADDR messages are serialized with // nTime. READWRITE(nTime); } uint64_t nServicesInt = nServices; READWRITE(nServicesInt); nServices = static_cast(nServicesInt); READWRITEAS(CService, *this); } // TODO: make private (improves encapsulation) public: ServiceFlags nServices; // disk and network only unsigned int nTime; }; /** getdata message type flags */ const uint32_t MSG_TYPE_MASK = 0xffffffff >> 3; /** * getdata / inv message types. * These numbers are defined by the protocol. When adding a new value, be sure * to mention it in the respective BIP. */ enum GetDataMsg { UNDEFINED = 0, MSG_TX = 1, MSG_BLOCK = 2, // The following can only occur in getdata. 
Invs always use TX or BLOCK. //! Defined in BIP37 MSG_FILTERED_BLOCK = 3, //! Defined in BIP152 MSG_CMPCT_BLOCK = 4, }; /** * Inv(ventory) message data. * Intended as non-ambiguous identifier of objects (eg. transactions, blocks) * held by peers. */ class CInv { public: // TODO: make private (improves encapsulation) uint32_t type; uint256 hash; public: CInv() : type(0), hash() {} CInv(uint32_t typeIn, const uint256 &hashIn) : type(typeIn), hash(hashIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(type); READWRITE(hash); } friend bool operator<(const CInv &a, const CInv &b) { return a.type < b.type || (a.type == b.type && a.hash < b.hash); } std::string GetCommand() const; std::string ToString() const; uint32_t GetKind() const { return type & MSG_TYPE_MASK; } bool IsTx() const { auto k = GetKind(); return k == MSG_TX; } bool IsSomeBlock() const { auto k = GetKind(); return k == MSG_BLOCK || k == MSG_FILTERED_BLOCK || k == MSG_CMPCT_BLOCK; } }; #endif // BITCOIN_PROTOCOL_H diff --git a/test/functional/abc_p2p_avalanche.py b/test/functional/abc_p2p_avalanche.py index 05fdc8f09..aabb2c742 100755 --- a/test/functional/abc_p2p_avalanche.py +++ b/test/functional/abc_p2p_avalanche.py @@ -1,306 +1,352 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the resolution of forks via avalanche.""" import random from test_framework.key import ( ECKey, ECPubKey, ) from test_framework.mininode import P2PInterface, mininode_lock from test_framework.messages import ( AvalancheResponse, AvalancheVote, CInv, msg_avapoll, msg_tcpavaresponse, + NODE_AVALANCHE, + NODE_NETWORK, TCPAvalancheResponse, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, wait_until, ) BLOCK_ACCEPTED = 0 BLOCK_INVALID = 1 BLOCK_PARKED = 2 BLOCK_FORK = 3 BLOCK_UNKNOWN = -1 BLOCK_MISSING = -2 BLOCK_PENDING = -3 class TestNode(P2PInterface): def __init__(self): self.round = 0 + self.avahello = None self.avaresponses = [] self.avapolls = [] super().__init__() + def peer_connect(self, *args, **kwargs): + create_conn = super().peer_connect(*args, **kwargs) + + # Save the nonce and extra entropy so they can be reused later. + self.local_nonce = self.on_connection_send_msg.nNonce + self.local_extra_entropy = self.on_connection_send_msg.nExtraEntropy + + return create_conn + + def on_version(self, message): + super().on_version(message) + + # Save the nonce and extra entropy so they can be reused later. 
+ self.remote_nonce = message.nNonce + self.remote_extra_entropy = message.nExtraEntropy + def on_avaresponse(self, message): with mininode_lock: self.avaresponses.append(message.response) def on_avapoll(self, message): with mininode_lock: self.avapolls.append(message.poll) + def on_avahello(self, message): + with mininode_lock: + assert(self.avahello is None) + self.avahello = message + def send_avaresponse(self, round, votes, privkey): response = AvalancheResponse(round, 0, votes) sig = privkey.sign_schnorr(response.get_hash()) msg = msg_tcpavaresponse() msg.response = TCPAvalancheResponse(response, sig) self.send_message(msg) + def wait_for_avaresponse(self, timeout=5): + wait_until( + lambda: len(self.avaresponses) > 0, + timeout=timeout, + lock=mininode_lock) + + with mininode_lock: + return self.avaresponses.pop(0) + def send_poll(self, hashes): msg = msg_avapoll() msg.poll.round = self.round self.round += 1 for h in hashes: msg.poll.invs.append(CInv(2, h)) self.send_message(msg) - def wait_for_avaresponse(self, timeout=5): + def get_avapoll_if_available(self): + with mininode_lock: + return self.avapolls.pop(0) if len(self.avapolls) > 0 else None + + def wait_for_avahello(self, timeout=5): wait_until( - lambda: len(self.avaresponses) > 0, + lambda: self.avahello is not None, timeout=timeout, lock=mininode_lock) with mininode_lock: - return self.avaresponses.pop(0) - - def get_avapoll_if_available(self): - with mininode_lock: - return self.avapolls.pop(0) if len(self.avapolls) > 0 else None + return self.avahello class AvalancheTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [ ['-enableavalanche=1', '-avacooldown=0'], ['-enableavalanche=1', '-avacooldown=0', '-noparkdeepreorg', '-maxreorgdepth=-1']] self.supports_cli = False def run_test(self): node = self.nodes[0] # Build a fake quorum of nodes. def get_quorum(): def get_node(): n = TestNode() - node.add_p2p_connection(n) + node.add_p2p_connection( + n, services=NODE_NETWORK | NODE_AVALANCHE) n.wait_for_verack() # Get our own node id so we can use it later. n.nodeid = node.getpeerinfo()[-1]['id'] return n return [get_node() for _ in range(0, 16)] - quorum = get_quorum() - # Pick on node from the quorum for polling. + quorum = get_quorum() poll_node = quorum[0] # Generate many block and poll for them. address = node.get_deterministic_priv_key().address blocks = node.generatetoaddress(100, address) def get_coinbase(h): b = node.getblock(h, 2) return { 'height': b['height'], 'txid': b['tx'][0]['txid'], 'n': 0, 'value': b['tx'][0]['vout'][0]['value'], } coinbases = [get_coinbase(h) for h in blocks] fork_node = self.nodes[1] # Make sure the fork node has synced the blocks self.sync_blocks([node, fork_node]) # Get the key so we can verify signatures. avakey = ECPubKey() avakey.set(bytes.fromhex(node.getavalanchekey())) self.log.info("Poll for the chain tip...") best_block_hash = int(node.getbestblockhash(), 16) poll_node.send_poll([best_block_hash]) def assert_response(expected): response = poll_node.wait_for_avaresponse() r = response.response assert_equal(r.cooldown, 0) # Verify signature. 
assert avakey.verify_schnorr(response.sig, r.get_hash()) votes = r.votes assert_equal(len(votes), len(expected)) for i in range(0, len(votes)): assert_equal(repr(votes[i]), repr(expected[i])) assert_response([AvalancheVote(BLOCK_ACCEPTED, best_block_hash)]) self.log.info("Poll for a selection of blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(1), 16), int(node.getblockhash(10), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), int(node.getblockhash(96), 16), int(node.getblockhash(99), 16), int(node.getblockhash(100), 16), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes]) self.log.info( "Poll for a selection of blocks, but some are now invalid...") invalidated_block = node.getblockhash(76) node.invalidateblock(invalidated_block) # We need to send the coin to a new address in order to make sure we do # not regenerate the same block. node.generatetoaddress( 26, 'bchreg:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v7ej0fffv') node.reconsiderblock(invalidated_block) poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] + [AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[-3:]]) self.log.info("Poll for unknown blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), various_block_hashes[5], various_block_hashes[6], various_block_hashes[7], random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] + [AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[3:6]] + [AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]]) self.log.info("Trigger polling from the node...") # duplicate the deterministic sig test from src/test/key_tests.cpp privkey = ECKey() privkey.set(bytes.fromhex( "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"), True) pubkey = privkey.get_pubkey() privatekey = node.get_deterministic_priv_key().key proof = node.buildavalancheproof(11, 12, pubkey.get_bytes().hex(), [{ 'txid': coinbases[0]['txid'], 'vout': coinbases[0]['n'], 'amount': coinbases[0]['value'], 'height': coinbases[0]['height'], 'iscoinbase': True, 'privatekey': privatekey, }]) # Activate the quorum. for n in quorum: success = node.addavalanchenode( n.nodeid, pubkey.get_bytes().hex(), proof) assert success is True def can_find_block_in_poll(hash, resp=BLOCK_ACCEPTED): found_hash = False for n in quorum: poll = n.get_avapoll_if_available() # That node has not received a poll if poll is None: continue # We got a poll, check for the hash and repond votes = [] for inv in poll.invs: # Vote yes to everything r = BLOCK_ACCEPTED # Look for what we expect if inv.hash == hash: r = resp found_hash = True votes.append(AvalancheVote(r, inv.hash)) n.send_avaresponse(poll.round, votes, privkey) return found_hash # Now that we have a peer, we should start polling for the tip. hash_tip = int(node.getbestblockhash(), 16) wait_until(lambda: can_find_block_in_poll(hash_tip), timeout=5) # Make sure the fork node has synced the blocks self.sync_blocks([node, fork_node]) # Create a fork 2 blocks deep. This should trigger polling. 
fork_node.invalidateblock(fork_node.getblockhash(100)) fork_address = fork_node.get_deterministic_priv_key().address fork_node.generatetoaddress(2, fork_address) # Because the new tip is a deep reorg, the node will not accept it # right away, but poll for it. def parked_block(blockhash): for tip in node.getchaintips(): if tip["hash"] == blockhash: assert tip["status"] != "active" return tip["status"] == "parked" return False fork_tip = fork_node.getbestblockhash() wait_until(lambda: parked_block(fork_tip)) self.log.info("Answer all polls to finalize...") hash_to_find = int(fork_tip, 16) def has_accepted_new_tip(): can_find_block_in_poll(hash_to_find) return node.getbestblockhash() == fork_tip # Because everybody answers yes, the node will accept that block. wait_until(has_accepted_new_tip, timeout=15) assert_equal(node.getbestblockhash(), fork_tip) self.log.info("Answer all polls to park...") node.generate(1) tip_to_park = node.getbestblockhash() - self.log.info(tip_to_park) - hash_to_find = int(tip_to_park, 16) assert(tip_to_park != fork_tip) def has_parked_new_tip(): can_find_block_in_poll(hash_to_find, BLOCK_PARKED) return node.getbestblockhash() == fork_tip # Because everybody answers no, the node will park that block. wait_until(has_parked_new_tip, timeout=15) assert_equal(node.getbestblockhash(), fork_tip) + # Restart the node and rebuild the quorum + self.restart_node(0, self.extra_args[0] + [ + "-avaproof={}".format(proof), + "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", + ]) + quorum = get_quorum() + poll_node = quorum[0] + + # Check the avahello is consistent + avahello = poll_node.wait_for_avahello().hello + + avakey.set(bytes.fromhex(node.getavalanchekey())) + assert avakey.verify_schnorr( + avahello.sig, avahello.get_sighash(poll_node)) + if __name__ == '__main__': AvalancheTest().main() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 64d414390..da62159bb 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -1,1689 +1,1788 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Bitcoin test framework primitive and message structures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in bitcoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization. Classes use __slots__ to ensure extraneous attributes aren't accidentally added by tests, compromising their intended effect. 
""" from codecs import encode import copy import hashlib from io import BytesIO import random import socket import struct import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, assert_equal MIN_VERSION_SUPPORTED = 60001 # past bip-31 for ping/pong MY_VERSION = 70014 MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" # from version 70001 onwards, fRelay should be appended to version # messages (BIP37) MY_RELAY = 1 MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 # 1 BCH in satoshis COIN = 100000000 MAX_MONEY = 21000000 * COIN NODE_NETWORK = (1 << 0) NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) # NODE_WITNESS = (1 << 3) NODE_XTHIN = (1 << 4) NODE_COMPACT_FILTERS = (1 << 6) NODE_NETWORK_LIMITED = (1 << 10) NODE_AVALANCHE = (1 << 24) MSG_TX = 1 MSG_BLOCK = 2 MSG_FILTERED_BLOCK = 3 MSG_CMPCTBLOCK = 4 MSG_TYPE_MASK = 0xffffffff >> 2 FILTER_TYPE_BASIC = 0 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(size): r = b"" if size < 253: r = struct.pack("B", size) elif size < 0x10000: r = struct.pack(">= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. def ser_vector(v, ser_function_name=None): r = ser_compact_size(len(v)) for i in v: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(v): r = ser_compact_size(len(v)) for i in v: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(v): r = ser_compact_size(len(v)) for sv in v: r += ser_string(sv) return r def FromHex(obj, hex_string): """Deserialize from a hex string representation (eg from RPC)""" obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj def ToHex(obj): """Convert a binary-serializable object to hex (eg for submission via RPC)""" return obj.serialize().hex() # Objects that map to bitcoind objects, which can be serialized/deserialized class CAddress: __slots__ = ("ip", "nServices", "pchReserved", "port", "time") def __init__(self): self.time = 0 self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, *, with_time=True): if with_time: # VERSION messages serialize CAddress objects without time self.time = struct.unpack("H", f.read(2))[0] def serialize(self, *, with_time=True): r = b"" if with_time: # VERSION messages serialize CAddress objects without time r += struct.pack("H", self.port) return r def __repr__(self): return "CAddress(nServices={} ip={} port={})".format( self.nServices, self.ip, self.port) class CInv: __slots__ = ("hash", "type") typemap = { 0: "Error", MSG_TX: "TX", MSG_BLOCK: "Block", MSG_FILTERED_BLOCK: "filtered Block", MSG_CMPCTBLOCK: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" 21000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion={} vin={} vout={} 
nLockTime={})".format( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader: __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", "nTime", "nVersion", "sha256") def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nTime, self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction: __slots__ = ("index", "tx") def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return "PrefilledTransaction(index={}, tx={})".format( self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. 
class P2PHeaderAndShortIDs: __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length", "shortids", "shortids_length") def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack(" class msg_headers: __slots__ = ("headers",) msgtype = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers={})".format(repr(self.headers)) class msg_merkleblock: __slots__ = ("merkleblock",) msgtype = b"merkleblock" def __init__(self, merkleblock=None): if merkleblock is None: self.merkleblock = CMerkleBlock() else: self.merkleblock = merkleblock def deserialize(self, f): self.merkleblock.deserialize(f) def serialize(self): return self.merkleblock.serialize() def __repr__(self): return "msg_merkleblock(merkleblock={})".format(repr(self.merkleblock)) class msg_filterload: __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags") msgtype = b"filterload" def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0): self.data = data self.nHashFuncs = nHashFuncs self.nTweak = nTweak self.nFlags = nFlags def deserialize(self, f): self.data = deser_string(f) self.nHashFuncs = struct.unpack(" 0: self.recvbuf += t while True: msg = self._on_data() if msg is None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != self.magic_bytes: raise ValueError( "magic bytes mismatch: {} != {}".format( repr( self.magic_bytes), repr( self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return None msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] msglen = struct.unpack( " 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. 
Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self): super().__init__() # Track number of messages of each type received and the most recent # message of each type self.message_count = defaultdict(int) self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: # Send a version msg vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 + # Will be sent soon after connection_made self.on_connection_send_msg = vt return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with mininode_lock: try: msgtype = message.msgtype.decode('ascii') self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, 'on_' + msgtype)(message) except Exception: print("ERROR delivering {} ({})".format( repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_avapoll(self, message): pass def on_avaresponse(self, message): pass + def on_avahello(self, message): pass + def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cfcheckpt(self, message): pass def on_cfheaders(self, message): pass def on_cfilter(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_filteradd(self, message): pass def on_filterclear(self, message): pass def on_filterload(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_merkleblock(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass def on_reject(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format( message.nVersion, MIN_VERSION_SUPPORTED) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_for_disconnect(self, timeout=60): def test_function(): return not self.is_connected wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): assert self.is_connected if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_block(self, blockhash, timeout=60): def test_function(): assert self.is_connected return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_header(self, blockhash, timeout=60): def test_function(): assert self.is_connected last_headers = self.last_message.get('headers') if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): assert self.is_connected last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getdata(self, timeout=60): """Waits for a getdata message. Receiving any getdata message will satisfy the predicate. the last_message["getdata"] value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block/tx has been requested.""" def test_function(): assert self.is_connected return self.last_message.get("getdata") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success. 
TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" def test_function(): assert self.is_connected return self.last_message.get("getheaders") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): assert self.is_connected return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): assert self.is_connected return self.last_message.get( "pong") and self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. mininode_lock = threading.RLock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be # created assert not self.network_event_loop NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe( self.network_event_loop.stop) wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. NetworkThread.network_event_loop = None class P2PDataStore(P2PInterface): """A P2P data store class. Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. 
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug( 'getdata message type {} received.'.format(hex(inv.type))) def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] maxheaders = 2000 while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader( self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug('block hash {} not found in block store'.format( hex(prev_block_hash))) break # Truncate the list if there are too many headers headers_list = headers_list[:-maxheaders - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): """Send blocks to test node and test whether the tip advances. - add all blocks to our block_store - send a headers message for the final block - the on_getheaders handler will ensure that any getheaders are responded to - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - if success is True: assert that the node's tip advances to the most recent block - if success is False: assert that the node's tip doesn't advance - if reject_reason is set: assert that the correct reject message is logged""" with mininode_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 def test(): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message( msg_headers([CBlockHeader(block) for block in blocks])) wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. 
- add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with mininode_lock: for tx in txs: self.tx_store[tx.sha256] = tx def test(): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, "{} not found in mempool".format( tx.hash) else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, "{} tx found in mempool".format( tx.hash) if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test()
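
The IsOversized() logic shown in the protocol.cpp hunk above encodes a two-tier limit: messages that do not carry block content are capped at MAX_PROTOCOL_MESSAGE_LENGTH, while block-carrying messages are allowed up to twice the configured maximum block size. A minimal Python sketch of that check (not part of the patch; the BLOCK_LIKE set is an illustrative stand-in for NetMsgType::IsBlockLike()):

# Plain-Python sketch of the size check performed by
# CMessageHeader::IsOversized() in the protocol.cpp hunk above.
MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024  # 2MB, as in protocol.h


def is_oversized(command, msg_size, max_block_size):
    # Illustrative stand-in for NetMsgType::IsBlockLike().
    BLOCK_LIKE = {b"block", b"cmpctblock", b"blocktxn"}
    # Messages that do not carry block content are held to the flat limit.
    if msg_size > MAX_PROTOCOL_MESSAGE_LENGTH and command not in BLOCK_LIKE:
        return True
    # Block-carrying messages scale with the configured maximum block size.
    return msg_size > 2 * max_block_size


# A 3MB "tx" message is oversized; a 3MB "block" message is not when the
# configured maximum block size is 32MB.
assert is_oversized(b"tx", 3 * 1024 * 1024, 32 * 1000 * 1000)
assert not is_oversized(b"block", 3 * 1024 * 1024, 32 * 1000 * 1000)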
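
serviceFlagToStr() and serviceFlagsToStr(), also shown above, turn the nServices bitmask into readable names and fall back to UNKNOWN[2^bit] for unallocated bits; with NODE_AVALANCHE that now includes bit 24. The same decomposition in a few lines of Python (the table and function name here are ours, not part of the patch):

# Illustrative sketch of the decomposition done by serviceFlagsToStr() in
# src/protocol.cpp; KNOWN_BITS mirrors the switch in serviceFlagToStr().
KNOWN_BITS = {
    0: "NETWORK",
    1: "GETUTXO",
    2: "BLOOM",
    4: "XTHIN",
    6: "COMPACT_FILTERS",
    10: "NETWORK_LIMITED",
    24: "AVALANCHE",
}


def service_flags_to_str(flags):
    # One entry per set bit, mirroring the C++ loop over sizeof(flags) * 8.
    return [
        KNOWN_BITS.get(bit, "UNKNOWN[2^{}]".format(bit))
        for bit in range(64)
        if flags & (1 << bit)
    ]


# NODE_NETWORK | NODE_AVALANCHE -> ['NETWORK', 'AVALANCHE']
assert service_flags_to_str((1 << 0) | (1 << 24)) == ["NETWORK", "AVALANCHE"]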
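
Finally, the functional test additions verify the new avahello handshake once the node is restarted with -avaproof and -avamasterkey. The sketch below condenses that verification as a reading aid, not as part of the patch; it assumes the helpers introduced by this diff (NODE_AVALANCHE, wait_for_avahello(), the hello's get_sighash()) and the getavalanchekey RPC:

# Condensed from abc_p2p_avalanche.py above; `node` is the bitcoind under
# test (RPC interface) and `peer` is an instance of the TestNode
# P2PInterface helper defined in that test.
from test_framework.key import ECPubKey
from test_framework.messages import NODE_AVALANCHE, NODE_NETWORK


def check_avahello(node, peer):
    # Advertise avalanche support so the node greets us with an avahello
    # right after the version/verack handshake.
    node.add_p2p_connection(peer, services=NODE_NETWORK | NODE_AVALANCHE)
    peer.wait_for_verack()

    # The node's avalanche public key is exposed via the getavalanchekey RPC.
    avakey = ECPubKey()
    avakey.set(bytes.fromhex(node.getavalanchekey()))

    # get_sighash() takes the peer: the handshake nonce and extra entropy it
    # saved in peer_connect()/on_version() are presumably folded into the
    # signed hash, making the hello specific to this connection.
    hello = peer.wait_for_avahello().hello
    assert avakey.verify_schnorr(hello.sig, hello.get_sighash(peer))

This mirrors the closing check added to run_test() after the quorum is rebuilt following the restart.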