diff --git a/src/avalanche/peermanager.cpp b/src/avalanche/peermanager.cpp
index aa01fb76e..27a6f86f9 100644
--- a/src/avalanche/peermanager.cpp
+++ b/src/avalanche/peermanager.cpp
@@ -1,301 +1,301 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include

namespace avalanche {

PeerId PeerManager::addPeer(PeerId p, uint32_t score) {
    auto inserted = peers.emplace(p, Peer(score, uint32_t(slots.size())));
    assert(inserted.second);

    const uint64_t start = slotCount;
    slots.emplace_back(start, score, p);
    slotCount = start + score;
    return p;
}

bool PeerManager::removePeer(PeerId p) {
    auto it = peers.find(p);
    if (it == peers.end()) {
        return false;
    }

    size_t i = it->second.index;
    assert(i < slots.size());

    if (i + 1 == slots.size()) {
        slots.pop_back();
        slotCount = slots.empty() ? 0 : slots.back().getStop();
    } else {
        fragmentation += slots[i].getScore();
        slots[i] = slots[i].withPeerId(NO_PEER);
    }

    // Remove nodes associated with this peer, unless their timeout is still
    // active. This ensures that we don't overquery them in case they are
    // subsequently added to another peer.
    auto &nview = nodes.get();
    nview.erase(nview.lower_bound(boost::make_tuple(p, TimePoint())),
                nview.upper_bound(
                    boost::make_tuple(p, std::chrono::steady_clock::now())));

    peers.erase(it);
    return true;
}

bool PeerManager::rescorePeer(PeerId p, uint32_t score) {
    auto it = peers.find(p);
    if (it == peers.end()) {
        return false;
    }

    size_t i = it->second.index;
    assert(i < slots.size());

    // Update the peer's score.
    it->second.score = score;

    // Update the slot allocation to reflect the new score.
    const uint64_t start = slots[i].getStart();

    // If this is the last element, we can extend/shrink easily.
    if (i + 1 == slots.size()) {
        slots[i] = slots[i].withScore(score);
        slotCount = slots[i].getStop();
        return true;
    }

    const uint64_t stop = start + score;
    const uint64_t nextStart = slots[i + 1].getStart();

    // We can extend in place.
    if (stop <= nextStart) {
        fragmentation += (slots[i].getStop() - stop);
        slots[i] = slots[i].withScore(score);
        return true;
    }

    // Otherwise, we need to insert a new entry.
    fragmentation += slots[i].getScore();
    slots[i] = slots[i].withPeerId(NO_PEER);
    it->second.index = uint32_t(slots.size());
    const uint64_t newStart = slotCount;
    slots.emplace_back(newStart, score, p);
    slotCount = newStart + score;

    return true;
}
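
Aside, for illustration only and not part of the patch: the slot bookkeeping above can be made concrete with a minimal usage sketch of the public PeerManager API. The wrapper function name is made up, and the expected counter values simply mirror what the remove_peer test further down asserts.

// Sketch: how addPeer/removePeer affect slot allocation and fragmentation.
// Assumes it is built inside the tree so that <avalanche/peermanager.h>
// is available; the numbers mirror the remove_peer unit test below.
#include <avalanche/peermanager.h>

#include <cassert>

void slotBookkeepingSketch() {
    avalanche::PeerManager pm;

    // Each peer gets a contiguous range of `score` slots appended at the
    // end of the slot vector.
    auto a = pm.addPeer(100);
    auto b = pm.addPeer(100);
    auto c = pm.addPeer(100);
    assert(pm.getSlotCount() == 300);
    assert(pm.getFragmentation() == 0);

    // Removing a peer that is not the last one leaves a dead slot behind:
    // the total slot count is unchanged, but 100 slots are now fragmented.
    pm.removePeer(b);
    assert(pm.getSlotCount() == 300);
    assert(pm.getFragmentation() == 100);

    // compact() rebuilds a dense slot vector and reports the space saved.
    assert(pm.compact() == 100);
    assert(pm.getSlotCount() == 200);
    assert(pm.getFragmentation() == 0);
    assert(pm.verify());

    (void)a;
    (void)c;
}
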
bool PeerManager::addNodeToPeer(PeerId peerid, NodeId nodeid, CPubKey pubkey) {
    auto pit = peers.find(peerid);
    if (pit == peers.end()) {
        return false;
    }

    auto nit = nodes.find(nodeid);
    if (nit == nodes.end()) {
        return nodes.emplace(nodeid, peerid, std::move(pubkey)).second;
    }

    // We already have this node, so we need to update it.
    return nodes.modify(nit, [&](Node &n) {
        n.peerid = peerid;
        n.pubkey = std::move(pubkey);
    });
}

bool PeerManager::removeNode(NodeId nodeid) {
    return nodes.erase(nodeid) > 0;
}

bool PeerManager::forNode(NodeId nodeid, std::function func) const {
    auto it = nodes.find(nodeid);
    return it != nodes.end() && func(*it);
}

bool PeerManager::updateNextRequestTime(NodeId nodeid, TimePoint timeout) {
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        return false;
    }

    return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; });
}

-NodeId PeerManager::getSuitableNodeToQuery() {
+NodeId PeerManager::selectNode() {
    for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
        const PeerId p = selectPeer();

        // If we cannot find a peer, it may be because selection is failing
        // due to high fragmentation, so compact and retry.
        if (p == NO_PEER) {
            compact();
            continue;
        }

        // See if that peer has an available node.
        auto &nview = nodes.get();
        auto it = nview.lower_bound(boost::make_tuple(p, TimePoint()));
        if (it != nview.end() && it->peerid == p &&
            it->nextRequestTime <= std::chrono::steady_clock::now()) {
            return it->nodeid;
        }
    }

    return NO_NODE;
}

PeerId PeerManager::selectPeer() const {
    if (slots.empty() || slotCount == 0) {
        return NO_PEER;
    }

    const uint64_t max = slotCount;
    for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
        size_t i = selectPeerImpl(slots, GetRand(max), max);
        if (i != NO_PEER) {
            return i;
        }
    }

    return NO_PEER;
}

uint64_t PeerManager::compact() {
    // There is nothing to compact.
    if (fragmentation == 0) {
        return 0;
    }

    // Shrink the vector to the expected size.
    while (slots.size() > peers.size()) {
        slots.pop_back();
    }

    uint64_t prevStop = 0;
    uint32_t i = 0;
    for (auto &pair : peers) {
        PeerId pid = pair.first;
        Peer &p = pair.second;

        slots[i] = Slot(prevStop, p.score, pid);
        prevStop = slots[i].getStop();
        p.index = i++;
    }

    const uint64_t saved = slotCount - prevStop;
    slotCount = prevStop;
    fragmentation = 0;

    return saved;
}

bool PeerManager::verify() const {
    uint64_t prevStop = 0;
    for (size_t i = 0; i < slots.size(); i++) {
        const Slot &s = slots[i];

        // Slots must be in the correct order.
        if (s.getStart() < prevStop) {
            return false;
        }

        prevStop = s.getStop();

        // If this is a dead slot, then nothing more needs to be checked.
        if (s.getPeerId() == NO_PEER) {
            continue;
        }

        // We have a live slot, verify the index.
        auto it = peers.find(s.getPeerId());
        if (it == peers.end() || it->second.index != i) {
            return false;
        }
    }

    for (const auto &pair : peers) {
        auto p = pair.first;
        auto i = pair.second.index;
        auto s = pair.second.score;

        // The index must point to a slot referring to this peer.
        if (i >= slots.size() || slots[i].getPeerId() != p) {
            return false;
        }

        // If the score does not match, same thing.
        if (slots[i].getScore() != s) {
            return false;
        }
    }

    return true;
}
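
Aside, for illustration only and not part of the patch: a small sketch of how stake-weighted selection behaves, driving selectPeerImpl (defined just below) with a hand-built slots vector, the same way the select_peer_* tests do. The peer ids and scores are arbitrary.

// Sketch: stake-weighted selection with selectPeerImpl.
#include <avalanche/peermanager.h>

#include <cassert>
#include <cstdint>
#include <vector>

void weightedSelectionSketch() {
    // Peer 7 owns slots [0, 100) and peer 8 owns slots [100, 300), so peer 8
    // should be picked twice as often when the slot number is uniform.
    std::vector<avalanche::Slot> slots = {{0, 100, 7}, {100, 200, 8}};
    const uint64_t max = 300;

    assert(avalanche::selectPeerImpl(slots, 0, max) == 7);
    assert(avalanche::selectPeerImpl(slots, 99, max) == 7);
    assert(avalanche::selectPeerImpl(slots, 100, max) == 8);
    assert(avalanche::selectPeerImpl(slots, 299, max) == 8);

    // A slot number that falls into a dead (unallocated) range maps to
    // NO_PEER, which is why selectPeer() retries and eventually compacts.
}
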
PeerId selectPeerImpl(const std::vector &slots, const uint64_t slot,
                      const uint64_t max) {
    assert(slot <= max);

    size_t begin = 0, end = slots.size();
    uint64_t bottom = 0, top = max;

    // Try to find the slot using dichotomic search.
    while ((end - begin) > 8) {
        // The slot we picked is not allocated.
        if (slot < bottom || slot >= top) {
            return NO_PEER;
        }

        // Guesstimate the position of the slot.
        size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
        assert(begin <= i && i < end);

        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }

        // We undershot.
        if (slots[i].precedes(slot)) {
            begin = i + 1;
            if (begin >= end) {
                return NO_PEER;
            }

            bottom = slots[begin].getStart();
            continue;
        }

        // We overshot.
        if (slots[i].follows(slot)) {
            end = i;
            top = slots[end].getStart();
            continue;
        }

        // We have an unallocated slot.
        return NO_PEER;
    }

    // Enough of that nonsense, let's fall back to a linear search.
    for (size_t i = begin; i < end; i++) {
        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }
    }

    // We failed to find a slot, retry.
    return NO_PEER;
}

} // namespace avalanche
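
Aside, for illustration only and not part of the patch: the "dichotomic" search above is really an interpolation search: because live slots cover most of [0, max), the slot number is a good predictor of its own index. A standalone sketch of the position guess, with made-up numbers:

// Sketch: the position guess used by the dichotomic search, on its own.
// All numbers are hypothetical; the real function also handles dead slots
// and falls back to a linear scan once the window shrinks to 8 entries.
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t guessIndex(size_t begin, size_t end, uint64_t bottom, uint64_t top,
                  uint64_t slot) {
    // Map the slot's relative position inside [bottom, top) onto the index
    // window [begin, end).
    return begin + size_t((slot - bottom) * (end - begin) / (top - bottom));
}

void interpolationSketch() {
    // 100 peers with a score of 1 each occupy slots 0..99, so slot 42 lands
    // on index 42 in a single step.
    assert(guessIndex(0, 100, 0, 100, 42) == 42);

    // If one peer near the end is much heavier (say the 100 slots span
    // [0, 1000) because the last peer has score 901), the first guess for
    // slot 420 is index 42, which undershoots; the window [begin, end) is
    // then narrowed, much like in a binary search.
    assert(guessIndex(0, 100, 0, 1000, 420) == 42);
}
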
diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h
index bd65ef0d8..8fae6693e 100644
--- a/src/avalanche/peermanager.h
+++ b/src/avalanche/peermanager.h
@@ -1,151 +1,151 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_AVALANCHE_PEERMANAGER_H
#define BITCOIN_AVALANCHE_PEERMANAGER_H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace avalanche {

struct Slot {
private:
    uint64_t start;
    uint32_t score;
    PeerId peerid;

public:
    Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn)
        : start(startIn), score(scoreIn), peerid(peeridIn) {}

    Slot withStart(uint64_t startIn) const {
        return Slot(startIn, score, peerid);
    }
    Slot withScore(uint64_t scoreIn) const {
        return Slot(start, scoreIn, peerid);
    }
    Slot withPeerId(PeerId peeridIn) const {
        return Slot(start, score, peeridIn);
    }

    uint64_t getStart() const { return start; }
    uint64_t getStop() const { return start + score; }
    uint32_t getScore() const { return score; }
    PeerId getPeerId() const { return peerid; }

    bool contains(uint64_t slot) const {
        return getStart() <= slot && slot < getStop();
    }
    bool precedes(uint64_t slot) const { return slot >= getStop(); }
    bool follows(uint64_t slot) const { return getStart() > slot; }
};

struct next_request_time {};

class PeerManager {
    std::vector slots;
    uint64_t slotCount = 0;
    uint64_t fragmentation = 0;

    /**
     * Several nodes can make up an avalanche peer. In this case, all nodes
     * are considered interchangeable parts of the same peer.
     */
    struct Peer {
        uint32_t score;
        uint32_t index;

        Peer(uint32_t score_, uint32_t index_)
            : score(score_), index(index_) {}
    };

    PeerId nextPeerId = 0;
    std::unordered_map peers;

    using NodeSet = boost::multi_index_container<
        Node,
        boost::multi_index::indexed_by<
            // index by nodeid
            boost::multi_index::hashed_unique<boost::multi_index::member>,
            // sorted by peerid/nextRequestTime
            boost::multi_index::ordered_non_unique<
                boost::multi_index::tag,
                boost::multi_index::composite_key<
                    Node, boost::multi_index::member,
                    boost::multi_index::member>>>>;

    NodeSet nodes;

    static constexpr int SELECT_PEER_MAX_RETRY = 3;
    static constexpr int SELECT_NODE_MAX_RETRY = 3;

public:
    /**
     * Peer API.
     */
    PeerId addPeer(uint32_t score) { return addPeer(nextPeerId++, score); }
    bool removePeer(PeerId p);
    bool rescorePeer(PeerId p, uint32_t score);

    /**
     * Node API.
     */
    bool addNodeToPeer(PeerId peerid, NodeId nodeid, CPubKey pubkey);
    bool removeNode(NodeId nodeid);
    bool forNode(NodeId nodeid, std::function func) const;
    bool updateNextRequestTime(NodeId nodeid, TimePoint timeout);
-    NodeId getSuitableNodeToQuery();
+    NodeId selectNode();

    /**
     * Exposed for tests.
     */
    PeerId selectPeer() const;

    /**
     * Trigger maintenance of internal data structures.
     * Returns how much slot space was saved after compaction.
     */
    uint64_t compact();

    /**
     * Perform a consistency check on internal data structures.
     * Mostly useful for tests.
     */
    bool verify() const;

    // Accessors.
    uint64_t getSlotCount() const { return slotCount; }
    uint64_t getFragmentation() const { return fragmentation; }

private:
    PeerId addPeer(PeerId peerid, uint32_t score);
};

/**
 * This is an internal method that is exposed for testing purposes.
 */
PeerId selectPeerImpl(const std::vector &slots, const uint64_t slot,
                      const uint64_t max);

} // namespace avalanche

#endif // BITCOIN_AVALANCHE_PEERMANAGER_H

diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp
index e041e3be7..765e0c7b6 100644
--- a/src/avalanche/processor.cpp
+++ b/src/avalanche/processor.cpp
@@ -1,516 +1,516 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**
 * Run the avalanche event loop every 10ms.
 */
static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10};

// Unfortunately, the bitcoind codebase is full of globals and we are kinda
// forced into it here.
std::unique_ptr g_avalanche;

namespace avalanche {

bool VoteRecord::registerVote(NodeId nodeid, uint32_t error) {
    // We just got a new vote, so there is one less inflight request.
    clearInflightRequest();

    // We want to avoid having the same node voting twice in a quorum.
    if (!addNodeToQuorum(nodeid)) {
        return false;
    }

    /**
     * The result of the vote is determined from the error code. If the error
     * code is 0, there is no error and therefore the vote is yes. If there
     * is an error, we check the most significant bit to decide if the vote
     * is a no (for instance, the block is invalid) or if the vote is
     * inconclusive (for instance, the queried node does not have the block
     * yet).
     */
    votes = (votes << 1) | (error == 0);
    consider = (consider << 1) | (int32_t(error) >= 0);

    /**
     * We compute the number of yes and/or no votes as follows:
     *
     * votes:     1010
     * consider:  1100
     *
     * yes votes: 1000 using votes & consider
     * no votes:  0100 using ~votes & consider
     */
    bool yes = countBits(votes & consider & 0xff) > 6;
    if (!yes) {
        bool no = countBits(~votes & consider & 0xff) > 6;
        if (!no) {
            // The round is inconclusive.
            return false;
        }
    }

    // If the round is in agreement with previous rounds, increase confidence.
    if (isAccepted() == yes) {
        confidence += 2;
        return getConfidence() == AVALANCHE_FINALIZATION_SCORE;
    }

    // The round changed our state. We reset the confidence.
    confidence = yes;
    return true;
}
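
Aside, for illustration only and not part of the patch: the bitfield accounting above can be checked by hand. The countBitsSketch helper below is a stand-in for the countBits() helper used by the real code (assumed to be a plain population count); the 4-bit example comes from the comment above.

// Sketch: how the votes/consider bitfields from registerVote() turn into
// yes/no counts.
#include <bitset>
#include <cassert>
#include <cstdint>

static int countBitsSketch(uint32_t v) {
    // Stand-in for the countBits() helper used in processor.cpp.
    return int(std::bitset<32>(v).count());
}

void voteCountingSketch() {
    // From the comment above: votes = 1010, consider = 1100.
    uint32_t votes = 0b1010;
    uint32_t consider = 0b1100;

    // yes votes: votes & consider  -> 1000 (one yes)
    // no votes:  ~votes & consider -> 0100 (one no)
    assert(countBitsSketch(votes & consider & 0xff) == 1);
    assert(countBitsSketch(~votes & consider & 0xff) == 1);

    // A round only counts as a firm "yes" when more than 6 of the last 8
    // considered votes are yes, i.e. countBits(votes & consider & 0xff) > 6.
    uint32_t strongYes = 0b11111110; // 7 yes votes out of the last 8
    uint32_t allConsidered = 0b11111111;
    assert(countBitsSketch(strongYes & allConsidered & 0xff) > 6);
}
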
bool VoteRecord::addNodeToQuorum(NodeId nodeid) {
    if (nodeid == NO_NODE) {
        // Helpful for testing.
        return true;
    }

    // MMIX Linear Congruential Generator.
    const uint64_t r1 =
        6364136223846793005 * uint64_t(nodeid) + 1442695040888963407;
    // Fibonacci hashing.
    const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed);
    // Combine and extract hash.
    const uint16_t h = (r1 + r2) >> 48;

    /**
     * Check if the node is in the filter.
     */
    for (size_t i = 1; i < nodeFilter.size(); i++) {
        if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) {
            return false;
        }
    }

    /**
     * Add the node which just voted to the filter.
     */
    nodeFilter[successfulVotes % nodeFilter.size()] = h;
    successfulVotes++;
    return true;
}
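
Aside, for illustration only and not part of the patch: a trimmed-down, standalone version of the rolling quorum filter implemented by addNodeToQuorum() above. The filter size and seed are arbitrary for the example; the hash constants are the ones used above.

// Sketch: rolling node filter, detached from VoteRecord for illustration.
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

struct NodeFilterSketch {
    std::array<uint16_t, 8> nodeFilter{};
    uint32_t successfulVotes = 0;
    uint64_t seed = 0x1234; // hypothetical session seed

    static uint16_t hashNode(uint64_t nodeid, uint64_t seed) {
        // MMIX linear congruential generator.
        const uint64_t r1 =
            6364136223846793005ull * nodeid + 1442695040888963407ull;
        // Fibonacci hashing of the seeded node id.
        const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed);
        // Combine and keep the top 16 bits.
        return uint16_t((r1 + r2) >> 48);
    }

    // Returns false if the node already voted within the last
    // nodeFilter.size() - 1 successful votes.
    bool addNodeToQuorum(uint64_t nodeid) {
        const uint16_t h = hashNode(nodeid, seed);
        for (size_t i = 1; i < nodeFilter.size(); i++) {
            if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) {
                return false;
            }
        }
        nodeFilter[successfulVotes % nodeFilter.size()] = h;
        successfulVotes++;
        return true;
    }
};

void quorumFilterSketch() {
    NodeFilterSketch f;
    assert(f.addNodeToQuorum(1));  // first vote from node 1 is accepted
    assert(!f.addNodeToQuorum(1)); // the same node cannot vote again right away
    // A different node is accepted, unless its 16-bit hash happens to
    // collide with an entry still in the filter.
    f.addNodeToQuorum(2);
}
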
bool VoteRecord::registerPoll() const {
    uint8_t count = inflight.load();
    while (count < AVALANCHE_MAX_INFLIGHT_POLL) {
        if (inflight.compare_exchange_weak(count, count + 1)) {
            return true;
        }
    }

    return false;
}

static bool IsWorthPolling(const CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    if (pindex->nStatus.isInvalid()) {
        // No point polling invalid blocks.
        return false;
    }

    if (IsBlockFinalized(pindex)) {
        // There is no point polling a finalized block.
        return false;
    }

    return true;
}

Processor::Processor(CConnman *connmanIn)
    : connman(connmanIn), queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT),
      round(0), peerManager(std::make_unique()) {
    // Pick a random key for the session.
    sessionKey.MakeNewKey(true);
}

Processor::~Processor() {
    stopEventLoop();
}

bool Processor::addBlockToReconcile(const CBlockIndex *pindex) {
    bool isAccepted;

    {
        LOCK(cs_main);
        if (!IsWorthPolling(pindex)) {
            // There is no point polling this block.
            return false;
        }

        isAccepted = ::ChainActive().Contains(pindex);
    }

    return vote_records.getWriteView()
        ->insert(std::make_pair(pindex, VoteRecord(isAccepted)))
        .second;
}

bool Processor::isAccepted(const CBlockIndex *pindex) const {
    auto r = vote_records.getReadView();
    auto it = r->find(pindex);
    if (it == r.end()) {
        return false;
    }

    return it->second.isAccepted();
}

int Processor::getConfidence(const CBlockIndex *pindex) const {
    auto r = vote_records.getReadView();
    auto it = r->find(pindex);
    if (it == r.end()) {
        return -1;
    }

    return it->second.getConfidence();
}

namespace {
    /**
     * When using TCP, we need to sign all messages as the transport layer is
     * not secure.
     */
    class TCPResponse {
        Response response;
        std::array sig;

    public:
        TCPResponse(Response responseIn, const CKey &key)
            : response(std::move(responseIn)) {
            CHashWriter hasher(SER_GETHASH, 0);
            hasher << response;
            const uint256 hash = hasher.GetHash();

            // Now let's sign!
            std::vector vchSig;
            if (key.SignSchnorr(hash, vchSig)) {
                // Schnorr sigs are 64 bytes in size.
                assert(vchSig.size() == 64);
                std::copy(vchSig.begin(), vchSig.end(), sig.begin());
            } else {
                sig.fill(0);
            }
        }

        // serialization support
        ADD_SERIALIZE_METHODS;

        template
        inline void SerializationOp(Stream &s, Operation ser_action) {
            READWRITE(response);
            READWRITE(sig);
        }
    };
} // namespace

void Processor::sendResponse(CNode *pfrom, Response response) const {
    connman->PushMessage(
        pfrom, CNetMsgMaker(pfrom->GetSendVersion())
                   .Make(NetMsgType::AVARESPONSE,
                         TCPResponse(std::move(response), sessionKey)));
}

bool Processor::registerVotes(NodeId nodeid, const Response &response,
                              std::vector &updates) {
    {
        // Save the time at which we can query again.
        LOCK(cs_peerManager);

        // FIXME: This will overwrite the time even when we receive an old
        // stale message. This should check that the message is indeed the
        // most up to date one before updating the time.
        peerManager->updateNextRequestTime(
            nodeid, std::chrono::steady_clock::now() +
                        std::chrono::milliseconds(response.getCooldown()));
    }

    std::vector invs;

    {
        // Check that the query exists.
        auto w = queries.getWriteView();
        auto it = w->find(std::make_tuple(nodeid, response.getRound()));
        if (it == w.end()) {
            // NB: The request may be old, so we don't increase banscore.
            return false;
        }

        invs = std::move(it->invs);
        w->erase(it);
    }

    // Verify that the request and the vote are consistent.
    const std::vector &votes = response.GetVotes();
    size_t size = invs.size();
    if (votes.size() != size) {
        // TODO: increase banscore for inconsistent response.
        // NB: This isn't a timeout but the node actually misbehaving.
        return false;
    }

    for (size_t i = 0; i < size; i++) {
        if (invs[i].hash != votes[i].GetHash()) {
            // TODO: increase banscore for inconsistent response.
            // NB: This isn't a timeout but the node actually misbehaving.
            return false;
        }
    }

    std::map responseIndex;

    {
        LOCK(cs_main);
        for (const auto &v : votes) {
            BlockMap::iterator mi = mapBlockIndex.find(BlockHash(v.GetHash()));
            if (mi == mapBlockIndex.end()) {
                // This should not happen, but just in case...
                continue;
            }

            CBlockIndex *pindex = mi->second;
            if (!IsWorthPolling(pindex)) {
                // There is no point polling this block.
                continue;
            }

            responseIndex.insert(std::make_pair(pindex, v));
        }
    }

    {
        // Register votes.
        auto w = vote_records.getWriteView();
        for (const auto &p : responseIndex) {
            CBlockIndex *pindex = p.first;
            const Vote &v = p.second;

            auto it = w->find(pindex);
            if (it == w.end()) {
                // We are not voting on that item anymore.
                continue;
            }

            auto &vr = it->second;
            if (!vr.registerVote(nodeid, v.GetError())) {
                // This vote did not provide any extra information, move on.
                continue;
            }

            if (!vr.hasFinalized()) {
                // This item has not been finalized, so we have nothing more
                // to do.
                updates.emplace_back(pindex,
                                     vr.isAccepted()
                                         ? BlockUpdate::Status::Accepted
                                         : BlockUpdate::Status::Rejected);
                continue;
            }

            // We just finalized a vote. If it is valid, then let the caller
            // know. Either way, remove the item from the map.
            updates.emplace_back(pindex, vr.isAccepted()
                                             ? BlockUpdate::Status::Finalized
                                             : BlockUpdate::Status::Invalid);
            w->erase(it);
        }
    }

    return true;
}

bool Processor::addPeer(NodeId nodeid, int64_t score, CPubKey pubkey) {
    LOCK(cs_peerManager);
    PeerId p = peerManager->addPeer(score);
    bool inserted = peerManager->addNodeToPeer(p, nodeid, std::move(pubkey));
    if (!inserted) {
        peerManager->removePeer(p);
    }

    return inserted;
}

bool Processor::forNode(NodeId nodeid, std::function func) const {
    LOCK(cs_peerManager);
    return peerManager->forNode(nodeid, std::move(func));
}

bool Processor::startEventLoop(CScheduler &scheduler) {
    return eventLoop.startEventLoop(
        scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP);
}

bool Processor::stopEventLoop() {
    return eventLoop.stopEventLoop();
}

std::vector Processor::getInvsForNextPoll(bool forPoll) {
    std::vector invs;

    // First remove all blocks that are not worth polling.
    {
        LOCK(cs_main);
        auto w = vote_records.getWriteView();
        for (auto it = w->begin(); it != w->end();) {
            const CBlockIndex *pindex = it->first;
            if (!IsWorthPolling(pindex)) {
                w->erase(it++);
            } else {
                ++it;
            }
        }
    }

    auto r = vote_records.getReadView();
    for (const std::pair &p : reverse_iterate(r)) {
        // Check if we can run a poll.
        const bool shouldPoll =
            forPoll ? p.second.registerPoll() : p.second.shouldPoll();
        if (!shouldPoll) {
            continue;
        }

        // We don't have a decision, we need more votes.
        invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash());
        if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) {
            // Make sure we do not produce more invs than specified by the
            // protocol.
            return invs;
        }
    }

    return invs;
}

NodeId Processor::getSuitableNodeToQuery() {
    LOCK(cs_peerManager);
-    return peerManager->getSuitableNodeToQuery();
+    return peerManager->selectNode();
}

void Processor::clearTimedoutRequests() {
    auto now = std::chrono::steady_clock::now();
    std::map timedout_items{};

    {
        // Clear expired requests.
        auto w = queries.getWriteView();
        auto it = w->get().begin();
        while (it != w->get().end() && it->timeout < now) {
            for (const auto &i : it->invs) {
                timedout_items[i]++;
            }

            w->get().erase(it++);
        }
    }

    if (timedout_items.empty()) {
        return;
    }

    // In-flight request accounting.
    for (const auto &p : timedout_items) {
        const CInv &inv = p.first;
        assert(inv.type == MSG_BLOCK);

        CBlockIndex *pindex;

        {
            LOCK(cs_main);
            BlockMap::iterator mi = mapBlockIndex.find(BlockHash(inv.hash));
            if (mi == mapBlockIndex.end()) {
                continue;
            }

            pindex = mi->second;
        }

        auto w = vote_records.getWriteView();
        auto it = w->find(pindex);
        if (it == w.end()) {
            continue;
        }

        it->second.clearInflightRequest(p.second);
    }
}

void Processor::runEventLoop() {
    // First things first, check if we have requests that timed out and clear
    // them.
    clearTimedoutRequests();

    // Make sure there is at least one suitable node to query before gathering
    // invs.
    NodeId nodeid = getSuitableNodeToQuery();
    if (nodeid == NO_NODE) {
        return;
    }

    std::vector invs = getInvsForNextPoll();
    if (invs.empty()) {
        return;
    }

    do {
        /**
         * If we lost contact with that node, then we remove it from nodeids,
         * but never add the request to queries, which ensures bad nodes get
         * cleaned up over time.
         */
        bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) {
            uint64_t current_round = round++;

            {
                // Compute the time at which this request times out.
                auto timeout =
                    std::chrono::steady_clock::now() + queryTimeoutDuration;
                // Register the query.
                queries.getWriteView()->insert(
                    {pnode->GetId(), current_round, timeout, invs});
                // Set the timeout.
                LOCK(cs_peerManager);
                peerManager->updateNextRequestTime(pnode->GetId(), timeout);
            }

            // Send the query to the node.
            connman->PushMessage(
                pnode, CNetMsgMaker(pnode->GetSendVersion())
                           .Make(NetMsgType::AVAPOLL,
                                 Poll(current_round, std::move(invs))));
            return true;
        });

        // Success!
        if (hasSent) {
            return;
        }

        {
            // This node is obsolete, delete it.
            LOCK(cs_peerManager);
            peerManager->removeNode(nodeid);
        }

        // Get the next suitable node to try again.
        nodeid = getSuitableNodeToQuery();
    } while (nodeid != NO_NODE);
}

} // namespace avalanche
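
Aside, for illustration only and not part of the patch: a rough sketch of how a caller is expected to wire the Processor up, using only the interface exercised in this file. It assumes <avalanche/processor.h> declares what is needed, and that connman, scheduler and pindex come from the surrounding init and validation code.

// Sketch: driving the Processor from the outside.
#include <avalanche/processor.h>

void avalancheWiringSketch(CConnman *connman, CScheduler &scheduler,
                           const CBlockIndex *pindex) {
    avalanche::Processor processor(connman);

    // runEventLoop() will then fire every AVALANCHE_TIME_STEP (10ms): it
    // clears timed-out queries, picks a node via getSuitableNodeToQuery()
    // (which now calls PeerManager::selectNode()), and sends an AVAPOLL
    // message for the current set of items.
    processor.startEventLoop(scheduler);

    // Ask the network about a block; votes come back through
    // registerVotes() when AVARESPONSE messages are processed.
    processor.addBlockToReconcile(pindex);

    processor.stopEventLoop();
}
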
diff --git a/src/avalanche/test/peermanager_tests.cpp b/src/avalanche/test/peermanager_tests.cpp
index 5b8dc25c1..d14b4a48e 100644
--- a/src/avalanche/test/peermanager_tests.cpp
+++ b/src/avalanche/test/peermanager_tests.cpp
@@ -1,419 +1,419 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include

using namespace avalanche;

BOOST_FIXTURE_TEST_SUITE(peermanager_tests, BasicTestingSetup)

BOOST_AUTO_TEST_CASE(select_peer_linear) {
    // No peers.
    BOOST_CHECK_EQUAL(selectPeerImpl({}, 0, 0), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl({}, 1, 3), NO_PEER);

    // One peer
    const std::vector oneslot = {{100, 100, 23}};

    // Undershoot
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 0, 300), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 42, 300), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 99, 300), NO_PEER);
    // Nailed it
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 100, 300), 23);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 142, 300), 23);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 199, 300), 23);
    // Overshoot
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 200, 300), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 242, 300), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 299, 300), NO_PEER);

    // Two peers
    const std::vector twoslots = {{100, 100, 69}, {300, 100, 42}};

    // Undershoot
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 0, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 42, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 99, 500), NO_PEER);
    // First entry
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 100, 500), 69);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 142, 500), 69);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 199, 500), 69);
    // In between
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 200, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 242, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 299, 500), NO_PEER);
    // Second entry
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 300, 500), 42);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 342, 500), 42);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 399, 500), 42);
    // Overshoot
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 400, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 442, 500), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 499, 500), NO_PEER);
}
BOOST_AUTO_TEST_CASE(select_peer_dichotomic) {
    std::vector slots;

    // 100 peers of size 1, with 1 empty slot between them.
    uint64_t max = 1;
    for (int i = 0; i < 100; i++) {
        slots.emplace_back(max, 1, i);
        max += 2;
    }

    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 4, max), NO_PEER);

    // Check that we get what we expect.
    for (int i = 0; i < 100; i++) {
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
    }

    BOOST_CHECK_EQUAL(selectPeerImpl(slots, max, max), NO_PEER);

    // Update the slots to be heavily skewed toward the last element.
    slots[99] = slots[99].withScore(101);
    max = slots[99].getStop();
    BOOST_CHECK_EQUAL(max, 300);

    for (int i = 0; i < 100; i++) {
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
    }

    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 200, max), 99);
    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 256, max), 99);
    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 299, max), 99);
    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 300, max), NO_PEER);

    // Update the slots to be heavily skewed toward the first element.
    for (int i = 0; i < 100; i++) {
        slots[i] = slots[i].withStart(slots[i].getStart() + 100);
    }

    slots[0] = Slot(1, slots[0].getStop() - 1, slots[0].getPeerId());
    slots[99] = slots[99].withScore(1);
    max = slots[99].getStop();
    BOOST_CHECK_EQUAL(max, 300);

    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 0, max), NO_PEER);
    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 1, max), 0);
    BOOST_CHECK_EQUAL(selectPeerImpl(slots, 42, max), 0);

    for (int i = 0; i < 100; i++) {
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 1, max), i);
        BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 2, max), NO_PEER);
    }
}

BOOST_AUTO_TEST_CASE(select_peer_random) {
    for (int c = 0; c < 1000; c++) {
        size_t size = InsecureRandBits(10) + 1;
        std::vector slots;
        slots.reserve(size);

        uint64_t max = InsecureRandBits(3);
        auto next = [&]() {
            uint64_t r = max;
            max += InsecureRandBits(3);
            return r;
        };

        for (size_t i = 0; i < size; i++) {
            const uint64_t start = next();
            const uint32_t score = InsecureRandBits(3);
            max += score;
            slots.emplace_back(start, score, i);
        }

        for (int k = 0; k < 100; k++) {
            uint64_t s = InsecureRandRange(max);
            auto i = selectPeerImpl(slots, s, max);
            // /!\ Because of the way we construct the vector, the peer id is
            // always the index. This might not be the case in practice.
            BOOST_CHECK(i == NO_PEER || slots[i].contains(s));
        }
    }
}
BOOST_AUTO_TEST_CASE(add_peer) {
    // No peers.
    PeerManager pm;
    BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);

    // One peer, we always return it.
    PeerId peer0 = pm.addPeer(100);
    BOOST_CHECK_EQUAL(pm.selectPeer(), peer0);

    // Two peers, verify the ratio.
    PeerId peer1 = pm.addPeer(200);
    std::unordered_map results = {};
    for (int i = 0; i < 10000; i++) {
        size_t p = pm.selectPeer();
        BOOST_CHECK(p == peer0 || p == peer1);
        results[p]++;
    }

    BOOST_CHECK(abs(2 * results[0] - results[1]) < 500);

    // Three peers, verify the ratio.
    PeerId peer2 = pm.addPeer(100);
    results.clear();
    for (int i = 0; i < 10000; i++) {
        size_t p = pm.selectPeer();
        BOOST_CHECK(p == peer0 || p == peer1 || p == peer2);
        results[p]++;
    }

    BOOST_CHECK(abs(results[0] - results[1] + results[2]) < 500);
}
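
Aside, for illustration only and not part of the patch: the tolerance used in the ratio checks above follows directly from the scores. A back-of-the-envelope sketch:

// Sketch: expected draw counts behind the ratio checks in add_peer.
// With scores 100 and 200, peer1 is selected with probability 2/3.
#include <cassert>

void ratioSketch() {
    const int draws = 10000;
    const int expected0 = draws / 3;     // ~3333 draws for peer0 (score 100)
    const int expected1 = 2 * draws / 3; // ~6666 draws for peer1 (score 200)

    // In expectation 2 * results[0] - results[1] is close to zero, so the
    // test only allows a small deviation (< 500) around that value.
    assert(2 * expected0 - expected1 == 0);
}
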
BOOST_AUTO_TEST_CASE(remove_peer) {
    // No peers.
    PeerManager pm;
    BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);

    // Add 4 peers.
    std::array peerids;
    for (int i = 0; i < 4; i++) {
        peerids[i] = pm.addPeer(100);
    }

    BOOST_CHECK_EQUAL(pm.getSlotCount(), 400);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    for (int i = 0; i < 100; i++) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[2] ||
                    p == peerids[3]);
    }

    // Remove one peer, it never shows up now.
    BOOST_CHECK(pm.removePeer(peerids[2]));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 400);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 100);

    // Make sure we compact to never get NO_PEER.
    BOOST_CHECK_EQUAL(pm.compact(), 100);
    BOOST_CHECK(pm.verify());
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 300);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    for (int i = 0; i < 100; i++) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[3]);
    }

    // Add 4 more peers.
    for (int i = 0; i < 4; i++) {
        peerids[i + 4] = pm.addPeer(100);
    }

    BOOST_CHECK_EQUAL(pm.getSlotCount(), 700);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    BOOST_CHECK(pm.removePeer(peerids[0]));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 700);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 100);

    // Removing the last entry does not increase fragmentation.
    BOOST_CHECK(pm.removePeer(peerids[7]));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 600);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 100);

    // Make sure we compact to never get NO_PEER.
    BOOST_CHECK_EQUAL(pm.compact(), 100);
    BOOST_CHECK(pm.verify());
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 500);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    for (int i = 0; i < 100; i++) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[1] || p == peerids[3] || p == peerids[4] ||
                    p == peerids[5] || p == peerids[6]);
    }

    // Removing non-existent peers fails.
    BOOST_CHECK(!pm.removePeer(peerids[0]));
    BOOST_CHECK(!pm.removePeer(peerids[2]));
    BOOST_CHECK(!pm.removePeer(peerids[7]));
    BOOST_CHECK(!pm.removePeer(NO_PEER));
}

BOOST_AUTO_TEST_CASE(rescore_peer, *boost::unit_test::timeout(5)) {
    // No peers.
    PeerManager pm;
    BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);

    // Add 4 peers.
    std::array peerids;
    for (int i = 0; i < 4; i++) {
        peerids[i] = pm.addPeer(100);
    }

    BOOST_CHECK_EQUAL(pm.getSlotCount(), 400);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    for (int i = 0; i < 100; i++) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[2] ||
                    p == peerids[3]);
    }

    // Set one peer's score to 0, it never shows up now.
    BOOST_CHECK(pm.rescorePeer(peerids[1], 0));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 400);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 100);

    for (int i = 0; i < 100; i++) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[0] || p == peerids[2] || p == peerids[3] ||
                    p == NO_PEER);
    }

    // "resurrect" the peer.
    BOOST_CHECK(pm.rescorePeer(peerids[1], 100));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 400);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);

    while (true) {
        PeerId p = pm.selectPeer();
        BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[2] ||
                    p == peerids[3]);

        // Make sure peer 1 reappeared.
        if (p == peerids[1]) {
            break;
        }
    }

    // Grow the peer to a point where it needs to be reallocated.
    BOOST_CHECK(pm.rescorePeer(peerids[1], 200));
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 600);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 100);

    for (int i = 0; i < 25; i++) {
        while (true) {
            PeerId p = pm.selectPeer();
            BOOST_CHECK(p == peerids[0] || p == peerids[1] ||
                        p == peerids[2] || p == peerids[3] || p == NO_PEER);

            // Make sure peer 1 reappeared.
            if (p == peerids[1]) {
                break;
            }
        }
    }

    // Compact the peer manager.
    BOOST_CHECK_EQUAL(pm.compact(), 100);
    BOOST_CHECK(pm.verify());
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 500);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
}

BOOST_AUTO_TEST_CASE(compact_slots) {
    PeerManager pm;

    // Add 4 peers.
    std::array peerids;
    for (int i = 0; i < 4; i++) {
        peerids[i] = pm.addPeer(100);
    }

    // Remove all peers.
    for (auto p : peerids) {
        pm.removePeer(p);
    }

    BOOST_CHECK_EQUAL(pm.getSlotCount(), 300);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 300);

    for (int i = 0; i < 100; i++) {
        BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);
    }

    BOOST_CHECK_EQUAL(pm.compact(), 300);
    BOOST_CHECK(pm.verify());
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 0);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
}
BOOST_AUTO_TEST_CASE(node_crud) {
    PeerManager pm;

    // Create one peer.
    PeerId peerid = pm.addPeer(100);
-    BOOST_CHECK_EQUAL(pm.getSuitableNodeToQuery(), NO_NODE);
+    BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);

    // Add 4 nodes.
    for (int i = 0; i < 4; i++) {
        BOOST_CHECK(pm.addNodeToPeer(peerid, i, CPubKey()));
    }

    for (int i = 0; i < 100; i++) {
-        NodeId n = pm.getSuitableNodeToQuery();
+        NodeId n = pm.selectNode();
        BOOST_CHECK(n >= 0 && n < 4);
        BOOST_CHECK(
            pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
    }

    // Remove a node, check that it doesn't show up.
    BOOST_CHECK(pm.removeNode(2));

    for (int i = 0; i < 100; i++) {
-        NodeId n = pm.getSuitableNodeToQuery();
+        NodeId n = pm.selectNode();
        BOOST_CHECK(n == 0 || n == 1 || n == 3);
        BOOST_CHECK(
            pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
    }

    // Push a node's timeout into the future, so that it doesn't show up.
    BOOST_CHECK(pm.updateNextRequestTime(1, std::chrono::steady_clock::now() +
                                                std::chrono::hours(24)));

    for (int i = 0; i < 100; i++) {
-        NodeId n = pm.getSuitableNodeToQuery();
+        NodeId n = pm.selectNode();
        BOOST_CHECK(n == 0 || n == 3);
        BOOST_CHECK(
            pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
    }

    // Move a node from one peer to another.
    PeerId altpeer = pm.addPeer(0);
    BOOST_CHECK(pm.addNodeToPeer(altpeer, 3, CPubKey()));

    for (int i = 0; i < 100; i++) {
-        NodeId n = pm.getSuitableNodeToQuery();
+        NodeId n = pm.selectNode();
        BOOST_CHECK(n == 0);
        BOOST_CHECK(
            pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
    }

    // Rescore peers and check that node selection is affected as expected.
    BOOST_CHECK(pm.rescorePeer(peerid, 0));
    BOOST_CHECK(pm.rescorePeer(altpeer, 100));
    BOOST_CHECK_EQUAL(pm.compact(), 100);

    for (int i = 0; i < 100; i++) {
-        NodeId n = pm.getSuitableNodeToQuery();
+        NodeId n = pm.selectNode();
        BOOST_CHECK(n == 3);
        BOOST_CHECK(
            pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
    }
}

BOOST_AUTO_TEST_SUITE_END()