
diff --git a/src/avalanche/peermanager.cpp b/src/avalanche/peermanager.cpp
index 67a92cc84..1d0d365a7 100644
--- a/src/avalanche/peermanager.cpp
+++ b/src/avalanche/peermanager.cpp
@@ -1,832 +1,834 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/peermanager.h>
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/validation.h>
#include <random.h>
#include <scheduler.h>
#include <validation.h> // For ChainstateManager
#include <algorithm>
#include <cassert>
namespace avalanche {
bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
if (it == pview.end()) {
// If the node exists, it is actually updating its proof to an unknown
// one. In this case we need to remove it so it is not both active and
// pending at the same time.
removeNode(nodeid);
pendingNodes.emplace(proofid, nodeid);
return false;
}
return addOrUpdateNode(peers.project<0>(it), nodeid);
}
bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
assert(it != peers.end());
const PeerId peerid = it->peerid;
auto nit = nodes.find(nodeid);
if (nit == nodes.end()) {
if (!nodes.emplace(nodeid, peerid).second) {
return false;
}
} else {
const PeerId oldpeerid = nit->peerid;
if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
return false;
}
// We actually have this node already, we need to update it.
bool success = removeNodeFromPeer(peers.find(oldpeerid));
assert(success);
}
bool success = addNodeToPeer(it);
assert(success);
// If the added node was in the pending set, remove it
pendingNodes.get<by_nodeid>().erase(nodeid);
return true;
}
bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
assert(it != peers.end());
return peers.modify(it, [&](Peer &p) {
if (p.node_count++ > 0) {
// We are done.
return;
}
// We need to allocate this peer.
p.index = uint32_t(slots.size());
const uint32_t score = p.getScore();
const uint64_t start = slotCount;
slots.emplace_back(start, score, it->peerid);
slotCount = start + score;
// Add to our allocated score when we allocate a new peer in the slots
connectedPeersScore += score;
});
}
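The allocation above gives each connected peer a contiguous half-open range of width getScore() inside the slots vector, with slotCount tracking the running total. A minimal sketch of the resulting layout (the demoSlotLayout helper and the 100/50 scores are invented for illustration, not part of this change):
static void demoSlotLayout() {
    // Two peers with scores 100 and 50 end up owning [0, 100) and [100, 150).
    std::vector<Slot> demo;
    uint64_t count = 0;
    for (uint32_t score : {100u, 50u}) {
        demo.emplace_back(count, score, PeerId(demo.size()));
        count += score;
    }
    assert(demo[0].getStop() == 100 && demo[1].getStart() == 100);
    assert(count == 150); // what slotCount would hold after both allocations
}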
bool PeerManager::removeNode(NodeId nodeid) {
if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
// If this was a pending node, there is nothing else to do.
return true;
}
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
const PeerId peerid = it->peerid;
nodes.erase(it);
// Keep track of the reference count.
bool success = removeNodeFromPeer(peers.find(peerid));
assert(success);
return true;
}
bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
uint32_t count) {
// It is possible for nodes to be dangling. If there was an inflight query
// when the peer gets removed, the node was not erased. In this case there
// is nothing to do.
if (it == peers.end()) {
return true;
}
assert(count <= it->node_count);
if (count == 0) {
// This is a NOOP.
return false;
}
const uint32_t new_count = it->node_count - count;
if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
return false;
}
if (new_count > 0) {
// We are done.
return true;
}
// There are no more nodes left, we need to clean up. Subtract allocated
// score and remove from slots.
const size_t i = it->index;
assert(i < slots.size());
assert(connectedPeersScore >= slots[i].getScore());
connectedPeersScore -= slots[i].getScore();
if (i + 1 == slots.size()) {
slots.pop_back();
slotCount = slots.empty() ? 0 : slots.back().getStop();
} else {
fragmentation += slots[i].getScore();
slots[i] = slots[i].withPeerId(NO_PEER);
}
return true;
}
bool PeerManager::updateNextRequestTime(NodeId nodeid, TimePoint timeout) {
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; });
}
bool PeerManager::latchAvaproofsSent(NodeId nodeid) {
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
return !it->avaproofsSent &&
nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
}
static bool isImmatureState(const ProofValidationState &state) {
return state.GetResult() == ProofValidationResult::IMMATURE_UTXO;
}
bool PeerManager::updateNextPossibleConflictTime(
PeerId peerid, const std::chrono::seconds &nextTime) {
auto it = peers.find(peerid);
if (it == peers.end()) {
// No such peer
return false;
}
// Make sure we don't move the time in the past.
peers.modify(it, [&](Peer &p) {
p.nextPossibleConflictTime =
std::max(p.nextPossibleConflictTime, nextTime);
});
return it->nextPossibleConflictTime == nextTime;
}
bool PeerManager::setFinalized(PeerId peerid) {
auto it = peers.find(peerid);
if (it == peers.end()) {
// No such peer
return false;
}
peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });
return true;
}
template <typename ProofContainer>
void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
auto &peersView = peers.get<by_proofid>();
for (const ProofRef &proof : proofs) {
auto it = peersView.find(proof->getId());
if (it != peersView.end()) {
removePeer(it->peerid);
}
conflictingProofPool.addProofIfPreferred(proof);
}
}
bool PeerManager::registerProof(const ProofRef &proof,
ProofRegistrationState &registrationState,
RegistrationMode mode) {
assert(proof);
const ProofId &proofid = proof->getId();
auto invalidate = [&](ProofRegistrationResult result,
const std::string &message) {
return registrationState.Invalid(
result, message, strprintf("proofid: %s", proofid.ToString()));
};
if ((mode != RegistrationMode::FORCE_ACCEPT ||
!isInConflictingPool(proofid)) &&
exists(proofid)) {
// In default mode, we expect the proof to be unknown, i.e. in none of
// the pools.
// In forced accept mode, the proof can be in the conflicting pool.
return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
"proof-already-registered");
}
if (danglingProofIds.contains(proofid) &&
pendingNodes.count(proofid) == 0) {
// Don't attempt to register a proof that we already evicted because it
// was dangling, but rather attempt to retrieve an associated node.
needMoreNodes = true;
return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
}
// Check the proof's validity.
ProofValidationState validationState;
if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
chainman, validationState))) {
if (isImmatureState(validationState)) {
- orphanProofPool.addProofIfPreferred(proof);
- if (orphanProofPool.countProofs() > AVALANCHE_MAX_ORPHAN_PROOFS) {
- // Adding this proof exceeds the orphan pool limit, so evict
+ immatureProofPool.addProofIfPreferred(proof);
+ if (immatureProofPool.countProofs() >
+ AVALANCHE_MAX_IMMATURE_PROOFS) {
+ // Adding this proof exceeds the immature pool limit, so evict
// the lowest scoring proof.
- orphanProofPool.removeProof(
- orphanProofPool.getLowestScoreProof()->getId());
+ immatureProofPool.removeProof(
+ immatureProofPool.getLowestScoreProof()->getId());
}
- return invalidate(ProofRegistrationResult::ORPHAN, "orphan-proof");
+ return invalidate(ProofRegistrationResult::IMMATURE,
+ "immature-proof");
}
if (validationState.GetResult() ==
ProofValidationResult::MISSING_UTXO) {
return invalidate(ProofRegistrationResult::MISSING_UTXO,
"utxo-missing-or-spent");
}
// Reject invalid proof.
return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
}
auto now = GetTime<std::chrono::seconds>();
auto nextCooldownTimePoint =
now + std::chrono::seconds(
gArgs.GetArg("-avalancheconflictingproofcooldown",
AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN));
ProofPool::ConflictingProofSet conflictingProofs;
switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
case ProofPool::AddProofStatus::REJECTED: {
if (mode != RegistrationMode::FORCE_ACCEPT) {
auto bestPossibleConflictTime = std::chrono::seconds(0);
auto &pview = peers.get<by_proofid>();
for (auto &conflictingProof : conflictingProofs) {
auto it = pview.find(conflictingProof->getId());
assert(it != pview.end());
// Find the most recent possible conflict time among the conflicting peers
bestPossibleConflictTime = std::max(
bestPossibleConflictTime, it->nextPossibleConflictTime);
updateNextPossibleConflictTime(it->peerid,
nextCooldownTimePoint);
}
if (bestPossibleConflictTime > now) {
// Cooldown not elapsed, reject the proof.
return invalidate(
ProofRegistrationResult::COOLDOWN_NOT_ELAPSED,
"cooldown-not-elapsed");
}
// If proof replacement is enabled, give the proof a chance to
// replace the conflicting ones.
if (gArgs.GetBoolArg(
"-enableavalancheproofreplacement",
AVALANCHE_DEFAULT_PROOF_REPLACEMENT_ENABLED)) {
if (validProofPool.addProofIfPreferred(proof)) {
// If we have overridden other proofs due to conflict,
// remove the peers and attempt to move them to the
// conflicting pool.
moveToConflictingPool(conflictingProofs);
// Replacement is successful, continue to peer creation
break;
}
}
// Not the preferred proof, or replacement is not enabled
return conflictingProofPool.addProofIfPreferred(proof) ==
ProofPool::AddProofStatus::REJECTED
? invalidate(ProofRegistrationResult::REJECTED,
"rejected-proof")
: invalidate(ProofRegistrationResult::CONFLICTING,
"conflicting-utxos");
}
conflictingProofPool.removeProof(proofid);
// Move the conflicting proofs from the valid pool to the
// conflicting pool
moveToConflictingPool(conflictingProofs);
auto status = validProofPool.addProofIfNoConflict(proof);
assert(status == ProofPool::AddProofStatus::SUCCEED);
break;
}
case ProofPool::AddProofStatus::DUPLICATED:
// If the proof was already in the pool, don't duplicate the peer.
return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
"proof-already-registered");
case ProofPool::AddProofStatus::SUCCEED:
break;
// No default case, so the compiler can warn about missing cases
}
// At this stage we are going to create a peer so the proof should never
// exist in the conflicting pool, but use belt and suspenders.
conflictingProofPool.removeProof(proofid);
// New peer means new peerid!
const PeerId peerid = nextPeerId++;
// We have no peer for this proof, time to create it.
auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
assert(inserted.second);
auto insertedRadixTree = shareableProofs.insert(proof);
assert(insertedRadixTree);
// Add to our registered score when adding to the peer list
totalPeersScore += proof->getScore();
// If there are nodes waiting for this proof, add them
auto &pendingNodesView = pendingNodes.get<by_proofid>();
auto range = pendingNodesView.equal_range(proofid);
// We want to update the nodes then remove them from the pending set. That
// will invalidate the range iterators, so we need to save the node ids
// first before we can loop over them.
std::vector<NodeId> nodeids;
nodeids.reserve(std::distance(range.first, range.second));
std::transform(range.first, range.second, std::back_inserter(nodeids),
[](const PendingNode &n) { return n.nodeid; });
for (const NodeId &nodeid : nodeids) {
addOrUpdateNode(inserted.first, nodeid);
}
return true;
}
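For reference, callers typically inspect the registration state when registerProof() returns false; a minimal caller-side sketch (the tryRegisterProof helper is hypothetical and not part of this change):
static bool tryRegisterProof(PeerManager &pm, const ProofRef &proof) {
    ProofRegistrationState state;
    if (pm.registerProof(proof, state)) {
        return true;
    }
    // With this change an immature proof is reported as IMMATURE (previously
    // ORPHAN) and is kept in the immature pool for a later rescan.
    return state.GetResult() == ProofRegistrationResult::IMMATURE;
}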
bool PeerManager::rejectProof(const ProofId &proofid, RejectionMode mode) {
if (!exists(proofid)) {
return false;
}
- if (orphanProofPool.removeProof(proofid)) {
+ if (immatureProofPool.removeProof(proofid)) {
return true;
}
if (mode == RejectionMode::DEFAULT &&
conflictingProofPool.getProof(proofid)) {
// In default mode we keep the proof in the conflicting pool
return true;
}
if (mode == RejectionMode::INVALIDATE &&
conflictingProofPool.removeProof(proofid)) {
// In invalidate mode we remove the proof completely
return true;
}
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
assert(it != pview.end());
const ProofRef proof = it->proof;
if (!removePeer(it->peerid)) {
return false;
}
// If there were conflicting proofs, attempt to pull them back
for (const SignedStake &ss : proof->getStakes()) {
const ProofRef conflictingProof =
conflictingProofPool.getProof(ss.getStake().getUTXO());
if (!conflictingProof) {
continue;
}
conflictingProofPool.removeProof(conflictingProof->getId());
registerProof(conflictingProof);
}
if (mode == RejectionMode::DEFAULT) {
conflictingProofPool.addProofIfPreferred(proof);
}
return true;
}
void PeerManager::cleanupDanglingProofs(const ProofRef &localProof) {
const auto now = GetTime<std::chrono::seconds>();
std::vector<ProofId> newlyDanglingProofIds;
for (const Peer &peer : peers) {
// If the peer is not our local proof, has been registered for some
// time and has no node attached, discard it.
if ((!localProof || peer.getProofId() != localProof->getId()) &&
peer.node_count == 0 &&
(peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
newlyDanglingProofIds.push_back(peer.getProofId());
}
}
for (const ProofId &proofid : newlyDanglingProofIds) {
rejectProof(proofid, RejectionMode::INVALIDATE);
danglingProofIds.insert(proofid);
}
// If we have dangling proofs, this is a good indicator that we need to
// request more nodes from our peers.
needMoreNodes = !newlyDanglingProofIds.empty();
}
NodeId PeerManager::selectNode() {
for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
const PeerId p = selectPeer();
// If we cannot find a peer, it may be because peer selection is unlikely
// to succeed due to high fragmentation, so compact and retry.
if (p == NO_PEER) {
compact();
continue;
}
// See if that peer has an available node.
auto &nview = nodes.get<next_request_time>();
auto it = nview.lower_bound(boost::make_tuple(p, TimePoint()));
if (it != nview.end() && it->peerid == p &&
it->nextRequestTime <= std::chrono::steady_clock::now()) {
return it->nodeid;
}
}
// We failed to find a node to query, flag this so we can request more
needMoreNodes = true;
return NO_NODE;
}
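A rough sketch of how a poller combines selectNode() with updateNextRequestTime() so a queried node is skipped during its cooldown (the pollOneNode helper and the 10 second cooldown are illustrative assumptions):
static void pollOneNode(PeerManager &pm) {
    const NodeId nodeid = pm.selectNode();
    if (nodeid == NO_NODE) {
        // shouldRequestMoreNodes() will report this shortage once.
        return;
    }
    // ... send the poll message to nodeid here ...
    // Skip this node for an illustrative 10 seconds.
    pm.updateNextRequestTime(nodeid, std::chrono::steady_clock::now() +
                                         std::chrono::seconds(10));
}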
std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
std::vector<ProofId> invalidProofIds;
std::vector<ProofRef> newOrphans;
{
LOCK(cs_main);
for (const auto &p : peers) {
ProofValidationState state;
if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
if (isImmatureState(state)) {
newOrphans.push_back(p.proof);
}
invalidProofIds.push_back(p.getProofId());
}
}
}
- // Remove the invalid proofs before the orphans rescan. This makes it
+ // Remove the invalid proofs before the immature rescan. This makes it
// possible to pull back proofs with utxos that conflicted with these
// invalid proofs.
for (const ProofId &invalidProofId : invalidProofIds) {
rejectProof(invalidProofId, RejectionMode::INVALIDATE);
}
- auto registeredProofs = orphanProofPool.rescan(*this);
+ auto registeredProofs = immatureProofPool.rescan(*this);
for (auto &p : newOrphans) {
- orphanProofPool.addProofIfPreferred(p);
+ immatureProofPool.addProofIfPreferred(p);
}
return registeredProofs;
}
ProofRef PeerManager::getProof(const ProofId &proofid) const {
ProofRef proof;
forPeer(proofid, [&](const Peer &p) {
proof = p.proof;
return true;
});
if (!proof) {
proof = conflictingProofPool.getProof(proofid);
}
if (!proof) {
- proof = orphanProofPool.getProof(proofid);
+ proof = immatureProofPool.getProof(proofid);
}
return proof;
}
bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
auto &pview = peers.get<by_proofid>();
return pview.find(proofid) != pview.end();
}
bool PeerManager::isImmature(const ProofId &proofid) const {
- return orphanProofPool.getProof(proofid) != nullptr;
+ return immatureProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
return conflictingProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::removePeer(const PeerId peerid) {
auto it = peers.find(peerid);
if (it == peers.end()) {
return false;
}
// Remove all nodes from this peer.
removeNodeFromPeer(it, it->node_count);
auto &nview = nodes.get<next_request_time>();
// Add the nodes to the pending set
auto range = nview.equal_range(peerid);
for (auto &nit = range.first; nit != range.second; ++nit) {
pendingNodes.emplace(it->getProofId(), nit->nodeid);
};
// Remove nodes associated with this peer, unless their timeout is still
// active. This ensures that we don't overquery them in case they are
// subsequently added to another peer.
nview.erase(nview.lower_bound(boost::make_tuple(peerid, TimePoint())),
nview.upper_bound(boost::make_tuple(
peerid, std::chrono::steady_clock::now())));
// Release UTXOs attached to this proof.
validProofPool.removeProof(it->getProofId());
auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));
assert(removed != nullptr);
m_unbroadcast_proofids.erase(it->getProofId());
// Remove the peer from the PeerSet and remove its score from the registered
// score total.
assert(totalPeersScore >= it->getScore());
totalPeersScore -= it->getScore();
peers.erase(it);
return true;
}
PeerId PeerManager::selectPeer() const {
if (slots.empty() || slotCount == 0) {
return NO_PEER;
}
const uint64_t max = slotCount;
for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
size_t i = selectPeerImpl(slots, GetRand(max), max);
if (i != NO_PEER) {
return i;
}
}
return NO_PEER;
}
uint64_t PeerManager::compact() {
// There is nothing to compact.
if (fragmentation == 0) {
return 0;
}
std::vector<Slot> newslots;
newslots.reserve(peers.size());
uint64_t prevStop = 0;
uint32_t i = 0;
for (auto it = peers.begin(); it != peers.end(); it++) {
if (it->node_count == 0) {
continue;
}
newslots.emplace_back(prevStop, it->getScore(), it->peerid);
prevStop = slots[i].getStop();
if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
return 0;
}
}
slots = std::move(newslots);
const uint64_t saved = slotCount - prevStop;
slotCount = prevStop;
fragmentation = 0;
return saved;
}
bool PeerManager::verify() const {
uint64_t prevStop = 0;
uint32_t scoreFromSlots = 0;
for (size_t i = 0; i < slots.size(); i++) {
const Slot &s = slots[i];
// Slots must be in correct order.
if (s.getStart() < prevStop) {
return false;
}
prevStop = s.getStop();
// If this is a dead slot, then nothing more needs to be checked.
if (s.getPeerId() == NO_PEER) {
continue;
}
// We have a live slot, verify index.
auto it = peers.find(s.getPeerId());
if (it == peers.end() || it->index != i) {
return false;
}
// Accumulate score across slots
scoreFromSlots += slots[i].getScore();
}
// Score across slots must be the same as our allocated score
if (scoreFromSlots != connectedPeersScore) {
return false;
}
uint32_t scoreFromAllPeers = 0;
uint32_t scoreFromPeersWithNodes = 0;
std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
for (const auto &p : peers) {
// Accumulate the score across peers to compare with total known score
scoreFromAllPeers += p.getScore();
// A peer should have a proof attached
if (!p.proof) {
return false;
}
// Check proof pool consistency
for (const auto &ss : p.proof->getStakes()) {
const COutPoint &outpoint = ss.getStake().getUTXO();
auto proof = validProofPool.getProof(outpoint);
if (!proof) {
// Missing utxo
return false;
}
if (proof != p.proof) {
// Wrong proof
return false;
}
if (!peersUtxos.emplace(outpoint).second) {
// Duplicated utxo
return false;
}
}
// Count node attached to this peer.
const auto count_nodes = [&]() {
size_t count = 0;
auto &nview = nodes.get<next_request_time>();
auto begin =
nview.lower_bound(boost::make_tuple(p.peerid, TimePoint()));
auto end =
nview.upper_bound(boost::make_tuple(p.peerid + 1, TimePoint()));
for (auto it = begin; it != end; ++it) {
count++;
}
return count;
};
if (p.node_count != count_nodes()) {
return false;
}
// If there are no nodes attached to this peer, then we are done.
if (p.node_count == 0) {
continue;
}
scoreFromPeersWithNodes += p.getScore();
// The index must point to a slot referring to this peer.
if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
return false;
}
// If the scores do not match, same thing.
if (slots[p.index].getScore() != p.getScore()) {
return false;
}
// Check the proof is in the radix tree
if (shareableProofs.get(p.getProofId()) == nullptr) {
return false;
}
}
// Check our accumulated scores against our registered and allocated scores
if (scoreFromAllPeers != totalPeersScore) {
return false;
}
if (scoreFromPeersWithNodes != connectedPeersScore) {
return false;
}
// We checked the utxo consistency for all our peers utxos already, so if
// the pool size differs from the expected one there are dangling utxos.
if (validProofPool.size() != peersUtxos.size()) {
return false;
}
// Check there is no dangling proof in the radix tree
return shareableProofs.forEachLeaf([&](RCUPtr<const Proof> pLeaf) {
return isBoundToPeer(pLeaf->getId());
});
}
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
const uint64_t max) {
assert(slot <= max);
size_t begin = 0, end = slots.size();
uint64_t bottom = 0, top = max;
// Try to find the slot using dichotomic search.
while ((end - begin) > 8) {
// The slot we picked is not allocated.
if (slot < bottom || slot >= top) {
return NO_PEER;
}
// Guesstimate the position of the slot.
size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
assert(begin <= i && i < end);
// We have a match.
if (slots[i].contains(slot)) {
return slots[i].getPeerId();
}
// We undershot.
if (slots[i].precedes(slot)) {
begin = i + 1;
if (begin >= end) {
return NO_PEER;
}
bottom = slots[begin].getStart();
continue;
}
// We overshot.
if (slots[i].follows(slot)) {
end = i;
top = slots[end].getStart();
continue;
}
// We have an unallocated slot.
return NO_PEER;
}
// Enough of that nonsense, let's fall back to linear search.
for (size_t i = begin; i < end; i++) {
// We have a match.
if (slots[i].contains(slot)) {
return slots[i].getPeerId();
}
}
// We failed to find a slot, retry.
return NO_PEER;
}
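As a small worked example of the lookup above (values invented, mirroring the unit tests further down): with slots [100, 200) owned by peer 23 and [300, 400) owned by peer 42, draws inside a slot return its peer, while draws in a gap return NO_PEER so the caller retries:
static void demoSelectPeerImpl() {
    const std::vector<Slot> demo = {{100, 100, 23}, {300, 100, 42}};
    assert(selectPeerImpl(demo, 150, 500) == 23);      // inside the first slot
    assert(selectPeerImpl(demo, 250, 500) == NO_PEER); // in the gap between slots
    assert(selectPeerImpl(demo, 399, 500) == 42);      // last value of the second slot
}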
void PeerManager::addUnbroadcastProof(const ProofId &proofid) {
// The proof should be bound to a peer
if (isBoundToPeer(proofid)) {
m_unbroadcast_proofids.insert(proofid);
}
}
void PeerManager::removeUnbroadcastProof(const ProofId &proofid) {
m_unbroadcast_proofids.erase(proofid);
}
} // namespace avalanche
diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h
index 57297835f..d8cd7d7ea 100644
--- a/src/avalanche/peermanager.h
+++ b/src/avalanche/peermanager.h
@@ -1,449 +1,448 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_AVALANCHE_PEERMANAGER_H
#define BITCOIN_AVALANCHE_PEERMANAGER_H
#include <avalanche/node.h>
#include <avalanche/proof.h>
#include <avalanche/proofpool.h>
#include <avalanche/proofradixtreeadapter.h>
#include <bloom.h>
#include <coins.h>
#include <consensus/validation.h>
#include <pubkey.h>
#include <radix.h>
#include <salteduint256hasher.h>
#include <util/time.h>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/mem_fun.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <unordered_set>
#include <vector>
class ChainstateManager;
class CScheduler;
namespace avalanche {
/**
- * Maximum number of orphan proofs the peer manager will accept from the
- * network. Under good conditions, this allows the node to collect relevant
- * proofs during IBD. Note that reorgs can cause the orphan pool to
- * temporarily exceed this limit. But a change in chaintip cause previously
- * reorged proofs to be trimmed.
+ * Maximum number of immature proofs the peer manager will accept from the
+ * network. Note that reorgs can cause the immature pool to temporarily exceed
+ * this limit, but a change in chaintip causes previously reorged proofs to be
+ * trimmed.
*/
-static constexpr uint32_t AVALANCHE_MAX_ORPHAN_PROOFS = 4000;
+static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS = 4000;
class Delegation;
namespace {
struct TestPeerManager;
}
struct Slot {
private:
uint64_t start;
uint32_t score;
PeerId peerid;
public:
Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn)
: start(startIn), score(scoreIn), peerid(peeridIn) {}
Slot withStart(uint64_t startIn) const {
return Slot(startIn, score, peerid);
}
Slot withScore(uint64_t scoreIn) const {
return Slot(start, scoreIn, peerid);
}
Slot withPeerId(PeerId peeridIn) const {
return Slot(start, score, peeridIn);
}
uint64_t getStart() const { return start; }
uint64_t getStop() const { return start + score; }
uint32_t getScore() const { return score; }
PeerId getPeerId() const { return peerid; }
bool contains(uint64_t slot) const {
return getStart() <= slot && slot < getStop();
}
bool precedes(uint64_t slot) const { return slot >= getStop(); }
bool follows(uint64_t slot) const { return getStart() > slot; }
};
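For example (illustrative values, assuming nothing beyond the accessors above): a slot with start 100 and score 50 covers the half-open range [100, 150), which is what the three predicates express:
inline void demoSlotPredicates() {
    const Slot s(100, 50, /*peeridIn=*/7); // covers [100, 150)
    const bool isInside = s.contains(120);     // true: 100 <= 120 < 150
    const bool slotPrecedes = s.precedes(150); // true: 150 is at or past getStop()
    const bool slotFollows = s.follows(99);    // true: 99 is before getStart()
    (void)isInside;
    (void)slotPrecedes;
    (void)slotFollows;
}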
struct Peer {
PeerId peerid;
uint32_t index = -1;
uint32_t node_count = 0;
ProofRef proof;
bool hasFinalized = false;
// The network stack uses timestamp in seconds, so we oblige.
std::chrono::seconds registration_time;
std::chrono::seconds nextPossibleConflictTime;
/**
* Consider dropping the peer if no node is attached after this timeout
* has expired.
*/
static constexpr auto DANGLING_TIMEOUT = 15min;
Peer(PeerId peerid_, ProofRef proof_,
std::chrono::seconds nextPossibleConflictTime_)
: peerid(peerid_), proof(std::move(proof_)),
registration_time(GetTime<std::chrono::seconds>()),
nextPossibleConflictTime(std::move(nextPossibleConflictTime_)) {}
const ProofId &getProofId() const { return proof->getId(); }
uint32_t getScore() const { return proof->getScore(); }
};
struct proof_index {
using result_type = ProofId;
result_type operator()(const Peer &p) const { return p.proof->getId(); }
};
struct score_index {
using result_type = uint32_t;
result_type operator()(const Peer &p) const { return p.getScore(); }
};
struct next_request_time {};
struct PendingNode {
ProofId proofid;
NodeId nodeid;
PendingNode(ProofId proofid_, NodeId nodeid_)
: proofid(proofid_), nodeid(nodeid_){};
};
struct by_proofid;
struct by_nodeid;
struct by_score;
enum class ProofRegistrationResult {
NONE = 0,
ALREADY_REGISTERED,
- ORPHAN,
+ IMMATURE,
INVALID,
CONFLICTING,
REJECTED,
COOLDOWN_NOT_ELAPSED,
DANGLING,
MISSING_UTXO,
};
class ProofRegistrationState : public ValidationState<ProofRegistrationResult> {
};
namespace bmi = boost::multi_index;
class PeerManager {
std::vector<Slot> slots;
uint64_t slotCount = 0;
uint64_t fragmentation = 0;
/**
* Several nodes can make an avalanche peer. In this case, all nodes are
* considered interchangeable parts of the same peer.
*/
using PeerSet = boost::multi_index_container<
Peer, bmi::indexed_by<
// index by peerid
bmi::hashed_unique<bmi::member<Peer, PeerId, &Peer::peerid>>,
// index by proof
bmi::hashed_unique<bmi::tag<by_proofid>, proof_index,
SaltedProofIdHasher>,
// ordered by score, decreasing order
bmi::ordered_non_unique<bmi::tag<by_score>, score_index,
std::greater<uint32_t>>>>;
PeerId nextPeerId = 0;
PeerSet peers;
ProofPool validProofPool;
ProofPool conflictingProofPool;
- ProofPool orphanProofPool;
+ ProofPool immatureProofPool;
using ProofRadixTree = RadixTree<const Proof, ProofRadixTreeAdapter>;
ProofRadixTree shareableProofs;
using NodeSet = boost::multi_index_container<
Node,
bmi::indexed_by<
// index by nodeid
bmi::hashed_unique<bmi::member<Node, NodeId, &Node::nodeid>>,
// sorted by peerid/nextRequestTime
bmi::ordered_non_unique<
bmi::tag<next_request_time>,
bmi::composite_key<
Node, bmi::member<Node, PeerId, &Node::peerid>,
bmi::member<Node, TimePoint, &Node::nextRequestTime>>>>>;
NodeSet nodes;
/**
* Flag indicating that we failed to select a node and need to expand our
* node set.
*/
std::atomic<bool> needMoreNodes{false};
using PendingNodeSet = boost::multi_index_container<
PendingNode,
bmi::indexed_by<
// index by proofid
bmi::hashed_non_unique<
bmi::tag<by_proofid>,
bmi::member<PendingNode, ProofId, &PendingNode::proofid>,
SaltedProofIdHasher>,
// index by nodeid
bmi::hashed_unique<
bmi::tag<by_nodeid>,
bmi::member<PendingNode, NodeId, &PendingNode::nodeid>>>>;
PendingNodeSet pendingNodes;
static constexpr int SELECT_PEER_MAX_RETRY = 3;
static constexpr int SELECT_NODE_MAX_RETRY = 3;
/**
* Track proof ids to broadcast
*/
std::unordered_set<ProofId, SaltedProofIdHasher> m_unbroadcast_proofids;
/**
* Remember the last proofs that have been evicted because they had no node
* attached.
* A false positive would cause the proof to fail to register if there is
* no previously known node that is claiming it, which is acceptable
* given the low expected false positive rate.
*/
CRollingBloomFilter danglingProofIds{10000, 0.00001};
/**
* Quorum management.
*/
uint32_t totalPeersScore = 0;
uint32_t connectedPeersScore = 0;
Amount stakeUtxoDustThreshold;
ChainstateManager &chainman;
public:
PeerManager(const Amount &stakeUtxoDustThresholdIn,
ChainstateManager &chainmanIn)
: stakeUtxoDustThreshold(stakeUtxoDustThresholdIn),
chainman(chainmanIn){};
/**
* Node API.
*/
bool addNode(NodeId nodeid, const ProofId &proofid);
bool removeNode(NodeId nodeid);
size_t getNodeCount() const { return nodes.size(); }
size_t getPendingNodeCount() const { return pendingNodes.size(); }
// Update when a node is to be polled next.
bool updateNextRequestTime(NodeId nodeid, TimePoint timeout);
/**
* Flag that a node did send its compact proofs.
* @return True if the flag changed state, i.e. if this is the first time
* the message is accounted for this node.
*/
bool latchAvaproofsSent(NodeId nodeid);
// Randomly select a node to poll.
NodeId selectNode();
/**
* Returns true if we encountered a lack of nodes since the last call.
*/
bool shouldRequestMoreNodes() { return needMoreNodes.exchange(false); }
template <typename Callable>
bool forNode(NodeId nodeid, Callable &&func) const {
auto it = nodes.find(nodeid);
return it != nodes.end() && func(*it);
}
template <typename Callable>
void forEachNode(const Peer &peer, Callable &&func) const {
auto &nview = nodes.get<next_request_time>();
auto range = nview.equal_range(peer.peerid);
for (auto it = range.first; it != range.second; ++it) {
func(*it);
}
}
/**
* Proof and Peer related API.
*/
/**
* Update the time before which a proof is not allowed to have conflicting
* UTXO with this peer's proof.
*/
bool updateNextPossibleConflictTime(PeerId peerid,
const std::chrono::seconds &nextTime);
/**
* Latch on that this peer has a finalized proof.
*/
bool setFinalized(PeerId peerid);
/**
* Registration mode
* - DEFAULT: Default policy, register only if the proof is unknown and has
* no conflict.
* - FORCE_ACCEPT: Turn a valid proof into a peer even if it has conflicts
* and is not the best candidate.
*/
enum class RegistrationMode {
DEFAULT,
FORCE_ACCEPT,
};
bool registerProof(const ProofRef &proof,
ProofRegistrationState &registrationState,
RegistrationMode mode = RegistrationMode::DEFAULT);
bool registerProof(const ProofRef &proof,
RegistrationMode mode = RegistrationMode::DEFAULT) {
ProofRegistrationState dummy;
return registerProof(proof, dummy, mode);
}
/**
* Rejection mode
* - DEFAULT: Default policy, reject a proof and attempt to keep it in the
* conflicting pool if possible.
* - INVALIDATE: Reject a proof by removing it from any of the pools.
*
* In any case if a peer is rejected, it attempts to pull the conflicting
* proofs back.
*/
enum class RejectionMode {
DEFAULT,
INVALIDATE,
};
bool rejectProof(const ProofId &proofid,
RejectionMode mode = RejectionMode::DEFAULT);
bool exists(const ProofId &proofid) const {
return getProof(proofid) != nullptr;
}
void cleanupDanglingProofs(const ProofRef &localProof);
template <typename Callable>
bool forPeer(const ProofId &proofid, Callable &&func) const {
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
return it != pview.end() && func(*it);
}
template <typename Callable> void forEachPeer(Callable &&func) const {
for (const auto &p : peers) {
func(p);
}
}
/**
* Update the peer set when a new block is connected.
*/
std::unordered_set<ProofRef, SaltedProofHasher> updatedBlockTip();
/**
* Proof broadcast API.
*/
void addUnbroadcastProof(const ProofId &proofid);
void removeUnbroadcastProof(const ProofId &proofid);
auto getUnbroadcastProofs() const { return m_unbroadcast_proofids; }
/*
* Quorum management
*/
uint32_t getTotalPeersScore() const { return totalPeersScore; }
uint32_t getConnectedPeersScore() const { return connectedPeersScore; }
/****************************************************
* Functions which are public for testing purposes. *
****************************************************/
/**
* Remove an existing peer.
*/
bool removePeer(const PeerId peerid);
/**
* Randomly select a peer to poll.
*/
PeerId selectPeer() const;
/**
* Trigger maintenance of internal data structures.
* Returns how much slot space was saved after compaction.
*/
uint64_t compact();
/**
* Perform consistency check on internal data structures.
*/
bool verify() const;
// Accessors.
uint64_t getSlotCount() const { return slotCount; }
uint64_t getFragmentation() const { return fragmentation; }
ProofRef getProof(const ProofId &proofid) const;
bool isBoundToPeer(const ProofId &proofid) const;
bool isImmature(const ProofId &proofid) const;
bool isInConflictingPool(const ProofId &proofid) const;
size_t getConflictingProofCount() {
return conflictingProofPool.countProofs();
}
- size_t getImmatureProofCount() { return orphanProofPool.countProofs(); }
+ size_t getImmatureProofCount() { return immatureProofPool.countProofs(); }
const ProofRadixTree &getShareableProofsSnapshot() const {
return shareableProofs;
}
const Amount &getStakeUtxoDustThreshold() const {
return stakeUtxoDustThreshold;
}
private:
template <typename ProofContainer>
void moveToConflictingPool(const ProofContainer &proofs);
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid);
bool addNodeToPeer(const PeerSet::iterator &it);
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count = 1);
friend struct ::avalanche::TestPeerManager;
};
/**
* Internal methods that are exposed for testing purposes.
*/
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
const uint64_t max);
} // namespace avalanche
#endif // BITCOIN_AVALANCHE_PEERMANAGER_H
diff --git a/src/avalanche/test/peermanager_tests.cpp b/src/avalanche/test/peermanager_tests.cpp
index 4d3599e91..ef83615f7 100644
--- a/src/avalanche/test/peermanager_tests.cpp
+++ b/src/avalanche/test/peermanager_tests.cpp
@@ -1,2055 +1,2055 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/proofcomparator.h>
#include <avalanche/test/util.h>
#include <config.h>
#include <script/standard.h>
#include <util/time.h>
#include <util/translation.h>
#include <validation.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
using namespace avalanche;
namespace avalanche {
namespace {
struct TestPeerManager {
static bool nodeBelongToPeer(const PeerManager &pm, NodeId nodeid,
PeerId peerid) {
return pm.forNode(nodeid, [&](const Node &node) {
return node.peerid == peerid;
});
}
static bool isNodePending(const PeerManager &pm, NodeId nodeid) {
auto &pendingNodesView = pm.pendingNodes.get<by_nodeid>();
return pendingNodesView.find(nodeid) != pendingNodesView.end();
}
static PeerId getPeerIdForProofId(PeerManager &pm,
const ProofId &proofid) {
auto &pview = pm.peers.get<by_proofid>();
auto it = pview.find(proofid);
return it == pview.end() ? NO_PEER : it->peerid;
}
static PeerId registerAndGetPeerId(PeerManager &pm,
const ProofRef &proof) {
pm.registerProof(proof);
return getPeerIdForProofId(pm, proof->getId());
}
static std::vector<uint32_t> getOrderedScores(const PeerManager &pm) {
std::vector<uint32_t> scores;
auto &peerView = pm.peers.get<by_score>();
for (const Peer &peer : peerView) {
scores.push_back(peer.getScore());
}
return scores;
}
static void
cleanupDanglingProofs(PeerManager &pm,
const ProofRef &localProof = ProofRef()) {
pm.cleanupDanglingProofs(localProof);
}
};
static void addCoin(CChainState &chainstate, const COutPoint &outpoint,
const CKey &key,
const Amount amount = PROOF_DUST_THRESHOLD,
uint32_t height = 100, bool is_coinbase = false) {
CScript script = GetScriptForDestination(PKHash(key.GetPubKey()));
LOCK(cs_main);
CCoinsViewCache &coins = chainstate.CoinsTip();
coins.AddCoin(outpoint,
Coin(CTxOut(amount, script), height, is_coinbase), false);
}
static COutPoint createUtxo(CChainState &chainstate, const CKey &key,
const Amount amount = PROOF_DUST_THRESHOLD,
uint32_t height = 100,
bool is_coinbase = false) {
COutPoint outpoint(TxId(GetRandHash()), 0);
addCoin(chainstate, outpoint, key, amount, height, is_coinbase);
return outpoint;
}
static ProofRef
buildProof(const CKey &key,
const std::vector<std::tuple<COutPoint, Amount>> &outpoints,
const CKey &master = CKey::MakeCompressedKey(),
int64_t sequence = 1, uint32_t height = 100,
bool is_coinbase = false, int64_t expirationTime = 0) {
ProofBuilder pb(sequence, expirationTime, master);
for (const auto &outpoint : outpoints) {
BOOST_CHECK(pb.addUTXO(std::get<0>(outpoint), std::get<1>(outpoint),
height, is_coinbase, key));
}
return pb.build();
}
template <typename... Args>
static ProofRef
buildProofWithOutpoints(const CKey &key,
const std::vector<COutPoint> &outpoints,
Amount amount, Args &&...args) {
std::vector<std::tuple<COutPoint, Amount>> outpointsWithAmount;
std::transform(
outpoints.begin(), outpoints.end(),
std::back_inserter(outpointsWithAmount),
[amount](const auto &o) { return std::make_tuple(o, amount); });
return buildProof(key, outpointsWithAmount,
std::forward<Args>(args)...);
}
static ProofRef
buildProofWithSequence(const CKey &key,
const std::vector<COutPoint> &outpoints,
int64_t sequence) {
return buildProofWithOutpoints(key, outpoints, PROOF_DUST_THRESHOLD,
key, sequence);
}
} // namespace
} // namespace avalanche
namespace {
struct PeerManagerFixture : public TestChain100Setup {
PeerManagerFixture() {
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "1");
}
~PeerManagerFixture() {
gArgs.ClearForcedArg("-avaproofstakeutxoconfirmations");
}
};
} // namespace
namespace {
struct NoCoolDownFixture : public PeerManagerFixture {
NoCoolDownFixture() {
gArgs.ForceSetArg("-avalancheconflictingproofcooldown", "0");
}
~NoCoolDownFixture() {
gArgs.ClearForcedArg("-avalancheconflictingproofcooldown");
}
};
} // namespace
BOOST_FIXTURE_TEST_SUITE(peermanager_tests, PeerManagerFixture)
BOOST_AUTO_TEST_CASE(select_peer_linear) {
// No peers.
BOOST_CHECK_EQUAL(selectPeerImpl({}, 0, 0), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl({}, 1, 3), NO_PEER);
// One peer
const std::vector<Slot> oneslot = {{100, 100, 23}};
// Undershoot
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 0, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 42, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 99, 300), NO_PEER);
// Nailed it
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 100, 300), 23);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 142, 300), 23);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 199, 300), 23);
// Overshoot
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 200, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 242, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 299, 300), NO_PEER);
// Two peers
const std::vector<Slot> twoslots = {{100, 100, 69}, {300, 100, 42}};
// Undershoot
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 0, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 42, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 99, 500), NO_PEER);
// First entry
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 100, 500), 69);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 142, 500), 69);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 199, 500), 69);
// In between
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 200, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 242, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 299, 500), NO_PEER);
// Second entry
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 300, 500), 42);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 342, 500), 42);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 399, 500), 42);
// Overshoot
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 400, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 442, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 499, 500), NO_PEER);
}
BOOST_AUTO_TEST_CASE(select_peer_dichotomic) {
std::vector<Slot> slots;
// 100 peers of size 1 with 1 empty element apart.
uint64_t max = 1;
for (int i = 0; i < 100; i++) {
slots.emplace_back(max, 1, i);
max += 2;
}
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 4, max), NO_PEER);
// Check that we get what we expect.
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
}
BOOST_CHECK_EQUAL(selectPeerImpl(slots, max, max), NO_PEER);
// Update the slots to be heavily skewed toward the last element.
slots[99] = slots[99].withScore(101);
max = slots[99].getStop();
BOOST_CHECK_EQUAL(max, 300);
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
}
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 200, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 256, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 299, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 300, max), NO_PEER);
// Update the slots to be heavily skewed toward the first element.
for (int i = 0; i < 100; i++) {
slots[i] = slots[i].withStart(slots[i].getStart() + 100);
}
slots[0] = Slot(1, slots[0].getStop() - 1, slots[0].getPeerId());
slots[99] = slots[99].withScore(1);
max = slots[99].getStop();
BOOST_CHECK_EQUAL(max, 300);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 0, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 1, max), 0);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 42, max), 0);
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 1, max), i);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 2, max), NO_PEER);
}
}
BOOST_AUTO_TEST_CASE(select_peer_random) {
for (int c = 0; c < 1000; c++) {
size_t size = InsecureRandBits(10) + 1;
std::vector<Slot> slots;
slots.reserve(size);
uint64_t max = InsecureRandBits(3);
auto next = [&]() {
uint64_t r = max;
max += InsecureRandBits(3);
return r;
};
for (size_t i = 0; i < size; i++) {
const uint64_t start = next();
const uint32_t score = InsecureRandBits(3);
max += score;
slots.emplace_back(start, score, i);
}
for (int k = 0; k < 100; k++) {
uint64_t s = max > 0 ? InsecureRandRange(max) : 0;
auto i = selectPeerImpl(slots, s, max);
// /!\ Because of the way we construct the vector, the peer id is
// always the index. This might not be the case in practice.
BOOST_CHECK(i == NO_PEER || slots[i].contains(s));
}
}
}
static void addNodeWithScore(CChainState &active_chainstate,
avalanche::PeerManager &pm, NodeId node,
uint32_t score) {
auto proof = buildRandomProof(active_chainstate, score);
BOOST_CHECK(pm.registerProof(proof));
BOOST_CHECK(pm.addNode(node, proof->getId()));
};
BOOST_AUTO_TEST_CASE(peer_probabilities) {
ChainstateManager &chainman = *Assert(m_node.chainman);
// No peers.
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
const NodeId node0 = 42, node1 = 69, node2 = 37;
CChainState &active_chainstate = chainman.ActiveChainstate();
// One peer, we always return it.
addNodeWithScore(active_chainstate, pm, node0, MIN_VALID_PROOF_SCORE);
BOOST_CHECK_EQUAL(pm.selectNode(), node0);
// Two peers, verify ratio.
addNodeWithScore(active_chainstate, pm, node1, 2 * MIN_VALID_PROOF_SCORE);
std::unordered_map<PeerId, int> results = {};
for (int i = 0; i < 10000; i++) {
size_t n = pm.selectNode();
BOOST_CHECK(n == node0 || n == node1);
results[n]++;
}
BOOST_CHECK(abs(2 * results[0] - results[1]) < 500);
// Three peers, verify ratio.
addNodeWithScore(active_chainstate, pm, node2, MIN_VALID_PROOF_SCORE);
results.clear();
for (int i = 0; i < 10000; i++) {
size_t n = pm.selectNode();
BOOST_CHECK(n == node0 || n == node1 || n == node2);
results[n]++;
}
BOOST_CHECK(abs(results[0] - results[1] + results[2]) < 500);
}
BOOST_AUTO_TEST_CASE(remove_peer) {
ChainstateManager &chainman = *Assert(m_node.chainman);
// No peers.
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);
CChainState &active_chainstate = chainman.ActiveChainstate();
// Add 4 peers.
std::array<PeerId, 8> peerids;
for (int i = 0; i < 4; i++) {
auto p = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerids[i] = TestPeerManager::registerAndGetPeerId(pm, p);
BOOST_CHECK(pm.addNode(InsecureRand32(), p->getId()));
}
BOOST_CHECK_EQUAL(pm.getSlotCount(), 40000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[2] ||
p == peerids[3]);
}
// Remove one peer, it never shows up now.
BOOST_CHECK(pm.removePeer(peerids[2]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 40000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Make sure we compact to never get NO_PEER.
BOOST_CHECK_EQUAL(pm.compact(), 10000);
BOOST_CHECK(pm.verify());
BOOST_CHECK_EQUAL(pm.getSlotCount(), 30000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[3]);
}
// Add 4 more peers.
for (int i = 0; i < 4; i++) {
auto p = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerids[i + 4] = TestPeerManager::registerAndGetPeerId(pm, p);
BOOST_CHECK(pm.addNode(InsecureRand32(), p->getId()));
}
BOOST_CHECK_EQUAL(pm.getSlotCount(), 70000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
BOOST_CHECK(pm.removePeer(peerids[0]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 70000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Removing the last entry does not increase fragmentation.
BOOST_CHECK(pm.removePeer(peerids[7]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 60000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Make sure we compact to never get NO_PEER.
BOOST_CHECK_EQUAL(pm.compact(), 10000);
BOOST_CHECK(pm.verify());
BOOST_CHECK_EQUAL(pm.getSlotCount(), 50000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[1] || p == peerids[3] || p == peerids[4] ||
p == peerids[5] || p == peerids[6]);
}
// Removing non-existent peers fails.
BOOST_CHECK(!pm.removePeer(peerids[0]));
BOOST_CHECK(!pm.removePeer(peerids[2]));
BOOST_CHECK(!pm.removePeer(peerids[7]));
BOOST_CHECK(!pm.removePeer(NO_PEER));
}
BOOST_AUTO_TEST_CASE(compact_slots) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
// Add 4 peers.
std::array<PeerId, 4> peerids;
for (int i = 0; i < 4; i++) {
auto p = buildRandomProof(chainman.ActiveChainstate(),
MIN_VALID_PROOF_SCORE);
peerids[i] = TestPeerManager::registerAndGetPeerId(pm, p);
BOOST_CHECK(pm.addNode(InsecureRand32(), p->getId()));
}
// Remove all peers.
for (auto p : peerids) {
pm.removePeer(p);
}
BOOST_CHECK_EQUAL(pm.getSlotCount(), 30000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 30000);
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);
}
BOOST_CHECK_EQUAL(pm.compact(), 30000);
BOOST_CHECK(pm.verify());
BOOST_CHECK_EQUAL(pm.getSlotCount(), 0);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
}
BOOST_AUTO_TEST_CASE(node_crud) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CChainState &active_chainstate = chainman.ActiveChainstate();
// Create one peer.
auto proof =
buildRandomProof(active_chainstate, 10000000 * MIN_VALID_PROOF_SCORE);
BOOST_CHECK(pm.registerProof(proof));
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
// Add 4 nodes.
const ProofId &proofid = proof->getId();
for (int i = 0; i < 4; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
}
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n >= 0 && n < 4);
BOOST_CHECK(
pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
}
// Remove a node, check that it doesn't show up.
BOOST_CHECK(pm.removeNode(2));
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n == 0 || n == 1 || n == 3);
BOOST_CHECK(
pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
}
// Push a node's timeout in the future, so that it doesn't show up.
BOOST_CHECK(pm.updateNextRequestTime(1, std::chrono::steady_clock::now() +
std::chrono::hours(24)));
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n == 0 || n == 3);
BOOST_CHECK(
pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
}
// Move a node from one peer to another. This peer has a very low score such
// that chances of being picked are 1 in 10 million.
addNodeWithScore(active_chainstate, pm, 3, MIN_VALID_PROOF_SCORE);
int node3selected = 0;
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
if (n == 3) {
// Selecting this node should be exceedingly unlikely.
BOOST_CHECK(node3selected++ < 1);
} else {
BOOST_CHECK_EQUAL(n, 0);
}
BOOST_CHECK(
pm.updateNextRequestTime(n, std::chrono::steady_clock::now()));
}
}
BOOST_AUTO_TEST_CASE(node_binding) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CChainState &active_chainstate = chainman.ActiveChainstate();
auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &proofid = proof->getId();
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
// Add a bunch of nodes with no associated peer
for (int i = 0; i < 10; i++) {
BOOST_CHECK(!pm.addNode(i, proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), i + 1);
}
// Now create the peer and check all the nodes are bound
const PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
for (int i = 0; i < 10; i++) {
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
BOOST_CHECK(pm.verify());
// Disconnect some nodes
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.removeNode(i));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10 - i - 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
// Add nodes when the peer already exists
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5 + i + 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
auto alt_proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &alt_proofid = alt_proof->getId();
// Update some nodes from a known proof to an unknown proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(!pm.addNode(i, alt_proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10 - i - 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), i + 1);
}
auto alt2_proof =
buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &alt2_proofid = alt2_proof->getId();
// Update some nodes from an unknown proof to another unknown proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(!pm.addNode(i, alt2_proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 5);
}
// Update some nodes from an unknown proof to a known proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5 + i + 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 5 - i - 1);
}
// Remove the peer, the nodes should be pending again
BOOST_CHECK(pm.removePeer(peerid));
BOOST_CHECK(!pm.exists(proof->getId()));
for (int i = 0; i < 10; i++) {
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 10);
}
BOOST_CHECK(pm.verify());
// Remove the remaining pending nodes, check the count drops accordingly
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.removeNode(i));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 10 - i - 1);
}
}
BOOST_AUTO_TEST_CASE(node_binding_reorg) {
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto proof = buildRandomProof(chainman.ActiveChainstate(),
MIN_VALID_PROOF_SCORE, 99);
const ProofId &proofid = proof->getId();
PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
BOOST_CHECK(pm.verify());
// Add nodes to our peer
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
// Make the proof immature by reorging to a shorter chain
{
BlockValidationState state;
chainman.ActiveChainstate().InvalidateBlock(GetConfig(), state,
chainman.ActiveTip());
BOOST_CHECK_EQUAL(chainman.ActiveHeight(), 99);
}
pm.updatedBlockTip();
BOOST_CHECK(pm.isImmature(proofid));
BOOST_CHECK(!pm.isBoundToPeer(proofid));
for (int i = 0; i < 10; i++) {
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
BOOST_CHECK(pm.verify());
// Make the proof great again
{
// Advance the clock so the newly mined block won't collide with the
// other deterministically-generated blocks
SetMockTime(GetTime() + 20);
mineBlocks(1);
BlockValidationState state;
BOOST_CHECK(
chainman.ActiveChainstate().ActivateBestChain(GetConfig(), state));
BOOST_CHECK_EQUAL(chainman.ActiveHeight(), 100);
}
pm.updatedBlockTip();
BOOST_CHECK(!pm.isImmature(proofid));
BOOST_CHECK(pm.isBoundToPeer(proofid));
// The peerid has certainly been updated
peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
for (int i = 0; i < 10; i++) {
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
BOOST_CHECK(pm.verify());
}
BOOST_AUTO_TEST_CASE(proof_conflict) {
auto key = CKey::MakeCompressedKey();
TxId txid1(GetRandHash());
TxId txid2(GetRandHash());
BOOST_CHECK(txid1 != txid2);
const Amount v = PROOF_DUST_THRESHOLD;
const int height = 100;
ChainstateManager &chainman = *Assert(m_node.chainman);
for (uint32_t i = 0; i < 10; i++) {
addCoin(chainman.ActiveChainstate(), {txid1, i}, key);
addCoin(chainman.ActiveChainstate(), {txid2, i}, key);
}
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CKey masterKey = CKey::MakeCompressedKey();
const auto getPeerId = [&](const std::vector<COutPoint> &outpoints) {
return TestPeerManager::registerAndGetPeerId(
pm, buildProofWithOutpoints(key, outpoints, v, masterKey, 0, height,
false, 0));
};
// Add one peer.
const PeerId peer1 = getPeerId({COutPoint(txid1, 0)});
BOOST_CHECK(peer1 != NO_PEER);
// Same proof, same peer.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0)}), peer1);
// Different txid, different proof.
const PeerId peer2 = getPeerId({COutPoint(txid2, 0)});
BOOST_CHECK(peer2 != NO_PEER && peer2 != peer1);
// Different index, different proof.
const PeerId peer3 = getPeerId({COutPoint(txid1, 1)});
BOOST_CHECK(peer3 != NO_PEER && peer3 != peer1);
// Empty proof, no peer.
BOOST_CHECK_EQUAL(getPeerId({}), NO_PEER);
// Multiple inputs.
const PeerId peer4 = getPeerId({COutPoint(txid1, 2), COutPoint(txid2, 2)});
BOOST_CHECK(peer4 != NO_PEER && peer4 != peer1);
// Duplicated input.
{
ProofBuilder pb(0, 0, CKey::MakeCompressedKey());
COutPoint o(txid1, 3);
BOOST_CHECK(pb.addUTXO(o, v, height, false, key));
BOOST_CHECK(
!pm.registerProof(TestProofBuilder::buildDuplicatedStakes(pb)));
}
// Multiple inputs, collision on first input.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0), COutPoint(txid2, 4)}),
NO_PEER);
// Multiple inputs, collision on second input.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 4), COutPoint(txid2, 0)}),
NO_PEER);
// Multiple inputs, collision on both inputs.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0), COutPoint(txid2, 2)}),
NO_PEER);
}
BOOST_AUTO_TEST_CASE(orphan_proofs) {
ChainstateManager &chainman = *Assert(m_node.chainman);
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto key = CKey::MakeCompressedKey();
int immatureHeight = 100;
auto registerOrphan = [&](const ProofRef &proof) {
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proof, state));
- BOOST_CHECK(state.GetResult() == ProofRegistrationResult::ORPHAN);
+ BOOST_CHECK(state.GetResult() == ProofRegistrationResult::IMMATURE);
};
auto checkOrphan = [&](const ProofRef &proof, bool expectedOrphan) {
const ProofId &proofid = proof->getId();
BOOST_CHECK(pm.exists(proofid));
BOOST_CHECK_EQUAL(pm.isImmature(proofid), expectedOrphan);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofid), !expectedOrphan);
bool ret = false;
pm.forEachPeer([&](const Peer &peer) {
if (proof->getId() == peer.proof->getId()) {
ret = true;
}
});
BOOST_CHECK_EQUAL(ret, !expectedOrphan);
};
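// When expectedOrphan is true the proof must still exist in the pool but be
// immature and not bound to any peer, so forEachPeer must not report it.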
// Track orphans so we can test them later
std::vector<ProofRef> orphans;
// Fill up orphan pool to test the size limit
- for (int64_t i = 1; i <= AVALANCHE_MAX_ORPHAN_PROOFS; i++) {
+ for (int64_t i = 1; i <= AVALANCHE_MAX_IMMATURE_PROOFS; i++) {
COutPoint outpoint = COutPoint(TxId(GetRandHash()), 0);
auto proof = buildProofWithOutpoints(
key, {outpoint}, i * PROOF_DUST_THRESHOLD, key, 0, immatureHeight);
addCoin(chainman.ActiveChainstate(), outpoint, key,
i * PROOF_DUST_THRESHOLD, immatureHeight);
registerOrphan(proof);
checkOrphan(proof, true);
orphans.push_back(proof);
}
// More orphans evict lower scoring proofs
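// Each registration below overflows the pool, and the lowest-scoring proof
// (tracked at the front of our vector) is expected to be evicted.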
for (auto i = 0; i < 100; i++) {
COutPoint outpoint = COutPoint(TxId(GetRandHash()), 0);
auto proof =
buildProofWithOutpoints(key, {outpoint}, 200 * PROOF_DUST_THRESHOLD,
key, 0, immatureHeight);
addCoin(chainman.ActiveChainstate(), outpoint, key,
200 * PROOF_DUST_THRESHOLD, immatureHeight);
registerOrphan(proof);
checkOrphan(proof, true);
orphans.push_back(proof);
BOOST_CHECK(!pm.exists(orphans.front()->getId()));
orphans.erase(orphans.begin());
}
// Replacement when the pool is full still works
{
const COutPoint &outpoint =
orphans.front()->getStakes()[0].getStake().getUTXO();
auto proof =
buildProofWithOutpoints(key, {outpoint}, 101 * PROOF_DUST_THRESHOLD,
key, 1, immatureHeight);
registerOrphan(proof);
checkOrphan(proof, true);
orphans.push_back(proof);
BOOST_CHECK(!pm.exists(orphans.front()->getId()));
orphans.erase(orphans.begin());
}
// Mine a block to increase the chain height, making all orphans mature
mineBlocks(1);
pm.updatedBlockTip();
for (const auto &proof : orphans) {
checkOrphan(proof, false);
}
}
BOOST_AUTO_TEST_CASE(dangling_node) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CChainState &active_chainstate = chainman.ActiveChainstate();
auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
const TimePoint theFuture(std::chrono::steady_clock::now() +
std::chrono::hours(24));
// Add nodes to this peer and update their request time far in the future
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proof->getId()));
BOOST_CHECK(pm.updateNextRequestTime(i, theFuture));
}
// Remove the peer
BOOST_CHECK(pm.removePeer(peerid));
// Check the nodes are still there
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.forNode(i, [](const Node &n) { return true; }));
}
// Build a new one
proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
// Update the nodes with the new proof
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proof->getId()));
BOOST_CHECK(pm.forNode(
i, [&](const Node &n) { return n.nextRequestTime == theFuture; }));
}
// Remove the peer
BOOST_CHECK(pm.removePeer(peerid));
// Disconnect the nodes
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.removeNode(i));
}
}
BOOST_AUTO_TEST_CASE(proof_accessors) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
constexpr int numProofs = 10;
std::vector<ProofRef> proofs;
proofs.reserve(numProofs);
for (int i = 0; i < numProofs; i++) {
proofs.push_back(buildRandomProof(chainman.ActiveChainstate(),
MIN_VALID_PROOF_SCORE));
}
for (int i = 0; i < numProofs; i++) {
BOOST_CHECK(pm.registerProof(proofs[i]));
{
ProofRegistrationState state;
// Fail to add an existing proof
BOOST_CHECK(!pm.registerProof(proofs[i], state));
BOOST_CHECK(state.GetResult() ==
ProofRegistrationResult::ALREADY_REGISTERED);
}
for (int added = 0; added <= i; added++) {
auto proof = pm.getProof(proofs[added]->getId());
BOOST_CHECK(proof != nullptr);
const ProofId &proofid = proof->getId();
BOOST_CHECK_EQUAL(proofid, proofs[added]->getId());
}
}
// No stake, copied from proof_tests.cpp
const std::string badProofHex(
"96527eae083f1f24625f049d9e54bb9a2102a93d98bf42ab90cfc0bf9e7c634ed76a7"
"3e95b02cacfd357b64e4fb6c92e92dd00");
bilingual_str error;
auto badProof = RCUPtr<Proof>::make();
BOOST_CHECK(Proof::FromHex(*badProof, badProofHex, error));
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(badProof, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::INVALID);
}
BOOST_FIXTURE_TEST_CASE(conflicting_proof_rescan, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
CChainState &active_chainstate = chainman.ActiveChainstate();
const COutPoint conflictingOutpoint = createUtxo(active_chainstate, key);
const COutPoint outpointToSend = createUtxo(active_chainstate, key);
const Amount amount = PROOF_DUST_THRESHOLD;
ProofRef proofToInvalidate = buildProofWithOutpoints(
key, {conflictingOutpoint, outpointToSend}, amount);
BOOST_CHECK(pm.registerProof(proofToInvalidate));
ProofRef conflictingProof = buildProofWithOutpoints(
key, {conflictingOutpoint, createUtxo(active_chainstate, key)}, amount);
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(conflictingProof, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::CONFLICTING);
BOOST_CHECK(pm.isInConflictingPool(conflictingProof->getId()));
{
LOCK(cs_main);
CCoinsViewCache &coins = active_chainstate.CoinsTip();
// Make proofToInvalidate invalid
coins.SpendCoin(outpointToSend);
}
pm.updatedBlockTip();
BOOST_CHECK(!pm.exists(proofToInvalidate->getId()));
BOOST_CHECK(!pm.isInConflictingPool(conflictingProof->getId()));
BOOST_CHECK(pm.isBoundToPeer(conflictingProof->getId()));
}
BOOST_FIXTURE_TEST_CASE(conflicting_proof_selection, NoCoolDownFixture) {
const CKey key = CKey::MakeCompressedKey();
const Amount amount(PROOF_DUST_THRESHOLD);
const uint32_t height = 100;
const bool is_coinbase = false;
ChainstateManager &chainman = *Assert(m_node.chainman);
CChainState &active_chainstate = chainman.ActiveChainstate();
// This will be the conflicting UTXO for all the following proofs
auto conflictingOutpoint = createUtxo(active_chainstate, key, amount);
auto proof_base = buildProofWithSequence(key, {conflictingOutpoint}, 10);
gArgs.ForceSetArg("-enableavalancheproofreplacement", "1");
ConflictingProofComparator comparator;
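// comparator(a, b) returns true when proof a is preferred over proof b; the
// helper below checks that registration agrees with the comparator's verdict.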
auto checkPreferred = [&](const ProofRef &candidate,
const ProofRef &reference, bool expectAccepted) {
BOOST_CHECK_EQUAL(comparator(candidate, reference), expectAccepted);
BOOST_CHECK_EQUAL(comparator(reference, candidate), !expectAccepted);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
BOOST_CHECK(pm.registerProof(reference));
BOOST_CHECK(pm.isBoundToPeer(reference->getId()));
ProofRegistrationState state;
BOOST_CHECK_EQUAL(pm.registerProof(candidate, state), expectAccepted);
BOOST_CHECK_EQUAL(state.IsValid(), expectAccepted);
BOOST_CHECK_EQUAL(state.GetResult() ==
ProofRegistrationResult::CONFLICTING,
!expectAccepted);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(candidate->getId()), expectAccepted);
BOOST_CHECK_EQUAL(pm.isInConflictingPool(candidate->getId()),
!expectAccepted);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(reference->getId()),
!expectAccepted);
BOOST_CHECK_EQUAL(pm.isInConflictingPool(reference->getId()),
expectAccepted);
};
// Same master key, lower sequence number
checkPreferred(buildProofWithSequence(key, {conflictingOutpoint}, 9),
proof_base, false);
// Same master key, higher sequence number
checkPreferred(buildProofWithSequence(key, {conflictingOutpoint}, 11),
proof_base, true);
auto buildProofFromAmounts = [&](const CKey &master,
std::vector<Amount> &&amounts) {
std::vector<std::tuple<COutPoint, Amount>> outpointsWithAmount{
{conflictingOutpoint, amount}};
std::transform(amounts.begin(), amounts.end(),
std::back_inserter(outpointsWithAmount),
[&key, &active_chainstate](const Amount amount) {
return std::make_tuple(
createUtxo(active_chainstate, key, amount),
amount);
});
return buildProof(key, outpointsWithAmount, master, 0, height,
is_coinbase, 0);
};
auto proof_multiUtxo = buildProofFromAmounts(
key, {2 * PROOF_DUST_THRESHOLD, 2 * PROOF_DUST_THRESHOLD});
// Test for both the same master and a different one. The sequence number
// is the same for all these tests.
for (const CKey &k : {key, CKey::MakeCompressedKey()}) {
// Low amount
checkPreferred(buildProofFromAmounts(
k, {2 * PROOF_DUST_THRESHOLD, PROOF_DUST_THRESHOLD}),
proof_multiUtxo, false);
// High amount
checkPreferred(buildProofFromAmounts(k, {2 * PROOF_DUST_THRESHOLD,
3 * PROOF_DUST_THRESHOLD}),
proof_multiUtxo, true);
// Same amount, low stake count
checkPreferred(buildProofFromAmounts(k, {4 * PROOF_DUST_THRESHOLD}),
proof_multiUtxo, true);
// Same amount, high stake count
checkPreferred(buildProofFromAmounts(k, {2 * PROOF_DUST_THRESHOLD,
PROOF_DUST_THRESHOLD,
PROOF_DUST_THRESHOLD}),
proof_multiUtxo, false);
// Same amount, same stake count, selection is done on proof id
auto proofSimilar = buildProofFromAmounts(
k, {2 * PROOF_DUST_THRESHOLD, 2 * PROOF_DUST_THRESHOLD});
checkPreferred(proofSimilar, proof_multiUtxo,
proofSimilar->getId() < proof_multiUtxo->getId());
}
gArgs.ClearForcedArg("-enableavalancheproofreplacement");
}
BOOST_AUTO_TEST_CASE(conflicting_orphans) {
ChainstateManager &chainman = *Assert(m_node.chainman);
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
CChainState &active_chainstate = chainman.ActiveChainstate();
const COutPoint conflictingOutpoint = createUtxo(active_chainstate, key);
const COutPoint matureOutpoint =
createUtxo(active_chainstate, key, PROOF_DUST_THRESHOLD, 99);
auto orphan10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
auto orphan20 =
buildProofWithSequence(key, {conflictingOutpoint, matureOutpoint}, 20);
BOOST_CHECK(!pm.registerProof(orphan10));
BOOST_CHECK(pm.isImmature(orphan10->getId()));
BOOST_CHECK(!pm.registerProof(orphan20));
BOOST_CHECK(pm.isImmature(orphan20->getId()));
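// orphan20 spends the same conflicting outpoint with a higher sequence, so
// it replaces orphan10 in the immature pool.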
BOOST_CHECK(!pm.exists(orphan10->getId()));
// Build and register a valid proof that will conflict with the orphan
auto proof30 = buildProofWithOutpoints(key, {matureOutpoint},
PROOF_DUST_THRESHOLD, key, 30, 99);
BOOST_CHECK(pm.registerProof(proof30));
BOOST_CHECK(pm.isBoundToPeer(proof30->getId()));
// Reorg to a shorter chain to orphan proof30
{
BlockValidationState state;
active_chainstate.InvalidateBlock(GetConfig(), state,
chainman.ActiveTip());
BOOST_CHECK_EQUAL(chainman.ActiveHeight(), 99);
}
// Check that a rescan will also select the preferred orphan, in this case
// proof30 will replace orphan20.
pm.updatedBlockTip();
BOOST_CHECK(!pm.isBoundToPeer(proof30->getId()));
BOOST_CHECK(pm.isImmature(proof30->getId()));
BOOST_CHECK(!pm.exists(orphan20->getId()));
}
BOOST_FIXTURE_TEST_CASE(preferred_conflicting_proof, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
const COutPoint conflictingOutpoint =
createUtxo(chainman.ActiveChainstate(), key);
auto proofSeq10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
BOOST_CHECK(pm.registerProof(proofSeq30));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(!pm.isInConflictingPool(proofSeq30->getId()));
// proofSeq10 is a worse candidate than proofSeq30, so it goes to the
// conflicting pool.
BOOST_CHECK(!pm.registerProof(proofSeq10));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(!pm.isBoundToPeer(proofSeq10->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
// proofSeq20 is a worse candidate than proofSeq30 but a better one than
// proofSeq10, so it replaces it in the conflicting pool and proofSeq10 is
// evicted.
BOOST_CHECK(!pm.registerProof(proofSeq20));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(!pm.isBoundToPeer(proofSeq20->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
BOOST_CHECK(!pm.exists(proofSeq10->getId()));
}
BOOST_FIXTURE_TEST_CASE(update_next_conflict_time, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto now = GetTime<std::chrono::seconds>();
SetMockTime(now.count());
// Updating the time of an unknown peer should fail
for (size_t i = 0; i < 10; i++) {
BOOST_CHECK(
!pm.updateNextPossibleConflictTime(PeerId(GetRandInt(1000)), now));
}
auto proof =
buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
auto checkNextPossibleConflictTime = [&](std::chrono::seconds expected) {
BOOST_CHECK(pm.forPeer(proof->getId(), [&](const Peer &p) {
return p.nextPossibleConflictTime == expected;
}));
};
checkNextPossibleConflictTime(now);
// Moving the time backwards is not possible
BOOST_CHECK(!pm.updateNextPossibleConflictTime(
peerid, now - std::chrono::seconds{1}));
checkNextPossibleConflictTime(now);
BOOST_CHECK(pm.updateNextPossibleConflictTime(
peerid, now + std::chrono::seconds{1}));
checkNextPossibleConflictTime(now + std::chrono::seconds{1});
}
BOOST_FIXTURE_TEST_CASE(register_force_accept, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
const COutPoint conflictingOutpoint =
createUtxo(chainman.ActiveChainstate(), key);
auto proofSeq10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
BOOST_CHECK(pm.registerProof(proofSeq30));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(!pm.isInConflictingPool(proofSeq30->getId()));
// proofSeq20 is a worse candidate than proofSeq30, so it goes to the
// conflicting pool.
BOOST_CHECK(!pm.registerProof(proofSeq20));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
// We can force the acceptance of proofSeq20
using RegistrationMode = avalanche::PeerManager::RegistrationMode;
BOOST_CHECK(pm.registerProof(proofSeq20, RegistrationMode::FORCE_ACCEPT));
BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
// We can also force the acceptance of a proof which is not already in the
// conflicting pool.
BOOST_CHECK(!pm.registerProof(proofSeq10));
BOOST_CHECK(!pm.exists(proofSeq10->getId()));
BOOST_CHECK(pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
BOOST_CHECK(!pm.exists(proofSeq20->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
// Attempting to register again fails, and has no impact on the pools
for (size_t i = 0; i < 10; i++) {
BOOST_CHECK(!pm.registerProof(proofSeq10));
BOOST_CHECK(
!pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
BOOST_CHECK(!pm.exists(proofSeq20->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
}
// Alternate between proofSeq10 and proofSeq30 a few times
for (size_t i = 0; i < 10; i++) {
BOOST_CHECK(
pm.registerProof(proofSeq30, RegistrationMode::FORCE_ACCEPT));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
BOOST_CHECK(
pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
}
}
BOOST_FIXTURE_TEST_CASE(evicted_proof, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
const COutPoint conflictingOutpoint =
createUtxo(chainman.ActiveChainstate(), key);
auto proofSeq10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
{
ProofRegistrationState state;
BOOST_CHECK(pm.registerProof(proofSeq30, state));
BOOST_CHECK(state.IsValid());
}
{
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proofSeq20, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::CONFLICTING);
}
{
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proofSeq10, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::REJECTED);
}
}
BOOST_AUTO_TEST_CASE(conflicting_proof_cooldown) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
const COutPoint conflictingOutpoint =
createUtxo(chainman.ActiveChainstate(), key);
auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
auto proofSeq40 = buildProofWithSequence(key, {conflictingOutpoint}, 40);
int64_t conflictingProofCooldown = 100;
gArgs.ForceSetArg("-avalancheconflictingproofcooldown",
strprintf("%d", conflictingProofCooldown));
int64_t now = GetTime();
auto increaseMockTime = [&](int64_t s) {
now += s;
SetMockTime(now);
};
increaseMockTime(0);
BOOST_CHECK(pm.registerProof(proofSeq30));
BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
auto checkRegistrationFailure = [&](const ProofRef &proof,
ProofRegistrationResult reason) {
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proof, state));
BOOST_CHECK(state.GetResult() == reason);
};
// Registering a conflicting proof will fail due to the conflicting proof
// cooldown
checkRegistrationFailure(proofSeq20,
ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
BOOST_CHECK(!pm.exists(proofSeq20->getId()));
// The cooldown applies as well if the proof is the favorite
checkRegistrationFailure(proofSeq40,
ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
BOOST_CHECK(!pm.exists(proofSeq40->getId()));
// Elapse the cooldown
increaseMockTime(conflictingProofCooldown);
// The proof will now be added to the conflicting pool
checkRegistrationFailure(proofSeq20, ProofRegistrationResult::CONFLICTING);
BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
// But no other
checkRegistrationFailure(proofSeq40,
ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
BOOST_CHECK(!pm.exists(proofSeq40->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
// Elapse the cooldown
increaseMockTime(conflictingProofCooldown);
// The proof will now be added to the conflicting pool
checkRegistrationFailure(proofSeq40, ProofRegistrationResult::CONFLICTING);
BOOST_CHECK(pm.isInConflictingPool(proofSeq40->getId()));
BOOST_CHECK(!pm.exists(proofSeq20->getId()));
gArgs.ClearForcedArg("-avalancheconflictingproofcooldown");
}
BOOST_FIXTURE_TEST_CASE(reject_proof, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
CChainState &active_chainstate = chainman.ActiveChainstate();
const COutPoint conflictingOutpoint =
createUtxo(active_chainstate, key, PROOF_DUST_THRESHOLD, 99);
const COutPoint immatureOutpoint = createUtxo(active_chainstate, key);
// The good, the bad and the ugly
auto proofSeq10 = buildProofWithOutpoints(
key, {conflictingOutpoint}, PROOF_DUST_THRESHOLD, key, 10, 99);
auto proofSeq20 = buildProofWithOutpoints(
key, {conflictingOutpoint}, PROOF_DUST_THRESHOLD, key, 20, 99);
auto orphan30 = buildProofWithSequence(
key, {conflictingOutpoint, immatureOutpoint}, 30);
BOOST_CHECK(pm.registerProof(proofSeq20));
BOOST_CHECK(!pm.registerProof(proofSeq10));
BOOST_CHECK(!pm.registerProof(orphan30));
BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));
BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
BOOST_CHECK(pm.isImmature(orphan30->getId()));
// Rejecting a proof that doesn't exist should fail
for (size_t i = 0; i < 10; i++) {
BOOST_CHECK(
!pm.rejectProof(avalanche::ProofId(GetRandHash()),
avalanche::PeerManager::RejectionMode::DEFAULT));
BOOST_CHECK(
!pm.rejectProof(avalanche::ProofId(GetRandHash()),
avalanche::PeerManager::RejectionMode::INVALIDATE));
}
auto checkRejectDefault = [&](const ProofId &proofid) {
BOOST_CHECK(pm.exists(proofid));
const bool isImmature = pm.isImmature(proofid);
BOOST_CHECK(pm.rejectProof(
proofid, avalanche::PeerManager::RejectionMode::DEFAULT));
BOOST_CHECK(!pm.isBoundToPeer(proofid));
BOOST_CHECK_EQUAL(pm.exists(proofid), !isImmature);
};
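// In DEFAULT mode the proof is unbound from its peer; it is only dropped
// from the pools entirely when it was immature.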
auto checkRejectInvalidate = [&](const ProofId &proofid) {
BOOST_CHECK(pm.exists(proofid));
BOOST_CHECK(pm.rejectProof(
proofid, avalanche::PeerManager::RejectionMode::INVALIDATE));
};
// Reject from the orphan pool
checkRejectDefault(orphan30->getId());
BOOST_CHECK(!pm.registerProof(orphan30));
BOOST_CHECK(pm.isImmature(orphan30->getId()));
checkRejectInvalidate(orphan30->getId());
// Reject from the conflicting pool
checkRejectDefault(proofSeq10->getId());
checkRejectInvalidate(proofSeq10->getId());
// Add again a proof to the conflicting pool
BOOST_CHECK(!pm.registerProof(proofSeq10));
BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
// Reject from the valid pool, default mode
checkRejectDefault(proofSeq20->getId());
// The conflicting proof should be promoted to a peer
BOOST_CHECK(!pm.isInConflictingPool(proofSeq10->getId()));
BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
// Reject from the valid pool, invalidate mode
checkRejectInvalidate(proofSeq10->getId());
// The conflicting proof should also be promoted to a peer
BOOST_CHECK(!pm.isInConflictingPool(proofSeq20->getId()));
BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));
}
BOOST_AUTO_TEST_CASE(should_request_more_nodes) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
// Set mock time so that proof registration time is predictable and
// testable.
SetMockTime(GetTime());
auto proof =
buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
BOOST_CHECK(pm.registerProof(proof));
// We have no nodes, so select node will fail and flag that we need more
// nodes
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
BOOST_CHECK(pm.shouldRequestMoreNodes());
for (size_t i = 0; i < 10; i++) {
// The flag will not trigger again until we fail to select nodes again
BOOST_CHECK(!pm.shouldRequestMoreNodes());
}
// Add a few nodes.
const ProofId &proofid = proof->getId();
for (size_t i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
}
auto cooldownTimepoint = std::chrono::steady_clock::now() + 10s;
// All the nodes can be selected once
for (size_t i = 0; i < 10; i++) {
NodeId selectedId = pm.selectNode();
BOOST_CHECK_NE(selectedId, NO_NODE);
BOOST_CHECK(pm.updateNextRequestTime(selectedId, cooldownTimepoint));
BOOST_CHECK(!pm.shouldRequestMoreNodes());
}
// All the nodes have been requested, so the next select will fail and the
// flag should trigger
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
BOOST_CHECK(pm.shouldRequestMoreNodes());
for (size_t i = 0; i < 10; i++) {
// The flag will not trigger again until we fail to select nodes again
BOOST_CHECK(!pm.shouldRequestMoreNodes());
}
// Make it possible to request a node again
BOOST_CHECK(pm.updateNextRequestTime(0, std::chrono::steady_clock::now()));
BOOST_CHECK_NE(pm.selectNode(), NO_NODE);
BOOST_CHECK(!pm.shouldRequestMoreNodes());
// Add another proof with no node attached
auto proof2 =
buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
BOOST_CHECK(pm.registerProof(proof2));
pm.cleanupDanglingProofs(ProofRef());
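// Passing a null ProofRef means we have no local proof, so no proof is
// exempted from the dangling cleanup.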
BOOST_CHECK(!pm.shouldRequestMoreNodes());
// After some time the proof will be considered dangling and more nodes will
// be requested.
SetMockTime(GetTime() + 15 * 60);
pm.cleanupDanglingProofs(ProofRef());
BOOST_CHECK(pm.shouldRequestMoreNodes());
for (size_t i = 0; i < 10; i++) {
// The flag will not trigger again until the condition is met again
BOOST_CHECK(!pm.shouldRequestMoreNodes());
}
// Attempt to register the dangling proof again. This should fail but
// trigger a request for more nodes.
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proof2, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::DANGLING);
BOOST_CHECK(pm.shouldRequestMoreNodes());
for (size_t i = 0; i < 10; i++) {
// The flag will not trigger again until the condition is met again
BOOST_CHECK(!pm.shouldRequestMoreNodes());
}
// Attach a node to that proof
BOOST_CHECK(!pm.addNode(11, proof2->getId()));
BOOST_CHECK(pm.registerProof(proof2));
SetMockTime(GetTime() + 15 * 60);
pm.cleanupDanglingProofs(ProofRef());
BOOST_CHECK(!pm.shouldRequestMoreNodes());
// Disconnect the node; the proof is dangling again
BOOST_CHECK(pm.removeNode(11));
pm.cleanupDanglingProofs(ProofRef());
BOOST_CHECK(pm.shouldRequestMoreNodes());
}
BOOST_AUTO_TEST_CASE(score_ordering) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
std::vector<uint32_t> expectedScores(10);
// Expect the peers to be ordered by descending score
std::generate(expectedScores.rbegin(), expectedScores.rend(),
[n = 1]() mutable { return n++ * MIN_VALID_PROOF_SCORE; });
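// The reverse-iterator fill yields {10, 9, ..., 1} * MIN_VALID_PROOF_SCORE,
// i.e. scores already sorted in descending order.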
std::vector<ProofRef> proofs;
proofs.reserve(expectedScores.size());
for (uint32_t score : expectedScores) {
proofs.push_back(buildRandomProof(chainman.ActiveChainstate(), score));
}
// Shuffle the proofs so they are registered in a random score order
Shuffle(proofs.begin(), proofs.end(), FastRandomContext());
for (auto &proof : proofs) {
BOOST_CHECK(pm.registerProof(proof));
}
auto peersScores = TestPeerManager::getOrderedScores(pm);
BOOST_CHECK_EQUAL_COLLECTIONS(peersScores.begin(), peersScores.end(),
expectedScores.begin(), expectedScores.end());
}
BOOST_FIXTURE_TEST_CASE(known_score_tracking, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const CKey key = CKey::MakeCompressedKey();
const Amount amount1(PROOF_DUST_THRESHOLD);
const Amount amount2(2 * PROOF_DUST_THRESHOLD);
CChainState &active_chainstate = chainman.ActiveChainstate();
const COutPoint peer1ConflictingOutput =
createUtxo(active_chainstate, key, amount1, 99);
const COutPoint peer1SecondaryOutpoint =
createUtxo(active_chainstate, key, amount2, 99);
auto peer1Proof1 = buildProof(
key,
{{peer1ConflictingOutput, amount1}, {peer1SecondaryOutpoint, amount2}},
key, 10, 99);
auto peer1Proof2 =
buildProof(key, {{peer1ConflictingOutput, amount1}}, key, 20, 99);
// Create a proof with an immature UTXO, so the proof will be orphaned
auto peer1Proof3 =
buildProof(key,
{{peer1ConflictingOutput, amount1},
{createUtxo(active_chainstate, key, amount1), amount1}},
key, 30);
const uint32_t peer1Score1 = Proof::amountToScore(amount1 + amount2);
const uint32_t peer1Score2 = Proof::amountToScore(amount1);
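// peer1Score1 matches peer1Proof1 (amount1 + amount2 staked), peer1Score2
// matches peer1Proof2 (amount1 only).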
// Add first peer and check that we have its score tracked
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);
BOOST_CHECK(pm.registerProof(peer1Proof2));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
// Ensure failing to add conflicting proofs doesn't affect the score; the
// first proof stays bound and counted
BOOST_CHECK(!pm.registerProof(peer1Proof1));
BOOST_CHECK(!pm.registerProof(peer1Proof3));
BOOST_CHECK(pm.isBoundToPeer(peer1Proof2->getId()));
BOOST_CHECK(pm.isInConflictingPool(peer1Proof1->getId()));
BOOST_CHECK(pm.isImmature(peer1Proof3->getId()));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
auto checkRejectDefault = [&](const ProofId &proofid) {
BOOST_CHECK(pm.exists(proofid));
const bool isImmature = pm.isImmature(proofid);
BOOST_CHECK(pm.rejectProof(
proofid, avalanche::PeerManager::RejectionMode::DEFAULT));
BOOST_CHECK(!pm.isBoundToPeer(proofid));
BOOST_CHECK_EQUAL(pm.exists(proofid), !isImmature);
};
auto checkRejectInvalidate = [&](const ProofId &proofid) {
BOOST_CHECK(pm.exists(proofid));
BOOST_CHECK(pm.rejectProof(
proofid, avalanche::PeerManager::RejectionMode::INVALIDATE));
};
// Rejecting from the orphan pool doesn't affect the tracked score
checkRejectDefault(peer1Proof3->getId());
BOOST_CHECK(!pm.registerProof(peer1Proof3));
BOOST_CHECK(pm.isImmature(peer1Proof3->getId()));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
checkRejectInvalidate(peer1Proof3->getId());
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
// Reject from the conflicting pool
checkRejectDefault(peer1Proof1->getId());
checkRejectInvalidate(peer1Proof1->getId());
// Add again a proof to the conflicting pool
BOOST_CHECK(!pm.registerProof(peer1Proof1));
BOOST_CHECK(pm.isInConflictingPool(peer1Proof1->getId()));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
// Reject from the valid pool, default mode
// Now the score should change as the new peer is promoted
checkRejectDefault(peer1Proof2->getId());
BOOST_CHECK(!pm.isInConflictingPool(peer1Proof1->getId()));
BOOST_CHECK(pm.isBoundToPeer(peer1Proof1->getId()));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score1);
// Reject from the valid pool, invalidate mode
// Now the score should change as the old peer is re-promoted
checkRejectInvalidate(peer1Proof1->getId());
// The conflicting proof should also be promoted to a peer
BOOST_CHECK(!pm.isInConflictingPool(peer1Proof2->getId()));
BOOST_CHECK(pm.isBoundToPeer(peer1Proof2->getId()));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
// Now add another peer and check that combined scores are correct
uint32_t peer2Score = 1 * MIN_VALID_PROOF_SCORE;
auto peer2Proof1 = buildRandomProof(active_chainstate, peer2Score, 99);
PeerId peerid2 = TestPeerManager::registerAndGetPeerId(pm, peer2Proof1);
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2 + peer2Score);
// Trying to remove a non-existent peer doesn't affect the score
BOOST_CHECK(!pm.removePeer(1234));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2 + peer2Score);
// Removing the new peer removes its score
BOOST_CHECK(pm.removePeer(peerid2));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
PeerId peerid1 =
TestPeerManager::getPeerIdForProofId(pm, peer1Proof2->getId());
BOOST_CHECK(pm.removePeer(peerid1));
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);
}
BOOST_AUTO_TEST_CASE(connected_score_tracking) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const auto checkScores = [&pm](uint32_t known, uint32_t connected) {
BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), known);
BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), connected);
};
// Start out with 0s
checkScores(0, 0);
CChainState &active_chainstate = chainman.ActiveChainstate();
// Create one peer without a node. Its score should be registered but not
// connected
uint32_t score1 = 10000000 * MIN_VALID_PROOF_SCORE;
auto proof1 = buildRandomProof(active_chainstate, score1);
PeerId peerid1 = TestPeerManager::registerAndGetPeerId(pm, proof1);
checkScores(score1, 0);
// Add nodes. We now have a connected score; no matter how many nodes we
// add, the score stays the same
const ProofId &proofid1 = proof1->getId();
const uint8_t nodesToAdd = 10;
for (int i = 0; i < nodesToAdd; i++) {
BOOST_CHECK(pm.addNode(i, proofid1));
checkScores(score1, score1);
}
// Remove all but 1 node and ensure the score doesn't change
for (int i = 0; i < nodesToAdd - 1; i++) {
BOOST_CHECK(pm.removeNode(i));
checkScores(score1, score1);
}
// Removing the last node should remove the score from the connected count
BOOST_CHECK(pm.removeNode(nodesToAdd - 1));
checkScores(score1, 0);
// Add 2 nodes to the peer and create peer2. Without a node, peer2 has no
// connected score, but after adding a node it does.
BOOST_CHECK(pm.addNode(0, proofid1));
BOOST_CHECK(pm.addNode(1, proofid1));
checkScores(score1, score1);
uint32_t score2 = 1 * MIN_VALID_PROOF_SCORE;
auto proof2 = buildRandomProof(active_chainstate, score2);
PeerId peerid2 = TestPeerManager::registerAndGetPeerId(pm, proof2);
checkScores(score1 + score2, score1);
BOOST_CHECK(pm.addNode(2, proof2->getId()));
checkScores(score1 + score2, score1 + score2);
// The first peer has two nodes left. Remove one and nothing happens; remove
// the other and its score is no longer in the connected counter.
BOOST_CHECK(pm.removeNode(0));
checkScores(score1 + score2, score1 + score2);
BOOST_CHECK(pm.removeNode(1));
checkScores(score1 + score2, score2);
// Removing a peer with no allocated score has no effect.
BOOST_CHECK(pm.removePeer(peerid1));
checkScores(score2, score2);
// Removing the second peer's node removes its allocated score.
BOOST_CHECK(pm.removeNode(2));
checkScores(score2, 0);
// Removing the second peer takes us back to 0.
BOOST_CHECK(pm.removePeer(peerid2));
checkScores(0, 0);
// Add 2 peers with nodes and remove them without removing the nodes first.
// Both score counters should be reduced by each peer's score when it's
// removed.
peerid1 = TestPeerManager::registerAndGetPeerId(pm, proof1);
checkScores(score1, 0);
peerid2 = TestPeerManager::registerAndGetPeerId(pm, proof2);
checkScores(score1 + score2, 0);
BOOST_CHECK(pm.addNode(0, proof1->getId()));
checkScores(score1 + score2, score1);
BOOST_CHECK(pm.addNode(1, proof2->getId()));
checkScores(score1 + score2, score1 + score2);
BOOST_CHECK(pm.removePeer(peerid2));
checkScores(score1, score1);
BOOST_CHECK(pm.removePeer(peerid1));
checkScores(0, 0);
}
BOOST_FIXTURE_TEST_CASE(proof_radix_tree, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
gArgs.ForceSetArg("-enableavalancheproofreplacement", "1");
struct ProofComparatorById {
bool operator()(const ProofRef &lhs, const ProofRef &rhs) const {
return lhs->getId() < rhs->getId();
};
};
using ProofSetById = std::set<ProofRef, ProofComparatorById>;
// Maintain a list of the expected proofs through this test
ProofSetById expectedProofs;
auto matchExpectedContent = [&](const auto &tree) {
auto it = expectedProofs.begin();
return tree.forEachLeaf([&](auto pLeaf) {
return it != expectedProofs.end() &&
pLeaf->getId() == (*it++)->getId();
});
};
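// This assumes forEachLeaf visits the leaves in ascending proof id order,
// matching the iteration order of the id-sorted set above.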
CKey key = CKey::MakeCompressedKey();
const int64_t sequence = 10;
CChainState &active_chainstate = chainman.ActiveChainstate();
// Add some initial proofs
for (size_t i = 0; i < 10; i++) {
auto outpoint = createUtxo(active_chainstate, key);
auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
BOOST_CHECK(pm.registerProof(proof));
expectedProofs.insert(std::move(proof));
}
const auto &treeRef = pm.getShareableProofsSnapshot();
BOOST_CHECK(matchExpectedContent(treeRef));
// Create a copy
auto tree = pm.getShareableProofsSnapshot();
// Adding more proofs doesn't change the tree...
ProofSetById addedProofs;
std::vector<COutPoint> outpointsToSpend;
for (size_t i = 0; i < 10; i++) {
auto outpoint = createUtxo(active_chainstate, key);
auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
BOOST_CHECK(pm.registerProof(proof));
addedProofs.insert(std::move(proof));
outpointsToSpend.push_back(std::move(outpoint));
}
BOOST_CHECK(matchExpectedContent(tree));
// ...until we get a new copy
tree = pm.getShareableProofsSnapshot();
expectedProofs.insert(addedProofs.begin(), addedProofs.end());
BOOST_CHECK(matchExpectedContent(tree));
// Spend some coins to make the associated proofs invalid
{
LOCK(cs_main);
CCoinsViewCache &coins = active_chainstate.CoinsTip();
for (const auto &outpoint : outpointsToSpend) {
coins.SpendCoin(outpoint);
}
}
pm.updatedBlockTip();
// This doesn't change the tree...
BOOST_CHECK(matchExpectedContent(tree));
// ...until we get a new copy
tree = pm.getShareableProofsSnapshot();
for (const auto &proof : addedProofs) {
BOOST_CHECK_EQUAL(expectedProofs.erase(proof), 1);
}
BOOST_CHECK(matchExpectedContent(tree));
// Add some more proofs for which we will create conflicts
std::vector<ProofRef> conflictingProofs;
std::vector<COutPoint> conflictingOutpoints;
for (size_t i = 0; i < 10; i++) {
auto outpoint = createUtxo(active_chainstate, key);
auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
BOOST_CHECK(pm.registerProof(proof));
conflictingProofs.push_back(std::move(proof));
conflictingOutpoints.push_back(std::move(outpoint));
}
tree = pm.getShareableProofsSnapshot();
expectedProofs.insert(conflictingProofs.begin(), conflictingProofs.end());
BOOST_CHECK(matchExpectedContent(tree));
// Build a bunch of conflicting proofs, half better, half worse
for (size_t i = 0; i < 10; i += 2) {
// The worse proof is not added to the expected set
BOOST_CHECK(!pm.registerProof(buildProofWithSequence(
key, {{conflictingOutpoints[i]}}, sequence - 1)));
// But the better proof should replace its conflicting one
auto replacementProof = buildProofWithSequence(
key, {{conflictingOutpoints[i + 1]}}, sequence + 1);
BOOST_CHECK(pm.registerProof(replacementProof));
BOOST_CHECK_EQUAL(expectedProofs.erase(conflictingProofs[i + 1]), 1);
BOOST_CHECK(expectedProofs.insert(replacementProof).second);
}
tree = pm.getShareableProofsSnapshot();
BOOST_CHECK(matchExpectedContent(tree));
// Check for consistency
pm.verify();
gArgs.ClearForcedArg("-enableavalancheproofreplacement");
}
BOOST_AUTO_TEST_CASE(received_avaproofs) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto addNode = [&](NodeId nodeid) {
auto proof = buildRandomProof(chainman.ActiveChainstate(),
MIN_VALID_PROOF_SCORE);
BOOST_CHECK(pm.registerProof(proof));
BOOST_CHECK(pm.addNode(nodeid, proof->getId()));
};
for (NodeId nodeid = 0; nodeid < 10; nodeid++) {
// Node doesn't exist
BOOST_CHECK(!pm.latchAvaproofsSent(nodeid));
addNode(nodeid);
BOOST_CHECK(pm.latchAvaproofsSent(nodeid));
// The flag is already set
BOOST_CHECK(!pm.latchAvaproofsSent(nodeid));
}
}
BOOST_FIXTURE_TEST_CASE(cleanup_dangling_proof, NoCoolDownFixture) {
ChainstateManager &chainman = *Assert(m_node.chainman);
gArgs.ForceSetArg("-enableavalancheproofreplacement", "1");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
const auto now = GetTime<std::chrono::seconds>();
auto mocktime = now;
auto elapseTime = [&](std::chrono::seconds seconds) {
mocktime += seconds;
SetMockTime(mocktime.count());
};
elapseTime(0s);
const CKey key = CKey::MakeCompressedKey();
const size_t numProofs = 10;
std::vector<COutPoint> outpoints(numProofs);
std::vector<ProofRef> proofs(numProofs);
std::vector<ProofRef> conflictingProofs(numProofs);
for (size_t i = 0; i < numProofs; i++) {
outpoints[i] = createUtxo(chainman.ActiveChainstate(), key);
proofs[i] = buildProofWithSequence(key, {outpoints[i]}, 2);
conflictingProofs[i] = buildProofWithSequence(key, {outpoints[i]}, 1);
BOOST_CHECK(pm.registerProof(proofs[i]));
BOOST_CHECK(pm.isBoundToPeer(proofs[i]->getId()));
BOOST_CHECK(!pm.registerProof(conflictingProofs[i]));
BOOST_CHECK(pm.isInConflictingPool(conflictingProofs[i]->getId()));
if (i % 2) {
// Odd indexes get a node attached to them
BOOST_CHECK(pm.addNode(i, proofs[i]->getId()));
}
BOOST_CHECK_EQUAL(pm.forPeer(proofs[i]->getId(),
[&](const avalanche::Peer &peer) {
return peer.node_count;
}),
i % 2);
elapseTime(1s);
}
// No proof expired yet
TestPeerManager::cleanupDanglingProofs(pm);
for (size_t i = 0; i < numProofs; i++) {
BOOST_CHECK(pm.isBoundToPeer(proofs[i]->getId()));
BOOST_CHECK(pm.isInConflictingPool(conflictingProofs[i]->getId()));
}
// Elapse the dangling timeout
elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
TestPeerManager::cleanupDanglingProofs(pm);
for (size_t i = 0; i < numProofs; i++) {
const bool hasNodeAttached = i % 2;
// Only the peers with no nodes attached are getting discarded
BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofs[i]->getId()),
hasNodeAttached);
BOOST_CHECK_EQUAL(!pm.exists(proofs[i]->getId()), !hasNodeAttached);
// The proofs conflicting with the discarded ones are promoted out of the
// conflicting pool
BOOST_CHECK_EQUAL(pm.isInConflictingPool(conflictingProofs[i]->getId()),
hasNodeAttached);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
!hasNodeAttached);
}
// Attach a node to the first conflicting proof, which has been promoted
BOOST_CHECK(pm.addNode(42, conflictingProofs[0]->getId()));
BOOST_CHECK(pm.forPeer(
conflictingProofs[0]->getId(),
[&](const avalanche::Peer &peer) { return peer.node_count == 1; }));
// Elapse the dangling timeout again
elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
TestPeerManager::cleanupDanglingProofs(pm);
for (size_t i = 0; i < numProofs; i++) {
const bool hasNodeAttached = i % 2;
// The initial peers with a node attached are still there
BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofs[i]->getId()),
hasNodeAttached);
BOOST_CHECK_EQUAL(!pm.exists(proofs[i]->getId()), !hasNodeAttached);
// This time the previously promoted conflicting proofs are evicted
// because they have no node attached, except for index 0.
BOOST_CHECK_EQUAL(pm.exists(conflictingProofs[i]->getId()),
hasNodeAttached || i == 0);
BOOST_CHECK_EQUAL(pm.isInConflictingPool(conflictingProofs[i]->getId()),
hasNodeAttached);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
i == 0);
}
// Disconnect all the nodes
for (size_t i = 1; i < numProofs; i += 2) {
BOOST_CHECK(pm.removeNode(i));
BOOST_CHECK(
pm.forPeer(proofs[i]->getId(), [&](const avalanche::Peer &peer) {
return peer.node_count == 0;
}));
}
BOOST_CHECK(pm.removeNode(42));
BOOST_CHECK(pm.forPeer(
conflictingProofs[0]->getId(),
[&](const avalanche::Peer &peer) { return peer.node_count == 0; }));
TestPeerManager::cleanupDanglingProofs(pm);
for (size_t i = 0; i < numProofs; i++) {
const bool hadNodeAttached = i % 2;
// All initially valid proofs have now been discarded
BOOST_CHECK(!pm.exists(proofs[i]->getId()));
// The remaining conflicting proofs are promoted
BOOST_CHECK_EQUAL(!pm.exists(conflictingProofs[i]->getId()),
!hadNodeAttached);
BOOST_CHECK(!pm.isInConflictingPool(conflictingProofs[i]->getId()));
BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
hadNodeAttached);
}
// Elapse the timeout for the newly promoted conflicting proofs
elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
// Use the first conflicting proof as our local proof
TestPeerManager::cleanupDanglingProofs(pm, conflictingProofs[1]);
for (size_t i = 0; i < numProofs; i++) {
// All other proofs have now been discarded
BOOST_CHECK(!pm.exists(proofs[i]->getId()));
BOOST_CHECK_EQUAL(!pm.exists(conflictingProofs[i]->getId()), i != 1);
}
// Cleanup one more time with no local proof
TestPeerManager::cleanupDanglingProofs(pm);
for (size_t i = 0; i < numProofs; i++) {
// All proofs have finally been discarded
BOOST_CHECK(!pm.exists(proofs[i]->getId()));
BOOST_CHECK(!pm.exists(conflictingProofs[i]->getId()));
}
gArgs.ClearForcedArg("-enableavalancheproofreplacement");
}
BOOST_AUTO_TEST_CASE(register_proof_missing_utxo) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CKey key = CKey::MakeCompressedKey();
auto proof = buildProofWithOutpoints(key, {{TxId(GetRandHash()), 0}},
PROOF_DUST_THRESHOLD);
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proof, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::MISSING_UTXO);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/functional/abc_p2p_avalanche_proof_voting.py b/test/functional/abc_p2p_avalanche_proof_voting.py
index 547bd3421..3af06462d 100755
--- a/test/functional/abc_p2p_avalanche_proof_voting.py
+++ b/test/functional/abc_p2p_avalanche_proof_voting.py
@@ -1,511 +1,511 @@
#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the resolution of conflicting proofs via avalanche."""
import time
from test_framework.avatools import (
avalanche_proof_from_hex,
create_coinbase_stakes,
gen_proof,
get_ava_p2p_interface,
get_proof_ids,
)
from test_framework.key import ECPubKey
from test_framework.messages import (
MSG_AVA_PROOF,
AvalancheProofVoteResponse,
AvalancheVote,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
try_rpc,
uint256_hex,
)
from test_framework.wallet_util import bytes_to_wif
QUORUM_NODE_COUNT = 16
class AvalancheProofVotingTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.conflicting_proof_cooldown = 100
self.peer_replacement_cooldown = 2000
self.extra_args = [
['-enableavalanche=1',
'-avaproofstakeutxodustthreshold=1000000',
'-enableavalancheproofreplacement=1',
'-avaproofstakeutxoconfirmations=2',
f'-avalancheconflictingproofcooldown={self.conflicting_proof_cooldown}',
f'-avalanchepeerreplacementcooldown={self.peer_replacement_cooldown}',
'-avacooldown=0',
'-avastalevotethreshold=140',
'-avastalevotefactor=1'],
]
self.supports_cli = False
# Build a fake quorum of nodes.
def get_quorum(self, node):
quorum = [get_ava_p2p_interface(node)
for _ in range(0, QUORUM_NODE_COUNT)]
for n in quorum:
success = node.addavalanchenode(
n.nodeid,
self.privkey.get_pubkey().get_bytes().hex(),
self.quorum_proof.serialize().hex(),
)
assert success is True
return quorum
def can_find_proof_in_poll(self, hash, response):
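# Scan each quorum node's pending poll: vote ACTIVE on every inv except the
# target hash, which gets the supplied response. Returns True if the target
# was seen in at least one poll.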
found_hash = False
for n in self.quorum:
poll = n.get_avapoll_if_available()
# That node has not received a poll
if poll is None:
continue
# We got a poll, check for the hash and respond
votes = []
for inv in poll.invs:
# Vote yes to everything
r = AvalancheProofVoteResponse.ACTIVE
# Look for what we expect
if inv.hash == hash:
r = response
found_hash = True
votes.append(AvalancheVote(r, inv.hash))
n.send_avaresponse(poll.round, votes, self.privkey)
return found_hash
@staticmethod
def send_proof(from_peer, proof_hex):
proof = avalanche_proof_from_hex(proof_hex)
from_peer.send_avaproof(proof)
return proof.proofid
def send_and_check_for_polling(self, peer,
proof_hex, response=AvalancheProofVoteResponse.ACTIVE):
proofid = self.send_proof(peer, proof_hex)
self.wait_until(lambda: self.can_find_proof_in_poll(proofid, response))
def build_conflicting_proof(self, node, sequence):
return node.buildavalancheproof(
sequence, 0, self.privkey_wif, self.conflicting_stakes)
def run_test(self):
node = self.nodes[0]
self.privkey, self.quorum_proof = gen_proof(node)
self.privkey_wif = bytes_to_wif(self.privkey.get_bytes())
# Make the quorum proof mature before preparing the quorum
node.generate(1)
self.quorum = self.get_quorum(node)
addrkey0 = node.get_deterministic_priv_key()
blockhash = node.generatetoaddress(10, addrkey0.address)
self.conflicting_stakes = create_coinbase_stakes(
node, blockhash[5:9], addrkey0.key)
self.immature_stakes = create_coinbase_stakes(
node, blockhash[9:], addrkey0.key)
self.poll_tests(node)
self.update_tests(node)
self.vote_tests(node)
self.stale_proof_tests(node)
self.unorphan_poll_tests(node)
def poll_tests(self, node):
proof_seq10 = self.build_conflicting_proof(node, 10)
proof_seq20 = self.build_conflicting_proof(node, 20)
proof_seq30 = self.build_conflicting_proof(node, 30)
proof_seq40 = self.build_conflicting_proof(node, 40)
orphan = node.buildavalancheproof(
100, 0, self.privkey_wif, self.immature_stakes)
no_stake = node.buildavalancheproof(
200, 2000000000, self.privkey_wif, []
)
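# A proof with no stakes is invalid; sending it should get the peer banned
# (checked below)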
# Get the key so we can verify signatures.
avakey = ECPubKey()
avakey.set(bytes.fromhex(node.getavalanchekey()))
self.log.info("Trigger polling from the node...")
peer = get_ava_p2p_interface(node)
mock_time = int(time.time())
node.setmocktime(mock_time)
self.log.info("Check we poll for valid proof")
self.send_and_check_for_polling(peer, proof_seq30)
self.log.info(
"Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof not the favorite")
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
peer.send_avaproof(avalanche_proof_from_hex(proof_seq20))
self.log.info(
"Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof is the favorite")
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
peer.send_avaproof(avalanche_proof_from_hex(proof_seq40))
self.log.info(
"Check we poll for conflicting proof if the proof is not the favorite")
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(
peer, proof_seq20, response=AvalancheProofVoteResponse.REJECTED)
self.log.info(
"Check we poll for conflicting proof if the proof is the favorite")
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(peer, proof_seq40)
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
- self.log.info("Check we don't poll for orphans")
- with node.assert_debug_log(["Not polling the avalanche proof (orphan-proof)"]):
+ self.log.info("Check we don't poll for immature proofs")
+ with node.assert_debug_log(["Not polling the avalanche proof (immature-proof)"]):
peer.send_avaproof(avalanche_proof_from_hex(orphan))
self.log.info("Check we don't poll for proofs that get rejected")
with node.assert_debug_log(["Not polling the avalanche proof (rejected-proof)"]):
peer.send_avaproof(avalanche_proof_from_hex(proof_seq10))
self.log.info("Check we don't poll for invalid proofs and get banned")
with node.assert_debug_log(["Misbehaving", "invalid-proof"]):
peer.send_avaproof(avalanche_proof_from_hex(no_stake))
peer.wait_for_disconnect()
self.log.info("We don't poll for proofs if replacement is disabled")
self.restart_node(
0,
extra_args=self.extra_args[0] +
['-enableavalancheproofreplacement=0'])
peer = get_ava_p2p_interface(node)
with node.assert_debug_log(["Not polling the avalanche proof (not-worth-polling)"]):
peer.send_avaproof(avalanche_proof_from_hex(proof_seq10))
def update_tests(self, node):
# Restart the node to get rid of in-flight requests
self.restart_node(0)
mock_time = int(time.time())
node.setmocktime(mock_time)
self.quorum = self.get_quorum(node)
peer = get_ava_p2p_interface(node)
proof_seq30 = self.build_conflicting_proof(node, 30)
proof_seq40 = self.build_conflicting_proof(node, 40)
proof_seq50 = self.build_conflicting_proof(node, 50)
proofid_seq30 = avalanche_proof_from_hex(proof_seq30).proofid
proofid_seq40 = avalanche_proof_from_hex(proof_seq40).proofid
proofid_seq50 = avalanche_proof_from_hex(proof_seq50).proofid
node.sendavalancheproof(proof_seq40)
self.wait_until(lambda: proofid_seq40 in get_proof_ids(node))
assert proofid_seq40 in get_proof_ids(node)
assert proofid_seq30 not in get_proof_ids(node)
self.log.info("Test proof acceptance")
def accept_proof(proofid):
self.wait_until(lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheProofVoteResponse.ACTIVE), timeout=5)
return proofid in get_proof_ids(node)
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(peer, proof_seq30)
# Let the quorum vote for it
self.wait_until(lambda: accept_proof(proofid_seq30))
assert proofid_seq40 not in get_proof_ids(node)
self.log.info("Test the peer replacement rate limit")
def vote_until_finalized(proofid):
self.can_find_proof_in_poll(
proofid, response=AvalancheProofVoteResponse.ACTIVE)
return node.getrawavalancheproof(
uint256_hex(proofid)).get("finalized", False)
# Wait until proof_seq30 is finalized
self.wait_until(lambda: vote_until_finalized(proofid_seq30))
# Advancing only by the conflicting proof cooldown is not enough for peer
# replacement
assert self.conflicting_proof_cooldown < self.peer_replacement_cooldown
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
peer = get_ava_p2p_interface(node)
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
self.send_proof(peer, proof_seq50)
mock_time += self.peer_replacement_cooldown
node.setmocktime(mock_time)
self.log.info("Test proof rejection")
self.send_proof(peer, proof_seq50)
self.wait_until(lambda: proofid_seq50 in get_proof_ids(node))
assert proofid_seq40 not in get_proof_ids(node)
def reject_proof(proofid):
self.wait_until(
lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheProofVoteResponse.REJECTED))
return proofid not in get_proof_ids(node)
with node.assert_debug_log(
[f"Avalanche rejected proof {uint256_hex(proofid_seq50)}"],
["Failed to reject proof"]
):
self.wait_until(lambda: reject_proof(proofid_seq50))
assert proofid_seq50 not in get_proof_ids(node)
assert proofid_seq40 in get_proof_ids(node)
self.log.info("Test proof invalidation")
def invalidate_proof(proofid):
self.wait_until(
lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheProofVoteResponse.REJECTED))
return try_rpc(-8, "Proof not found",
node.getrawavalancheproof, uint256_hex(proofid))
with node.assert_debug_log(
[f"Avalanche invalidated proof {uint256_hex(proofid_seq50)}"],
["Failed to reject proof"]
):
self.wait_until(lambda: invalidate_proof(proofid_seq50))
self.log.info("The node will now ignore the invalid proof")
for i in range(5):
with node.assert_debug_log(["received: avaproof"]):
self.send_proof(peer, proof_seq50)
assert_raises_rpc_error(-8,
"Proof not found",
node.getrawavalancheproof,
uint256_hex(proofid_seq50))
def vote_tests(self, node):
self.restart_node(0, extra_args=['-enableavalanche=1',
'-avaproofstakeutxodustthreshold=1000000',
'-avaproofstakeutxoconfirmations=2',
'-avacooldown=0',
'-avalancheconflictingproofcooldown=0',
'-whitelist=noban@127.0.0.1', ])
self.get_quorum(node)
ava_node = get_ava_p2p_interface(node)
# Generate coinbases to use for stakes
stakes_key = node.get_deterministic_priv_key()
blocks = node.generatetoaddress(4, stakes_key.address)
# Get the ava key so we can verify signatures.
ava_key = ECPubKey()
ava_key.set(bytes.fromhex(node.getavalanchekey()))
def create_proof(stakes):
proof = node.buildavalancheproof(11, 12, self.privkey_wif, stakes)
proof_id = avalanche_proof_from_hex(proof).proofid
return proof, proof_id
# proof_0 is valid right now
stakes_0 = create_coinbase_stakes(node, [blocks[0]], stakes_key.key)
proof_0, proof_0_id = create_proof(stakes_0)
# proof_1 is valid right now, and from different stakes
stakes_1 = create_coinbase_stakes(node, [blocks[1]], stakes_key.key)
proof_1, proof_1_id = create_proof(stakes_1)
# proof_2 is an orphan because the stake UTXO is immature
stakes_2 = create_coinbase_stakes(node, [blocks[3]], stakes_key.key)
proof_2, proof_2_id = create_proof(stakes_2)
# proof_3 conflicts with proof_0 and proof_1
stakes_3 = create_coinbase_stakes(
node, [blocks[0], blocks[1]], stakes_key.key)
proof_3, proof_3_id = create_proof(stakes_3)
# proof_4 is invalid and should be rejected
stakes_4 = create_coinbase_stakes(node, [blocks[2]], stakes_key.key)
stakes_4[0]['amount'] -= 100000
proof_4, proof_4_id = create_proof(stakes_4)
# Create a helper to issue a poll and validate the responses
def poll_assert_response(expected):
# Issue a poll for each proof
self.log.info("Trigger polling from the node...")
ava_node.send_poll(
[proof_0_id, proof_1_id, proof_2_id, proof_3_id, proof_4_id],
MSG_AVA_PROOF)
response = ava_node.wait_for_avaresponse()
r = response.response
# Verify signature
assert ava_key.verify_schnorr(response.sig, r.get_hash())
# Verify votes
votes = r.votes
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
# Check that all proofs start unknown
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# Send the first proof. The node should now respond that it's accepted
node.sendavalancheproof(proof_0)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# Send and check the second proof. The node should now respond that it's
# accepted
node.sendavalancheproof(proof_1)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The next proof has an immature stake, so it should go to the orphan pool
ava_node.send_proof(avalanche_proof_from_hex(proof_2))
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The next proof should be rejected and marked as a conflicting proof
assert_raises_rpc_error(-8,
"The proof has conflicting utxo with an existing proof",
node.sendavalancheproof, proof_3)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.CONFLICT, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The final proof should be permanently rejected for being completely
# invalid
ava_node.send_proof(avalanche_proof_from_hex(proof_4))
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.CONFLICT, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.REJECTED, proof_4_id)])
def stale_proof_tests(self, node):
# Restart the node to get rid of in-flight requests
self.restart_node(0)
mock_time = int(time.time())
node.setmocktime(mock_time)
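# Control the clock so the conflicting proof cooldown can be advanced
# deterministically below.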
self.quorum = self.get_quorum(node)
peer = get_ava_p2p_interface(node)
proof_seq1 = self.build_conflicting_proof(node, 1)
proof_seq2 = self.build_conflicting_proof(node, 2)
proofid_seq1 = avalanche_proof_from_hex(proof_seq1).proofid
proofid_seq2 = avalanche_proof_from_hex(proof_seq2).proofid
node.sendavalancheproof(proof_seq2)
self.wait_until(lambda: proofid_seq2 in get_proof_ids(node))
assert proofid_seq2 in get_proof_ids(node)
assert proofid_seq1 not in get_proof_ids(node)
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(
peer, proof_seq1, response=AvalancheProofVoteResponse.UNKNOWN)
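# Helper: keep answering polls with UNKNOWN until the node stalls the proof
# and drops it.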
def vote_until_dropped(proofid):
self.can_find_proof_in_poll(
proofid, response=AvalancheProofVoteResponse.UNKNOWN)
return try_rpc(-8, "Proof not found",
node.getrawavalancheproof, uint256_hex(proofid))
with node.assert_debug_log([f"Avalanche stalled proof {uint256_hex(proofid_seq1)}"]):
self.wait_until(lambda: vote_until_dropped(proofid_seq1))
# Verify that proof_seq2 was not replaced
assert proofid_seq2 in get_proof_ids(node)
assert proofid_seq1 not in get_proof_ids(node)
# Poll the node from the peer; it should respond with the expected votes for both proofs
peer.send_poll([proofid_seq1, proofid_seq2], MSG_AVA_PROOF)
response = peer.wait_for_avaresponse()
assert repr(response.response.votes) == repr([
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proofid_seq1),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proofid_seq2)])
def unorphan_poll_tests(self, node):
# Restart the node with appropriate flags for this test
self.restart_node(0, extra_args=[
'-enableavalanche=1',
'-avaproofstakeutxodustthreshold=1000000',
'-enableavalancheproofreplacement=1',
'-avaproofstakeutxoconfirmations=2',
'-avalancheconflictingproofcooldown=0',
'-avacooldown=0',
])
self.quorum = self.get_quorum(node)
peer = get_ava_p2p_interface(node)
_, immature_proof = gen_proof(node)
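# The freshly created stake has only one confirmation, so the proof is still
# immature under -avaproofstakeutxoconfirmations=2.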
- self.log.info("Orphan proofs are not polled")
+ self.log.info("Immature proofs are not polled")
- with node.assert_debug_log(["Not polling the avalanche proof (orphan-proof)"]):
+ with node.assert_debug_log(["Not polling the avalanche proof (immature-proof)"]):
peer.send_avaproof(immature_proof)
self.log.info("Unorphaned proofs are polled")
node.generate(1)
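# One more block matures the stake, so the proof can now be polled.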
self.send_and_check_for_polling(peer, immature_proof.serialize().hex())
if __name__ == '__main__':
AvalancheProofVotingTest().main()
