Page MenuHomePhabricator

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index f92bd8294b..bee490a3d9 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,9 +1,11 @@
# Bitcoin ABC 0.29.7 Release Notes
Bitcoin ABC version 0.29.7 is now available from:
<https://download.bitcoinabc.org/0.29.7/>
This release includes the following features and fixes:
- The `-deprecatedrpc=getstakingreward` option had been deprecated for
several months and has now been removed completely.
+ - The `getstakingreward` RPC now returns the `proofid` of the staking reward
+ winner in addition to the payout script.
diff --git a/src/avalanche/peermanager.cpp b/src/avalanche/peermanager.cpp
index df2dd1ab28..3b074f5787 100644
--- a/src/avalanche/peermanager.cpp
+++ b/src/avalanche/peermanager.cpp
@@ -1,1376 +1,1377 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/peermanager.h>
#include <arith_uint256.h>
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/validation.h>
#include <cashaddrenc.h>
#include <common/args.h>
#include <consensus/activation.h>
#include <logging.h>
#include <random.h>
#include <scheduler.h>
#include <uint256.h>
#include <util/fastrange.h>
#include <util/fs_helpers.h>
#include <util/time.h>
#include <validation.h> // For ChainstateManager
#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>
namespace avalanche {
static constexpr uint64_t PEERS_DUMP_VERSION{1};
// Attach a node to the peer owning `proofid`. When the proof is unknown the
// node is parked in the pending set until the proof gets registered.
// Returns true only if the node could be bound to an existing peer.
bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
    auto &proofIndex = peers.get<by_proofid>();
    auto peerIt = proofIndex.find(proofid);
    if (peerIt != proofIndex.end()) {
        // A peer exists for this proof: bind the node to it directly.
        return addOrUpdateNode(peers.project<0>(peerIt), nodeid);
    }
    // Unknown proof. If the node already existed it is switching to a proof
    // we don't know yet, so drop it first to avoid having it both active and
    // pending at the same time.
    removeNode(nodeid);
    pendingNodes.emplace(proofid, nodeid);
    return false;
}
// Bind a node to the peer pointed to by `it`, transferring it away from any
// previous peer or the pending set, and make the peer's proof shareable.
// Returns false only if the node container rejects the insert/modify.
bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
    assert(it != peers.end());
    const PeerId peerid = it->peerid;
    auto nit = nodes.find(nodeid);
    if (nit == nodes.end()) {
        // Unknown node: create a fresh entry bound to this peer.
        if (!nodes.emplace(nodeid, peerid).second) {
            return false;
        }
    } else {
        const PeerId oldpeerid = nit->peerid;
        // Re-point the existing node at the new peer.
        if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
            return false;
        }
        // We actually have this node already, we need to update it.
        // Release the reference the node held on its previous peer.
        bool success = removeNodeFromPeer(peers.find(oldpeerid));
        assert(success);
    }
    // Then increase the node counter, and create the slot if needed
    bool success = addNodeToPeer(it);
    assert(success);
    // If the added node was in the pending set, remove it
    pendingNodes.get<by_nodeid>().erase(nodeid);
    // If the proof was in the dangling pool, remove it
    const ProofId &proofid = it->getProofId();
    if (danglingProofPool.getProof(proofid)) {
        danglingProofPool.removeProof(proofid);
    }
    // We know for sure there is at least 1 node. Note that this can fail if
    // there is more than 1, in this case it's a no-op.
    shareableProofs.insert(it->proof);
    return true;
}
// Account one more node for the peer. The first node attached triggers slot
// allocation, making the peer selectable for polling and adding its score to
// connectedPeersScore.
bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
    assert(it != peers.end());
    return peers.modify(it, [&](Peer &p) {
        if (p.node_count++ > 0) {
            // We are done.
            return;
        }
        // We need to allocate this peer.
        p.index = uint32_t(slots.size());
        const uint32_t score = p.getScore();
        const uint64_t start = slotCount;
        slots.emplace_back(start, score, it->peerid);
        slotCount = start + score;
        // Add to our allocated score when we allocate a new peer in the slots
        connectedPeersScore += score;
    });
}
// Remove a node entirely: purge its remote proof records, drop it from the
// pending set if it was pending, otherwise erase it and release its
// reference on the owning peer. Returns false for unknown nodes.
bool PeerManager::removeNode(NodeId nodeid) {
    // Remove all the remote proofs from this node
    auto &remoteProofsView = remoteProofs.get<by_nodeid>();
    auto [begin, end] = remoteProofsView.equal_range(nodeid);
    remoteProofsView.erase(begin, end);
    if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
        // If this was a pending node, there is nothing else to do.
        return true;
    }
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        // Neither pending nor active: unknown node.
        return false;
    }
    const PeerId peerid = it->peerid;
    nodes.erase(it);
    // Keep the track of the reference count.
    bool success = removeNodeFromPeer(peers.find(peerid));
    assert(success);
    return true;
}
// Decrease a peer's node refcount by `count`. When the count reaches zero
// the peer's slot is freed (truncating the slot array if it was the last
// slot, otherwise marking it dead and accounting fragmentation) and its
// proof is dropped from the shareable radix tree, unless it is our local
// proof. A peers.end() iterator is tolerated and treated as success.
bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
                                     uint32_t count) {
    // It is possible for nodes to be dangling. If there was an inflight query
    // when the peer gets removed, the node was not erased. In this case there
    // is nothing to do.
    if (it == peers.end()) {
        return true;
    }
    assert(count <= it->node_count);
    if (count == 0) {
        // This is a NOOP.
        return false;
    }
    const uint32_t new_count = it->node_count - count;
    if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
        return false;
    }
    if (new_count > 0) {
        // We are done.
        return true;
    }
    // There are no more nodes left, we need to clean up. Remove from the radix
    // tree (unless it's our local proof), subtract allocated score and remove
    // from slots.
    if (!localProof || it->getProofId() != localProof->getId()) {
        const auto removed = shareableProofs.remove(it->getProofId());
        assert(removed);
    }
    const size_t i = it->index;
    assert(i < slots.size());
    assert(connectedPeersScore >= slots[i].getScore());
    connectedPeersScore -= slots[i].getScore();
    if (i + 1 == slots.size()) {
        // Last slot: shrink the array and the allocated range directly.
        slots.pop_back();
        slotCount = slots.empty() ? 0 : slots.back().getStop();
    } else {
        // Middle slot: mark it dead; the space is reclaimed by compact().
        fragmentation += slots[i].getScore();
        slots[i] = slots[i].withPeerId(NO_PEER);
    }
    return true;
}
// Record the next time `nodeid` may be polled again.
// Returns false when the node is unknown or the update fails.
bool PeerManager::updateNextRequestTime(NodeId nodeid,
                                        SteadyMilliseconds timeout) {
    const auto nodeIt = nodes.find(nodeid);
    if (nodeIt == nodes.end()) {
        // Unknown node, nothing to schedule.
        return false;
    }
    return nodes.modify(nodeIt,
                        [&](Node &node) { node.nextRequestTime = timeout; });
}
// One-shot latch: mark that we sent our compact proofs to this node.
// Returns true only on the first successful latch; false if the node is
// unknown or the flag was already set.
bool PeerManager::latchAvaproofsSent(NodeId nodeid) {
    const auto nodeIt = nodes.find(nodeid);
    if (nodeIt == nodes.end() || nodeIt->avaproofsSent) {
        return false;
    }
    return nodes.modify(nodeIt, [](Node &node) { node.avaproofsSent = true; });
}
// Whether a proof failed validation solely because its stake UTXOs are not
// mature yet (such proofs are parked rather than rejected).
static bool isImmatureState(const ProofValidationState &state) {
    return ProofValidationResult::IMMATURE_UTXO == state.GetResult();
}
// Push forward the earliest time a conflicting proof may replace this
// peer's proof. The stored time can only move forward. Returns true iff
// the peer's conflict time equals `nextTime` after the update, i.e. the
// requested time was actually applied.
bool PeerManager::updateNextPossibleConflictTime(
    PeerId peerid, const std::chrono::seconds &nextTime) {
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        // No such peer
        return false;
    }
    // Make sure we don't move the time in the past.
    peers.modify(it, [&](Peer &p) {
        p.nextPossibleConflictTime =
            std::max(p.nextPossibleConflictTime, nextTime);
    });
    return it->nextPossibleConflictTime == nextTime;
}
// Flag a peer's proof as finalized by avalanche voting.
// Returns false when the peer does not exist.
bool PeerManager::setFinalized(PeerId peerid) {
    const auto peerIt = peers.find(peerid);
    if (peerIt == peers.end()) {
        // Unknown peer, nothing to update.
        return false;
    }
    peers.modify(peerIt, [](Peer &peer) { peer.hasFinalized = true; });
    return true;
}
// Demote a batch of proofs: remove the peer bound to each proof (if any)
// and offer the proof to the conflicting pool, which only keeps it when it
// is preferred over what it already holds.
template <typename ProofContainer>
void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
    auto &proofIndex = peers.get<by_proofid>();
    for (const ProofRef &proof : proofs) {
        auto peerIt = proofIndex.find(proof->getId());
        if (peerIt != proofIndex.end()) {
            removePeer(peerIt->peerid);
        }
        conflictingProofPool.addProofIfPreferred(proof);
    }
}
// Register a proof and create the corresponding peer.
//
// Pipeline: duplicate/dangling screening, stake UTXO verification (immature
// proofs are parked in the immature pool), conflict resolution against the
// valid pool (with a cooldown between replacements, bypassed by
// FORCE_ACCEPT), then peer creation and attachment of any pending nodes.
// On failure, `registrationState` carries the reason and false is returned.
bool PeerManager::registerProof(const ProofRef &proof,
                                ProofRegistrationState &registrationState,
                                RegistrationMode mode) {
    assert(proof);
    const ProofId &proofid = proof->getId();
    // Helper to record the failure reason and return false.
    auto invalidate = [&](ProofRegistrationResult result,
                          const std::string &message) {
        return registrationState.Invalid(
            result, message, strprintf("proofid: %s", proofid.ToString()));
    };
    if ((mode != RegistrationMode::FORCE_ACCEPT ||
         !isInConflictingPool(proofid)) &&
        exists(proofid)) {
        // In default mode, we expect the proof to be unknown, i.e. in none of
        // the pools.
        // In forced accept mode, the proof can be in the conflicting pool.
        return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
                          "proof-already-registered");
    }
    if (danglingProofPool.getProof(proofid) &&
        pendingNodes.count(proofid) == 0) {
        // Don't attempt to register a proof that we already evicted because it
        // was dangling, but rather attempt to retrieve an associated node.
        needMoreNodes = true;
        return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
    }
    // Check the proof's validity.
    ProofValidationState validationState;
    if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
                                                 chainman, validationState))) {
        if (isImmatureState(validationState)) {
            // Not valid yet: park it so it can be rescanned on new blocks.
            immatureProofPool.addProofIfPreferred(proof);
            if (immatureProofPool.countProofs() >
                AVALANCHE_MAX_IMMATURE_PROOFS) {
                // Adding this proof exceeds the immature pool limit, so evict
                // the lowest scoring proof.
                immatureProofPool.removeProof(
                    immatureProofPool.getLowestScoreProof()->getId());
            }
            return invalidate(ProofRegistrationResult::IMMATURE,
                              "immature-proof");
        }
        if (validationState.GetResult() ==
            ProofValidationResult::MISSING_UTXO) {
            return invalidate(ProofRegistrationResult::MISSING_UTXO,
                              "utxo-missing-or-spent");
        }
        // Reject invalid proof.
        return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
    }
    auto now = GetTime<std::chrono::seconds>();
    // Earliest time at which another conflicting proof may in turn replace
    // the one we are registering.
    auto nextCooldownTimePoint =
        now + std::chrono::seconds(gArgs.GetIntArg(
                  "-avalancheconflictingproofcooldown",
                  AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN));
    ProofPool::ConflictingProofSet conflictingProofs;
    switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
        case ProofPool::AddProofStatus::REJECTED: {
            // The proof conflicts with proofs already in the valid pool.
            if (mode != RegistrationMode::FORCE_ACCEPT) {
                auto bestPossibleConflictTime = std::chrono::seconds(0);
                auto &pview = peers.get<by_proofid>();
                for (auto &conflictingProof : conflictingProofs) {
                    auto it = pview.find(conflictingProof->getId());
                    assert(it != pview.end());
                    // Search the most recent time over the peers
                    bestPossibleConflictTime = std::max(
                        bestPossibleConflictTime, it->nextPossibleConflictTime);
                    updateNextPossibleConflictTime(it->peerid,
                                                   nextCooldownTimePoint);
                }
                if (bestPossibleConflictTime > now) {
                    // Cooldown not elapsed, reject the proof.
                    return invalidate(
                        ProofRegistrationResult::COOLDOWN_NOT_ELAPSED,
                        "cooldown-not-elapsed");
                }
                // Give the proof a chance to replace the conflicting ones.
                if (validProofPool.addProofIfPreferred(proof)) {
                    // If we have overridden other proofs due to conflict,
                    // remove the peers and attempt to move them to the
                    // conflicting pool.
                    moveToConflictingPool(conflictingProofs);
                    // Replacement is successful, continue to peer creation
                    break;
                }
                // Not the preferred proof, or replacement is not enabled
                return conflictingProofPool.addProofIfPreferred(proof) ==
                               ProofPool::AddProofStatus::REJECTED
                           ? invalidate(ProofRegistrationResult::REJECTED,
                                        "rejected-proof")
                           : invalidate(ProofRegistrationResult::CONFLICTING,
                                        "conflicting-utxos");
            }
            // Forced accept: the proof wins unconditionally.
            conflictingProofPool.removeProof(proofid);
            // Move the conflicting proofs from the valid pool to the
            // conflicting pool
            moveToConflictingPool(conflictingProofs);
            auto status = validProofPool.addProofIfNoConflict(proof);
            assert(status == ProofPool::AddProofStatus::SUCCEED);
            break;
        }
        case ProofPool::AddProofStatus::DUPLICATED:
            // If the proof was already in the pool, don't duplicate the peer.
            return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
                              "proof-already-registered");
        case ProofPool::AddProofStatus::SUCCEED:
            break;
            // No default case, so the compiler can warn about missing cases
    }
    // At this stage we are going to create a peer so the proof should never
    // exist in the conflicting pool, but use belt and suspenders.
    conflictingProofPool.removeProof(proofid);
    // New peer means new peerid!
    const PeerId peerid = nextPeerId++;
    // We have no peer for this proof, time to create it.
    auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
    assert(inserted.second);
    if (localProof && proof->getId() == localProof->getId()) {
        // Add it to the shareable proofs even if there is no node, we are the
        // node. Otherwise it will be inserted after a node is attached to the
        // proof.
        shareableProofs.insert(proof);
    }
    // Add to our registered score when adding to the peer list
    totalPeersScore += proof->getScore();
    // If there are nodes waiting for this proof, add them
    auto &pendingNodesView = pendingNodes.get<by_proofid>();
    auto range = pendingNodesView.equal_range(proofid);
    // We want to update the nodes then remove them from the pending set. That
    // will invalidate the range iterators, so we need to save the node ids
    // first before we can loop over them.
    std::vector<NodeId> nodeids;
    nodeids.reserve(std::distance(range.first, range.second));
    std::transform(range.first, range.second, std::back_inserter(nodeids),
                   [](const PendingNode &n) { return n.nodeid; });
    for (const NodeId &nodeid : nodeids) {
        addOrUpdateNode(inserted.first, nodeid);
    }
    return true;
}
// Remove a proof from whichever pool currently holds it.
// DEFAULT mode keeps conflicting proofs around (and demotes a rejected
// peer's proof to the conflicting pool); INVALIDATE mode drops the proof
// entirely. When a peer is removed, conflicting proofs covering its UTXOs
// get a chance to be registered in its place. Returns false if the proof
// is unknown.
bool PeerManager::rejectProof(const ProofId &proofid, RejectionMode mode) {
    if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
        danglingProofPool.removeProof(proofid);
        return true;
    }
    if (!exists(proofid)) {
        return false;
    }
    if (immatureProofPool.removeProof(proofid)) {
        return true;
    }
    if (mode == RejectionMode::DEFAULT &&
        conflictingProofPool.getProof(proofid)) {
        // In default mode we keep the proof in the conflicting pool
        return true;
    }
    if (mode == RejectionMode::INVALIDATE &&
        conflictingProofPool.removeProof(proofid)) {
        // In invalidate mode we remove the proof completely
        return true;
    }
    // At this point the proof must be bound to a peer.
    auto &pview = peers.get<by_proofid>();
    auto it = pview.find(proofid);
    assert(it != pview.end());
    const ProofRef proof = it->proof;
    if (!removePeer(it->peerid)) {
        return false;
    }
    // If there was conflicting proofs, attempt to pull them back
    for (const SignedStake &ss : proof->getStakes()) {
        const ProofRef conflictingProof =
            conflictingProofPool.getProof(ss.getStake().getUTXO());
        if (!conflictingProof) {
            continue;
        }
        conflictingProofPool.removeProof(conflictingProof->getId());
        registerProof(conflictingProof);
    }
    if (mode == RejectionMode::DEFAULT) {
        // Keep the rejected proof around as a conflicting candidate.
        conflictingProofPool.addProofIfPreferred(proof);
    }
    return true;
}
// Evict proofs that have had no node attached for longer than
// DANGLING_TIMEOUT — unless the network still reports them present — and
// conversely pull back previously dangling proofs the network vouches for.
// `registeredProofs` is cleared, then filled with the proofs pulled back.
void PeerManager::cleanupDanglingProofs(
    std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
    registeredProofs.clear();
    const auto now = GetTime<std::chrono::seconds>();
    std::vector<ProofRef> newlyDanglingProofs;
    for (const Peer &peer : peers) {
        // If the peer is not our local proof, has been registered for some
        // time and has no node attached, discard it.
        if ((!localProof || peer.getProofId() != localProof->getId()) &&
            peer.node_count == 0 &&
            (peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
            // Check the remotes status to determine if we should set the proof
            // as dangling. This prevents from dropping a proof on our own due
            // to a network issue. If the remote presence status is inconclusive
            // we assume our own position (missing = false).
            if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
                newlyDanglingProofs.push_back(peer.proof);
            }
        }
    }
    // Similarly, check if we have dangling proofs that could be pulled back
    // because the network says so.
    std::vector<ProofRef> previouslyDanglingProofs;
    danglingProofPool.forEachProof([&](const ProofRef &proof) {
        if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
            previouslyDanglingProofs.push_back(proof);
        }
    });
    for (const ProofRef &proof : previouslyDanglingProofs) {
        danglingProofPool.removeProof(proof->getId());
        if (registerProof(proof)) {
            registeredProofs.insert(proof);
        }
    }
    for (const ProofRef &proof : newlyDanglingProofs) {
        rejectProof(proof->getId(), RejectionMode::INVALIDATE);
        if (danglingProofPool.addProofIfPreferred(proof)) {
            // If the proof is added, it means there is no better conflicting
            // dangling proof and this is not a duplicated, so it's worth
            // printing a message to the log.
            LogPrint(BCLog::AVALANCHE,
                     "Proof dangling for too long (no connected node): %s\n",
                     proof->getId().GetHex());
        }
    }
    // If we have dangling proof, this is a good indicator that we need to
    // request more nodes from our peers.
    needMoreNodes = !newlyDanglingProofs.empty();
}
// Pick a node to poll: select a peer via stake-weighted random slot
// selection, then take one of its nodes whose request timeout has expired.
// Sets needMoreNodes and returns NO_NODE when no candidate is found within
// the retry budget.
NodeId PeerManager::selectNode() {
    for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
        const PeerId p = selectPeer();
        // If we cannot find a peer, it may be due to the fact that it is
        // unlikely due to high fragmentation, so compact and retry.
        if (p == NO_PEER) {
            compact();
            continue;
        }
        // See if that peer has an available node.
        auto &nview = nodes.get<next_request_time>();
        auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
        if (it != nview.end() && it->peerid == p &&
            it->nextRequestTime <= Now<SteadyMilliseconds>()) {
            return it->nodeid;
        }
    }
    // We failed to find a node to query, flag this so we can request more
    needMoreNodes = true;
    return NO_NODE;
}
// Re-validate every peer's proof against the new chain tip. Proofs that no
// longer verify are evicted (immature ones are re-parked in the immature
// pool), then the immature pool is rescanned, which may promote proofs
// that just matured. Returns the proofs registered by the rescan.
std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
    std::vector<ProofId> invalidProofIds;
    std::vector<ProofRef> newImmatures;
    {
        LOCK(cs_main);
        for (const auto &p : peers) {
            ProofValidationState state;
            if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
                if (isImmatureState(state)) {
                    newImmatures.push_back(p.proof);
                }
                invalidProofIds.push_back(p.getProofId());
                LogPrint(BCLog::AVALANCHE,
                         "Invalidating proof %s: verification failed (%s)\n",
                         p.proof->getId().GetHex(), state.ToString());
            }
        }
    }
    // Remove the invalid proofs before the immature rescan. This makes it
    // possible to pull back proofs with utxos that conflicted with these
    // invalid proofs.
    for (const ProofId &invalidProofId : invalidProofIds) {
        rejectProof(invalidProofId, RejectionMode::INVALIDATE);
    }
    auto registeredProofs = immatureProofPool.rescan(*this);
    for (auto &p : newImmatures) {
        immatureProofPool.addProofIfPreferred(p);
    }
    return registeredProofs;
}
// Look a proof up by id, searching the pools from most to least
// authoritative: bound peers first, then the conflicting pool, then the
// immature pool. Returns a null ProofRef when not found.
ProofRef PeerManager::getProof(const ProofId &proofid) const {
    ProofRef found;
    forPeer(proofid, [&](const Peer &peer) {
        found = peer.proof;
        return true;
    });
    if (found) {
        return found;
    }
    found = conflictingProofPool.getProof(proofid);
    if (found) {
        return found;
    }
    return immatureProofPool.getProof(proofid);
}
// Whether some peer entry is keyed on this proof id.
bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
    const auto &proofIndex = peers.get<by_proofid>();
    return proofIndex.find(proofid) != proofIndex.end();
}
bool PeerManager::isImmature(const ProofId &proofid) const {
return immatureProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
return conflictingProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::isDangling(const ProofId &proofid) const {
return danglingProofPool.getProof(proofid) != nullptr;
}
// Remember that this proof id was determined invalid, so it won't be
// requested again.
void PeerManager::setInvalid(const ProofId &proofid) {
    invalidProofs.insert(proofid);
}
// Whether this proof id was previously recorded as invalid.
bool PeerManager::isInvalid(const ProofId &proofid) const {
    return invalidProofs.contains(proofid);
}
// Forget every proof id recorded as invalid.
void PeerManager::clearAllInvalid() {
    invalidProofs.reset();
}
// Record whether `nodeid` reports having proof `proofid`. The number of
// records per node is bounded to max(MAX_REMOTE_PROOFS, 2 * peers.size()),
// evicting the records with the oldest update time first. Any existing
// (proofid, nodeid) record is replaced so its timestamp is refreshed.
// Returns true if the record was inserted.
bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
                                  const bool present) {
    // Get how many proofs this node has announced
    auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
    auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
    // Limit the number of proofs a single node can save:
    // - At least MAX_REMOTE_PROOFS
    // - Up to 2x as much as we have
    // The MAX_REMOTE_PROOFS minimum is there to ensure we don't overlimit at
    // startup when we don't have proofs yet.
    while (size_t(std::distance(begin, end)) >=
           std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
        // Remove the proof with the oldest update time
        begin = remoteProofsByLastUpdate.erase(begin);
    }
    auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
    if (it != remoteProofs.end()) {
        remoteProofs.erase(it);
    }
    return remoteProofs
        .emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
                             present})
        .second;
}
std::vector<RemoteProof>
PeerManager::getRemoteProofs(const NodeId nodeid) const {
std::vector<RemoteProof> nodeRemoteProofs;
auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
for (auto &it = begin; it != end; it++) {
nodeRemoteProofs.emplace_back(*it);
}
return nodeRemoteProofs;
}
// Tear down a peer: detach all its nodes (parking them in the pending set
// so they can re-attach under a new proof), release its proof's UTXOs and
// its slot, remove the proof from the shareable radix tree and unbroadcast
// set, and update score accounting. Returns false for unknown peers.
bool PeerManager::removePeer(const PeerId peerid) {
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        return false;
    }
    // Remove all nodes from this peer.
    removeNodeFromPeer(it, it->node_count);
    auto &nview = nodes.get<next_request_time>();
    // Add the nodes to the pending set
    auto range = nview.equal_range(peerid);
    for (auto &nit = range.first; nit != range.second; ++nit) {
        pendingNodes.emplace(it->getProofId(), nit->nodeid);
    };
    // Remove nodes associated with this peer, unless their timeout is still
    // active. This ensure that we don't overquery them in case they are
    // subsequently added to another peer.
    nview.erase(
        nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
        nview.upper_bound(
            boost::make_tuple(peerid, Now<SteadyMilliseconds>())));
    // Release UTXOs attached to this proof.
    validProofPool.removeProof(it->getProofId());
    // If there were nodes attached, remove from the radix tree as well
    auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));
    m_unbroadcast_proofids.erase(it->getProofId());
    // Remove the peer from the PeerSet and remove its score from the registered
    // score total.
    assert(totalPeersScore >= it->getScore());
    totalPeersScore -= it->getScore();
    peers.erase(it);
    return true;
}
// Draw a random slot value and map it to a peer, stake-weighted. Dead
// (fragmented) slots can make a draw miss, so retry up to
// SELECT_PEER_MAX_RETRY times before giving up with NO_PEER.
PeerId PeerManager::selectPeer() const {
    if (slots.empty() || slotCount == 0) {
        // Nothing allocated, nothing to select.
        return NO_PEER;
    }
    const uint64_t max = slotCount;
    int attempts = SELECT_PEER_MAX_RETRY;
    while (attempts-- > 0) {
        const PeerId candidate = selectPeerImpl(slots, GetRand(max), max);
        if (candidate != NO_PEER) {
            return candidate;
        }
    }
    return NO_PEER;
}
uint64_t PeerManager::compact() {
// There is nothing to compact.
if (fragmentation == 0) {
return 0;
}
std::vector<Slot> newslots;
newslots.reserve(peers.size());
uint64_t prevStop = 0;
uint32_t i = 0;
for (auto it = peers.begin(); it != peers.end(); it++) {
if (it->node_count == 0) {
continue;
}
newslots.emplace_back(prevStop, it->getScore(), it->peerid);
prevStop = slots[i].getStop();
if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
return 0;
}
}
slots = std::move(newslots);
const uint64_t saved = slotCount - prevStop;
slotCount = prevStop;
fragmentation = 0;
return saved;
}
// Internal consistency checker: validates the slot array ordering and
// indices, per-peer proof/UTXO pool consistency, node refcounts, score
// accounting, and the shareable-proof radix tree. Returns false at the
// first inconsistency found.
bool PeerManager::verify() const {
    uint64_t prevStop = 0;
    uint32_t scoreFromSlots = 0;
    for (size_t i = 0; i < slots.size(); i++) {
        const Slot &s = slots[i];
        // Slots must be in correct order.
        if (s.getStart() < prevStop) {
            return false;
        }
        prevStop = s.getStop();
        // If this is a dead slot, then nothing more needs to be checked.
        if (s.getPeerId() == NO_PEER) {
            continue;
        }
        // We have a live slot, verify index.
        auto it = peers.find(s.getPeerId());
        if (it == peers.end() || it->index != i) {
            return false;
        }
        // Accumulate score across slots
        scoreFromSlots += slots[i].getScore();
    }
    // Score across slots must be the same as our allocated score
    if (scoreFromSlots != connectedPeersScore) {
        return false;
    }
    uint32_t scoreFromAllPeers = 0;
    uint32_t scoreFromPeersWithNodes = 0;
    std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
    for (const auto &p : peers) {
        // Accumulate the score across peers to compare with total known score
        scoreFromAllPeers += p.getScore();
        // A peer should have a proof attached
        if (!p.proof) {
            return false;
        }
        // Check proof pool consistency
        for (const auto &ss : p.proof->getStakes()) {
            const COutPoint &outpoint = ss.getStake().getUTXO();
            auto proof = validProofPool.getProof(outpoint);
            if (!proof) {
                // Missing utxo
                return false;
            }
            if (proof != p.proof) {
                // Wrong proof
                return false;
            }
            if (!peersUtxos.emplace(outpoint).second) {
                // Duplicated utxo
                return false;
            }
        }
        // Count node attached to this peer.
        const auto count_nodes = [&]() {
            size_t count = 0;
            auto &nview = nodes.get<next_request_time>();
            auto begin = nview.lower_bound(
                boost::make_tuple(p.peerid, SteadyMilliseconds()));
            auto end = nview.upper_bound(
                boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));
            for (auto it = begin; it != end; ++it) {
                count++;
            }
            return count;
        };
        if (p.node_count != count_nodes()) {
            return false;
        }
        // If there are no nodes attached to this peer, then we are done.
        if (p.node_count == 0) {
            continue;
        }
        scoreFromPeersWithNodes += p.getScore();
        // The index must point to a slot refering to this peer.
        if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
            return false;
        }
        // If the score do not match, same thing.
        if (slots[p.index].getScore() != p.getScore()) {
            return false;
        }
        // Check the proof is in the radix tree only if there are nodes attached
        if (((localProof && p.getProofId() == localProof->getId()) ||
             p.node_count > 0) &&
            shareableProofs.get(p.getProofId()) == nullptr) {
            return false;
        }
        // NOTE(review): this branch looks unreachable, since peers with
        // node_count == 0 were skipped by the `continue` above — presumably
        // kept as a defensive check; confirm the intent upstream.
        if (p.node_count == 0 &&
            shareableProofs.get(p.getProofId()) != nullptr) {
            return false;
        }
    }
    // Check our accumulated scores against our registred and allocated scores
    if (scoreFromAllPeers != totalPeersScore) {
        return false;
    }
    if (scoreFromPeersWithNodes != connectedPeersScore) {
        return false;
    }
    // We checked the utxo consistency for all our peers utxos already, so if
    // the pool size differs from the expected one there are dangling utxos.
    if (validProofPool.size() != peersUtxos.size()) {
        return false;
    }
    // Check there is no dangling proof in the radix tree
    return shareableProofs.forEachLeaf([&](RCUPtr<const Proof> pLeaf) {
        return isBoundToPeer(pLeaf->getId());
    });
}
// Map a slot value in [0, max) to the peer owning it. Uses an
// interpolation-guided binary search over the ordered slot array, falling
// back to a linear scan once the range is small. Returns NO_PEER when the
// value lands on an unallocated (dead) slot or cannot be located.
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
                      const uint64_t max) {
    assert(slot <= max);
    size_t begin = 0, end = slots.size();
    uint64_t bottom = 0, top = max;
    // Try to find the slot using dichotomic search.
    while ((end - begin) > 8) {
        // The slot we picked is not allocated.
        if (slot < bottom || slot >= top) {
            return NO_PEER;
        }
        // Guesstimate the position of the slot by linear interpolation.
        size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
        assert(begin <= i && i < end);
        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }
        // We undershot: the target lies after slot i.
        if (slots[i].precedes(slot)) {
            begin = i + 1;
            if (begin >= end) {
                return NO_PEER;
            }
            bottom = slots[begin].getStart();
            continue;
        }
        // We overshot: the target lies before slot i.
        if (slots[i].follows(slot)) {
            end = i;
            top = slots[end].getStart();
            continue;
        }
        // We landed in an unallocated gap between slots.
        return NO_PEER;
    }
    // Small range left: fall back to a linear search.
    for (size_t i = begin; i < end; i++) {
        // We have a match.
        if (slots[i].contains(slot)) {
            return slots[i].getPeerId();
        }
    }
    // We failed to find a slot, retry.
    return NO_PEER;
}
// Track a proof for periodic rebroadcast. Only proofs currently bound to a
// peer are eligible; anything else is silently ignored.
void PeerManager::addUnbroadcastProof(const ProofId &proofid) {
    if (!isBoundToPeer(proofid)) {
        return;
    }
    m_unbroadcast_proofids.insert(proofid);
}
// Stop tracking this proof for rebroadcast; erasing an absent id is a no-op.
void PeerManager::removeUnbroadcastProof(const ProofId &proofid) {
    m_unbroadcast_proofids.erase(proofid);
}
-bool PeerManager::selectStakingRewardWinner(const CBlockIndex *pprev,
- std::vector<CScript> &winners) {
+bool PeerManager::selectStakingRewardWinner(
+ const CBlockIndex *pprev,
+ std::vector<std::pair<ProofId, CScript>> &winners) {
if (!pprev) {
return false;
}
// Don't select proofs that have not been known for long enough, i.e. at
// least since twice the dangling proof cleanup timeout before the last
// block time, so we're sure to not account for proofs more recent than the
// previous block or lacking node connected.
// The previous block time is capped to now for the unlikely event the
// previous block time is in the future.
auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
4 * Peer::DANGLING_TIMEOUT);
auto maxRegistrationDelay =
std::chrono::duration_cast<std::chrono::seconds>(
6 * Peer::DANGLING_TIMEOUT);
auto minRegistrationDelay =
std::chrono::duration_cast<std::chrono::seconds>(
2 * Peer::DANGLING_TIMEOUT);
const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());
const int64_t targetRegistrationTime = refTime - registrationDelay.count();
const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();
const BlockHash prevblockhash = pprev->GetBlockHash();
std::vector<ProofRef> selectedProofs;
ProofRef firstCompliantProof = ProofRef();
while (selectedProofs.size() < peers.size()) {
double bestRewardRank = std::numeric_limits<double>::max();
ProofRef selectedProof = ProofRef();
int64_t selectedProofRegistrationTime{0};
uint256 bestRewardHash;
for (const Peer &peer : peers) {
if (!peer.proof) {
// Should never happen, continue
continue;
}
if (!peer.hasFinalized ||
peer.registration_time.count() >= maxRegistrationTime) {
continue;
}
if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
[&peer](const ProofRef &proof) {
return peer.getProofId() == proof->getId();
}) != selectedProofs.end()) {
continue;
}
uint256 proofRewardHash;
CHash256()
.Write(prevblockhash)
.Write(peer.getProofId())
.Finalize(proofRewardHash);
if (proofRewardHash == uint256::ZERO) {
// This either the result of an incredibly unlikely lucky hash,
// or a the hash is getting abused. In this case, skip the
// proof.
LogPrintf(
"Staking reward hash has a suspicious value of zero for "
"proof %s and blockhash %s, skipping\n",
peer.getProofId().ToString(), prevblockhash.ToString());
continue;
}
// To make sure the selection is properly weighted according to the
// proof score, we normalize the proofRewardHash to a number between
// 0 and 1, then take the logarithm and divide by the weight. Since
// it is scale-independent, we can simplify by removing constants
// and use base 2 logarithm.
// Inspired by: https://stackoverflow.com/a/30226926.
double proofRewardRank =
(256.0 -
std::log2(UintToArith256(proofRewardHash).getdouble())) /
peer.getScore();
// The best ranking is the lowest ranking value
if (proofRewardRank < bestRewardRank) {
bestRewardRank = proofRewardRank;
selectedProof = peer.proof;
selectedProofRegistrationTime = peer.registration_time.count();
bestRewardHash = proofRewardHash;
}
// Select the lowest reward hash then proofid in the unlikely case
// of a collision.
if (proofRewardRank == bestRewardRank &&
(proofRewardHash < bestRewardHash ||
(proofRewardHash == bestRewardHash &&
peer.getProofId() < selectedProof->getId()))) {
selectedProof = peer.proof;
selectedProofRegistrationTime = peer.registration_time.count();
bestRewardHash = proofRewardHash;
}
}
if (!selectedProof) {
// No winner
break;
}
if (!firstCompliantProof &&
selectedProofRegistrationTime < targetRegistrationTime) {
firstCompliantProof = selectedProof;
}
selectedProofs.push_back(selectedProof);
if (selectedProofRegistrationTime < minRegistrationTime &&
!isFlaky(selectedProof->getId())) {
break;
}
}
winners.clear();
if (!firstCompliantProof) {
return false;
}
winners.reserve(selectedProofs.size());
// Find the winner
for (const ProofRef &proof : selectedProofs) {
if (proof->getId() == firstCompliantProof->getId()) {
- winners.push_back(proof->getPayoutScript());
+ winners.push_back({proof->getId(), proof->getPayoutScript()});
}
}
// Add the others (if any) after the winner
for (const ProofRef &proof : selectedProofs) {
if (proof->getId() != firstCompliantProof->getId()) {
- winners.push_back(proof->getPayoutScript());
+ winners.push_back({proof->getId(), proof->getPayoutScript()});
}
}
return true;
}
// Heuristic: a proof is "flaky" when we have no node connected for it, or
// when peers representing more than 30% of the surveyed stake report it
// missing (ignoring peers reporting too many missing proofs, which are
// likely out of sync themselves). Our own local proof is never flaky.
bool PeerManager::isFlaky(const ProofId &proofid) const {
    if (localProof && proofid == localProof->getId()) {
        return false;
    }
    // If we are missing connection to this proof, consider flaky
    if (forPeer(proofid,
                [](const Peer &peer) { return peer.node_count == 0; })) {
        return true;
    }
    auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
    auto &nview = nodes.get<next_request_time>();
    std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
        missing_per_peer;
    // Construct a set of missing proof ids per peer
    double total_score{0};
    for (const Peer &peer : peers) {
        const PeerId peerid = peer.peerid;
        total_score += peer.getScore();
        auto nodes_range = nview.equal_range(peerid);
        for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
            auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
            for (auto &proofit = proofs_range.first;
                 proofit != proofs_range.second; ++proofit) {
                if (!proofit->present) {
                    missing_per_peer[peerid].insert(proofit->proofid);
                }
            }
        };
    }
    double missing_score{0};
    // Now compute a score for the missing proof
    for (const auto &[peerid, missingProofs] : missing_per_peer) {
        if (missingProofs.size() > 3) {
            // Ignore peers with too many missing proofs
            continue;
        }
        auto pit = peers.find(peerid);
        if (pit == peers.end()) {
            // Peer not found
            continue;
        }
        if (missingProofs.count(proofid) > 0) {
            missing_score += pit->getScore();
        }
    }
    // Flaky when more than 30% of the stake reports the proof missing.
    return (missing_score / total_score) > 0.3;
}
/**
 * @brief Get the presence remote status of a proof.
 *
 * Aggregates, over all remotes that reported this proof, the score of the
 * peers claiming it present vs missing. Each peer's score is divided by its
 * node count so a peer with several connections is not counted multiple
 * times. Our local proof has no remote entry for ourselves, so it is
 * accounted for separately.
 *
 * @param proofid The target proof id
 * @return true if it's likely present (> 55% of the sampled score reports
 *     it), false if likely missing (> 55%), nullopt if uncertain or if no
 *     remote reported anything.
 */
std::optional<bool>
PeerManager::getRemotePresenceStatus(const ProofId &proofid) const {
    auto &remoteProofsView = remoteProofs.get<by_proofid>();
    auto [begin, end] = remoteProofsView.equal_range(proofid);

    if (begin == end) {
        // No remote registered anything yet, we are on our own
        return std::nullopt;
    }

    double total_score{0};
    double present_score{0};
    double missing_score{0};

    for (auto it = begin; it != end; it++) {
        auto nit = nodes.find(it->nodeid);
        if (nit == nodes.end()) {
            // No such node
            continue;
        }

        const PeerId peerid = nit->peerid;
        auto pit = peers.find(peerid);
        if (pit == peers.end()) {
            // Peer not found
            continue;
        }

        uint32_t node_count = pit->node_count;
        if (localProof && pit->getProofId() == localProof->getId()) {
            // If that's our local proof, account for ourself
            ++node_count;
        }

        if (node_count == 0) {
            // should never happen
            continue;
        }

        // Weight each remote report by its peer's score per node, so every
        // connected node of a peer contributes equally.
        const double score = double(pit->getScore()) / node_count;

        total_score += score;
        if (it->present) {
            present_score += score;
        } else {
            missing_score += score;
        }
    }

    if (localProof) {
        auto &peersByProofid = peers.get<by_proofid>();

        // Do we have a node connected for that proof ?
        bool present = false;
        auto pit = peersByProofid.find(proofid);
        if (pit != peersByProofid.end()) {
            present = pit->node_count > 0;
        }

        pit = peersByProofid.find(localProof->getId());
        if (pit != peersByProofid.end()) {
            // Also divide by node_count, we can have several nodes even for our
            // local proof.
            const double score =
                double(pit->getScore()) / (1 + pit->node_count);

            total_score += score;
            if (present) {
                present_score += score;
            } else {
                missing_score += score;
            }
        }
    }

    if (present_score / total_score > 0.55) {
        return std::make_optional(true);
    }

    if (missing_score / total_score > 0.55) {
        return std::make_optional(false);
    }

    return std::nullopt;
}
/**
 * Serialize the current peer set to disk.
 *
 * Writes to a temporary "<dumpPath>.new" file first, commits it, then
 * atomically renames it over the target path so a crash never leaves a
 * truncated peers file behind.
 *
 * @param dumpPath Destination path of the peers file.
 * @return true on success, false on any I/O failure (which is also logged).
 */
bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
    try {
        // Stage the dump in a sibling temporary file.
        const fs::path tmpPath = dumpPath + ".new";

        FILE *rawFile = fsbridge::fopen(tmpPath, "wb");
        if (!rawFile) {
            return false;
        }

        CAutoFile outFile(rawFile, SER_DISK, CLIENT_VERSION);

        // Header: format version then peer count, followed by one record per
        // peer. This order must match loadPeersFromFile().
        outFile << PEERS_DUMP_VERSION;
        outFile << uint64_t(peers.size());
        for (const Peer &peer : peers) {
            outFile << peer.proof;
            outFile << peer.hasFinalized;
            outFile << int64_t(peer.registration_time.count());
            outFile << int64_t(peer.nextPossibleConflictTime.count());
        }

        // Flush to durable storage before the rename.
        if (!FileCommit(outFile.Get())) {
            throw std::runtime_error(strprintf("Failed to commit to file %s",
                                               PathToString(tmpPath)));
        }
        outFile.fclose();

        if (!RenameOver(tmpPath, dumpPath)) {
            throw std::runtime_error(strprintf("Rename failed from %s to %s",
                                               PathToString(tmpPath),
                                               PathToString(dumpPath)));
        }
    } catch (const std::exception &e) {
        LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
                 e.what());
        return false;
    }

    LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
             peers.size(), PathToString(dumpPath));
    return true;
}
/**
 * Load the peer set previously written by dumpPeersToFile() and register the
 * proofs it contains.
 *
 * @param[in] dumpPath Path of the peers file.
 * @param[out] registeredProofs Filled with the proofs that were successfully
 *     registered; cleared first.
 * @return false if the file cannot be opened, has an unsupported version or
 *     is corrupted; true otherwise, even if some individual proofs failed to
 *     register.
 */
bool PeerManager::loadPeersFromFile(
    const fs::path &dumpPath,
    std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
    registeredProofs.clear();

    FILE *filestr = fsbridge::fopen(dumpPath, "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrint(BCLog::AVALANCHE,
                 "Failed to open avalanche peers file from disk.\n");
        return false;
    }

    try {
        uint64_t version;
        file >> version;
        if (version != PEERS_DUMP_VERSION) {
            LogPrint(BCLog::AVALANCHE,
                     "Unsupported avalanche peers file version.\n");
            return false;
        }

        uint64_t numPeers;
        file >> numPeers;

        auto &peersByProofId = peers.get<by_proofid>();

        for (uint64_t i = 0; i < numPeers; i++) {
            // Field order must match dumpPeersToFile(). All fields are read
            // even when registration below fails, to keep the stream aligned.
            ProofRef proof;
            bool hasFinalized;
            int64_t registrationTime;
            int64_t nextPossibleConflictTime;
            file >> proof;
            file >> hasFinalized;
            file >> registrationTime;
            file >> nextPossibleConflictTime;
            if (registerProof(proof)) {
                auto it = peersByProofId.find(proof->getId());
                if (it == peersByProofId.end()) {
                    // Should never happen
                    continue;
                }

                // We don't modify any key so we don't need to rehash.
                // If the modify fails, it means we don't get the full benefit
                // from the file but we still added our peer to the set. The
                // non-overridden fields will be set the normal way.
                peersByProofId.modify(it, [&](Peer &p) {
                    p.hasFinalized = hasFinalized;
                    p.registration_time =
                        std::chrono::seconds{registrationTime};
                    p.nextPossibleConflictTime =
                        std::chrono::seconds{nextPossibleConflictTime};
                });

                registeredProofs.insert(proof);
            }
        }
    } catch (const std::exception &e) {
        LogPrint(BCLog::AVALANCHE,
                 "Failed to read the avalanche peers file data on disk: %s.\n",
                 e.what());
        return false;
    }

    return true;
}
} // namespace avalanche
diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h
index d77d38af85..56b4ba34e6 100644
--- a/src/avalanche/peermanager.h
+++ b/src/avalanche/peermanager.h
@@ -1,556 +1,557 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_AVALANCHE_PEERMANAGER_H
#define BITCOIN_AVALANCHE_PEERMANAGER_H
#include <avalanche/node.h>
#include <avalanche/proof.h>
#include <avalanche/proofpool.h>
#include <avalanche/proofradixtreeadapter.h>
#include <coins.h>
#include <common/bloom.h>
#include <consensus/validation.h>
#include <pubkey.h>
#include <radix.h>
#include <util/hasher.h>
#include <util/time.h>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/mem_fun.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <vector>
class ChainstateManager;
class CScheduler;
namespace avalanche {
/**
 * Maximum number of immature proofs the peer manager will accept from the
 * network. Note that reorgs can cause the immature pool to temporarily exceed
 * this limit, but a change in chaintip causes previously reorged proofs to be
 * trimmed.
 */
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS = 4000;
class Delegation;
namespace {
struct TestPeerManager;
}
/**
 * A slot is a contiguous half-open range [start, start + score) of the id
 * space assigned to a single peer, used for stake-weighted peer selection.
 * Slots are immutable; the with*() helpers return modified copies.
 */
struct Slot {
private:
    uint64_t start;
    uint32_t score;
    PeerId peerid;

public:
    Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn)
        : start(startIn), score(scoreIn), peerid(peeridIn) {}

    /** Copy of this slot with a different start offset. */
    Slot withStart(uint64_t startIn) const {
        return Slot(startIn, score, peerid);
    }
    /**
     * Copy of this slot with a different score. The parameter is uint32_t to
     * match the stored field (previously uint64_t, which silently narrowed).
     */
    Slot withScore(uint32_t scoreIn) const {
        return Slot(start, scoreIn, peerid);
    }
    /** Copy of this slot assigned to a different peer. */
    Slot withPeerId(PeerId peeridIn) const {
        return Slot(start, score, peeridIn);
    }

    uint64_t getStart() const { return start; }
    /** One past the last id covered by this slot. */
    uint64_t getStop() const { return start + score; }
    uint32_t getScore() const { return score; }
    PeerId getPeerId() const { return peerid; }

    /** Whether the given id falls within [start, stop). */
    bool contains(uint64_t slot) const {
        return getStart() <= slot && slot < getStop();
    }
    /** Whether this slot ends at or before the given id. */
    bool precedes(uint64_t slot) const { return slot >= getStop(); }
    /** Whether this slot starts after the given id. */
    bool follows(uint64_t slot) const { return getStart() > slot; }
};
/**
 * An avalanche peer: a registered proof together with bookkeeping about the
 * nodes attached to it, its finalization status and its availability score.
 */
struct Peer {
    PeerId peerid;
    // Slot index for this peer. NOTE(review): initialized to -1 (wraps to
    // uint32_t max) as an "unset" sentinel — confirm against slot management.
    uint32_t index = -1;
    // Number of nodes currently attached to this peer.
    uint32_t node_count = 0;

    ProofRef proof;
    // Latched to true once this peer's proof has been finalized.
    bool hasFinalized = false;

    // The network stack uses timestamp in seconds, so we oblige.
    std::chrono::seconds registration_time;
    // Time before which a conflicting proof may not replace this peer's.
    std::chrono::seconds nextPossibleConflictTime;

    // Exponential moving average of the attached nodes' availability scores
    // (see PeerManager::updateAvailabilityScores).
    double availabilityScore = 0.0;

    /**
     * Consider dropping the peer if no node is attached after this timeout
     * expired.
     */
    static constexpr auto DANGLING_TIMEOUT = 15min;

    // registration_time is stamped with the current time at construction.
    Peer(PeerId peerid_, ProofRef proof_,
         std::chrono::seconds nextPossibleConflictTime_)
        : peerid(peerid_), proof(std::move(proof_)),
          registration_time(GetTime<std::chrono::seconds>()),
          nextPossibleConflictTime(std::move(nextPossibleConflictTime_)) {}

    const ProofId &getProofId() const { return proof->getId(); }
    uint32_t getScore() const { return proof->getScore(); }
};
/** Multi-index key extractor: indexes a Peer by its proof id. */
struct proof_index {
    using result_type = ProofId;
    result_type operator()(const Peer &p) const { return p.proof->getId(); }
};
/** Multi-index key extractor: indexes a Peer by its proof score. */
struct score_index {
    using result_type = uint32_t;
    result_type operator()(const Peer &p) const { return p.getScore(); }
};
struct next_request_time {};
/**
 * A node tracked by (proofid, nodeid) while it is not yet attached to a
 * peer — presumably until the corresponding proof gets registered (see
 * PeerManager::pendingNodes).
 */
struct PendingNode {
    ProofId proofid;
    NodeId nodeid;

    PendingNode(ProofId proofid_, NodeId nodeid_)
        : proofid(proofid_), nodeid(nodeid_) {}
};
struct by_proofid;
struct by_nodeid;
struct by_score;
/**
 * Record of what a given remote node told us about a given proof: whether it
 * claims to have it, and when it last updated us.
 */
struct RemoteProof {
    ProofId proofid;
    NodeId nodeid;
    // Time of the last update we received from this node about this proof.
    std::chrono::seconds lastUpdate;
    // Whether the remote node reported the proof as present.
    bool present;
};
/** Outcome of an attempt to register a proof with the peer manager. */
enum class ProofRegistrationResult {
    NONE = 0,
    ALREADY_REGISTERED,
    IMMATURE,
    INVALID,
    CONFLICTING,
    REJECTED,
    COOLDOWN_NOT_ELAPSED,
    DANGLING,
    MISSING_UTXO,
};
/** Validation state specialized to carry a ProofRegistrationResult. */
class ProofRegistrationState : public ValidationState<ProofRegistrationResult> {
};
namespace bmi = boost::multi_index;
class PeerManager {
std::vector<Slot> slots;
uint64_t slotCount = 0;
uint64_t fragmentation = 0;
/**
* Several nodes can make an avalanche peer. In this case, all nodes are
* considered interchangeable parts of the same peer.
*/
using PeerSet = boost::multi_index_container<
Peer, bmi::indexed_by<
// index by peerid
bmi::hashed_unique<bmi::member<Peer, PeerId, &Peer::peerid>>,
// index by proof
bmi::hashed_unique<bmi::tag<by_proofid>, proof_index,
SaltedProofIdHasher>,
// ordered by score, decreasing order
bmi::ordered_non_unique<bmi::tag<by_score>, score_index,
std::greater<uint32_t>>>>;
PeerId nextPeerId = 0;
PeerSet peers;
ProofPool validProofPool;
ProofPool conflictingProofPool;
ProofPool immatureProofPool;
ProofPool danglingProofPool;
using ProofRadixTree = RadixTree<const Proof, ProofRadixTreeAdapter>;
ProofRadixTree shareableProofs;
using NodeSet = boost::multi_index_container<
Node, bmi::indexed_by<
// index by nodeid
bmi::hashed_unique<bmi::member<Node, NodeId, &Node::nodeid>>,
// sorted by peerid/nextRequestTime
bmi::ordered_non_unique<
bmi::tag<next_request_time>,
bmi::composite_key<
Node, bmi::member<Node, PeerId, &Node::peerid>,
bmi::member<Node, SteadyMilliseconds,
&Node::nextRequestTime>>>>>;
NodeSet nodes;
/**
* Flag indicating that we failed to select a node and need to expand our
* node set.
*/
std::atomic<bool> needMoreNodes{false};
using PendingNodeSet = boost::multi_index_container<
PendingNode,
bmi::indexed_by<
// index by proofid
bmi::hashed_non_unique<
bmi::tag<by_proofid>,
bmi::member<PendingNode, ProofId, &PendingNode::proofid>,
SaltedProofIdHasher>,
// index by nodeid
bmi::hashed_unique<
bmi::tag<by_nodeid>,
bmi::member<PendingNode, NodeId, &PendingNode::nodeid>>>>;
PendingNodeSet pendingNodes;
static constexpr int SELECT_PEER_MAX_RETRY = 3;
static constexpr int SELECT_NODE_MAX_RETRY = 3;
/**
* Track proof ids to broadcast
*/
ProofIdSet m_unbroadcast_proofids;
/**
* Quorum management.
*/
uint32_t totalPeersScore = 0;
uint32_t connectedPeersScore = 0;
Amount stakeUtxoDustThreshold;
ChainstateManager &chainman;
ProofRef localProof;
struct by_lastUpdate;
using RemoteProofSet = boost::multi_index_container<
RemoteProof,
bmi::indexed_by<
// index by proofid/nodeid pair
bmi::hashed_unique<
bmi::composite_key<
RemoteProof,
bmi::member<RemoteProof, ProofId, &RemoteProof::proofid>,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>>,
bmi::composite_key_hash<SaltedProofIdHasher,
boost::hash<NodeId>>>,
// index by proofid
bmi::hashed_non_unique<
bmi::tag<by_proofid>,
bmi::member<RemoteProof, ProofId, &RemoteProof::proofid>,
SaltedProofIdHasher>,
// index by nodeid
bmi::hashed_non_unique<
bmi::tag<by_nodeid>,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>>,
bmi::ordered_non_unique<
bmi::tag<by_lastUpdate>,
bmi::composite_key<
RemoteProof,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>,
bmi::member<RemoteProof, std::chrono::seconds,
&RemoteProof::lastUpdate>>>>>;
/**
* Remember which node sent which proof so we have an image of the proof set
* of our peers.
*/
RemoteProofSet remoteProofs;
/**
* Filter for proofs that are consensus-invalid or were recently invalidated
* by avalanche (finalized rejection). These are not rerequested until they
* are rolled out of the filter.
*
* Without this filter we'd be re-requesting proofs from each of our peers,
* increasing bandwidth consumption considerably.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*/
CRollingBloomFilter invalidProofs{100000, 0.000001};
public:
static constexpr size_t MAX_REMOTE_PROOFS{100};
PeerManager(const Amount &stakeUtxoDustThresholdIn,
ChainstateManager &chainmanIn,
const ProofRef &localProofIn = ProofRef())
: stakeUtxoDustThreshold(stakeUtxoDustThresholdIn),
chainman(chainmanIn), localProof(localProofIn){};
/**
* Node API.
*/
bool addNode(NodeId nodeid, const ProofId &proofid);
bool removeNode(NodeId nodeid);
size_t getNodeCount() const { return nodes.size(); }
size_t getPendingNodeCount() const { return pendingNodes.size(); }
// Update when a node is to be polled next.
bool updateNextRequestTime(NodeId nodeid, SteadyMilliseconds timeout);
/**
 * Flag that a node did send its compact proofs.
 * @return True if the flag changed state, i.e. if this is the first time
 * the message is accounted for this node.
 */
bool latchAvaproofsSent(NodeId nodeid);
// Randomly select a node to poll.
NodeId selectNode();
/**
* Returns true if we encountered a lack of node since the last call.
*/
bool shouldRequestMoreNodes() { return needMoreNodes.exchange(false); }
template <typename Callable>
bool forNode(NodeId nodeid, Callable &&func) const {
auto it = nodes.find(nodeid);
return it != nodes.end() && func(*it);
}
template <typename Callable>
void forEachNode(const Peer &peer, Callable &&func) const {
auto &nview = nodes.get<next_request_time>();
auto range = nview.equal_range(peer.peerid);
for (auto it = range.first; it != range.second; ++it) {
func(*it);
}
}
/**
* Proof and Peer related API.
*/
/**
* Update the time before which a proof is not allowed to have conflicting
* UTXO with this peer's proof.
*/
bool updateNextPossibleConflictTime(PeerId peerid,
const std::chrono::seconds &nextTime);
/**
* Latch on that this peer has a finalized proof.
*/
bool setFinalized(PeerId peerid);
/**
* Registration mode
* - DEFAULT: Default policy, register only if the proof is unknown and has
* no conflict.
* - FORCE_ACCEPT: Turn a valid proof into a peer even if it has conflicts
* and is not the best candidate.
*/
enum class RegistrationMode {
DEFAULT,
FORCE_ACCEPT,
};
bool registerProof(const ProofRef &proof,
ProofRegistrationState &registrationState,
RegistrationMode mode = RegistrationMode::DEFAULT);
bool registerProof(const ProofRef &proof,
RegistrationMode mode = RegistrationMode::DEFAULT) {
ProofRegistrationState dummy;
return registerProof(proof, dummy, mode);
}
/**
* Rejection mode
* - DEFAULT: Default policy, reject a proof and attempt to keep it in the
* conflicting pool if possible.
* - INVALIDATE: Reject a proof by removing it from any of the pool.
*
* In any case if a peer is rejected, it attempts to pull the conflicting
* proofs back.
*/
enum class RejectionMode {
DEFAULT,
INVALIDATE,
};
bool rejectProof(const ProofId &proofid,
RejectionMode mode = RejectionMode::DEFAULT);
bool exists(const ProofId &proofid) const {
return getProof(proofid) != nullptr;
}
void cleanupDanglingProofs(
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs);
template <typename Callable>
bool forPeer(const ProofId &proofid, Callable &&func) const {
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
return it != pview.end() && func(*it);
}
template <typename Callable> void forEachPeer(Callable &&func) const {
for (const auto &p : peers) {
func(p);
}
}
/**
* Update the peer set when a new block is connected.
*/
std::unordered_set<ProofRef, SaltedProofHasher> updatedBlockTip();
/**
* Proof broadcast API.
*/
void addUnbroadcastProof(const ProofId &proofid);
void removeUnbroadcastProof(const ProofId &proofid);
auto getUnbroadcastProofs() const { return m_unbroadcast_proofids; }
/*
* Quorum management
*/
uint32_t getTotalPeersScore() const { return totalPeersScore; }
uint32_t getConnectedPeersScore() const { return connectedPeersScore; }
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
const bool present);
std::vector<RemoteProof> getRemoteProofs(const NodeId nodeid) const;
template <typename Callable>
void updateAvailabilityScores(const double decayFactor,
Callable &&getNodeAvailabilityScore) {
for (auto it = peers.begin(); it != peers.end(); it++) {
peers.modify(it, [&](Peer &peer) {
// Calculate average of current node scores
double peerScore{0.0};
forEachNode(peer, [&](const avalanche::Node &node) {
peerScore += getNodeAvailabilityScore(node.nodeid);
});
// Calculate exponential moving average of averaged node scores
peer.availabilityScore =
decayFactor * peerScore +
(1. - decayFactor) * peer.availabilityScore;
});
}
}
/****************************************************
* Functions which are public for testing purposes. *
****************************************************/
/**
* Remove an existing peer.
*/
bool removePeer(const PeerId peerid);
/**
* Randomly select a peer to poll.
*/
PeerId selectPeer() const;
/**
* Trigger maintenance of internal data structures.
* Returns how much slot space was saved after compaction.
*/
uint64_t compact();
/**
* Perform consistency check on internal data structures.
*/
bool verify() const;
// Accessors.
uint64_t getSlotCount() const { return slotCount; }
uint64_t getFragmentation() const { return fragmentation; }
const ProofPool &getValidProofPool() const { return validProofPool; }
const ProofPool &getConflictingProofPool() const {
return conflictingProofPool;
}
const ProofPool &getImmatureProofPool() const { return immatureProofPool; }
ProofRef getProof(const ProofId &proofid) const;
bool isBoundToPeer(const ProofId &proofid) const;
bool isImmature(const ProofId &proofid) const;
bool isInConflictingPool(const ProofId &proofid) const;
bool isDangling(const ProofId &proofid) const;
void setInvalid(const ProofId &proofid);
bool isInvalid(const ProofId &proofid) const;
void clearAllInvalid();
const ProofRadixTree &getShareableProofsSnapshot() const {
return shareableProofs;
}
const Amount &getStakeUtxoDustThreshold() const {
return stakeUtxoDustThreshold;
}
/**
* Deterministically select a list of payout scripts based on the proof set
* and the previous block hash.
*/
- bool selectStakingRewardWinner(const CBlockIndex *pprev,
- std::vector<CScript> &winners);
+ bool selectStakingRewardWinner(
+ const CBlockIndex *pprev,
+ std::vector<std::pair<ProofId, CScript>> &winners);
bool dumpPeersToFile(const fs::path &dumpPath) const;
bool loadPeersFromFile(
const fs::path &dumpPath,
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs);
private:
template <typename ProofContainer>
void moveToConflictingPool(const ProofContainer &proofs);
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid);
bool addNodeToPeer(const PeerSet::iterator &it);
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count = 1);
/**
* @brief Get the presence remote status of a proof
*
* @param proofid The target proof id
* @return true if it's likely present, false if likely missing, nullopt if
* uncertain.
*/
std::optional<bool> getRemotePresenceStatus(const ProofId &proofid) const;
bool isFlaky(const ProofId &proofid) const;
friend struct ::avalanche::TestPeerManager;
};
/**
* Internal methods that are exposed for testing purposes.
*/
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
const uint64_t max);
} // namespace avalanche
#endif // BITCOIN_AVALANCHE_PEERMANAGER_H
diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp
index c0502004db..1e45dab7cf 100644
--- a/src/avalanche/processor.cpp
+++ b/src/avalanche/processor.cpp
@@ -1,1267 +1,1288 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/processor.h>
#include <avalanche/avalanche.h>
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/proofcomparator.h>
#include <avalanche/validation.h>
#include <avalanche/voterecord.h>
#include <chain.h>
#include <common/args.h>
#include <key_io.h> // For DecodeSecret
#include <net.h>
#include <netmessagemaker.h>
#include <policy/block/stakingrewards.h>
#include <scheduler.h>
#include <util/bitmanip.h>
#include <util/moneystr.h>
#include <util/time.h>
#include <util/translation.h>
#include <validation.h>
#include <chrono>
#include <limits>
#include <tuple>
/**
* Run the avalanche event loop every 10ms.
*/
static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10};
static const std::string AVAPEERS_FILE_NAME{"avapeers.dat"};
namespace avalanche {
/**
 * Extract the unique id of any votable item: the proof id, block hash or
 * transaction id depending on the variant alternative.
 *
 * The return value is no longer const-qualified: returning a const value
 * inhibits move semantics for no benefit (Core Guidelines F.49).
 */
static uint256 GetVoteItemId(const AnyVoteItem &item) {
    return std::visit(variant::overloaded{
                          [](const ProofRef &proof) {
                              // The local copies force each lambda to deduce
                              // uint256 (not the derived id type), so all
                              // visit alternatives share one return type.
                              uint256 id = proof->getId();
                              return id;
                          },
                          [](const CBlockIndex *pindex) {
                              uint256 hash = pindex->GetBlockHash();
                              return hash;
                          },
                          [](const CTransactionRef &tx) {
                              uint256 id = tx->GetId();
                              return id;
                          },
                      },
                      item);
}
/**
 * Verify a proof against the stake utxo dust threshold and translate any
 * validation failure into a user-facing, translated error message.
 *
 * @param[in] stakeUtxoDustThreshold Minimum amount allowed per stake utxo.
 * @param[in] proof The proof to verify.
 * @param[out] error Set to a message describing the failure, if any.
 * @return true if the proof verifies.
 */
static bool VerifyProof(const Amount &stakeUtxoDustThreshold,
                        const Proof &proof, bilingual_str &error) {
    ProofValidationState proof_state;

    if (!proof.verify(stakeUtxoDustThreshold, proof_state)) {
        // Map each validation result to a dedicated message; anything not
        // listed falls through to a generic error.
        switch (proof_state.GetResult()) {
            case ProofValidationResult::NO_STAKE:
                error = _("The avalanche proof has no stake.");
                return false;
            case ProofValidationResult::DUST_THRESHOLD:
                error = _("The avalanche proof stake is too low.");
                return false;
            case ProofValidationResult::DUPLICATE_STAKE:
                error = _("The avalanche proof has duplicated stake.");
                return false;
            case ProofValidationResult::INVALID_STAKE_SIGNATURE:
                error = _("The avalanche proof has invalid stake signatures.");
                return false;
            case ProofValidationResult::TOO_MANY_UTXOS:
                error = strprintf(
                    _("The avalanche proof has too many utxos (max: %u)."),
                    AVALANCHE_MAX_PROOF_STAKES);
                return false;
            default:
                error = _("The avalanche proof is invalid.");
                return false;
        }
    }

    return true;
}
/**
 * Verify a delegation chain and check that its final delegated key matches
 * the expected public key, translating failures into user-facing messages.
 *
 * @param[in] dg The delegation to verify.
 * @param[in] expectedPubKey The key the chain must ultimately delegate to.
 * @param[out] error Set to a message describing the failure, if any.
 * @return true if the delegation verifies and matches the expected key.
 */
static bool VerifyDelegation(const Delegation &dg,
                             const CPubKey &expectedPubKey,
                             bilingual_str &error) {
    DelegationState dg_state;
    // auth receives the key at the end of the delegation chain.
    CPubKey auth;
    if (!dg.verify(dg_state, auth)) {
        switch (dg_state.GetResult()) {
            case avalanche::DelegationResult::INVALID_SIGNATURE:
                error = _("The avalanche delegation has invalid signatures.");
                return false;
            case avalanche::DelegationResult::TOO_MANY_LEVELS:
                error = _(
                    "The avalanche delegation has too many delegation levels.");
                return false;
            default:
                error = _("The avalanche delegation is invalid.");
                return false;
        }
    }

    if (auth != expectedPubKey) {
        error = _(
            "The avalanche delegation does not match the expected public key.");
        return false;
    }

    return true;
}
/**
 * Our own proof and the delegation used for this session, along with the
 * proof's registration state.
 */
struct Processor::PeerData {
    ProofRef proof;
    Delegation delegation;

    mutable Mutex cs_proofState;
    // Registration outcome of our local proof, guarded by its own mutex.
    ProofRegistrationState proofState GUARDED_BY(cs_proofState);
};
/**
 * Relays chain notifications (new block tip, mempool additions) to the
 * owning Processor.
 */
class Processor::NotificationsHandler
    : public interfaces::Chain::Notifications {
    Processor *m_processor;

public:
    NotificationsHandler(Processor *p) : m_processor(p) {}

    void updatedBlockTip() override { m_processor->updatedBlockTip(); }

    // The mempool sequence number is not needed by the processor.
    void transactionAddedToMempool(const CTransactionRef &tx,
                                   uint64_t mempool_sequence) override {
        m_processor->transactionAddedToMempool(tx);
    }
};
Processor::Processor(Config avaconfigIn, interfaces::Chain &chain,
CConnman *connmanIn, ChainstateManager &chainmanIn,
CTxMemPool *mempoolIn, CScheduler &scheduler,
std::unique_ptr<PeerData> peerDataIn, CKey sessionKeyIn,
uint32_t minQuorumTotalScoreIn,
double minQuorumConnectedScoreRatioIn,
int64_t minAvaproofsNodeCountIn,
uint32_t staleVoteThresholdIn, uint32_t staleVoteFactorIn,
Amount stakeUtxoDustThreshold, bool preConsensus)
: avaconfig(std::move(avaconfigIn)), connman(connmanIn),
chainman(chainmanIn), mempool(mempoolIn),
voteRecords(RWCollection<VoteMap>(VoteMap(VoteMapComparator(mempool)))),
round(0), peerManager(std::make_unique<PeerManager>(
stakeUtxoDustThreshold, chainman,
peerDataIn ? peerDataIn->proof : ProofRef())),
peerData(std::move(peerDataIn)), sessionKey(std::move(sessionKeyIn)),
minQuorumScore(minQuorumTotalScoreIn),
minQuorumConnectedScoreRatio(minQuorumConnectedScoreRatioIn),
minAvaproofsNodeCount(minAvaproofsNodeCountIn),
staleVoteThreshold(staleVoteThresholdIn),
staleVoteFactor(staleVoteFactorIn), m_preConsensus(preConsensus) {
// Make sure we get notified of chain state changes.
chainNotificationsHandler =
chain.handleNotifications(std::make_shared<NotificationsHandler>(this));
scheduler.scheduleEvery(
[this]() -> bool {
std::unordered_set<ProofRef, SaltedProofHasher> registeredProofs;
WITH_LOCK(cs_peerManager,
peerManager->cleanupDanglingProofs(registeredProofs));
for (const auto &proof : registeredProofs) {
LogPrint(BCLog::AVALANCHE,
"Promoting previously dangling proof %s\n",
proof->getId().ToString());
reconcileOrFinalize(proof);
}
return true;
},
5min);
if (!gArgs.GetBoolArg("-persistavapeers", DEFAULT_PERSIST_AVAPEERS)) {
return;
}
std::unordered_set<ProofRef, SaltedProofHasher> registeredProofs;
// Attempt to load the peer file if it exists.
const fs::path dumpPath = gArgs.GetDataDirNet() / AVAPEERS_FILE_NAME;
WITH_LOCK(cs_peerManager, return peerManager->loadPeersFromFile(
dumpPath, registeredProofs));
// We just loaded the previous finalization status, but make sure to trigger
// another round of vote for these proofs to avoid issue if the network
// status changed since the peers file was dumped.
for (const auto &proof : registeredProofs) {
addToReconcile(proof);
}
LogPrint(BCLog::AVALANCHE, "Loaded %d peers from the %s file\n",
registeredProofs.size(), PathToString(dumpPath));
}
Processor::~Processor() {
    // Stop receiving chain notifications and halt the event loop before
    // tearing down the rest of the state.
    chainNotificationsHandler.reset();
    stopEventLoop();

    if (!gArgs.GetBoolArg("-persistavapeers", DEFAULT_PERSIST_AVAPEERS)) {
        return;
    }

    LOCK(cs_peerManager);
    // Discard the status output: if it fails we want to continue normally.
    peerManager->dumpPeersToFile(gArgs.GetDataDirNet() / AVAPEERS_FILE_NAME);
}
std::unique_ptr<Processor>
Processor::MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain,
CConnman *connman, ChainstateManager &chainman,
CTxMemPool *mempool, CScheduler &scheduler,
bilingual_str &error) {
std::unique_ptr<PeerData> peerData;
CKey masterKey;
CKey sessionKey;
Amount stakeUtxoDustThreshold = PROOF_DUST_THRESHOLD;
if (argsman.IsArgSet("-avaproofstakeutxodustthreshold") &&
!ParseMoney(argsman.GetArg("-avaproofstakeutxodustthreshold", ""),
stakeUtxoDustThreshold)) {
error = _("The avalanche stake utxo dust threshold amount is invalid.");
return nullptr;
}
if (argsman.IsArgSet("-avasessionkey")) {
sessionKey = DecodeSecret(argsman.GetArg("-avasessionkey", ""));
if (!sessionKey.IsValid()) {
error = _("The avalanche session key is invalid.");
return nullptr;
}
} else {
// Pick a random key for the session.
sessionKey.MakeNewKey(true);
}
if (argsman.IsArgSet("-avaproof")) {
if (!argsman.IsArgSet("-avamasterkey")) {
error = _(
"The avalanche master key is missing for the avalanche proof.");
return nullptr;
}
masterKey = DecodeSecret(argsman.GetArg("-avamasterkey", ""));
if (!masterKey.IsValid()) {
error = _("The avalanche master key is invalid.");
return nullptr;
}
auto proof = RCUPtr<Proof>::make();
if (!Proof::FromHex(*proof, argsman.GetArg("-avaproof", ""), error)) {
// error is set by FromHex
return nullptr;
}
peerData = std::make_unique<PeerData>();
peerData->proof = proof;
if (!VerifyProof(stakeUtxoDustThreshold, *peerData->proof, error)) {
// error is set by VerifyProof
return nullptr;
}
std::unique_ptr<DelegationBuilder> dgb;
const CPubKey &masterPubKey = masterKey.GetPubKey();
if (argsman.IsArgSet("-avadelegation")) {
Delegation dg;
if (!Delegation::FromHex(dg, argsman.GetArg("-avadelegation", ""),
error)) {
// error is set by FromHex()
return nullptr;
}
if (dg.getProofId() != peerData->proof->getId()) {
error = _("The delegation does not match the proof.");
return nullptr;
}
if (masterPubKey != dg.getDelegatedPubkey()) {
error = _(
"The master key does not match the delegation public key.");
return nullptr;
}
dgb = std::make_unique<DelegationBuilder>(dg);
} else {
if (masterPubKey != peerData->proof->getMaster()) {
error =
_("The master key does not match the proof public key.");
return nullptr;
}
dgb = std::make_unique<DelegationBuilder>(*peerData->proof);
}
// Generate the delegation to the session key.
const CPubKey sessionPubKey = sessionKey.GetPubKey();
if (sessionPubKey != masterPubKey) {
if (!dgb->addLevel(masterKey, sessionPubKey)) {
error = _("Failed to generate a delegation for this session.");
return nullptr;
}
}
peerData->delegation = dgb->build();
if (!VerifyDelegation(peerData->delegation, sessionPubKey, error)) {
// error is set by VerifyDelegation
return nullptr;
}
}
const auto queryTimeoutDuration =
std::chrono::milliseconds(argsman.GetIntArg(
"-avatimeout", AVALANCHE_DEFAULT_QUERY_TIMEOUT.count()));
// Determine quorum parameters
Amount minQuorumStake = AVALANCHE_DEFAULT_MIN_QUORUM_STAKE;
if (argsman.IsArgSet("-avaminquorumstake") &&
!ParseMoney(argsman.GetArg("-avaminquorumstake", ""), minQuorumStake)) {
error = _("The avalanche min quorum stake amount is invalid.");
return nullptr;
}
if (!MoneyRange(minQuorumStake)) {
error = _("The avalanche min quorum stake amount is out of range.");
return nullptr;
}
double minQuorumConnectedStakeRatio =
AVALANCHE_DEFAULT_MIN_QUORUM_CONNECTED_STAKE_RATIO;
if (argsman.IsArgSet("-avaminquorumconnectedstakeratio") &&
!ParseDouble(argsman.GetArg("-avaminquorumconnectedstakeratio", ""),
&minQuorumConnectedStakeRatio)) {
error = _("The avalanche min quorum connected stake ratio is invalid.");
return nullptr;
}
if (minQuorumConnectedStakeRatio < 0 || minQuorumConnectedStakeRatio > 1) {
error = _(
"The avalanche min quorum connected stake ratio is out of range.");
return nullptr;
}
int64_t minAvaproofsNodeCount =
argsman.GetIntArg("-avaminavaproofsnodecount",
AVALANCHE_DEFAULT_MIN_AVAPROOFS_NODE_COUNT);
if (minAvaproofsNodeCount < 0) {
error = _("The minimum number of node that sent avaproofs message "
"should be non-negative");
return nullptr;
}
// Determine voting parameters
int64_t staleVoteThreshold = argsman.GetIntArg(
"-avastalevotethreshold", AVALANCHE_VOTE_STALE_THRESHOLD);
if (staleVoteThreshold < AVALANCHE_VOTE_STALE_MIN_THRESHOLD) {
error = strprintf(_("The avalanche stale vote threshold must be "
"greater than or equal to %d"),
AVALANCHE_VOTE_STALE_MIN_THRESHOLD);
return nullptr;
}
if (staleVoteThreshold > std::numeric_limits<uint32_t>::max()) {
error = strprintf(_("The avalanche stale vote threshold must be less "
"than or equal to %d"),
std::numeric_limits<uint32_t>::max());
return nullptr;
}
int64_t staleVoteFactor =
argsman.GetIntArg("-avastalevotefactor", AVALANCHE_VOTE_STALE_FACTOR);
if (staleVoteFactor <= 0) {
error = _("The avalanche stale vote factor must be greater than 0");
return nullptr;
}
if (staleVoteFactor > std::numeric_limits<uint32_t>::max()) {
error = strprintf(_("The avalanche stale vote factor must be less than "
"or equal to %d"),
std::numeric_limits<uint32_t>::max());
return nullptr;
}
Config avaconfig(queryTimeoutDuration);
// We can't use std::make_unique with a private constructor
return std::unique_ptr<Processor>(new Processor(
std::move(avaconfig), chain, connman, chainman, mempool, scheduler,
std::move(peerData), std::move(sessionKey),
Proof::amountToScore(minQuorumStake), minQuorumConnectedStakeRatio,
minAvaproofsNodeCount, staleVoteThreshold, staleVoteFactor,
stakeUtxoDustThreshold,
argsman.GetBoolArg("-avalanchepreconsensus",
DEFAULT_AVALANCHE_PRECONSENSUS)));
}
/**
 * Check whether a vote item is effectively null: either the variant holds no
 * value (valueless by exception) or the contained pointer is null.
 *
 * The lambda parameter is renamed so it no longer shadows the outer `item`,
 * and the stray semicolon after the function body is removed.
 */
static bool isNull(const AnyVoteItem &item) {
    return item.valueless_by_exception() ||
           std::visit([](const auto &ptr) { return ptr == nullptr; }, item);
}
/**
 * Start voting on an item.
 *
 * @param item The item to reconcile via avalanche voting.
 * @return true if the item was newly inserted into the vote records, false
 *         if it is null, not worth polling, or already being voted on.
 */
bool Processor::addToReconcile(const AnyVoteItem &item) {
    // Null items (valueless variant or null pointer) cannot be polled.
    if (isNull(item)) {
        return false;
    }
    // Skip items that are invalid, recently finalized or otherwise not
    // eligible for polling.
    if (!isWorthPolling(item)) {
        return false;
    }
    // getLocalAcceptance() takes the voteRecords read lock, so we can't inline
    // the calls or we get a deadlock.
    const bool accepted = getLocalAcceptance(item);
    // insert() returns false on .second if the item is already tracked.
    return voteRecords.getWriteView()
        ->insert(std::make_pair(item, VoteRecord(accepted)))
        .second;
}
/**
 * Start voting on a proof, or restore its finalization flag if it was
 * recently finalized (in which case there is no point polling it again).
 *
 * @param proof The proof to reconcile; null proofs are rejected.
 * @return true on successful reconciliation or peer re-finalization.
 */
bool Processor::reconcileOrFinalize(const ProofRef &proof) {
    if (!proof) {
        return false;
    }
    if (isRecentlyFinalized(proof->getId())) {
        PeerId peerid;
        LOCK(cs_peerManager);
        // forPeer() only returns true if the proof is bound to a peer, in
        // which case the lambda has filled in peerid.
        if (peerManager->forPeer(proof->getId(), [&](const Peer &peer) {
                peerid = peer.peerid;
                return true;
            })) {
            return peerManager->setFinalized(peerid);
        }
    }
    // Not recently finalized (or no bound peer): vote on it.
    return addToReconcile(proof);
}
/**
 * Whether the item is currently considered accepted by the ongoing vote.
 * Returns false for null items and for items we are not voting on.
 */
bool Processor::isAccepted(const AnyVoteItem &item) const {
    if (isNull(item)) {
        return false;
    }
    auto r = voteRecords.getReadView();
    auto it = r->find(item);
    if (it == r.end()) {
        // Not being voted on.
        return false;
    }
    return it->second.isAccepted();
}
/**
 * Get the current vote confidence for an item.
 *
 * @return The VoteRecord confidence, or -1 if the item is null or not being
 *         voted on.
 */
int Processor::getConfidence(const AnyVoteItem &item) const {
    if (isNull(item)) {
        return -1;
    }
    auto r = voteRecords.getReadView();
    auto it = r->find(item);
    if (it == r.end()) {
        // Not being voted on.
        return -1;
    }
    return it->second.getConfidence();
}
// Whether this item id is in the recently-finalized filter. NOTE(review):
// finalizedItems appears to be a rolling filter (it supports contains/reset/
// insert and is sized by AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS), so
// false positives may be possible — confirm against the member declaration.
bool Processor::isRecentlyFinalized(const uint256 &itemId) const {
    return WITH_LOCK(cs_finalizedItems, return finalizedItems.contains(itemId));
}
// Empty the recently-finalized items filter, allowing previously finalized
// items to be polled again.
void Processor::clearFinalizedItems() {
    LOCK(cs_finalizedItems);
    finalizedItems.reset();
}
namespace {
/**
 * When using TCP, we need to sign all messages as the transport layer is
 * not secure.
 */
class TCPResponse {
    Response response;
    // Schnorr signature of the hash of the serialized response, produced
    // with the node's session key.
    SchnorrSig sig;

public:
    TCPResponse(Response responseIn, const CKey &key)
        : response(std::move(responseIn)) {
        // Hash the serialized response; this is the message that gets signed.
        CHashWriter hasher(SER_GETHASH, 0);
        hasher << response;
        const uint256 hash = hasher.GetHash();

        // Now let's sign!
        if (!key.SignSchnorr(hash, sig)) {
            // Signing failed: send a deterministic all-zero signature
            // rather than uninitialized bytes.
            sig.fill(0);
        }
    }

    // serialization support
    SERIALIZE_METHODS(TCPResponse, obj) {
        READWRITE(obj.response, obj.sig);
    }
};
} // namespace
// Send a signed avalanche response message to a peer.
void Processor::sendResponse(CNode *pfrom, Response response) const {
    connman->PushMessage(
        pfrom, CNetMsgMaker(pfrom->GetCommonVersion())
                   .Make(NetMsgType::AVARESPONSE,
                         TCPResponse(std::move(response), sessionKey)));
}
/**
 * Register the votes contained in an avalanche response from a node.
 *
 * Validates the response against the matching in-flight query, feeds each
 * vote into the corresponding VoteRecord, and reports state transitions
 * through `updates`. Finalized/invalid/stale items are removed from the
 * vote records; finalized blocks additionally advance the finalization tip
 * and invalid blocks are remembered so they are never polled again.
 *
 * @param nodeid   The node the response came from.
 * @param response The response message.
 * @param updates  Out: one entry per item whose vote status changed.
 * @param banscore Out: suggested misbehavior score (0 or 100).
 * @param error    Out: reject reason when returning false.
 * @return false if the response does not match a pending query or is
 *         inconsistent with it.
 */
bool Processor::registerVotes(NodeId nodeid, const Response &response,
                              std::vector<VoteItemUpdate> &updates,
                              int &banscore, std::string &error) {
    {
        // Save the time at which we can query again.
        LOCK(cs_peerManager);

        // FIXME: This will override the time even when we received an old stale
        // message. This should check that the message is indeed the most up to
        // date one before updating the time.
        peerManager->updateNextRequestTime(
            nodeid, Now<SteadyMilliseconds>() +
                        std::chrono::milliseconds(response.getCooldown()));
    }

    std::vector<CInv> invs;

    {
        // Check that the query exists. There is a possibility that it has been
        // deleted if the query timed out, so we don't increase the ban score to
        // slowly banning nodes for poor networking over time. Banning has to be
        // handled at callsite to avoid DoS.
        auto w = queries.getWriteView();
        auto it = w->find(std::make_tuple(nodeid, response.getRound()));
        if (it == w.end()) {
            banscore = 0;
            error = "unexpected-ava-response";
            return false;
        }

        invs = std::move(it->invs);
        w->erase(it);
    }

    // Verify that the request and the vote are consistent.
    const std::vector<Vote> &votes = response.GetVotes();
    size_t size = invs.size();
    if (votes.size() != size) {
        banscore = 100;
        error = "invalid-ava-response-size";
        return false;
    }

    for (size_t i = 0; i < size; i++) {
        if (invs[i].hash != votes[i].GetHash()) {
            banscore = 100;
            error = "invalid-ava-response-content";
            return false;
        }
    }

    // Map the votes back onto the items they refer to, ordered by the poll
    // priority defined by VoteMapComparator.
    std::map<AnyVoteItem, Vote, VoteMapComparator> responseItems(
        (VoteMapComparator(mempool)));

    // At this stage we are certain that invs[i] matches votes[i], so we can use
    // the inv type to retrieve what is being voted on.
    for (size_t i = 0; i < size; i++) {
        auto item = getVoteItemFromInv(invs[i]);

        if (isNull(item)) {
            // This should not happen, but just in case...
            continue;
        }

        if (!isWorthPolling(item)) {
            // There is no point polling this item.
            continue;
        }

        responseItems.insert(std::make_pair(std::move(item), votes[i]));
    }

    auto voteRecordsWriteView = voteRecords.getWriteView();

    // Register votes.
    for (const auto &p : responseItems) {
        auto item = p.first;
        const Vote &v = p.second;

        auto it = voteRecordsWriteView->find(item);
        if (it == voteRecordsWriteView.end()) {
            // We are not voting on that item anymore.
            continue;
        }

        auto &vr = it->second;
        if (!vr.registerVote(nodeid, v.GetError())) {
            // The vote didn't change the record's state; check for staleness.
            if (vr.isStale(staleVoteThreshold, staleVoteFactor)) {
                updates.emplace_back(std::move(item), VoteStatus::Stale);

                // Just drop stale votes. If we see this item again, we'll
                // do a new vote.
                voteRecordsWriteView->erase(it);
            }
            // This vote did not provide any extra information, move on.
            continue;
        }

        if (!vr.hasFinalized()) {
            // This item has not been finalized, so we have nothing more to
            // do.
            updates.emplace_back(std::move(item), vr.isAccepted()
                                                      ? VoteStatus::Accepted
                                                      : VoteStatus::Rejected);
            continue;
        }

        // We just finalized a vote. If it is valid, then let the caller
        // know. Either way, remove the item from the map.
        updates.emplace_back(std::move(item), vr.isAccepted()
                                                  ? VoteStatus::Finalized
                                                  : VoteStatus::Invalid);
        voteRecordsWriteView->erase(it);
    }

    // FIXME This doesn't belong here as it has nothing to do with vote
    // registration.
    for (const auto &update : updates) {
        if (update.getStatus() != VoteStatus::Finalized &&
            update.getStatus() != VoteStatus::Invalid) {
            continue;
        }

        const auto &item = update.getVoteItem();

        if (update.getStatus() == VoteStatus::Finalized) {
            // Always track finalized items regardless of type. Once finalized
            // they should never become invalid.
            WITH_LOCK(cs_finalizedItems,
                      return finalizedItems.insert(GetVoteItemId(item)));
        }

        if (!std::holds_alternative<const CBlockIndex *>(item)) {
            // The remaining logic only applies to blocks.
            continue;
        }

        if (update.getStatus() == VoteStatus::Invalid) {
            // Track invalidated blocks. Other invalidated types are not
            // tracked because they may be rejected for transient reasons
            // (ex: immature proofs or orphaned txs) With blocks this is not
            // the case. A rejected block will not be mined on. To prevent
            // reorgs, invalidated blocks should never be polled again.
            LOCK(cs_invalidatedBlocks);
            invalidatedBlocks.insert(GetVoteItemId(item));
            continue;
        }

        // At this point the block index can only be finalized
        const CBlockIndex *pindex = std::get<const CBlockIndex *>(item);
        LOCK(cs_finalizationTip);
        if (finalizationTip &&
            finalizationTip->GetAncestor(pindex->nHeight) == pindex) {
            // Already an ancestor of the current finalization tip; keep the
            // deeper tip.
            continue;
        }

        finalizationTip = pindex;
    }

    return true;
}
// Public key matching the session key used to sign polls and responses.
CPubKey Processor::getSessionPubKey() const {
    return sessionKey.GetPubKey();
}
/**
 * Build, sign and send an avahello message to a peer.
 *
 * If we have a local proof but cannot share it yet (no inbound connection),
 * the node id is queued in delayedAvahelloNodeIds and a hello with an empty
 * delegation is sent instead.
 *
 * @return true if a non-null delegation was announced.
 */
bool Processor::sendHelloInternal(CNode *pfrom) {
    AssertLockHeld(cs_delayedAvahelloNodeIds);

    Delegation delegation;
    if (peerData) {
        if (!canShareLocalProof()) {
            if (!delayedAvahelloNodeIds.emplace(pfrom->GetId()).second) {
                // Nothing to do
                return false;
            }
        } else {
            delegation = peerData->delegation;
        }
    }

    // Sign over the delegation id and the connection nonces/entropy so the
    // hello is bound to this specific connection.
    CHashWriter hasher(SER_GETHASH, 0);
    hasher << delegation.getId();
    hasher << pfrom->GetLocalNonce();
    hasher << pfrom->nRemoteHostNonce;
    hasher << pfrom->GetLocalExtraEntropy();
    hasher << pfrom->nRemoteExtraEntropy;

    // Now let's sign!
    SchnorrSig sig;
    if (!sessionKey.SignSchnorr(hasher.GetHash(), sig)) {
        return false;
    }

    connman->PushMessage(
        pfrom, CNetMsgMaker(pfrom->GetCommonVersion())
                   .Make(NetMsgType::AVAHELLO, Hello(delegation, sig)));

    // A default-constructed delegation has a zero limited proof id.
    return delegation.getLimitedProofId() != uint256::ZERO;
}
// Public wrapper taking the delayed-avahello lock around sendHelloInternal().
bool Processor::sendHello(CNode *pfrom) {
    return WITH_LOCK(cs_delayedAvahelloNodeIds,
                     return sendHelloInternal(pfrom));
}
// Retry announcing our proof to the nodes whose avahello was delayed
// (because the local proof could not be shared at the time). Nodes that
// received the announcement are removed from the queue.
void Processor::sendDelayedAvahello() {
    LOCK(cs_delayedAvahelloNodeIds);

    auto it = delayedAvahelloNodeIds.begin();
    while (it != delayedAvahelloNodeIds.end()) {
        if (connman->ForNode(*it, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
                                      cs_delayedAvahelloNodeIds) {
                return sendHelloInternal(pnode);
            })) {
            // Our proof has been announced to this node
            it = delayedAvahelloNodeIds.erase(it);
        } else {
            // Either the node is gone or the hello could not be sent yet;
            // keep it queued.
            ++it;
        }
    }
}
// Return our own proof, or a null ProofRef when this node has no peer data
// (i.e. it does not participate with a proof).
ProofRef Processor::getLocalProof() const {
    if (!peerData) {
        return ProofRef();
    }
    return peerData->proof;
}
// Return the registration state of our own proof, or a default-constructed
// state when this node has no peer data.
ProofRegistrationState Processor::getLocalProofRegistrationState() const {
    return peerData
               ? WITH_LOCK(peerData->cs_proofState, return peerData->proofState)
               : ProofRegistrationState();
}
// Schedule runEventLoop() to be called every AVALANCHE_TIME_STEP.
bool Processor::startEventLoop(CScheduler &scheduler) {
    return eventLoop.startEventLoop(
        scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP);
}
// Stop the periodic polling loop.
bool Processor::stopEventLoop() {
    return eventLoop.stopEventLoop();
}
// Account for a node having sent us its avaproofs message. The counter
// feeds the minAvaproofsNodeCount quorum prerequisite.
void Processor::avaproofsSent(NodeId nodeid) {
    AssertLockNotHeld(cs_main);

    if (chainman.ActiveChainstate().IsInitialBlockDownload()) {
        // Before IBD is complete there is no way to make sure a proof is valid
        // or not, e.g. it can be spent in a block we don't know yet. In order
        // to increase confidence that our proof set is similar to other nodes
        // on the network, the messages received during IBD are not accounted.
        return;
    }

    LOCK(cs_peerManager);
    // latchAvaproofsSent() returns true only the first time for a given
    // node, so each node is counted at most once.
    if (peerManager->latchAvaproofsSent(nodeid)) {
        avaproofsNodeCounter++;
    }
}
/*
 * Returns a bool indicating whether we have a usable Avalanche quorum enabling
 * us to take decisions based on polls.
 */
bool Processor::isQuorumEstablished() {
    AssertLockNotHeld(cs_main);

    {
        LOCK(cs_peerManager);
        // NOTE(review): 8 presumably matches the number of nodes queried per
        // poll — confirm against the poll fanout constant.
        if (peerManager->getNodeCount() < 8) {
            // There is no point polling if we know the vote cannot converge
            return false;
        }
    }

    /*
     * The following parameters can naturally go temporarly below the threshold
     * under normal circumstances, like during a proof replacement with a lower
     * stake amount, or the discovery of a new proofs for which we don't have a
     * node yet.
     * In order to prevent our node from starting and stopping the polls
     * spuriously on such event, the quorum establishement is latched. The only
     * parameters that should not latched is the minimum node count, as this
     * would cause the poll to be inconclusive anyway and should not happen
     * under normal circumstances.
     */
    if (quorumIsEstablished) {
        return true;
    }

    // Don't do Avalanche while node is IBD'ing
    if (chainman.ActiveChainstate().IsInitialBlockDownload()) {
        return false;
    }

    // Require enough nodes to have shared their proof set with us.
    if (avaproofsNodeCounter < minAvaproofsNodeCount) {
        return false;
    }

    auto localProof = getLocalProof();

    // Get the registered proof score and registered score we have nodes for
    uint32_t totalPeersScore;
    uint32_t connectedPeersScore;
    {
        LOCK(cs_peerManager);
        totalPeersScore = peerManager->getTotalPeersScore();
        connectedPeersScore = peerManager->getConnectedPeersScore();

        // Consider that we are always connected to our proof, even if we are
        // the single node using that proof.
        if (localProof &&
            peerManager->forPeer(localProof->getId(), [](const Peer &peer) {
                return peer.node_count == 0;
            })) {
            connectedPeersScore += localProof->getScore();
        }
    }

    // Ensure enough is being staked overall
    if (totalPeersScore < minQuorumScore) {
        return false;
    }

    // Ensure we have connected score for enough of the overall score
    uint32_t minConnectedScore =
        std::round(double(totalPeersScore) * minQuorumConnectedScoreRatio);
    if (connectedPeersScore < minConnectedScore) {
        return false;
    }

    // All prerequisites met: latch the quorum.
    quorumIsEstablished = true;

    // Attempt to compute the staking rewards winner now so we don't have to
    // wait for a block if we already have all the prerequisites.
    const CBlockIndex *pprev = WITH_LOCK(cs_main, return chainman.ActiveTip());
    if (pprev && IsStakingRewardsActivated(chainman.GetConsensus(), pprev)) {
        computeStakingReward(pprev);
    }

    return true;
}
// Whether we are allowed to announce our local proof to peers. The flag is
// latched: once it turns true it stays true.
bool Processor::canShareLocalProof() {
    if (!m_canShareLocalProof) {
        // Don't share our proof if we don't have any inbound connection.
        // This is a best effort measure to prevent advertising a proof if we
        // have limited network connectivity.
        m_canShareLocalProof =
            connman->GetNodeCount(CConnman::CONNECTIONS_IN) > 0;
    }

    return m_canShareLocalProof;
}
/**
 * Compute and cache the staking reward winners for the block following
 * pindex. No-op (returning true) if the winners are already cached.
 *
 * @param pindex The block the rewards are computed on top of.
 * @return true if winners are available for pindex's block hash.
 */
bool Processor::computeStakingReward(const CBlockIndex *pindex) {
    if (!pindex) {
        return false;
    }

    // If the quorum is not established there is no point picking a winner that
    // will be rejected.
    if (!isQuorumEstablished()) {
        return false;
    }

    {
        LOCK(cs_stakingRewards);
        // Already computed for this block.
        if (stakingRewards.count(pindex->GetBlockHash()) > 0) {
            return true;
        }
    }

    StakingReward _stakingRewards;
    _stakingRewards.blockheight = pindex->nHeight;

    // Winner selection is delegated to the peer manager; only cache on
    // success.
    if (WITH_LOCK(cs_peerManager, return peerManager->selectStakingRewardWinner(
                                      pindex, _stakingRewards.winners))) {
        LOCK(cs_stakingRewards);
        return stakingRewards
            .emplace(pindex->GetBlockHash(), std::move(_stakingRewards))
            .second;
    }

    return false;
}
// Drop the cached staking reward winners for a block. Returns true if an
// entry was actually removed.
bool Processor::eraseStakingRewardWinner(const BlockHash &prevBlockHash) {
    LOCK(cs_stakingRewards);
    return stakingRewards.erase(prevBlockHash) > 0;
}
// Prune cached staking reward entries computed for blocks below minHeight.
void Processor::cleanupStakingRewards(const int minHeight) {
    LOCK(cs_stakingRewards);
    // std::erase_if is only defined since C++20
    auto entry = stakingRewards.begin();
    while (entry != stakingRewards.end()) {
        if (entry->second.blockheight < minHeight) {
            entry = stakingRewards.erase(entry);
        } else {
            ++entry;
        }
    }
}
-bool Processor::getStakingRewardWinners(const BlockHash &prevBlockHash,
- std::vector<CScript> &winners) const {
+bool Processor::getStakingRewardWinners(
+ const BlockHash &prevBlockHash,
+ std::vector<std::pair<ProofId, CScript>> &winners) const {
LOCK(cs_stakingRewards);
auto it = stakingRewards.find(prevBlockHash);
if (it == stakingRewards.end()) {
return false;
}
winners = it->second.winners;
return true;
}
+bool Processor::getStakingRewardWinners(const BlockHash &prevBlockHash,
+ std::vector<CScript> &payouts) const {
+ std::vector<std::pair<ProofId, CScript>> winners;
+ if (!getStakingRewardWinners(prevBlockHash, winners)) {
+ return false;
+ }
+
+ payouts.clear();
+ payouts.reserve(winners.size());
+ for (auto &winner : winners) {
+ payouts.push_back(std::move(winner.second));
+ }
+
+ return true;
+}
+
bool Processor::setStakingRewardWinners(const CBlockIndex *pprev,
- const std::vector<CScript> &winners) {
+ const std::vector<CScript> &payouts) {
assert(pprev);
StakingReward stakingReward;
stakingReward.blockheight = pprev->nHeight;
- stakingReward.winners = winners;
+
+ stakingReward.winners.reserve(payouts.size());
+ for (const CScript &payout : payouts) {
+ stakingReward.winners.push_back({ProofId(), payout});
+ }
LOCK(cs_stakingRewards);
return stakingRewards.insert_or_assign(pprev->GetBlockHash(), stakingReward)
.second;
}
// NetEventsInterface hook: forget a disconnecting node in the peer manager
// and drop any pending delayed avahello announcement for it.
void Processor::FinalizeNode(const ::Config &config, const CNode &node) {
    AssertLockNotHeld(cs_main);

    const NodeId nodeid = node.GetId();
    WITH_LOCK(cs_peerManager, peerManager->removeNode(nodeid));
    WITH_LOCK(cs_delayedAvahelloNodeIds, delayedAvahelloNodeIds.erase(nodeid));
}
// Called when the chain tip changes: re-register proofs whose validity may
// have changed (including our own), refresh the local proof registration
// state, and reconcile-or-finalize every newly registered proof.
void Processor::updatedBlockTip() {
    // Evaluate this outside the lambda to avoid taking locks inside it.
    const bool registerLocalProof = canShareLocalProof();
    auto registerProofs = [&]() {
        LOCK(cs_peerManager);

        auto registeredProofs = peerManager->updatedBlockTip();

        ProofRegistrationState localProofState;
        if (peerData && peerData->proof && registerLocalProof) {
            if (peerManager->registerProof(peerData->proof, localProofState)) {
                registeredProofs.insert(peerData->proof);
            }

            if (localProofState.GetResult() ==
                ProofRegistrationResult::ALREADY_REGISTERED) {
                // If our proof already exists, that's fine but we don't want to
                // erase the state with a duplicated proof status, so let's
                // retrieve the proper state. It also means we are able to
                // update the status should the proof move from one pool to the
                // other.
                const ProofId &localProofId = peerData->proof->getId();
                if (peerManager->isImmature(localProofId)) {
                    localProofState.Invalid(ProofRegistrationResult::IMMATURE,
                                            "immature-proof");
                }
                if (peerManager->isInConflictingPool(localProofId)) {
                    localProofState.Invalid(
                        ProofRegistrationResult::CONFLICTING,
                        "conflicting-utxos");
                }
                if (peerManager->isBoundToPeer(localProofId)) {
                    // Bound to a peer: the proof is fully valid.
                    localProofState = ProofRegistrationState();
                }
            }

            WITH_LOCK(peerData->cs_proofState,
                      peerData->proofState = std::move(localProofState));
        }

        return registeredProofs;
    };

    auto registeredProofs = registerProofs();
    for (const auto &proof : registeredProofs) {
        reconcileOrFinalize(proof);
    }
}
// When pre-consensus is enabled, start voting on transactions as soon as
// they enter the mempool.
void Processor::transactionAddedToMempool(const CTransactionRef &tx) {
    if (m_preConsensus) {
        addToReconcile(tx);
    }
}
// Periodic polling step: gather the invs to vote on, pick a node and send
// it an AVAPOLL message, registering the query so the response (or its
// timeout) can be matched later.
void Processor::runEventLoop() {
    // Don't poll if quorum hasn't been established yet
    if (!isQuorumEstablished()) {
        return;
    }

    // First things first, check if we have requests that timed out and clear
    // them.
    clearTimedoutRequests();

    // Make sure there is at least one suitable node to query before gathering
    // invs.
    NodeId nodeid = WITH_LOCK(cs_peerManager, return peerManager->selectNode());
    if (nodeid == NO_NODE) {
        return;
    }
    std::vector<CInv> invs = getInvsForNextPoll();
    if (invs.empty()) {
        return;
    }

    LOCK(cs_peerManager);

    do {
        /**
         * If we lost contact to that node, then we remove it from nodeids, but
         * never add the request to queries, which ensures bad nodes get cleaned
         * up over time.
         */
        bool hasSent = connman->ForNode(
            nodeid, [this, &invs](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
                        cs_peerManager) {
                // Each query gets a fresh, monotonically increasing round id.
                uint64_t current_round = round++;

                {
                    // Compute the time at which this requests times out.
                    auto timeout = Now<SteadyMilliseconds>() +
                                   avaconfig.queryTimeoutDuration;
                    // Register the query.
                    queries.getWriteView()->insert(
                        {pnode->GetId(), current_round, timeout, invs});
                    // Set the timeout.
                    peerManager->updateNextRequestTime(pnode->GetId(), timeout);
                }

                pnode->invsPolled(invs.size());

                // Send the query to the node.
                connman->PushMessage(
                    pnode, CNetMsgMaker(pnode->GetCommonVersion())
                               .Make(NetMsgType::AVAPOLL,
                                     Poll(current_round, std::move(invs))));
                return true;
            });

        // Success!
        if (hasSent) {
            return;
        }

        // This node is obsolete, delete it.
        peerManager->removeNode(nodeid);

        // Get next suitable node to try again
        nodeid = peerManager->selectNode();
    } while (nodeid != NO_NODE);
}
// Remove queries whose timeout has passed and decrement the in-flight
// request counter of every item they polled, so those items become eligible
// for polling again.
void Processor::clearTimedoutRequests() {
    auto now = Now<SteadyMilliseconds>();
    // Per-inv count of how many timed-out queries included it.
    std::map<CInv, uint8_t> timedout_items{};

    {
        // Clear expired requests.
        auto w = queries.getWriteView();
        // The query_timeout index is ordered by timeout, so expired entries
        // are all at the front.
        auto it = w->get<query_timeout>().begin();
        while (it != w->get<query_timeout>().end() && it->timeout < now) {
            for (const auto &i : it->invs) {
                timedout_items[i]++;
            }

            w->get<query_timeout>().erase(it++);
        }
    }

    if (timedout_items.empty()) {
        return;
    }

    // In flight request accounting.
    auto voteRecordsWriteView = voteRecords.getWriteView();
    for (const auto &p : timedout_items) {
        auto item = getVoteItemFromInv(p.first);

        if (isNull(item)) {
            continue;
        }

        auto it = voteRecordsWriteView->find(item);
        if (it == voteRecordsWriteView.end()) {
            // No longer voting on this item.
            continue;
        }

        it->second.clearInflightRequest(p.second);
    }
}
/**
 * Build the inventory list for the next poll, capped at
 * AVALANCHE_MAX_ELEMENT_POLL entries.
 *
 * @param forPoll When true, count this call as an actual poll against each
 *                item's in-flight budget (registerPoll); when false, only
 *                check eligibility (shouldPoll).
 */
std::vector<CInv> Processor::getInvsForNextPoll(bool forPoll) {
    std::vector<CInv> invs;

    {
        // First remove all items that are not worth polling.
        auto w = voteRecords.getWriteView();
        for (auto it = w->begin(); it != w->end();) {
            if (!isWorthPolling(it->first)) {
                it = w->erase(it);
            } else {
                ++it;
            }
        }
    }

    // Translate each vote item variant into the matching inv type.
    auto buildInvFromVoteItem = variant::overloaded{
        [](const ProofRef &proof) {
            return CInv(MSG_AVA_PROOF, proof->getId());
        },
        [](const CBlockIndex *pindex) {
            return CInv(MSG_BLOCK, pindex->GetBlockHash());
        },
        [](const CTransactionRef &tx) { return CInv(MSG_TX, tx->GetHash()); },
    };

    auto r = voteRecords.getReadView();
    for (const auto &[item, voteRecord] : r) {
        if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) {
            // Make sure we do not produce more invs than specified by the
            // protocol.
            return invs;
        }

        const bool shouldPoll =
            forPoll ? voteRecord.registerPoll() : voteRecord.shouldPoll();

        if (!shouldPoll) {
            continue;
        }

        invs.emplace_back(std::visit(buildInvFromVoteItem, item));
    }

    return invs;
}
/**
 * Map a poll inventory entry back to the item it refers to.
 *
 * @return The matching block index, proof or mempool transaction; a null
 *         item if the inv type is unknown or the object is not found
 *         locally.
 */
AnyVoteItem Processor::getVoteItemFromInv(const CInv &inv) const {
    if (inv.IsMsgBlk()) {
        return WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(
                                      BlockHash(inv.hash)));
    }

    if (inv.IsMsgProof()) {
        return WITH_LOCK(cs_peerManager,
                         return peerManager->getProof(ProofId(inv.hash)));
    }

    if (mempool && inv.IsMsgTx()) {
        return WITH_LOCK(mempool->cs, return mempool->get(TxId(inv.hash)));
    }

    // Unknown inv type: return a null item.
    return {nullptr};
}
// A block is worth polling unless it is invalid, already covered by the
// finalization tip, or was previously invalidated by avalanche.
bool Processor::IsWorthPolling::operator()(const CBlockIndex *pindex) const {
    AssertLockNotHeld(cs_main);

    LOCK(cs_main);

    if (pindex->nStatus.isInvalid()) {
        // No point polling invalid blocks.
        return false;
    }

    if (WITH_LOCK(processor.cs_finalizationTip,
                  return processor.finalizationTip &&
                         processor.finalizationTip->GetAncestor(
                             pindex->nHeight) == pindex)) {
        // There is no point polling blocks that are ancestor of a block that
        // has been accepted by the network.
        return false;
    }

    if (WITH_LOCK(processor.cs_invalidatedBlocks,
                  return processor.invalidatedBlocks.contains(
                      pindex->GetBlockHash()))) {
        // Blocks invalidated by Avalanche should not be polled twice.
        return false;
    }

    return true;
}
// A proof is worth polling only while the peer manager still tracks it,
// either bound to a peer or sitting in the conflicting pool.
bool Processor::IsWorthPolling::operator()(const ProofRef &proof) const {
    // Avoid lock order issues cs_main -> cs_peerManager
    AssertLockNotHeld(::cs_main);
    AssertLockNotHeld(processor.cs_peerManager);

    const ProofId &proofid = proof->getId();

    LOCK(processor.cs_peerManager);

    // No point polling immature or discarded proofs
    return processor.peerManager->isBoundToPeer(proofid) ||
           processor.peerManager->isInConflictingPool(proofid);
}
// A transaction is worth polling only while it is in the mempool.
bool Processor::IsWorthPolling::operator()(const CTransactionRef &tx) const {
    if (!processor.mempool) {
        // No mempool: transaction polling is unsupported.
        return false;
    }

    // TODO For now the transactions with conflicts or rejected by policies are
    // not stored anywhere, so only the mempool transactions are worth polling.
    AssertLockNotHeld(processor.mempool->cs);
    return WITH_LOCK(processor.mempool->cs,
                     return processor.mempool->exists(tx->GetId()));
}
// An item is worth polling when its type-specific check passes AND it was
// not finalized recently (no point re-voting a settled item).
bool Processor::isWorthPolling(const AnyVoteItem &item) const {
    return std::visit(IsWorthPolling(*this), item) &&
           !isRecentlyFinalized(GetVoteItemId(item));
}
// Local acceptance of a block: it is part of our active chain.
bool Processor::GetLocalAcceptance::operator()(
    const CBlockIndex *pindex) const {
    AssertLockNotHeld(cs_main);

    return WITH_LOCK(cs_main,
                     return processor.chainman.ActiveChain().Contains(pindex));
}
// Local acceptance of a proof: it is bound to a peer in the peer manager.
bool Processor::GetLocalAcceptance::operator()(const ProofRef &proof) const {
    AssertLockNotHeld(processor.cs_peerManager);

    return WITH_LOCK(
        processor.cs_peerManager,
        return processor.peerManager->isBoundToPeer(proof->getId()));
}
// Local acceptance of a transaction: it is in our mempool.
bool Processor::GetLocalAcceptance::operator()(
    const CTransactionRef &tx) const {
    if (!processor.mempool) {
        return false;
    }

    AssertLockNotHeld(processor.mempool->cs);
    return WITH_LOCK(processor.mempool->cs,
                     return processor.mempool->exists(tx->GetId()));
}
} // namespace avalanche
diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h
index 9808431acd..7693765895 100644
--- a/src/avalanche/processor.h
+++ b/src/avalanche/processor.h
@@ -1,467 +1,471 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_AVALANCHE_PROCESSOR_H
#define BITCOIN_AVALANCHE_PROCESSOR_H
#include <avalanche/config.h>
#include <avalanche/node.h>
#include <avalanche/proof.h>
#include <avalanche/proofcomparator.h>
#include <avalanche/protocol.h>
#include <avalanche/voterecord.h> // For AVALANCHE_MAX_INFLIGHT_POLL
#include <blockindex.h>
#include <blockindexcomparators.h>
#include <common/bloom.h>
#include <eventloop.h>
#include <interfaces/chain.h>
#include <interfaces/handler.h>
#include <key.h>
#include <net.h>
#include <primitives/transaction.h>
#include <rwcollection.h>
#include <util/variant.h>
#include <validationinterface.h>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <variant>
#include <vector>
class ArgsManager;
class CConnman;
class CNode;
class CScheduler;
class Config;
class PeerManager;
struct bilingual_str;
/**
* Maximum item that can be polled at once.
*/
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16;
/**
* How long before we consider that a query timed out.
*/
static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{
10000};
/**
* The size of the finalized items filter. It should be large enough that an
* influx of inventories cannot roll any particular item out of the filter on
* demand. For example, transactions will roll blocks out of the filter.
* Tracking many more items than can possibly be polled at once ensures that
* recently polled items will come to a stable state on the network before
* rolling out of the filter.
*/
static constexpr uint32_t AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS =
AVALANCHE_MAX_INFLIGHT_POLL * 20;
namespace avalanche {
class Delegation;
class PeerManager;
class ProofRegistrationState;
struct VoteRecord;
enum struct VoteStatus : uint8_t {
    // The vote concluded and the item is definitively rejected.
    Invalid,
    // The item is currently rejected by the vote but may still flip.
    Rejected,
    // The item is currently accepted by the vote but may still flip.
    Accepted,
    // The vote concluded and the item is definitively accepted.
    Finalized,
    // The vote did not converge within the stale thresholds; the item is
    // dropped from voting.
    Stale,
};
using AnyVoteItem =
std::variant<const ProofRef, const CBlockIndex *, const CTransactionRef>;
/**
 * An (item, status) pair reported back to the caller of registerVotes() to
 * signal a vote state change for an item.
 */
class VoteItemUpdate {
    AnyVoteItem item;
    VoteStatus status;

public:
    VoteItemUpdate(AnyVoteItem itemIn, VoteStatus statusIn)
        : item(std::move(itemIn)), status(statusIn) {}

    const VoteStatus &getStatus() const { return status; }
    const AnyVoteItem &getVoteItem() const { return item; }
};
/**
 * Strict weak ordering over AnyVoteItem, used to prioritize items being
 * voted on. Items of different variant types are grouped by variant index;
 * within a type, proofs sort by score, blocks by descending chain work, and
 * transactions by mempool modified fee rate (falling back to TxId).
 */
class VoteMapComparator {
    // Optional: needed only to order transactions by fee rate.
    const CTxMemPool *mempool{nullptr};

public:
    VoteMapComparator() {}
    VoteMapComparator(const CTxMemPool *mempoolIn) : mempool(mempoolIn) {}

    bool operator()(const AnyVoteItem &lhs, const AnyVoteItem &rhs) const {
        // If the variants are of different types, sort them by variant index
        if (lhs.index() != rhs.index()) {
            return lhs.index() < rhs.index();
        }

        return std::visit(
            variant::overloaded{
                [](const ProofRef &lhs, const ProofRef &rhs) {
                    return ProofComparatorByScore()(lhs, rhs);
                },
                [](const CBlockIndex *lhs, const CBlockIndex *rhs) {
                    // Reverse ordering so we get the highest work first
                    return CBlockIndexWorkComparator()(rhs, lhs);
                },
                [this](const CTransactionRef &lhs, const CTransactionRef &rhs) {
                    const TxId &lhsTxId = lhs->GetId();
                    const TxId &rhsTxId = rhs->GetId();

                    // If there is no mempool, sort by TxId. Note that polling
                    // for txs is currently not supported if there is no mempool
                    // so this is only a safety net.
                    if (!mempool) {
                        return lhsTxId < rhsTxId;
                    }

                    LOCK(mempool->cs);

                    auto lhsOptIter = mempool->GetIter(lhsTxId);
                    auto rhsOptIter = mempool->GetIter(rhsTxId);

                    // If the transactions are not in the mempool, tie by TxId
                    if (!lhsOptIter && !rhsOptIter) {
                        return lhsTxId < rhsTxId;
                    }

                    // If only one is in the mempool, pick that one
                    if (lhsOptIter.has_value() != rhsOptIter.has_value()) {
                        return !!lhsOptIter;
                    }

                    // Both are in the mempool, select the highest fee rate
                    // including the fee deltas
                    return CompareTxMemPoolEntryByModifiedFeeRate{}(
                        **lhsOptIter, **rhsOptIter);
                },
                [](const auto &lhs, const auto &rhs) {
                    // This serves 2 purposes:
                    // - This makes sure that we don't forget to implement a
                    //   comparison case when adding a new variant type.
                    // - This avoids having to write all the cross type cases
                    //   which are already handled by the index sort above.
                    // Because the compiler has no way to determine that, we
                    // cannot use static assertions here without having to
                    // define the whole type matrix also.
                    assert(false);

                    // Return any bool, it's only there to make the compiler
                    // happy.
                    return false;
                },
            },
            lhs, rhs);
    }
};
using VoteMap = std::map<AnyVoteItem, VoteRecord, VoteMapComparator>;
struct query_timeout {};
namespace {
struct AvalancheTest;
}
// FIXME Implement a proper notification handler for node disconnection instead
// of implementing the whole NetEventsInterface for a single interesting event.
class Processor final : public NetEventsInterface {
Config avaconfig;
CConnman *connman;
ChainstateManager &chainman;
CTxMemPool *mempool;
/**
* Items to run avalanche on.
*/
RWCollection<VoteMap> voteRecords;
/**
* Keep track of peers and queries sent.
*/
std::atomic<uint64_t> round;
/**
* Keep track of the peers and associated infos.
*/
mutable Mutex cs_peerManager;
std::unique_ptr<PeerManager> peerManager GUARDED_BY(cs_peerManager);
struct Query {
NodeId nodeid;
uint64_t round;
SteadyMilliseconds timeout;
/**
* We declare this as mutable so it can be modified in the multi_index.
* This is ok because we do not use this field to index in anyway.
*
* /!\ Do not use any mutable field as index.
*/
mutable std::vector<CInv> invs;
};
using QuerySet = boost::multi_index_container<
Query,
boost::multi_index::indexed_by<
// index by nodeid/round
boost::multi_index::hashed_unique<boost::multi_index::composite_key<
Query,
boost::multi_index::member<Query, NodeId, &Query::nodeid>,
boost::multi_index::member<Query, uint64_t, &Query::round>>>,
// sorted by timeout
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<query_timeout>,
boost::multi_index::member<Query, SteadyMilliseconds,
&Query::timeout>>>>;
RWCollection<QuerySet> queries;
/** Data required to participate. */
struct PeerData;
std::unique_ptr<PeerData> peerData;
CKey sessionKey;
/** Event loop machinery. */
EventLoop eventLoop;
/**
* Quorum management.
*/
uint32_t minQuorumScore;
double minQuorumConnectedScoreRatio;
std::atomic<bool> quorumIsEstablished{false};
std::atomic<bool> m_canShareLocalProof{false};
int64_t minAvaproofsNodeCount;
std::atomic<int64_t> avaproofsNodeCounter{0};
/** Voting parameters. */
const uint32_t staleVoteThreshold;
const uint32_t staleVoteFactor;
/** Registered interfaces::Chain::Notifications handler. */
class NotificationsHandler;
std::unique_ptr<interfaces::Handler> chainNotificationsHandler;
mutable Mutex cs_finalizationTip;
const CBlockIndex *finalizationTip GUARDED_BY(cs_finalizationTip){nullptr};
mutable Mutex cs_delayedAvahelloNodeIds;
/**
* A list of the nodes that did not get our proof announced via avahello
* yet because we had no inbound connection.
*/
std::unordered_set<NodeId>
delayedAvahelloNodeIds GUARDED_BY(cs_delayedAvahelloNodeIds);
struct StakingReward {
int blockheight;
// Ordered list of acceptable winners, only the first is used for mining
- std::vector<CScript> winners;
+ std::vector<std::pair<ProofId, CScript>> winners;
};
mutable Mutex cs_stakingRewards;
std::unordered_map<BlockHash, StakingReward, SaltedUint256Hasher>
stakingRewards GUARDED_BY(cs_stakingRewards);
const bool m_preConsensus{false};
Processor(Config avaconfig, interfaces::Chain &chain, CConnman *connmanIn,
ChainstateManager &chainman, CTxMemPool *mempoolIn,
CScheduler &scheduler, std::unique_ptr<PeerData> peerDataIn,
CKey sessionKeyIn, uint32_t minQuorumTotalScoreIn,
double minQuorumConnectedScoreRatioIn,
int64_t minAvaproofsNodeCountIn, uint32_t staleVoteThresholdIn,
uint32_t staleVoteFactorIn, Amount stakeUtxoDustThresholdIn,
bool preConsensus);
public:
~Processor();
static std::unique_ptr<Processor>
MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain,
CConnman *connman, ChainstateManager &chainman,
CTxMemPool *mempoolIn, CScheduler &scheduler,
bilingual_str &error);
bool addToReconcile(const AnyVoteItem &item)
EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
/**
* Wrapper around the addToReconcile for proofs that adds back the
* finalization flag to the peer if it is not polled due to being recently
* finalized.
*/
bool reconcileOrFinalize(const ProofRef &proof)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
bool isAccepted(const AnyVoteItem &item) const;
int getConfidence(const AnyVoteItem &item) const;
bool isRecentlyFinalized(const uint256 &itemId) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
void clearFinalizedItems() EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
// TODO: Refactor the API to remove the dependency on avalanche/protocol.h
void sendResponse(CNode *pfrom, Response response) const;
bool registerVotes(NodeId nodeid, const Response &response,
std::vector<VoteItemUpdate> &updates, int &banscore,
std::string &error)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems,
!cs_invalidatedBlocks, !cs_finalizationTip);
template <typename Callable>
auto withPeerManager(Callable &&func) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager) {
LOCK(cs_peerManager);
return func(*peerManager);
}
CPubKey getSessionPubKey() const;
/**
* @brief Send a avahello message
*
* @param pfrom The node to send the message to
* @return True if a non-null delegation has been announced
*/
bool sendHello(CNode *pfrom)
EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds);
void sendDelayedAvahello()
EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds);
ProofRef getLocalProof() const;
ProofRegistrationState getLocalProofRegistrationState() const;
/*
 * Return whether the avalanche service flag should be set.
 */
// True as soon as local peer data has been provided (non-null peerData).
bool isAvalancheServiceAvailable() { return !!peerData; }
bool startEventLoop(CScheduler &scheduler);
bool stopEventLoop();
void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
// Atomically read the avaproofs node counter — presumably incremented by
// avaproofsSent() for each node whose avaproofs were processed (confirm in
// the implementation file).
int64_t getAvaproofsNodeCounter() const {
return avaproofsNodeCounter.load();
}
bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards);
bool canShareLocalProof();
bool computeStakingReward(const CBlockIndex *pindex)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards);
bool eraseStakingRewardWinner(const BlockHash &prevBlockHash)
EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
void cleanupStakingRewards(const int minHeight)
EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
+ bool getStakingRewardWinners(
+ const BlockHash &prevBlockHash,
+ std::vector<std::pair<ProofId, CScript>> &winners) const
+ EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
bool getStakingRewardWinners(const BlockHash &prevBlockHash,
- std::vector<CScript> &winners) const
+ std::vector<CScript> &payouts) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
bool setStakingRewardWinners(const CBlockIndex *pprev,
- const std::vector<CScript> &winners)
+ const std::vector<CScript> &payouts)
EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
// Implement NetEventInterface. Only FinalizeNode is of interest.
// Node setup is handled elsewhere; nothing to do on initialization.
void InitializeNode(const ::Config &config, CNode &pnode,
ServiceFlags our_services) override {}
// Message processing is not done through this interface; always report
// that no message was processed.
bool ProcessMessages(const ::Config &config, CNode *pnode,
std::atomic<bool> &interrupt) override {
return false;
}
// Likewise, no messages are sent from this interface.
bool SendMessages(const ::Config &config, CNode *pnode) override {
return false;
}
/** Handle removal of a node */
void FinalizeNode(const ::Config &config,
const CNode &node) override LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_delayedAvahelloNodeIds);
private:
void updatedBlockTip()
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
void transactionAddedToMempool(const CTransactionRef &tx)
EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
void runEventLoop()
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards,
!cs_finalizedItems);
void clearTimedoutRequests() EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
std::vector<CInv> getInvsForNextPoll(bool forPoll = true)
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
bool sendHelloInternal(CNode *pfrom)
EXCLUSIVE_LOCKS_REQUIRED(cs_delayedAvahelloNodeIds);
AnyVoteItem getVoteItemFromInv(const CInv &inv) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
/**
* We don't need many blocks but a low false positive rate.
* In the event of a false positive the node might skip polling this block.
* Such a block will not get marked as finalized until it is reconsidered
* for polling (if the filter changed its state) or another block is found.
*/
mutable Mutex cs_invalidatedBlocks;
CRollingBloomFilter invalidatedBlocks GUARDED_BY(cs_invalidatedBlocks){
100, 0.0000001};
/**
* Rolling bloom filter to track recently finalized inventory items of any
* type. Once placed in this filter, those items will not be polled again
* unless they roll out. Note that this one filter tracks all types so
* blocks may be rolled out by transaction activity for example.
*
* We want a low false positive rate to prevent accidentally not polling
* for an item when it is first seen.
*/
mutable Mutex cs_finalizedItems;
CRollingBloomFilter finalizedItems GUARDED_BY(cs_finalizedItems){
AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS, 0.0000001};
// Visitor deciding, per vote-item type (block, proof or transaction),
// whether the item should still be polled by the event loop.
struct IsWorthPolling {
const Processor &processor;
IsWorthPolling(const Processor &_processor) : processor(_processor){};
bool operator()(const CBlockIndex *pindex) const
LOCKS_EXCLUDED(cs_main);
bool operator()(const ProofRef &proof) const
LOCKS_EXCLUDED(cs_peerManager);
bool operator()(const CTransactionRef &tx) const;
};
bool isWorthPolling(const AnyVoteItem &item) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
// Visitor returning this node's own acceptance of a vote item, one
// overload per item type.
struct GetLocalAcceptance {
const Processor &processor;
GetLocalAcceptance(const Processor &_processor)
: processor(_processor){};
bool operator()(const CBlockIndex *pindex) const
LOCKS_EXCLUDED(cs_main);
bool operator()(const ProofRef &proof) const
LOCKS_EXCLUDED(cs_peerManager);
bool operator()(const CTransactionRef &tx) const;
};
// Dispatch the GetLocalAcceptance visitor over the item variant.
bool getLocalAcceptance(const AnyVoteItem &item) const {
return std::visit(GetLocalAcceptance(*this), item);
}
friend struct ::avalanche::AvalancheTest;
};
} // namespace avalanche
#endif // BITCOIN_AVALANCHE_PROCESSOR_H
diff --git a/src/avalanche/test/peermanager_tests.cpp b/src/avalanche/test/peermanager_tests.cpp
index 29cb1e8307..7ec21427b2 100644
--- a/src/avalanche/test/peermanager_tests.cpp
+++ b/src/avalanche/test/peermanager_tests.cpp
@@ -1,3156 +1,3158 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/proofcomparator.h>
#include <avalanche/statistics.h>
#include <avalanche/test/util.h>
#include <cashaddrenc.h>
#include <config.h>
#include <consensus/activation.h>
#include <core_io.h>
#include <key_io.h>
#include <script/standard.h>
#include <uint256.h>
#include <util/fs_helpers.h>
#include <util/time.h>
#include <util/translation.h>
#include <validation.h>
#include <test/util/blockindex.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <limits>
#include <optional>
#include <unordered_map>
using namespace avalanche;
namespace avalanche {
namespace {
// Test-only accessor exposing PeerManager internals to the unit tests
// (relies on friendship with PeerManager). Every helper is a thin,
// side-effect-documented wrapper around private state.
struct TestPeerManager {
// True when node `nodeid` is currently bound to peer `peerid`.
static bool nodeBelongToPeer(const PeerManager &pm, NodeId nodeid,
PeerId peerid) {
return pm.forNode(nodeid, [&](const Node &node) {
return node.peerid == peerid;
});
}
// True when the node is parked in the pending set (proof not yet known).
static bool isNodePending(const PeerManager &pm, NodeId nodeid) {
auto &pendingNodesView = pm.pendingNodes.get<by_nodeid>();
return pendingNodesView.find(nodeid) != pendingNodesView.end();
}
// Look up the peer id bound to a proof, or NO_PEER if unregistered.
static PeerId getPeerIdForProofId(PeerManager &pm,
const ProofId &proofid) {
auto &pview = pm.peers.get<by_proofid>();
auto it = pview.find(proofid);
return it == pview.end() ? NO_PEER : it->peerid;
}
// Register a proof and return the resulting peer id (NO_PEER on
// registration failure).
static PeerId registerAndGetPeerId(PeerManager &pm,
const ProofRef &proof) {
pm.registerProof(proof);
return getPeerIdForProofId(pm, proof->getId());
}
// Snapshot of all peer scores in score-index order.
static std::vector<uint32_t> getOrderedScores(const PeerManager &pm) {
std::vector<uint32_t> scores;
auto &peerView = pm.peers.get<by_score>();
for (const Peer &peer : peerView) {
scores.push_back(peer.getScore());
}
return scores;
}
// Run dangling-proof cleanup, collecting the proofs that got
// (re-)registered in the process.
static void cleanupDanglingProofs(
PeerManager &pm,
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
pm.cleanupDanglingProofs(registeredProofs);
}
// Same as above when the caller does not care about the output set.
static void cleanupDanglingProofs(PeerManager &pm) {
std::unordered_set<ProofRef, SaltedProofHasher> dummy;
pm.cleanupDanglingProofs(dummy);
}
// Fetch the remote proof record for a (proofid, nodeid) pair, if any.
static std::optional<RemoteProof> getRemoteProof(const PeerManager &pm,
const ProofId &proofid,
NodeId nodeid) {
auto it = pm.remoteProofs.find(boost::make_tuple(proofid, nodeid));
if (it == pm.remoteProofs.end()) {
return std::nullopt;
}
return std::make_optional(*it);
}
static size_t getPeerCount(const PeerManager &pm) {
return pm.peers.size();
}
static std::optional<bool>
getRemotePresenceStatus(const PeerManager &pm, const ProofId &proofid) {
return pm.getRemotePresenceStatus(proofid);
}
// Remove every peer; asserts the peer set ends up empty.
static void clearPeers(PeerManager &pm) {
std::vector<PeerId> peerIds;
for (auto &peer : pm.peers) {
peerIds.push_back(peer.peerid);
}
for (const PeerId &peerid : peerIds) {
pm.removePeer(peerid);
}
BOOST_CHECK_EQUAL(pm.peers.size(), 0);
}
// Force the local proof, bypassing the normal setup path.
static void setLocalProof(PeerManager &pm, const ProofRef &proof) {
pm.localProof = proof;
}
static bool isFlaky(const PeerManager &pm, const ProofId &proofid) {
return pm.isFlaky(proofid);
}
};
// Insert a spendable coin paying to `key` into the chainstate's UTXO set
// at the given outpoint, with the provided amount, height and coinbase
// flag.
static void addCoin(Chainstate &chainstate, const COutPoint &outpoint,
                    const CKey &key,
                    const Amount amount = PROOF_DUST_THRESHOLD,
                    uint32_t height = 100, bool is_coinbase = false) {
    const CScript payoutScript =
        GetScriptForDestination(PKHash(key.GetPubKey()));

    LOCK(cs_main);
    chainstate.CoinsTip().AddCoin(
        outpoint, Coin(CTxOut(amount, payoutScript), height, is_coinbase),
        false);
}
// Create a UTXO at a fresh random outpoint, fund it via addCoin() and
// return the outpoint so the caller can stake it in a proof.
static COutPoint createUtxo(Chainstate &chainstate, const CKey &key,
                            const Amount amount = PROOF_DUST_THRESHOLD,
                            uint32_t height = 100,
                            bool is_coinbase = false) {
    const COutPoint outpoint{TxId(GetRandHash()), 0};
    addCoin(chainstate, outpoint, key, amount, height, is_coinbase);
    return outpoint;
}
// Build a proof from explicit (outpoint, amount) stakes, each signed with
// `key`. The master key, sequence, stake height/coinbase flag, expiration
// and payout script all default to convenient test values.
static ProofRef
buildProof(const CKey &key,
           const std::vector<std::tuple<COutPoint, Amount>> &outpoints,
           const CKey &master = CKey::MakeCompressedKey(),
           int64_t sequence = 1, uint32_t height = 100,
           bool is_coinbase = false, int64_t expirationTime = 0,
           const CScript &payoutScript = UNSPENDABLE_ECREG_PAYOUT_SCRIPT) {
    ProofBuilder pb(sequence, expirationTime, master, payoutScript);
    for (const auto &stake : outpoints) {
        BOOST_CHECK(pb.addUTXO(std::get<0>(stake), std::get<1>(stake),
                               height, is_coinbase, key));
    }
    return pb.build();
}
// Convenience wrapper around buildProof(): every outpoint stakes the same
// amount; remaining arguments are forwarded unchanged.
template <typename... Args>
static ProofRef
buildProofWithOutpoints(const CKey &key,
                        const std::vector<COutPoint> &outpoints,
                        Amount amount, Args &&...args) {
    std::vector<std::tuple<COutPoint, Amount>> stakes;
    stakes.reserve(outpoints.size());
    for (const COutPoint &outpoint : outpoints) {
        stakes.emplace_back(outpoint, amount);
    }
    return buildProof(key, stakes, std::forward<Args>(args)...);
}
// Build a proof staking PROOF_DUST_THRESHOLD on each outpoint, using `key`
// as both the stake signing key and the master key, with the given
// sequence number.
static ProofRef
buildProofWithSequence(const CKey &key,
const std::vector<COutPoint> &outpoints,
int64_t sequence) {
return buildProofWithOutpoints(key, outpoints, PROOF_DUST_THRESHOLD,
key, sequence);
}
} // namespace
} // namespace avalanche
namespace {
// Fixture lowering -avaproofstakeutxoconfirmations to 1 so freshly
// created UTXOs are immediately stakeable; the override is cleared on
// teardown so other suites see the default.
struct PeerManagerFixture : public TestChain100Setup {
PeerManagerFixture() {
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "1");
}
~PeerManagerFixture() {
gArgs.ClearForcedArg("-avaproofstakeutxoconfirmations");
}
};
} // namespace
namespace {
// Fixture additionally disabling the conflicting-proof cooldown so tests
// can replace proofs back-to-back without waiting.
struct NoCoolDownFixture : public PeerManagerFixture {
NoCoolDownFixture() {
gArgs.ForceSetArg("-avalancheconflictingproofcooldown", "0");
}
~NoCoolDownFixture() {
gArgs.ClearForcedArg("-avalancheconflictingproofcooldown");
}
};
} // namespace
BOOST_FIXTURE_TEST_SUITE(peermanager_tests, PeerManagerFixture)
// Exhaustive checks of selectPeerImpl() on small hand-built slot tables:
// a slot [start, start + score) maps any probed value inside it to its
// peer id, while values falling in gaps between slots (or outside the
// table) yield NO_PEER.
BOOST_AUTO_TEST_CASE(select_peer_linear) {
// No peers.
BOOST_CHECK_EQUAL(selectPeerImpl({}, 0, 0), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl({}, 1, 3), NO_PEER);
// One peer
const std::vector<Slot> oneslot = {{100, 100, 23}};
// Undershoot
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 0, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 42, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 99, 300), NO_PEER);
// Nailed it
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 100, 300), 23);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 142, 300), 23);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 199, 300), 23);
// Overshoot
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 200, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 242, 300), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(oneslot, 299, 300), NO_PEER);
// Two peers
const std::vector<Slot> twoslots = {{100, 100, 69}, {300, 100, 42}};
// Undershoot
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 0, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 42, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 99, 500), NO_PEER);
// First entry
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 100, 500), 69);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 142, 500), 69);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 199, 500), 69);
// In between
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 200, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 242, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 299, 500), NO_PEER);
// Second entry
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 300, 500), 42);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 342, 500), 42);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 399, 500), 42);
// Overshoot
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 400, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 442, 500), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(twoslots, 499, 500), NO_PEER);
}
// Same contract as select_peer_linear, but with 100 slots so the search
// path over a larger table is exercised, including distributions heavily
// skewed toward the last or the first slot.
BOOST_AUTO_TEST_CASE(select_peer_dichotomic) {
std::vector<Slot> slots;
// 100 peers of size 1 with 1 empty element apart.
uint64_t max = 1;
for (int i = 0; i < 100; i++) {
slots.emplace_back(max, 1, i);
max += 2;
}
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 4, max), NO_PEER);
// Check that we get what we expect.
// Even probes land in the gaps, odd probes land on slot i.
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
}
BOOST_CHECK_EQUAL(selectPeerImpl(slots, max, max), NO_PEER);
// Update the slots to be heavily skewed toward the last element.
slots[99] = slots[99].withScore(101);
max = slots[99].getStop();
BOOST_CHECK_EQUAL(max, 300);
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 2 * i + 1, max), i);
}
// All the extra range [200, 300) belongs to the widened last slot.
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 200, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 256, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 299, max), 99);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 300, max), NO_PEER);
// Update the slots to be heavily skewed toward the first element.
for (int i = 0; i < 100; i++) {
slots[i] = slots[i].withStart(slots[i].getStart() + 100);
}
slots[0] = Slot(1, slots[0].getStop() - 1, slots[0].getPeerId());
slots[99] = slots[99].withScore(1);
max = slots[99].getStop();
BOOST_CHECK_EQUAL(max, 300);
// The widened first slot now covers [1, 200); the rest is shifted by 100.
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 0, max), NO_PEER);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 1, max), 0);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 42, max), 0);
for (int i = 0; i < 100; i++) {
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 1, max), i);
BOOST_CHECK_EQUAL(selectPeerImpl(slots, 100 + 2 * i + 2, max), NO_PEER);
}
}
// Fuzz selectPeerImpl() with 1000 randomly generated slot tables: any
// selected peer must own the slot containing the probed value (the tables
// are built so that peer id == slot index, which need not hold in
// production code).
BOOST_AUTO_TEST_CASE(select_peer_random) {
for (int c = 0; c < 1000; c++) {
size_t size = InsecureRandBits(10) + 1;
std::vector<Slot> slots;
slots.reserve(size);
uint64_t max = InsecureRandBits(3);
// Advance `max` by a random gap and return the previous value, so
// consecutive slots may or may not be adjacent.
auto next = [&]() {
uint64_t r = max;
max += InsecureRandBits(3);
return r;
};
for (size_t i = 0; i < size; i++) {
const uint64_t start = next();
const uint32_t score = InsecureRandBits(3);
max += score;
slots.emplace_back(start, score, i);
}
for (int k = 0; k < 100; k++) {
// Guard against max == 0: InsecureRandRange requires a positive bound.
uint64_t s = max > 0 ? InsecureRandRange(max) : 0;
auto i = selectPeerImpl(slots, s, max);
// /!\ Because of the way we construct the vector, the peer id is
// always the index. This might not be the case in practice.
BOOST_CHECK(i == NO_PEER || slots[i].contains(s));
}
}
}
// Register a fresh random proof of the given score with the peer manager
// and bind `node` to the resulting peer. Both steps are asserted to
// succeed.
static void addNodeWithScore(Chainstate &active_chainstate,
                             avalanche::PeerManager &pm, NodeId node,
                             uint32_t score) {
    auto proof = buildRandomProof(active_chainstate, score);
    BOOST_CHECK(pm.registerProof(proof));
    BOOST_CHECK(pm.addNode(node, proof->getId()));
}
// Node selection must be weighted by proof score: with peers of score
// 1x / 2x / 1x, the observed selection counts over 10000 draws should
// match those ratios within a loose tolerance (500).
BOOST_AUTO_TEST_CASE(peer_probabilities) {
ChainstateManager &chainman = *Assert(m_node.chainman);
// No peers.
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
const NodeId node0 = 42, node1 = 69, node2 = 37;
Chainstate &active_chainstate = chainman.ActiveChainstate();
// One peer, we always return it.
addNodeWithScore(active_chainstate, pm, node0, MIN_VALID_PROOF_SCORE);
BOOST_CHECK_EQUAL(pm.selectNode(), node0);
// Two peers, verify ratio.
addNodeWithScore(active_chainstate, pm, node1, 2 * MIN_VALID_PROOF_SCORE);
std::unordered_map<PeerId, int> results = {};
for (int i = 0; i < 10000; i++) {
size_t n = pm.selectNode();
BOOST_CHECK(n == node0 || n == node1);
results[n]++;
}
// node1 has twice node0's score, so 2 * count(node0) ~ count(node1).
BOOST_CHECK(abs(2 * results[0] - results[1]) < 500);
// Three peers, verify ratio.
addNodeWithScore(active_chainstate, pm, node2, MIN_VALID_PROOF_SCORE);
results.clear();
for (int i = 0; i < 10000; i++) {
size_t n = pm.selectNode();
BOOST_CHECK(n == node0 || n == node1 || n == node2);
results[n]++;
}
// Expected 1:2:1 split, i.e. count(node0) + count(node2) ~ count(node1).
BOOST_CHECK(abs(results[0] - results[1] + results[2]) < 500);
}
// Peer removal leaves fragmented slots behind; compact() reclaims them so
// that selectPeer() never returns NO_PEER for a non-empty peer set.
BOOST_AUTO_TEST_CASE(remove_peer) {
ChainstateManager &chainman = *Assert(m_node.chainman);
// No peers.
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);
Chainstate &active_chainstate = chainman.ActiveChainstate();
// Add 4 peers.
std::array<PeerId, 8> peerids;
for (int i = 0; i < 4; i++) {
auto p = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerids[i] = TestPeerManager::registerAndGetPeerId(pm, p);
BOOST_CHECK(pm.addNode(InsecureRand32(), p->getId()));
}
BOOST_CHECK_EQUAL(pm.getSlotCount(), 40000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[2] ||
p == peerids[3]);
}
// Remove one peer, it never shows up now.
BOOST_CHECK(pm.removePeer(peerids[2]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 40000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Make sure we compact to never get NO_PEER.
BOOST_CHECK_EQUAL(pm.compact(), 10000);
BOOST_CHECK(pm.verify());
BOOST_CHECK_EQUAL(pm.getSlotCount(), 30000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[0] || p == peerids[1] || p == peerids[3]);
}
// Add 4 more peers.
for (int i = 0; i < 4; i++) {
auto p = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerids[i + 4] = TestPeerManager::registerAndGetPeerId(pm, p);
BOOST_CHECK(pm.addNode(InsecureRand32(), p->getId()));
}
BOOST_CHECK_EQUAL(pm.getSlotCount(), 70000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
BOOST_CHECK(pm.removePeer(peerids[0]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 70000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Removing the last entry does not increase fragmentation: its slots are
// simply truncated from the end.
BOOST_CHECK(pm.removePeer(peerids[7]));
BOOST_CHECK_EQUAL(pm.getSlotCount(), 60000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 10000);
// Make sure we compact to never get NO_PEER.
BOOST_CHECK_EQUAL(pm.compact(), 10000);
BOOST_CHECK(pm.verify());
BOOST_CHECK_EQUAL(pm.getSlotCount(), 50000);
BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
for (int i = 0; i < 100; i++) {
PeerId p = pm.selectPeer();
BOOST_CHECK(p == peerids[1] || p == peerids[3] || p == peerids[4] ||
p == peerids[5] || p == peerids[6]);
}
// Removing non existent peers fails.
BOOST_CHECK(!pm.removePeer(peerids[0]));
BOOST_CHECK(!pm.removePeer(peerids[2]));
BOOST_CHECK(!pm.removePeer(peerids[7]));
BOOST_CHECK(!pm.removePeer(NO_PEER));
}
// Removing every peer leaves all slots fragmented; compact() must reclaim
// them down to an empty slot table.
BOOST_AUTO_TEST_CASE(compact_slots) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);

    // Register 4 peers, each with one node attached.
    std::array<PeerId, 4> peerids;
    for (size_t idx = 0; idx < peerids.size(); idx++) {
        auto proof = buildRandomProof(chainman.ActiveChainstate(),
                                      MIN_VALID_PROOF_SCORE);
        peerids[idx] = TestPeerManager::registerAndGetPeerId(pm, proof);
        BOOST_CHECK(pm.addNode(InsecureRand32(), proof->getId()));
    }

    // Remove them all again: every slot becomes fragmented.
    for (const PeerId &peerid : peerids) {
        pm.removePeer(peerid);
    }
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 30000);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 30000);

    // With no peer left, selection always fails.
    for (int i = 0; i < 100; i++) {
        BOOST_CHECK_EQUAL(pm.selectPeer(), NO_PEER);
    }

    // Compacting reclaims all the fragmented slots.
    BOOST_CHECK_EQUAL(pm.compact(), 30000);
    BOOST_CHECK(pm.verify());
    BOOST_CHECK_EQUAL(pm.getSlotCount(), 0);
    BOOST_CHECK_EQUAL(pm.getFragmentation(), 0);
}
// Node add/remove/timeout behavior on a single dominant peer: selection
// only returns live nodes whose next request time is due.
BOOST_AUTO_TEST_CASE(node_crud) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
Chainstate &active_chainstate = chainman.ActiveChainstate();
// Create one peer.
// The huge score makes this peer win virtually every selection below.
auto proof =
buildRandomProof(active_chainstate, 10000000 * MIN_VALID_PROOF_SCORE);
BOOST_CHECK(pm.registerProof(proof));
BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
// Add 4 nodes.
const ProofId &proofid = proof->getId();
for (int i = 0; i < 4; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
}
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n >= 0 && n < 4);
BOOST_CHECK(pm.updateNextRequestTime(n, Now<SteadyMilliseconds>()));
}
// Remove a node, check that it doesn't show up.
BOOST_CHECK(pm.removeNode(2));
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n == 0 || n == 1 || n == 3);
BOOST_CHECK(pm.updateNextRequestTime(n, Now<SteadyMilliseconds>()));
}
// Push a node's timeout in the future, so that it doesn't show up.
BOOST_CHECK(pm.updateNextRequestTime(1, Now<SteadyMilliseconds>() +
std::chrono::hours(24)));
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
BOOST_CHECK(n == 0 || n == 3);
BOOST_CHECK(pm.updateNextRequestTime(n, Now<SteadyMilliseconds>()));
}
// Move a node from a peer to another. This peer has a very low score such
// as chances of being picked are 1 in 10 million.
addNodeWithScore(active_chainstate, pm, 3, MIN_VALID_PROOF_SCORE);
int node3selected = 0;
for (int i = 0; i < 100; i++) {
NodeId n = pm.selectNode();
if (n == 3) {
// Selecting this node should be exceedingly unlikely.
BOOST_CHECK(node3selected++ < 1);
} else {
BOOST_CHECK_EQUAL(n, 0);
}
BOOST_CHECK(pm.updateNextRequestTime(n, Now<SteadyMilliseconds>()));
}
}
// Nodes may arrive before their proof is known: they are parked as
// "pending" and bound to the peer once the proof is registered. Removing
// the peer sends its nodes back to pending.
BOOST_AUTO_TEST_CASE(node_binding) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
Chainstate &active_chainstate = chainman.ActiveChainstate();
auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &proofid = proof->getId();
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
// Add a bunch of nodes with no associated peer
BOOST_CHECK(!pm.addNode(i, proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), i + 1);
}
// Now create the peer and check all the nodes are bound
const PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
for (int i = 0; i < 10; i++) {
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
BOOST_CHECK(pm.verify());
// Disconnect some nodes
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.removeNode(i));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10 - i - 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
// Add nodes when the peer already exists
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5 + i + 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 0);
}
auto alt_proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &alt_proofid = alt_proof->getId();
// Update some nodes from a known proof to an unknown proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(!pm.addNode(i, alt_proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 10 - i - 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), i + 1);
}
auto alt2_proof =
buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
const ProofId &alt2_proofid = alt2_proof->getId();
// Update some nodes from an unknown proof to another unknown proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(!pm.addNode(i, alt2_proofid));
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 5);
}
// Update some nodes from an unknown proof to a known proof
for (int i = 0; i < 5; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 5 + i + 1);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 5 - i - 1);
}
// Remove the peer, the nodes should be pending again
BOOST_CHECK(pm.removePeer(peerid));
BOOST_CHECK(!pm.exists(proof->getId()));
for (int i = 0; i < 10; i++) {
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 10);
}
BOOST_CHECK(pm.verify());
// Remove the remaining pending nodes, check the count drops accordingly
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.removeNode(i));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
BOOST_CHECK_EQUAL(pm.getNodeCount(), 0);
BOOST_CHECK_EQUAL(pm.getPendingNodeCount(), 10 - i - 1);
}
}
// A reorg that drops a proof's stake below the required confirmation count
// makes the proof immature and unbinds its nodes (back to pending); mining
// the block again re-matures the proof and re-binds the nodes.
BOOST_AUTO_TEST_CASE(node_binding_reorg) {
// Require 2 confirmations so invalidating one block suffices to make the
// proof immature.
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto proof = buildRandomProof(chainman.ActiveChainstate(),
MIN_VALID_PROOF_SCORE, 99);
const ProofId &proofid = proof->getId();
PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
BOOST_CHECK(pm.verify());
// Add nodes to our peer
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proofid));
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
// Make the proof immature by reorging to a shorter chain
{
BlockValidationState state;
chainman.ActiveChainstate().InvalidateBlock(
state, WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()));
BOOST_CHECK_EQUAL(
WITH_LOCK(chainman.GetMutex(), return chainman.ActiveHeight()), 99);
}
pm.updatedBlockTip();
BOOST_CHECK(pm.isImmature(proofid));
BOOST_CHECK(!pm.isBoundToPeer(proofid));
for (int i = 0; i < 10; i++) {
BOOST_CHECK(TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(!TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
BOOST_CHECK(pm.verify());
// Make the proof great again
{
// Advance the clock so the newly mined block won't collide with the
// other deterministically-generated blocks
SetMockTime(GetTime() + 20);
mineBlocks(1);
BlockValidationState state;
BOOST_CHECK(chainman.ActiveChainstate().ActivateBestChain(state));
LOCK(chainman.GetMutex());
BOOST_CHECK_EQUAL(chainman.ActiveHeight(), 100);
}
pm.updatedBlockTip();
BOOST_CHECK(!pm.isImmature(proofid));
BOOST_CHECK(pm.isBoundToPeer(proofid));
// The peerid has certainly been updated
peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
for (int i = 0; i < 10; i++) {
BOOST_CHECK(!TestPeerManager::isNodePending(pm, i));
BOOST_CHECK(TestPeerManager::nodeBelongToPeer(pm, i, peerid));
}
BOOST_CHECK(pm.verify());
}
// Proofs staking the same UTXO conflict: registering a proof that collides
// with any stake of an already registered proof must fail (NO_PEER).
BOOST_AUTO_TEST_CASE(proof_conflict) {
auto key = CKey::MakeCompressedKey();
TxId txid1(GetRandHash());
TxId txid2(GetRandHash());
BOOST_CHECK(txid1 != txid2);
const Amount v = PROOF_DUST_THRESHOLD;
const int height = 100;
ChainstateManager &chainman = *Assert(m_node.chainman);
// Fund 10 outputs on each txid so the proofs below have UTXOs to stake.
for (uint32_t i = 0; i < 10; i++) {
addCoin(chainman.ActiveChainstate(), {txid1, i}, key);
addCoin(chainman.ActiveChainstate(), {txid2, i}, key);
}
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
CKey masterKey = CKey::MakeCompressedKey();
const auto getPeerId = [&](const std::vector<COutPoint> &outpoints) {
return TestPeerManager::registerAndGetPeerId(
pm, buildProofWithOutpoints(key, outpoints, v, masterKey, 0, height,
false, 0));
};
// Add one peer.
const PeerId peer1 = getPeerId({COutPoint(txid1, 0)});
BOOST_CHECK(peer1 != NO_PEER);
// Same proof, same peer.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0)}), peer1);
// Different txid, different proof.
const PeerId peer2 = getPeerId({COutPoint(txid2, 0)});
BOOST_CHECK(peer2 != NO_PEER && peer2 != peer1);
// Different index, different proof.
const PeerId peer3 = getPeerId({COutPoint(txid1, 1)});
BOOST_CHECK(peer3 != NO_PEER && peer3 != peer1);
// Empty proof, no peer.
BOOST_CHECK_EQUAL(getPeerId({}), NO_PEER);
// Multiple inputs.
const PeerId peer4 = getPeerId({COutPoint(txid1, 2), COutPoint(txid2, 2)});
BOOST_CHECK(peer4 != NO_PEER && peer4 != peer1);
// Duplicated input.
{
ProofBuilder pb(0, 0, CKey::MakeCompressedKey(),
UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
COutPoint o(txid1, 3);
BOOST_CHECK(pb.addUTXO(o, v, height, false, key));
BOOST_CHECK(
!pm.registerProof(TestProofBuilder::buildDuplicatedStakes(pb)));
}
// Multiple inputs, collision on first input.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0), COutPoint(txid2, 4)}),
NO_PEER);
// Multiple inputs, collision on second input.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 4), COutPoint(txid2, 0)}),
NO_PEER);
// Multiple inputs, collision on both inputs.
BOOST_CHECK_EQUAL(getPeerId({COutPoint(txid1, 0), COutPoint(txid2, 2)}),
NO_PEER);
}
// Immature-proof pool behavior: proofs whose stakes lack confirmations are
// parked (not bound to a peer), the pool is capped at
// AVALANCHE_MAX_IMMATURE_PROOFS with lowest-score eviction, and maturing
// the chain promotes the survivors to full peers.
BOOST_AUTO_TEST_CASE(immature_proofs) {
ChainstateManager &chainman = *Assert(m_node.chainman);
// Require 2 confirmations so proofs staking height-100 coins start
// immature on the 100-block chain.
gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
auto key = CKey::MakeCompressedKey();
int immatureHeight = 100;
// Registration must fail with the IMMATURE result code.
auto registerImmature = [&](const ProofRef &proof) {
ProofRegistrationState state;
BOOST_CHECK(!pm.registerProof(proof, state));
BOOST_CHECK(state.GetResult() == ProofRegistrationResult::IMMATURE);
};
// An immature proof is known but not bound to any peer; a mature one is.
auto checkImmature = [&](const ProofRef &proof, bool expectedImmature) {
const ProofId &proofid = proof->getId();
BOOST_CHECK(pm.exists(proofid));
BOOST_CHECK_EQUAL(pm.isImmature(proofid), expectedImmature);
BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofid), !expectedImmature);
bool ret = false;
pm.forEachPeer([&](const Peer &peer) {
if (proof->getId() == peer.proof->getId()) {
ret = true;
}
});
BOOST_CHECK_EQUAL(ret, !expectedImmature);
};
// Track immature proofs so we can test them later
std::vector<ProofRef> immatureProofs;
// Fill up the immature pool to test the size limit
// Scores increase with i so the earliest proofs are the cheapest to evict.
for (int64_t i = 1; i <= AVALANCHE_MAX_IMMATURE_PROOFS; i++) {
COutPoint outpoint = COutPoint(TxId(GetRandHash()), 0);
auto proof = buildProofWithOutpoints(
key, {outpoint}, i * PROOF_DUST_THRESHOLD, key, 0, immatureHeight);
addCoin(chainman.ActiveChainstate(), outpoint, key,
i * PROOF_DUST_THRESHOLD, immatureHeight);
registerImmature(proof);
checkImmature(proof, true);
immatureProofs.push_back(proof);
}
// More immature proofs evict lower scoring proofs
for (auto i = 0; i < 100; i++) {
COutPoint outpoint = COutPoint(TxId(GetRandHash()), 0);
auto proof =
buildProofWithOutpoints(key, {outpoint}, 200 * PROOF_DUST_THRESHOLD,
key, 0, immatureHeight);
addCoin(chainman.ActiveChainstate(), outpoint, key,
200 * PROOF_DUST_THRESHOLD, immatureHeight);
registerImmature(proof);
checkImmature(proof, true);
immatureProofs.push_back(proof);
BOOST_CHECK(!pm.exists(immatureProofs.front()->getId()));
immatureProofs.erase(immatureProofs.begin());
}
// Replacement when the pool is full still works
{
// Reuse the stake of the lowest-score proof with a higher sequence.
const COutPoint &outpoint =
immatureProofs.front()->getStakes()[0].getStake().getUTXO();
auto proof =
buildProofWithOutpoints(key, {outpoint}, 101 * PROOF_DUST_THRESHOLD,
key, 1, immatureHeight);
registerImmature(proof);
checkImmature(proof, true);
immatureProofs.push_back(proof);
BOOST_CHECK(!pm.exists(immatureProofs.front()->getId()));
immatureProofs.erase(immatureProofs.begin());
}
// Mine a block to increase the chain height, turning all immature proofs to
// mature
mineBlocks(1);
pm.updatedBlockTip();
for (const auto &proof : immatureProofs) {
checkImmature(proof, false);
}
}
// Nodes survive the removal of their peer ("dangling" nodes) and keep
// their per-node state (next request time) when re-bound to a new proof.
BOOST_AUTO_TEST_CASE(dangling_node) {
ChainstateManager &chainman = *Assert(m_node.chainman);
avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
Chainstate &active_chainstate = chainman.ActiveChainstate();
auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
const SteadyMilliseconds theFuture(Now<SteadyMilliseconds>() +
std::chrono::hours(24));
// Add nodes to this peer and update their request time far in the future
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proof->getId()));
BOOST_CHECK(pm.updateNextRequestTime(i, theFuture));
}
// Remove the peer
BOOST_CHECK(pm.removePeer(peerid));
// Check the nodes are still there
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.forNode(i, [](const Node &n) { return true; }));
}
// Build a new one
proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
BOOST_CHECK_NE(peerid, NO_PEER);
// Update the nodes with the new proof
// The previously set request time must have been preserved.
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.addNode(i, proof->getId()));
BOOST_CHECK(pm.forNode(
i, [&](const Node &n) { return n.nextRequestTime == theFuture; }));
}
// Remove the peer
BOOST_CHECK(pm.removePeer(peerid));
// Disconnect the nodes
for (int i = 0; i < 10; i++) {
BOOST_CHECK(pm.removeNode(i));
}
}
BOOST_AUTO_TEST_CASE(proof_accessors) {
    // getProof() returns every registered proof; re-registering an existing
    // proof fails with ALREADY_REGISTERED, and a well-formed but invalid
    // proof fails with INVALID.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    constexpr int numProofs = 10;
    std::vector<ProofRef> proofs;
    proofs.reserve(numProofs);
    for (int i = 0; i < numProofs; i++) {
        proofs.push_back(buildRandomProof(chainman.ActiveChainstate(),
                                          MIN_VALID_PROOF_SCORE));
    }
    for (int i = 0; i < numProofs; i++) {
        BOOST_CHECK(pm.registerProof(proofs[i]));
        {
            ProofRegistrationState state;
            // Fail to add an existing proof
            BOOST_CHECK(!pm.registerProof(proofs[i], state));
            BOOST_CHECK(state.GetResult() ==
                        ProofRegistrationResult::ALREADY_REGISTERED);
        }
        // Every proof registered so far must be retrievable by id.
        for (int added = 0; added <= i; added++) {
            auto proof = pm.getProof(proofs[added]->getId());
            BOOST_CHECK(proof != nullptr);
            const ProofId &proofid = proof->getId();
            BOOST_CHECK_EQUAL(proofid, proofs[added]->getId());
        }
    }
    // No stake, copied from proof_tests.cpp
    const std::string badProofHex(
        "96527eae083f1f24625f049d9e54bb9a21023beefdde700a6bc02036335b4df141c8b"
        "c67bb05a971f5ac2745fd683797dde3002321023beefdde700a6bc02036335b4df141"
        "c8bc67bb05a971f5ac2745fd683797dde3ac135da984db510334abe41134e3d4ef09a"
        "d006b1152be8bc413182bf6f947eac1f8580fe265a382195aa2d73935cabf86d90a8f"
        "666d0a62385ae24732eca51575");
    bilingual_str error;
    auto badProof = RCUPtr<Proof>::make();
    // The proof deserializes fine but is semantically invalid (no stake), so
    // registration is expected to be rejected with INVALID.
    BOOST_CHECK(Proof::FromHex(*badProof, badProofHex, error));
    ProofRegistrationState state;
    BOOST_CHECK(!pm.registerProof(badProof, state));
    BOOST_CHECK(state.GetResult() == ProofRegistrationResult::INVALID);
}
BOOST_FIXTURE_TEST_CASE(conflicting_proof_rescan, NoCoolDownFixture) {
    // When a registered proof becomes invalid (one of its stakes is spent),
    // the rescan on block tip update evicts it and promotes the conflicting
    // proof from the conflicting pool to a full peer.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    const COutPoint conflictingOutpoint = createUtxo(active_chainstate, key);
    const COutPoint outpointToSend = createUtxo(active_chainstate, key);
    // Higher sequence number: this proof wins the conflict and is registered.
    ProofRef proofToInvalidate =
        buildProofWithSequence(key, {conflictingOutpoint, outpointToSend}, 20);
    BOOST_CHECK(pm.registerProof(proofToInvalidate));
    // Lower sequence number: this one only makes the conflicting pool.
    ProofRef conflictingProof =
        buildProofWithSequence(key, {conflictingOutpoint}, 10);
    ProofRegistrationState state;
    BOOST_CHECK(!pm.registerProof(conflictingProof, state));
    BOOST_CHECK(state.GetResult() == ProofRegistrationResult::CONFLICTING);
    BOOST_CHECK(pm.isInConflictingPool(conflictingProof->getId()));
    {
        LOCK(cs_main);
        CCoinsViewCache &coins = active_chainstate.CoinsTip();
        // Make proofToInvalidate invalid by spending one of its staked utxos
        coins.SpendCoin(outpointToSend);
    }
    // Rescan: the invalidated proof is gone and the conflicting proof takes
    // its place as a peer.
    pm.updatedBlockTip();
    BOOST_CHECK(!pm.exists(proofToInvalidate->getId()));
    BOOST_CHECK(!pm.isInConflictingPool(conflictingProof->getId()));
    BOOST_CHECK(pm.isBoundToPeer(conflictingProof->getId()));
}
BOOST_FIXTURE_TEST_CASE(conflicting_proof_selection, NoCoolDownFixture) {
    // Exercise the ConflictingProofComparator preference rules: for the same
    // master key a higher sequence wins; otherwise a higher total staked
    // amount wins, then a lower stake count, and finally the lower proof id
    // breaks the tie.
    const CKey key = CKey::MakeCompressedKey();
    const Amount amount(PROOF_DUST_THRESHOLD);
    const uint32_t height = 100;
    const bool is_coinbase = false;
    ChainstateManager &chainman = *Assert(m_node.chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    // This will be the conflicting UTXO for all the following proofs
    auto conflictingOutpoint = createUtxo(active_chainstate, key, amount);
    auto proof_base = buildProofWithSequence(key, {conflictingOutpoint}, 10);
    ConflictingProofComparator comparator;
    // Check that the comparator verdict and the peer manager registration
    // outcome agree on which of the two proofs is preferred. A fresh peer
    // manager is used per call so each comparison starts from a clean state.
    auto checkPreferred = [&](const ProofRef &candidate,
                              const ProofRef &reference, bool expectAccepted) {
        BOOST_CHECK_EQUAL(comparator(candidate, reference), expectAccepted);
        BOOST_CHECK_EQUAL(comparator(reference, candidate), !expectAccepted);
        avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
        BOOST_CHECK(pm.registerProof(reference));
        BOOST_CHECK(pm.isBoundToPeer(reference->getId()));
        ProofRegistrationState state;
        BOOST_CHECK_EQUAL(pm.registerProof(candidate, state), expectAccepted);
        BOOST_CHECK_EQUAL(state.IsValid(), expectAccepted);
        BOOST_CHECK_EQUAL(state.GetResult() ==
                              ProofRegistrationResult::CONFLICTING,
                          !expectAccepted);
        // The winner is bound to a peer, the loser sits in the conflicting
        // pool.
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(candidate->getId()), expectAccepted);
        BOOST_CHECK_EQUAL(pm.isInConflictingPool(candidate->getId()),
                          !expectAccepted);
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(reference->getId()),
                          !expectAccepted);
        BOOST_CHECK_EQUAL(pm.isInConflictingPool(reference->getId()),
                          expectAccepted);
    };
    // Same master key, lower sequence number
    checkPreferred(buildProofWithSequence(key, {conflictingOutpoint}, 9),
                   proof_base, false);
    // Same master key, higher sequence number
    checkPreferred(buildProofWithSequence(key, {conflictingOutpoint}, 11),
                   proof_base, true);
    // Build a proof staking the shared conflicting utxo plus one freshly
    // created utxo per requested amount.
    auto buildProofFromAmounts = [&](const CKey &master,
                                     std::vector<Amount> &&amounts) {
        std::vector<std::tuple<COutPoint, Amount>> outpointsWithAmount{
            {conflictingOutpoint, amount}};
        std::transform(amounts.begin(), amounts.end(),
                       std::back_inserter(outpointsWithAmount),
                       [&key, &active_chainstate](const Amount amount) {
                           return std::make_tuple(
                               createUtxo(active_chainstate, key, amount),
                               amount);
                       });
        return buildProof(key, outpointsWithAmount, master, 0, height,
                          is_coinbase, 0);
    };
    auto proof_multiUtxo = buildProofFromAmounts(
        key, {2 * PROOF_DUST_THRESHOLD, 2 * PROOF_DUST_THRESHOLD});
    // Test for both the same master and a different one. The sequence number
    // is the same for all these tests.
    for (const CKey &k : {key, CKey::MakeCompressedKey()}) {
        // Low amount
        checkPreferred(buildProofFromAmounts(
                           k, {2 * PROOF_DUST_THRESHOLD, PROOF_DUST_THRESHOLD}),
                       proof_multiUtxo, false);
        // High amount
        checkPreferred(buildProofFromAmounts(k, {2 * PROOF_DUST_THRESHOLD,
                                                 3 * PROOF_DUST_THRESHOLD}),
                       proof_multiUtxo, true);
        // Same amount, low stake count
        checkPreferred(buildProofFromAmounts(k, {4 * PROOF_DUST_THRESHOLD}),
                       proof_multiUtxo, true);
        // Same amount, high stake count
        checkPreferred(buildProofFromAmounts(k, {2 * PROOF_DUST_THRESHOLD,
                                                 PROOF_DUST_THRESHOLD,
                                                 PROOF_DUST_THRESHOLD}),
                       proof_multiUtxo, false);
        // Same amount, same stake count, selection is done on proof id
        auto proofSimilar = buildProofFromAmounts(
            k, {2 * PROOF_DUST_THRESHOLD, 2 * PROOF_DUST_THRESHOLD});
        checkPreferred(proofSimilar, proof_multiUtxo,
                       proofSimilar->getId() < proof_multiUtxo->getId());
    }
}
BOOST_AUTO_TEST_CASE(conflicting_immature_proofs) {
    // Conflict resolution also applies within the immature pool, and a
    // rescan after a reorg selects the preferred proof among conflicting
    // immature ones.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    // Require 2 confirmations so freshly created utxos yield immature proofs
    gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    const COutPoint conflictingOutpoint = createUtxo(active_chainstate, key);
    const COutPoint matureOutpoint =
        createUtxo(active_chainstate, key, PROOF_DUST_THRESHOLD, 99);
    auto immature10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
    auto immature20 =
        buildProofWithSequence(key, {conflictingOutpoint, matureOutpoint}, 20);
    // Both proofs are immature; since they stake the same utxo, the higher
    // sequence proof replaces the lower one in the immature pool.
    BOOST_CHECK(!pm.registerProof(immature10));
    BOOST_CHECK(pm.isImmature(immature10->getId()));
    BOOST_CHECK(!pm.registerProof(immature20));
    BOOST_CHECK(pm.isImmature(immature20->getId()));
    BOOST_CHECK(!pm.exists(immature10->getId()));
    // Build and register a valid proof that will conflict with the immature one
    auto proof30 = buildProofWithOutpoints(key, {matureOutpoint},
                                           PROOF_DUST_THRESHOLD, key, 30, 99);
    BOOST_CHECK(pm.registerProof(proof30));
    BOOST_CHECK(pm.isBoundToPeer(proof30->getId()));
    // Reorg to a shorter chain to make proof30 immature
    {
        BlockValidationState state;
        active_chainstate.InvalidateBlock(
            state, WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()));
        BOOST_CHECK_EQUAL(
            WITH_LOCK(chainman.GetMutex(), return chainman.ActiveHeight()), 99);
    }
    // Check that a rescan will also select the preferred immature proof, in
    // this case proof30 will replace immature20.
    pm.updatedBlockTip();
    BOOST_CHECK(!pm.isBoundToPeer(proof30->getId()));
    BOOST_CHECK(pm.isImmature(proof30->getId()));
    BOOST_CHECK(!pm.exists(immature20->getId()));

    // Restore the default so the forced confirmation count does not leak
    // into subsequent test cases (gArgs is a process-wide singleton).
    gArgs.ClearForcedArg("-avaproofstakeutxoconfirmations");
}
BOOST_FIXTURE_TEST_CASE(preferred_conflicting_proof, NoCoolDownFixture) {
    // The conflicting pool keeps only the best losing candidate: a better
    // conflicting proof replaces (evicts) a worse one.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    // All proofs stake the same utxo, so they conflict with each other
    const COutPoint conflictingOutpoint =
        createUtxo(chainman.ActiveChainstate(), key);
    auto proofSeq10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
    auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
    auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
    BOOST_CHECK(pm.registerProof(proofSeq30));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    BOOST_CHECK(!pm.isInConflictingPool(proofSeq30->getId()));
    // proofSeq10 is a worse candidate than proofSeq30, so it goes to the
    // conflicting pool.
    BOOST_CHECK(!pm.registerProof(proofSeq10));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    BOOST_CHECK(!pm.isBoundToPeer(proofSeq10->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
    // proofSeq20 is a worse candidate than proofSeq30 but a better one than
    // proofSeq10, so it replaces it in the conflicting pool and proofSeq10 is
    // evicted.
    BOOST_CHECK(!pm.registerProof(proofSeq20));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    BOOST_CHECK(!pm.isBoundToPeer(proofSeq20->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
    BOOST_CHECK(!pm.exists(proofSeq10->getId()));
}
BOOST_FIXTURE_TEST_CASE(update_next_conflict_time, NoCoolDownFixture) {
    // A peer's nextPossibleConflictTime can only move forward, and only for
    // a known peer.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    auto now = GetTime<std::chrono::seconds>();
    SetMockTime(now.count());
    // Updating the time of an unknown peer should fail
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(!pm.updateNextPossibleConflictTime(
            PeerId(GetRand<int>(1000)), now));
    }
    auto proof =
        buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
    PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
    auto checkNextPossibleConflictTime = [&](std::chrono::seconds expected) {
        BOOST_CHECK(pm.forPeer(proof->getId(), [&](const Peer &p) {
            return p.nextPossibleConflictTime == expected;
        }));
    };
    // On registration the conflict time is the current (mocked) time.
    checkNextPossibleConflictTime(now);
    // Move the time in the past is not possible
    BOOST_CHECK(!pm.updateNextPossibleConflictTime(
        peerid, now - std::chrono::seconds{1}));
    checkNextPossibleConflictTime(now);
    // Moving the time forward succeeds and is observable on the peer.
    BOOST_CHECK(pm.updateNextPossibleConflictTime(
        peerid, now + std::chrono::seconds{1}));
    checkNextPossibleConflictTime(now + std::chrono::seconds{1});
}
BOOST_FIXTURE_TEST_CASE(register_force_accept, NoCoolDownFixture) {
    // RegistrationMode::FORCE_ACCEPT binds a proof to a peer even when the
    // normal conflict resolution would keep it out, demoting the previously
    // bound proof to the conflicting pool.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    // All proofs stake the same utxo, so they conflict with each other
    const COutPoint conflictingOutpoint =
        createUtxo(chainman.ActiveChainstate(), key);
    auto proofSeq10 = buildProofWithSequence(key, {conflictingOutpoint}, 10);
    auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
    auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
    BOOST_CHECK(pm.registerProof(proofSeq30));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    BOOST_CHECK(!pm.isInConflictingPool(proofSeq30->getId()));
    // proofSeq20 is a worse candidate than proofSeq30, so it goes to the
    // conflicting pool.
    BOOST_CHECK(!pm.registerProof(proofSeq20));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
    // We can force the acceptance of proofSeq20
    using RegistrationMode = avalanche::PeerManager::RegistrationMode;
    BOOST_CHECK(pm.registerProof(proofSeq20, RegistrationMode::FORCE_ACCEPT));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
    // We can also force the acceptance of a proof which is not already in the
    // conflicting pool.
    BOOST_CHECK(!pm.registerProof(proofSeq10));
    BOOST_CHECK(!pm.exists(proofSeq10->getId()));
    BOOST_CHECK(pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
    BOOST_CHECK(!pm.exists(proofSeq20->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
    // Attempting to register again fails, and has no impact on the pools
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(!pm.registerProof(proofSeq10));
        BOOST_CHECK(
            !pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
        BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
        BOOST_CHECK(!pm.exists(proofSeq20->getId()));
        BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
    }
    // Revert between proofSeq10 and proofSeq30 a few times
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(
            pm.registerProof(proofSeq30, RegistrationMode::FORCE_ACCEPT));
        BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
        BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
        BOOST_CHECK(
            pm.registerProof(proofSeq10, RegistrationMode::FORCE_ACCEPT));
        BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
        BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
    }
}
BOOST_FIXTURE_TEST_CASE(evicted_proof, NoCoolDownFixture) {
    // A proof worse than both the bound proof and the conflicting-pool
    // occupant is rejected outright rather than stored.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);

    const CKey key = CKey::MakeCompressedKey();
    // All three proofs stake the same utxo, so they conflict with each other.
    const COutPoint sharedOutpoint =
        createUtxo(chainman.ActiveChainstate(), key);
    auto proofSeq10 = buildProofWithSequence(key, {sharedOutpoint}, 10);
    auto proofSeq20 = buildProofWithSequence(key, {sharedOutpoint}, 20);
    auto proofSeq30 = buildProofWithSequence(key, {sharedOutpoint}, 30);

    // The highest sequence proof registers successfully.
    {
        ProofRegistrationState state;
        BOOST_CHECK(pm.registerProof(proofSeq30, state));
        BOOST_CHECK(state.IsValid());
    }

    auto checkRegistrationFailure = [&](const ProofRef &proof,
                                        ProofRegistrationResult expected) {
        ProofRegistrationState state;
        BOOST_CHECK(!pm.registerProof(proof, state));
        BOOST_CHECK(state.GetResult() == expected);
    };

    // The next-best proof lands in the conflicting pool...
    checkRegistrationFailure(proofSeq20, ProofRegistrationResult::CONFLICTING);
    // ...and an even worse one is simply rejected.
    checkRegistrationFailure(proofSeq10, ProofRegistrationResult::REJECTED);
}
BOOST_AUTO_TEST_CASE(conflicting_proof_cooldown) {
    // A conflicting proof is only considered for registration once the
    // -avalancheconflictingproofcooldown delay has elapsed since the last
    // registration attempt, whether or not it would be the preferred proof.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    // All proofs stake the same utxo, so they conflict with each other
    const COutPoint conflictingOutpoint =
        createUtxo(chainman.ActiveChainstate(), key);
    auto proofSeq20 = buildProofWithSequence(key, {conflictingOutpoint}, 20);
    auto proofSeq30 = buildProofWithSequence(key, {conflictingOutpoint}, 30);
    auto proofSeq40 = buildProofWithSequence(key, {conflictingOutpoint}, 40);
    int64_t conflictingProofCooldown = 100;
    gArgs.ForceSetArg("-avalancheconflictingproofcooldown",
                      strprintf("%d", conflictingProofCooldown));
    int64_t now = GetTime();
    // Drive the clock manually so cooldown expiry is deterministic.
    auto increaseMockTime = [&](int64_t s) {
        now += s;
        SetMockTime(now);
    };
    increaseMockTime(0);
    BOOST_CHECK(pm.registerProof(proofSeq30));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq30->getId()));
    auto checkRegistrationFailure = [&](const ProofRef &proof,
                                        ProofRegistrationResult reason) {
        ProofRegistrationState state;
        BOOST_CHECK(!pm.registerProof(proof, state));
        BOOST_CHECK(state.GetResult() == reason);
    };
    // Registering a conflicting proof will fail due to the conflicting proof
    // cooldown
    checkRegistrationFailure(proofSeq20,
                             ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
    BOOST_CHECK(!pm.exists(proofSeq20->getId()));
    // The cooldown applies as well if the proof is the favorite
    checkRegistrationFailure(proofSeq40,
                             ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
    BOOST_CHECK(!pm.exists(proofSeq40->getId()));
    // Elapse the cooldown
    increaseMockTime(conflictingProofCooldown);
    // The proof will now be added to conflicting pool
    checkRegistrationFailure(proofSeq20, ProofRegistrationResult::CONFLICTING);
    BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
    // But no other
    checkRegistrationFailure(proofSeq40,
                             ProofRegistrationResult::COOLDOWN_NOT_ELAPSED);
    BOOST_CHECK(!pm.exists(proofSeq40->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq20->getId()));
    // Elapse the cooldown
    increaseMockTime(conflictingProofCooldown);
    // The proof will now be accepted to replace proofSeq30, proofSeq30 will
    // move to the conflicting pool, and proofSeq20 will be evicted.
    BOOST_CHECK(pm.registerProof(proofSeq40));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq40->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq30->getId()));
    BOOST_CHECK(!pm.exists(proofSeq20->getId()));
    // Restore the default cooldown for subsequent tests
    gArgs.ClearForcedArg("-avalancheconflictingproofcooldown");
}
BOOST_FIXTURE_TEST_CASE(reject_proof, NoCoolDownFixture) {
    // rejectProof() behavior per pool: DEFAULT removes an immature proof but
    // keeps conflicting/valid proofs around, while INVALIDATE removes the
    // proof entirely; rejecting a bound proof promotes its conflicting proof.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    // Require 2 confirmations so a fresh utxo produces an immature proof
    gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    const COutPoint conflictingOutpoint =
        createUtxo(active_chainstate, key, PROOF_DUST_THRESHOLD, 99);
    const COutPoint immatureOutpoint = createUtxo(active_chainstate, key);
    // The good, the bad and the ugly
    auto proofSeq10 = buildProofWithOutpoints(
        key, {conflictingOutpoint}, PROOF_DUST_THRESHOLD, key, 10, 99);
    auto proofSeq20 = buildProofWithOutpoints(
        key, {conflictingOutpoint}, PROOF_DUST_THRESHOLD, key, 20, 99);
    auto immature30 = buildProofWithSequence(
        key, {conflictingOutpoint, immatureOutpoint}, 30);
    BOOST_CHECK(pm.registerProof(proofSeq20));
    BOOST_CHECK(!pm.registerProof(proofSeq10));
    BOOST_CHECK(!pm.registerProof(immature30));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
    BOOST_CHECK(pm.isImmature(immature30->getId()));
    // Rejecting a proof that doesn't exist should fail
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(
            !pm.rejectProof(avalanche::ProofId(GetRandHash()),
                            avalanche::PeerManager::RejectionMode::DEFAULT));
        BOOST_CHECK(
            !pm.rejectProof(avalanche::ProofId(GetRandHash()),
                            avalanche::PeerManager::RejectionMode::INVALIDATE));
    }
    // DEFAULT rejection unbinds the proof; only immature proofs are dropped
    // from the pools entirely.
    auto checkRejectDefault = [&](const ProofId &proofid) {
        BOOST_CHECK(pm.exists(proofid));
        const bool isImmature = pm.isImmature(proofid);
        BOOST_CHECK(pm.rejectProof(
            proofid, avalanche::PeerManager::RejectionMode::DEFAULT));
        BOOST_CHECK(!pm.isBoundToPeer(proofid));
        BOOST_CHECK_EQUAL(pm.exists(proofid), !isImmature);
    };
    // INVALIDATE rejection always removes the proof.
    auto checkRejectInvalidate = [&](const ProofId &proofid) {
        BOOST_CHECK(pm.exists(proofid));
        BOOST_CHECK(pm.rejectProof(
            proofid, avalanche::PeerManager::RejectionMode::INVALIDATE));
    };
    // Reject from the immature pool
    checkRejectDefault(immature30->getId());
    BOOST_CHECK(!pm.registerProof(immature30));
    BOOST_CHECK(pm.isImmature(immature30->getId()));
    checkRejectInvalidate(immature30->getId());
    // Reject from the conflicting pool
    checkRejectDefault(proofSeq10->getId());
    checkRejectInvalidate(proofSeq10->getId());
    // Add again a proof to the conflicting pool
    BOOST_CHECK(!pm.registerProof(proofSeq10));
    BOOST_CHECK(pm.isInConflictingPool(proofSeq10->getId()));
    // Reject from the valid pool, default mode
    checkRejectDefault(proofSeq20->getId());
    // The conflicting proof should be promoted to a peer
    BOOST_CHECK(!pm.isInConflictingPool(proofSeq10->getId()));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq10->getId()));
    // Reject from the valid pool, invalidate mode
    checkRejectInvalidate(proofSeq10->getId());
    // The conflicting proof should also be promoted to a peer
    BOOST_CHECK(!pm.isInConflictingPool(proofSeq20->getId()));
    BOOST_CHECK(pm.isBoundToPeer(proofSeq20->getId()));

    // Restore the default so the forced confirmation count does not leak
    // into subsequent test cases (gArgs is a process-wide singleton).
    gArgs.ClearForcedArg("-avaproofstakeutxoconfirmations");
}
BOOST_AUTO_TEST_CASE(should_request_more_nodes) {
    // shouldRequestMoreNodes() is a one-shot flag: it is raised when node
    // selection fails or when a proof turns dangling, and reading it resets
    // it until the condition occurs again.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    // Set mock time so that proof registration time is predictable and
    // testable.
    SetMockTime(GetTime());
    auto proof =
        buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof));
    // Not dangling yet, the proof will remain active for some time before it
    // turns dangling if no node is connecting in the meantime.
    BOOST_CHECK(!pm.isDangling(proof->getId()));
    // We have no nodes, so select node will fail and flag that we need more
    // nodes
    BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
    BOOST_CHECK(pm.shouldRequestMoreNodes());
    for (size_t i = 0; i < 10; i++) {
        // The flag will not trigger again until we fail to select nodes again
        BOOST_CHECK(!pm.shouldRequestMoreNodes());
    }
    // Add a few nodes.
    const ProofId &proofid = proof->getId();
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(pm.addNode(i, proofid));
    }
    BOOST_CHECK(!pm.isDangling(proof->getId()));
    auto cooldownTimepoint = Now<SteadyMilliseconds>() + 10s;
    // All the nodes can be selected once
    for (size_t i = 0; i < 10; i++) {
        NodeId selectedId = pm.selectNode();
        BOOST_CHECK_NE(selectedId, NO_NODE);
        BOOST_CHECK(pm.updateNextRequestTime(selectedId, cooldownTimepoint));
        BOOST_CHECK(!pm.shouldRequestMoreNodes());
    }
    // All the nodes have been requested, next select will fail and the flag
    // should trigger
    BOOST_CHECK_EQUAL(pm.selectNode(), NO_NODE);
    BOOST_CHECK(pm.shouldRequestMoreNodes());
    for (size_t i = 0; i < 10; i++) {
        // The flag will not trigger again until we fail to select nodes again
        BOOST_CHECK(!pm.shouldRequestMoreNodes());
    }
    // Make it possible to request a node again
    BOOST_CHECK(pm.updateNextRequestTime(0, Now<SteadyMilliseconds>()));
    BOOST_CHECK_NE(pm.selectNode(), NO_NODE);
    BOOST_CHECK(!pm.shouldRequestMoreNodes());
    // Add another proof with no node attached
    auto proof2 =
        buildRandomProof(chainman.ActiveChainstate(), MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof2));
    BOOST_CHECK(!pm.isDangling(proof2->getId()));
    TestPeerManager::cleanupDanglingProofs(pm);
    BOOST_CHECK(!pm.isDangling(proof2->getId()));
    BOOST_CHECK(!pm.shouldRequestMoreNodes());
    // After some time the proof will be considered dangling and more nodes will
    // be requested.
    SetMockTime(GetTime() + 15 * 60);
    TestPeerManager::cleanupDanglingProofs(pm);
    BOOST_CHECK(pm.isDangling(proof2->getId()));
    BOOST_CHECK(pm.shouldRequestMoreNodes());
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(pm.isDangling(proof2->getId()));
        // The flag will not trigger again until the condition is met again
        BOOST_CHECK(!pm.shouldRequestMoreNodes());
    }
    // Attempt to register the dangling proof again. This should fail but
    // trigger a request for more nodes.
    ProofRegistrationState state;
    BOOST_CHECK(!pm.registerProof(proof2, state));
    BOOST_CHECK(state.GetResult() == ProofRegistrationResult::DANGLING);
    BOOST_CHECK(pm.isDangling(proof2->getId()));
    BOOST_CHECK(pm.shouldRequestMoreNodes());
    for (size_t i = 0; i < 10; i++) {
        BOOST_CHECK(pm.isDangling(proof2->getId()));
        // The flag will not trigger again until the condition is met again
        BOOST_CHECK(!pm.shouldRequestMoreNodes());
    }
    // Attach a node to that proof
    BOOST_CHECK(!pm.addNode(11, proof2->getId()));
    BOOST_CHECK(pm.registerProof(proof2));
    SetMockTime(GetTime() + 15 * 60);
    TestPeerManager::cleanupDanglingProofs(pm);
    BOOST_CHECK(!pm.isDangling(proof2->getId()));
    BOOST_CHECK(!pm.shouldRequestMoreNodes());
    // Disconnect the node, the proof is dangling again
    BOOST_CHECK(pm.removeNode(11));
    TestPeerManager::cleanupDanglingProofs(pm);
    BOOST_CHECK(pm.isDangling(proof2->getId()));
    BOOST_CHECK(pm.shouldRequestMoreNodes());
    // Invalidating the proof, removes the proof from the dangling pool but not
    // a simple rejection.
    BOOST_CHECK(!pm.rejectProof(
        proof2->getId(), avalanche::PeerManager::RejectionMode::DEFAULT));
    BOOST_CHECK(pm.isDangling(proof2->getId()));
    BOOST_CHECK(pm.rejectProof(
        proof2->getId(), avalanche::PeerManager::RejectionMode::INVALIDATE));
    BOOST_CHECK(!pm.isDangling(proof2->getId()));
}
BOOST_AUTO_TEST_CASE(score_ordering) {
    // Peers must be returned ordered by descending score, regardless of the
    // order in which their proofs were registered.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);

    // Scores 10x, 9x, ..., 1x MIN_VALID_PROOF_SCORE, highest first.
    constexpr uint32_t numPeers = 10;
    std::vector<uint32_t> expectedScores;
    expectedScores.reserve(numPeers);
    for (uint32_t mult = numPeers; mult > 0; mult--) {
        expectedScores.push_back(mult * MIN_VALID_PROOF_SCORE);
    }

    std::vector<ProofRef> proofs;
    proofs.reserve(expectedScores.size());
    for (uint32_t score : expectedScores) {
        proofs.push_back(buildRandomProof(chainman.ActiveChainstate(), score));
    }

    // Register the proofs in a random score order.
    Shuffle(proofs.begin(), proofs.end(), FastRandomContext());
    for (auto &proof : proofs) {
        BOOST_CHECK(pm.registerProof(proof));
    }

    auto peersScores = TestPeerManager::getOrderedScores(pm);
    BOOST_CHECK_EQUAL_COLLECTIONS(peersScores.begin(), peersScores.end(),
                                  expectedScores.begin(), expectedScores.end());
}
BOOST_FIXTURE_TEST_CASE(known_score_tracking, NoCoolDownFixture) {
    // getTotalPeersScore() must only count proofs that are bound to a peer:
    // conflicting and immature proofs do not contribute, and the tracked
    // total follows promotions, rejections and peer removals.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    // Require 2 confirmations so a fresh utxo produces an immature proof
    gArgs.ForceSetArg("-avaproofstakeutxoconfirmations", "2");
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const CKey key = CKey::MakeCompressedKey();
    const Amount amount1(PROOF_DUST_THRESHOLD);
    const Amount amount2(2 * PROOF_DUST_THRESHOLD);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    const COutPoint peer1ConflictingOutput =
        createUtxo(active_chainstate, key, amount1, 99);
    const COutPoint peer1SecondaryOutpoint =
        createUtxo(active_chainstate, key, amount2, 99);
    auto peer1Proof1 = buildProof(
        key,
        {{peer1ConflictingOutput, amount1}, {peer1SecondaryOutpoint, amount2}},
        key, 10, 99);
    auto peer1Proof2 =
        buildProof(key, {{peer1ConflictingOutput, amount1}}, key, 20, 99);
    // Create a proof with an immature UTXO, so the proof will be immature
    auto peer1Proof3 =
        buildProof(key,
                   {{peer1ConflictingOutput, amount1},
                    {createUtxo(active_chainstate, key, amount1), amount1}},
                   key, 30);
    const uint32_t peer1Score1 = Proof::amountToScore(amount1 + amount2);
    const uint32_t peer1Score2 = Proof::amountToScore(amount1);
    // Add first peer and check that we have its score tracked
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);
    BOOST_CHECK(pm.registerProof(peer1Proof2));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    // Ensure failing to add conflicting proofs doesn't affect the score, the
    // first proof stays bound and counted
    BOOST_CHECK(!pm.registerProof(peer1Proof1));
    BOOST_CHECK(!pm.registerProof(peer1Proof3));
    BOOST_CHECK(pm.isBoundToPeer(peer1Proof2->getId()));
    BOOST_CHECK(pm.isInConflictingPool(peer1Proof1->getId()));
    BOOST_CHECK(pm.isImmature(peer1Proof3->getId()));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    // DEFAULT rejection unbinds the proof; only immature proofs are dropped
    // from the pools entirely.
    auto checkRejectDefault = [&](const ProofId &proofid) {
        BOOST_CHECK(pm.exists(proofid));
        const bool isImmature = pm.isImmature(proofid);
        BOOST_CHECK(pm.rejectProof(
            proofid, avalanche::PeerManager::RejectionMode::DEFAULT));
        BOOST_CHECK(!pm.isBoundToPeer(proofid));
        BOOST_CHECK_EQUAL(pm.exists(proofid), !isImmature);
    };
    // INVALIDATE rejection always removes the proof.
    auto checkRejectInvalidate = [&](const ProofId &proofid) {
        BOOST_CHECK(pm.exists(proofid));
        BOOST_CHECK(pm.rejectProof(
            proofid, avalanche::PeerManager::RejectionMode::INVALIDATE));
    };
    // Reject from the immature pool doesn't affect tracked score
    checkRejectDefault(peer1Proof3->getId());
    BOOST_CHECK(!pm.registerProof(peer1Proof3));
    BOOST_CHECK(pm.isImmature(peer1Proof3->getId()));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    checkRejectInvalidate(peer1Proof3->getId());
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    // Reject from the conflicting pool
    checkRejectDefault(peer1Proof1->getId());
    checkRejectInvalidate(peer1Proof1->getId());
    // Add again a proof to the conflicting pool
    BOOST_CHECK(!pm.registerProof(peer1Proof1));
    BOOST_CHECK(pm.isInConflictingPool(peer1Proof1->getId()));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    // Reject from the valid pool, default mode
    // Now the score should change as the new peer is promoted
    checkRejectDefault(peer1Proof2->getId());
    BOOST_CHECK(!pm.isInConflictingPool(peer1Proof1->getId()));
    BOOST_CHECK(pm.isBoundToPeer(peer1Proof1->getId()));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score1);
    // Reject from the valid pool, invalidate mode
    // Now the score should change as the old peer is re-promoted
    checkRejectInvalidate(peer1Proof1->getId());
    // The conflicting proof should also be promoted to a peer
    BOOST_CHECK(!pm.isInConflictingPool(peer1Proof2->getId()));
    BOOST_CHECK(pm.isBoundToPeer(peer1Proof2->getId()));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    // Now add another peer and check that combined scores are correct
    uint32_t peer2Score = 1 * MIN_VALID_PROOF_SCORE;
    auto peer2Proof1 = buildRandomProof(active_chainstate, peer2Score, 99);
    PeerId peerid2 = TestPeerManager::registerAndGetPeerId(pm, peer2Proof1);
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2 + peer2Score);
    // Trying to remove non-existent peer doesn't affect score
    BOOST_CHECK(!pm.removePeer(1234));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2 + peer2Score);
    // Removing new peer removes its score
    BOOST_CHECK(pm.removePeer(peerid2));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), peer1Score2);
    PeerId peerid1 =
        TestPeerManager::getPeerIdForProofId(pm, peer1Proof2->getId());
    BOOST_CHECK(pm.removePeer(peerid1));
    BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);

    // Restore the default so the forced confirmation count does not leak
    // into subsequent test cases (gArgs is a process-wide singleton).
    gArgs.ClearForcedArg("-avaproofstakeutxoconfirmations");
}
BOOST_AUTO_TEST_CASE(connected_score_tracking) {
    // getConnectedPeersScore() counts a peer's score exactly once while it
    // has at least one node attached, independently of the node count, and
    // drops it when the last node disconnects or the peer is removed.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const auto checkScores = [&pm](uint32_t known, uint32_t connected) {
        BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), known);
        BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), connected);
    };
    // Start out with 0s
    checkScores(0, 0);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    // Create one peer without a node. Its score should be registered but not
    // connected
    uint32_t score1 = 10000000 * MIN_VALID_PROOF_SCORE;
    auto proof1 = buildRandomProof(active_chainstate, score1);
    PeerId peerid1 = TestPeerManager::registerAndGetPeerId(pm, proof1);
    checkScores(score1, 0);
    // Add nodes. We now have a connected score, but it doesn't matter how many
    // nodes we add the score is the same
    const ProofId &proofid1 = proof1->getId();
    const uint8_t nodesToAdd = 10;
    for (int i = 0; i < nodesToAdd; i++) {
        BOOST_CHECK(pm.addNode(i, proofid1));
        checkScores(score1, score1);
    }
    // Remove all but 1 node and ensure the score doesn't change
    for (int i = 0; i < nodesToAdd - 1; i++) {
        BOOST_CHECK(pm.removeNode(i));
        checkScores(score1, score1);
    }
    // Removing the last node should remove the score from the connected count
    BOOST_CHECK(pm.removeNode(nodesToAdd - 1));
    checkScores(score1, 0);
    // Add 2 nodes to peer and create peer2. Without a node peer2 has no
    // connected score but after adding a node it does.
    BOOST_CHECK(pm.addNode(0, proofid1));
    BOOST_CHECK(pm.addNode(1, proofid1));
    checkScores(score1, score1);
    uint32_t score2 = 1 * MIN_VALID_PROOF_SCORE;
    auto proof2 = buildRandomProof(active_chainstate, score2);
    PeerId peerid2 = TestPeerManager::registerAndGetPeerId(pm, proof2);
    checkScores(score1 + score2, score1);
    BOOST_CHECK(pm.addNode(2, proof2->getId()));
    checkScores(score1 + score2, score1 + score2);
    // The first peer has two nodes left. Remove one and nothing happens, remove
    // the other and its score is no longer in the connected counter.
    BOOST_CHECK(pm.removeNode(0));
    checkScores(score1 + score2, score1 + score2);
    BOOST_CHECK(pm.removeNode(1));
    checkScores(score1 + score2, score2);
    // Removing a peer with no allocated score has no effect on the connected
    // counter.
    BOOST_CHECK(pm.removePeer(peerid1));
    checkScores(score2, score2);
    // Remove the second peer's node removes its allocated score.
    BOOST_CHECK(pm.removeNode(2));
    checkScores(score2, 0);
    // Removing the second peer takes us back to 0.
    BOOST_CHECK(pm.removePeer(peerid2));
    checkScores(0, 0);
    // Add 2 peers with nodes and remove them without removing the nodes first.
    // Both score counters should be reduced by each peer's score when it's
    // removed.
    peerid1 = TestPeerManager::registerAndGetPeerId(pm, proof1);
    checkScores(score1, 0);
    peerid2 = TestPeerManager::registerAndGetPeerId(pm, proof2);
    checkScores(score1 + score2, 0);
    BOOST_CHECK(pm.addNode(0, proof1->getId()));
    checkScores(score1 + score2, score1);
    BOOST_CHECK(pm.addNode(1, proof2->getId()));
    checkScores(score1 + score2, score1 + score2);
    BOOST_CHECK(pm.removePeer(peerid2));
    checkScores(score1, score1);
    BOOST_CHECK(pm.removePeer(peerid1));
    checkScores(0, 0);
}
// Exercise the shareable proofs radix tree snapshot semantics: changes made
// to the peer manager (proof additions, invalidations, conflict
// replacements) are only visible in snapshots taken after the change, never
// in a snapshot already handed out.
BOOST_FIXTURE_TEST_CASE(proof_radix_tree, NoCoolDownFixture) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    // Order proofs by id so the expected set iterates in the same order as
    // the radix tree leaves.
    struct ProofComparatorById {
        bool operator()(const ProofRef &lhs, const ProofRef &rhs) const {
            return lhs->getId() < rhs->getId();
        };
    };
    using ProofSetById = std::set<ProofRef, ProofComparatorById>;
    // Maintain a list of the expected proofs through this test
    ProofSetById expectedProofs;
    // Returns true iff the tree leaves match expectedProofs exactly, walking
    // both in proof id order.
    auto matchExpectedContent = [&](const auto &tree) {
        auto it = expectedProofs.begin();
        return tree.forEachLeaf([&](auto pLeaf) {
            return it != expectedProofs.end() &&
                   pLeaf->getId() == (*it++)->getId();
        });
    };
    CKey key = CKey::MakeCompressedKey();
    const int64_t sequence = 10;
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    // Add some initial proofs
    for (size_t i = 0; i < 10; i++) {
        auto outpoint = createUtxo(active_chainstate, key);
        auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
        BOOST_CHECK(pm.registerProof(proof));
        expectedProofs.insert(std::move(proof));
    }
    const auto &treeRef = pm.getShareableProofsSnapshot();
    BOOST_CHECK(matchExpectedContent(treeRef));
    // Create a copy
    auto tree = pm.getShareableProofsSnapshot();
    // Adding more proofs doesn't change the tree...
    ProofSetById addedProofs;
    std::vector<COutPoint> outpointsToSpend;
    for (size_t i = 0; i < 10; i++) {
        auto outpoint = createUtxo(active_chainstate, key);
        auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
        BOOST_CHECK(pm.registerProof(proof));
        addedProofs.insert(std::move(proof));
        outpointsToSpend.push_back(std::move(outpoint));
    }
    BOOST_CHECK(matchExpectedContent(tree));
    // ...until we get a new copy
    tree = pm.getShareableProofsSnapshot();
    expectedProofs.insert(addedProofs.begin(), addedProofs.end());
    BOOST_CHECK(matchExpectedContent(tree));
    // Spend some coins to make the associated proofs invalid
    {
        LOCK(cs_main);
        CCoinsViewCache &coins = active_chainstate.CoinsTip();
        for (const auto &outpoint : outpointsToSpend) {
            coins.SpendCoin(outpoint);
        }
    }
    // Let the peer manager pick up the utxo set change
    pm.updatedBlockTip();
    // This doesn't change the tree...
    BOOST_CHECK(matchExpectedContent(tree));
    // ...until we get a new copy
    tree = pm.getShareableProofsSnapshot();
    for (const auto &proof : addedProofs) {
        BOOST_CHECK_EQUAL(expectedProofs.erase(proof), 1);
    }
    BOOST_CHECK(matchExpectedContent(tree));
    // Add some more proof for which we will create conflicts
    std::vector<ProofRef> conflictingProofs;
    std::vector<COutPoint> conflictingOutpoints;
    for (size_t i = 0; i < 10; i++) {
        auto outpoint = createUtxo(active_chainstate, key);
        auto proof = buildProofWithSequence(key, {{outpoint}}, sequence);
        BOOST_CHECK(pm.registerProof(proof));
        conflictingProofs.push_back(std::move(proof));
        conflictingOutpoints.push_back(std::move(outpoint));
    }
    tree = pm.getShareableProofsSnapshot();
    expectedProofs.insert(conflictingProofs.begin(), conflictingProofs.end());
    BOOST_CHECK(matchExpectedContent(tree));
    // Build a bunch of conflicting proofs, half better, half worst
    for (size_t i = 0; i < 10; i += 2) {
        // The worst proof is not added to the expected set
        BOOST_CHECK(!pm.registerProof(buildProofWithSequence(
            key, {{conflictingOutpoints[i]}}, sequence - 1)));
        // But the better proof should replace its conflicting one
        auto replacementProof = buildProofWithSequence(
            key, {{conflictingOutpoints[i + 1]}}, sequence + 1);
        BOOST_CHECK(pm.registerProof(replacementProof));
        BOOST_CHECK_EQUAL(expectedProofs.erase(conflictingProofs[i + 1]), 1);
        BOOST_CHECK(expectedProofs.insert(replacementProof).second);
    }
    tree = pm.getShareableProofsSnapshot();
    BOOST_CHECK(matchExpectedContent(tree));
    // Check for consistency
    pm.verify();
}
// Check that the "avaproofs sent" flag can be latched exactly once per
// known node, and never for an unknown node.
BOOST_AUTO_TEST_CASE(received_avaproofs) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    // Register a fresh random proof and attach the given node to it
    auto connectNodeWithProof = [&](NodeId id) {
        auto randomProof = buildRandomProof(chainman.ActiveChainstate(),
                                            MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(randomProof));
        BOOST_CHECK(pm.addNode(id, randomProof->getId()));
    };
    for (NodeId id = 0; id < 10; id++) {
        // Latching fails while the node is unknown
        BOOST_CHECK(!pm.latchAvaproofsSent(id));
        connectNodeWithProof(id);
        // The first latch on a known node succeeds...
        BOOST_CHECK(pm.latchAvaproofsSent(id));
        // ...but only once: the flag stays set afterwards
        BOOST_CHECK(!pm.latchAvaproofsSent(id));
    }
}
// Check that dangling proofs (peers with no node attached for longer than
// DANGLING_TIMEOUT) get discarded, that their conflicting proofs are
// promoted in their place, and that newly promoted proofs are in turn
// subject to the same dangling cleanup.
BOOST_FIXTURE_TEST_CASE(cleanup_dangling_proof, NoCoolDownFixture) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const auto now = GetTime<std::chrono::seconds>();
    auto mocktime = now;
    // Advance the mock clock by the given amount
    auto elapseTime = [&](std::chrono::seconds seconds) {
        mocktime += seconds;
        SetMockTime(mocktime.count());
    };
    elapseTime(0s);
    const CKey key = CKey::MakeCompressedKey();
    const size_t numProofs = 10;
    std::vector<COutPoint> outpoints(numProofs);
    std::vector<ProofRef> proofs(numProofs);
    std::vector<ProofRef> conflictingProofs(numProofs);
    for (size_t i = 0; i < numProofs; i++) {
        outpoints[i] = createUtxo(chainman.ActiveChainstate(), key);
        // Each conflicting proof spends the same utxo with a lower sequence
        // number, so it loses the conflict and lands in the conflicting pool.
        proofs[i] = buildProofWithSequence(key, {outpoints[i]}, 2);
        conflictingProofs[i] = buildProofWithSequence(key, {outpoints[i]}, 1);
        BOOST_CHECK(pm.registerProof(proofs[i]));
        BOOST_CHECK(pm.isBoundToPeer(proofs[i]->getId()));
        BOOST_CHECK(!pm.registerProof(conflictingProofs[i]));
        BOOST_CHECK(pm.isInConflictingPool(conflictingProofs[i]->getId()));
        if (i % 2) {
            // Odd indexes get a node attached to them
            BOOST_CHECK(pm.addNode(i, proofs[i]->getId()));
        }
        BOOST_CHECK_EQUAL(pm.forPeer(proofs[i]->getId(),
                                     [&](const avalanche::Peer &peer) {
                                         return peer.node_count;
                                     }),
                          i % 2);
        elapseTime(1s);
    }
    // No proof expired yet
    TestPeerManager::cleanupDanglingProofs(pm);
    for (size_t i = 0; i < numProofs; i++) {
        BOOST_CHECK(pm.isBoundToPeer(proofs[i]->getId()));
        BOOST_CHECK(pm.isInConflictingPool(conflictingProofs[i]->getId()));
    }
    // Elapse the dangling timeout
    elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
    TestPeerManager::cleanupDanglingProofs(pm);
    for (size_t i = 0; i < numProofs; i++) {
        const bool hasNodeAttached = i % 2;
        // Only the peers with no nodes attached are getting discarded
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofs[i]->getId()),
                          hasNodeAttached);
        BOOST_CHECK_EQUAL(!pm.exists(proofs[i]->getId()), !hasNodeAttached);
        // The proofs conflicting with the discarded ones are pulled back
        BOOST_CHECK_EQUAL(pm.isInConflictingPool(conflictingProofs[i]->getId()),
                          hasNodeAttached);
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
                          !hasNodeAttached);
    }
    // Attach a node to the first conflicting proof, which has been promoted
    BOOST_CHECK(pm.addNode(42, conflictingProofs[0]->getId()));
    BOOST_CHECK(pm.forPeer(
        conflictingProofs[0]->getId(),
        [&](const avalanche::Peer &peer) { return peer.node_count == 1; }));
    // Elapse the dangling timeout again
    elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
    TestPeerManager::cleanupDanglingProofs(pm);
    for (size_t i = 0; i < numProofs; i++) {
        const bool hasNodeAttached = i % 2;
        // The initial peers with a node attached are still there
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(proofs[i]->getId()),
                          hasNodeAttached);
        BOOST_CHECK_EQUAL(!pm.exists(proofs[i]->getId()), !hasNodeAttached);
        // This time the previouly promoted conflicting proofs are evicted
        // because they have no node attached, except the index 0.
        BOOST_CHECK_EQUAL(pm.exists(conflictingProofs[i]->getId()),
                          hasNodeAttached || i == 0);
        BOOST_CHECK_EQUAL(pm.isInConflictingPool(conflictingProofs[i]->getId()),
                          hasNodeAttached);
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
                          i == 0);
    }
    // Disconnect all the nodes
    for (size_t i = 1; i < numProofs; i += 2) {
        BOOST_CHECK(pm.removeNode(i));
        BOOST_CHECK(
            pm.forPeer(proofs[i]->getId(), [&](const avalanche::Peer &peer) {
                return peer.node_count == 0;
            }));
    }
    BOOST_CHECK(pm.removeNode(42));
    BOOST_CHECK(pm.forPeer(
        conflictingProofs[0]->getId(),
        [&](const avalanche::Peer &peer) { return peer.node_count == 0; }));
    // Note: no time elapsed here, yet the now node-less peers are cleaned up
    // right away by the next call.
    TestPeerManager::cleanupDanglingProofs(pm);
    for (size_t i = 0; i < numProofs; i++) {
        const bool hadNodeAttached = i % 2;
        // All initially valid proofs have now been discarded
        BOOST_CHECK(!pm.exists(proofs[i]->getId()));
        // The remaining conflicting proofs are promoted
        BOOST_CHECK_EQUAL(!pm.exists(conflictingProofs[i]->getId()),
                          !hadNodeAttached);
        BOOST_CHECK(!pm.isInConflictingPool(conflictingProofs[i]->getId()));
        BOOST_CHECK_EQUAL(pm.isBoundToPeer(conflictingProofs[i]->getId()),
                          hadNodeAttached);
    }
    // Elapse the timeout for the newly promoted conflicting proofs
    elapseTime(avalanche::Peer::DANGLING_TIMEOUT);
    // All other proofs have now been discarded
    TestPeerManager::cleanupDanglingProofs(pm);
    for (size_t i = 0; i < numProofs; i++) {
        // All proofs have finally been discarded
        BOOST_CHECK(!pm.exists(proofs[i]->getId()));
        BOOST_CHECK(!pm.exists(conflictingProofs[i]->getId()));
    }
}
// A proof staking a utxo that is unknown to the coin view must be rejected
// with the MISSING_UTXO registration result.
BOOST_AUTO_TEST_CASE(register_proof_missing_utxo) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    CKey stakeKey = CKey::MakeCompressedKey();
    // Build a proof over a random txid that was never added to the utxo set
    auto orphanProof = buildProofWithOutpoints(
        stakeKey, {{TxId(GetRandHash()), 0}}, PROOF_DUST_THRESHOLD);
    ProofRegistrationState registrationState;
    BOOST_CHECK(!pm.registerProof(orphanProof, registrationState));
    BOOST_CHECK(registrationState.GetResult() ==
                ProofRegistrationResult::MISSING_UTXO);
}
// Check that a proof is evicted once the chain tip's median time past
// reaches its expiration time, and that its conflicting proof then takes
// its place in the valid pool.
BOOST_AUTO_TEST_CASE(proof_expiry) {
    gArgs.ForceSetArg("-avalancheconflictingproofcooldown", "0");
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    const int64_t tipTime =
        WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip())
            ->GetBlockTime();
    CKey stakeKey = CKey::MakeCompressedKey();
    auto stakeOutpoint = createUtxo(chainman.ActiveChainstate(), stakeKey);
    // Two proofs compete for the same stake: the first expires earlier but
    // carries the higher sequence number.
    auto proofToExpire =
        buildProof(stakeKey, {{stakeOutpoint, PROOF_DUST_THRESHOLD}}, stakeKey,
                   2, 100, false, tipTime + 1);
    auto conflictingProof =
        buildProof(stakeKey, {{stakeOutpoint, PROOF_DUST_THRESHOLD}}, stakeKey,
                   1, 100, false, tipTime + 2);
    // The not-yet-expired proof registers fine and is bound to a peer
    BOOST_CHECK(pm.registerProof(proofToExpire));
    BOOST_CHECK(pm.isBoundToPeer(proofToExpire->getId()));
    // Despite its longer lifetime, the conflicting proof loses the sequence
    // number comparison and lands in the conflicting pool.
    BOOST_CHECK(!pm.registerProof(conflictingProof));
    BOOST_CHECK(pm.isInConflictingPool(conflictingProof->getId()));
    // Mine blocks until the MTP of the tip reaches the proof expiration time
    for (int64_t step = 0; step < 6; step++) {
        SetMockTime(proofToExpire->getExpirationTime() + step);
        CreateAndProcessBlock({}, CScript());
    }
    BOOST_CHECK_EQUAL(
        WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip())
            ->GetMedianTimePast(),
        proofToExpire->getExpirationTime());
    pm.updatedBlockTip();
    // The expired proof is gone...
    BOOST_CHECK(!pm.exists(proofToExpire->getId()));
    // ...and the conflicting proof got promoted to the valid pool
    BOOST_CHECK(pm.isBoundToPeer(conflictingProof->getId()));
    gArgs.ClearForcedArg("-avalancheconflictingproofcooldown");
}
// Check that the peer availability score follows an exponential response to
// the per-node scores (time constant tau), rises and falls monotonically
// with node responsiveness, and is reset when the proof is invalidated and
// re-registered even though the nodes stay connected.
BOOST_AUTO_TEST_CASE(peer_availability_score) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    const std::vector<std::tuple<uint32_t, uint32_t, double>> testCases = {
        // {step, tau, decay_factor}
        {10, 100, 1. - std::exp(-1. * 10 / 100)},
        // Current defaults
        {AVALANCHE_STATISTICS_REFRESH_PERIOD.count(),
         AVALANCHE_STATISTICS_TIME_CONSTANT.count(),
         AVALANCHE_STATISTICS_DECAY_FACTOR},
    };
    for (const auto &[step, tau, decayFactor] : testCases) {
        // Add a peer
        auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(proof));
        auto proofid = proof->getId();
        // Add some nodes for this peer
        const int numNodesPerPeer = 5;
        for (auto nodeid = 0; nodeid < numNodesPerPeer; nodeid++) {
            BOOST_CHECK(pm.addNode(nodeid, proofid));
        }
        auto getNodeAvailabilityScore = [&](double avgScore,
                                            NodeId nodeid) -> double {
            // Spread scores over a range of values such that their average is
            // the provided value.
            return (nodeid - numNodesPerPeer / 2) * 2 + avgScore;
        };
        // Read back the peer's aggregated availability score
        auto getAvailabilityScore = [&]() {
            double score{0.0};
            pm.forPeer(proofid, [&](auto &peer) {
                score = peer.availabilityScore;
                return true;
            });
            return score;
        };
        // A fresh peer starts at a zero score
        double previousScore = getAvailabilityScore();
        BOOST_CHECK_SMALL(previousScore, 1e-6);
        // Check the statistics follow an exponential response for 1 to 10 tau
        for (size_t i = 1; i <= 10; i++) {
            for (uint32_t j = 0; j < tau; j += step) {
                // Nodes respond to polls > 50% of the time (positive score)
                pm.updateAvailabilityScores(decayFactor, [&](auto nodeid) {
                    return getNodeAvailabilityScore(1.0, nodeid);
                });
                // Expect a monotonic rise
                double currentScore = getAvailabilityScore();
                BOOST_CHECK_GE(currentScore, previousScore);
                previousScore = currentScore;
            }
            // We expect (1 - e^-i) * numNodesPerPeer after i * tau. The
            // tolerance is expressed as a percentage, and we add a (large)
            // 0.1% margin to account for floating point errors.
            BOOST_CHECK_CLOSE(previousScore,
                              -1 * std::expm1(-1. * i) * numNodesPerPeer,
                              100.1 / tau);
        }
        // After 10 tau we should be very close to 100% (about 99.995%)
        BOOST_CHECK_CLOSE(previousScore, numNodesPerPeer, 0.01);
        // Make the proof invalid
        BOOST_CHECK(pm.rejectProof(
            proofid, avalanche::PeerManager::RejectionMode::INVALIDATE));
        BOOST_CHECK(!pm.isBoundToPeer(proofid));
        BOOST_CHECK(!pm.exists(proofid));
        // Re-register the proof
        BOOST_CHECK(pm.registerProof(proof));
        // The nodes are still attached to the re-registered peer
        pm.forPeer(proofid, [&](auto &peer) {
            int nodeCount = 0;
            pm.forEachNode(peer, [&](const auto &node) { nodeCount++; });
            BOOST_CHECK_EQUAL(nodeCount, numNodesPerPeer);
            return true;
        });
        // Peer score should have reset even though nodes are still connected
        previousScore = getAvailabilityScore();
        BOOST_CHECK_SMALL(previousScore, 1e-6);
        // Bring the score back up to where we were
        for (size_t i = 1; i <= 10; i++) {
            for (uint32_t j = 0; j < tau; j += step) {
                pm.updateAvailabilityScores(decayFactor, [&](auto nodeid) {
                    return getNodeAvailabilityScore(1.0, nodeid);
                });
            }
        }
        previousScore = getAvailabilityScore();
        BOOST_CHECK_CLOSE(previousScore, numNodesPerPeer, 0.01);
        for (size_t i = 1; i <= 3; i++) {
            for (uint32_t j = 0; j < tau; j += step) {
                // Nodes only respond to polls 50% of the time (0 score)
                pm.updateAvailabilityScores(decayFactor, [&](auto nodeid) {
                    return getNodeAvailabilityScore(0.0, nodeid);
                });
                // Expect a monotonic fall
                double currentScore = getAvailabilityScore();
                BOOST_CHECK_LE(currentScore, previousScore);
                previousScore = currentScore;
            }
            // There is a slight error in the expected value because we did not
            // start the decay at exactly 100%, but the 0.1% margin is at least
            // an order of magnitude larger than the expected error so it
            // doesn't matter.
            BOOST_CHECK_CLOSE(previousScore,
                              (1. + std::expm1(-1. * i)) * numNodesPerPeer,
                              100.1 / tau);
        }
        // After 3 more tau we should be under 5%
        BOOST_CHECK_LT(previousScore, .05 * numNodesPerPeer);
        for (size_t i = 1; i <= 100; i++) {
            // Nodes respond to polls < 50% of the time (negative score)
            pm.updateAvailabilityScores(decayFactor, [&](auto nodeid) {
                return getNodeAvailabilityScore(-10.0, nodeid);
            });
            // It's still a monotonic fall, and the score should turn negative.
            double currentScore = getAvailabilityScore();
            BOOST_CHECK_LE(currentScore, previousScore);
            BOOST_CHECK_LE(currentScore, 0.);
            previousScore = currentScore;
        }
    }
}
// Check the staking reward winner selection: the requirements on proof
// registration time and finalization, the flaky-peer handling driven by
// remote proof statuses, and the resulting winner list where each entry is
// a (proofid, payout script) pair.
// NOTE(fix): this block contained unresolved diff hunk markers ("-"/"+"
// lines); the "+" side is applied, matching the release note that winners
// now carry the proofid alongside the payout script.
BOOST_AUTO_TEST_CASE(select_staking_reward_winner) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    // Build a proof staking the given amount on a fresh utxo, paying out to
    // the given script.
    auto buildProofWithAmountAndPayout = [&](Amount amount,
                                             const CScript &payoutScript) {
        const CKey key = CKey::MakeCompressedKey();
        COutPoint utxo = createUtxo(active_chainstate, key, amount);
        return buildProof(key, {{std::move(utxo), amount}},
                          /*master=*/CKey::MakeCompressedKey(), /*sequence=*/1,
                          /*height=*/100, /*is_coinbase=*/false,
                          /*expirationTime=*/0, payoutScript);
    };
    // Each winner is the (proofid, payout script) of an acceptable candidate
    std::vector<std::pair<ProofId, CScript>> winners;
    // Null pprev
    BOOST_CHECK(!pm.selectStakingRewardWinner(nullptr, winners));
    CBlockIndex prevBlock;
    auto now = GetTime<std::chrono::seconds>();
    SetMockTime(now);
    prevBlock.nTime = now.count();
    BlockHash prevHash{uint256::ONE};
    prevBlock.phashBlock = &prevHash;
    // No peer
    BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
    // Let's build a list of payout addresses, and register a proofs for each
    // address
    size_t numProofs = 8;
    std::vector<ProofRef> proofs;
    proofs.reserve(numProofs);
    for (size_t i = 0; i < numProofs; i++) {
        const CKey key = CKey::MakeCompressedKey();
        CScript payoutScript = GetScriptForRawPubKey(key.GetPubKey());
        auto proof =
            buildProofWithAmountAndPayout(PROOF_DUST_THRESHOLD, payoutScript);
        PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
        BOOST_CHECK_NE(peerid, NO_PEER);
        // Finalize the proof
        BOOST_CHECK(pm.setFinalized(peerid));
        proofs.emplace_back(std::move(proof));
    }
    // Make sure the proofs have been registered before the prev block was found
    // and before 6x the peer replacement cooldown.
    now += 6 * avalanche::Peer::DANGLING_TIMEOUT + 1s;
    SetMockTime(now);
    prevBlock.nTime = now.count();
    // At this stage we have a set of peers out of which none has any node
    // attached, so they're all considered flaky. Note that we have no remote
    // proofs status yet.
    BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
    BOOST_CHECK_LE(winners.size(), numProofs);
    // Let's add a node for each peer
    for (size_t i = 0; i < numProofs; i++) {
        BOOST_CHECK(TestPeerManager::isFlaky(pm, proofs[i]->getId()));
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_LE(winners.size(), numProofs);
        BOOST_CHECK(pm.addNode(NodeId(i), proofs[i]->getId()));
        BOOST_CHECK(!TestPeerManager::isFlaky(pm, proofs[i]->getId()));
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_LE(winners.size(), numProofs - i);
    }
    // Now we have a single winner
    BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
    BOOST_CHECK_LE(winners.size(), 1);
    // All proofs have the same amount, so the same probability to get picked.
    // Let's compute how many loop iterations we need to have a low false
    // negative rate when checking for this. Target false positive rate is
    // 10ppm (aka 1/100000).
    const size_t loop_iters =
        size_t(-1.0 * std::log(100000.0) /
               std::log((double(numProofs) - 1) / numProofs)) +
        1;
    BOOST_CHECK_GT(loop_iters, numProofs);
    std::unordered_map<std::string, size_t> winningCounts;
    for (size_t i = 0; i < loop_iters; i++) {
        // The winner depends on the previous block hash, so randomize it
        BlockHash randomHash = BlockHash(GetRandHash());
        prevBlock.phashBlock = &randomHash;
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        winningCounts[FormatScript(winners[0].second)]++;
    }
    // Every proof won at least once
    BOOST_CHECK_EQUAL(winningCounts.size(), numProofs);
    prevBlock.phashBlock = &prevHash;
    // Ensure all nodes have all the proofs
    for (size_t i = 0; i < numProofs; i++) {
        for (size_t j = 0; j < numProofs; j++) {
            BOOST_CHECK(
                pm.saveRemoteProof(proofs[j]->getId(), NodeId(i), true));
        }
    }
    // Make all the proofs flaky. This loop needs to be updated if the threshold
    // or the number of proofs change, so assert the test precondition.
    BOOST_CHECK_GT(3. / numProofs, 0.3);
    for (size_t i = 0; i < numProofs; i++) {
        const NodeId nodeid = NodeId(i);
        // Each node reports 3 proofs (its own neighborhood) as missing
        BOOST_CHECK(pm.saveRemoteProof(
            proofs[(i - 1 + numProofs) % numProofs]->getId(), nodeid, false));
        BOOST_CHECK(pm.saveRemoteProof(
            proofs[(i + numProofs) % numProofs]->getId(), nodeid, false));
        BOOST_CHECK(pm.saveRemoteProof(
            proofs[(i + 1 + numProofs) % numProofs]->getId(), nodeid, false));
    }
    // Now all the proofs are flaky
    BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
    for (const auto &proof : proofs) {
        BOOST_CHECK(TestPeerManager::isFlaky(pm, proof->getId()));
    }
    BOOST_CHECK_EQUAL(winners.size(), numProofs);
    // Revert flakyness for all proofs
    for (const auto &proof : proofs) {
        for (NodeId nodeid = 0; nodeid < NodeId(numProofs); nodeid++) {
            BOOST_CHECK(pm.saveRemoteProof(proof->getId(), nodeid, true));
        }
    }
    BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
    BOOST_CHECK_EQUAL(winners.size(), 1);
    // Increase the list from 1 to 4 winners by making them flaky
    for (size_t numWinner = 1; numWinner < 4; numWinner++) {
        // Who is the last possible winner ?
        CScript lastWinner = winners[numWinner - 1].second;
        // Make the last winner flaky, the other proofs untouched
        ProofId winnerProofId = ProofId(uint256::ZERO);
        for (const auto &proof : proofs) {
            if (proof->getPayoutScript() == lastWinner) {
                winnerProofId = proof->getId();
                break;
            }
        }
        BOOST_CHECK_NE(winnerProofId, ProofId(uint256::ZERO));
        for (NodeId nodeid = 0; nodeid < NodeId(numProofs); nodeid++) {
            BOOST_CHECK(pm.saveRemoteProof(winnerProofId, nodeid, false));
        }
        BOOST_CHECK(TestPeerManager::isFlaky(pm, winnerProofId));
        // There should be now exactly numWinner + 1 winners
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_EQUAL(winners.size(), numWinner + 1);
    }
    // One more time and the nodes will be missing too many proofs, so they are
    // no longer considered for flakyness evaluation and we're back to a single
    // winner.
    CScript lastWinner = winners[3].second;
    ProofId winnerProofId = ProofId(uint256::ZERO);
    for (const auto &proof : proofs) {
        if (proof->getPayoutScript() == lastWinner) {
            winnerProofId = proof->getId();
            break;
        }
    }
    BOOST_CHECK_NE(winnerProofId, ProofId(uint256::ZERO));
    for (NodeId nodeid = 0; nodeid < NodeId(numProofs); nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(winnerProofId, nodeid, false));
    }
    // We're back to exactly 1 winner
    BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
    BOOST_CHECK_EQUAL(winners.size(), 1);
    // Remove all proofs
    for (auto &proof : proofs) {
        BOOST_CHECK(pm.rejectProof(
            proof->getId(), avalanche::PeerManager::RejectionMode::INVALIDATE));
    }
    // No more winner
    prevBlock.phashBlock = &prevHash;
    BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
    {
        // Add back a single proof
        const CKey key = CKey::MakeCompressedKey();
        CScript payoutScript = GetScriptForRawPubKey(key.GetPubKey());
        auto proof =
            buildProofWithAmountAndPayout(PROOF_DUST_THRESHOLD, payoutScript);
        PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
        BOOST_CHECK_NE(peerid, NO_PEER);
        // The single proof should always be selected, but:
        // 1. The proof is not finalized, and has been registered after the last
        //    block was mined.
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // 2. The proof has been registered after the last block was mined.
        BOOST_CHECK(pm.setFinalized(peerid));
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // 3. The proof has been registered 60min from the previous block time,
        //    but the previous block time is in the future.
        now += 50min + 1s;
        SetMockTime(now);
        prevBlock.nTime = (now + 10min).count();
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // 4. The proof has been registered 60min from now, but only 50min from
        //    the previous block time.
        now += 10min;
        SetMockTime(now);
        prevBlock.nTime = (now - 10min).count();
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // 5. Now the proof has it all
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        // With a single proof, it's easy to determine the winner
        BOOST_CHECK_EQUAL(FormatScript(winners[0].second),
                          FormatScript(payoutScript));
        // Remove the proof
        BOOST_CHECK(pm.rejectProof(
            proof->getId(), avalanche::PeerManager::RejectionMode::INVALIDATE));
    }
    {
        BOOST_CHECK_EQUAL(TestPeerManager::getPeerCount(pm), 0);
        proofs.clear();
        for (size_t i = 0; i < 4; i++) {
            // Add 4 proofs, registered at a 30 minutes interval
            SetMockTime(now + i * 30min);
            const CKey key = CKey::MakeCompressedKey();
            CScript payoutScript = GetScriptForRawPubKey(key.GetPubKey());
            auto proof = buildProofWithAmountAndPayout(PROOF_DUST_THRESHOLD,
                                                       payoutScript);
            PeerId peerid = TestPeerManager::registerAndGetPeerId(pm, proof);
            BOOST_CHECK_NE(peerid, NO_PEER);
            BOOST_CHECK(pm.forPeer(proof->getId(), [&](const Peer &peer) {
                return peer.registration_time == now + i * 30min;
            }));
            BOOST_CHECK(pm.addNode(NodeId(i), proof->getId()));
            BOOST_CHECK(pm.setFinalized(peerid));
            proofs.push_back(proof);
        }
        // No proof has been registered before the previous block time
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // 1 proof has been registered > 30min from the previous block time, but
        // none > 60 minutes from the previous block time
        // => we have no winner.
        now += 30min + 1s;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(!pm.selectStakingRewardWinner(&prevBlock, winners));
        // The top winner must have been registered more than 60 minutes
        // before the previous block time.
        auto checkRegistrationTime =
            [&](const std::pair<ProofId, CScript> &winner) {
                pm.forEachPeer([&](const Peer &peer) {
                    if (peer.proof->getPayoutScript() == winner.second) {
                        BOOST_CHECK_LT(peer.registration_time.count(),
                                       (now - 60min).count());
                    }
                    return true;
                });
            };
        // 1 proof has been registered > 60min but < 90min from the previous
        // block time and 1 more has been registered > 30 minutes
        // => we have a winner and one acceptable substitute.
        now += 30min;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_EQUAL(winners.size(), 2);
        checkRegistrationTime(winners[0]);
        // 1 proof has been registered > 60min but < 90min from the
        // previous block time, 1 has been registered > 90 minutes and 1 more
        // has been registered > 30 minutes
        // => we have 1 winner and up to 2 acceptable substitutes.
        now += 30min;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_LE(winners.size(), 3);
        checkRegistrationTime(winners[0]);
        // 1 proofs has been registered > 60min but < 90min from the
        // previous block time, 2 has been registered > 90 minutes and 1 more
        // has been registered > 30 minutes
        // => we have 1 winner, and up to 2 substitutes.
        now += 30min;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_LE(winners.size(), 3);
        checkRegistrationTime(winners[0]);
        // 1 proof has been registered > 60min but < 90min from the
        // previous block time and 3 more has been registered > 90 minutes
        // => we have 1 winner, and up to 1 substitute.
        now += 30min;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_LE(winners.size(), 2);
        checkRegistrationTime(winners[0]);
        // All proofs has been registered > 90min from the previous block time
        // => we have 1 winner, and no substitute.
        now += 30min;
        SetMockTime(now);
        prevBlock.nTime = now.count();
        BOOST_CHECK(pm.selectStakingRewardWinner(&prevBlock, winners));
        BOOST_CHECK_EQUAL(winners.size(), 1);
        checkRegistrationTime(winners[0]);
    }
}
// Check the remote proof bookkeeping: per-(proofid, nodeid) presence status
// with its last update time, cleanup when a node is removed, and the
// oldest-first eviction once a node tracks more than the allowed number of
// remote proofs. The checks below demonstrate that the allowed number grows
// with our own peer count once it exceeds MAX_REMOTE_PROOFS.
BOOST_AUTO_TEST_CASE(remote_proof) {
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    auto mockTime = GetTime<std::chrono::seconds>();
    SetMockTime(mockTime);
    // Remote statuses can be saved for nodes the peer manager doesn't know yet
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), 0, true));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ONE), 0, false));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), 1, true));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ONE), 1, false));
    // Assert the stored remote proof entry matches the expected presence flag
    // and last update time.
    auto checkRemoteProof =
        [&](const ProofId &proofid, const NodeId nodeid,
            const bool expectedPresent,
            const std::chrono::seconds &expectedlastUpdate) {
            auto remoteProof =
                TestPeerManager::getRemoteProof(pm, proofid, nodeid);
            BOOST_CHECK(remoteProof.has_value());
            BOOST_CHECK_EQUAL(remoteProof->proofid, proofid);
            BOOST_CHECK_EQUAL(remoteProof->nodeid, nodeid);
            BOOST_CHECK_EQUAL(remoteProof->present, expectedPresent);
            BOOST_CHECK_EQUAL(remoteProof->lastUpdate.count(),
                              expectedlastUpdate.count());
        };
    checkRemoteProof(ProofId(uint256::ZERO), 0, true, mockTime);
    checkRemoteProof(ProofId(uint256::ONE), 0, false, mockTime);
    checkRemoteProof(ProofId(uint256::ZERO), 1, true, mockTime);
    checkRemoteProof(ProofId(uint256::ONE), 1, false, mockTime);
    mockTime += 1s;
    SetMockTime(mockTime);
    // Reverse the state
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), 0, false));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ONE), 0, true));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), 1, false));
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ONE), 1, true));
    // The new statuses carry the updated timestamp
    checkRemoteProof(ProofId(uint256::ZERO), 0, false, mockTime);
    checkRemoteProof(ProofId(uint256::ONE), 0, true, mockTime);
    checkRemoteProof(ProofId(uint256::ZERO), 1, false, mockTime);
    checkRemoteProof(ProofId(uint256::ONE), 1, true, mockTime);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    // Actually register the nodes
    auto proof0 = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof0));
    BOOST_CHECK(pm.addNode(0, proof0->getId()));
    auto proof1 = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof1));
    BOOST_CHECK(pm.addNode(1, proof1->getId()));
    // Removing the node removes all the associated remote proofs
    BOOST_CHECK(pm.removeNode(0));
    BOOST_CHECK(
        !TestPeerManager::getRemoteProof(pm, ProofId(uint256::ZERO), 0));
    BOOST_CHECK(!TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 0));
    // Other nodes are left untouched
    checkRemoteProof(ProofId(uint256::ZERO), 1, false, mockTime);
    checkRemoteProof(ProofId(uint256::ONE), 1, true, mockTime);
    BOOST_CHECK(pm.removeNode(1));
    BOOST_CHECK(
        !TestPeerManager::getRemoteProof(pm, ProofId(uint256::ZERO), 0));
    BOOST_CHECK(!TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 0));
    BOOST_CHECK(
        !TestPeerManager::getRemoteProof(pm, ProofId(uint256::ZERO), 1));
    BOOST_CHECK(!TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 1));
    // Fill node 0 up to the remote proofs limit, each save 1 second apart so
    // the entries have distinct update times.
    for (size_t i = 0; i < avalanche::PeerManager::MAX_REMOTE_PROOFS; i++) {
        mockTime += 1s;
        SetMockTime(mockTime);
        const ProofId proofid{uint256(i)};
        BOOST_CHECK(pm.saveRemoteProof(proofid, 0, true));
        checkRemoteProof(proofid, 0, true, mockTime);
    }
    // The last updated proof is still there
    checkRemoteProof(ProofId(uint256::ZERO), 0, true,
                     mockTime -
                         (avalanche::PeerManager::MAX_REMOTE_PROOFS - 1) * 1s);
    // If we add one more it gets evicted
    mockTime += 1s;
    SetMockTime(mockTime);
    ProofId proofid{
        uint256(uint8_t(avalanche::PeerManager::MAX_REMOTE_PROOFS))};
    BOOST_CHECK(pm.saveRemoteProof(proofid, 0, true));
    checkRemoteProof(proofid, 0, true, mockTime);
    // Proof id 0 has been evicted
    BOOST_CHECK(
        !TestPeerManager::getRemoteProof(pm, ProofId(uint256::ZERO), 0));
    // Proof id 1 is still there
    BOOST_CHECK(TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 0));
    // Add MAX_REMOTE_PROOFS / 2 + 1 proofs to our node to bump the limit
    // Note that we already have proofs from the beginning of the test.
    std::vector<ProofRef> proofs;
    for (size_t i = 0; i < avalanche::PeerManager::MAX_REMOTE_PROOFS / 2 - 1;
         i++) {
        auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(proof));
        proofs.push_back(proof);
    }
    BOOST_CHECK_EQUAL(TestPeerManager::getPeerCount(pm),
                      avalanche::PeerManager::MAX_REMOTE_PROOFS / 2 + 1);
    // We can now add one more without eviction
    mockTime += 1s;
    SetMockTime(mockTime);
    proofid = ProofId{
        uint256(uint8_t(avalanche::PeerManager::MAX_REMOTE_PROOFS + 1))};
    BOOST_CHECK(pm.saveRemoteProof(proofid, 0, true));
    checkRemoteProof(proofid, 0, true, mockTime);
    // Proof id 1 is still there
    BOOST_CHECK(TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 0));
    // Shrink our proofs to MAX_REMOTE_PROOFS / 2 - 1
    BOOST_CHECK(pm.rejectProof(
        proofs[0]->getId(), avalanche::PeerManager::RejectionMode::INVALIDATE));
    BOOST_CHECK(pm.rejectProof(
        proofs[1]->getId(), avalanche::PeerManager::RejectionMode::INVALIDATE));
    BOOST_CHECK_EQUAL(TestPeerManager::getPeerCount(pm),
                      avalanche::PeerManager::MAX_REMOTE_PROOFS / 2 - 1);
    // Upon update the first proof got evicted
    proofid = ProofId{
        uint256(uint8_t(avalanche::PeerManager::MAX_REMOTE_PROOFS + 2))};
    BOOST_CHECK(pm.saveRemoteProof(proofid, 0, true));
    // Proof id 1 is evicted
    BOOST_CHECK(!TestPeerManager::getRemoteProof(pm, ProofId(uint256::ONE), 0));
    // So is proof id 2
    BOOST_CHECK(!TestPeerManager::getRemoteProof(pm, ProofId(uint256(2)), 0));
    // But proof id 3 is still here
    BOOST_CHECK(TestPeerManager::getRemoteProof(pm, ProofId(uint256(3)), 0));
}
BOOST_AUTO_TEST_CASE(get_remote_status) {
    // Exercise TestPeerManager::getRemotePresenceStatus: the returned optional
    // is empty while the remote nodes' votes are inconclusive, and otherwise
    // carries whether the proof is considered present. The weighting is by
    // peer stake, not by node count, as the last scenarios demonstrate.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    auto mockTime = GetTime<std::chrono::seconds>();
    SetMockTime(mockTime);
    // No remote proof yet
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .has_value());
    // 6/12 (50%) of the stakes: build 12 equal-score peers, each with one
    // node, and make every even node advertise the proof.
    for (NodeId nodeid = 0; nodeid < 12; nodeid++) {
        auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(proof));
        BOOST_CHECK(pm.addNode(nodeid, proof->getId()));
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid,
                                       nodeid % 2 == 0));
    }
    // An even 50/50 split is not conclusive either way
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .has_value());
    // 7/12 (~58%) of the stakes
    for (NodeId nodeid = 0; nodeid < 5; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, false));
    }
    for (NodeId nodeid = 5; nodeid < 12; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, true));
    }
    BOOST_CHECK(
        TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
            .value());
    // Add our local proof so we have 7/13 (~54% < 55%)
    auto localProof =
        buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
    TestPeerManager::setLocalProof(pm, localProof);
    BOOST_CHECK(pm.registerProof(localProof));
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .has_value());
    // Remove the local proof to revert back to 7/12 (~58%)
    pm.rejectProof(localProof->getId());
    TestPeerManager::setLocalProof(pm, ProofRef());
    BOOST_CHECK(
        TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
            .value());
    // 5/12 (~42%) of the stakes: conclusively absent
    for (NodeId nodeid = 0; nodeid < 5; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, true));
    }
    for (NodeId nodeid = 5; nodeid < 12; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, false));
    }
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .value());
    // Most nodes agree but not enough of the stakes: give node 0 a proof
    // whose score dwarfs the combined score of the other peers.
    auto bigProof =
        buildRandomProof(active_chainstate, 100 * MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(bigProof));
    // Update the node's proof
    BOOST_CHECK(pm.addNode(0, bigProof->getId()));
    // 7/12 (~58%) of the remotes, but < 10% of the stakes => absent
    for (NodeId nodeid = 0; nodeid < 5; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, false));
    }
    for (NodeId nodeid = 5; nodeid < 12; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, true));
    }
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .value());
    // 5/12 (42%) of the remotes, but > 90% of the stakes => present
    for (NodeId nodeid = 0; nodeid < 5; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, true));
    }
    for (NodeId nodeid = 5; nodeid < 12; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, false));
    }
    BOOST_CHECK(
        TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
            .value());
    // Start over: check that several nodes sharing the same peer only count
    // once stake-wise.
    TestPeerManager::clearPeers(pm);
    // Peer 1 has 1 node (id 0)
    auto proof1 = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof1));
    BOOST_CHECK(pm.addNode(0, proof1->getId()));
    // Peer 2 has 5 nodes (ids 1 to 5)
    auto proof2 = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
    BOOST_CHECK(pm.registerProof(proof2));
    for (NodeId nodeid = 1; nodeid < 6; nodeid++) {
        BOOST_CHECK(pm.addNode(nodeid, proof2->getId()));
    }
    // Node 0 is missing proofid 0, nodes 1 to 5 have it
    BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), 0, false));
    for (NodeId nodeid = 1; nodeid < 6; nodeid++) {
        BOOST_CHECK(pm.saveRemoteProof(ProofId(uint256::ZERO), nodeid, true));
    }
    // At this stage we have 5/6 nodes with the proof, but since all the nodes
    // advertising the proof are from the same peer, we only have 1/2 peers,
    // i.e. 50% of the stakes: inconclusive.
    BOOST_CHECK(
        !TestPeerManager::getRemotePresenceStatus(pm, ProofId(uint256::ZERO))
             .has_value());
}
BOOST_AUTO_TEST_CASE(dangling_with_remotes) {
    // Check the interaction between dangling-proof cleanup and the remote
    // proof status: a proof with no node attached becomes dangling after
    // DANGLING_TIMEOUT, but is promoted back to a peer if enough remote
    // nodes report it as present.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    auto mockTime = GetTime<std::chrono::seconds>();
    SetMockTime(mockTime);
    // Add a few proofs with no node attached
    std::vector<ProofRef> proofs;
    for (size_t i = 0; i < 10; i++) {
        auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(proof));
        proofs.push_back(proof);
    }
    // The proofs are recent enough, the cleanup won't make them dangling
    TestPeerManager::cleanupDanglingProofs(pm);
    for (const auto &proof : proofs) {
        BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(!pm.isDangling(proof->getId()));
    }
    // Elapse enough time so we get the proofs dangling
    mockTime += avalanche::Peer::DANGLING_TIMEOUT + 1s;
    SetMockTime(mockTime);
    // The proofs are now dangling
    TestPeerManager::cleanupDanglingProofs(pm);
    for (const auto &proof : proofs) {
        BOOST_CHECK(!pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(pm.isDangling(proof->getId()));
    }
    // Add some remotes having this proof: 10 independent peers, each with
    // one node, all claiming every tracked proof is present.
    for (NodeId nodeid = 0; nodeid < 10; nodeid++) {
        auto localProof =
            buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        BOOST_CHECK(pm.registerProof(localProof));
        BOOST_CHECK(pm.addNode(nodeid, localProof->getId()));
        for (const auto &proof : proofs) {
            BOOST_CHECK(pm.saveRemoteProof(proof->getId(), nodeid, true));
        }
    }
    // The proofs are all present according to the remote status
    for (const auto &proof : proofs) {
        BOOST_CHECK(TestPeerManager::getRemotePresenceStatus(pm, proof->getId())
                        .value());
    }
    // The proofs should be added back as a peer
    std::unordered_set<ProofRef, SaltedProofHasher> registeredProofs;
    TestPeerManager::cleanupDanglingProofs(pm, registeredProofs);
    for (const auto &proof : proofs) {
        BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(!pm.isDangling(proof->getId()));
        BOOST_CHECK_EQUAL(registeredProofs.count(proof), 1);
    }
    BOOST_CHECK_EQUAL(proofs.size(), registeredProofs.size());
    // Remove the proofs from the remotes
    for (NodeId nodeid = 0; nodeid < 10; nodeid++) {
        for (const auto &proof : proofs) {
            BOOST_CHECK(pm.saveRemoteProof(proof->getId(), nodeid, false));
        }
    }
    // The proofs are now all absent according to the remotes
    for (const auto &proof : proofs) {
        BOOST_CHECK(
            !TestPeerManager::getRemotePresenceStatus(pm, proof->getId())
                 .value());
    }
    // The proofs are not dangling yet as they have been registered recently
    TestPeerManager::cleanupDanglingProofs(pm, registeredProofs);
    BOOST_CHECK(registeredProofs.empty());
    for (const auto &proof : proofs) {
        BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(!pm.isDangling(proof->getId()));
    }
    // Wait some time then run the cleanup again, the proofs will be dangling
    mockTime += avalanche::Peer::DANGLING_TIMEOUT + 1s;
    SetMockTime(mockTime);
    TestPeerManager::cleanupDanglingProofs(pm, registeredProofs);
    BOOST_CHECK(registeredProofs.empty());
    for (const auto &proof : proofs) {
        BOOST_CHECK(!pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(pm.isDangling(proof->getId()));
    }
    // Pull them back one more time: remotes claiming presence re-promote
    // the dangling proofs to peers.
    for (NodeId nodeid = 0; nodeid < 10; nodeid++) {
        for (const auto &proof : proofs) {
            BOOST_CHECK(pm.saveRemoteProof(proof->getId(), nodeid, true));
        }
    }
    TestPeerManager::cleanupDanglingProofs(pm, registeredProofs);
    for (const auto &proof : proofs) {
        BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
        BOOST_CHECK(!pm.isDangling(proof->getId()));
        BOOST_CHECK_EQUAL(registeredProofs.count(proof), 1);
    }
    BOOST_CHECK_EQUAL(proofs.size(), registeredProofs.size());
}
BOOST_AUTO_TEST_CASE(avapeers_dump) {
    // Round-trip the peer set through dumpPeersToFile/loadPeersFromFile and
    // check that per-peer metadata (finalization flag, registration time,
    // next possible conflict time) survives, plus the error paths: empty
    // file, missing file, bad version, truncated/ill-formed content.
    ChainstateManager &chainman = *Assert(m_node.chainman);
    avalanche::PeerManager pm(PROOF_DUST_THRESHOLD, chainman);
    Chainstate &active_chainstate = chainman.ActiveChainstate();
    auto mockTime = GetTime<std::chrono::seconds>();
    SetMockTime(mockTime);
    std::vector<ProofRef> proofs;
    for (size_t i = 0; i < 10; i++) {
        SetMockTime(mockTime + std::chrono::seconds{i});
        auto proof = buildRandomProof(active_chainstate, MIN_VALID_PROOF_SCORE);
        // Registration time is mockTime + i
        BOOST_CHECK(pm.registerProof(proof));
        auto peerid = TestPeerManager::getPeerIdForProofId(pm, proof->getId());
        // Next conflict time is mockTime + 100 + i
        BOOST_CHECK(pm.updateNextPossibleConflictTime(
            peerid, mockTime + std::chrono::seconds{100 + i}));
        // The 5 first proofs are finalized
        if (i < 5) {
            BOOST_CHECK(pm.setFinalized(peerid));
        }
        proofs.push_back(proof);
    }
    BOOST_CHECK_EQUAL(TestPeerManager::getPeerCount(pm), 10);
    const fs::path testDumpPath = "test_avapeers_dump.dat";
    BOOST_CHECK(pm.dumpPeersToFile(testDumpPath));
    // Wipe the in-memory state, then restore it from the dump
    TestPeerManager::clearPeers(pm);
    std::unordered_set<ProofRef, SaltedProofHasher> registeredProofs;
    BOOST_CHECK(pm.loadPeersFromFile(testDumpPath, registeredProofs));
    BOOST_CHECK_EQUAL(registeredProofs.size(), 10);
    // Map a proofid back to its index in the proofs vector; the index
    // encodes the expected metadata for that peer.
    auto findProofIndex = [&proofs](const ProofId &proofid) {
        for (size_t i = 0; i < proofs.size(); i++) {
            if (proofs[i]->getId() == proofid) {
                return i;
            }
        }
        // ProofId not found
        BOOST_CHECK(false);
        return size_t{0};
    };
    for (const auto &proof : registeredProofs) {
        const ProofId &proofid = proof->getId();
        size_t i = findProofIndex(proofid);
        BOOST_CHECK(pm.forPeer(proofid, [&](auto &peer) {
            BOOST_CHECK_EQUAL(peer.hasFinalized, i < 5);
            BOOST_CHECK_EQUAL(peer.registration_time.count(),
                              (mockTime + std::chrono::seconds{i}).count());
            BOOST_CHECK_EQUAL(
                peer.nextPossibleConflictTime.count(),
                (mockTime + std::chrono::seconds{100 + i}).count());
            return true;
        }));
    }
    // No peer: create an empty file but generate no error
    TestPeerManager::clearPeers(pm);
    BOOST_CHECK(pm.dumpPeersToFile("test_empty_avapeers.dat"));
    // We can also load an empty file
    BOOST_CHECK(
        pm.loadPeersFromFile("test_empty_avapeers.dat", registeredProofs));
    BOOST_CHECK(registeredProofs.empty());
    BOOST_CHECK_EQUAL(TestPeerManager::getPeerCount(pm), 0);
    // If the file exists, it is overwritten
    BOOST_CHECK(pm.dumpPeersToFile("test_empty_avapeers.dat"));
    // It fails to load if the file does not exist and the registeredProofs is
    // cleared
    registeredProofs.insert(proofs[0]);
    BOOST_CHECK(!registeredProofs.empty());
    BOOST_CHECK(!pm.loadPeersFromFile("I_dont_exist.dat", registeredProofs));
    BOOST_CHECK(registeredProofs.empty());
    {
        // Change the version
        FILE *f = fsbridge::fopen("test_bad_version_avapeers.dat", "wb");
        BOOST_CHECK(f);
        CAutoFile file(f, SER_DISK, CLIENT_VERSION);
        file << static_cast<uint64_t>(-1); // Version
        file << uint64_t{0};               // Number of peers
        BOOST_CHECK(FileCommit(file.Get()));
        file.fclose();
        // Check loading fails and the registeredProofs is cleared
        registeredProofs.insert(proofs[0]);
        BOOST_CHECK(!registeredProofs.empty());
        BOOST_CHECK(!pm.loadPeersFromFile("test_bad_version_avapeers.dat",
                                          registeredProofs));
        BOOST_CHECK(registeredProofs.empty());
    }
    {
        // Wrong format, will cause a deserialization error: the header
        // announces 2 peers but the file only contains one record.
        FILE *f = fsbridge::fopen("test_ill_formed_avapeers.dat", "wb");
        BOOST_CHECK(f);
        const uint64_t now = GetTime();
        CAutoFile file(f, SER_DISK, CLIENT_VERSION);
        file << static_cast<uint64_t>(1); // Version
        file << uint64_t{2};              // Number of peers
        // Single peer content!
        file << proofs[0];
        file << true;
        file << now;
        file << now + 100;
        BOOST_CHECK(FileCommit(file.Get()));
        file.fclose();
        // Check loading fails and the registeredProofs is fed with our single
        // peer
        BOOST_CHECK(registeredProofs.empty());
        BOOST_CHECK(!pm.loadPeersFromFile("test_ill_formed_avapeers.dat",
                                          registeredProofs));
        BOOST_CHECK_EQUAL(registeredProofs.size(), 1);
        BOOST_CHECK_EQUAL((*registeredProofs.begin())->getId(),
                          proofs[0]->getId());
    }
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/rpc/avalanche.cpp b/src/rpc/avalanche.cpp
index 5af8d534d9..b25c7f2be4 100644
--- a/src/rpc/avalanche.cpp
+++ b/src/rpc/avalanche.cpp
@@ -1,1657 +1,1658 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/validation.h>
#include <common/args.h>
#include <config.h>
#include <core_io.h>
#include <index/txindex.h>
#include <key_io.h>
#include <net_processing.h>
#include <node/context.h>
#include <policy/block/stakingrewards.h>
#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/server_util.h>
#include <rpc/util.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <univalue.h>
using node::GetTransaction;
using node::NodeContext;
/**
 * RPC: getavalanchekey.
 * Expose the avalanche session public key as a hex string.
 */
static RPCHelpMan getavalanchekey() {
    return RPCHelpMan{
        "getavalanchekey",
        "Returns the key used to sign avalanche messages.\n",
        {},
        RPCResult{RPCResult::Type::STR_HEX, "", ""},
        RPCExamples{HelpExampleRpc("getavalanchekey", "")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &nodeContext = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &processor =
                EnsureAvalanche(nodeContext);
            // Hex-encode the session key for the JSON response.
            const auto sessionKey = processor.getSessionPubKey();
            return HexStr(sessionKey);
        },
    };
}
/**
 * Parse an RPC parameter as a hex-encoded public key.
 * Accepts compressed or uncompressed key lengths only; throws
 * RPC_INVALID_ADDRESS_OR_KEY otherwise.
 */
static CPubKey ParsePubKey(const UniValue &param) {
    const std::string hex = param.get_str();
    // A valid encoding is hex of exactly the compressed or the
    // uncompressed key size.
    const bool hasValidSize = hex.length() == 2 * CPubKey::COMPRESSED_SIZE ||
                              hex.length() == 2 * CPubKey::SIZE;
    if (!hasValidSize || !IsHex(hex)) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                           strprintf("Invalid public key: %s\n", hex));
    }
    return HexToPubKey(hex);
}
/**
 * Register a proof with the peer manager unless it is already known
 * (either as the node's local proof or as an existing peer proof).
 * Registration failures are reported through \p state.
 */
static bool registerProofIfNeeded(const avalanche::Processor &avalanche,
                                  avalanche::ProofRef proof,
                                  avalanche::ProofRegistrationState &state) {
    // The node's own proof never needs registration.
    if (auto localProof = avalanche.getLocalProof();
        localProof && localProof->getId() == proof->getId()) {
        return true;
    }

    return avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
        // Already-known proofs count as success; otherwise attempt the
        // registration and report the outcome via state.
        if (pm.getProof(proof->getId())) {
            return true;
        }
        return pm.registerProof(proof, state);
    });
}
/**
 * Convenience overload for callers that do not care about the reason a
 * registration might fail.
 */
static bool registerProofIfNeeded(const avalanche::Processor &avalanche,
                                  avalanche::ProofRef proof) {
    avalanche::ProofRegistrationState discardedState;
    return registerProofIfNeeded(avalanche, std::move(proof), discardedState);
}
/**
 * Decode a hex-encoded delegation into \p dg and verify it, filling \p auth
 * with the delegated public key. Throws a JSONRPCError on decode or
 * verification failure.
 */
static void verifyDelegationOrThrow(avalanche::Delegation &dg,
                                    const std::string &dgHex, CPubKey &auth) {
    // First decode the hex representation...
    bilingual_str decodeError;
    if (!avalanche::Delegation::FromHex(dg, dgHex, decodeError)) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, decodeError.original);
    }

    // ...then check the delegation chain itself.
    avalanche::DelegationState verificationState;
    if (!dg.verify(verificationState, auth)) {
        throw JSONRPCError(RPC_INVALID_PARAMETER,
                           "The delegation is invalid: " +
                               verificationState.ToString());
    }
}
/**
 * Decode a hex-encoded proof into \p proof and verify it against the active
 * chain state, throwing a JSONRPCError on failure.
 *
 * The stake UTXO dust threshold defaults to PROOF_DUST_THRESHOLD and is
 * overridden by the peer manager's configured value when avalanche is
 * enabled.
 */
static void verifyProofOrThrow(const NodeContext &node, avalanche::Proof &proof,
                               const std::string &proofHex) {
    bilingual_str error;
    if (!avalanche::Proof::FromHex(proof, proofHex, error)) {
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
    }

    Amount stakeUtxoDustThreshold = avalanche::PROOF_DUST_THRESHOLD;
    if (node.avalanche) {
        // If Avalanche is enabled, use the configured dust threshold
        node.avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
            stakeUtxoDustThreshold = pm.getStakeUtxoDustThreshold();
        });
    }

    avalanche::ProofValidationState state;
    {
        // proof.verify reads the UTXO set, which requires cs_main; the lock
        // is scoped so it is released before throwing is completed.
        LOCK(cs_main);
        if (!proof.verify(stakeUtxoDustThreshold, *Assert(node.chainman),
                          state)) {
            throw JSONRPCError(RPC_INVALID_PARAMETER,
                               "The proof is invalid: " + state.ToString());
        }
    }
}
/**
 * RPC: addavalanchenode.
 * Bind an existing connection (nodeid) to an avalanche proof, optionally
 * through a delegation when the supplied public key is not the proof master.
 *
 * Fix: the "delegation" argument help text read "the the node public key";
 * corrected to "to the node public key".
 */
static RPCHelpMan addavalanchenode() {
    return RPCHelpMan{
        "addavalanchenode",
        "Add a node in the set of peers to poll for avalanche.\n",
        {
            {"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO,
             "Node to be added to avalanche."},
            {"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The public key of the node."},
            {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "Proof that the node is not a sybil."},
            {"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
             "The proof delegation to the node public key"},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the addition succeeded or not."},
        RPCExamples{
            HelpExampleRpc("addavalanchenode", "5, \"<pubkey>\", \"<proof>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            const NodeId nodeid = request.params[0].getInt<int64_t>();
            CPubKey key = ParsePubKey(request.params[1]);

            auto proof = RCUPtr<avalanche::Proof>::make();
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            verifyProofOrThrow(node, *proof, request.params[2].get_str());

            const avalanche::ProofId &proofid = proof->getId();
            if (key != proof->getMaster()) {
                // When the supplied key is not the proof master, a
                // delegation granting it signing rights is mandatory.
                if (request.params.size() < 4 || request.params[3].isNull()) {
                    throw JSONRPCError(
                        RPC_INVALID_ADDRESS_OR_KEY,
                        "The public key does not match the proof");
                }

                avalanche::Delegation dg;
                CPubKey auth;
                verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);

                if (dg.getProofId() != proofid) {
                    throw JSONRPCError(
                        RPC_INVALID_PARAMETER,
                        "The delegation does not match the proof");
                }

                if (key != auth) {
                    throw JSONRPCError(
                        RPC_INVALID_ADDRESS_OR_KEY,
                        "The public key does not match the delegation");
                }
            }

            if (!registerProofIfNeeded(avalanche, proof)) {
                throw JSONRPCError(RPC_INVALID_PARAMETER,
                                   "The proof has conflicting utxos");
            }

            // Flag the target connection as avalanche-enabled and store the
            // key it will use to sign avalanche messages.
            if (!node.connman->ForNode(nodeid, [&](CNode *pnode) {
                    LOCK(pnode->cs_avalanche_pubkey);
                    bool expected = false;
                    if (pnode->m_avalanche_enabled.compare_exchange_strong(
                            expected, true)) {
                        pnode->m_avalanche_pubkey = std::move(key);
                    }
                    return true;
                })) {
                throw JSONRPCError(
                    RPC_INVALID_PARAMETER,
                    strprintf("The node does not exist: %d", nodeid));
            }

            return avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                if (!pm.addNode(nodeid, proofid)) {
                    return false;
                }

                // Make sure the proof gets relayed to our peers.
                pm.addUnbroadcastProof(proofid);
                return true;
            });
        },
    };
}
/**
 * RPC: buildavalancheproof.
 * Build and sign an avalanche proof from a list of stake UTXOs and their
 * private keys, returning the hex-serialized proof.
 *
 * Fixes: help-text typos "when the proof expire" -> "expires" and
 * "Indicate wether" -> "Indicate whether".
 */
static RPCHelpMan buildavalancheproof() {
    return RPCHelpMan{
        "buildavalancheproof",
        "Build a proof for avalanche's sybil resistance.\n",
        {
            {"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO,
             "The proof's sequence"},
            {"expiration", RPCArg::Type::NUM, RPCArg::Optional::NO,
             "A timestamp indicating when the proof expires"},
            {"master", RPCArg::Type::STR, RPCArg::Optional::NO,
             "The master private key in base58-encoding"},
            {
                "stakes",
                RPCArg::Type::ARR,
                RPCArg::Optional::NO,
                "The stakes to be signed and associated private keys",
                {
                    {
                        "stake",
                        RPCArg::Type::OBJ,
                        RPCArg::Optional::NO,
                        "A stake to be attached to this proof",
                        {
                            {"txid", RPCArg::Type::STR_HEX,
                             RPCArg::Optional::NO, "The transaction id"},
                            {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                             "The output number"},
                            {"amount", RPCArg::Type::AMOUNT,
                             RPCArg::Optional::NO, "The amount in this UTXO"},
                            {"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
                             "The height at which this UTXO was mined"},
                            {"iscoinbase", RPCArg::Type::BOOL,
                             RPCArg::Default{false},
                             "Indicate whether the UTXO is a coinbase"},
                            {"privatekey", RPCArg::Type::STR,
                             RPCArg::Optional::NO,
                             "private key in base58-encoding"},
                        },
                    },
                },
            },
            {"payoutAddress", RPCArg::Type::STR, RPCArg::Optional::NO,
             "A payout address"},
        },
        RPCResult{RPCResult::Type::STR_HEX, "proof",
                  "A string that is a serialized, hex-encoded proof data."},
        RPCExamples{HelpExampleRpc("buildavalancheproof",
                                   "0 1234567800 \"<master>\" []")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            const uint64_t sequence = request.params[0].getInt<int64_t>();
            const int64_t expiration = request.params[1].getInt<int64_t>();

            CKey masterKey = DecodeSecret(request.params[2].get_str());
            if (!masterKey.IsValid()) {
                throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid master key");
            }

            CTxDestination payoutAddress = DecodeDestination(
                request.params[4].get_str(), config.GetChainParams());

            if (!IsValidDestination(payoutAddress)) {
                throw JSONRPCError(RPC_INVALID_PARAMETER,
                                   "Invalid payout address");
            }

            avalanche::ProofBuilder pb(sequence, expiration, masterKey,
                                       GetScriptForDestination(payoutAddress));

            const UniValue &stakes = request.params[3].get_array();
            for (size_t i = 0; i < stakes.size(); i++) {
                const UniValue &stake = stakes[i];
                RPCTypeCheckObj(
                    stake,
                    {
                        {"txid", UniValue::VSTR},
                        {"vout", UniValue::VNUM},
                        // "amount" is also required but check is done below
                        // due to UniValue::VNUM erroneously not accepting
                        // quoted numerics (which are valid JSON)
                        {"height", UniValue::VNUM},
                        {"privatekey", UniValue::VSTR},
                    });

                int nOut = stake.find_value("vout").getInt<int>();
                if (nOut < 0) {
                    throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                       "vout cannot be negative");
                }

                const int height = stake.find_value("height").getInt<int>();
                if (height < 1) {
                    throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                       "height must be positive");
                }

                const TxId txid(ParseHashO(stake, "txid"));
                const COutPoint utxo(txid, nOut);

                if (!stake.exists("amount")) {
                    throw JSONRPCError(RPC_INVALID_PARAMETER, "Missing amount");
                }

                const Amount amount =
                    AmountFromValue(stake.find_value("amount"));

                const UniValue &iscbparam = stake.find_value("iscoinbase");
                const bool iscoinbase =
                    iscbparam.isNull() ? false : iscbparam.get_bool();
                CKey key =
                    DecodeSecret(stake.find_value("privatekey").get_str());

                if (!key.IsValid()) {
                    throw JSONRPCError(RPC_INVALID_PARAMETER,
                                       "Invalid private key");
                }

                if (!pb.addUTXO(utxo, amount, uint32_t(height), iscoinbase,
                                std::move(key))) {
                    throw JSONRPCError(RPC_INVALID_PARAMETER,
                                       "Duplicated stake");
                }
            }

            const avalanche::ProofRef proof = pb.build();

            return proof->ToHex();
        },
    };
}
/**
 * RPC: decodeavalancheproof.
 * Decode a hex-serialized proof into a JSON object without verifying it.
 *
 * Fix: the emission loop pushes an "address" field for each stake, but the
 * RPCResult specification did not document it; the missing entry is added so
 * the help text (and result sanity checking) matches the actual output.
 */
static RPCHelpMan decodeavalancheproof() {
    return RPCHelpMan{
        "decodeavalancheproof",
        "Convert a serialized, hex-encoded proof, into JSON object. "
        "The validity of the proof is not verified.\n",
        {
            {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The proof hex string"},
        },
        RPCResult{
            RPCResult::Type::OBJ,
            "",
            "",
            {
                {RPCResult::Type::NUM, "sequence",
                 "The proof's sequential number"},
                {RPCResult::Type::NUM, "expiration",
                 "A timestamp indicating when the proof expires"},
                {RPCResult::Type::STR_HEX, "master", "The master public key"},
                {RPCResult::Type::STR, "signature",
                 "The proof signature (base64 encoded)"},
                {RPCResult::Type::OBJ,
                 "payoutscript",
                 "The proof payout script",
                 {
                     {RPCResult::Type::STR, "asm", "Decoded payout script"},
                     {RPCResult::Type::STR_HEX, "hex",
                      "Raw payout script in hex format"},
                     {RPCResult::Type::STR, "type",
                      "The output type (e.g. " + GetAllOutputTypes() + ")"},
                     {RPCResult::Type::NUM, "reqSigs",
                      "The required signatures"},
                     {RPCResult::Type::ARR,
                      "addresses",
                      "",
                      {
                          {RPCResult::Type::STR, "address", "eCash address"},
                      }},
                 }},
                {RPCResult::Type::STR_HEX, "limitedid",
                 "A hash of the proof data excluding the master key."},
                {RPCResult::Type::STR_HEX, "proofid",
                 "A hash of the limitedid and master key."},
                {RPCResult::Type::STR_AMOUNT, "staked_amount",
                 "The total staked amount of this proof in " +
                     Currency::get().ticker + "."},
                {RPCResult::Type::NUM, "score", "The score of this proof."},
                {RPCResult::Type::ARR,
                 "stakes",
                 "",
                 {
                     {RPCResult::Type::OBJ,
                      "",
                      "",
                      {
                          {RPCResult::Type::STR_HEX, "txid",
                           "The transaction id"},
                          {RPCResult::Type::NUM, "vout", "The output number"},
                          {RPCResult::Type::STR_AMOUNT, "amount",
                           "The amount in this UTXO"},
                          {RPCResult::Type::NUM, "height",
                           "The height at which this UTXO was mined"},
                          {RPCResult::Type::BOOL, "iscoinbase",
                           "Indicate whether the UTXO is a coinbase"},
                          {RPCResult::Type::STR_HEX, "pubkey",
                           "This UTXO's public key"},
                          {RPCResult::Type::STR, "address",
                           "The eCash address of this UTXO's public key"},
                          {RPCResult::Type::STR, "signature",
                           "Signature of the proofid with this UTXO's private "
                           "key (base64 encoded)"},
                      }},
                 }},
            }},
        RPCExamples{HelpExampleCli("decodeavalancheproof", "\"<hex proof>\"") +
                    HelpExampleRpc("decodeavalancheproof", "\"<hex proof>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            avalanche::Proof proof;
            bilingual_str error;
            if (!avalanche::Proof::FromHex(proof, request.params[0].get_str(),
                                           error)) {
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
            }

            UniValue result(UniValue::VOBJ);
            result.pushKV("sequence", proof.getSequence());
            result.pushKV("expiration", proof.getExpirationTime());
            result.pushKV("master", HexStr(proof.getMaster()));
            result.pushKV("signature", EncodeBase64(proof.getSignature()));

            const auto payoutScript = proof.getPayoutScript();
            UniValue payoutScriptObj(UniValue::VOBJ);
            ScriptPubKeyToUniv(payoutScript, payoutScriptObj,
                               /* fIncludeHex */ true);
            result.pushKV("payoutscript", payoutScriptObj);

            result.pushKV("limitedid", proof.getLimitedId().ToString());
            result.pushKV("proofid", proof.getId().ToString());

            result.pushKV("staked_amount", proof.getStakedAmount());
            result.pushKV("score", uint64_t(proof.getScore()));

            UniValue stakes(UniValue::VARR);
            for (const avalanche::SignedStake &s : proof.getStakes()) {
                const COutPoint &utxo = s.getStake().getUTXO();
                UniValue stake(UniValue::VOBJ);
                stake.pushKV("txid", utxo.GetTxId().ToString());
                stake.pushKV("vout", uint64_t(utxo.GetN()));
                stake.pushKV("amount", s.getStake().getAmount());
                stake.pushKV("height", uint64_t(s.getStake().getHeight()));
                stake.pushKV("iscoinbase", s.getStake().isCoinbase());
                stake.pushKV("pubkey", HexStr(s.getStake().getPubkey()));
                // Only PKHash destination is supported, so this is safe
                stake.pushKV("address",
                             EncodeDestination(PKHash(s.getStake().getPubkey()),
                                               config));
                stake.pushKV("signature", EncodeBase64(s.getSignature()));
                stakes.push_back(stake);
            }
            result.pushKV("stakes", stakes);

            return result;
        },
    };
}
/**
 * RPC: delegateavalancheproof.
 * Append a delegation level to a proof (or to an existing delegation),
 * signed by the proof master key or the previous level's key.
 *
 * Fix: the "privatekey" help string was split as `"... if " " supplied."`,
 * rendering "if  supplied." with a double space; the stray space is removed.
 */
static RPCHelpMan delegateavalancheproof() {
    return RPCHelpMan{
        "delegateavalancheproof",
        "Delegate the avalanche proof to another public key.\n",
        {
            {"limitedproofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The limited id of the proof to be delegated."},
            {"privatekey", RPCArg::Type::STR, RPCArg::Optional::NO,
             "The private key in base58-encoding. Must match the proof master "
             "public key or the upper level parent delegation public key if "
             "supplied."},
            {"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The public key to delegate the proof to."},
            {"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
             "A string that is the serialized, hex-encoded delegation for the "
             "proof and which is a parent for the delegation to build."},
        },
        RPCResult{RPCResult::Type::STR_HEX, "delegation",
                  "A string that is a serialized, hex-encoded delegation."},
        RPCExamples{
            HelpExampleRpc("delegateavalancheproof",
                           "\"<limitedproofid>\" \"<privkey>\" \"<pubkey>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            avalanche::LimitedProofId limitedProofId{
                ParseHashV(request.params[0], "limitedproofid")};

            const CKey privkey = DecodeSecret(request.params[1].get_str());
            if (!privkey.IsValid()) {
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                   "The private key is invalid");
            }

            const CPubKey pubkey = ParsePubKey(request.params[2]);

            std::unique_ptr<avalanche::DelegationBuilder> dgb;
            if (request.params.size() >= 4 && !request.params[3].isNull()) {
                // Extend an existing delegation: the private key must match
                // the last level of the parent delegation.
                avalanche::Delegation dg;
                CPubKey auth;
                verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);

                if (dg.getProofId() !=
                    limitedProofId.computeProofId(dg.getProofMaster())) {
                    throw JSONRPCError(
                        RPC_INVALID_PARAMETER,
                        "The delegation does not match the proof");
                }

                if (privkey.GetPubKey() != auth) {
                    throw JSONRPCError(
                        RPC_INVALID_ADDRESS_OR_KEY,
                        "The private key does not match the delegation");
                }

                dgb = std::make_unique<avalanche::DelegationBuilder>(dg);
            } else {
                // First level: delegate directly from the proof master key.
                dgb = std::make_unique<avalanche::DelegationBuilder>(
                    limitedProofId, privkey.GetPubKey());
            }

            if (!dgb->addLevel(privkey, pubkey)) {
                throw JSONRPCError(RPC_MISC_ERROR,
                                   "Unable to build the delegation");
            }

            CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
            ss << dgb->build();
            return HexStr(ss);
        },
    };
}
/**
 * RPC: decodeavalanchedelegation.
 * Decode a hex-serialized delegation into a JSON object without verifying it.
 *
 * Fix: the RPCResult spec documents an "index" field for each entry of the
 * "levels" array, but the emission loop never produced it. The field is now
 * emitted (a backward-compatible addition) so the output matches the
 * documented result.
 */
static RPCHelpMan decodeavalanchedelegation() {
    return RPCHelpMan{
        "decodeavalanchedelegation",
        "Convert a serialized, hex-encoded avalanche proof delegation, into "
        "JSON object. \n"
        "The validity of the delegation is not verified.\n",
        {
            {"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The delegation hex string"},
        },
        RPCResult{
            RPCResult::Type::OBJ,
            "",
            "",
            {
                {RPCResult::Type::STR_HEX, "pubkey",
                 "The public key the proof is delegated to."},
                {RPCResult::Type::STR_HEX, "proofmaster",
                 "The delegated proof master public key."},
                {RPCResult::Type::STR_HEX, "delegationid",
                 "The identifier of this delegation."},
                {RPCResult::Type::STR_HEX, "limitedid",
                 "A delegated proof data hash excluding the master key."},
                {RPCResult::Type::STR_HEX, "proofid",
                 "A hash of the delegated proof limitedid and master key."},
                {RPCResult::Type::NUM, "depth",
                 "The number of delegation levels."},
                {RPCResult::Type::ARR,
                 "levels",
                 "",
                 {
                     {RPCResult::Type::OBJ,
                      "",
                      "",
                      {
                          {RPCResult::Type::NUM, "index",
                           "The index of this delegation level."},
                          {RPCResult::Type::STR_HEX, "pubkey",
                           "This delegated public key for this level"},
                          {RPCResult::Type::STR, "signature",
                           "Signature of this delegation level (base64 "
                           "encoded)"},
                      }},
                 }},
            }},
        RPCExamples{HelpExampleCli("decodeavalanchedelegation",
                                   "\"<hex delegation>\"") +
                    HelpExampleRpc("decodeavalanchedelegation",
                                   "\"<hex delegation>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            avalanche::Delegation delegation;
            bilingual_str error;
            if (!avalanche::Delegation::FromHex(
                    delegation, request.params[0].get_str(), error)) {
                throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
            }

            UniValue result(UniValue::VOBJ);
            result.pushKV("pubkey", HexStr(delegation.getDelegatedPubkey()));
            result.pushKV("proofmaster", HexStr(delegation.getProofMaster()));
            result.pushKV("delegationid", delegation.getId().ToString());
            result.pushKV("limitedid",
                          delegation.getLimitedProofId().ToString());
            result.pushKV("proofid", delegation.getProofId().ToString());

            auto levels = delegation.getLevels();
            result.pushKV("depth", uint64_t(levels.size()));

            UniValue levelsArray(UniValue::VARR);
            uint64_t index{0};
            for (auto &level : levels) {
                UniValue obj(UniValue::VOBJ);
                // Emit the level position as documented in the result spec
                obj.pushKV("index", index++);
                obj.pushKV("pubkey", HexStr(level.pubkey));
                obj.pushKV("signature", EncodeBase64(level.sig));
                levelsArray.push_back(std::move(obj));
            }
            result.pushKV("levels", levelsArray);

            return result;
        },
    };
}
static RPCHelpMan getavalancheinfo() {
    return RPCHelpMan{
        "getavalancheinfo",
        "Returns an object containing various state info regarding avalanche "
        "networking.\n",
        {},
        RPCResult{
            RPCResult::Type::OBJ,
            "",
            "",
            {
                {RPCResult::Type::BOOL, "ready_to_poll",
                 "Whether the node is ready to start polling and voting."},
                {RPCResult::Type::OBJ,
                 "local",
                 "Only available if -avaproof has been supplied to the node",
                 {
                     {RPCResult::Type::BOOL, "verified",
                      "Whether the node local proof has been locally verified "
                      "or not."},
                     {RPCResult::Type::STR, "verification_status",
                      "The proof verification status. Only available if the "
                      "\"verified\" flag is false."},
                     {RPCResult::Type::STR_HEX, "proofid",
                      "The node local proof id."},
                     {RPCResult::Type::STR_HEX, "limited_proofid",
                      "The node local limited proof id."},
                     {RPCResult::Type::STR_HEX, "master",
                      "The node local proof master public key."},
                     {RPCResult::Type::STR, "payout_address",
                      "The node local proof payout address. This might be "
                      "omitted if the payout script is not one of P2PK, P2PKH "
                      "or P2SH, in which case decodeavalancheproof can be used "
                      "to get more details."},
                     {RPCResult::Type::STR_AMOUNT, "stake_amount",
                      "The node local proof staked amount."},
                 }},
                {RPCResult::Type::OBJ,
                 "network",
                 "",
                 {
                     {RPCResult::Type::NUM, "proof_count",
                      "The number of valid avalanche proofs we know exist "
                      "(including this node's local proof if applicable)."},
                     {RPCResult::Type::NUM, "connected_proof_count",
                      "The number of avalanche proofs with at least one node "
                      "we are connected to (including this node's local proof "
                      "if applicable)."},
                     {RPCResult::Type::NUM, "dangling_proof_count",
                      "The number of avalanche proofs with no node attached."},
                     {RPCResult::Type::NUM, "finalized_proof_count",
                      "The number of known avalanche proofs that have been "
                      "finalized by avalanche."},
                     {RPCResult::Type::NUM, "conflicting_proof_count",
                      "The number of known avalanche proofs that conflict with "
                      "valid proofs."},
                     {RPCResult::Type::NUM, "immature_proof_count",
                      "The number of known avalanche proofs that have immature "
                      "utxos."},
                     {RPCResult::Type::STR_AMOUNT, "total_stake_amount",
                      "The total staked amount over all the valid proofs in " +
                          Currency::get().ticker +
                          " (including this node's local proof if "
                          "applicable)."},
                     {RPCResult::Type::STR_AMOUNT, "connected_stake_amount",
                      "The total staked amount over all the connected proofs "
                      "in " +
                          Currency::get().ticker +
                          " (including this node's local proof if "
                          "applicable)."},
                     {RPCResult::Type::STR_AMOUNT, "dangling_stake_amount",
                      "The total staked amount over all the dangling proofs "
                      "in " +
                          Currency::get().ticker +
                          " (including this node's local proof if "
                          "applicable)."},
                     {RPCResult::Type::STR_AMOUNT, "immature_stake_amount",
                      "The total staked amount over all the immature proofs "
                      "in " +
                          Currency::get().ticker +
                          " (including this node's local proof if "
                          "applicable)."},
                     {RPCResult::Type::NUM, "node_count",
                      "The number of avalanche nodes we are connected to "
                      "(including this node if a local proof is set)."},
                     {RPCResult::Type::NUM, "connected_node_count",
                      "The number of avalanche nodes associated with an "
                      "avalanche proof (including this node if a local proof "
                      "is set)."},
                     {RPCResult::Type::NUM, "pending_node_count",
                      "The number of avalanche nodes pending for a proof."},
                 }},
            },
        },
        RPCExamples{HelpExampleCli("getavalancheinfo", "") +
                    HelpExampleRpc("getavalancheinfo", "")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            UniValue ret(UniValue::VOBJ);
            ret.pushKV("ready_to_poll", avalanche.isQuorumEstablished());
            // The "local" object is only reported when this node runs with
            // its own proof (-avaproof).
            auto localProof = avalanche.getLocalProof();
            if (localProof != nullptr) {
                UniValue local(UniValue::VOBJ);
                // A proof is considered verified once it is bound to a peer
                // in the peer manager.
                const bool verified = avalanche.withPeerManager(
                    [&](const avalanche::PeerManager &pm) {
                        const avalanche::ProofId &proofid = localProof->getId();
                        return pm.isBoundToPeer(proofid);
                    });
                local.pushKV("verified", verified);
                const bool sharing = avalanche.canShareLocalProof();
                if (!verified) {
                    avalanche::ProofRegistrationState state =
                        avalanche.getLocalProofRegistrationState();
                    // If the local proof is not registered but the state is
                    // valid, no registration attempt occurred yet.
                    local.pushKV("verification_status",
                                 state.IsValid()
                                     ? (sharing ? "pending verification"
                                                : "pending inbound connections")
                                     : state.GetRejectReason());
                }
                local.pushKV("proofid", localProof->getId().ToString());
                local.pushKV("limited_proofid",
                             localProof->getLimitedId().ToString());
                local.pushKV("master", HexStr(localProof->getMaster()));
                CTxDestination destination;
                // payout_address is best-effort: omitted when the payout
                // script does not map to a standard destination.
                if (ExtractDestination(localProof->getPayoutScript(),
                                       destination)) {
                    local.pushKV("payout_address",
                                 EncodeDestination(destination, config));
                }
                local.pushKV("stake_amount", localProof->getStakedAmount());
                ret.pushKV("local", local);
            }
            avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                UniValue network(UniValue::VOBJ);
                // Aggregate counters over all the peers (each peer carries
                // exactly one valid proof).
                uint64_t proofCount{0};
                uint64_t connectedProofCount{0};
                uint64_t finalizedProofCount{0};
                uint64_t connectedNodeCount{0};
                Amount totalStakes = Amount::zero();
                Amount connectedStakes = Amount::zero();
                pm.forEachPeer([&](const avalanche::Peer &peer) {
                    CHECK_NONFATAL(peer.proof != nullptr);
                    // The local proof counts as "connected" even though it
                    // has no remote node attached.
                    const bool isLocalProof =
                        localProof &&
                        peer.proof->getId() == localProof->getId();
                    ++proofCount;
                    const Amount proofStake = peer.proof->getStakedAmount();
                    totalStakes += proofStake;
                    if (peer.hasFinalized) {
                        ++finalizedProofCount;
                    }
                    if (peer.node_count > 0 || isLocalProof) {
                        ++connectedProofCount;
                        connectedStakes += proofStake;
                    }
                    // isLocalProof contributes 1 node for this node itself.
                    connectedNodeCount += peer.node_count + isLocalProof;
                });
                Amount immatureStakes = Amount::zero();
                pm.getImmatureProofPool().forEachProof(
                    [&](const avalanche::ProofRef &proof) {
                        immatureStakes += proof->getStakedAmount();
                    });
                network.pushKV("proof_count", proofCount);
                network.pushKV("connected_proof_count", connectedProofCount);
                // Dangling = known valid proofs with no node attached.
                network.pushKV("dangling_proof_count",
                               proofCount - connectedProofCount);
                network.pushKV("finalized_proof_count", finalizedProofCount);
                network.pushKV(
                    "conflicting_proof_count",
                    uint64_t(pm.getConflictingProofPool().countProofs()));
                network.pushKV(
                    "immature_proof_count",
                    uint64_t(pm.getImmatureProofPool().countProofs()));
                network.pushKV("total_stake_amount", totalStakes);
                network.pushKV("connected_stake_amount", connectedStakes);
                network.pushKV("dangling_stake_amount",
                               totalStakes - connectedStakes);
                network.pushKV("immature_stake_amount", immatureStakes);
                const uint64_t pendingNodes = pm.getPendingNodeCount();
                network.pushKV("node_count", connectedNodeCount + pendingNodes);
                network.pushKV("connected_node_count", connectedNodeCount);
                network.pushKV("pending_node_count", pendingNodes);
                ret.pushKV("network", network);
            });
            return ret;
        },
    };
}
static RPCHelpMan getavalanchepeerinfo() {
    return RPCHelpMan{
        "getavalanchepeerinfo",
        "Returns data about an avalanche peer as a json array of objects. If "
        "no proofid is provided, returns data about all the peers.\n",
        {
            {"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
             "The hex encoded avalanche proof identifier."},
        },
        RPCResult{
            RPCResult::Type::ARR,
            "",
            "",
            {{
                RPCResult::Type::OBJ,
                "",
                "",
                {{
                    {RPCResult::Type::NUM, "avalanche_peerid",
                     "The avalanche internal peer identifier"},
                    {RPCResult::Type::NUM, "availability_score",
                     // Fixed typo: "agreggated" -> "aggregated"
                     "The aggregated availability score of this peer's nodes"},
                    {RPCResult::Type::STR_HEX, "proofid",
                     "The avalanche proof id used by this peer"},
                    {RPCResult::Type::STR_HEX, "proof",
                     "The avalanche proof used by this peer"},
                    {RPCResult::Type::NUM, "nodecount",
                     "The number of nodes for this peer"},
                    {RPCResult::Type::ARR,
                     "node_list",
                     "",
                     {
                         {RPCResult::Type::NUM, "nodeid",
                          "Node id, as returned by getpeerinfo"},
                     }},
                }},
            }},
        },
        RPCExamples{HelpExampleCli("getavalanchepeerinfo", "") +
                    HelpExampleCli("getavalanchepeerinfo", "\"proofid\"") +
                    HelpExampleRpc("getavalanchepeerinfo", "") +
                    HelpExampleRpc("getavalanchepeerinfo", "\"proofid\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            // Serialize a single peer (and its attached node ids) to a
            // UniValue object.
            auto peerToUniv = [](const avalanche::PeerManager &pm,
                                 const avalanche::Peer &peer) {
                UniValue obj(UniValue::VOBJ);
                obj.pushKV("avalanche_peerid", uint64_t(peer.peerid));
                obj.pushKV("availability_score", peer.availabilityScore);
                obj.pushKV("proofid", peer.getProofId().ToString());
                obj.pushKV("proof", peer.proof->ToHex());
                UniValue nodes(UniValue::VARR);
                pm.forEachNode(peer, [&](const avalanche::Node &n) {
                    nodes.push_back(n.nodeid);
                });
                obj.pushKV("nodecount", uint64_t(peer.node_count));
                obj.pushKV("node_list", nodes);
                return obj;
            };
            UniValue ret(UniValue::VARR);
            avalanche.withPeerManager([&](const avalanche::PeerManager &pm) {
                // If a proofid is provided, only return the associated peer
                if (!request.params[0].isNull()) {
                    const avalanche::ProofId proofid =
                        avalanche::ProofId::fromHex(
                            request.params[0].get_str());
                    if (!pm.isBoundToPeer(proofid)) {
                        throw JSONRPCError(RPC_INVALID_PARAMETER,
                                           "Proofid not found");
                    }
                    pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
                        ret.push_back(peerToUniv(pm, peer));
                        return true;
                    });
                    return;
                }
                // If no proofid is provided, return all the peers
                pm.forEachPeer([&](const avalanche::Peer &peer) {
                    ret.push_back(peerToUniv(pm, peer));
                });
            });
            return ret;
        },
    };
}
static RPCHelpMan getavalancheproofs() {
    return RPCHelpMan{
        "getavalancheproofs",
        "Returns an object containing all tracked proofids.\n",
        {},
        RPCResult{
            RPCResult::Type::OBJ,
            "",
            "",
            {
                {RPCResult::Type::ARR,
                 "valid",
                 "",
                 {
                     {RPCResult::Type::STR_HEX, "proofid",
                      "Avalanche proof id"},
                 }},
                {RPCResult::Type::ARR,
                 "conflicting",
                 "",
                 {
                     {RPCResult::Type::STR_HEX, "proofid",
                      "Avalanche proof id"},
                 }},
                {RPCResult::Type::ARR,
                 "immature",
                 "",
                 {
                     {RPCResult::Type::STR_HEX, "proofid",
                      "Avalanche proof id"},
                 }},
            },
        },
        RPCExamples{HelpExampleCli("getavalancheproofs", "") +
                    HelpExampleRpc("getavalancheproofs", "")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            UniValue ret(UniValue::VOBJ);
            avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                // Dump every proofid of a pool into a JSON array.
                const auto poolToUniv = [](const avalanche::ProofPool &pool) {
                    UniValue ids(UniValue::VARR);
                    for (const avalanche::ProofId &proofid :
                         pool.getProofIds()) {
                        ids.push_back(proofid.ToString());
                    }
                    return ids;
                };
                // Same key order as the documented RPCResult.
                ret.pushKV("valid", poolToUniv(pm.getValidProofPool()));
                ret.pushKV("conflicting",
                           poolToUniv(pm.getConflictingProofPool()));
                ret.pushKV("immature", poolToUniv(pm.getImmatureProofPool()));
            });
            return ret;
        },
    };
}
static RPCHelpMan getstakingreward() {
    return RPCHelpMan{
        "getstakingreward",
        "Return a list of possible staking reward winners based on the "
        "previous "
        "block hash.\n",
        {
            {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The previous block hash, hex encoded."},
            {"recompute", RPCArg::Type::BOOL, RPCArg::Default{false},
             "Whether to recompute the staking reward winner if there is a "
             "cached value."},
        },
        RPCResult{
            RPCResult::Type::ARR,
            "",
            "",
            {
                {RPCResult::Type::OBJ,
                 "winner",
                 "The winning proof",
                 {
                     {RPCResult::Type::STR_HEX, "proofid",
                      "The winning proofid"},
                     {RPCResult::Type::STR, "asm", "Decoded payout script"},
                     {RPCResult::Type::STR_HEX, "hex",
                      "Raw payout script in hex format"},
                     {RPCResult::Type::STR, "type",
                      "The output type (e.g. " + GetAllOutputTypes() + ")"},
                     {RPCResult::Type::NUM, "reqSigs",
                      "The required signatures"},
                     {RPCResult::Type::ARR,
                      "addresses",
                      "",
                      {
                          {RPCResult::Type::STR, "address", "eCash address"},
                      }},
                 }},
            }},
        RPCExamples{HelpExampleRpc("getstakingreward", "<blockhash>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            const NodeContext &node = EnsureAnyNodeContext(request.context);
            ChainstateManager &chainman = EnsureChainman(node);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            const BlockHash blockhash(
                ParseHashV(request.params[0], "blockhash"));
            // Look up the previous block; the winner is computed for the
            // block that will be mined on top of it.
            const CBlockIndex *pprev;
            {
                LOCK(cs_main);
                pprev = chainman.m_blockman.LookupBlockIndex(blockhash);
            }
            if (!pprev) {
                throw JSONRPCError(
                    RPC_INVALID_PARAMETER,
                    strprintf("Block not found: %s\n", blockhash.ToString()));
            }
            if (!IsStakingRewardsActivated(
                    config.GetChainParams().GetConsensus(), pprev)) {
                throw JSONRPCError(
                    RPC_INTERNAL_ERROR,
                    strprintf(
                        "Staking rewards are not activated for block %s\n",
                        blockhash.ToString()));
            }
            if (!request.params[1].isNull() && request.params[1].get_bool()) {
                // Force recompute the staking reward winner by first erasing
                // the cached entry if any
                avalanche.eraseStakingRewardWinner(blockhash);
            }
            if (!avalanche.computeStakingReward(pprev)) {
                throw JSONRPCError(
                    RPC_INTERNAL_ERROR,
                    strprintf("Unable to determine a staking reward winner "
                              "for block %s\n",
                              blockhash.ToString()));
            }
            // Each winner is a (proofid, payout script) pair.
            std::vector<std::pair<avalanche::ProofId, CScript>> winners;
            if (!avalanche.getStakingRewardWinners(blockhash, winners)) {
                throw JSONRPCError(
                    RPC_INTERNAL_ERROR,
                    strprintf("Unable to retrieve the staking reward winner "
                              "for block %s\n",
                              blockhash.ToString()));
            }
            UniValue winnersArr(UniValue::VARR);
            for (const auto &winner : winners) {
                UniValue stakingRewardsObj(UniValue::VOBJ);
                // Decode the payout script (asm/hex/type/addresses) first,
                // then attach the winning proofid.
                ScriptPubKeyToUniv(winner.second, stakingRewardsObj,
                                   /*fIncludeHex=*/true);
                stakingRewardsObj.pushKV("proofid", winner.first.GetHex());
                winnersArr.push_back(stakingRewardsObj);
            }
            return winnersArr;
        },
    };
}
static RPCHelpMan setstakingreward() {
    return RPCHelpMan{
        "setstakingreward",
        "Set the staking reward winner for the given previous block hash.\n",
        {
            {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The previous block hash, hex encoded."},
            {"payoutscript", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The payout script for the staking reward, hex encoded."},
            {"append", RPCArg::Type::BOOL, RPCArg::Default{false},
             "Append to the list of possible winners instead of replacing."},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the payout script was set or not"},
        RPCExamples{
            HelpExampleRpc("setstakingreward", "<blockhash> <payout script>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            const NodeContext &node = EnsureAnyNodeContext(request.context);
            ChainstateManager &chainman = EnsureChainman(node);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            const BlockHash blockhash(
                ParseHashV(request.params[0], "blockhash"));
            const CBlockIndex *pprev;
            {
                LOCK(cs_main);
                pprev = chainman.m_blockman.LookupBlockIndex(blockhash);
            }
            if (!pprev) {
                throw JSONRPCError(
                    RPC_INVALID_PARAMETER,
                    strprintf("Block not found: %s\n", blockhash.ToString()));
            }
            if (!IsStakingRewardsActivated(
                    config.GetChainParams().GetConsensus(), pprev)) {
                throw JSONRPCError(
                    RPC_INTERNAL_ERROR,
                    strprintf(
                        "Staking rewards are not activated for block %s\n",
                        blockhash.ToString()));
            }
            // Decode the raw script bytes from the hex parameter.
            const std::vector<uint8_t> data =
                ParseHex(request.params[1].get_str());
            CScript payoutScript(data.begin(), data.end());
            // NOTE(review): getstakingreward now reports winners as
            // (proofid, script) pairs; this RPC still uses the script-only
            // overload of get/setStakingRewardWinners — confirm the overload
            // still exists on avalanche::Processor.
            std::vector<CScript> payoutScripts;
            if (!request.params[2].isNull() && request.params[2].get_bool()) {
                // Append mode, initialize our list with the current winners
                // and the new one will be added to the back of that list. If
                // there is no winner the list will remain empty.
                avalanche.getStakingRewardWinners(blockhash, payoutScripts);
            }
            payoutScripts.push_back(std::move(payoutScript));
            // This will return true upon insertion or false upon replacement.
            // We want to convey the success of the RPC, so we always return
            // true.
            avalanche.setStakingRewardWinners(pprev, payoutScripts);
            return true;
        },
    };
}
static RPCHelpMan getremoteproofs() {
    return RPCHelpMan{
        "getremoteproofs",
        "Get the list of remote proofs for the given node id.\n",
        {
            {"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO,
             "The node identifier."},
        },
        RPCResult{
            RPCResult::Type::ARR,
            "proofs",
            "",
            {{
                RPCResult::Type::OBJ,
                "proof",
                "",
                {{
                    {RPCResult::Type::STR_HEX, "proofid",
                     "The hex encoded proof identifier."},
                    {RPCResult::Type::BOOL, "present",
                     "Whether the node has the proof."},
                    {RPCResult::Type::NUM, "last_update",
                     "The last time this proof status was updated."},
                }},
            }},
        },
        RPCExamples{HelpExampleRpc("getremoteproofs", "<nodeid>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            const NodeId nodeid = request.params[0].getInt<int64_t>();
            // Snapshot the remote proof statuses for that node.
            const auto remoteProofs = avalanche.withPeerManager(
                [nodeid](const avalanche::PeerManager &pm) {
                    return pm.getRemoteProofs(nodeid);
                });
            UniValue proofsArr(UniValue::VARR);
            for (const auto &remote : remoteProofs) {
                UniValue proofObj(UniValue::VOBJ);
                proofObj.pushKV("proofid", remote.proofid.ToString());
                proofObj.pushKV("present", remote.present);
                proofObj.pushKV("last_update", remote.lastUpdate.count());
                proofsArr.push_back(std::move(proofObj));
            }
            return proofsArr;
        },
    };
}
static RPCHelpMan getrawavalancheproof() {
    return RPCHelpMan{
        "getrawavalancheproof",
        "Lookup for a known avalanche proof by id.\n",
        {
            {"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The hex encoded avalanche proof identifier."},
        },
        RPCResult{
            RPCResult::Type::OBJ,
            "",
            "",
            {{
                {RPCResult::Type::STR_HEX, "proof",
                 "The hex encoded proof matching the identifier."},
                {RPCResult::Type::BOOL, "immature",
                 "Whether the proof has immature utxos."},
                {RPCResult::Type::BOOL, "boundToPeer",
                 "Whether the proof is bound to an avalanche peer."},
                {RPCResult::Type::BOOL, "conflicting",
                 "Whether the proof has a conflicting UTXO with an avalanche "
                 "peer."},
                {RPCResult::Type::BOOL, "finalized",
                 "Whether the proof is finalized by vote."},
            }},
        },
        RPCExamples{HelpExampleRpc("getrawavalancheproof", "<proofid>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            const avalanche::ProofId proofid =
                avalanche::ProofId::fromHex(request.params[0].get_str());
            // Collect the proof status flags while holding the peer manager,
            // then release it before serializing.
            bool immature = false;
            bool boundToPeer = false;
            bool hasConflict = false;
            bool isFinalized = false;
            auto proof = avalanche.withPeerManager(
                [&](const avalanche::PeerManager &pm) {
                    immature = pm.isImmature(proofid);
                    boundToPeer = pm.isBoundToPeer(proofid);
                    hasConflict = pm.isInConflictingPool(proofid);
                    isFinalized =
                        pm.forPeer(proofid, [&](const avalanche::Peer &p) {
                            return p.hasFinalized;
                        });
                    return pm.getProof(proofid);
                });
            if (!proof) {
                throw JSONRPCError(RPC_INVALID_PARAMETER, "Proof not found");
            }
            // Serialize the proof with the network format.
            CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
            ss << *proof;
            UniValue ret(UniValue::VOBJ);
            ret.pushKV("proof", HexStr(ss));
            ret.pushKV("immature", immature);
            ret.pushKV("boundToPeer", boundToPeer);
            ret.pushKV("conflicting", hasConflict);
            ret.pushKV("finalized", isFinalized);
            return ret;
        },
    };
}
static RPCHelpMan invalidateavalancheproof() {
    return RPCHelpMan{
        "invalidateavalancheproof",
        "Reject a known avalanche proof by id.\n",
        {
            {"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The hex encoded avalanche proof identifier."},
        },
        RPCResult{
            RPCResult::Type::BOOL,
            "success",
            "",
        },
        RPCExamples{HelpExampleRpc("invalidateavalancheproof", "<proofid>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            const avalanche::ProofId proofid =
                avalanche::ProofId::fromHex(request.params[0].get_str());
            avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                // The proof must be known either as a tracked proof or as a
                // dangling one.
                if (!pm.exists(proofid) && !pm.isDangling(proofid)) {
                    throw JSONRPCError(RPC_INVALID_PARAMETER,
                                       "Proof not found");
                }
                // Reject permanently (INVALIDATE), then remember the proof
                // as invalid so it is not re-accepted later.
                if (!pm.rejectProof(
                        proofid,
                        avalanche::PeerManager::RejectionMode::INVALIDATE)) {
                    throw JSONRPCError(RPC_INTERNAL_ERROR,
                                       "Failed to reject the proof");
                }
                pm.setInvalid(proofid);
            });
            if (avalanche.isRecentlyFinalized(proofid)) {
                // If the proof was previously finalized, clear the status.
                // Because there is no way to selectively delete an entry from a
                // Bloom filter, we have to clear the whole filter which could
                // cause extra voting rounds.
                avalanche.clearFinalizedItems();
            }
            return true;
        },
    };
}
static RPCHelpMan isfinalblock() {
    return RPCHelpMan{
        "isfinalblock",
        "Check if a block has been finalized by avalanche votes.\n",
        {
            {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The hash of the block."},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the block has been finalized by avalanche votes."},
        RPCExamples{HelpExampleRpc("isfinalblock", "<block hash>") +
                    HelpExampleCli("isfinalblock", "<block hash>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            // Finalization is meaningless before a quorum is reached.
            if (!avalanche.isQuorumEstablished()) {
                throw JSONRPCError(RPC_MISC_ERROR,
                                   "Avalanche is not ready to poll yet.");
            }
            ChainstateManager &chainman = EnsureAnyChainman(request.context);
            const BlockHash blockhash(
                ParseHashV(request.params[0], "blockhash"));
            const CBlockIndex *blockIndex =
                WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(
                                       blockhash));
            if (!blockIndex) {
                throw JSONRPCError(RPC_INVALID_PARAMETER, "Block not found");
            }
            return chainman.ActiveChainstate().IsBlockAvalancheFinalized(
                blockIndex);
        },
    };
}
static RPCHelpMan isfinaltransaction() {
    return RPCHelpMan{
        "isfinaltransaction",
        "Check if a transaction has been finalized by avalanche votes.\n",
        {
            {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The id of the transaction."},
            {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
             "The block in which to look for the transaction"},
        },
        RPCResult{
            RPCResult::Type::BOOL, "success",
            "Whether the transaction has been finalized by avalanche votes."},
        RPCExamples{HelpExampleRpc("isfinaltransaction", "<txid> <blockhash>") +
                    HelpExampleCli("isfinaltransaction", "<txid> <blockhash>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            const NodeContext &node = EnsureAnyNodeContext(request.context);
            ChainstateManager &chainman = EnsureChainman(node);
            const CTxMemPool &mempool = EnsureMemPool(node);
            avalanche::Processor &avalanche = EnsureAvalanche(node);
            const TxId txid = TxId(ParseHashV(request.params[0], "txid"));
            // If a block hash is given, restrict the transaction lookup to
            // that block.
            CBlockIndex *pindex = nullptr;
            if (!request.params[1].isNull()) {
                const BlockHash blockhash(
                    ParseHashV(request.params[1], "blockhash"));
                LOCK(cs_main);
                pindex = chainman.m_blockman.LookupBlockIndex(blockhash);
                if (!pindex) {
                    throw JSONRPCError(RPC_INVALID_PARAMETER,
                                       "Block not found");
                }
            }
            // Only wait for the txindex when we need it for the lookup.
            bool f_txindex_ready = false;
            if (g_txindex && !pindex) {
                f_txindex_ready = g_txindex->BlockUntilSyncedToCurrentChain();
            }
            BlockHash hash_block;
            const CTransactionRef tx = GetTransaction(
                pindex, &mempool, txid, hash_block, chainman.m_blockman);
            if (!avalanche.isQuorumEstablished()) {
                throw JSONRPCError(RPC_MISC_ERROR,
                                   "Avalanche is not ready to poll yet.");
            }
            if (!tx) {
                // Build the most helpful error message depending on why the
                // lookup failed.
                std::string errmsg;
                if (pindex) {
                    if (WITH_LOCK(::cs_main,
                                  return !pindex->nStatus.hasData())) {
                        throw JSONRPCError(RPC_MISC_ERROR,
                                           "Block data not downloaded yet.");
                    }
                    errmsg = "No such transaction found in the provided block.";
                } else if (!g_txindex) {
                    errmsg = "No such transaction. Use -txindex or provide a "
                             "block hash to enable blockchain transaction "
                             "queries.";
                } else if (!f_txindex_ready) {
                    errmsg = "No such transaction. Blockchain transactions are "
                             "still in the process of being indexed.";
                } else {
                    errmsg = "No such mempool or blockchain transaction.";
                }
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, errmsg);
            }
            // No block hash supplied: resolve the block the transaction was
            // found in (if any).
            if (!pindex) {
                LOCK(cs_main);
                pindex = chainman.m_blockman.LookupBlockIndex(hash_block);
            }
            // Removed an unreachable `if (!tx)` re-check here: the lookup
            // failure already threw above.
            if (mempool.isAvalancheFinalized(txid)) {
                // The transaction is finalized
                return true;
            }
            // Return true if the tx is in a finalized block
            return !mempool.exists(txid) &&
                   chainman.ActiveChainstate().IsBlockAvalancheFinalized(
                       pindex);
        },
    };
}
static RPCHelpMan reconsideravalancheproof() {
    return RPCHelpMan{
        "reconsideravalancheproof",
        "Reconsider a known avalanche proof.\n",
        {
            {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The hex encoded avalanche proof."},
        },
        RPCResult{
            RPCResult::Type::BOOL,
            "success",
            "Whether the proof has been successfully registered.",
        },
        RPCExamples{HelpExampleRpc("reconsideravalancheproof", "<proof hex>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            auto proof = RCUPtr<avalanche::Proof>::make();
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            // Verify the proof. Note that this is redundant with the
            // verification done when adding the proof to the pool, but we get a
            // chance to give a better error message.
            verifyProofOrThrow(node, *proof, request.params[0].get_str());
            // There is no way to selectively clear the invalidation status of
            // a single proof, so we clear the whole Bloom filter. This could
            // cause extra voting rounds.
            avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                if (pm.isInvalid(proof->getId())) {
                    pm.clearAllInvalid();
                }
            });
            // Add the proof to the pool if we don't have it already. Since the
            // proof verification has already been done, a failure likely
            // indicates that there already is a proof with conflicting utxos.
            avalanche::ProofRegistrationState state;
            if (!registerProofIfNeeded(avalanche, proof, state)) {
                throw JSONRPCError(RPC_INVALID_PARAMETER,
                                   strprintf("%s (%s)\n",
                                             state.GetRejectReason(),
                                             state.GetDebugMessage()));
            }
            // Success means the proof ended up bound to a peer.
            return avalanche.withPeerManager(
                [&](const avalanche::PeerManager &pm) {
                    return pm.isBoundToPeer(proof->getId());
                });
        },
    };
}
static RPCHelpMan sendavalancheproof() {
    return RPCHelpMan{
        "sendavalancheproof",
        "Broadcast an avalanche proof.\n",
        {
            {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The avalanche proof to broadcast."},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the proof was sent successfully or not."},
        RPCExamples{HelpExampleRpc("sendavalancheproof", "<proof>")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            auto proof = RCUPtr<avalanche::Proof>::make();
            NodeContext &node = EnsureAnyNodeContext(request.context);
            const avalanche::Processor &avalanche = EnsureAvalanche(node);
            // Verify the proof. Note that this is redundant with the
            // verification done when adding the proof to the pool, but we get a
            // chance to give a better error message.
            verifyProofOrThrow(node, *proof, request.params[0].get_str());
            // Add the proof to the pool if we don't have it already. Since the
            // proof verification has already been done, a failure likely
            // indicates that there already is a proof with conflicting utxos.
            const avalanche::ProofId &proofid = proof->getId();
            avalanche::ProofRegistrationState state;
            if (!registerProofIfNeeded(avalanche, proof, state)) {
                throw JSONRPCError(RPC_INVALID_PARAMETER,
                                   strprintf("%s (%s)\n",
                                             state.GetRejectReason(),
                                             state.GetDebugMessage()));
            }
            // Queue the proof for rebroadcast, then relay it immediately if
            // the peer manager (net processing) is available.
            avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
                pm.addUnbroadcastProof(proofid);
            });
            if (node.peerman) {
                node.peerman->RelayProof(proofid);
            }
            return true;
        },
    };
}
static RPCHelpMan verifyavalancheproof() {
    return RPCHelpMan{
        "verifyavalancheproof",
        "Verify an avalanche proof is valid and return the error otherwise.\n",
        {
            {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "Proof to verify."},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the proof is valid or not."},
        RPCExamples{HelpExampleRpc("verifyavalancheproof", "\"<proof>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            NodeContext &node = EnsureAnyNodeContext(request.context);
            // Throws a JSON-RPC error describing the failure if the proof
            // does not verify; otherwise fall through and report success.
            avalanche::Proof proofToCheck;
            verifyProofOrThrow(node, proofToCheck,
                               request.params[0].get_str());
            return true;
        },
    };
}
static RPCHelpMan verifyavalanchedelegation() {
    return RPCHelpMan{
        "verifyavalanchedelegation",
        "Verify an avalanche delegation is valid and return the error "
        "otherwise.\n",
        {
            {"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
             "The avalanche proof delegation to verify."},
        },
        RPCResult{RPCResult::Type::BOOL, "success",
                  "Whether the delegation is valid or not."},
        // Fixed copy-paste slip: the example argument is a delegation, not a
        // proof.
        RPCExamples{
            HelpExampleRpc("verifyavalanchedelegation", "\"<delegation>\"")},
        [&](const RPCHelpMan &self, const Config &config,
            const JSONRPCRequest &request) -> UniValue {
            avalanche::Delegation delegation;
            // The delegated public key output is not needed here; throws a
            // JSON-RPC error if the delegation fails verification.
            CPubKey dummy;
            verifyDelegationOrThrow(delegation, request.params[0].get_str(),
                                    dummy);
            return true;
        },
    };
}
/// Register every avalanche-related RPC command with the dispatch table.
/// The table entries must stay alive for the lifetime of the process, hence
/// the function-local static array.
void RegisterAvalancheRPCCommands(CRPCTable &t) {
    // clang-format off
    static const CRPCCommand commands[] = {
        //  category           actor (function)
        //  -----------------  --------------------
        { "avalanche",         getavalanchekey,           },
        { "avalanche",         addavalanchenode,          },
        { "avalanche",         buildavalancheproof,       },
        { "avalanche",         decodeavalancheproof,      },
        { "avalanche",         delegateavalancheproof,    },
        { "avalanche",         decodeavalanchedelegation, },
        { "avalanche",         getavalancheinfo,          },
        { "avalanche",         getavalanchepeerinfo,      },
        { "avalanche",         getavalancheproofs,        },
        { "avalanche",         getstakingreward,          },
        { "avalanche",         setstakingreward,          },
        { "avalanche",         getremoteproofs,           },
        { "avalanche",         getrawavalancheproof,      },
        { "avalanche",         invalidateavalancheproof,  },
        { "avalanche",         isfinalblock,              },
        { "avalanche",         isfinaltransaction,        },
        { "avalanche",         reconsideravalancheproof,  },
        { "avalanche",         sendavalancheproof,        },
        { "avalanche",         verifyavalancheproof,      },
        { "avalanche",         verifyavalanchedelegation, },
    };
    // clang-format on
    for (const auto &c : commands) {
        t.appendCommand(c.name, &c);
    }
}
diff --git a/test/functional/abc_mining_stakingrewards.py b/test/functional/abc_mining_stakingrewards.py
index 31ac9e69a7..ca45d8bf7f 100644
--- a/test/functional/abc_mining_stakingrewards.py
+++ b/test/functional/abc_mining_stakingrewards.py
@@ -1,304 +1,323 @@
# Copyright (c) 2023 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining with staking rewards
"""
import time
from decimal import Decimal
from test_framework.address import ADDRESS_ECREG_UNSPENDABLE
from test_framework.avatools import can_find_inv_in_poll, get_ava_p2p_interface
from test_framework.messages import XEC, AvalancheProofVoteResponse
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
uint256_hex,
)
QUORUM_NODE_COUNT = 16
STAKING_REWARDS_COINBASE_RATIO_PERCENT = 10
class AbcMiningStakingRewardsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-avaproofstakeutxodustthreshold=1000000",
"-avaproofstakeutxoconfirmations=1",
"-avacooldown=0",
"-avaminquorumstake=0",
"-avaminavaproofsnodecount=0",
"-whitelist=noban@127.0.0.1",
"-avalanchestakingrewards=1",
],
]
def run_test(self):
node = self.nodes[0]
now = int(time.time())
node.setmocktime(now)
# Build a quorum
quorum = [get_ava_p2p_interface(self, node) for _ in range(QUORUM_NODE_COUNT)]
assert node.getavalancheinfo()["ready_to_poll"] is True
now += 90 * 60 + 1
node.setmocktime(now)
invalid_block_hash = "0" * 63
assert_raises_rpc_error(
-8,
f"blockhash must be of length 64 (not 63, for '{invalid_block_hash}')",
node.getstakingreward,
invalid_block_hash,
)
assert_raises_rpc_error(
-8,
f"blockhash must be of length 64 (not 63, for '{invalid_block_hash}')",
node.setstakingreward,
invalid_block_hash,
"76a914000000000000000000000000000000000000000088ac",
)
invalid_block_hash = "0" * 64
assert_raises_rpc_error(
-8,
f"Block not found: {invalid_block_hash}",
node.getstakingreward,
invalid_block_hash,
)
assert_raises_rpc_error(
-8,
f"Block not found: {invalid_block_hash}",
node.setstakingreward,
invalid_block_hash,
"76a914000000000000000000000000000000000000000088ac",
)
def get_coinbase(blockhash):
return node.getblock(blockhash, 2)["tx"][0]
tiphash = node.getbestblockhash()
coinbase = get_coinbase(tiphash)
block_reward = sum([vout["value"] for vout in coinbase["vout"]])
self.log.info(
"Staking rewards not ready yet, check getblocktemplate lacks the staking rewards data"
)
assert_raises_rpc_error(
-32603,
f"Unable to determine a staking reward winner for block {tiphash}",
node.getstakingreward,
tiphash,
)
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert "coinbasetxn" in gbt
assert "stakingrewards" not in gbt["coinbasetxn"]
self.log.info(
"Staking rewards not ready yet, check the miner doesn't produce the staking rewards output"
)
tiphash = self.generate(node, 1)[-1]
coinbase = get_coinbase(tiphash)
assert_equal(len(coinbase["vout"]), 1)
assert_raises_rpc_error(
-32603,
f"Unable to determine a staking reward winner for block {tiphash}",
node.getstakingreward,
tiphash,
)
self.log.info(
"Staking rewards are computed, check the block template returns the staking rewards data"
)
def wait_for_finalized_proof(proofid):
def finalize_proof(proofid):
can_find_inv_in_poll(
quorum, proofid, response=AvalancheProofVoteResponse.ACTIVE
)
return node.getrawavalancheproof(uint256_hex(proofid)).get(
"finalized", False
)
self.wait_until(lambda: finalize_proof(proofid))
for peer in quorum:
wait_for_finalized_proof(peer.proof.proofid)
tiphash = self.generate(node, 1)[-1]
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert "coinbasetxn" in gbt
assert "stakingrewards" in gbt["coinbasetxn"]
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
+ reward = node.getstakingreward(tiphash)
+
+ assert_equal(len(reward), 1)
+ assert "proofid" in reward[0]
+ proofid = reward[0]["proofid"]
+ assert proofid in [uint256_hex(peer.proof.proofid) for peer in quorum]
+
assert_equal(
- node.getstakingreward(tiphash),
+ reward,
[
{
+ "proofid": proofid,
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
],
)
self.log.info(
"Staking rewards are computed, check the miner produces the staking rewards output"
)
tiphash = self.generate(node, 1)[-1]
coinbase = get_coinbase(tiphash)
assert_greater_than_or_equal(len(coinbase["vout"]), 2)
assert_equal(
coinbase["vout"][-1]["value"],
Decimal(block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100),
)
assert_equal(
coinbase["vout"][-1]["scriptPubKey"]["hex"],
"76a914000000000000000000000000000000000000000088ac",
)
self.log.info("Override the staking reward via RPC")
assert node.setstakingreward(
tiphash, "76a914000000000000000000000000000000000000000188ac"
)
assert_equal(
node.getstakingreward(tiphash),
[
{
+ "proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
],
)
# Append another acceptable winner
assert node.setstakingreward(
tiphash,
"76a914000000000000000000000000000000000000000288ac",
True,
)
assert_equal(
node.getstakingreward(tiphash),
[
{
+ "proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
{
+ "proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000002 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000288ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqgdmg7vcrr"
],
},
],
)
# We always pick the first one
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
for i in range(2, 10):
script_hex = f"76a914{i:0{40}x}88ac"
assert node.setstakingreward(tiphash, script_hex)
assert_equal(node.getstakingreward(tiphash)[0]["hex"], script_hex)
gbt = node.getblocktemplate()
assert_equal(
gbt["coinbasetxn"]["stakingrewards"]["payoutscript"]["hex"], script_hex
)
self.log.info("Recompute the staking reward")
+ reward = node.getstakingreward(blockhash=tiphash, recompute=True)
+
+ assert_equal(len(reward), 1)
+ assert "proofid" in reward[0]
+ proofid = reward[0]["proofid"]
+ assert proofid in [uint256_hex(peer.proof.proofid) for peer in quorum]
+
assert_equal(
- node.getstakingreward(blockhash=tiphash, recompute=True),
+ reward,
[
{
+ "proofid": proofid,
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
],
)
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
if __name__ == "__main__":
AbcMiningStakingRewardsTest().main()

File Metadata

Mime Type
text/x-diff
Expires
Wed, May 21, 22:23 (1 d, 21 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5866054
Default Alt Text
(333 KB)

Event Timeline