F12910299
diff --git a/doc/release-notes.md b/doc/release-notes.md
index bee490a3d..2004c0f10 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,11 +1,13 @@
# Bitcoin ABC 0.29.7 Release Notes
Bitcoin ABC version 0.29.7 is now available from:
<https://download.bitcoinabc.org/0.29.7/>
This release includes the following features and fixes:
- The `-deprecatedrpc=getstakingreward` option had been deprecated for
several months and has now been removed completely.
- The `getstakingreward` RPC now returns the `proofid` of the staking reward
winner in addition to the payout script.
+ - A new `setflakyproof` RPC instructs the node to also accept an alternative
+ staking reward winner when the flaky proof would have been selected.
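For context, the new RPC is backed by the `PeerManager::setFlaky`/`unsetFlaky` methods added further down in this diff. A minimal sketch of how a handler could toggle that flag through the existing `withPeerManager` pattern; the names `avalanche`, `proofid` and `flaky` are illustrative assumptions, not taken from this excerpt:

    // Hedged sketch only: `avalanche` is the node's avalanche::Processor and
    // `proofid`/`flaky` stand in for the RPC parameters.
    bool changed = avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
        return flaky ? pm.setFlaky(proofid) : pm.unsetFlaky(proofid);
    });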
diff --git a/src/avalanche/peermanager.cpp b/src/avalanche/peermanager.cpp
index 3b074f578..dcec1737f 100644
--- a/src/avalanche/peermanager.cpp
+++ b/src/avalanche/peermanager.cpp
@@ -1,1377 +1,1389 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/peermanager.h>
#include <arith_uint256.h>
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/validation.h>
#include <cashaddrenc.h>
#include <common/args.h>
#include <consensus/activation.h>
#include <logging.h>
#include <random.h>
#include <scheduler.h>
#include <uint256.h>
#include <util/fastrange.h>
#include <util/fs_helpers.h>
#include <util/time.h>
#include <validation.h> // For ChainstateManager
#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>
namespace avalanche {
static constexpr uint64_t PEERS_DUMP_VERSION{1};
bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid) {
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
if (it == pview.end()) {
// If the node exists, it is actually updating its proof to an unknown
// one. In this case we need to remove it so it is not both active and
// pending at the same time.
removeNode(nodeid);
pendingNodes.emplace(proofid, nodeid);
return false;
}
return addOrUpdateNode(peers.project<0>(it), nodeid);
}
bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid) {
assert(it != peers.end());
const PeerId peerid = it->peerid;
auto nit = nodes.find(nodeid);
if (nit == nodes.end()) {
if (!nodes.emplace(nodeid, peerid).second) {
return false;
}
} else {
const PeerId oldpeerid = nit->peerid;
if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
return false;
}
// We actually have this node already, we need to update it.
bool success = removeNodeFromPeer(peers.find(oldpeerid));
assert(success);
}
// Then increase the node counter, and create the slot if needed
bool success = addNodeToPeer(it);
assert(success);
// If the added node was in the pending set, remove it
pendingNodes.get<by_nodeid>().erase(nodeid);
// If the proof was in the dangling pool, remove it
const ProofId &proofid = it->getProofId();
if (danglingProofPool.getProof(proofid)) {
danglingProofPool.removeProof(proofid);
}
// We know for sure there is at least 1 node. Note that this can fail if
// there is more than 1, in which case it's a no-op.
shareableProofs.insert(it->proof);
return true;
}
bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
assert(it != peers.end());
return peers.modify(it, [&](Peer &p) {
if (p.node_count++ > 0) {
// We are done.
return;
}
// We need to allocate this peer.
p.index = uint32_t(slots.size());
const uint32_t score = p.getScore();
const uint64_t start = slotCount;
slots.emplace_back(start, score, it->peerid);
slotCount = start + score;
// Add to our allocated score when we allocate a new peer in the slots
connectedPeersScore += score;
});
}
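In other words, the first node attached to a peer is what allocates the peer's slot: the peer receives the half-open range [slotCount, slotCount + score) and slotCount advances by its score. With two connected peers of scores 10 and 20, for example, the first owns [0, 10), the second owns [10, 30), and both slotCount and connectedPeersScore end up at 30; stake-weighted polling then simply draws a random slot in that range (see the selectPeerImpl sketch further below).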
bool PeerManager::removeNode(NodeId nodeid) {
// Remove all the remote proofs from this node
auto &remoteProofsView = remoteProofs.get<by_nodeid>();
auto [begin, end] = remoteProofsView.equal_range(nodeid);
remoteProofsView.erase(begin, end);
if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
// If this was a pending node, there is nothing else to do.
return true;
}
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
const PeerId peerid = it->peerid;
nodes.erase(it);
// Keep the track of the reference count.
bool success = removeNodeFromPeer(peers.find(peerid));
assert(success);
return true;
}
bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
uint32_t count) {
// It is possible for nodes to be dangling. If there was an inflight query
// when the peer gets removed, the node was not erased. In this case there
// is nothing to do.
if (it == peers.end()) {
return true;
}
assert(count <= it->node_count);
if (count == 0) {
// This is a NOOP.
return false;
}
const uint32_t new_count = it->node_count - count;
if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
return false;
}
if (new_count > 0) {
// We are done.
return true;
}
// There are no more nodes left, we need to clean up. Remove from the radix
// tree (unless it's our local proof), subtract allocated score and remove
// from slots.
if (!localProof || it->getProofId() != localProof->getId()) {
const auto removed = shareableProofs.remove(it->getProofId());
assert(removed);
}
const size_t i = it->index;
assert(i < slots.size());
assert(connectedPeersScore >= slots[i].getScore());
connectedPeersScore -= slots[i].getScore();
if (i + 1 == slots.size()) {
slots.pop_back();
slotCount = slots.empty() ? 0 : slots.back().getStop();
} else {
fragmentation += slots[i].getScore();
slots[i] = slots[i].withPeerId(NO_PEER);
}
return true;
}
bool PeerManager::updateNextRequestTime(NodeId nodeid,
SteadyMilliseconds timeout) {
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; });
}
bool PeerManager::latchAvaproofsSent(NodeId nodeid) {
auto it = nodes.find(nodeid);
if (it == nodes.end()) {
return false;
}
return !it->avaproofsSent &&
nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
}
static bool isImmatureState(const ProofValidationState &state) {
return state.GetResult() == ProofValidationResult::IMMATURE_UTXO;
}
bool PeerManager::updateNextPossibleConflictTime(
PeerId peerid, const std::chrono::seconds &nextTime) {
auto it = peers.find(peerid);
if (it == peers.end()) {
// No such peer
return false;
}
// Make sure we don't move the time in the past.
peers.modify(it, [&](Peer &p) {
p.nextPossibleConflictTime =
std::max(p.nextPossibleConflictTime, nextTime);
});
return it->nextPossibleConflictTime == nextTime;
}
bool PeerManager::setFinalized(PeerId peerid) {
auto it = peers.find(peerid);
if (it == peers.end()) {
// No such peer
return false;
}
peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });
return true;
}
template <typename ProofContainer>
void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
auto &peersView = peers.get<by_proofid>();
for (const ProofRef &proof : proofs) {
auto it = peersView.find(proof->getId());
if (it != peersView.end()) {
removePeer(it->peerid);
}
conflictingProofPool.addProofIfPreferred(proof);
}
}
bool PeerManager::registerProof(const ProofRef &proof,
ProofRegistrationState &registrationState,
RegistrationMode mode) {
assert(proof);
const ProofId &proofid = proof->getId();
auto invalidate = [&](ProofRegistrationResult result,
const std::string &message) {
return registrationState.Invalid(
result, message, strprintf("proofid: %s", proofid.ToString()));
};
if ((mode != RegistrationMode::FORCE_ACCEPT ||
!isInConflictingPool(proofid)) &&
exists(proofid)) {
// In default mode, we expect the proof to be unknown, i.e. in none of
// the pools.
// In forced accept mode, the proof can be in the conflicting pool.
return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
"proof-already-registered");
}
if (danglingProofPool.getProof(proofid) &&
pendingNodes.count(proofid) == 0) {
// Don't attempt to register a proof that we already evicted because it
// was dangling, but rather attempt to retrieve an associated node.
needMoreNodes = true;
return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
}
// Check the proof's validity.
ProofValidationState validationState;
if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
chainman, validationState))) {
if (isImmatureState(validationState)) {
immatureProofPool.addProofIfPreferred(proof);
if (immatureProofPool.countProofs() >
AVALANCHE_MAX_IMMATURE_PROOFS) {
// Adding this proof exceeds the immature pool limit, so evict
// the lowest scoring proof.
immatureProofPool.removeProof(
immatureProofPool.getLowestScoreProof()->getId());
}
return invalidate(ProofRegistrationResult::IMMATURE,
"immature-proof");
}
if (validationState.GetResult() ==
ProofValidationResult::MISSING_UTXO) {
return invalidate(ProofRegistrationResult::MISSING_UTXO,
"utxo-missing-or-spent");
}
// Reject invalid proof.
return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
}
auto now = GetTime<std::chrono::seconds>();
auto nextCooldownTimePoint =
now + std::chrono::seconds(gArgs.GetIntArg(
"-avalancheconflictingproofcooldown",
AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN));
ProofPool::ConflictingProofSet conflictingProofs;
switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
case ProofPool::AddProofStatus::REJECTED: {
if (mode != RegistrationMode::FORCE_ACCEPT) {
auto bestPossibleConflictTime = std::chrono::seconds(0);
auto &pview = peers.get<by_proofid>();
for (auto &conflictingProof : conflictingProofs) {
auto it = pview.find(conflictingProof->getId());
assert(it != pview.end());
// Search the most recent time over the peers
bestPossibleConflictTime = std::max(
bestPossibleConflictTime, it->nextPossibleConflictTime);
updateNextPossibleConflictTime(it->peerid,
nextCooldownTimePoint);
}
if (bestPossibleConflictTime > now) {
// Cooldown not elapsed, reject the proof.
return invalidate(
ProofRegistrationResult::COOLDOWN_NOT_ELAPSED,
"cooldown-not-elapsed");
}
// Give the proof a chance to replace the conflicting ones.
if (validProofPool.addProofIfPreferred(proof)) {
// If we have overridden other proofs due to conflict,
// remove the peers and attempt to move them to the
// conflicting pool.
moveToConflictingPool(conflictingProofs);
// Replacement is successful, continue to peer creation
break;
}
// Not the preferred proof, or replacement is not enabled
return conflictingProofPool.addProofIfPreferred(proof) ==
ProofPool::AddProofStatus::REJECTED
? invalidate(ProofRegistrationResult::REJECTED,
"rejected-proof")
: invalidate(ProofRegistrationResult::CONFLICTING,
"conflicting-utxos");
}
conflictingProofPool.removeProof(proofid);
// Move the conflicting proofs from the valid pool to the
// conflicting pool
moveToConflictingPool(conflictingProofs);
auto status = validProofPool.addProofIfNoConflict(proof);
assert(status == ProofPool::AddProofStatus::SUCCEED);
break;
}
case ProofPool::AddProofStatus::DUPLICATED:
// If the proof was already in the pool, don't duplicate the peer.
return invalidate(ProofRegistrationResult::ALREADY_REGISTERED,
"proof-already-registered");
case ProofPool::AddProofStatus::SUCCEED:
break;
// No default case, so the compiler can warn about missing cases
}
// At this stage we are going to create a peer so the proof should never
// exist in the conflicting pool, but use belt and suspenders.
conflictingProofPool.removeProof(proofid);
// New peer means new peerid!
const PeerId peerid = nextPeerId++;
// We have no peer for this proof, time to create it.
auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
assert(inserted.second);
if (localProof && proof->getId() == localProof->getId()) {
// Add it to the shareable proofs even if there is no node, we are the
// node. Otherwise it will be inserted after a node is attached to the
// proof.
shareableProofs.insert(proof);
}
// Add to our registered score when adding to the peer list
totalPeersScore += proof->getScore();
// If there are nodes waiting for this proof, add them
auto &pendingNodesView = pendingNodes.get<by_proofid>();
auto range = pendingNodesView.equal_range(proofid);
// We want to update the nodes then remove them from the pending set. That
// will invalidate the range iterators, so we need to save the node ids
// first before we can loop over them.
std::vector<NodeId> nodeids;
nodeids.reserve(std::distance(range.first, range.second));
std::transform(range.first, range.second, std::back_inserter(nodeids),
[](const PendingNode &n) { return n.nodeid; });
for (const NodeId &nodeid : nodeids) {
addOrUpdateNode(inserted.first, nodeid);
}
return true;
}
bool PeerManager::rejectProof(const ProofId &proofid, RejectionMode mode) {
if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
danglingProofPool.removeProof(proofid);
return true;
}
if (!exists(proofid)) {
return false;
}
if (immatureProofPool.removeProof(proofid)) {
return true;
}
if (mode == RejectionMode::DEFAULT &&
conflictingProofPool.getProof(proofid)) {
// In default mode we keep the proof in the conflicting pool
return true;
}
if (mode == RejectionMode::INVALIDATE &&
conflictingProofPool.removeProof(proofid)) {
// In invalidate mode we remove the proof completely
return true;
}
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
assert(it != pview.end());
const ProofRef proof = it->proof;
if (!removePeer(it->peerid)) {
return false;
}
// If there were conflicting proofs, attempt to pull them back
for (const SignedStake &ss : proof->getStakes()) {
const ProofRef conflictingProof =
conflictingProofPool.getProof(ss.getStake().getUTXO());
if (!conflictingProof) {
continue;
}
conflictingProofPool.removeProof(conflictingProof->getId());
registerProof(conflictingProof);
}
if (mode == RejectionMode::DEFAULT) {
conflictingProofPool.addProofIfPreferred(proof);
}
return true;
}
void PeerManager::cleanupDanglingProofs(
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
registeredProofs.clear();
const auto now = GetTime<std::chrono::seconds>();
std::vector<ProofRef> newlyDanglingProofs;
for (const Peer &peer : peers) {
// If the peer is not our local proof, has been registered for some
// time and has no node attached, discard it.
if ((!localProof || peer.getProofId() != localProof->getId()) &&
peer.node_count == 0 &&
(peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
// Check the remote status to determine if we should set the proof
// as dangling. This prevents us from dropping a proof on our own due
// to a network issue. If the remote presence status is inconclusive,
// we assume our own position (missing = false).
if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
newlyDanglingProofs.push_back(peer.proof);
}
}
}
// Similarly, check if we have dangling proofs that could be pulled back
// because the network says so.
std::vector<ProofRef> previouslyDanglingProofs;
danglingProofPool.forEachProof([&](const ProofRef &proof) {
if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
previouslyDanglingProofs.push_back(proof);
}
});
for (const ProofRef &proof : previouslyDanglingProofs) {
danglingProofPool.removeProof(proof->getId());
if (registerProof(proof)) {
registeredProofs.insert(proof);
}
}
for (const ProofRef &proof : newlyDanglingProofs) {
rejectProof(proof->getId(), RejectionMode::INVALIDATE);
if (danglingProofPool.addProofIfPreferred(proof)) {
// If the proof is added, it means there is no better conflicting
// dangling proof and this is not a duplicate, so it's worth
// printing a message to the log.
LogPrint(BCLog::AVALANCHE,
"Proof dangling for too long (no connected node): %s\n",
proof->getId().GetHex());
}
}
// If we have dangling proofs, this is a good indicator that we need to
// request more nodes from our peers.
needMoreNodes = !newlyDanglingProofs.empty();
}
NodeId PeerManager::selectNode() {
for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
const PeerId p = selectPeer();
// If we cannot find a peer, it may be because selection is unlikely to
// succeed due to high fragmentation, so compact and retry.
if (p == NO_PEER) {
compact();
continue;
}
// See if that peer has an available node.
auto &nview = nodes.get<next_request_time>();
auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
if (it != nview.end() && it->peerid == p &&
it->nextRequestTime <= Now<SteadyMilliseconds>()) {
return it->nodeid;
}
}
// We failed to find a node to query, flag this so we can request more
needMoreNodes = true;
return NO_NODE;
}
std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
std::vector<ProofId> invalidProofIds;
std::vector<ProofRef> newImmatures;
{
LOCK(cs_main);
for (const auto &p : peers) {
ProofValidationState state;
if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
if (isImmatureState(state)) {
newImmatures.push_back(p.proof);
}
invalidProofIds.push_back(p.getProofId());
LogPrint(BCLog::AVALANCHE,
"Invalidating proof %s: verification failed (%s)\n",
p.proof->getId().GetHex(), state.ToString());
}
}
}
// Remove the invalid proofs before the immature rescan. This makes it
// possible to pull back proofs with utxos that conflicted with these
// invalid proofs.
for (const ProofId &invalidProofId : invalidProofIds) {
rejectProof(invalidProofId, RejectionMode::INVALIDATE);
}
auto registeredProofs = immatureProofPool.rescan(*this);
for (auto &p : newImmatures) {
immatureProofPool.addProofIfPreferred(p);
}
return registeredProofs;
}
ProofRef PeerManager::getProof(const ProofId &proofid) const {
ProofRef proof;
forPeer(proofid, [&](const Peer &p) {
proof = p.proof;
return true;
});
if (!proof) {
proof = conflictingProofPool.getProof(proofid);
}
if (!proof) {
proof = immatureProofPool.getProof(proofid);
}
return proof;
}
bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
auto &pview = peers.get<by_proofid>();
return pview.find(proofid) != pview.end();
}
bool PeerManager::isImmature(const ProofId &proofid) const {
return immatureProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
return conflictingProofPool.getProof(proofid) != nullptr;
}
bool PeerManager::isDangling(const ProofId &proofid) const {
return danglingProofPool.getProof(proofid) != nullptr;
}
void PeerManager::setInvalid(const ProofId &proofid) {
invalidProofs.insert(proofid);
}
bool PeerManager::isInvalid(const ProofId &proofid) const {
return invalidProofs.contains(proofid);
}
void PeerManager::clearAllInvalid() {
invalidProofs.reset();
}
bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
const bool present) {
// Get how many proofs this node has announced
auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
// Limit the number of proofs a single node can save:
// - At least MAX_REMOTE_PROOFS
// - Up to twice the number of peers we have
// The MAX_REMOTE_PROOFS minimum is there to ensure we don't restrict too
// much at startup when we don't have many peers yet.
while (size_t(std::distance(begin, end)) >=
std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
// Remove the proof with the oldest update time
begin = remoteProofsByLastUpdate.erase(begin);
}
auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
if (it != remoteProofs.end()) {
remoteProofs.erase(it);
}
return remoteProofs
.emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
present})
.second;
}
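The eviction loop above caps how many remote proofs a single node may have saved at max(MAX_REMOTE_PROOFS, 2 * peers.size()): with the default MAX_REMOTE_PROOFS of 100 (declared in peermanager.h below) and, say, 20 peers, the cap is max(100, 40) = 100, and the bound only starts growing once more than 50 peers are known.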
std::vector<RemoteProof>
PeerManager::getRemoteProofs(const NodeId nodeid) const {
std::vector<RemoteProof> nodeRemoteProofs;
auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);
for (auto &it = begin; it != end; it++) {
nodeRemoteProofs.emplace_back(*it);
}
return nodeRemoteProofs;
}
bool PeerManager::removePeer(const PeerId peerid) {
auto it = peers.find(peerid);
if (it == peers.end()) {
return false;
}
// Remove all nodes from this peer.
removeNodeFromPeer(it, it->node_count);
auto &nview = nodes.get<next_request_time>();
// Add the nodes to the pending set
auto range = nview.equal_range(peerid);
for (auto &nit = range.first; nit != range.second; ++nit) {
pendingNodes.emplace(it->getProofId(), nit->nodeid);
};
// Remove nodes associated with this peer, unless their timeout is still
// active. This ensures that we don't overquery them in case they are
// subsequently added to another peer.
nview.erase(
nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
nview.upper_bound(
boost::make_tuple(peerid, Now<SteadyMilliseconds>())));
// Release UTXOs attached to this proof.
validProofPool.removeProof(it->getProofId());
// If there were nodes attached, remove from the radix tree as well
auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));
m_unbroadcast_proofids.erase(it->getProofId());
// Remove the peer from the PeerSet and remove its score from the registered
// score total.
assert(totalPeersScore >= it->getScore());
totalPeersScore -= it->getScore();
peers.erase(it);
return true;
}
PeerId PeerManager::selectPeer() const {
if (slots.empty() || slotCount == 0) {
return NO_PEER;
}
const uint64_t max = slotCount;
for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
size_t i = selectPeerImpl(slots, GetRand(max), max);
if (i != NO_PEER) {
return i;
}
}
return NO_PEER;
}
uint64_t PeerManager::compact() {
// There is nothing to compact.
if (fragmentation == 0) {
return 0;
}
std::vector<Slot> newslots;
newslots.reserve(peers.size());
uint64_t prevStop = 0;
uint32_t i = 0;
for (auto it = peers.begin(); it != peers.end(); it++) {
if (it->node_count == 0) {
continue;
}
newslots.emplace_back(prevStop, it->getScore(), it->peerid);
prevStop = slots[i].getStop();
if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
return 0;
}
}
slots = std::move(newslots);
const uint64_t saved = slotCount - prevStop;
slotCount = prevStop;
fragmentation = 0;
return saved;
}
bool PeerManager::verify() const {
uint64_t prevStop = 0;
uint32_t scoreFromSlots = 0;
for (size_t i = 0; i < slots.size(); i++) {
const Slot &s = slots[i];
// Slots must be in correct order.
if (s.getStart() < prevStop) {
return false;
}
prevStop = s.getStop();
// If this is a dead slot, then nothing more needs to be checked.
if (s.getPeerId() == NO_PEER) {
continue;
}
// We have a live slot, verify index.
auto it = peers.find(s.getPeerId());
if (it == peers.end() || it->index != i) {
return false;
}
// Accumulate score across slots
scoreFromSlots += slots[i].getScore();
}
// Score across slots must be the same as our allocated score
if (scoreFromSlots != connectedPeersScore) {
return false;
}
uint32_t scoreFromAllPeers = 0;
uint32_t scoreFromPeersWithNodes = 0;
std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
for (const auto &p : peers) {
// Accumulate the score across peers to compare with total known score
scoreFromAllPeers += p.getScore();
// A peer should have a proof attached
if (!p.proof) {
return false;
}
// Check proof pool consistency
for (const auto &ss : p.proof->getStakes()) {
const COutPoint &outpoint = ss.getStake().getUTXO();
auto proof = validProofPool.getProof(outpoint);
if (!proof) {
// Missing utxo
return false;
}
if (proof != p.proof) {
// Wrong proof
return false;
}
if (!peersUtxos.emplace(outpoint).second) {
// Duplicated utxo
return false;
}
}
// Count node attached to this peer.
const auto count_nodes = [&]() {
size_t count = 0;
auto &nview = nodes.get<next_request_time>();
auto begin = nview.lower_bound(
boost::make_tuple(p.peerid, SteadyMilliseconds()));
auto end = nview.upper_bound(
boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));
for (auto it = begin; it != end; ++it) {
count++;
}
return count;
};
if (p.node_count != count_nodes()) {
return false;
}
// If there are no nodes attached to this peer, then we are done.
if (p.node_count == 0) {
continue;
}
scoreFromPeersWithNodes += p.getScore();
// The index must point to a slot referring to this peer.
if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
return false;
}
// If the scores do not match, same thing.
if (slots[p.index].getScore() != p.getScore()) {
return false;
}
// Check the proof is in the radix tree only if there are nodes attached
if (((localProof && p.getProofId() == localProof->getId()) ||
p.node_count > 0) &&
shareableProofs.get(p.getProofId()) == nullptr) {
return false;
}
if (p.node_count == 0 &&
shareableProofs.get(p.getProofId()) != nullptr) {
return false;
}
}
// Check our accumulated scores against our registered and allocated scores
if (scoreFromAllPeers != totalPeersScore) {
return false;
}
if (scoreFromPeersWithNodes != connectedPeersScore) {
return false;
}
// We checked the utxo consistency for all our peers' utxos already, so if
// the pool size differs from the expected one there are dangling utxos.
if (validProofPool.size() != peersUtxos.size()) {
return false;
}
// Check there is no dangling proof in the radix tree
return shareableProofs.forEachLeaf([&](RCUPtr<const Proof> pLeaf) {
return isBoundToPeer(pLeaf->getId());
});
}
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
const uint64_t max) {
assert(slot <= max);
size_t begin = 0, end = slots.size();
uint64_t bottom = 0, top = max;
// Try to find the slot using dichotomic search.
while ((end - begin) > 8) {
// The slot we picked is not allocated.
if (slot < bottom || slot >= top) {
return NO_PEER;
}
// Guesstimate the position of the slot.
size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
assert(begin <= i && i < end);
// We have a match.
if (slots[i].contains(slot)) {
return slots[i].getPeerId();
}
// We undershot.
if (slots[i].precedes(slot)) {
begin = i + 1;
if (begin >= end) {
return NO_PEER;
}
bottom = slots[begin].getStart();
continue;
}
// We overshot.
if (slots[i].follows(slot)) {
end = i;
top = slots[end].getStart();
continue;
}
// We have an unallocated slot.
return NO_PEER;
}
// Enough of that nonsense, let's fall back to linear search.
for (size_t i = begin; i < end; i++) {
// We have a match.
if (slots[i].contains(slot)) {
return slots[i].getPeerId();
}
}
// We failed to find a slot, retry.
return NO_PEER;
}
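selectPeer() draws a uniform slot number in [0, slotCount) with GetRand and maps it back to the owning peer with the function above; since each peer owns a range proportional to its score, selection is stake weighted. A hedged usage sketch of the test-exposed selectPeerImpl, reusing the hypothetical two-peer layout described after addNodeToPeer:

    // Hypothetical layout: peer 1 owns [0, 10), peer 2 owns [10, 30).
    std::vector<avalanche::Slot> slots;
    slots.emplace_back(/*start=*/0, /*score=*/10, /*peerid=*/1);
    slots.emplace_back(/*start=*/10, /*score=*/20, /*peerid=*/2);
    // A draw of 4 maps to peer 1, a draw of 25 maps to peer 2; peer 2 is
    // polled twice as often because its score (hence its range) is twice as large.
    avalanche::PeerId low = avalanche::selectPeerImpl(slots, /*slot=*/4, /*max=*/30);   // == 1
    avalanche::PeerId high = avalanche::selectPeerImpl(slots, /*slot=*/25, /*max=*/30); // == 2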
void PeerManager::addUnbroadcastProof(const ProofId &proofid) {
// The proof should be bound to a peer
if (isBoundToPeer(proofid)) {
m_unbroadcast_proofids.insert(proofid);
}
}
void PeerManager::removeUnbroadcastProof(const ProofId &proofid) {
m_unbroadcast_proofids.erase(proofid);
}
bool PeerManager::selectStakingRewardWinner(
const CBlockIndex *pprev,
std::vector<std::pair<ProofId, CScript>> &winners) {
if (!pprev) {
return false;
}
// Don't select proofs that have not been known for long enough, i.e. at
// least since twice the dangling proof cleanup timeout before the last
// block time, so we're sure not to account for proofs more recent than the
// previous block or lacking a connected node.
// The previous block time is capped to now for the unlikely event that the
// previous block time is in the future.
auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
4 * Peer::DANGLING_TIMEOUT);
auto maxRegistrationDelay =
std::chrono::duration_cast<std::chrono::seconds>(
6 * Peer::DANGLING_TIMEOUT);
auto minRegistrationDelay =
std::chrono::duration_cast<std::chrono::seconds>(
2 * Peer::DANGLING_TIMEOUT);
const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());
const int64_t targetRegistrationTime = refTime - registrationDelay.count();
const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();
const BlockHash prevblockhash = pprev->GetBlockHash();
std::vector<ProofRef> selectedProofs;
ProofRef firstCompliantProof = ProofRef();
while (selectedProofs.size() < peers.size()) {
double bestRewardRank = std::numeric_limits<double>::max();
ProofRef selectedProof = ProofRef();
int64_t selectedProofRegistrationTime{0};
uint256 bestRewardHash;
for (const Peer &peer : peers) {
if (!peer.proof) {
// Should never happen, continue
continue;
}
if (!peer.hasFinalized ||
peer.registration_time.count() >= maxRegistrationTime) {
continue;
}
if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
[&peer](const ProofRef &proof) {
return peer.getProofId() == proof->getId();
}) != selectedProofs.end()) {
continue;
}
uint256 proofRewardHash;
CHash256()
.Write(prevblockhash)
.Write(peer.getProofId())
.Finalize(proofRewardHash);
if (proofRewardHash == uint256::ZERO) {
// This is either the result of an incredibly unlikely lucky hash,
// or the hash is being abused. In this case, skip the
// proof.
LogPrintf(
"Staking reward hash has a suspicious value of zero for "
"proof %s and blockhash %s, skipping\n",
peer.getProofId().ToString(), prevblockhash.ToString());
continue;
}
// To make sure the selection is properly weighted according to the
// proof score, we normalize the proofRewardHash to a number between
// 0 and 1, then take the logarithm and divide by the weight. Since
// it is scale-independent, we can simplify by removing constants
// and use base 2 logarithm.
// Inspired by: https://stackoverflow.com/a/30226926.
double proofRewardRank =
(256.0 -
std::log2(UintToArith256(proofRewardHash).getdouble())) /
peer.getScore();
// The best ranking is the lowest ranking value
if (proofRewardRank < bestRewardRank) {
bestRewardRank = proofRewardRank;
selectedProof = peer.proof;
selectedProofRegistrationTime = peer.registration_time.count();
bestRewardHash = proofRewardHash;
}
// Select the lowest reward hash then proofid in the unlikely case
// of a collision.
if (proofRewardRank == bestRewardRank &&
(proofRewardHash < bestRewardHash ||
(proofRewardHash == bestRewardHash &&
peer.getProofId() < selectedProof->getId()))) {
selectedProof = peer.proof;
selectedProofRegistrationTime = peer.registration_time.count();
bestRewardHash = proofRewardHash;
}
}
if (!selectedProof) {
// No winner
break;
}
if (!firstCompliantProof &&
selectedProofRegistrationTime < targetRegistrationTime) {
firstCompliantProof = selectedProof;
}
selectedProofs.push_back(selectedProof);
if (selectedProofRegistrationTime < minRegistrationTime &&
!isFlaky(selectedProof->getId())) {
break;
}
}
winners.clear();
if (!firstCompliantProof) {
return false;
}
winners.reserve(selectedProofs.size());
// Find the winner
for (const ProofRef &proof : selectedProofs) {
if (proof->getId() == firstCompliantProof->getId()) {
winners.push_back({proof->getId(), proof->getPayoutScript()});
}
}
// Add the others (if any) after the winner
for (const ProofRef &proof : selectedProofs) {
if (proof->getId() != firstCompliantProof->getId()) {
winners.push_back({proof->getId(), proof->getPayoutScript()});
}
}
return true;
}
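The normalization comment inside selectStakingRewardWinner above is standard weighted sampling via exponential ranking: the reward hash acts as a uniform draw, (256 - log2(hash)) behaves like an exponential variate, and dividing by the proof score makes high-score proofs proportionally more likely to obtain the lowest rank. A tiny self-contained illustration with made-up numbers (uses assert from <cassert>, which is already included above):

    // Made-up values: the same reward hash (log2(hash) == 200) ranked under
    // two different proof scores. The lowest rank wins.
    const double log2_hash = 200.0;
    const double rank_score_100 = (256.0 - log2_hash) / 100.0;   // 0.56
    const double rank_score_1000 = (256.0 - log2_hash) / 1000.0; // 0.056
    assert(rank_score_1000 < rank_score_100); // larger stake, better rank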
+bool PeerManager::setFlaky(const ProofId &proofid) {
+ return manualFlakyProofids.insert(proofid).second;
+}
+
+bool PeerManager::unsetFlaky(const ProofId &proofid) {
+ return manualFlakyProofids.erase(proofid) > 0;
+}
+
bool PeerManager::isFlaky(const ProofId &proofid) const {
if (localProof && proofid == localProof->getId()) {
return false;
}
+ if (manualFlakyProofids.count(proofid) > 0) {
+ return true;
+ }
+
// If we are missing connection to this proof, consider flaky
if (forPeer(proofid,
[](const Peer &peer) { return peer.node_count == 0; })) {
return true;
}
auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
auto &nview = nodes.get<next_request_time>();
std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
missing_per_peer;
// Construct a set of missing proof ids per peer
double total_score{0};
for (const Peer &peer : peers) {
const PeerId peerid = peer.peerid;
total_score += peer.getScore();
auto nodes_range = nview.equal_range(peerid);
for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
for (auto &proofit = proofs_range.first;
proofit != proofs_range.second; ++proofit) {
if (!proofit->present) {
missing_per_peer[peerid].insert(proofit->proofid);
}
}
};
}
double missing_score{0};
// Now compute a score for the missing proof
for (const auto &[peerid, missingProofs] : missing_per_peer) {
if (missingProofs.size() > 3) {
// Ignore peers with too many missing proofs
continue;
}
auto pit = peers.find(peerid);
if (pit == peers.end()) {
// Peer not found
continue;
}
if (missingProofs.count(proofid) > 0) {
missing_score += pit->getScore();
}
}
return (missing_score / total_score) > 0.3;
}
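Concretely, the 0.3 threshold means a proof is flagged as flaky when peers representing more than 30% of the total stake-weighted score report it as absent (peers that report more than three missing proofs are ignored when accumulating the missing score). With three peers of scores 50, 30 and 20, for example, the proof is flaky if it is missing from the peers scoring 30 and 20 (missing_score / total_score = 50 / 100 = 0.5 > 0.3), but not if it is only missing from the peer scoring 20 (0.2 <= 0.3).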
std::optional<bool>
PeerManager::getRemotePresenceStatus(const ProofId &proofid) const {
auto &remoteProofsView = remoteProofs.get<by_proofid>();
auto [begin, end] = remoteProofsView.equal_range(proofid);
if (begin == end) {
// No remote registered anything yet, we are on our own
return std::nullopt;
}
double total_score{0};
double present_score{0};
double missing_score{0};
for (auto it = begin; it != end; it++) {
auto nit = nodes.find(it->nodeid);
if (nit == nodes.end()) {
// No such node
continue;
}
const PeerId peerid = nit->peerid;
auto pit = peers.find(peerid);
if (pit == peers.end()) {
// Peer not found
continue;
}
uint32_t node_count = pit->node_count;
if (localProof && pit->getProofId() == localProof->getId()) {
// If that's our local proof, account for ourself
++node_count;
}
if (node_count == 0) {
// should never happen
continue;
}
const double score = double(pit->getScore()) / node_count;
total_score += score;
if (it->present) {
present_score += score;
} else {
missing_score += score;
}
}
if (localProof) {
auto &peersByProofid = peers.get<by_proofid>();
// Do we have a node connected for that proof ?
bool present = false;
auto pit = peersByProofid.find(proofid);
if (pit != peersByProofid.end()) {
present = pit->node_count > 0;
}
pit = peersByProofid.find(localProof->getId());
if (pit != peersByProofid.end()) {
// Also divide by node_count, we can have several nodes even for our
// local proof.
const double score =
double(pit->getScore()) / (1 + pit->node_count);
total_score += score;
if (present) {
present_score += score;
} else {
missing_score += score;
}
}
}
if (present_score / total_score > 0.55) {
return std::make_optional(true);
}
if (missing_score / total_score > 0.55) {
return std::make_optional(false);
}
return std::nullopt;
}
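The 0.55 cutoffs leave a deliberate gray zone: the function only answers when more than 55% of the stake-weighted score of the reporting peers agrees. For example, if peers whose combined score is 60 out of a total of 100 report the proof as present, the result is true (0.60 > 0.55); with a 50/50 split neither branch clears the threshold and the caller gets std::nullopt, i.e. "we don't know".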
bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
try {
const fs::path dumpPathTmp = dumpPath + ".new";
FILE *filestr = fsbridge::fopen(dumpPathTmp, "wb");
if (!filestr) {
return false;
}
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
file << PEERS_DUMP_VERSION;
file << uint64_t(peers.size());
for (const Peer &peer : peers) {
file << peer.proof;
file << peer.hasFinalized;
file << int64_t(peer.registration_time.count());
file << int64_t(peer.nextPossibleConflictTime.count());
}
if (!FileCommit(file.Get())) {
throw std::runtime_error(strprintf("Failed to commit to file %s",
PathToString(dumpPathTmp)));
}
file.fclose();
if (!RenameOver(dumpPathTmp, dumpPath)) {
throw std::runtime_error(strprintf("Rename failed from %s to %s",
PathToString(dumpPathTmp),
PathToString(dumpPath)));
}
} catch (const std::exception &e) {
LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
e.what());
return false;
}
LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
peers.size(), PathToString(dumpPath));
return true;
}
bool PeerManager::loadPeersFromFile(
const fs::path &dumpPath,
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
registeredProofs.clear();
FILE *filestr = fsbridge::fopen(dumpPath, "rb");
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
LogPrint(BCLog::AVALANCHE,
"Failed to open avalanche peers file from disk.\n");
return false;
}
try {
uint64_t version;
file >> version;
if (version != PEERS_DUMP_VERSION) {
LogPrint(BCLog::AVALANCHE,
"Unsupported avalanche peers file version.\n");
return false;
}
uint64_t numPeers;
file >> numPeers;
auto &peersByProofId = peers.get<by_proofid>();
for (uint64_t i = 0; i < numPeers; i++) {
ProofRef proof;
bool hasFinalized;
int64_t registrationTime;
int64_t nextPossibleConflictTime;
file >> proof;
file >> hasFinalized;
file >> registrationTime;
file >> nextPossibleConflictTime;
if (registerProof(proof)) {
auto it = peersByProofId.find(proof->getId());
if (it == peersByProofId.end()) {
// Should never happen
continue;
}
// We don't modify any key so we don't need to rehash.
// If the modify fails, it means we don't get the full benefit
// from the file but we still added our peer to the set. The
// non-overridden fields will be set the normal way.
peersByProofId.modify(it, [&](Peer &p) {
p.hasFinalized = hasFinalized;
p.registration_time =
std::chrono::seconds{registrationTime};
p.nextPossibleConflictTime =
std::chrono::seconds{nextPossibleConflictTime};
});
registeredProofs.insert(proof);
}
}
} catch (const std::exception &e) {
LogPrint(BCLog::AVALANCHE,
"Failed to read the avalanche peers file data on disk: %s.\n",
e.what());
return false;
}
return true;
}
} // namespace avalanche
diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h
index 56b4ba34e..80722e566 100644
--- a/src/avalanche/peermanager.h
+++ b/src/avalanche/peermanager.h
@@ -1,557 +1,562 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_AVALANCHE_PEERMANAGER_H
#define BITCOIN_AVALANCHE_PEERMANAGER_H
#include <avalanche/node.h>
#include <avalanche/proof.h>
#include <avalanche/proofpool.h>
#include <avalanche/proofradixtreeadapter.h>
#include <coins.h>
#include <common/bloom.h>
#include <consensus/validation.h>
#include <pubkey.h>
#include <radix.h>
#include <util/hasher.h>
#include <util/time.h>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/mem_fun.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <vector>
class ChainstateManager;
class CScheduler;
namespace avalanche {
/**
* Maximum number of immature proofs the peer manager will accept from the
* network. Note that reorgs can cause the immature pool to temporarily exceed
this limit, but a change in chaintip causes previously reorged proofs to be
* trimmed.
*/
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS = 4000;
class Delegation;
namespace {
struct TestPeerManager;
}
struct Slot {
private:
uint64_t start;
uint32_t score;
PeerId peerid;
public:
Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn)
: start(startIn), score(scoreIn), peerid(peeridIn) {}
Slot withStart(uint64_t startIn) const {
return Slot(startIn, score, peerid);
}
Slot withScore(uint64_t scoreIn) const {
return Slot(start, scoreIn, peerid);
}
Slot withPeerId(PeerId peeridIn) const {
return Slot(start, score, peeridIn);
}
uint64_t getStart() const { return start; }
uint64_t getStop() const { return start + score; }
uint32_t getScore() const { return score; }
PeerId getPeerId() const { return peerid; }
bool contains(uint64_t slot) const {
return getStart() <= slot && slot < getStop();
}
bool precedes(uint64_t slot) const { return slot >= getStop(); }
bool follows(uint64_t slot) const { return getStart() > slot; }
};
struct Peer {
PeerId peerid;
uint32_t index = -1;
uint32_t node_count = 0;
ProofRef proof;
bool hasFinalized = false;
// The network stack uses timestamps in seconds, so we oblige.
std::chrono::seconds registration_time;
std::chrono::seconds nextPossibleConflictTime;
double availabilityScore = 0.0;
/**
* Consider dropping the peer if no node is attached after this timeout
* expired.
*/
static constexpr auto DANGLING_TIMEOUT = 15min;
Peer(PeerId peerid_, ProofRef proof_,
std::chrono::seconds nextPossibleConflictTime_)
: peerid(peerid_), proof(std::move(proof_)),
registration_time(GetTime<std::chrono::seconds>()),
nextPossibleConflictTime(std::move(nextPossibleConflictTime_)) {}
const ProofId &getProofId() const { return proof->getId(); }
uint32_t getScore() const { return proof->getScore(); }
};
struct proof_index {
using result_type = ProofId;
result_type operator()(const Peer &p) const { return p.proof->getId(); }
};
struct score_index {
using result_type = uint32_t;
result_type operator()(const Peer &p) const { return p.getScore(); }
};
struct next_request_time {};
struct PendingNode {
ProofId proofid;
NodeId nodeid;
PendingNode(ProofId proofid_, NodeId nodeid_)
: proofid(proofid_), nodeid(nodeid_){};
};
struct by_proofid;
struct by_nodeid;
struct by_score;
struct RemoteProof {
ProofId proofid;
NodeId nodeid;
std::chrono::seconds lastUpdate;
bool present;
};
enum class ProofRegistrationResult {
NONE = 0,
ALREADY_REGISTERED,
IMMATURE,
INVALID,
CONFLICTING,
REJECTED,
COOLDOWN_NOT_ELAPSED,
DANGLING,
MISSING_UTXO,
};
class ProofRegistrationState : public ValidationState<ProofRegistrationResult> {
};
namespace bmi = boost::multi_index;
class PeerManager {
std::vector<Slot> slots;
uint64_t slotCount = 0;
uint64_t fragmentation = 0;
/**
* Several nodes can make an avalanche peer. In this case, all nodes are
* considered interchangeable parts of the same peer.
*/
using PeerSet = boost::multi_index_container<
Peer, bmi::indexed_by<
// index by peerid
bmi::hashed_unique<bmi::member<Peer, PeerId, &Peer::peerid>>,
// index by proof
bmi::hashed_unique<bmi::tag<by_proofid>, proof_index,
SaltedProofIdHasher>,
// ordered by score, decreasing order
bmi::ordered_non_unique<bmi::tag<by_score>, score_index,
std::greater<uint32_t>>>>;
PeerId nextPeerId = 0;
PeerSet peers;
ProofPool validProofPool;
ProofPool conflictingProofPool;
ProofPool immatureProofPool;
ProofPool danglingProofPool;
using ProofRadixTree = RadixTree<const Proof, ProofRadixTreeAdapter>;
ProofRadixTree shareableProofs;
using NodeSet = boost::multi_index_container<
Node, bmi::indexed_by<
// index by nodeid
bmi::hashed_unique<bmi::member<Node, NodeId, &Node::nodeid>>,
// sorted by peerid/nextRequestTime
bmi::ordered_non_unique<
bmi::tag<next_request_time>,
bmi::composite_key<
Node, bmi::member<Node, PeerId, &Node::peerid>,
bmi::member<Node, SteadyMilliseconds,
&Node::nextRequestTime>>>>>;
NodeSet nodes;
/**
* Flag indicating that we failed to select a node and need to expand our
* node set.
*/
std::atomic<bool> needMoreNodes{false};
using PendingNodeSet = boost::multi_index_container<
PendingNode,
bmi::indexed_by<
// index by proofid
bmi::hashed_non_unique<
bmi::tag<by_proofid>,
bmi::member<PendingNode, ProofId, &PendingNode::proofid>,
SaltedProofIdHasher>,
// index by nodeid
bmi::hashed_unique<
bmi::tag<by_nodeid>,
bmi::member<PendingNode, NodeId, &PendingNode::nodeid>>>>;
PendingNodeSet pendingNodes;
static constexpr int SELECT_PEER_MAX_RETRY = 3;
static constexpr int SELECT_NODE_MAX_RETRY = 3;
/**
* Track proof ids to broadcast
*/
ProofIdSet m_unbroadcast_proofids;
/**
* Quorum management.
*/
uint32_t totalPeersScore = 0;
uint32_t connectedPeersScore = 0;
Amount stakeUtxoDustThreshold;
ChainstateManager &chainman;
ProofRef localProof;
struct by_lastUpdate;
using RemoteProofSet = boost::multi_index_container<
RemoteProof,
bmi::indexed_by<
// index by proofid/nodeid pair
bmi::hashed_unique<
bmi::composite_key<
RemoteProof,
bmi::member<RemoteProof, ProofId, &RemoteProof::proofid>,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>>,
bmi::composite_key_hash<SaltedProofIdHasher,
boost::hash<NodeId>>>,
// index by proofid
bmi::hashed_non_unique<
bmi::tag<by_proofid>,
bmi::member<RemoteProof, ProofId, &RemoteProof::proofid>,
SaltedProofIdHasher>,
// index by nodeid
bmi::hashed_non_unique<
bmi::tag<by_nodeid>,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>>,
bmi::ordered_non_unique<
bmi::tag<by_lastUpdate>,
bmi::composite_key<
RemoteProof,
bmi::member<RemoteProof, NodeId, &RemoteProof::nodeid>,
bmi::member<RemoteProof, std::chrono::seconds,
&RemoteProof::lastUpdate>>>>>;
/**
* Remember which node sent which proof so we have an image of the proof set
* of our peers.
*/
RemoteProofSet remoteProofs;
/**
* Filter for proofs that are consensus-invalid or were recently invalidated
* by avalanche (finalized rejection). These are not rerequested until they
* are rolled out of the filter.
*
* Without this filter we'd be re-requesting proofs from each of our peers,
* increasing bandwidth consumption considerably.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*/
CRollingBloomFilter invalidProofs{100000, 0.000001};
+ std::unordered_set<ProofId, SaltedProofIdHasher> manualFlakyProofids;
+
public:
static constexpr size_t MAX_REMOTE_PROOFS{100};
PeerManager(const Amount &stakeUtxoDustThresholdIn,
ChainstateManager &chainmanIn,
const ProofRef &localProofIn = ProofRef())
: stakeUtxoDustThreshold(stakeUtxoDustThresholdIn),
chainman(chainmanIn), localProof(localProofIn){};
/**
* Node API.
*/
bool addNode(NodeId nodeid, const ProofId &proofid);
bool removeNode(NodeId nodeid);
size_t getNodeCount() const { return nodes.size(); }
size_t getPendingNodeCount() const { return pendingNodes.size(); }
// Update when a node is to be polled next.
bool updateNextRequestTime(NodeId nodeid, SteadyMilliseconds timeout);
/**
* Flag that a node did send its compact proofs.
* @return True if the flag changed state, i.e. if this is the first time
* the message is accounted for this node.
*/
bool latchAvaproofsSent(NodeId nodeid);
// Randomly select a node to poll.
NodeId selectNode();
/**
* Returns true if we encountered a lack of nodes since the last call.
*/
bool shouldRequestMoreNodes() { return needMoreNodes.exchange(false); }
template <typename Callable>
bool forNode(NodeId nodeid, Callable &&func) const {
auto it = nodes.find(nodeid);
return it != nodes.end() && func(*it);
}
template <typename Callable>
void forEachNode(const Peer &peer, Callable &&func) const {
auto &nview = nodes.get<next_request_time>();
auto range = nview.equal_range(peer.peerid);
for (auto it = range.first; it != range.second; ++it) {
func(*it);
}
}
/**
* Proof and Peer related API.
*/
/**
* Update the time before which a proof is not allowed to have conflicting
* UTXO with this peer's proof.
*/
bool updateNextPossibleConflictTime(PeerId peerid,
const std::chrono::seconds &nextTime);
/**
* Latch on that this peer has a finalized proof.
*/
bool setFinalized(PeerId peerid);
/**
* Registration mode
* - DEFAULT: Default policy, register only if the proof is unknown and has
* no conflict.
* - FORCE_ACCEPT: Turn a valid proof into a peer even if it has conflicts
* and is not the best candidate.
*/
enum class RegistrationMode {
DEFAULT,
FORCE_ACCEPT,
};
bool registerProof(const ProofRef &proof,
ProofRegistrationState &registrationState,
RegistrationMode mode = RegistrationMode::DEFAULT);
bool registerProof(const ProofRef &proof,
RegistrationMode mode = RegistrationMode::DEFAULT) {
ProofRegistrationState dummy;
return registerProof(proof, dummy, mode);
}
/**
* Rejection mode
* - DEFAULT: Default policy, reject a proof and attempt to keep it in the
* conflicting pool if possible.
* - INVALIDATE: Reject a proof by removing it from any of the pools.
*
* In any case if a peer is rejected, it attempts to pull the conflicting
* proofs back.
*/
enum class RejectionMode {
DEFAULT,
INVALIDATE,
};
bool rejectProof(const ProofId &proofid,
RejectionMode mode = RejectionMode::DEFAULT);
bool exists(const ProofId &proofid) const {
return getProof(proofid) != nullptr;
}
void cleanupDanglingProofs(
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs);
template <typename Callable>
bool forPeer(const ProofId &proofid, Callable &&func) const {
auto &pview = peers.get<by_proofid>();
auto it = pview.find(proofid);
return it != pview.end() && func(*it);
}
template <typename Callable> void forEachPeer(Callable &&func) const {
for (const auto &p : peers) {
func(p);
}
}
/**
* Update the peer set when a new block is connected.
*/
std::unordered_set<ProofRef, SaltedProofHasher> updatedBlockTip();
/**
* Proof broadcast API.
*/
void addUnbroadcastProof(const ProofId &proofid);
void removeUnbroadcastProof(const ProofId &proofid);
auto getUnbroadcastProofs() const { return m_unbroadcast_proofids; }
/*
* Quorum management
*/
uint32_t getTotalPeersScore() const { return totalPeersScore; }
uint32_t getConnectedPeersScore() const { return connectedPeersScore; }
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
const bool present);
std::vector<RemoteProof> getRemoteProofs(const NodeId nodeid) const;
+ bool setFlaky(const ProofId &proofid);
+ bool unsetFlaky(const ProofId &proofid);
+
template <typename Callable>
void updateAvailabilityScores(const double decayFactor,
Callable &&getNodeAvailabilityScore) {
for (auto it = peers.begin(); it != peers.end(); it++) {
peers.modify(it, [&](Peer &peer) {
// Calculate average of current node scores
double peerScore{0.0};
forEachNode(peer, [&](const avalanche::Node &node) {
peerScore += getNodeAvailabilityScore(node.nodeid);
});
// Calculate exponential moving average of averaged node scores
peer.availabilityScore =
decayFactor * peerScore +
(1. - decayFactor) * peer.availabilityScore;
});
}
}
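The update above is an exponential moving average: with decayFactor = 0.5, a peer whose previous availabilityScore was 0 and whose nodes' scores currently sum to 1.0 moves to 0.5 after one update and 0.75 after the next, so transient connectivity glitches are smoothed out instead of immediately collapsing the score.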
/****************************************************
* Functions which are public for testing purposes. *
****************************************************/
/**
* Remove an existing peer.
*/
bool removePeer(const PeerId peerid);
/**
* Randomly select a peer to poll.
*/
PeerId selectPeer() const;
/**
* Trigger maintenance of internal data structures.
* Returns how much slot space was saved after compaction.
*/
uint64_t compact();
/**
* Perform consistency check on internal data structures.
*/
bool verify() const;
// Accessors.
uint64_t getSlotCount() const { return slotCount; }
uint64_t getFragmentation() const { return fragmentation; }
const ProofPool &getValidProofPool() const { return validProofPool; }
const ProofPool &getConflictingProofPool() const {
return conflictingProofPool;
}
const ProofPool &getImmatureProofPool() const { return immatureProofPool; }
ProofRef getProof(const ProofId &proofid) const;
bool isBoundToPeer(const ProofId &proofid) const;
bool isImmature(const ProofId &proofid) const;
bool isInConflictingPool(const ProofId &proofid) const;
bool isDangling(const ProofId &proofid) const;
void setInvalid(const ProofId &proofid);
bool isInvalid(const ProofId &proofid) const;
void clearAllInvalid();
const ProofRadixTree &getShareableProofsSnapshot() const {
return shareableProofs;
}
const Amount &getStakeUtxoDustThreshold() const {
return stakeUtxoDustThreshold;
}
/**
* Deterministically select a list of payout scripts based on the proof set
* and the previous block hash.
*/
bool selectStakingRewardWinner(
const CBlockIndex *pprev,
std::vector<std::pair<ProofId, CScript>> &winners);
bool dumpPeersToFile(const fs::path &dumpPath) const;
bool loadPeersFromFile(
const fs::path &dumpPath,
std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs);
private:
template <typename ProofContainer>
void moveToConflictingPool(const ProofContainer &proofs);
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid);
bool addNodeToPeer(const PeerSet::iterator &it);
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count = 1);
/**
* @brief Get the remote presence status of a proof
*
* @param proofid The target proof id
* @return true if it's likely present, false if likely missing, nullopt if
* uncertain.
*/
std::optional<bool> getRemotePresenceStatus(const ProofId &proofid) const;
bool isFlaky(const ProofId &proofid) const;
friend struct ::avalanche::TestPeerManager;
};
/**
* Internal methods that are exposed for testing purposes.
*/
PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
const uint64_t max);
} // namespace avalanche
#endif // BITCOIN_AVALANCHE_PEERMANAGER_H
diff --git a/src/rpc/avalanche.cpp b/src/rpc/avalanche.cpp
index b25c7f2be..582d1a36d 100644
--- a/src/rpc/avalanche.cpp
+++ b/src/rpc/avalanche.cpp
@@ -1,1658 +1,1704 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/validation.h>
#include <common/args.h>
#include <config.h>
#include <core_io.h>
#include <index/txindex.h>
#include <key_io.h>
#include <net_processing.h>
#include <node/context.h>
#include <policy/block/stakingrewards.h>
#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/server_util.h>
#include <rpc/util.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <univalue.h>
using node::GetTransaction;
using node::NodeContext;
static RPCHelpMan getavalanchekey() {
return RPCHelpMan{
"getavalanchekey",
"Returns the key used to sign avalanche messages.\n",
{},
RPCResult{RPCResult::Type::STR_HEX, "", ""},
RPCExamples{HelpExampleRpc("getavalanchekey", "")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
return HexStr(avalanche.getSessionPubKey());
},
};
}
static CPubKey ParsePubKey(const UniValue &param) {
const std::string keyHex = param.get_str();
if ((keyHex.length() != 2 * CPubKey::COMPRESSED_SIZE &&
keyHex.length() != 2 * CPubKey::SIZE) ||
!IsHex(keyHex)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
strprintf("Invalid public key: %s\n", keyHex));
}
return HexToPubKey(keyHex);
}
static bool registerProofIfNeeded(const avalanche::Processor &avalanche,
avalanche::ProofRef proof,
avalanche::ProofRegistrationState &state) {
auto localProof = avalanche.getLocalProof();
if (localProof && localProof->getId() == proof->getId()) {
return true;
}
return avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
return pm.getProof(proof->getId()) || pm.registerProof(proof, state);
});
}
static bool registerProofIfNeeded(const avalanche::Processor &avalanche,
avalanche::ProofRef proof) {
avalanche::ProofRegistrationState state;
return registerProofIfNeeded(avalanche, std::move(proof), state);
}
static void verifyDelegationOrThrow(avalanche::Delegation &dg,
const std::string &dgHex, CPubKey &auth) {
bilingual_str error;
if (!avalanche::Delegation::FromHex(dg, dgHex, error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
avalanche::DelegationState state;
if (!dg.verify(state, auth)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The delegation is invalid: " + state.ToString());
}
}
static void verifyProofOrThrow(const NodeContext &node, avalanche::Proof &proof,
const std::string &proofHex) {
bilingual_str error;
if (!avalanche::Proof::FromHex(proof, proofHex, error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
Amount stakeUtxoDustThreshold = avalanche::PROOF_DUST_THRESHOLD;
if (node.avalanche) {
// If Avalanche is enabled, use the configured dust threshold
node.avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
stakeUtxoDustThreshold = pm.getStakeUtxoDustThreshold();
});
}
avalanche::ProofValidationState state;
{
LOCK(cs_main);
if (!proof.verify(stakeUtxoDustThreshold, *Assert(node.chainman),
state)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The proof is invalid: " + state.ToString());
}
}
}
static RPCHelpMan addavalanchenode() {
return RPCHelpMan{
"addavalanchenode",
"Add a node in the set of peers to poll for avalanche.\n",
{
{"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO,
"Node to be added to avalanche."},
{"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The public key of the node."},
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"Proof that the node is not a sybil."},
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"The proof delegation the the node public key"},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the addition succeeded or not."},
RPCExamples{
HelpExampleRpc("addavalanchenode", "5, \"<pubkey>\", \"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
const NodeId nodeid = request.params[0].getInt<int64_t>();
CPubKey key = ParsePubKey(request.params[1]);
auto proof = RCUPtr<avalanche::Proof>::make();
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
verifyProofOrThrow(node, *proof, request.params[2].get_str());
const avalanche::ProofId &proofid = proof->getId();
if (key != proof->getMaster()) {
if (request.params.size() < 4 || request.params[3].isNull()) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The public key does not match the proof");
}
avalanche::Delegation dg;
CPubKey auth;
verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);
if (dg.getProofId() != proofid) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
"The delegation does not match the proof");
}
if (key != auth) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The public key does not match the delegation");
}
}
if (!registerProofIfNeeded(avalanche, proof)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The proof has conflicting utxos");
}
if (!node.connman->ForNode(nodeid, [&](CNode *pnode) {
LOCK(pnode->cs_avalanche_pubkey);
bool expected = false;
if (pnode->m_avalanche_enabled.compare_exchange_strong(
expected, true)) {
pnode->m_avalanche_pubkey = std::move(key);
}
return true;
})) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
strprintf("The node does not exist: %d", nodeid));
}
return avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
if (!pm.addNode(nodeid, proofid)) {
return false;
}
pm.addUnbroadcastProof(proofid);
return true;
});
},
};
}
static RPCHelpMan buildavalancheproof() {
return RPCHelpMan{
"buildavalancheproof",
"Build a proof for avalanche's sybil resistance.\n",
{
{"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The proof's sequence"},
{"expiration", RPCArg::Type::NUM, RPCArg::Optional::NO,
"A timestamp indicating when the proof expire"},
{"master", RPCArg::Type::STR, RPCArg::Optional::NO,
"The master private key in base58-encoding"},
{
"stakes",
RPCArg::Type::ARR,
RPCArg::Optional::NO,
"The stakes to be signed and associated private keys",
{
{
"stake",
RPCArg::Type::OBJ,
RPCArg::Optional::NO,
"A stake to be attached to this proof",
{
{"txid", RPCArg::Type::STR_HEX,
RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The output number"},
{"amount", RPCArg::Type::AMOUNT,
RPCArg::Optional::NO, "The amount in this UTXO"},
{"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The height at which this UTXO was mined"},
{"iscoinbase", RPCArg::Type::BOOL,
RPCArg::Default{false},
"Indicate wether the UTXO is a coinbase"},
{"privatekey", RPCArg::Type::STR,
RPCArg::Optional::NO,
"private key in base58-encoding"},
},
},
},
},
{"payoutAddress", RPCArg::Type::STR, RPCArg::Optional::NO,
"A payout address"},
},
RPCResult{RPCResult::Type::STR_HEX, "proof",
"A string that is a serialized, hex-encoded proof data."},
RPCExamples{HelpExampleRpc("buildavalancheproof",
"0 1234567800 \"<master>\" []")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
const uint64_t sequence = request.params[0].getInt<int64_t>();
const int64_t expiration = request.params[1].getInt<int64_t>();
CKey masterKey = DecodeSecret(request.params[2].get_str());
if (!masterKey.IsValid()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid master key");
}
CTxDestination payoutAddress = DecodeDestination(
request.params[4].get_str(), config.GetChainParams());
if (!IsValidDestination(payoutAddress)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Invalid payout address");
}
avalanche::ProofBuilder pb(sequence, expiration, masterKey,
GetScriptForDestination(payoutAddress));
const UniValue &stakes = request.params[3].get_array();
for (size_t i = 0; i < stakes.size(); i++) {
const UniValue &stake = stakes[i];
RPCTypeCheckObj(
stake,
{
{"txid", UniValue::VSTR},
{"vout", UniValue::VNUM},
// "amount" is also required but check is done below
// due to UniValue::VNUM erroneously not accepting
// quoted numerics (which are valid JSON)
{"height", UniValue::VNUM},
{"privatekey", UniValue::VSTR},
});
int nOut = stake.find_value("vout").getInt<int>();
if (nOut < 0) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
"vout cannot be negative");
}
const int height = stake.find_value("height").getInt<int>();
if (height < 1) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
"height must be positive");
}
const TxId txid(ParseHashO(stake, "txid"));
const COutPoint utxo(txid, nOut);
if (!stake.exists("amount")) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Missing amount");
}
const Amount amount =
AmountFromValue(stake.find_value("amount"));
const UniValue &iscbparam = stake.find_value("iscoinbase");
const bool iscoinbase =
iscbparam.isNull() ? false : iscbparam.get_bool();
CKey key =
DecodeSecret(stake.find_value("privatekey").get_str());
if (!key.IsValid()) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Invalid private key");
}
if (!pb.addUTXO(utxo, amount, uint32_t(height), iscoinbase,
std::move(key))) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Duplicated stake");
}
}
const avalanche::ProofRef proof = pb.build();
return proof->ToHex();
},
};
}
static RPCHelpMan decodeavalancheproof() {
return RPCHelpMan{
"decodeavalancheproof",
"Convert a serialized, hex-encoded proof, into JSON object. "
"The validity of the proof is not verified.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The proof hex string"},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::NUM, "sequence",
"The proof's sequential number"},
{RPCResult::Type::NUM, "expiration",
"A timestamp indicating when the proof expires"},
{RPCResult::Type::STR_HEX, "master", "The master public key"},
{RPCResult::Type::STR, "signature",
"The proof signature (base64 encoded)"},
{RPCResult::Type::OBJ,
"payoutscript",
"The proof payout script",
{
{RPCResult::Type::STR, "asm", "Decoded payout script"},
{RPCResult::Type::STR_HEX, "hex",
"Raw payout script in hex format"},
{RPCResult::Type::STR, "type",
"The output type (e.g. " + GetAllOutputTypes() + ")"},
{RPCResult::Type::NUM, "reqSigs",
"The required signatures"},
{RPCResult::Type::ARR,
"addresses",
"",
{
{RPCResult::Type::STR, "address", "eCash address"},
}},
}},
{RPCResult::Type::STR_HEX, "limitedid",
"A hash of the proof data excluding the master key."},
{RPCResult::Type::STR_HEX, "proofid",
"A hash of the limitedid and master key."},
{RPCResult::Type::STR_AMOUNT, "staked_amount",
"The total staked amount of this proof in " +
Currency::get().ticker + "."},
{RPCResult::Type::NUM, "score", "The score of this proof."},
{RPCResult::Type::ARR,
"stakes",
"",
{
{RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::STR_HEX, "txid",
"The transaction id"},
{RPCResult::Type::NUM, "vout", "The output number"},
{RPCResult::Type::STR_AMOUNT, "amount",
"The amount in this UTXO"},
{RPCResult::Type::NUM, "height",
"The height at which this UTXO was mined"},
{RPCResult::Type::BOOL, "iscoinbase",
"Indicate whether the UTXO is a coinbase"},
{RPCResult::Type::STR_HEX, "pubkey",
"This UTXO's public key"},
{RPCResult::Type::STR, "signature",
"Signature of the proofid with this UTXO's private "
"key (base64 encoded)"},
}},
}},
}},
RPCExamples{HelpExampleCli("decodeavalancheproof", "\"<hex proof>\"") +
HelpExampleRpc("decodeavalancheproof", "\"<hex proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
avalanche::Proof proof;
bilingual_str error;
if (!avalanche::Proof::FromHex(proof, request.params[0].get_str(),
error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
UniValue result(UniValue::VOBJ);
result.pushKV("sequence", proof.getSequence());
result.pushKV("expiration", proof.getExpirationTime());
result.pushKV("master", HexStr(proof.getMaster()));
result.pushKV("signature", EncodeBase64(proof.getSignature()));
const auto payoutScript = proof.getPayoutScript();
UniValue payoutScriptObj(UniValue::VOBJ);
ScriptPubKeyToUniv(payoutScript, payoutScriptObj,
/* fIncludeHex */ true);
result.pushKV("payoutscript", payoutScriptObj);
result.pushKV("limitedid", proof.getLimitedId().ToString());
result.pushKV("proofid", proof.getId().ToString());
result.pushKV("staked_amount", proof.getStakedAmount());
result.pushKV("score", uint64_t(proof.getScore()));
UniValue stakes(UniValue::VARR);
for (const avalanche::SignedStake &s : proof.getStakes()) {
const COutPoint &utxo = s.getStake().getUTXO();
UniValue stake(UniValue::VOBJ);
stake.pushKV("txid", utxo.GetTxId().ToString());
stake.pushKV("vout", uint64_t(utxo.GetN()));
stake.pushKV("amount", s.getStake().getAmount());
stake.pushKV("height", uint64_t(s.getStake().getHeight()));
stake.pushKV("iscoinbase", s.getStake().isCoinbase());
stake.pushKV("pubkey", HexStr(s.getStake().getPubkey()));
// Only PKHash destination is supported, so this is safe
stake.pushKV("address",
EncodeDestination(PKHash(s.getStake().getPubkey()),
config));
stake.pushKV("signature", EncodeBase64(s.getSignature()));
stakes.push_back(stake);
}
result.pushKV("stakes", stakes);
return result;
},
};
}
static RPCHelpMan delegateavalancheproof() {
return RPCHelpMan{
"delegateavalancheproof",
"Delegate the avalanche proof to another public key.\n",
{
{"limitedproofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The limited id of the proof to be delegated."},
{"privatekey", RPCArg::Type::STR, RPCArg::Optional::NO,
"The private key in base58-encoding. Must match the proof master "
"public key or the upper level parent delegation public key if "
" supplied."},
{"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The public key to delegate the proof to."},
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"A string that is the serialized, hex-encoded delegation for the "
"proof and which is a parent for the delegation to build."},
},
RPCResult{RPCResult::Type::STR_HEX, "delegation",
"A string that is a serialized, hex-encoded delegation."},
RPCExamples{
HelpExampleRpc("delegateavalancheproof",
"\"<limitedproofid>\" \"<privkey>\" \"<pubkey>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
avalanche::LimitedProofId limitedProofId{
ParseHashV(request.params[0], "limitedproofid")};
const CKey privkey = DecodeSecret(request.params[1].get_str());
if (!privkey.IsValid()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
"The private key is invalid");
}
const CPubKey pubkey = ParsePubKey(request.params[2]);
std::unique_ptr<avalanche::DelegationBuilder> dgb;
if (request.params.size() >= 4 && !request.params[3].isNull()) {
avalanche::Delegation dg;
CPubKey auth;
verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);
if (dg.getProofId() !=
limitedProofId.computeProofId(dg.getProofMaster())) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
"The delegation does not match the proof");
}
if (privkey.GetPubKey() != auth) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The private key does not match the delegation");
}
dgb = std::make_unique<avalanche::DelegationBuilder>(dg);
} else {
dgb = std::make_unique<avalanche::DelegationBuilder>(
limitedProofId, privkey.GetPubKey());
}
if (!dgb->addLevel(privkey, pubkey)) {
throw JSONRPCError(RPC_MISC_ERROR,
"Unable to build the delegation");
}
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << dgb->build();
return HexStr(ss);
},
};
}
static RPCHelpMan decodeavalanchedelegation() {
return RPCHelpMan{
"decodeavalanchedelegation",
"Convert a serialized, hex-encoded avalanche proof delegation, into "
"JSON object. \n"
"The validity of the delegation is not verified.\n",
{
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The delegation hex string"},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::STR_HEX, "pubkey",
"The public key the proof is delegated to."},
{RPCResult::Type::STR_HEX, "proofmaster",
"The delegated proof master public key."},
{RPCResult::Type::STR_HEX, "delegationid",
"The identifier of this delegation."},
{RPCResult::Type::STR_HEX, "limitedid",
"A delegated proof data hash excluding the master key."},
{RPCResult::Type::STR_HEX, "proofid",
"A hash of the delegated proof limitedid and master key."},
{RPCResult::Type::NUM, "depth",
"The number of delegation levels."},
{RPCResult::Type::ARR,
"levels",
"",
{
{RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::NUM, "index",
"The index of this delegation level."},
{RPCResult::Type::STR_HEX, "pubkey",
"This delegated public key for this level"},
{RPCResult::Type::STR, "signature",
"Signature of this delegation level (base64 "
"encoded)"},
}},
}},
}},
RPCExamples{HelpExampleCli("decodeavalanchedelegation",
"\"<hex delegation>\"") +
HelpExampleRpc("decodeavalanchedelegation",
"\"<hex delegation>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
avalanche::Delegation delegation;
bilingual_str error;
if (!avalanche::Delegation::FromHex(
delegation, request.params[0].get_str(), error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
UniValue result(UniValue::VOBJ);
result.pushKV("pubkey", HexStr(delegation.getDelegatedPubkey()));
result.pushKV("proofmaster", HexStr(delegation.getProofMaster()));
result.pushKV("delegationid", delegation.getId().ToString());
result.pushKV("limitedid",
delegation.getLimitedProofId().ToString());
result.pushKV("proofid", delegation.getProofId().ToString());
auto levels = delegation.getLevels();
result.pushKV("depth", uint64_t(levels.size()));
UniValue levelsArray(UniValue::VARR);
for (auto &level : levels) {
UniValue obj(UniValue::VOBJ);
obj.pushKV("pubkey", HexStr(level.pubkey));
obj.pushKV("signature", EncodeBase64(level.sig));
levelsArray.push_back(std::move(obj));
}
result.pushKV("levels", levelsArray);
return result;
},
};
}
static RPCHelpMan getavalancheinfo() {
return RPCHelpMan{
"getavalancheinfo",
"Returns an object containing various state info regarding avalanche "
"networking.\n",
{},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::BOOL, "ready_to_poll",
"Whether the node is ready to start polling and voting."},
{RPCResult::Type::OBJ,
"local",
"Only available if -avaproof has been supplied to the node",
{
{RPCResult::Type::BOOL, "verified",
"Whether the node local proof has been locally verified "
"or not."},
{RPCResult::Type::STR, "verification_status",
"The proof verification status. Only available if the "
"\"verified\" flag is false."},
{RPCResult::Type::STR_HEX, "proofid",
"The node local proof id."},
{RPCResult::Type::STR_HEX, "limited_proofid",
"The node local limited proof id."},
{RPCResult::Type::STR_HEX, "master",
"The node local proof master public key."},
{RPCResult::Type::STR, "payout_address",
"The node local proof payout address. This might be "
"omitted if the payout script is not one of P2PK, P2PKH "
"or P2SH, in which case decodeavalancheproof can be used "
"to get more details."},
{RPCResult::Type::STR_AMOUNT, "stake_amount",
"The node local proof staked amount."},
}},
{RPCResult::Type::OBJ,
"network",
"",
{
{RPCResult::Type::NUM, "proof_count",
"The number of valid avalanche proofs we know exist "
"(including this node's local proof if applicable)."},
{RPCResult::Type::NUM, "connected_proof_count",
"The number of avalanche proofs with at least one node "
"we are connected to (including this node's local proof "
"if applicable)."},
{RPCResult::Type::NUM, "dangling_proof_count",
"The number of avalanche proofs with no node attached."},
{RPCResult::Type::NUM, "finalized_proof_count",
"The number of known avalanche proofs that have been "
"finalized by avalanche."},
{RPCResult::Type::NUM, "conflicting_proof_count",
"The number of known avalanche proofs that conflict with "
"valid proofs."},
{RPCResult::Type::NUM, "immature_proof_count",
"The number of known avalanche proofs that have immature "
"utxos."},
{RPCResult::Type::STR_AMOUNT, "total_stake_amount",
"The total staked amount over all the valid proofs in " +
Currency::get().ticker +
" (including this node's local proof if "
"applicable)."},
{RPCResult::Type::STR_AMOUNT, "connected_stake_amount",
"The total staked amount over all the connected proofs "
"in " +
Currency::get().ticker +
" (including this node's local proof if "
"applicable)."},
{RPCResult::Type::STR_AMOUNT, "dangling_stake_amount",
"The total staked amount over all the dangling proofs "
"in " +
Currency::get().ticker +
" (including this node's local proof if "
"applicable)."},
{RPCResult::Type::STR_AMOUNT, "immature_stake_amount",
"The total staked amount over all the immature proofs "
"in " +
Currency::get().ticker +
" (including this node's local proof if "
"applicable)."},
{RPCResult::Type::NUM, "node_count",
"The number of avalanche nodes we are connected to "
"(including this node if a local proof is set)."},
{RPCResult::Type::NUM, "connected_node_count",
"The number of avalanche nodes associated with an "
"avalanche proof (including this node if a local proof "
"is set)."},
{RPCResult::Type::NUM, "pending_node_count",
"The number of avalanche nodes pending for a proof."},
}},
},
},
RPCExamples{HelpExampleCli("getavalancheinfo", "") +
HelpExampleRpc("getavalancheinfo", "")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
avalanche::Processor &avalanche = EnsureAvalanche(node);
UniValue ret(UniValue::VOBJ);
ret.pushKV("ready_to_poll", avalanche.isQuorumEstablished());
auto localProof = avalanche.getLocalProof();
if (localProof != nullptr) {
UniValue local(UniValue::VOBJ);
const bool verified = avalanche.withPeerManager(
[&](const avalanche::PeerManager &pm) {
const avalanche::ProofId &proofid = localProof->getId();
return pm.isBoundToPeer(proofid);
});
local.pushKV("verified", verified);
const bool sharing = avalanche.canShareLocalProof();
if (!verified) {
avalanche::ProofRegistrationState state =
avalanche.getLocalProofRegistrationState();
// If the local proof is not registered but the state is
// valid, no registration attempt occurred yet.
local.pushKV("verification_status",
state.IsValid()
? (sharing ? "pending verification"
: "pending inbound connections")
: state.GetRejectReason());
}
local.pushKV("proofid", localProof->getId().ToString());
local.pushKV("limited_proofid",
localProof->getLimitedId().ToString());
local.pushKV("master", HexStr(localProof->getMaster()));
CTxDestination destination;
if (ExtractDestination(localProof->getPayoutScript(),
destination)) {
local.pushKV("payout_address",
EncodeDestination(destination, config));
}
local.pushKV("stake_amount", localProof->getStakedAmount());
ret.pushKV("local", local);
}
avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
UniValue network(UniValue::VOBJ);
uint64_t proofCount{0};
uint64_t connectedProofCount{0};
uint64_t finalizedProofCount{0};
uint64_t connectedNodeCount{0};
Amount totalStakes = Amount::zero();
Amount connectedStakes = Amount::zero();
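                // Aggregate proof, stake and node statistics over every peer
                // currently bound to a proof.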
pm.forEachPeer([&](const avalanche::Peer &peer) {
CHECK_NONFATAL(peer.proof != nullptr);
const bool isLocalProof =
localProof &&
peer.proof->getId() == localProof->getId();
++proofCount;
const Amount proofStake = peer.proof->getStakedAmount();
totalStakes += proofStake;
if (peer.hasFinalized) {
++finalizedProofCount;
}
if (peer.node_count > 0 || isLocalProof) {
++connectedProofCount;
connectedStakes += proofStake;
}
connectedNodeCount += peer.node_count + isLocalProof;
});
Amount immatureStakes = Amount::zero();
pm.getImmatureProofPool().forEachProof(
[&](const avalanche::ProofRef &proof) {
immatureStakes += proof->getStakedAmount();
});
network.pushKV("proof_count", proofCount);
network.pushKV("connected_proof_count", connectedProofCount);
network.pushKV("dangling_proof_count",
proofCount - connectedProofCount);
network.pushKV("finalized_proof_count", finalizedProofCount);
network.pushKV(
"conflicting_proof_count",
uint64_t(pm.getConflictingProofPool().countProofs()));
network.pushKV(
"immature_proof_count",
uint64_t(pm.getImmatureProofPool().countProofs()));
network.pushKV("total_stake_amount", totalStakes);
network.pushKV("connected_stake_amount", connectedStakes);
network.pushKV("dangling_stake_amount",
totalStakes - connectedStakes);
network.pushKV("immature_stake_amount", immatureStakes);
const uint64_t pendingNodes = pm.getPendingNodeCount();
network.pushKV("node_count", connectedNodeCount + pendingNodes);
network.pushKV("connected_node_count", connectedNodeCount);
network.pushKV("pending_node_count", pendingNodes);
ret.pushKV("network", network);
});
return ret;
},
};
}
static RPCHelpMan getavalanchepeerinfo() {
return RPCHelpMan{
"getavalanchepeerinfo",
"Returns data about an avalanche peer as a json array of objects. If "
"no proofid is provided, returns data about all the peers.\n",
{
{"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"The hex encoded avalanche proof identifier."},
},
RPCResult{
RPCResult::Type::ARR,
"",
"",
{{
RPCResult::Type::OBJ,
"",
"",
{{
{RPCResult::Type::NUM, "avalanche_peerid",
"The avalanche internal peer identifier"},
{RPCResult::Type::NUM, "availability_score",
"The agreggated availability score of this peer's nodes"},
{RPCResult::Type::STR_HEX, "proofid",
"The avalanche proof id used by this peer"},
{RPCResult::Type::STR_HEX, "proof",
"The avalanche proof used by this peer"},
{RPCResult::Type::NUM, "nodecount",
"The number of nodes for this peer"},
{RPCResult::Type::ARR,
"node_list",
"",
{
{RPCResult::Type::NUM, "nodeid",
"Node id, as returned by getpeerinfo"},
}},
}},
}},
},
RPCExamples{HelpExampleCli("getavalanchepeerinfo", "") +
HelpExampleCli("getavalanchepeerinfo", "\"proofid\"") +
HelpExampleRpc("getavalanchepeerinfo", "") +
HelpExampleRpc("getavalanchepeerinfo", "\"proofid\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
avalanche::Processor &avalanche = EnsureAvalanche(node);
auto peerToUniv = [](const avalanche::PeerManager &pm,
const avalanche::Peer &peer) {
UniValue obj(UniValue::VOBJ);
obj.pushKV("avalanche_peerid", uint64_t(peer.peerid));
obj.pushKV("availability_score", peer.availabilityScore);
obj.pushKV("proofid", peer.getProofId().ToString());
obj.pushKV("proof", peer.proof->ToHex());
UniValue nodes(UniValue::VARR);
pm.forEachNode(peer, [&](const avalanche::Node &n) {
nodes.push_back(n.nodeid);
});
obj.pushKV("nodecount", uint64_t(peer.node_count));
obj.pushKV("node_list", nodes);
return obj;
};
UniValue ret(UniValue::VARR);
avalanche.withPeerManager([&](const avalanche::PeerManager &pm) {
// If a proofid is provided, only return the associated peer
if (!request.params[0].isNull()) {
const avalanche::ProofId proofid =
avalanche::ProofId::fromHex(
request.params[0].get_str());
if (!pm.isBoundToPeer(proofid)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Proofid not found");
}
pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
ret.push_back(peerToUniv(pm, peer));
return true;
});
return;
}
// If no proofid is provided, return all the peers
pm.forEachPeer([&](const avalanche::Peer &peer) {
ret.push_back(peerToUniv(pm, peer));
});
});
return ret;
},
};
}
static RPCHelpMan getavalancheproofs() {
return RPCHelpMan{
"getavalancheproofs",
"Returns an object containing all tracked proofids.\n",
{},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::ARR,
"valid",
"",
{
{RPCResult::Type::STR_HEX, "proofid",
"Avalanche proof id"},
}},
{RPCResult::Type::ARR,
"conflicting",
"",
{
{RPCResult::Type::STR_HEX, "proofid",
"Avalanche proof id"},
}},
{RPCResult::Type::ARR,
"immature",
"",
{
{RPCResult::Type::STR_HEX, "proofid",
"Avalanche proof id"},
}},
},
},
RPCExamples{HelpExampleCli("getavalancheproofs", "") +
HelpExampleRpc("getavalancheproofs", "")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
UniValue ret(UniValue::VOBJ);
avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
auto appendProofIds = [&ret](const avalanche::ProofPool &pool,
const std::string &key) {
UniValue arrOut(UniValue::VARR);
for (const avalanche::ProofId &proofid :
pool.getProofIds()) {
arrOut.push_back(proofid.ToString());
}
ret.pushKV(key, arrOut);
};
appendProofIds(pm.getValidProofPool(), "valid");
appendProofIds(pm.getConflictingProofPool(), "conflicting");
appendProofIds(pm.getImmatureProofPool(), "immature");
});
return ret;
},
};
}
static RPCHelpMan getstakingreward() {
return RPCHelpMan{
"getstakingreward",
"Return a list of possible staking reward winners based on the "
"previous "
"block hash.\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The previous block hash, hex encoded."},
{"recompute", RPCArg::Type::BOOL, RPCArg::Default{false},
"Whether to recompute the staking reward winner if there is a "
"cached value."},
},
RPCResult{
RPCResult::Type::ARR,
"",
"",
{
{RPCResult::Type::OBJ,
"winner",
"The winning proof",
{
{RPCResult::Type::STR_HEX, "proofid",
"The winning proofid"},
{RPCResult::Type::STR, "asm", "Decoded payout script"},
{RPCResult::Type::STR_HEX, "hex",
"Raw payout script in hex format"},
{RPCResult::Type::STR, "type",
"The output type (e.g. " + GetAllOutputTypes() + ")"},
{RPCResult::Type::NUM, "reqSigs",
"The required signatures"},
{RPCResult::Type::ARR,
"addresses",
"",
{
{RPCResult::Type::STR, "address", "eCash address"},
}},
}},
}},
RPCExamples{HelpExampleRpc("getstakingreward", "<blockhash>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
const NodeContext &node = EnsureAnyNodeContext(request.context);
ChainstateManager &chainman = EnsureChainman(node);
avalanche::Processor &avalanche = EnsureAvalanche(node);
const BlockHash blockhash(
ParseHashV(request.params[0], "blockhash"));
const CBlockIndex *pprev;
{
LOCK(cs_main);
pprev = chainman.m_blockman.LookupBlockIndex(blockhash);
}
if (!pprev) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
strprintf("Block not found: %s\n", blockhash.ToString()));
}
if (!IsStakingRewardsActivated(
config.GetChainParams().GetConsensus(), pprev)) {
throw JSONRPCError(
RPC_INTERNAL_ERROR,
strprintf(
"Staking rewards are not activated for block %s\n",
blockhash.ToString()));
}
if (!request.params[1].isNull() && request.params[1].get_bool()) {
// Force recompute the staking reward winner by first erasing
// the cached entry if any
avalanche.eraseStakingRewardWinner(blockhash);
}
if (!avalanche.computeStakingReward(pprev)) {
throw JSONRPCError(
RPC_INTERNAL_ERROR,
strprintf("Unable to determine a staking reward winner "
"for block %s\n",
blockhash.ToString()));
}
std::vector<std::pair<avalanche::ProofId, CScript>> winners;
if (!avalanche.getStakingRewardWinners(blockhash, winners)) {
throw JSONRPCError(
RPC_INTERNAL_ERROR,
strprintf("Unable to retrieve the staking reward winner "
"for block %s\n",
blockhash.ToString()));
}
UniValue winnersArr(UniValue::VARR);
for (auto &winner : winners) {
UniValue stakingRewardsObj(UniValue::VOBJ);
ScriptPubKeyToUniv(winner.second, stakingRewardsObj,
/*fIncludeHex=*/true);
stakingRewardsObj.pushKV("proofid", winner.first.GetHex());
winnersArr.push_back(stakingRewardsObj);
}
return winnersArr;
},
};
}
static RPCHelpMan setstakingreward() {
return RPCHelpMan{
"setstakingreward",
"Set the staking reward winner for the given previous block hash.\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The previous block hash, hex encoded."},
{"payoutscript", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The payout script for the staking reward, hex encoded."},
{"append", RPCArg::Type::BOOL, RPCArg::Default{false},
"Append to the list of possible winners instead of replacing."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the payout script was set or not"},
RPCExamples{
HelpExampleRpc("setstakingreward", "<blockhash> <payout script>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
const NodeContext &node = EnsureAnyNodeContext(request.context);
ChainstateManager &chainman = EnsureChainman(node);
avalanche::Processor &avalanche = EnsureAvalanche(node);
const BlockHash blockhash(
ParseHashV(request.params[0], "blockhash"));
const CBlockIndex *pprev;
{
LOCK(cs_main);
pprev = chainman.m_blockman.LookupBlockIndex(blockhash);
}
if (!pprev) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
strprintf("Block not found: %s\n", blockhash.ToString()));
}
if (!IsStakingRewardsActivated(
config.GetChainParams().GetConsensus(), pprev)) {
throw JSONRPCError(
RPC_INTERNAL_ERROR,
strprintf(
"Staking rewards are not activated for block %s\n",
blockhash.ToString()));
}
const std::vector<uint8_t> data =
ParseHex(request.params[1].get_str());
CScript payoutScript(data.begin(), data.end());
std::vector<CScript> payoutScripts;
if (!request.params[2].isNull() && request.params[2].get_bool()) {
// Append mode, initialize our list with the current winners
// and the new one will be added to the back of that list. If
// there is no winner the list will remain empty.
avalanche.getStakingRewardWinners(blockhash, payoutScripts);
}
payoutScripts.push_back(std::move(payoutScript));
// This will return true upon insertion or false upon replacement.
// We want to convey the success of the RPC, so we always return
// true.
avalanche.setStakingRewardWinners(pprev, payoutScripts);
return true;
},
};
}
static RPCHelpMan getremoteproofs() {
return RPCHelpMan{
"getremoteproofs",
"Get the list of remote proofs for the given node id.\n",
{
{"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The node identifier."},
},
RPCResult{
RPCResult::Type::ARR,
"proofs",
"",
{{
RPCResult::Type::OBJ,
"proof",
"",
{{
{RPCResult::Type::STR_HEX, "proofid",
"The hex encoded proof identifier."},
{RPCResult::Type::BOOL, "present",
"Whether the node has the proof."},
{RPCResult::Type::NUM, "last_update",
"The last time this proof status was updated."},
}},
}},
},
RPCExamples{HelpExampleRpc("getremoteproofs", "<nodeid>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
const NodeId nodeid = request.params[0].getInt<int64_t>();
auto remoteProofs = avalanche.withPeerManager(
[nodeid](const avalanche::PeerManager &pm) {
return pm.getRemoteProofs(nodeid);
});
UniValue arrOut(UniValue::VARR);
for (const auto &remoteProof : remoteProofs) {
UniValue obj(UniValue::VOBJ);
obj.pushKV("proofid", remoteProof.proofid.ToString());
obj.pushKV("present", remoteProof.present);
obj.pushKV("last_update", remoteProof.lastUpdate.count());
arrOut.push_back(obj);
}
return arrOut;
},
};
}
static RPCHelpMan getrawavalancheproof() {
return RPCHelpMan{
"getrawavalancheproof",
"Lookup for a known avalanche proof by id.\n",
{
{"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The hex encoded avalanche proof identifier."},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{{
{RPCResult::Type::STR_HEX, "proof",
"The hex encoded proof matching the identifier."},
{RPCResult::Type::BOOL, "immature",
"Whether the proof has immature utxos."},
{RPCResult::Type::BOOL, "boundToPeer",
"Whether the proof is bound to an avalanche peer."},
{RPCResult::Type::BOOL, "conflicting",
"Whether the proof has a conflicting UTXO with an avalanche "
"peer."},
{RPCResult::Type::BOOL, "finalized",
"Whether the proof is finalized by vote."},
}},
},
RPCExamples{HelpExampleRpc("getrawavalancheproof", "<proofid>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
const avalanche::ProofId proofid =
avalanche::ProofId::fromHex(request.params[0].get_str());
bool isImmature = false;
bool isBoundToPeer = false;
bool conflicting = false;
bool finalized = false;
auto proof = avalanche.withPeerManager(
[&](const avalanche::PeerManager &pm) {
isImmature = pm.isImmature(proofid);
isBoundToPeer = pm.isBoundToPeer(proofid);
conflicting = pm.isInConflictingPool(proofid);
finalized =
pm.forPeer(proofid, [&](const avalanche::Peer &p) {
return p.hasFinalized;
});
return pm.getProof(proofid);
});
if (!proof) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Proof not found");
}
UniValue ret(UniValue::VOBJ);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << *proof;
ret.pushKV("proof", HexStr(ss));
ret.pushKV("immature", isImmature);
ret.pushKV("boundToPeer", isBoundToPeer);
ret.pushKV("conflicting", conflicting);
ret.pushKV("finalized", finalized);
return ret;
},
};
}
static RPCHelpMan invalidateavalancheproof() {
return RPCHelpMan{
"invalidateavalancheproof",
"Reject a known avalanche proof by id.\n",
{
{"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The hex encoded avalanche proof identifier."},
},
RPCResult{
RPCResult::Type::BOOL,
"success",
"",
},
RPCExamples{HelpExampleRpc("invalidateavalancheproof", "<proofid>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
avalanche::Processor &avalanche = EnsureAvalanche(node);
const avalanche::ProofId proofid =
avalanche::ProofId::fromHex(request.params[0].get_str());
avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
if (!pm.exists(proofid) && !pm.isDangling(proofid)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Proof not found");
}
if (!pm.rejectProof(
proofid,
avalanche::PeerManager::RejectionMode::INVALIDATE)) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Failed to reject the proof");
}
pm.setInvalid(proofid);
});
if (avalanche.isRecentlyFinalized(proofid)) {
// If the proof was previously finalized, clear the status.
// Because there is no way to selectively delete an entry from a
// Bloom filter, we have to clear the whole filter which could
// cause extra voting rounds.
avalanche.clearFinalizedItems();
}
return true;
},
};
}
static RPCHelpMan isfinalblock() {
return RPCHelpMan{
"isfinalblock",
"Check if a block has been finalized by avalanche votes.\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The hash of the block."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the block has been finalized by avalanche votes."},
RPCExamples{HelpExampleRpc("isfinalblock", "<block hash>") +
HelpExampleCli("isfinalblock", "<block hash>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
NodeContext &node = EnsureAnyNodeContext(request.context);
avalanche::Processor &avalanche = EnsureAvalanche(node);
if (!avalanche.isQuorumEstablished()) {
throw JSONRPCError(RPC_MISC_ERROR,
"Avalanche is not ready to poll yet.");
}
ChainstateManager &chainman = EnsureAnyChainman(request.context);
const BlockHash blockhash(
ParseHashV(request.params[0], "blockhash"));
const CBlockIndex *pindex;
{
LOCK(cs_main);
pindex = chainman.m_blockman.LookupBlockIndex(blockhash);
if (!pindex) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Block not found");
}
}
return chainman.ActiveChainstate().IsBlockAvalancheFinalized(
pindex);
},
};
}
static RPCHelpMan isfinaltransaction() {
return RPCHelpMan{
"isfinaltransaction",
"Check if a transaction has been finalized by avalanche votes.\n",
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The id of the transaction."},
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"The block in which to look for the transaction"},
},
RPCResult{
RPCResult::Type::BOOL, "success",
"Whether the transaction has been finalized by avalanche votes."},
RPCExamples{HelpExampleRpc("isfinaltransaction", "<txid> <blockhash>") +
HelpExampleCli("isfinaltransaction", "<txid> <blockhash>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
const NodeContext &node = EnsureAnyNodeContext(request.context);
ChainstateManager &chainman = EnsureChainman(node);
const CTxMemPool &mempool = EnsureMemPool(node);
avalanche::Processor &avalanche = EnsureAvalanche(node);
const TxId txid = TxId(ParseHashV(request.params[0], "txid"));
CBlockIndex *pindex = nullptr;
if (!request.params[1].isNull()) {
const BlockHash blockhash(
ParseHashV(request.params[1], "blockhash"));
LOCK(cs_main);
pindex = chainman.m_blockman.LookupBlockIndex(blockhash);
if (!pindex) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Block not found");
}
}
bool f_txindex_ready = false;
if (g_txindex && !pindex) {
f_txindex_ready = g_txindex->BlockUntilSyncedToCurrentChain();
}
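            // Look up the transaction in the provided block if one was given,
            // otherwise in the mempool or the transaction index.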
BlockHash hash_block;
const CTransactionRef tx = GetTransaction(
pindex, &mempool, txid, hash_block, chainman.m_blockman);
if (!avalanche.isQuorumEstablished()) {
throw JSONRPCError(RPC_MISC_ERROR,
"Avalanche is not ready to poll yet.");
}
if (!tx) {
std::string errmsg;
if (pindex) {
if (WITH_LOCK(::cs_main,
return !pindex->nStatus.hasData())) {
throw JSONRPCError(RPC_MISC_ERROR,
"Block data not downloaded yet.");
}
errmsg = "No such transaction found in the provided block.";
} else if (!g_txindex) {
errmsg = "No such transaction. Use -txindex or provide a "
"block hash to enable blockchain transaction "
"queries.";
} else if (!f_txindex_ready) {
errmsg = "No such transaction. Blockchain transactions are "
"still in the process of being indexed.";
} else {
errmsg = "No such mempool or blockchain transaction.";
}
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, errmsg);
}
if (!pindex) {
LOCK(cs_main);
pindex = chainman.m_blockman.LookupBlockIndex(hash_block);
}
if (!tx) {
// Tx not found, we should have raised an error at this stage
return false;
}
if (mempool.isAvalancheFinalized(txid)) {
// The transaction is finalized
return true;
}
// Return true if the tx is in a finalized block
return !node.mempool->exists(txid) &&
chainman.ActiveChainstate().IsBlockAvalancheFinalized(
pindex);
},
};
}
static RPCHelpMan reconsideravalancheproof() {
return RPCHelpMan{
"reconsideravalancheproof",
"Reconsider a known avalanche proof.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The hex encoded avalanche proof."},
},
RPCResult{
RPCResult::Type::BOOL,
"success",
"Whether the proof has been successfully registered.",
},
RPCExamples{HelpExampleRpc("reconsideravalancheproof", "<proof hex>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
auto proof = RCUPtr<avalanche::Proof>::make();
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
// Verify the proof. Note that this is redundant with the
// verification done when adding the proof to the pool, but we get a
// chance to give a better error message.
verifyProofOrThrow(node, *proof, request.params[0].get_str());
// There is no way to selectively clear the invalidation status of
// a single proof, so we clear the whole Bloom filter. This could
// cause extra voting rounds.
avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
if (pm.isInvalid(proof->getId())) {
pm.clearAllInvalid();
}
});
// Add the proof to the pool if we don't have it already. Since the
// proof verification has already been done, a failure likely
// indicates that there already is a proof with conflicting utxos.
avalanche::ProofRegistrationState state;
if (!registerProofIfNeeded(avalanche, proof, state)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
strprintf("%s (%s)\n",
state.GetRejectReason(),
state.GetDebugMessage()));
}
return avalanche.withPeerManager(
[&](const avalanche::PeerManager &pm) {
return pm.isBoundToPeer(proof->getId());
});
},
};
}
static RPCHelpMan sendavalancheproof() {
return RPCHelpMan{
"sendavalancheproof",
"Broadcast an avalanche proof.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The avalanche proof to broadcast."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the proof was sent successfully or not."},
RPCExamples{HelpExampleRpc("sendavalancheproof", "<proof>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
auto proof = RCUPtr<avalanche::Proof>::make();
NodeContext &node = EnsureAnyNodeContext(request.context);
const avalanche::Processor &avalanche = EnsureAvalanche(node);
// Verify the proof. Note that this is redundant with the
// verification done when adding the proof to the pool, but we get a
// chance to give a better error message.
verifyProofOrThrow(node, *proof, request.params[0].get_str());
// Add the proof to the pool if we don't have it already. Since the
// proof verification has already been done, a failure likely
// indicates that there already is a proof with conflicting utxos.
const avalanche::ProofId &proofid = proof->getId();
avalanche::ProofRegistrationState state;
if (!registerProofIfNeeded(avalanche, proof, state)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
strprintf("%s (%s)\n",
state.GetRejectReason(),
state.GetDebugMessage()));
}
avalanche.withPeerManager([&](avalanche::PeerManager &pm) {
pm.addUnbroadcastProof(proofid);
});
if (node.peerman) {
node.peerman->RelayProof(proofid);
}
return true;
},
};
}
static RPCHelpMan verifyavalancheproof() {
return RPCHelpMan{
"verifyavalancheproof",
"Verify an avalanche proof is valid and return the error otherwise.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"Proof to verify."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the proof is valid or not."},
RPCExamples{HelpExampleRpc("verifyavalancheproof", "\"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
avalanche::Proof proof;
verifyProofOrThrow(EnsureAnyNodeContext(request.context), proof,
request.params[0].get_str());
return true;
},
};
}
static RPCHelpMan verifyavalanchedelegation() {
return RPCHelpMan{
"verifyavalanchedelegation",
"Verify an avalanche delegation is valid and return the error "
"otherwise.\n",
{
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The avalanche proof delegation to verify."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the delegation is valid or not."},
RPCExamples{HelpExampleRpc("verifyavalanchedelegation", "\"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
avalanche::Delegation delegation;
CPubKey dummy;
verifyDelegationOrThrow(delegation, request.params[0].get_str(),
dummy);
return true;
},
};
}
+static RPCHelpMan setflakyproof() {
+ return RPCHelpMan{
+ "setflakyproof",
+ "Add or remove a proofid from the flaky list. This means that an "
+ "additional staking reward winner will be accepted if this proof is "
+ "the selected one.\n",
+ {
+ {"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
+ "The avalanche proof id."},
+ {"flaky", RPCArg::Type::BOOL, RPCArg::Optional::NO,
+ "Whether to add (true) or remove (false) the proof from the flaky "
+ "list"},
+ },
+ RPCResult{RPCResult::Type::BOOL, "success",
+ "Whether the addition/removal is successful."},
+ RPCExamples{HelpExampleRpc("setflakyproof", "\"<proofid>\" true")},
+ [&](const RPCHelpMan &self, const Config &config,
+ const JSONRPCRequest &request) -> UniValue {
+ NodeContext &node = EnsureAnyNodeContext(request.context);
+ avalanche::Processor &avalanche = EnsureAvalanche(node);
+ ChainstateManager &chainman = EnsureChainman(node);
+
+ const auto proofid =
+ avalanche::ProofId::fromHex(request.params[0].get_str());
+ const bool addNotRemove = request.params[1].get_bool();
+
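+            // If the peer manager accepted the update, refresh the staking
+            // reward winner for the current tip so the change takes effect
+            // immediately.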
+ if (avalanche.withPeerManager(
+ [&proofid, addNotRemove](avalanche::PeerManager &pm) {
+ if (addNotRemove) {
+ return pm.setFlaky(proofid);
+ }
+ return pm.unsetFlaky(proofid);
+ })) {
+ const CBlockIndex *pprev =
+ WITH_LOCK(cs_main, return chainman.ActiveTip());
+ // Force recompute the staking reward winner by first erasing
+ // the cached entry if any
+ avalanche.eraseStakingRewardWinner(pprev->GetBlockHash());
+ return avalanche.computeStakingReward(pprev);
+ }
+
+ return false;
+ }};
+}
+
void RegisterAvalancheRPCCommands(CRPCTable &t) {
// clang-format off
static const CRPCCommand commands[] = {
// category actor (function)
// ----------------- --------------------
{ "avalanche", getavalanchekey, },
{ "avalanche", addavalanchenode, },
{ "avalanche", buildavalancheproof, },
{ "avalanche", decodeavalancheproof, },
{ "avalanche", delegateavalancheproof, },
{ "avalanche", decodeavalanchedelegation, },
{ "avalanche", getavalancheinfo, },
{ "avalanche", getavalanchepeerinfo, },
{ "avalanche", getavalancheproofs, },
{ "avalanche", getstakingreward, },
{ "avalanche", setstakingreward, },
{ "avalanche", getremoteproofs, },
{ "avalanche", getrawavalancheproof, },
{ "avalanche", invalidateavalancheproof, },
{ "avalanche", isfinalblock, },
{ "avalanche", isfinaltransaction, },
{ "avalanche", reconsideravalancheproof, },
{ "avalanche", sendavalancheproof, },
{ "avalanche", verifyavalancheproof, },
{ "avalanche", verifyavalanchedelegation, },
+ { "avalanche", setflakyproof, },
};
// clang-format on
for (const auto &c : commands) {
t.appendCommand(c.name, &c);
}
}
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index ee1ae49fe..d69d80b3d 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -1,312 +1,313 @@
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <rpc/client.h>
#include <common/args.h>
#include <tinyformat.h>
#include <cstdint>
#include <set>
#include <string>
#include <string_view>
class CRPCConvertParam {
public:
std::string methodName; //!< method whose params want conversion
int paramIdx; //!< 0-based idx of param to convert
std::string paramName; //!< parameter name
};
/**
* Specify a (method, idx, name) here if the argument is a non-string RPC
* argument and needs to be converted from JSON.
*
* @note Parameter indexes start from 0.
*/
static const CRPCConvertParam vRPCConvertParams[] = {
{"setmocktime", 0, "timestamp"},
{"mockscheduler", 0, "delta_time"},
{"utxoupdatepsbt", 1, "descriptors"},
{"generatetoaddress", 0, "nblocks"},
{"generatetoaddress", 2, "maxtries"},
{"generatetodescriptor", 0, "num_blocks"},
{"generatetodescriptor", 2, "maxtries"},
{"generateblock", 1, "transactions"},
{"getnetworkhashps", 0, "nblocks"},
{"getnetworkhashps", 1, "height"},
{"sendtoaddress", 1, "amount"},
{"sendtoaddress", 4, "subtractfeefromamount"},
{"sendtoaddress", 5, "avoid_reuse"},
{"settxfee", 0, "amount"},
{"sethdseed", 0, "newkeypool"},
{"getreceivedbyaddress", 1, "minconf"},
{"getreceivedbylabel", 1, "minconf"},
{"listreceivedbyaddress", 0, "minconf"},
{"listreceivedbyaddress", 1, "include_empty"},
{"listreceivedbyaddress", 2, "include_watchonly"},
{"listreceivedbylabel", 0, "minconf"},
{"listreceivedbylabel", 1, "include_empty"},
{"listreceivedbylabel", 2, "include_watchonly"},
{"getbalance", 1, "minconf"},
{"getbalance", 2, "include_watchonly"},
{"getbalance", 3, "avoid_reuse"},
{"getblockfrompeer", 1, "peer_id"},
{"getblockhash", 0, "height"},
{"waitforblockheight", 0, "height"},
{"waitforblockheight", 1, "timeout"},
{"waitforblock", 1, "timeout"},
{"waitfornewblock", 0, "timeout"},
{"listtransactions", 1, "count"},
{"listtransactions", 2, "skip"},
{"listtransactions", 3, "include_watchonly"},
{"walletpassphrase", 1, "timeout"},
{"getblocktemplate", 0, "template_request"},
{"listsinceblock", 1, "target_confirmations"},
{"listsinceblock", 2, "include_watchonly"},
{"listsinceblock", 3, "include_removed"},
{"sendmany", 1, "amounts"},
{"sendmany", 2, "minconf"},
{"sendmany", 4, "subtractfeefrom"},
{"deriveaddresses", 1, "range"},
{"scantxoutset", 1, "scanobjects"},
{"addmultisigaddress", 0, "nrequired"},
{"addmultisigaddress", 1, "keys"},
{"createmultisig", 0, "nrequired"},
{"createmultisig", 1, "keys"},
{"listunspent", 0, "minconf"},
{"listunspent", 1, "maxconf"},
{"listunspent", 2, "addresses"},
{"listunspent", 3, "include_unsafe"},
{"listunspent", 4, "query_options"},
{"listunspent", 4, "minimumAmount"},
{"listunspent", 4, "maximumAmount"},
{"listunspent", 4, "maximumCount"},
{"listunspent", 4, "minimumSumAmount"},
{"getblock", 1, "verbosity"},
{"getblock", 1, "verbose"},
{"getblockheader", 1, "verbose"},
{"getchaintxstats", 0, "nblocks"},
{"gettransaction", 1, "include_watchonly"},
{"gettransaction", 2, "verbose"},
{"getrawtransaction", 1, "verbose"},
{"createrawtransaction", 0, "inputs"},
{"createrawtransaction", 1, "outputs"},
{"createrawtransaction", 2, "locktime"},
{"signrawtransactionwithkey", 1, "privkeys"},
{"signrawtransactionwithkey", 2, "prevtxs"},
{"signrawtransactionwithwallet", 1, "prevtxs"},
{"sendrawtransaction", 1, "maxfeerate"},
{"testmempoolaccept", 0, "rawtxs"},
{"testmempoolaccept", 1, "maxfeerate"},
{"combinerawtransaction", 0, "txs"},
{"fundrawtransaction", 1, "options"},
{"fundrawtransaction", 1, "add_inputs"},
{"fundrawtransaction", 1, "include_unsafe"},
{"fundrawtransaction", 1, "changePosition"},
{"fundrawtransaction", 1, "includeWatching"},
{"fundrawtransaction", 1, "lockUnspents"},
{"fundrawtransaction", 1, "feeRate"},
{"fundrawtransaction", 1, "subtractFeeFromOutputs"},
{"walletcreatefundedpsbt", 0, "inputs"},
{"walletcreatefundedpsbt", 1, "outputs"},
{"walletcreatefundedpsbt", 2, "locktime"},
{"walletcreatefundedpsbt", 3, "options"},
{"walletcreatefundedpsbt", 3, "add_inputs"},
{"walletcreatefundedpsbt", 3, "include_unsafe"},
{"walletcreatefundedpsbt", 3, "changePosition"},
{"walletcreatefundedpsbt", 3, "includeWatching"},
{"walletcreatefundedpsbt", 3, "lockUnspents"},
{"walletcreatefundedpsbt", 3, "feeRate"},
{"walletcreatefundedpsbt", 3, "subtractFeeFromOutputs"},
{"walletcreatefundedpsbt", 4, "bip32derivs"},
{"walletprocesspsbt", 1, "sign"},
{"walletprocesspsbt", 3, "bip32derivs"},
{"createpsbt", 0, "inputs"},
{"createpsbt", 1, "outputs"},
{"createpsbt", 2, "locktime"},
{"combinepsbt", 0, "txs"},
{"joinpsbts", 0, "txs"},
{"finalizepsbt", 1, "extract"},
{"converttopsbt", 1, "permitsigdata"},
{"gettxout", 1, "n"},
{"gettxout", 2, "include_mempool"},
{"gettxoutproof", 0, "txids"},
{"gettxoutsetinfo", 1, "hash_or_height"},
{"gettxoutsetinfo", 2, "use_index"},
{"lockunspent", 0, "unlock"},
{"lockunspent", 1, "transactions"},
{"send", 0, "outputs"},
{"send", 1, "options"},
{"send", 1, "add_inputs"},
{"send", 1, "include_unsafe"},
{"send", 1, "add_to_wallet"},
{"send", 1, "change_position"},
{"send", 1, "fee_rate"},
{"send", 1, "include_watching"},
{"send", 1, "inputs"},
{"send", 1, "locktime"},
{"send", 1, "lock_unspents"},
{"send", 1, "psbt"},
{"send", 1, "subtract_fee_from_outputs"},
{"importprivkey", 2, "rescan"},
{"importaddress", 2, "rescan"},
{"importaddress", 3, "p2sh"},
{"importpubkey", 2, "rescan"},
{"importmulti", 0, "requests"},
{"importmulti", 1, "options"},
{"importmulti", 1, "rescan"},
{"importdescriptors", 0, "requests"},
{"verifychain", 0, "checklevel"},
{"verifychain", 1, "nblocks"},
{"getblockstats", 0, "hash_or_height"},
{"getblockstats", 1, "stats"},
{"pruneblockchain", 0, "height"},
{"keypoolrefill", 0, "newsize"},
{"getrawmempool", 0, "verbose"},
{"getrawmempool", 1, "mempool_sequence"},
{"prioritisetransaction", 1, "dummy"},
{"prioritisetransaction", 2, "fee_delta"},
{"setban", 2, "bantime"},
{"setban", 3, "absolute"},
{"setnetworkactive", 0, "state"},
{"setwalletflag", 1, "value"},
{"getmempoolancestors", 1, "verbose"},
{"getmempooldescendants", 1, "verbose"},
{"disconnectnode", 1, "nodeid"},
{"logging", 0, "include"},
{"logging", 1, "exclude"},
{"upgradewallet", 0, "version"},
// Echo with conversion (For testing only)
{"echojson", 0, "arg0"},
{"echojson", 1, "arg1"},
{"echojson", 2, "arg2"},
{"echojson", 3, "arg3"},
{"echojson", 4, "arg4"},
{"echojson", 5, "arg5"},
{"echojson", 6, "arg6"},
{"echojson", 7, "arg7"},
{"echojson", 8, "arg8"},
{"echojson", 9, "arg9"},
{"rescanblockchain", 0, "start_height"},
{"rescanblockchain", 1, "stop_height"},
{"createwallet", 1, "disable_private_keys"},
{"createwallet", 2, "blank"},
{"createwallet", 4, "avoid_reuse"},
{"createwallet", 5, "descriptors"},
{"createwallet", 6, "load_on_startup"},
{"restorewallet", 2, "load_on_startup"},
{"loadwallet", 1, "load_on_startup"},
{"unloadwallet", 1, "load_on_startup"},
{"getnodeaddresses", 0, "count"},
{"addpeeraddress", 1, "port"},
{"addpeeraddress", 2, "tried"},
{"stop", 0, "wait"},
{"createwallettransaction", 1, "amount"},
// Avalanche
{"addavalanchenode", 0, "nodeid"},
{"buildavalancheproof", 0, "sequence"},
{"buildavalancheproof", 1, "expiration"},
{"buildavalancheproof", 3, "stakes"},
{"getremoteproofs", 0, "nodeid"},
{"getstakingreward", 1, "recompute"},
{"setstakingreward", 2, "append"},
+ {"setflakyproof", 1, "flaky"},
};
/**
* Parse string to UniValue or throw runtime_error if string contains invalid
* JSON
*/
static UniValue Parse(std::string_view raw) {
UniValue parsed;
if (!parsed.read(raw)) {
throw std::runtime_error(tfm::format("Error parsing JSON: %s", raw));
}
return parsed;
}
class CRPCConvertTable {
private:
std::set<std::pair<std::string, int>> members;
std::set<std::pair<std::string, std::string>> membersByName;
public:
CRPCConvertTable();
/**
* Return arg_value as UniValue, and first parse it if it is a non-string
* parameter
*/
UniValue ArgToUniValue(std::string_view arg_value,
const std::string &method, int param_idx) {
return members.count({method, param_idx}) > 0 ? Parse(arg_value)
: arg_value;
}
/**
* Return arg_value as UniValue, and first parse it if it is a non-string
* parameter
*/
UniValue ArgToUniValue(std::string_view arg_value,
const std::string &method,
                           const std::string &param_name) {
return membersByName.count({method, param_name}) > 0 ? Parse(arg_value)
: arg_value;
}
};
CRPCConvertTable::CRPCConvertTable() {
for (const auto &cp : vRPCConvertParams) {
members.emplace(cp.methodName, cp.paramIdx);
membersByName.emplace(cp.methodName, cp.paramName);
}
}
static CRPCConvertTable rpcCvtTable;
UniValue RPCConvertValues(const std::string &strMethod,
const std::vector<std::string> &strParams) {
UniValue params(UniValue::VARR);
for (unsigned int idx = 0; idx < strParams.size(); idx++) {
std::string_view value{strParams[idx]};
params.push_back(rpcCvtTable.ArgToUniValue(value, strMethod, idx));
}
return params;
}
UniValue RPCConvertNamedValues(const std::string &strMethod,
const std::vector<std::string> &strParams) {
UniValue params(UniValue::VOBJ);
UniValue positional_args{UniValue::VARR};
for (std::string_view s : strParams) {
size_t pos = s.find('=');
if (pos == std::string::npos) {
positional_args.push_back(rpcCvtTable.ArgToUniValue(
s, strMethod, positional_args.size()));
continue;
}
std::string name{s.substr(0, pos)};
std::string_view value{s.substr(pos + 1)};
// Intentionally overwrite earlier named values with later ones as a
// convenience for scripts and command line users that want to merge
// options.
params.pushKV(name, rpcCvtTable.ArgToUniValue(value, strMethod, name));
}
if (!positional_args.empty()) {
// Use pushKVEnd instead of pushKV to avoid overwriting an explicit
// "args" value with an implicit one. Let the RPC server handle the
// request as given.
params.pushKVEnd("args", positional_args);
}
return params;
}
diff --git a/test/functional/abc_mining_stakingrewards.py b/test/functional/abc_mining_stakingrewards.py
index ca45d8bf7..b8a26a5be 100644
--- a/test/functional/abc_mining_stakingrewards.py
+++ b/test/functional/abc_mining_stakingrewards.py
@@ -1,323 +1,342 @@
# Copyright (c) 2023 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining with staking rewards
"""
import time
from decimal import Decimal
from test_framework.address import ADDRESS_ECREG_UNSPENDABLE
from test_framework.avatools import can_find_inv_in_poll, get_ava_p2p_interface
from test_framework.messages import XEC, AvalancheProofVoteResponse
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
uint256_hex,
)
QUORUM_NODE_COUNT = 16
STAKING_REWARDS_COINBASE_RATIO_PERCENT = 10
class AbcMiningStakingRewardsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-avaproofstakeutxodustthreshold=1000000",
"-avaproofstakeutxoconfirmations=1",
"-avacooldown=0",
"-avaminquorumstake=0",
"-avaminavaproofsnodecount=0",
"-whitelist=noban@127.0.0.1",
"-avalanchestakingrewards=1",
],
]
def run_test(self):
node = self.nodes[0]
now = int(time.time())
node.setmocktime(now)
# Build a quorum
quorum = [get_ava_p2p_interface(self, node) for _ in range(QUORUM_NODE_COUNT)]
assert node.getavalancheinfo()["ready_to_poll"] is True
now += 90 * 60 + 1
node.setmocktime(now)
invalid_block_hash = "0" * 63
assert_raises_rpc_error(
-8,
f"blockhash must be of length 64 (not 63, for '{invalid_block_hash}')",
node.getstakingreward,
invalid_block_hash,
)
assert_raises_rpc_error(
-8,
f"blockhash must be of length 64 (not 63, for '{invalid_block_hash}')",
node.setstakingreward,
invalid_block_hash,
"76a914000000000000000000000000000000000000000088ac",
)
invalid_block_hash = "0" * 64
assert_raises_rpc_error(
-8,
f"Block not found: {invalid_block_hash}",
node.getstakingreward,
invalid_block_hash,
)
assert_raises_rpc_error(
-8,
f"Block not found: {invalid_block_hash}",
node.setstakingreward,
invalid_block_hash,
"76a914000000000000000000000000000000000000000088ac",
)
def get_coinbase(blockhash):
return node.getblock(blockhash, 2)["tx"][0]
tiphash = node.getbestblockhash()
coinbase = get_coinbase(tiphash)
block_reward = sum([vout["value"] for vout in coinbase["vout"]])
self.log.info(
"Staking rewards not ready yet, check getblocktemplate lacks the staking rewards data"
)
assert_raises_rpc_error(
-32603,
f"Unable to determine a staking reward winner for block {tiphash}",
node.getstakingreward,
tiphash,
)
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert "coinbasetxn" in gbt
assert "stakingrewards" not in gbt["coinbasetxn"]
self.log.info(
"Staking rewards not ready yet, check the miner doesn't produce the staking rewards output"
)
tiphash = self.generate(node, 1)[-1]
coinbase = get_coinbase(tiphash)
assert_equal(len(coinbase["vout"]), 1)
assert_raises_rpc_error(
-32603,
f"Unable to determine a staking reward winner for block {tiphash}",
node.getstakingreward,
tiphash,
)
self.log.info(
"Staking rewards are computed, check the block template returns the staking rewards data"
)
def wait_for_finalized_proof(proofid):
def finalize_proof(proofid):
can_find_inv_in_poll(
quorum, proofid, response=AvalancheProofVoteResponse.ACTIVE
)
return node.getrawavalancheproof(uint256_hex(proofid)).get(
"finalized", False
)
self.wait_until(lambda: finalize_proof(proofid))
for peer in quorum:
wait_for_finalized_proof(peer.proof.proofid)
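# With every quorum proof finalized, the node can select a staking reward
# winner for the blocks that follow.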
tiphash = self.generate(node, 1)[-1]
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert "coinbasetxn" in gbt
assert "stakingrewards" in gbt["coinbasetxn"]
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
reward = node.getstakingreward(tiphash)
assert_equal(len(reward), 1)
assert "proofid" in reward[0]
proofid = reward[0]["proofid"]
assert proofid in [uint256_hex(peer.proof.proofid) for peer in quorum]
assert_equal(
reward,
[
{
"proofid": proofid,
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
],
)
self.log.info(
"Staking rewards are computed, check the miner produces the staking rewards output"
)
tiphash = self.generate(node, 1)[-1]
coinbase = get_coinbase(tiphash)
assert_greater_than_or_equal(len(coinbase["vout"]), 2)
assert_equal(
coinbase["vout"][-1]["value"],
Decimal(block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100),
)
assert_equal(
coinbase["vout"][-1]["scriptPubKey"]["hex"],
"76a914000000000000000000000000000000000000000088ac",
)
self.log.info("Override the staking reward via RPC")
assert node.setstakingreward(
tiphash, "76a914000000000000000000000000000000000000000188ac"
)
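# A manually set winner carries an all-zero proofid, unlike a computed winner
# whose proofid matches one of the quorum proofs.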
assert_equal(
node.getstakingreward(tiphash),
[
{
"proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
],
)
# Append another acceptable winner
assert node.setstakingreward(
tiphash,
"76a914000000000000000000000000000000000000000288ac",
True,
)
assert_equal(
node.getstakingreward(tiphash),
[
{
"proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
{
"proofid": "0000000000000000000000000000000000000000000000000000000000000000",
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000002 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000288ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqgdmg7vcrr"
],
},
],
)
# The block template always uses the first acceptable winner in the list
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000001 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000188ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [
"ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqyx0q3yvg0"
],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
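# Repeatedly overriding the winner: the most recent payout script becomes the
# first entry returned by getstakingreward and the one used by getblocktemplate.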
for i in range(2, 10):
script_hex = f"76a914{i:0{40}x}88ac"
assert node.setstakingreward(tiphash, script_hex)
assert_equal(node.getstakingreward(tiphash)[0]["hex"], script_hex)
gbt = node.getblocktemplate()
assert_equal(
gbt["coinbasetxn"]["stakingrewards"]["payoutscript"]["hex"], script_hex
)
self.log.info("Recompute the staking reward")
reward = node.getstakingreward(blockhash=tiphash, recompute=True)
assert_equal(len(reward), 1)
assert "proofid" in reward[0]
proofid = reward[0]["proofid"]
assert proofid in [uint256_hex(peer.proof.proofid) for peer in quorum]
assert_equal(
reward,
[
{
"proofid": proofid,
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
],
)
gbt = node.getblocktemplate()
assert_equal(gbt["previousblockhash"], tiphash)
assert_equal(
gbt["coinbasetxn"]["stakingrewards"],
{
"payoutscript": {
"asm": "OP_DUP OP_HASH160 0000000000000000000000000000000000000000 OP_EQUALVERIFY OP_CHECKSIG",
"hex": "76a914000000000000000000000000000000000000000088ac",
"reqSigs": 1,
"type": "pubkeyhash",
"addresses": [ADDRESS_ECREG_UNSPENDABLE],
},
"minimumvalue": Decimal(
block_reward * STAKING_REWARDS_COINBASE_RATIO_PERCENT // 100 * XEC
),
},
)
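+ # Flag each reported winner's proof as flaky: the node then also accepts an
+ # alternative winner, so getstakingreward returns one more entry per flaky proof.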
+ proofids = []
+ for i in range(1, QUORUM_NODE_COUNT):
+ reward = node.getstakingreward(tiphash)
+ assert_equal(len(reward), i)
+
+ last_proofid = reward[-1]["proofid"]
+ assert node.setflakyproof(last_proofid, True)
+
+ proofids.append(last_proofid)
+
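+ # Un-flag the proofs in reverse order and check the list of acceptable
+ # winners shrinks back down to the single original winner.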
+ for i in range(QUORUM_NODE_COUNT, 1, -1):
+ reward = node.getstakingreward(tiphash)
+ assert_equal(len(reward), i)
+
+ last_proofid = proofids.pop()
+ assert node.setflakyproof(last_proofid, False)
+
+ assert_equal(len(node.getstakingreward(tiphash)), 1)
+
if __name__ == "__main__":
AbcMiningStakingRewardsTest().main()