diff --git a/src/avalanche/peermanager.cpp b/src/avalanche/peermanager.cpp index f3e5ed30b..3f38856a1 100644 --- a/src/avalanche/peermanager.cpp +++ b/src/avalanche/peermanager.cpp @@ -1,498 +1,502 @@ // Copyright (c) 2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include // For ChainstateActive() #include namespace avalanche { static bool isOrphanState(const ProofValidationState &state) { return state.GetResult() == ProofValidationResult::MISSING_UTXO || state.GetResult() == ProofValidationResult::HEIGHT_MISMATCH; } bool PeerManager::addNode(NodeId nodeid, const std::shared_ptr &proof, const Delegation &delegation) { auto it = fetchOrCreatePeer(proof); if (it == peers.end()) { return false; } const PeerId peerid = it->peerid; DelegationState state; CPubKey pubkey; if (!delegation.verify(state, pubkey)) { return false; } auto nit = nodes.find(nodeid); if (nit == nodes.end()) { if (!nodes.emplace(nodeid, peerid, std::move(pubkey)).second) { return false; } } else { const PeerId oldpeerid = nit->peerid; if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; n.pubkey = std::move(pubkey); })) { return false; } // We actually have this node already, we need to update it. bool success = removeNodeFromPeer(peers.find(oldpeerid)); assert(success); // Make sure it is not invalidated. it = peers.find(peerid); } bool success = addNodeToPeer(it); assert(success); return true; } bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) { assert(it != peers.end()); return peers.modify(it, [&](Peer &p) { if (p.node_count++ > 0) { // We are done. return; } // We ned to allocate this peer. p.index = uint32_t(slots.size()); const uint32_t score = p.getScore(); const uint64_t start = slotCount; slots.emplace_back(start, score, it->peerid); slotCount = start + score; }); } bool PeerManager::removeNode(NodeId nodeid) { auto it = nodes.find(nodeid); if (it == nodes.end()) { return false; } const PeerId peerid = it->peerid; nodes.erase(it); // Keep the track of the reference count. bool success = removeNodeFromPeer(peers.find(peerid)); assert(success); return true; } bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count) { assert(it != peers.end()); assert(count <= it->node_count); if (count == 0) { // This is a NOOP. return false; } const uint32_t new_count = it->node_count - count; if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) { return false; } if (new_count > 0) { // We are done. return true; } // There are no more node left, we need to cleanup. const size_t i = it->index; assert(i < slots.size()); if (i + 1 == slots.size()) { slots.pop_back(); slotCount = slots.empty() ? 
0 : slots.back().getStop(); } else { fragmentation += slots[i].getScore(); slots[i] = slots[i].withPeerId(NO_PEER); } return true; } bool PeerManager::forNode(NodeId nodeid, std::function func) const { auto it = nodes.find(nodeid); return it != nodes.end() && func(*it); } bool PeerManager::updateNextRequestTime(NodeId nodeid, TimePoint timeout) { auto it = nodes.find(nodeid); if (it == nodes.end()) { return false; } return nodes.modify(it, [&](Node &n) { n.nextRequestTime = timeout; }); } NodeId PeerManager::selectNode() { for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) { const PeerId p = selectPeer(); // If we cannot find a peer, it may be due to the fact that it is // unlikely due to high fragmentation, so compact and retry. if (p == NO_PEER) { compact(); continue; } // See if that peer has an available node. auto &nview = nodes.get(); auto it = nview.lower_bound(boost::make_tuple(p, TimePoint())); if (it != nview.end() && it->peerid == p && it->nextRequestTime <= std::chrono::steady_clock::now()) { return it->nodeid; } } return NO_NODE; } void PeerManager::updatedBlockTip() { std::vector invalidPeers; std::vector> newOrphans; { LOCK(cs_main); const CCoinsViewCache &coins = ::ChainstateActive().CoinsTip(); for (const auto &p : peers) { ProofValidationState state; if (!p.proof->verify(state, coins)) { if (isOrphanState(state)) { newOrphans.push_back(p.proof); } invalidPeers.push_back(p.peerid); } } } orphanProofs.rescan(*this); for (auto &p : newOrphans) { orphanProofs.addProof(p); } for (const auto &pid : invalidPeers) { removePeer(pid); } } PeerId PeerManager::getPeerId(const std::shared_ptr &proof) { auto it = fetchOrCreatePeer(proof); return it == peers.end() ? NO_PEER : it->peerid; } std::shared_ptr PeerManager::getProof(const ProofId &proofid) const { auto &pview = peers.get(); auto it = pview.find(proofid); return it == pview.end() ? nullptr : it->proof; } Peer::Timestamp PeerManager::getProofTime(const ProofId &proofid) const { auto &pview = peers.get(); auto it = pview.find(proofid); return it == pview.end() ? Peer::Timestamp::max() : it->time; } PeerManager::PeerSet::iterator PeerManager::fetchOrCreatePeer(const std::shared_ptr &proof) { { // Check if we already know of that peer. auto &pview = peers.get(); auto it = pview.find(proof->getId()); if (it != pview.end()) { return peers.project<0>(it); } } // Check the proof's validity. ProofValidationState state; bool valid = [&](ProofValidationState &state) { LOCK(cs_main); const CCoinsViewCache &coins = ::ChainstateActive().CoinsTip(); return proof->verify(state, coins); }(state); if (!valid) { if (isOrphanState(state)) { orphanProofs.addProof(proof); } // Reject invalid proof. return peers.end(); } orphanProofs.removeProof(proof->getId()); // New peer means new peerid! const PeerId peerid = nextPeerId++; // Attach UTXOs to this proof. std::unordered_set conflicting_peerids; for (const auto &s : proof->getStakes()) { auto p = utxos.emplace(s.getStake().getUTXO(), peerid); if (!p.second) { // We have a collision with an existing proof. conflicting_peerids.insert(p.first->second); } } // For now, if there is a conflict, just ceanup the mess. if (conflicting_peerids.size() > 0) { for (const auto &s : proof->getStakes()) { auto it = utxos.find(s.getStake().getUTXO()); assert(it != utxos.end()); // We need to delete that one. if (it->second == peerid) { utxos.erase(it); } } return peers.end(); } // We have no peer for this proof, time to create it. 
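The slot bookkeeping above is what makes polling probability proportional to stake: addNodeToPeer gives each live peer a half-open range [start, start + score) and selectPeer later draws a uniformly random slot over the total slot count. A minimal standalone sketch of that idea, using assumed names (SlotSketch, pickPeer) rather than the real PeerManager types:

// Minimal standalone sketch (not the real PeerManager API): each live peer
// owns [start, start + score), so a uniform draw over the total slot count
// selects a peer with probability proportional to its proof score.
#include <cstdint>
#include <random>
#include <vector>

struct SlotSketch {
    uint64_t start;
    uint32_t score;
    int peerid;
    uint64_t stop() const { return start + score; }
};

int pickPeer(const std::vector<SlotSketch> &slots, uint64_t slotCount,
             std::mt19937_64 &rng) {
    if (slotCount == 0) {
        return -1;
    }
    std::uniform_int_distribution<uint64_t> dist(0, slotCount - 1);
    const uint64_t slot = dist(rng);
    for (const auto &s : slots) {
        if (slot >= s.start && slot < s.stop()) {
            return s.peerid;
        }
    }
    return -1; // Landed in a hole left by a removed peer.
}

int main() {
    // Peer 7 has twice the score of peer 3, so it is picked roughly twice as often.
    std::vector<SlotSketch> slots{{0, 100, 3}, {100, 200, 7}};
    std::mt19937_64 rng{42};
    return pickPeer(slots, 300, rng) >= 0 ? 0 : 1;
}

When a peer goes away, removeNodeFromPeer either pops the trailing slot or marks the range NO_PEER, and that unused space is the fragmentation that selectNode works around by calling compact().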
auto inserted = peers.emplace(peerid, proof); assert(inserted.second); return inserted.first; } bool PeerManager::removePeer(const PeerId peerid) { auto it = peers.find(peerid); if (it == peers.end()) { return false; } // Remove all nodes from this peer. removeNodeFromPeer(it, it->node_count); // Remove nodes associated with this peer, unless their timeout is still // active. This ensure that we don't overquery them in case they are // subsequently added to another peer. auto &nview = nodes.get(); nview.erase(nview.lower_bound(boost::make_tuple(peerid, TimePoint())), nview.upper_bound(boost::make_tuple( peerid, std::chrono::steady_clock::now()))); // Release UTXOs attached to this proof. for (const auto &s : it->proof->getStakes()) { bool deleted = utxos.erase(s.getStake().getUTXO()) > 0; assert(deleted); } peers.erase(it); return true; } PeerId PeerManager::selectPeer() const { if (slots.empty() || slotCount == 0) { return NO_PEER; } const uint64_t max = slotCount; for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) { size_t i = selectPeerImpl(slots, GetRand(max), max); if (i != NO_PEER) { return i; } } return NO_PEER; } uint64_t PeerManager::compact() { // There is nothing to compact. if (fragmentation == 0) { return 0; } std::vector newslots; newslots.reserve(peers.size()); uint64_t prevStop = 0; uint32_t i = 0; for (auto it = peers.begin(); it != peers.end(); it++) { if (it->node_count == 0) { continue; } newslots.emplace_back(prevStop, it->getScore(), it->peerid); prevStop = slots[i].getStop(); if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) { return 0; } } slots = std::move(newslots); const uint64_t saved = slotCount - prevStop; slotCount = prevStop; fragmentation = 0; return saved; } bool PeerManager::verify() const { uint64_t prevStop = 0; for (size_t i = 0; i < slots.size(); i++) { const Slot &s = slots[i]; // Slots must be in correct order. if (s.getStart() < prevStop) { return false; } prevStop = s.getStop(); // If this is a dead slot, then nothing more needs to be checked. if (s.getPeerId() == NO_PEER) { continue; } // We have a live slot, verify index. auto it = peers.find(s.getPeerId()); if (it == peers.end() || it->index != i) { return false; } } for (const auto &p : peers) { // Count node attached to this peer. const auto count_nodes = [&]() { size_t count = 0; auto &nview = nodes.get(); auto begin = nview.lower_bound(boost::make_tuple(p.peerid, TimePoint())); auto end = nview.upper_bound(boost::make_tuple(p.peerid + 1, TimePoint())); for (auto it = begin; it != end; ++it) { count++; } return count; }; if (p.node_count != count_nodes()) { return false; } // If there are no nodes attached to this peer, then we are done. if (p.node_count == 0) { continue; } // The index must point to a slot refering to this peer. if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) { return false; } // If the score do not match, same thing. if (slots[p.index].getScore() != p.getScore()) { return false; } } return true; } PeerId selectPeerImpl(const std::vector &slots, const uint64_t slot, const uint64_t max) { assert(slot <= max); size_t begin = 0, end = slots.size(); uint64_t bottom = 0, top = max; // Try to find the slot using dichotomic search. while ((end - begin) > 8) { // The slot we picked in not allocated. if (slot < bottom || slot >= top) { return NO_PEER; } // Guesstimate the position of the slot. size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom)); assert(begin <= i && i < end); // We have a match. 
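compact() above reclaims that fragmentation by rebuilding the slot vector and re-basing the surviving ranges back to back. A conceptual sketch under assumed names (SlotSketch, compactSketch), leaving out the peer-index updates the real code also performs:

// Conceptual sketch of compaction: drop slots whose peer is gone and
// re-pack the remaining ranges contiguously, returning the space saved.
#include <cstdint>
#include <utility>
#include <vector>

struct SlotSketch {
    uint64_t start;
    uint32_t score;
    int peerid;
};
static constexpr int NO_PEER_SKETCH = -1;

uint64_t compactSketch(std::vector<SlotSketch> &slots, uint64_t &slotCount) {
    std::vector<SlotSketch> packed;
    uint64_t prevStop = 0;
    for (const auto &s : slots) {
        if (s.peerid == NO_PEER_SKETCH) {
            continue; // Dead slot: this is the fragmentation being reclaimed.
        }
        packed.push_back({prevStop, s.score, s.peerid});
        prevStop += s.score;
    }
    const uint64_t saved = slotCount - prevStop;
    slots = std::move(packed);
    slotCount = prevStop;
    return saved;
}

int main() {
    std::vector<SlotSketch> slots{
        {0, 100, 1}, {100, 50, NO_PEER_SKETCH}, {150, 200, 2}};
    uint64_t slotCount = 350;
    const uint64_t saved = compactSketch(slots, slotCount);
    return (saved == 50 && slotCount == 300) ? 0 : 1;
}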
if (slots[i].contains(slot)) { return slots[i].getPeerId(); } // We undershooted. if (slots[i].precedes(slot)) { begin = i + 1; if (begin >= end) { return NO_PEER; } bottom = slots[begin].getStart(); continue; } // We overshooted. if (slots[i].follows(slot)) { end = i; top = slots[end].getStart(); continue; } // We have an unalocated slot. return NO_PEER; } // Enough of that nonsense, let fallback to linear search. for (size_t i = begin; i < end; i++) { // We have a match. if (slots[i].contains(slot)) { return slots[i].getPeerId(); } } // We failed to find a slot, retry. return NO_PEER; } std::vector PeerManager::getPeers() const { std::vector vpeers; for (auto &it : peers.get<0>()) { vpeers.emplace_back(it); } return vpeers; } std::vector PeerManager::getNodeIdsForPeer(PeerId peerId) const { std::vector nodeids; auto &nview = nodes.get(); auto nodeRange = nview.equal_range(peerId); for (auto it = nodeRange.first; it != nodeRange.second; ++it) { nodeids.emplace_back(it->nodeid); } return nodeids; } -bool PeerManager::isOrphan(const ProofId &id) { +bool PeerManager::isOrphan(const ProofId &id) const { return orphanProofs.getProof(id) != nullptr; } +std::shared_ptr PeerManager::getOrphan(const ProofId &id) const { + return orphanProofs.getProof(id); +} + } // namespace avalanche diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h index dc119ec3c..461c7de2d 100644 --- a/src/avalanche/peermanager.h +++ b/src/avalanche/peermanager.h @@ -1,218 +1,219 @@ // Copyright (c) 2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PEERMANAGER_H #define BITCOIN_AVALANCHE_PEERMANAGER_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace avalanche { /** * Maximum number of stakes in the orphanProofs. * Benchmarking on a consumer grade computer shows that 10000 stakes can be * verified in less than 1 second. 
*/ static constexpr size_t AVALANCHE_ORPHANPROOFPOOL_SIZE = 10000; class Delegation; struct Slot { private: uint64_t start; uint32_t score; PeerId peerid; public: Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn) : start(startIn), score(scoreIn), peerid(peeridIn) {} Slot withStart(uint64_t startIn) const { return Slot(startIn, score, peerid); } Slot withScore(uint64_t scoreIn) const { return Slot(start, scoreIn, peerid); } Slot withPeerId(PeerId peeridIn) const { return Slot(start, score, peeridIn); } uint64_t getStart() const { return start; } uint64_t getStop() const { return start + score; } uint32_t getScore() const { return score; } PeerId getPeerId() const { return peerid; } bool contains(uint64_t slot) const { return getStart() <= slot && slot < getStop(); } bool precedes(uint64_t slot) const { return slot >= getStop(); } bool follows(uint64_t slot) const { return getStart() > slot; } }; struct Peer { PeerId peerid; uint32_t index = -1; uint32_t node_count = 0; std::shared_ptr proof; using Timestamp = std::chrono::time_point; Timestamp time; Peer(PeerId peerid_, std::shared_ptr proof_) : peerid(peerid_), proof(std::move(proof_)), time(std::chrono::seconds(GetTime())) {} const ProofId &getProofId() const { return proof->getId(); } uint32_t getScore() const { return proof->getScore(); } }; struct proof_index { using result_type = ProofId; result_type operator()(const Peer &p) const { return p.proof->getId(); } }; struct next_request_time {}; class PeerManager { std::vector slots; uint64_t slotCount = 0; uint64_t fragmentation = 0; OrphanProofPool orphanProofs{AVALANCHE_ORPHANPROOFPOOL_SIZE}; /** * Several nodes can make an avalanche peer. In this case, all nodes are * considered interchangeable parts of the same peer. */ using PeerSet = boost::multi_index_container< Peer, boost::multi_index::indexed_by< // index by peerid boost::multi_index::hashed_unique< boost::multi_index::member>, // index by proof boost::multi_index::hashed_unique< boost::multi_index::tag, proof_index, SaltedProofIdHasher>>>; PeerId nextPeerId = 0; PeerSet peers; std::unordered_map utxos; using NodeSet = boost::multi_index_container< Node, boost::multi_index::indexed_by< // index by nodeid boost::multi_index::hashed_unique< boost::multi_index::member>, // sorted by peerid/nextRequestTime boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::composite_key< Node, boost::multi_index::member, boost::multi_index::member>>>>; NodeSet nodes; static constexpr int SELECT_PEER_MAX_RETRY = 3; static constexpr int SELECT_NODE_MAX_RETRY = 3; public: /** * Node API. */ bool addNode(NodeId nodeid, const std::shared_ptr &proof, const Delegation &delegation); bool removeNode(NodeId nodeid); bool forNode(NodeId nodeid, std::function func) const; bool updateNextRequestTime(NodeId nodeid, TimePoint timeout); /** * Randomly select a node to poll. */ NodeId selectNode(); /** * Update the peer set when a new block is connected. */ void updatedBlockTip(); /**************************************************** * Functions which are public for testing purposes. * ****************************************************/ /** * Provide the PeerId associated with the given proof. If the peer does not * exist, then it is created. */ PeerId getPeerId(const std::shared_ptr &proof); /** * Remove an existing peer. */ bool removePeer(const PeerId peerid); /** * Randomly select a peer to poll. */ PeerId selectPeer() const; /** * Trigger maintenance of internal data structures. 
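The PeerSet above relies on Boost.MultiIndex so the same Peer can be looked up either by peer id or by proof id. A reduced, self-contained sketch of that two-index pattern, with hypothetical MiniPeer and by_proofid names standing in for the real Peer, proof_index and SaltedProofIdHasher pieces:

// Reduced sketch of the two-index container pattern used by PeerSet.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/tag.hpp>
#include <cassert>
#include <string>

struct MiniPeer {
    int peerid;
    std::string proofid;
};

namespace bmi = boost::multi_index;
struct by_proofid {};

using MiniPeerSet = bmi::multi_index_container<
    MiniPeer,
    bmi::indexed_by<
        // Primary lookup by peer id.
        bmi::hashed_unique<bmi::member<MiniPeer, int, &MiniPeer::peerid>>,
        // Secondary lookup by proof id.
        bmi::hashed_unique<
            bmi::tag<by_proofid>,
            bmi::member<MiniPeer, std::string, &MiniPeer::proofid>>>>;

int main() {
    MiniPeerSet peers;
    peers.insert(MiniPeer{1, "proofA"});
    peers.insert(MiniPeer{2, "proofB"});

    // Look up by proof id, then hop back to the primary (peerid) index,
    // mirroring the peers.get / peers.project<0> dance in fetchOrCreatePeer().
    auto &pview = peers.get<by_proofid>();
    auto it = pview.find("proofB");
    assert(it != pview.end());
    auto main_it = peers.project<0>(it);
    assert(main_it->peerid == 2);
    return 0;
}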
* Returns how much slot space was saved after compaction. */ uint64_t compact(); /** * Perform consistency check on internal data structures. */ bool verify() const; // Accessors. uint64_t getSlotCount() const { return slotCount; } uint64_t getFragmentation() const { return fragmentation; } std::vector getPeers() const; std::vector getNodeIdsForPeer(PeerId peerId) const; std::shared_ptr getProof(const ProofId &proofid) const; Peer::Timestamp getProofTime(const ProofId &proofid) const; - bool isOrphan(const ProofId &id); + bool isOrphan(const ProofId &id) const; + std::shared_ptr getOrphan(const ProofId &id) const; private: PeerSet::iterator fetchOrCreatePeer(const std::shared_ptr &proof); bool addNodeToPeer(const PeerSet::iterator &it); bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count = 1); }; /** * This is an internal method that is exposed for testing purposes. */ PeerId selectPeerImpl(const std::vector &slots, const uint64_t slot, const uint64_t max); } // namespace avalanche #endif // BITCOIN_AVALANCHE_PEERMANAGER_H diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp index 65ec447a1..0149f7d9c 100644 --- a/src/avalanche/processor.cpp +++ b/src/avalanche/processor.cpp @@ -1,702 +1,707 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include // For DecodeSecret #include // For ::PeerManager #include #include #include #include #include #include #include #include /** * Run the avalanche event loop every 10ms. */ static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10}; // Unfortunately, the bitcoind codebase is full of global and we are kinda // forced into it here. std::unique_ptr g_avalanche; namespace avalanche { bool VoteRecord::registerVote(NodeId nodeid, uint32_t error) { // We just got a new vote, so there is one less inflight request. clearInflightRequest(); // We want to avoid having the same node voting twice in a quorum. if (!addNodeToQuorum(nodeid)) { return false; } /** * The result of the vote is determined from the error code. If the error * code is 0, there is no error and therefore the vote is yes. If there is * an error, we check the most significant bit to decide if the vote is a no * (for instance, the block is invalid) or is the vote inconclusive (for * instance, the queried node does not have the block yet). */ votes = (votes << 1) | (error == 0); consider = (consider << 1) | (int32_t(error) >= 0); /** * We compute the number of yes and/or no votes as follow: * * votes: 1010 * consider: 1100 * * yes votes: 1000 using votes & consider * no votes: 0100 using ~votes & consider */ bool yes = countBits(votes & consider & 0xff) > 6; if (!yes) { bool no = countBits(~votes & consider & 0xff) > 6; if (!no) { // The round is inconclusive. return false; } } // If the round is in agreement with previous rounds, increase confidence. if (isAccepted() == yes) { confidence += 2; return getConfidence() == AVALANCHE_FINALIZATION_SCORE; } // The round changed our state. We reset the confidence. confidence = yes; return true; } bool VoteRecord::addNodeToQuorum(NodeId nodeid) { if (nodeid == NO_NODE) { // Helpful for testing. return true; } // MMIX Linear Congruent Generator. const uint64_t r1 = 6364136223846793005 * uint64_t(nodeid) + 1442695040888963407; // Fibonacci hashing. 
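registerVote() above keeps two 8-bit sliding windows: votes records yes/no per round and consider records whether the round was conclusive, and a round only counts as a yes when more than 6 of the last 8 considered votes were yes. A small self-contained sketch of just that window arithmetic (countBits approximated with std::bitset; the confidence update is left out):

// Sketch of the 8-vote sliding window logic from VoteRecord::registerVote().
#include <bitset>
#include <cassert>
#include <cstdint>

static int countBits(uint8_t v) {
    return static_cast<int>(std::bitset<8>(v).count());
}

int main() {
    uint8_t votes = 0, consider = 0;
    // Feed 8 rounds: error code 0 means yes, MSB set means no,
    // other non-zero codes are inconclusive.
    const uint32_t errors[8] = {0, 0, 0, 0, 1, 0, 0, 0};
    for (uint32_t error : errors) {
        votes = (votes << 1) | (error == 0);
        consider = (consider << 1) | (int32_t(error) >= 0);
    }
    // 7 of the last 8 considered votes are yes, so the round is a "yes".
    assert(countBits(votes & consider & 0xff) > 6);
    // And there is no "no" majority.
    assert(!(countBits(~votes & consider & 0xff) > 6));
    return 0;
}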
const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed); // Combine and extract hash. const uint16_t h = (r1 + r2) >> 48; /** * Check if the node is in the filter. */ for (size_t i = 1; i < nodeFilter.size(); i++) { if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) { return false; } } /** * Add the node which just voted to the filter. */ nodeFilter[successfulVotes % nodeFilter.size()] = h; successfulVotes++; return true; } bool VoteRecord::registerPoll() const { uint8_t count = inflight.load(); while (count < AVALANCHE_MAX_INFLIGHT_POLL) { if (inflight.compare_exchange_weak(count, count + 1)) { return true; } } return false; } static bool IsWorthPolling(const CBlockIndex *pindex) { AssertLockHeld(cs_main); if (pindex->nStatus.isInvalid()) { // No point polling invalid blocks. return false; } if (::ChainstateActive().IsBlockFinalized(pindex)) { // There is no point polling finalized block. return false; } return true; } struct Processor::PeerData { std::shared_ptr proof; Delegation delegation; }; class Processor::NotificationsHandler : public interfaces::Chain::Notifications { Processor *m_processor; public: NotificationsHandler(Processor *p) : m_processor(p) {} void updatedBlockTip() override { LOCK(m_processor->cs_peerManager); if (m_processor->mustRegisterProof && !::ChainstateActive().IsInitialBlockDownload()) { m_processor->peerManager->getPeerId(m_processor->peerData->proof); m_processor->mustRegisterProof = false; } m_processor->peerManager->updatedBlockTip(); } }; Processor::Processor(interfaces::Chain &chain, CConnman *connmanIn, NodePeerManager *nodePeerManagerIn, std::unique_ptr peerDataIn, CKey sessionKeyIn) : connman(connmanIn), nodePeerManager(nodePeerManagerIn), queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT), round(0), peerManager(std::make_unique()), peerData(std::move(peerDataIn)), sessionKey(std::move(sessionKeyIn)), // Schedule proof registration at the first new block after IBD. // FIXME: get rid of this flag mustRegisterProof(!!peerData) { // Make sure we get notified of chain state changes. chainNotificationsHandler = chain.handleNotifications(std::make_shared(this)); } Processor::~Processor() { chainNotificationsHandler.reset(); stopEventLoop(); } std::unique_ptr Processor::MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain, CConnman *connman, NodePeerManager *nodePeerManager, bilingual_str &error) { std::unique_ptr peerData; CKey masterKey; CKey sessionKey; if (argsman.IsArgSet("-avasessionkey")) { sessionKey = DecodeSecret(argsman.GetArg("-avasessionkey", "")); if (!sessionKey.IsValid()) { error = _("the avalanche session key is invalid"); return nullptr; } } else { // Pick a random key for the session. 
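registerPoll() above is a bounded atomic counter: it only increments the in-flight count while it is below AVALANCHE_MAX_INFLIGHT_POLL, and compare_exchange_weak makes sure concurrent pollers cannot push it past the cap. A standalone sketch of that pattern, with an assumed tryAcquire name:

// Sketch of the bounded in-flight counter used by registerPoll().
#include <atomic>
#include <cassert>
#include <cstdint>

constexpr uint8_t kMaxInflight = 10; // mirrors AVALANCHE_MAX_INFLIGHT_POLL

bool tryAcquire(std::atomic<uint8_t> &inflight) {
    uint8_t count = inflight.load();
    while (count < kMaxInflight) {
        // compare_exchange_weak reloads `count` on failure, so the loop
        // re-checks the cap against the fresh value.
        if (inflight.compare_exchange_weak(count, count + 1)) {
            return true;
        }
    }
    return false;
}

int main() {
    std::atomic<uint8_t> inflight{0};
    for (int i = 0; i < kMaxInflight; i++) {
        assert(tryAcquire(inflight));
    }
    assert(!tryAcquire(inflight)); // The 11th acquisition is rejected.
    return 0;
}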
sessionKey.MakeNewKey(true); } if (argsman.IsArgSet("-avaproof")) { if (!argsman.IsArgSet("-avamasterkey")) { error = _( "the avalanche master key is missing for the avalanche proof"); return nullptr; } masterKey = DecodeSecret(argsman.GetArg("-avamasterkey", "")); if (!masterKey.IsValid()) { error = _("the avalanche master key is invalid"); return nullptr; } peerData = std::make_unique(); peerData->proof = std::make_shared(); if (!Proof::FromHex(*peerData->proof, argsman.GetArg("-avaproof", ""), error)) { // error is set by FromHex return nullptr; } ProofValidationState proof_state; if (!peerData->proof->verify(proof_state)) { switch (proof_state.GetResult()) { case ProofValidationResult::NO_STAKE: error = _("the avalanche proof has no stake"); return nullptr; case ProofValidationResult::DUST_THRESOLD: error = _("the avalanche proof stake is too low"); return nullptr; case ProofValidationResult::DUPLICATE_STAKE: error = _("the avalanche proof has duplicated stake"); return nullptr; case ProofValidationResult::INVALID_SIGNATURE: error = _("the avalanche proof has invalid stake signatures"); return nullptr; case ProofValidationResult::TOO_MANY_UTXOS: error = strprintf( _("the avalanche proof has too many utxos (max: %u)"), AVALANCHE_MAX_PROOF_STAKES); return nullptr; default: error = _("the avalanche proof is invalid"); return nullptr; } } // Generate the delegation to the session key. DelegationBuilder dgb(*peerData->proof); if (sessionKey.GetPubKey() != peerData->proof->getMaster()) { dgb.addLevel(masterKey, sessionKey.GetPubKey()); } peerData->delegation = dgb.build(); } // We can't use std::make_unique with a private constructor return std::unique_ptr( new Processor(chain, connman, nodePeerManager, std::move(peerData), std::move(sessionKey))); } bool Processor::addBlockToReconcile(const CBlockIndex *pindex) { bool isAccepted; { LOCK(cs_main); if (!IsWorthPolling(pindex)) { // There is no point polling this block. return false; } isAccepted = ::ChainActive().Contains(pindex); } return vote_records.getWriteView() ->insert(std::make_pair(pindex, VoteRecord(isAccepted))) .second; } bool Processor::isAccepted(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return false; } return it->second.isAccepted(); } int Processor::getConfidence(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return -1; } return it->second.getConfidence(); } namespace { /** * When using TCP, we need to sign all messages as the transport layer is * not secure. */ class TCPResponse { Response response; SchnorrSig sig; public: TCPResponse(Response responseIn, const CKey &key) : response(std::move(responseIn)) { CHashWriter hasher(SER_GETHASH, 0); hasher << response; const uint256 hash = hasher.GetHash(); // Now let's sign! if (!key.SignSchnorr(hash, sig)) { sig.fill(0); } } // serialization support SERIALIZE_METHODS(TCPResponse, obj) { READWRITE(obj.response, obj.sig); } }; } // namespace void Processor::sendResponse(CNode *pfrom, Response response) const { connman->PushMessage( pfrom, CNetMsgMaker(pfrom->GetCommonVersion()) .Make(NetMsgType::AVARESPONSE, TCPResponse(std::move(response), sessionKey))); } bool Processor::registerVotes(NodeId nodeid, const Response &response, std::vector &updates) { { // Save the time at which we can query again. LOCK(cs_peerManager); // FIXME: This will override the time even when we received an old stale // message. 
This should check that the message is indeed the most up to // date one before updating the time. peerManager->updateNextRequestTime( nodeid, std::chrono::steady_clock::now() + std::chrono::milliseconds(response.getCooldown())); } std::vector invs; { // Check that the query exists. auto w = queries.getWriteView(); auto it = w->find(std::make_tuple(nodeid, response.getRound())); if (it == w.end()) { nodePeerManager->Misbehaving(nodeid, 2, "unexpected-ava-response"); return false; } invs = std::move(it->invs); w->erase(it); } // Verify that the request and the vote are consistent. const std::vector &votes = response.GetVotes(); size_t size = invs.size(); if (votes.size() != size) { nodePeerManager->Misbehaving(nodeid, 100, "invalid-ava-response-size"); return false; } for (size_t i = 0; i < size; i++) { if (invs[i].hash != votes[i].GetHash()) { nodePeerManager->Misbehaving(nodeid, 100, "invalid-ava-response-content"); return false; } } std::map responseIndex; { LOCK(cs_main); for (const auto &v : votes) { auto pindex = LookupBlockIndex(BlockHash(v.GetHash())); if (!pindex) { // This should not happen, but just in case... continue; } if (!IsWorthPolling(pindex)) { // There is no point polling this block. continue; } responseIndex.insert(std::make_pair(pindex, v)); } } { // Register votes. auto w = vote_records.getWriteView(); for (const auto &p : responseIndex) { CBlockIndex *pindex = p.first; const Vote &v = p.second; auto it = w->find(pindex); if (it == w.end()) { // We are not voting on that item anymore. continue; } auto &vr = it->second; if (!vr.registerVote(nodeid, v.GetError())) { // This vote did not provide any extra information, move on. continue; } if (!vr.hasFinalized()) { // This item has note been finalized, so we have nothing more to // do. updates.emplace_back( pindex, vr.isAccepted() ? BlockUpdate::Status::Accepted : BlockUpdate::Status::Rejected); continue; } // We just finalized a vote. If it is valid, then let the caller // know. Either way, remove the item from the map. updates.emplace_back(pindex, vr.isAccepted() ? BlockUpdate::Status::Finalized : BlockUpdate::Status::Invalid); w->erase(it); } } return true; } bool Processor::addNode(NodeId nodeid, const std::shared_ptr &proof, const Delegation &delegation) { LOCK(cs_peerManager); return peerManager->addNode(nodeid, proof, delegation); } bool Processor::forNode(NodeId nodeid, std::function func) const { LOCK(cs_peerManager); return peerManager->forNode(nodeid, std::move(func)); } CPubKey Processor::getSessionPubKey() const { return sessionKey.GetPubKey(); } uint256 Processor::buildLocalSighash(CNode *pfrom) const { CHashWriter hasher(SER_GETHASH, 0); hasher << peerData->delegation.getId(); hasher << pfrom->GetLocalNonce(); hasher << pfrom->nRemoteHostNonce; hasher << pfrom->GetLocalExtraEntropy(); hasher << pfrom->nRemoteExtraEntropy; return hasher.GetHash(); } uint256 Processor::buildRemoteSighash(CNode *pfrom) const { CHashWriter hasher(SER_GETHASH, 0); hasher << pfrom->m_avalanche_state->delegation.getId(); hasher << pfrom->nRemoteHostNonce; hasher << pfrom->GetLocalNonce(); hasher << pfrom->nRemoteExtraEntropy; hasher << pfrom->GetLocalExtraEntropy(); return hasher.GetHash(); } bool Processor::sendHello(CNode *pfrom) const { if (!peerData) { // We do not have a delegation to advertise. return false; } // Now let's sign! 
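buildLocalSighash() and buildRemoteSighash() above hash the same five fields but enumerate the nonces and extra entropy in mirrored order, so the challenge one side signs equals the challenge the other side reconstructs from its own point of view. An illustrative sketch of that symmetry using a toy FNV-style hash in place of CHashWriter (all names and values here are made up):

// Toy illustration of the local/remote sighash symmetry.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static uint64_t fnv(std::initializer_list<uint64_t> values) {
    uint64_t h = 1469598103934665603ull;
    for (uint64_t v : values) {
        h ^= v;
        h *= 1099511628211ull;
    }
    return h;
}

// What *I* sign: my delegation id, my local nonce, the nonce the peer sent
// me, my local extra entropy, the entropy the peer sent me.
uint64_t localSighash(uint64_t dgid, uint64_t myNonce, uint64_t theirNonce,
                      uint64_t myEntropy, uint64_t theirEntropy) {
    return fnv({dgid, myNonce, theirNonce, myEntropy, theirEntropy});
}

// What the *peer* expects of me: the same fields, but what is "remote" on
// its side is "local" on mine, hence the swapped order in buildRemoteSighash().
uint64_t remoteSighash(uint64_t dgid, uint64_t theirNonce, uint64_t myNonce,
                       uint64_t theirEntropy, uint64_t myEntropy) {
    return fnv({dgid, theirNonce, myNonce, theirEntropy, myEntropy});
}

int main() {
    // Node A signs with its local view; node B checks with its remote view.
    const uint64_t challengeA = localSighash(7, 111, 222, 333, 444);
    const uint64_t challengeB = remoteSighash(7, 111, 222, 333, 444);
    assert(challengeA == challengeB);
    return 0;
}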
SchnorrSig sig; { const uint256 hash = buildLocalSighash(pfrom); if (!sessionKey.SignSchnorr(hash, sig)) { return false; } } connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()) .Make(NetMsgType::AVAHELLO, Hello(peerData->delegation, sig))); pfrom->AddKnownProof(peerData->delegation.getProofId()); return true; } bool Processor::addProof(const std::shared_ptr &proof) { LOCK(cs_peerManager); return !peerManager->getProof(proof->getId()) && peerManager->getPeerId(proof) != NO_PEER; } std::shared_ptr Processor::getProof(const ProofId &proofid) const { LOCK(cs_peerManager); return peerManager->getProof(proofid); } std::shared_ptr Processor::getLocalProof() const { return peerData ? peerData->proof : nullptr; } Peer::Timestamp Processor::getProofTime(const ProofId &proofid) const { LOCK(cs_peerManager); return peerManager->getProofTime(proofid); } +std::shared_ptr Processor::getOrphan(const ProofId &proofid) const { + LOCK(cs_peerManager); + return peerManager->getOrphan(proofid); +} + bool Processor::startEventLoop(CScheduler &scheduler) { return eventLoop.startEventLoop( scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP); } bool Processor::stopEventLoop() { return eventLoop.stopEventLoop(); } std::vector Processor::getInvsForNextPoll(bool forPoll) { std::vector invs; // First remove all blocks that are not worth polling. { LOCK(cs_main); auto w = vote_records.getWriteView(); for (auto it = w->begin(); it != w->end();) { const CBlockIndex *pindex = it->first; if (!IsWorthPolling(pindex)) { w->erase(it++); } else { ++it; } } } auto r = vote_records.getReadView(); for (const std::pair &p : reverse_iterate(r)) { // Check if we can run poll. const bool shouldPoll = forPoll ? p.second.registerPoll() : p.second.shouldPoll(); if (!shouldPoll) { continue; } // We don't have a decision, we need more votes. invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash()); if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) { // Make sure we do not produce more invs than specified by the // protocol. return invs; } } return invs; } NodeId Processor::getSuitableNodeToQuery() { LOCK(cs_peerManager); return peerManager->selectNode(); } void Processor::clearTimedoutRequests() { auto now = std::chrono::steady_clock::now(); std::map timedout_items{}; { // Clear expired requests. auto w = queries.getWriteView(); auto it = w->get().begin(); while (it != w->get().end() && it->timeout < now) { for (const auto &i : it->invs) { timedout_items[i]++; } w->get().erase(it++); } } if (timedout_items.empty()) { return; } // In flight request accounting. for (const auto &p : timedout_items) { const CInv &inv = p.first; assert(inv.type == MSG_BLOCK); CBlockIndex *pindex; { LOCK(cs_main); pindex = LookupBlockIndex(BlockHash(inv.hash)); if (!pindex) { continue; } } auto w = vote_records.getWriteView(); auto it = w->find(pindex); if (it == w.end()) { continue; } it->second.clearInflightRequest(p.second); } } void Processor::runEventLoop() { // Don't do Avalanche while node is IBD'ing if (::ChainstateActive().IsInitialBlockDownload()) { return; } // First things first, check if we have requests that timed out and clear // them. clearTimedoutRequests(); // Make sure there is at least one suitable node to query before gathering // invs. 
NodeId nodeid = getSuitableNodeToQuery(); if (nodeid == NO_NODE) { return; } std::vector invs = getInvsForNextPoll(); if (invs.empty()) { return; } do { /** * If we lost contact to that node, then we remove it from nodeids, but * never add the request to queries, which ensures bad nodes get cleaned * up over time. */ bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) { uint64_t current_round = round++; { // Compute the time at which this requests times out. auto timeout = std::chrono::steady_clock::now() + queryTimeoutDuration; // Register the query. queries.getWriteView()->insert( {pnode->GetId(), current_round, timeout, invs}); // Set the timeout. LOCK(cs_peerManager); peerManager->updateNextRequestTime(pnode->GetId(), timeout); } // Send the query to the node. connman->PushMessage( pnode, CNetMsgMaker(pnode->GetCommonVersion()) .Make(NetMsgType::AVAPOLL, Poll(current_round, std::move(invs)))); return true; }); // Success! if (hasSent) { return; } { // This node is obsolete, delete it. LOCK(cs_peerManager); peerManager->removeNode(nodeid); } // Get next suitable node to try again nodeid = getSuitableNodeToQuery(); } while (nodeid != NO_NODE); } std::vector Processor::getPeers() const { LOCK(cs_peerManager); return peerManager->getPeers(); } std::vector Processor::getNodeIdsForPeer(PeerId peerId) const { LOCK(cs_peerManager); return peerManager->getNodeIdsForPeer(peerId); } } // namespace avalanche diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h index c49b1d39d..9d2dd0b64 100644 --- a/src/avalanche/processor.h +++ b/src/avalanche/processor.h @@ -1,330 +1,331 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PROCESSOR_H #define BITCOIN_AVALANCHE_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class ArgsManager; class Config; class CBlockIndex; class CScheduler; class PeerManager; struct bilingual_str; using NodePeerManager = PeerManager; /** * Finalization score. */ static constexpr int AVALANCHE_FINALIZATION_SCORE = 128; /** * Maximum item that can be polled at once. */ static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16; /** * How long before we consider that a query timed out. */ static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{ 10000}; /** * How many inflight requests can exist for one item. */ static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10; namespace avalanche { class Delegation; class PeerManager; class Proof; /** * Vote history. */ struct VoteRecord { private: // confidence's LSB bit is the result. Higher bits are actual confidence // score. uint16_t confidence = 0; // Historical record of votes. uint8_t votes = 0; // Each bit indicate if the vote is to be considered. uint8_t consider = 0; // How many in flight requests exists for this element. mutable std::atomic inflight{0}; // Seed for pseudorandom operations. const uint32_t seed = 0; // Track how many successful votes occured. uint32_t successfulVotes = 0; // Track the nodes which are part of the quorum. 
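The confidence field declared below packs two things into one uint16_t: bit 0 is the current accepted/rejected result and the remaining bits are the confidence counter, which is why registerVote() adds 2 to bump the counter and assigns 0 or 1 to reset it. A small sketch of that encoding:

// Sketch of the packed confidence encoding used by VoteRecord.
#include <cassert>
#include <cstdint>

int main() {
    uint16_t confidence = 0;

    auto isAccepted = [&] { return (confidence & 0x01) != 0; };
    auto getConfidence = [&] { return confidence >> 1; };

    confidence = 1;  // A round flipped the state to "accepted", counter = 0.
    confidence += 2; // An agreeing round: counter 0 -> 1, still accepted.
    confidence += 2; // Another agreeing round: counter 1 -> 2.
    assert(isAccepted());
    assert(getConfidence() == 2);

    confidence = 0;  // A round flipped the state to "rejected": counter resets.
    assert(!isAccepted());
    assert(getConfidence() == 0);
    return 0;
}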
std::array nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}}; public: explicit VoteRecord(bool accepted) : confidence(accepted) {} /** * Copy semantic */ VoteRecord(const VoteRecord &other) : confidence(other.confidence), votes(other.votes), consider(other.consider), inflight(other.inflight.load()), successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) { } /** * Vote accounting facilities. */ bool isAccepted() const { return confidence & 0x01; } uint16_t getConfidence() const { return confidence >> 1; } bool hasFinalized() const { return getConfidence() >= AVALANCHE_FINALIZATION_SCORE; } /** * Register a new vote for an item and update confidence accordingly. * Returns true if the acceptance or finalization state changed. */ bool registerVote(NodeId nodeid, uint32_t error); /** * Register that a request is being made regarding that item. * The method is made const so that it can be accessed via a read only view * of vote_records. It's not a problem as it is made thread safe. */ bool registerPoll() const; /** * Return if this item is in condition to be polled at the moment. */ bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; } /** * Clear `count` inflight requests. */ void clearInflightRequest(uint8_t count = 1) { inflight -= count; } private: /** * Add the node to the quorum. * Returns true if the node was added, false if the node already was in the * quorum. */ bool addNodeToQuorum(NodeId nodeid); }; class BlockUpdate { union { CBlockIndex *pindex; uintptr_t raw; }; static const size_t STATUS_BITS = 2; static const uintptr_t MASK = (1 << STATUS_BITS) - 1; static_assert( alignof(CBlockIndex) >= (1 << STATUS_BITS), "CBlockIndex alignement doesn't allow for Status to be stored."); public: enum Status : uint8_t { Invalid, Rejected, Accepted, Finalized, }; BlockUpdate(CBlockIndex *pindexIn, Status statusIn) : pindex(pindexIn) { raw |= statusIn; } Status getStatus() const { return Status(raw & MASK); } CBlockIndex *getBlockIndex() { return reinterpret_cast(raw & ~MASK); } const CBlockIndex *getBlockIndex() const { return const_cast(this)->getBlockIndex(); } }; using BlockVoteMap = std::map; struct query_timeout {}; namespace { struct AvalancheTest; } class Processor { CConnman *connman; NodePeerManager *nodePeerManager; std::chrono::milliseconds queryTimeoutDuration; /** * Blocks to run avalanche on. */ RWCollection vote_records; /** * Keep track of peers and queries sent. */ std::atomic round; /** * Keep track of the peers and associated infos. */ mutable Mutex cs_peerManager; std::unique_ptr peerManager GUARDED_BY(cs_peerManager); struct Query { NodeId nodeid; uint64_t round; TimePoint timeout; /** * We declare this as mutable so it can be modified in the multi_index. * This is ok because we do not use this field to index in anyway. * * /!\ Do not use any mutable field as index. */ mutable std::vector invs; }; using QuerySet = boost::multi_index_container< Query, boost::multi_index::indexed_by< // index by nodeid/round boost::multi_index::hashed_unique, boost::multi_index::member>>, // sorted by timeout boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::member>>>; RWCollection queries; /** Data required to participate. */ struct PeerData; std::unique_ptr peerData; CKey sessionKey; /** Event loop machinery. */ EventLoop eventLoop; /** Registered interfaces::Chain::Notifications handler. 
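BlockUpdate below stores a 2-bit Status in the low bits of the CBlockIndex pointer, which the static_assert on alignof(CBlockIndex) guarantees are otherwise zero. A standalone sketch of that pointer-tagging trick with a stand-in Item type (names are illustrative, not the real class):

// Sketch of the pointer-tagging trick used by BlockUpdate.
#include <cassert>
#include <cstdint>

struct alignas(4) Item { int value = 0; };

class TaggedPtr {
    uintptr_t raw;
    static constexpr uintptr_t MASK = 0x3; // The low 2 bits hold the status.

public:
    enum Status : uint8_t { Invalid, Rejected, Accepted, Finalized };

    TaggedPtr(Item *p, Status s) : raw(reinterpret_cast<uintptr_t>(p) | s) {}
    Status getStatus() const { return Status(raw & MASK); }
    Item *getItem() const { return reinterpret_cast<Item *>(raw & ~MASK); }
};

int main() {
    Item i;
    TaggedPtr t(&i, TaggedPtr::Finalized);
    // The pointer and the status coexist in a single word.
    assert(t.getItem() == &i);
    assert(t.getStatus() == TaggedPtr::Finalized);
    return 0;
}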
*/ class NotificationsHandler; std::unique_ptr chainNotificationsHandler; /** * Flag indicating that the proof must be registered at first new block * after IBD */ bool mustRegisterProof = false; Processor(interfaces::Chain &chain, CConnman *connmanIn, NodePeerManager *nodePeerManagerIn, std::unique_ptr peerDataIn, CKey sessionKeyIn); public: ~Processor(); static std::unique_ptr MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain, CConnman *connman, NodePeerManager *nodePeerManager, bilingual_str &error); void setQueryTimeoutDuration(std::chrono::milliseconds d) { queryTimeoutDuration = d; } bool addBlockToReconcile(const CBlockIndex *pindex); bool isAccepted(const CBlockIndex *pindex) const; int getConfidence(const CBlockIndex *pindex) const; // TDOD: Refactor the API to remove the dependency on avalanche/protocol.h void sendResponse(CNode *pfrom, Response response) const; bool registerVotes(NodeId nodeid, const Response &response, std::vector &updates); bool addNode(NodeId nodeid, const std::shared_ptr &proof, const Delegation &delegation); bool forNode(NodeId nodeid, std::function func) const; CPubKey getSessionPubKey() const; bool sendHello(CNode *pfrom) const; /** * Build and return the challenge whose signature we expect a peer to * include in his AVAHELLO message. */ uint256 buildRemoteSighash(CNode *pfrom) const; bool addProof(const std::shared_ptr &proof); std::shared_ptr getProof(const ProofId &proofid) const; std::shared_ptr getLocalProof() const; Peer::Timestamp getProofTime(const ProofId &proofid) const; + std::shared_ptr getOrphan(const ProofId &proofid) const; /* * Return whether the avalanche service flag should be set. */ bool isAvalancheServiceAvailable() { return !!peerData; } std::vector getPeers() const; std::vector getNodeIdsForPeer(PeerId peerId) const; bool startEventLoop(CScheduler &scheduler); bool stopEventLoop(); private: void runEventLoop(); void clearTimedoutRequests(); std::vector getInvsForNextPoll(bool forPoll = true); NodeId getSuitableNodeToQuery(); /** * Build and return the challenge whose signature is included in the * AVAHELLO message that we send to a peer. */ uint256 buildLocalSighash(CNode *pfrom) const; friend struct ::avalanche::AvalancheTest; }; } // namespace avalanche #endif // BITCOIN_AVALANCHE_PROCESSOR_H diff --git a/src/rpc/avalanche.cpp b/src/rpc/avalanche.cpp index 1d14301f9..fc24b10cb 100644 --- a/src/rpc/avalanche.cpp +++ b/src/rpc/avalanche.cpp @@ -1,550 +1,603 @@ // Copyright (c) 2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static UniValue getavalanchekey(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "getavalanchekey", "Returns the key used to sign avalanche messages.\n", {}, RPCResult{RPCResult::Type::STR_HEX, "", ""}, RPCExamples{HelpExampleRpc("getavalanchekey", "")}, } .Check(request); if (!g_avalanche) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); } return HexStr(g_avalanche->getSessionPubKey()); } static CPubKey ParsePubKey(const UniValue ¶m) { const std::string keyHex = param.get_str(); if ((keyHex.length() != 2 * CPubKey::COMPRESSED_SIZE && keyHex.length() != 2 * CPubKey::SIZE) || !IsHex(keyHex)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Invalid public key: %s\n", keyHex)); } return HexToPubKey(keyHex); } static UniValue addavalanchenode(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "addavalanchenode", "Add a node in the set of peers to poll for avalanche.\n", { {"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO, "Node to be added to avalanche."}, {"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The public key of the node."}, {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Proof that the node is not a sybil."}, }, RPCResult{RPCResult::Type::BOOL, "success", "Whether the addition succeeded or not."}, RPCExamples{ HelpExampleRpc("addavalanchenode", "5, \"\", \"\"")}, } .Check(request); RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VSTR, UniValue::VSTR}); if (!g_avalanche) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); } const NodeId nodeid = request.params[0].get_int64(); const CPubKey key = ParsePubKey(request.params[1]); auto proof = std::make_shared(); bilingual_str error; if (!avalanche::Proof::FromHex(*proof, request.params[2].get_str(), error)) { throw JSONRPCError(RPC_INVALID_PARAMETER, error.original); } if (key != proof->getMaster()) { // TODO: we want to provide a proper delegation. 
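ParsePubKey() above only accepts hex strings of exactly 2 * CPubKey::COMPRESSED_SIZE or 2 * CPubKey::SIZE characters. A minimal sketch of that shape check, assuming the usual 33-byte compressed / 65-byte uncompressed secp256k1 encodings (66 or 130 hex characters):

// Sketch of the size/hex check behind ParsePubKey() (shape only; it does
// not validate that the bytes are a point on the curve).
#include <cctype>
#include <string>

bool looksLikePubKeyHex(const std::string &keyHex) {
    if (keyHex.size() != 66 && keyHex.size() != 130) {
        return false;
    }
    for (unsigned char c : keyHex) {
        if (!std::isxdigit(c)) {
            return false;
        }
    }
    return true;
}

int main() {
    // A 33-byte compressed key serializes to 66 hex characters.
    const std::string compressed(66, 'a');
    return (looksLikePubKeyHex(compressed) && !looksLikePubKeyHex("f00d")) ? 0 : 1;
}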
return false; } return g_avalanche->addNode(nodeid, proof, avalanche::DelegationBuilder(*proof).build()); } static UniValue buildavalancheproof(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "buildavalancheproof", "Build a proof for avalanche's sybil resistance.\n", { {"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO, "The proof's sequence"}, {"expiration", RPCArg::Type::NUM, RPCArg::Optional::NO, "A timestamp indicating when the proof expire"}, {"master", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The master public key"}, { "stakes", RPCArg::Type::ARR, RPCArg::Optional::NO, "The stakes to be signed and associated private keys", { { "stake", RPCArg::Type::OBJ, RPCArg::Optional::NO, "A stake to be attached to this proof", { {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"}, {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"}, {"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The amount in this UTXO"}, {"height", RPCArg::Type::NUM, RPCArg::Optional::NO, "The height at which this UTXO was mined"}, {"iscoinbase", RPCArg::Type::BOOL, /* default */ "false", "Indicate wether the UTXO is a coinbase"}, {"privatekey", RPCArg::Type::STR, RPCArg::Optional::NO, "private key in base58-encoding"}, }, }, }, }, }, RPCResult{RPCResult::Type::STR_HEX, "proof", "A string that is a serialized, hex-encoded proof data."}, RPCExamples{HelpExampleRpc("buildavalancheproof", "0 1234567800 \"\" []")}, } .Check(request); RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VNUM, UniValue::VSTR, UniValue::VARR}); const uint64_t sequence = request.params[0].get_int64(); const int64_t expiration = request.params[1].get_int64(); avalanche::ProofBuilder pb(sequence, expiration, ParsePubKey(request.params[2])); const UniValue &stakes = request.params[3].get_array(); for (size_t i = 0; i < stakes.size(); i++) { const UniValue &stake = stakes[i]; RPCTypeCheckObj(stake, { {"txid", UniValue::VSTR}, {"vout", UniValue::VNUM}, // "amount" is also required but check is done below // due to UniValue::VNUM erroneously not accepting // quoted numerics (which are valid JSON) {"height", UniValue::VNUM}, {"privatekey", UniValue::VSTR}, }); int nOut = find_value(stake, "vout").get_int(); if (nOut < 0) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "vout must be positive"); } const int height = find_value(stake, "height").get_int(); if (height < 1) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "height must be positive"); } const TxId txid(ParseHashO(stake, "txid")); const COutPoint utxo(txid, nOut); if (!stake.exists("amount")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Missing amount"); } const Amount amount = AmountFromValue(find_value(stake, "amount")); const UniValue &iscbparam = find_value(stake, "iscoinbase"); const bool iscoinbase = iscbparam.isNull() ? false : iscbparam.get_bool(); CKey key = DecodeSecret(find_value(stake, "privatekey").get_str()); if (!pb.addUTXO(utxo, amount, uint32_t(height), iscoinbase, std::move(key))) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid private key"); } } const avalanche::Proof proof = pb.build(); CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << proof; return HexStr(ss); } static UniValue decodeavalancheproof(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "decodeavalancheproof", "Convert a serialized, hex-encoded proof, into JSON object. 
" "The validity of the proof is not verified.\n", { {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The proof hex string"}, }, RPCResult{ RPCResult::Type::OBJ, "", "", { {RPCResult::Type::NUM, "sequence", "The proof's sequential number"}, {RPCResult::Type::NUM, "expiration", "A timestamp indicating when the proof expires"}, {RPCResult::Type::STR_HEX, "master", "The master public key"}, {RPCResult::Type::STR_HEX, "limitedid", "A hash of the proof data excluding the master key."}, {RPCResult::Type::STR_HEX, "proofid", "A hash of the limitedid and master key."}, {RPCResult::Type::ARR, "stakes", "", { {RPCResult::Type::OBJ, "", "", { {RPCResult::Type::STR_HEX, "txid", "The transaction id"}, {RPCResult::Type::NUM, "vout", "The output number"}, {RPCResult::Type::STR_AMOUNT, "amount", "The amount in this UTXO"}, {RPCResult::Type::NUM, "height", "The height at which this UTXO was mined"}, {RPCResult::Type::BOOL, "iscoinbase", "Indicate whether the UTXO is a coinbase"}, {RPCResult::Type::STR_HEX, "pubkey", "This UTXO's public key"}, {RPCResult::Type::STR, "signature", "Signature of the proofid with this UTXO's private " "key (base64 encoded)"}, }}, }}, }}, RPCExamples{HelpExampleCli("decodeavalancheproof", "\"\"") + HelpExampleRpc("decodeavalancheproof", "\"\"")}, } .Check(request); RPCTypeCheck(request.params, {UniValue::VSTR}); avalanche::Proof proof; bilingual_str error; if (!avalanche::Proof::FromHex(proof, request.params[0].get_str(), error)) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original); } UniValue result(UniValue::VOBJ); result.pushKV("sequence", proof.getSequence()); result.pushKV("expiration", proof.getExpirationTime()); result.pushKV("master", HexStr(proof.getMaster())); result.pushKV("limitedid", proof.getLimitedId().ToString()); result.pushKV("proofid", proof.getId().ToString()); UniValue stakes(UniValue::VARR); for (const avalanche::SignedStake &s : proof.getStakes()) { const COutPoint &utxo = s.getStake().getUTXO(); UniValue stake(UniValue::VOBJ); stake.pushKV("txid", utxo.GetTxId().ToString()); stake.pushKV("vout", uint64_t(utxo.GetN())); stake.pushKV("amount", s.getStake().getAmount()); stake.pushKV("height", uint64_t(s.getStake().getHeight())); stake.pushKV("iscoinbase", s.getStake().isCoinbase()); stake.pushKV("pubkey", HexStr(s.getStake().getPubkey())); stake.pushKV("signature", EncodeBase64(s.getSignature())); stakes.push_back(stake); } result.pushKV("stakes", stakes); return result; } static UniValue delegateavalancheproof(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "delegateavalancheproof", "Delegate the avalanche proof to another public key.\n", { {"limitedproofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The limited id of the proof to be delegated."}, {"privatekey", RPCArg::Type::STR, RPCArg::Optional::NO, "The private key in base58-encoding. 
Must match the proof master " "public key or the upper level parent delegation public key if " " supplied."}, {"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The public key to delegate the proof to."}, {"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A string that is the serialized, hex-encoded delegation for the " "proof and which is a parent for the delegation to build."}, }, RPCResult{RPCResult::Type::STR_HEX, "delegation", "A string that is a serialized, hex-encoded delegation."}, RPCExamples{ HelpExampleRpc("delegateavalancheproof", "\"\" \"\" \"\"")}, } .Check(request); RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VSTR, UniValue::VSTR}); if (!g_avalanche) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); } avalanche::LimitedProofId limitedProofId{ ParseHashV(request.params[0], "limitedproofid")}; const CKey privkey = DecodeSecret(request.params[1].get_str()); if (!privkey.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "The private key is invalid"); } const CPubKey pubkey = ParsePubKey(request.params[2]); std::unique_ptr dgb; if (request.params.size() >= 4 && !request.params[3].isNull()) { avalanche::Delegation dg; CDataStream ss(ParseHexV(request.params[3], "delegation"), SER_NETWORK, PROTOCOL_VERSION); ss >> dg; if (dg.getProofId() != limitedProofId.computeProofId(dg.getProofMaster())) { throw JSONRPCError( RPC_INVALID_PARAMETER, "The supplied delegation does not match the proof"); } CPubKey auth; avalanche::DelegationState dgState; if (!dg.verify(dgState, auth)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "The supplied delegation is not valid"); } if (privkey.GetPubKey() != auth) { throw JSONRPCError( RPC_INVALID_PARAMETER, "The supplied private key does not match the delegation"); } dgb = std::make_unique(dg); } else { dgb = std::make_unique( limitedProofId, privkey.GetPubKey()); } if (!dgb->addLevel(privkey, pubkey)) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to build the delegation"); } CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << dgb->build(); return HexStr(ss); } static UniValue getavalanchepeerinfo(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "getavalanchepeerinfo", "Returns data about each connected avalanche peer as a json array of " "objects.\n", {}, RPCResult{ RPCResult::Type::ARR, "", "", {{ RPCResult::Type::OBJ, "", "", {{ {RPCResult::Type::NUM, "peerid", "The peer id"}, {RPCResult::Type::STR_HEX, "proof", "The avalanche proof used by this peer"}, {RPCResult::Type::ARR, "nodes", "", { {RPCResult::Type::NUM, "nodeid", "Node id, as returned by getpeerinfo"}, }}, }}, }}, }, RPCExamples{HelpExampleCli("getavalanchepeerinfo", "") + HelpExampleRpc("getavalanchepeerinfo", "")}, } .Check(request); if (!g_avalanche) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); } UniValue ret(UniValue::VARR); for (const auto &peer : g_avalanche->getPeers()) { UniValue obj(UniValue::VOBJ); CDataStream serproof(SER_NETWORK, PROTOCOL_VERSION); serproof << *peer.proof; obj.pushKV("peerid", uint64_t(peer.peerid)); obj.pushKV("proof", HexStr(serproof)); UniValue nodes(UniValue::VARR); for (const auto &id : g_avalanche->getNodeIdsForPeer(peer.peerid)) { nodes.push_back(id); } obj.pushKV("nodes", nodes); obj.pushKV("nodecount", uint64_t(peer.node_count)); ret.push_back(obj); } return ret; } +static UniValue getrawavalancheproof(const Config &config, + const JSONRPCRequest &request) { + RPCHelpMan{ + "getrawavalancheproof", + "Lookup for a known avalanche proof by 
id.\n", + { + {"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, + "The hex encoded avalanche proof identifier."}, + }, + RPCResult{ + RPCResult::Type::OBJ, + "", + "", + {{ + {RPCResult::Type::STR_HEX, "proof", + "The hex encoded proof matching the identifier."}, + {RPCResult::Type::BOOL, "orphan", + "Whether the proof is an orphan."}, + }}, + }, + RPCExamples{HelpExampleRpc("getrawavalancheproof", "")}, + } + .Check(request); + + if (!g_avalanche) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); + } + + const avalanche::ProofId proofid = + avalanche::ProofId::fromHex(request.params[0].get_str()); + + bool isOrphan = false; + auto proof = g_avalanche->getProof(proofid); + if (!proof) { + proof = g_avalanche->getOrphan(proofid); + isOrphan = true; + } + + if (!proof) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Proof not found"); + } + + UniValue ret(UniValue::VOBJ); + + CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); + ss << *proof; + ret.pushKV("proof", HexStr(ss)); + ret.pushKV("orphan", isOrphan); + + return ret; +} + static void verifyProofOrThrow(const NodeContext &node, avalanche::Proof &proof, const std::string &proofHex) { bilingual_str error; if (!avalanche::Proof::FromHex(proof, proofHex, error)) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original); } avalanche::ProofValidationState state; { LOCK(cs_main); if (!proof.verify(state, node.chainman->ActiveChainstate().CoinsTip())) { throw JSONRPCError(RPC_INVALID_PARAMETER, "The proof is invalid: " + state.ToString()); } } } static UniValue sendavalancheproof(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "sendavalancheproof", "Broadcast an avalanche proof.\n", { {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The avalanche proof to broadcast."}, }, RPCResult{RPCResult::Type::BOOL, "success", "Whether the proof was sent successfully or not."}, RPCExamples{HelpExampleRpc("sendavalancheproof", "")}, } .Check(request); if (!g_avalanche) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Avalanche is not initialized"); } auto proof = std::make_shared(); NodeContext &node = EnsureNodeContext(request.context); // Verify the proof. Note that this is redundant with the verification done // when adding the proof to the pool, but we get a chance to give a better // error message. verifyProofOrThrow(node, *proof, request.params[0].get_str()); // Add the proof to the pool if we don't have it already. Since the proof // verification has already been done, a failure likely indicates that there // already is a proof with conflicting utxos. 
const avalanche::ProofId &proofid = proof->getId(); if (!g_avalanche->getProof(proofid) && !g_avalanche->addProof(proof)) { throw JSONRPCError( RPC_INVALID_PARAMETER, "The proof has conflicting utxo with an existing proof"); } RelayProof(proofid, *node.connman); return true; } static UniValue verifyavalancheproof(const Config &config, const JSONRPCRequest &request) { RPCHelpMan{ "verifyavalancheproof", "Verify an avalanche proof is valid and return the error otherwise.\n", { {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Proof to verify."}, }, RPCResult{RPCResult::Type::BOOL, "success", "Whether the proof is valid or not."}, RPCExamples{HelpExampleRpc("verifyavalancheproof", "\"\"")}, } .Check(request); RPCTypeCheck(request.params, {UniValue::VSTR}); avalanche::Proof proof; verifyProofOrThrow(EnsureNodeContext(request.context), proof, request.params[0].get_str()); return true; } void RegisterAvalancheRPCCommands(CRPCTable &t) { // clang-format off static const CRPCCommand commands[] = { // category name actor (function) argNames // ------------------- ------------------------ ---------------------- ---------- { "avalanche", "getavalanchekey", getavalanchekey, {}}, { "avalanche", "addavalanchenode", addavalanchenode, {"nodeid"}}, { "avalanche", "buildavalancheproof", buildavalancheproof, {"sequence", "expiration", "master", "stakes"}}, { "avalanche", "decodeavalancheproof", decodeavalancheproof, {"proof"}}, { "avalanche", "delegateavalancheproof", delegateavalancheproof, {"proof", "privatekey", "publickey", "delegation"}}, { "avalanche", "getavalanchepeerinfo", getavalanchepeerinfo, {}}, + { "avalanche", "getrawavalancheproof", getrawavalancheproof, {"proofid"}}, { "avalanche", "sendavalancheproof", sendavalancheproof, {"proof"}}, { "avalanche", "verifyavalancheproof", verifyavalancheproof, {"proof"}}, }; // clang-format on for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/test/functional/abc_rpc_avalancheproof.py b/test/functional/abc_rpc_avalancheproof.py index f8af83e8b..d6888a4d9 100644 --- a/test/functional/abc_rpc_avalancheproof.py +++ b/test/functional/abc_rpc_avalancheproof.py @@ -1,346 +1,371 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test building avalanche proofs and using them to add avalanche peers.""" import base64 from decimal import Decimal +from test_framework.address import ADDRESS_BCHREG_UNSPENDABLE from test_framework.avatools import ( create_coinbase_stakes, create_stakes, get_proof_ids, ) from test_framework.key import ECKey, bytes_to_wif from test_framework.messages import ( AvalancheDelegation, AvalancheDelegationLevel, AvalancheProof, FromHex, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.util import ( append_config, assert_equal, wait_until, assert_raises_rpc_error, ) AVALANCHE_MAX_PROOF_STAKES = 1000 PROOF_DUST_THRESHOLD = 1.0 """Minimum amount per UTXO in a proof (in coins, not in satoshis)""" def add_interface_node(test_node) -> str: """Create a mininode, connect it to test_node, return the nodeid of the mininode as registered by test_node. 
""" n = P2PInterface() test_node.add_p2p_connection(n) n.wait_for_verack() return test_node.getpeerinfo()[-1]['id'] class AvalancheProofTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [['-enableavalanche=1', '-avacooldown=0'], ] self.supports_cli = False self.rpc_timeout = 120 def run_test(self): node = self.nodes[0] addrkey0 = node.get_deterministic_priv_key() blockhashes = node.generatetoaddress(100, addrkey0.address) self.log.info( "Make build a valid proof and restart the node to use it") privkey = ECKey() privkey.set(bytes.fromhex( "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"), True) def get_hex_pubkey(privkey): return privkey.get_pubkey().get_bytes().hex() proof_master = get_hex_pubkey(privkey) proof_sequence = 11 proof_expiration = 12 stakes = create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key) proof = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, stakes) self.log.info("Test decodeavalancheproof RPC") proofobj = FromHex(AvalancheProof(), proof) decodedproof = node.decodeavalancheproof(proof) limited_id_hex = f"{proofobj.limited_proofid:0{64}x}" assert_equal(decodedproof["sequence"], proof_sequence) assert_equal(decodedproof["expiration"], proof_expiration) assert_equal(decodedproof["master"], proof_master) assert_equal(decodedproof["proofid"], f"{proofobj.proofid:0{64}x}") assert_equal(decodedproof["limitedid"], limited_id_hex) assert_equal(decodedproof["stakes"][0]["txid"], stakes[0]["txid"]) assert_equal(decodedproof["stakes"][0]["vout"], stakes[0]["vout"]) assert_equal(decodedproof["stakes"][0]["height"], stakes[0]["height"]) assert_equal( decodedproof["stakes"][0]["iscoinbase"], stakes[0]["iscoinbase"]) assert_equal( decodedproof["stakes"][0]["signature"], base64.b64encode(proofobj.stakes[0].sig).decode("ascii")) # Invalid hex (odd number of hex digits) assert_raises_rpc_error(-22, "Proof must be an hexadecimal string", node.decodeavalancheproof, proof[:-1]) # Valid hex but invalid proof assert_raises_rpc_error(-22, "Proof has invalid format", node.decodeavalancheproof, proof[:-2]) # Restart the node, making sure it is initially in IBD mode minchainwork = int(node.getblockchaininfo()["chainwork"], 16) + 1 self.restart_node(0, self.extra_args[0] + [ "-avaproof={}".format(proof), "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", "-minimumchainwork=0x{:x}".format(minchainwork), ]) self.log.info( "The proof verification should be delayed until IBD is complete") assert node.getblockchaininfo()["initialblockdownload"] is True # Our proof cannot be verified during IBD, so we should have no peer assert not node.getavalanchepeerinfo() # Mining a few more blocks should cause us to leave IBD node.generate(2) # Our proof is now verified and our node is added as a peer assert node.getblockchaininfo()["initialblockdownload"] is False wait_until(lambda: len(node.getavalanchepeerinfo()) == 1, timeout=5) if self.is_wallet_compiled(): self.log.info( "A proof using the maximum number of stakes is accepted...") new_blocks = node.generate(AVALANCHE_MAX_PROOF_STAKES // 10 + 1) # confirm the coinbase UTXOs node.generate(101) too_many_stakes = create_stakes( node, new_blocks, AVALANCHE_MAX_PROOF_STAKES + 1) maximum_stakes = too_many_stakes[:-1] good_proof = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, maximum_stakes) peerid1 = add_interface_node(node) assert node.addavalanchenode(peerid1, proof_master, good_proof) 
self.log.info( "A proof using too many stakes should be rejected...") too_many_utxos = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, too_many_stakes) peerid2 = add_interface_node(node) assert not node.addavalanchenode( peerid2, proof_master, too_many_utxos) self.log.info("Generate delegations for the proof") # Stack up a few delegation levels def gen_privkey(): pk = ECKey() pk.generate() return pk delegator_privkey = privkey delegation = None for _ in range(10): delegated_privkey = gen_privkey() delegation = node.delegateavalancheproof( limited_id_hex, bytes_to_wif(delegator_privkey.get_bytes()), get_hex_pubkey(delegated_privkey), delegation, ) delegator_privkey = delegated_privkey random_privkey = gen_privkey() random_pubkey = get_hex_pubkey(random_privkey) # Invalid proof no_stake = node.buildavalancheproof(proof_sequence, proof_expiration, proof_master, []) # Invalid privkey assert_raises_rpc_error(-5, "The private key is invalid", node.delegateavalancheproof, limited_id_hex, bytes_to_wif(bytes(32)), random_pubkey, ) # Invalid delegation bad_dg = AvalancheDelegation() assert_raises_rpc_error(-8, "The supplied delegation does not match the proof", node.delegateavalancheproof, limited_id_hex, bytes_to_wif(privkey.get_bytes()), random_pubkey, bad_dg.serialize().hex(), ) # Still invalid, but with a matching proofid bad_dg.limited_proofid = proofobj.limited_proofid bad_dg.proof_master = proofobj.master bad_dg.levels = [AvalancheDelegationLevel()] assert_raises_rpc_error(-8, "The supplied delegation is not valid", node.delegateavalancheproof, limited_id_hex, bytes_to_wif(privkey.get_bytes()), random_pubkey, bad_dg.serialize().hex(), ) # Wrong privkey, match the proof but does not match the delegation assert_raises_rpc_error(-8, "The supplied private key does not match the delegation", node.delegateavalancheproof, limited_id_hex, bytes_to_wif(privkey.get_bytes()), random_pubkey, delegation, ) # Test invalid proofs dust = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key, amount="0")) dust_amount = Decimal(f"{PROOF_DUST_THRESHOLD * 0.9999:.4f}") dust2 = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key, amount=str(dust_amount))) duplicate_stake = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, create_coinbase_stakes(node, [blockhashes[0]] * 2, addrkey0.key)) missing_stake = node.buildavalancheproof( proof_sequence, proof_expiration, proof_master, [{ 'txid': '0' * 64, 'vout': 0, 'amount': 10, 'height': 42, 'iscoinbase': False, 'privatekey': addrkey0.key, }] ) bad_sig = ("0b000000000000000c0000000000000021030b4c866585dd868a9d62348" "a9cd008d6a312937048fff31670e7e920cfc7a7440105c5f72f5d6da3085" "583e75ee79340eb4eff208c89988e7ed0efb30b87298fa30000000000f20" "52a0100000003000000210227d85ba011276cf25b51df6a188b75e604b3" "8770a462b2d0e9fb2fc839ef5d3faf07f001dd38e9b4a43d07d5d449cc0" "f7d2888d96b82962b3ce516d1083c0e031773487fc3c4f2e38acd1db974" "1321b91a79b82d1c2cfd47793261e4ba003cf5") self.log.info( "Check the verifyavalancheproof and sendavalancheproof RPCs") for rpc in [node.verifyavalancheproof, node.sendavalancheproof]: assert_raises_rpc_error(-22, "Proof must be an hexadecimal string", rpc, "f00") assert_raises_rpc_error(-22, "Proof has invalid format", rpc, "f00d") def check_rpc_failure(proof, message): assert_raises_rpc_error(-8, "The proof is invalid: " + message, rpc, proof) 
check_rpc_failure(no_stake, "no-stake") check_rpc_failure(dust, "amount-below-dust-threshold") check_rpc_failure(duplicate_stake, "duplicated-stake") check_rpc_failure(missing_stake, "utxo-missing-or-spent") check_rpc_failure(bad_sig, "invalid-signature") if self.is_wallet_compiled(): check_rpc_failure(too_many_utxos, "too-many-utxos") conflicting_utxo = node.buildavalancheproof( proof_sequence + 1, proof_expiration, proof_master, stakes) assert_raises_rpc_error(-8, "The proof has conflicting utxo with an existing proof", node.sendavalancheproof, conflicting_utxo) # Good proof assert node.verifyavalancheproof(proof) peer = node.add_p2p_connection(P2PInterface()) proofid = FromHex(AvalancheProof(), proof).proofid node.sendavalancheproof(proof) assert proofid in get_proof_ids(node) def inv_found(): with p2p_lock: return peer.last_message.get( "inv") and peer.last_message["inv"].inv[-1].hash == proofid wait_until(inv_found) + self.log.info("Check the getrawavalancheproof RPC") + + raw_proof = node.getrawavalancheproof("{:064x}".format(proofid)) + assert_equal(raw_proof['proof'], proof) + assert_equal(raw_proof['orphan'], False) + + assert_raises_rpc_error(-8, "Proof not found", + node.getrawavalancheproof, '0' * 64) + + # Orphan the proof by sending the stake + raw_tx = node.createrawtransaction( + [{"txid": stakes[-1]["txid"], "vout": 0}], + {ADDRESS_BCHREG_UNSPENDABLE: stakes[-1] + ["amount"] - Decimal('0.01')} + ) + signed_tx = node.signrawtransactionwithkey(raw_tx, [addrkey0.key]) + node.sendrawtransaction(signed_tx["hex"]) + node.generate(1) + wait_until(lambda: proofid not in get_proof_ids(node)) + + raw_proof = node.getrawavalancheproof("{:064x}".format(proofid)) + assert_equal(raw_proof['proof'], proof) + assert_equal(raw_proof['orphan'], True) + self.log.info("Bad proof should be rejected at startup") self.stop_node(0) node.assert_start_raises_init_error( self.extra_args[0] + [ "-avasessionkey=0", ], expected_msg="Error: the avalanche session key is invalid", ) node.assert_start_raises_init_error( self.extra_args[0] + [ "-avaproof={}".format(proof), ], expected_msg="Error: the avalanche master key is missing for the avalanche proof", ) node.assert_start_raises_init_error( self.extra_args[0] + [ "-avaproof={}".format(proof), "-avamasterkey=0", ], expected_msg="Error: the avalanche master key is invalid", ) def check_proof_init_error(proof, message): node.assert_start_raises_init_error( self.extra_args[0] + [ "-avaproof={}".format(proof), "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", ], expected_msg="Error: " + message, ) check_proof_init_error(no_stake, "the avalanche proof has no stake") check_proof_init_error(dust, "the avalanche proof stake is too low") check_proof_init_error(dust2, "the avalanche proof stake is too low") check_proof_init_error(duplicate_stake, "the avalanche proof has duplicated stake") check_proof_init_error(bad_sig, "the avalanche proof has invalid stake signatures") if self.is_wallet_compiled(): # The too many utxos case creates a proof which is so large that it # cannot fit on the command line append_config(node.datadir, ["avaproof={}".format(too_many_utxos)]) node.assert_start_raises_init_error( self.extra_args[0] + [ "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", ], expected_msg="Error: the avalanche proof has too many utxos", match=ErrorMatch.PARTIAL_REGEX, ) if __name__ == '__main__': AvalancheProofTest().main()
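A minimal sketch of how the new getrawavalancheproof RPC can be driven from the functional test framework, assuming a running TestNode `node` and a hex-encoded `proof` already registered via sendavalancheproof as set up in the test above; the helper name fetch_raw_proof is purely illustrative and not part of the patch.

from test_framework.messages import AvalancheProof, FromHex

def fetch_raw_proof(node, proof_hex):
    """Round-trip a registered proof through getrawavalancheproof."""
    # Recompute the proofid from the hex-encoded proof, as the test does.
    proofid = FromHex(AvalancheProof(), proof_hex).proofid
    # The RPC takes the proofid as a 64-character hex string.
    return node.getrawavalancheproof("{:064x}".format(proofid))

# Expected behaviour under the assumptions above:
#   result = fetch_raw_proof(node, proof)
#   assert result["proof"] == proof
#   assert result["orphan"] is False  # becomes True once the staked UTXO is spent

Returning the orphan flag alongside the raw proof lets a caller distinguish a proof whose stake UTXO is currently missing or spent (orphaned) from a proof the node does not know at all, which raises "Proof not found" instead.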