diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp index 4c12cbaea..a2430464e 100644 --- a/src/avalanche/processor.cpp +++ b/src/avalanche/processor.cpp @@ -1,621 +1,624 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include +#include #include #include // For DecodeSecret #include // For ::PeerManager #include #include #include #include #include #include #include /** * Run the avalanche event loop every 10ms. */ static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10}; // Unfortunately, the bitcoind codebase is full of global and we are kinda // forced into it here. std::unique_ptr g_avalanche; namespace avalanche { bool VoteRecord::registerVote(NodeId nodeid, uint32_t error) { // We just got a new vote, so there is one less inflight request. clearInflightRequest(); // We want to avoid having the same node voting twice in a quorum. if (!addNodeToQuorum(nodeid)) { return false; } /** * The result of the vote is determined from the error code. If the error * code is 0, there is no error and therefore the vote is yes. If there is * an error, we check the most significant bit to decide if the vote is a no * (for instance, the block is invalid) or is the vote inconclusive (for * instance, the queried node does not have the block yet). */ votes = (votes << 1) | (error == 0); consider = (consider << 1) | (int32_t(error) >= 0); /** * We compute the number of yes and/or no votes as follow: * * votes: 1010 * consider: 1100 * * yes votes: 1000 using votes & consider * no votes: 0100 using ~votes & consider */ bool yes = countBits(votes & consider & 0xff) > 6; if (!yes) { bool no = countBits(~votes & consider & 0xff) > 6; if (!no) { // The round is inconclusive. return false; } } // If the round is in agreement with previous rounds, increase confidence. if (isAccepted() == yes) { confidence += 2; return getConfidence() == AVALANCHE_FINALIZATION_SCORE; } // The round changed our state. We reset the confidence. confidence = yes; return true; } bool VoteRecord::addNodeToQuorum(NodeId nodeid) { if (nodeid == NO_NODE) { // Helpful for testing. return true; } // MMIX Linear Congruent Generator. const uint64_t r1 = 6364136223846793005 * uint64_t(nodeid) + 1442695040888963407; // Fibonacci hashing. const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed); // Combine and extract hash. const uint16_t h = (r1 + r2) >> 48; /** * Check if the node is in the filter. */ for (size_t i = 1; i < nodeFilter.size(); i++) { if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) { return false; } } /** * Add the node which just voted to the filter. */ nodeFilter[successfulVotes % nodeFilter.size()] = h; successfulVotes++; return true; } bool VoteRecord::registerPoll() const { uint8_t count = inflight.load(); while (count < AVALANCHE_MAX_INFLIGHT_POLL) { if (inflight.compare_exchange_weak(count, count + 1)) { return true; } } return false; } static bool IsWorthPolling(const CBlockIndex *pindex) { AssertLockHeld(cs_main); if (pindex->nStatus.isInvalid()) { // No point polling invalid blocks. return false; } if (::ChainstateActive().IsBlockFinalized(pindex)) { // There is no point polling finalized block. 
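VoteRecord::registerVote above packs its state tightly: votes and consider are 8-round shift registers, a round only counts as a yes (or a no) when at least 7 of the last 8 considered votes agree, and confidence keeps the acceptance bit in its LSB so that adding 2 raises the score without flipping the result. The standalone sketch below mirrors that arithmetic outside the class so the thresholds can be checked in isolation; MiniVoteRecord, countBits and FINALIZATION_SCORE are illustrative stand-ins for this example, not part of the patch.

// Standalone sketch of the VoteRecord bit accounting (illustration only).
// countBits() stands in for the popcount helper used by the real code;
// FINALIZATION_SCORE mirrors AVALANCHE_FINALIZATION_SCORE.
#include <bitset>
#include <cassert>
#include <cstdint>

static int countBits(uint32_t v) { return std::bitset<32>(v).count(); }

struct MiniVoteRecord {
    static constexpr uint16_t FINALIZATION_SCORE = 128;

    uint16_t confidence;  // LSB = accepted, upper bits = confidence score
    uint8_t votes = 0;    // shift register: 1 bit per round, 1 = yes
    uint8_t consider = 0; // shift register: 1 = the round was conclusive

    explicit MiniVoteRecord(bool accepted) : confidence(accepted) {}

    bool isAccepted() const { return confidence & 0x01; }
    uint16_t getConfidence() const { return confidence >> 1; }

    // Returns true when the acceptance state flips or finalization is reached.
    bool registerVote(uint32_t error) {
        votes = (votes << 1) | (error == 0);
        consider = (consider << 1) | (int32_t(error) >= 0);

        // A round counts as yes (resp. no) only when at least 7 of the last 8
        // considered votes agree.
        bool yes = countBits(votes & consider & 0xff) > 6;
        if (!yes && !(countBits(~votes & consider & 0xff) > 6)) {
            return false; // inconclusive round, nothing changes
        }

        if (isAccepted() == yes) {
            // Agreement with the current state: +2 leaves the LSB untouched.
            confidence += 2;
            return getConfidence() == FINALIZATION_SCORE;
        }

        // Disagreement: reset the confidence, keep only the new result bit.
        confidence = yes;
        return true;
    }
};

int main() {
    MiniVoteRecord rec(true);
    // Eight conclusive yes votes in a row start raising the confidence.
    for (int i = 0; i < 8; i++) {
        rec.registerVote(/*error=*/0);
    }
    assert(rec.isAccepted() && rec.getConfidence() > 0);
    return 0;
}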
return false; } return true; } struct Processor::PeerData { Proof proof; Delegation delegation; }; class Processor::NotificationsHandler : public interfaces::Chain::Notifications { Processor *m_processor; public: NotificationsHandler(Processor *p) : m_processor(p) {} void updatedBlockTip() override { LOCK(m_processor->cs_peerManager); + + if (m_processor->mustRegisterProof && + !::ChainstateActive().IsInitialBlockDownload()) { + m_processor->peerManager->getPeerId(m_processor->peerData->proof); + m_processor->mustRegisterProof = false; + } + m_processor->peerManager->updatedBlockTip(); } }; Processor::Processor(interfaces::Chain &chain, CConnman *connmanIn, NodePeerManager *nodePeerManagerIn) : connman(connmanIn), nodePeerManager(nodePeerManagerIn), queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT), round(0), peerManager(std::make_unique()) { if (gArgs.IsArgSet("-avasessionkey")) { sessionKey = DecodeSecret(gArgs.GetArg("-avasessionkey", "")); } else { // Pick a random key for the session. sessionKey.MakeNewKey(true); } if (gArgs.IsArgSet("-avaproof")) { peerData = std::make_unique(); { // The proof. CDataStream stream(ParseHex(gArgs.GetArg("-avaproof", "")), SER_NETWORK, 0); stream >> peerData->proof; - // Ensure the peer manager knows about it. - // FIXME: There is no way to register the proof at this time because - // we might not have the proper chainstate at the moment. We need to - // find a way to delay the registration of the proof until after IBD - // has finished and the chain state is settled. - // LOCK(cs_peerManager); - // peerManager->getPeerId(peerData->proof); + // Schedule proof registration at the first new block after IBD. + mustRegisterProof = true; } // Generate the delegation to the session key. DelegationBuilder dgb(peerData->proof); if (sessionKey.GetPubKey() != peerData->proof.getMaster()) { dgb.addLevel(DecodeSecret(gArgs.GetArg("-avamasterkey", "")), sessionKey.GetPubKey()); } peerData->delegation = dgb.build(); } // Make sure we get notified of chain state changes. chainNotificationsHandler = chain.handleNotifications(std::make_shared(this)); } Processor::~Processor() { chainNotificationsHandler.reset(); stopEventLoop(); } bool Processor::addBlockToReconcile(const CBlockIndex *pindex) { bool isAccepted; { LOCK(cs_main); if (!IsWorthPolling(pindex)) { // There is no point polling this block. return false; } isAccepted = ::ChainActive().Contains(pindex); } return vote_records.getWriteView() ->insert(std::make_pair(pindex, VoteRecord(isAccepted))) .second; } bool Processor::isAccepted(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return false; } return it->second.isAccepted(); } int Processor::getConfidence(const CBlockIndex *pindex) const { auto r = vote_records.getReadView(); auto it = r->find(pindex); if (it == r.end()) { return -1; } return it->second.getConfidence(); } namespace { /** * When using TCP, we need to sign all messages as the transport layer is * not secure. */ class TCPResponse { Response response; SchnorrSig sig; public: TCPResponse(Response responseIn, const CKey &key) : response(std::move(responseIn)) { CHashWriter hasher(SER_GETHASH, 0); hasher << response; const uint256 hash = hasher.GetHash(); // Now let's sign! 
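The constructor above no longer registers the proof directly; it only sets mustRegisterProof, and the first updatedBlockTip notification received outside of initial block download performs the registration exactly once. A minimal sketch of that deferral pattern, with isInitialBlockDownload() and registerProof() as hypothetical stand-ins for the chainstate query and the PeerManager::getPeerId call:

// Minimal sketch of the "register the proof on the first tip update after
// IBD" pattern. The two std::function members are stand-ins for the real
// chainstate check and PeerManager call.
#include <functional>
#include <iostream>
#include <mutex>

class DeferredRegistration {
    std::mutex m;
    bool mustRegister;
    std::function<bool()> isInitialBlockDownload;
    std::function<void()> registerProof;

public:
    DeferredRegistration(std::function<bool()> ibd, std::function<void()> reg)
        : mustRegister(true), isInitialBlockDownload(std::move(ibd)),
          registerProof(std::move(reg)) {}

    // Called for every new block tip, mirroring updatedBlockTip().
    void updatedBlockTip() {
        std::lock_guard<std::mutex> lock(m);
        if (mustRegister && !isInitialBlockDownload()) {
            registerProof();
            mustRegister = false; // only register once
        }
    }
};

int main() {
    bool ibd = true;
    int registrations = 0;
    DeferredRegistration d([&] { return ibd; }, [&] { registrations++; });

    d.updatedBlockTip(); // still in IBD: nothing happens
    ibd = false;
    d.updatedBlockTip(); // first tip after IBD: proof is registered
    d.updatedBlockTip(); // subsequent tips: no duplicate registration
    std::cout << registrations << "\n"; // prints 1
    return 0;
}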
if (!key.SignSchnorr(hash, sig)) { sig.fill(0); } } // serialization support SERIALIZE_METHODS(TCPResponse, obj) { READWRITE(obj.response, obj.sig); } }; } // namespace void Processor::sendResponse(CNode *pfrom, Response response) const { connman->PushMessage( pfrom, CNetMsgMaker(pfrom->GetCommonVersion()) .Make(NetMsgType::AVARESPONSE, TCPResponse(std::move(response), sessionKey))); } bool Processor::registerVotes(NodeId nodeid, const Response &response, std::vector &updates) { { // Save the time at which we can query again. LOCK(cs_peerManager); // FIXME: This will override the time even when we received an old stale // message. This should check that the message is indeed the most up to // date one before updating the time. peerManager->updateNextRequestTime( nodeid, std::chrono::steady_clock::now() + std::chrono::milliseconds(response.getCooldown())); } std::vector invs; { // Check that the query exists. auto w = queries.getWriteView(); auto it = w->find(std::make_tuple(nodeid, response.getRound())); if (it == w.end()) { nodePeerManager->Misbehaving(nodeid, 2, "unexpcted-ava-response"); return false; } invs = std::move(it->invs); w->erase(it); } // Verify that the request and the vote are consistent. const std::vector &votes = response.GetVotes(); size_t size = invs.size(); if (votes.size() != size) { nodePeerManager->Misbehaving(nodeid, 100, "invalid-ava-response-size"); return false; } for (size_t i = 0; i < size; i++) { if (invs[i].hash != votes[i].GetHash()) { nodePeerManager->Misbehaving(nodeid, 100, "invalid-ava-response-content"); return false; } } std::map responseIndex; { LOCK(cs_main); for (const auto &v : votes) { auto pindex = LookupBlockIndex(BlockHash(v.GetHash())); if (!pindex) { // This should not happen, but just in case... continue; } if (!IsWorthPolling(pindex)) { // There is no point polling this block. continue; } responseIndex.insert(std::make_pair(pindex, v)); } } { // Register votes. auto w = vote_records.getWriteView(); for (const auto &p : responseIndex) { CBlockIndex *pindex = p.first; const Vote &v = p.second; auto it = w->find(pindex); if (it == w.end()) { // We are not voting on that item anymore. continue; } auto &vr = it->second; if (!vr.registerVote(nodeid, v.GetError())) { // This vote did not provide any extra information, move on. continue; } if (!vr.hasFinalized()) { // This item has note been finalized, so we have nothing more to // do. updates.emplace_back( pindex, vr.isAccepted() ? BlockUpdate::Status::Accepted : BlockUpdate::Status::Rejected); continue; } // We just finalized a vote. If it is valid, then let the caller // know. Either way, remove the item from the map. updates.emplace_back(pindex, vr.isAccepted() ? 
BlockUpdate::Status::Finalized : BlockUpdate::Status::Invalid); w->erase(it); } } return true; } bool Processor::addNode(NodeId nodeid, const Proof &proof, const Delegation &delegation) { LOCK(cs_peerManager); return peerManager->addNode(nodeid, proof, delegation); } bool Processor::forNode(NodeId nodeid, std::function func) const { LOCK(cs_peerManager); return peerManager->forNode(nodeid, std::move(func)); } CPubKey Processor::getSessionPubKey() const { return sessionKey.GetPubKey(); } uint256 Processor::buildLocalSighash(CNode *pfrom) const { CHashWriter hasher(SER_GETHASH, 0); hasher << peerData->delegation.getId(); hasher << pfrom->GetLocalNonce(); hasher << pfrom->nRemoteHostNonce; hasher << pfrom->GetLocalExtraEntropy(); hasher << pfrom->nRemoteExtraEntropy; return hasher.GetHash(); } uint256 Processor::buildRemoteSighash(CNode *pfrom) const { CHashWriter hasher(SER_GETHASH, 0); hasher << pfrom->m_avalanche_state->delegation.getId(); hasher << pfrom->nRemoteHostNonce; hasher << pfrom->GetLocalNonce(); hasher << pfrom->nRemoteExtraEntropy; hasher << pfrom->GetLocalExtraEntropy(); return hasher.GetHash(); } bool Processor::sendHello(CNode *pfrom) const { if (!peerData) { // We do not have a delegation to advertise. return false; } // Now let's sign! SchnorrSig sig; { const uint256 hash = buildLocalSighash(pfrom); if (!sessionKey.SignSchnorr(hash, sig)) { return false; } } connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()) .Make(NetMsgType::AVAHELLO, Hello(peerData->delegation, sig))); return true; } const Proof Processor::getProof() const { if (!peerData) { throw std::runtime_error("proof not set"); } return peerData->proof; } bool Processor::startEventLoop(CScheduler &scheduler) { return eventLoop.startEventLoop( scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP); } bool Processor::stopEventLoop() { return eventLoop.stopEventLoop(); } std::vector Processor::getInvsForNextPoll(bool forPoll) { std::vector invs; // First remove all blocks that are not worth polling. { LOCK(cs_main); auto w = vote_records.getWriteView(); for (auto it = w->begin(); it != w->end();) { const CBlockIndex *pindex = it->first; if (!IsWorthPolling(pindex)) { w->erase(it++); } else { ++it; } } } auto r = vote_records.getReadView(); for (const std::pair &p : reverse_iterate(r)) { // Check if we can run poll. const bool shouldPoll = forPoll ? p.second.registerPoll() : p.second.shouldPoll(); if (!shouldPoll) { continue; } // We don't have a decision, we need more votes. invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash()); if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) { // Make sure we do not produce more invs than specified by the // protocol. return invs; } } return invs; } NodeId Processor::getSuitableNodeToQuery() { LOCK(cs_peerManager); return peerManager->selectNode(); } void Processor::clearTimedoutRequests() { auto now = std::chrono::steady_clock::now(); std::map timedout_items{}; { // Clear expired requests. auto w = queries.getWriteView(); auto it = w->get().begin(); while (it != w->get().end() && it->timeout < now) { for (const auto &i : it->invs) { timedout_items[i]++; } w->get().erase(it++); } } if (timedout_items.empty()) { return; } // In flight request accounting. 
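clearTimedoutRequests depends on the query set being indexed two ways: by (nodeid, round) so an incoming response can find its query, and by timeout so expired entries can be swept from the front. The self-contained sketch below reproduces that shape with boost::multi_index under simplified types (invs reduced to plain integers, clearTimedout introduced here as an illustrative helper); it is a sketch of the design, not the exact QuerySet declaration from processor.h.

// Sketch of a query set indexed by (nodeid, round) and by timeout, and of
// sweeping expired queries from the front of the timeout-ordered index.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <vector>

using NodeId = int64_t;
using TimePoint = std::chrono::steady_clock::time_point;

struct Query {
    NodeId nodeid;
    uint64_t round;
    TimePoint timeout;
    mutable std::vector<uint32_t> invs; // simplified stand-in for the CInv list
};

struct query_timeout {};

namespace bmi = boost::multi_index;

using QuerySet = boost::multi_index_container<
    Query,
    bmi::indexed_by<
        // look up by (nodeid, round) when a response arrives
        bmi::hashed_unique<bmi::composite_key<
            Query, bmi::member<Query, NodeId, &Query::nodeid>,
            bmi::member<Query, uint64_t, &Query::round>>>,
        // iterate in timeout order to expire old queries cheaply
        bmi::ordered_non_unique<
            bmi::tag<query_timeout>,
            bmi::member<Query, TimePoint, &Query::timeout>>>>;

// Mirrors the shape of clearTimedoutRequests(): walk the timeout index from
// the front and erase everything that expired before `now`.
static size_t clearTimedout(QuerySet &queries, TimePoint now) {
    size_t cleared = 0;
    auto &byTimeout = queries.get<query_timeout>();
    auto it = byTimeout.begin();
    while (it != byTimeout.end() && it->timeout < now) {
        it = byTimeout.erase(it);
        ++cleared;
    }
    return cleared;
}

int main() {
    QuerySet queries;
    const auto now = std::chrono::steady_clock::now();
    queries.insert(Query{/*nodeid=*/1, /*round=*/1, now - std::chrono::seconds(1), {}});
    queries.insert(Query{/*nodeid=*/2, /*round=*/2, now + std::chrono::seconds(10), {}});
    std::cout << clearTimedout(queries, now) << "\n"; // prints 1: one query expired
    return 0;
}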
for (const auto &p : timedout_items) { const CInv &inv = p.first; assert(inv.type == MSG_BLOCK); CBlockIndex *pindex; { LOCK(cs_main); pindex = LookupBlockIndex(BlockHash(inv.hash)); if (!pindex) { continue; } } auto w = vote_records.getWriteView(); auto it = w->find(pindex); if (it == w.end()) { continue; } it->second.clearInflightRequest(p.second); } } void Processor::runEventLoop() { // First things first, check if we have requests that timed out and clear // them. clearTimedoutRequests(); // Make sure there is at least one suitable node to query before gathering // invs. NodeId nodeid = getSuitableNodeToQuery(); if (nodeid == NO_NODE) { return; } std::vector invs = getInvsForNextPoll(); if (invs.empty()) { return; } do { /** * If we lost contact to that node, then we remove it from nodeids, but * never add the request to queries, which ensures bad nodes get cleaned * up over time. */ bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) { uint64_t current_round = round++; { // Compute the time at which this requests times out. auto timeout = std::chrono::steady_clock::now() + queryTimeoutDuration; // Register the query. queries.getWriteView()->insert( {pnode->GetId(), current_round, timeout, invs}); // Set the timeout. LOCK(cs_peerManager); peerManager->updateNextRequestTime(pnode->GetId(), timeout); } // Send the query to the node. connman->PushMessage( pnode, CNetMsgMaker(pnode->GetCommonVersion()) .Make(NetMsgType::AVAPOLL, Poll(current_round, std::move(invs)))); return true; }); // Success! if (hasSent) { return; } { // This node is obsolete, delete it. LOCK(cs_peerManager); peerManager->removeNode(nodeid); } // Get next suitable node to try again nodeid = getSuitableNodeToQuery(); } while (nodeid != NO_NODE); } std::vector Processor::getPeers() const { LOCK(cs_peerManager); return peerManager->getPeers(); } std::vector Processor::getNodeIdsForPeer(PeerId peerId) const { LOCK(cs_peerManager); return peerManager->getNodeIdsForPeer(peerId); } } // namespace avalanche diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h index d119ba64c..a7f8da3db 100644 --- a/src/avalanche/processor.h +++ b/src/avalanche/processor.h @@ -1,331 +1,337 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PROCESSOR_H #define BITCOIN_AVALANCHE_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class Config; class CBlockIndex; class CScheduler; class PeerManager; using NodePeerManager = PeerManager; /** * Is avalanche enabled by default. */ static constexpr bool AVALANCHE_DEFAULT_ENABLED = false; /** * Finalization score. */ static constexpr int AVALANCHE_FINALIZATION_SCORE = 128; /** * Maximum item that can be polled at once. */ static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16; /** * Avalanche default cooldown in milliseconds. */ static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN = 100; /** * How long before we consider that a query timed out. */ static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{ 10000}; /** * How many inflight requests can exist for one item. */ static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10; /** * How many UTXOs can be used for a single proof. 
*/ static constexpr int AVALANCHE_MAX_PROOF_STAKES = 1000; namespace avalanche { class Delegation; class PeerManager; class Proof; /** * Vote history. */ struct VoteRecord { private: // confidence's LSB bit is the result. Higher bits are actual confidence // score. uint16_t confidence = 0; // Historical record of votes. uint8_t votes = 0; // Each bit indicates if the vote is to be considered. uint8_t consider = 0; // How many in flight requests exist for this element. mutable std::atomic inflight{0}; // Seed for pseudorandom operations. const uint32_t seed = 0; // Track how many successful votes occurred. uint32_t successfulVotes = 0; // Track the nodes which are part of the quorum. std::array nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}}; public: explicit VoteRecord(bool accepted) : confidence(accepted) {} /** * Copy semantics */ VoteRecord(const VoteRecord &other) : confidence(other.confidence), votes(other.votes), consider(other.consider), inflight(other.inflight.load()), successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) { } /** * Vote accounting facilities. */ bool isAccepted() const { return confidence & 0x01; } uint16_t getConfidence() const { return confidence >> 1; } bool hasFinalized() const { return getConfidence() >= AVALANCHE_FINALIZATION_SCORE; } /** * Register a new vote for an item and update confidence accordingly. * Returns true if the acceptance or finalization state changed. */ bool registerVote(NodeId nodeid, uint32_t error); /** * Register that a request is being made regarding that item. * The method is made const so that it can be accessed via a read only view * of vote_records. It's not a problem as it is made thread safe. */ bool registerPoll() const; /** * Return whether this item is in a condition to be polled at the moment. */ bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; } /** * Clear `count` inflight requests. */ void clearInflightRequest(uint8_t count = 1) { inflight -= count; } private: /** * Add the node to the quorum. * Returns true if the node was added, false if the node already was in the * quorum. */ bool addNodeToQuorum(NodeId nodeid); }; class BlockUpdate { union { CBlockIndex *pindex; uintptr_t raw; }; static const size_t STATUS_BITS = 2; static const uintptr_t MASK = (1 << STATUS_BITS) - 1; static_assert( alignof(CBlockIndex) >= (1 << STATUS_BITS), "CBlockIndex alignment doesn't allow for Status to be stored."); public: enum Status : uint8_t { Invalid, Rejected, Accepted, Finalized, }; BlockUpdate(CBlockIndex *pindexIn, Status statusIn) : pindex(pindexIn) { raw |= statusIn; } Status getStatus() const { return Status(raw & MASK); } CBlockIndex *getBlockIndex() { return reinterpret_cast(raw & ~MASK); } const CBlockIndex *getBlockIndex() const { return const_cast(this)->getBlockIndex(); } }; using BlockVoteMap = std::map; struct query_timeout {}; namespace { struct AvalancheTest; } class Processor { CConnman *connman; NodePeerManager *nodePeerManager; std::chrono::milliseconds queryTimeoutDuration; /** * Blocks to run avalanche on. */ RWCollection vote_records; /** * Keep track of peers and queries sent. */ std::atomic round; /** * Keep track of the peers and associated info. */ mutable Mutex cs_peerManager; std::unique_ptr peerManager GUARDED_BY(cs_peerManager); struct Query { NodeId nodeid; uint64_t round; TimePoint timeout; /** * We declare this as mutable so it can be modified in the multi_index. * This is ok because we do not use this field to index in any way. * * /!\ Do not use any mutable field as index.
*/ mutable std::vector invs; }; using QuerySet = boost::multi_index_container< Query, boost::multi_index::indexed_by< // index by nodeid/round boost::multi_index::hashed_unique, boost::multi_index::member>>, // sorted by timeout boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::member>>>; RWCollection queries; /** Data required to participate. */ struct PeerData; std::unique_ptr peerData; CKey sessionKey; /** Event loop machinery. */ EventLoop eventLoop; /** Registered interfaces::Chain::Notifications handler. */ class NotificationsHandler; std::unique_ptr chainNotificationsHandler; + /** + * Flag indicating that the proof must be registered at first new block + * after IBD + */ + bool mustRegisterProof = false; + public: Processor(interfaces::Chain &chain, CConnman *connmanIn, NodePeerManager *nodePeerManagerIn); ~Processor(); void setQueryTimeoutDuration(std::chrono::milliseconds d) { queryTimeoutDuration = d; } bool addBlockToReconcile(const CBlockIndex *pindex); bool isAccepted(const CBlockIndex *pindex) const; int getConfidence(const CBlockIndex *pindex) const; // TDOD: Refactor the API to remove the dependency on avalanche/protocol.h void sendResponse(CNode *pfrom, Response response) const; bool registerVotes(NodeId nodeid, const Response &response, std::vector &updates); bool addNode(NodeId nodeid, const Proof &proof, const Delegation &delegation); bool forNode(NodeId nodeid, std::function func) const; CPubKey getSessionPubKey() const; bool sendHello(CNode *pfrom) const; /** * Build and return the challenge whose signature we expect a peer to * include in his AVAHELLO message. */ uint256 buildRemoteSighash(CNode *pfrom) const; /** * Get the local proof used by this node. * * @returns Proof for this node. * @throws a std::runtime_error if there is no proof set for this node */ const Proof getProof() const; std::vector getPeers() const; std::vector getNodeIdsForPeer(PeerId peerId) const; bool startEventLoop(CScheduler &scheduler); bool stopEventLoop(); private: void runEventLoop(); void clearTimedoutRequests(); std::vector getInvsForNextPoll(bool forPoll = true); NodeId getSuitableNodeToQuery(); /** * Build and return the challenge whose signature is included in the * AVAHELLO message that we send to a peer. */ uint256 buildLocalSighash(CNode *pfrom) const; friend struct ::avalanche::AvalancheTest; }; } // namespace avalanche /** * Global avalanche instance. */ extern std::unique_ptr g_avalanche; #endif // BITCOIN_AVALANCHE_PROCESSOR_H diff --git a/test/functional/abc_p2p_avalanche.py b/test/functional/abc_p2p_avalanche.py index 67f9d9a27..7f71d1928 100755 --- a/test/functional/abc_p2p_avalanche.py +++ b/test/functional/abc_p2p_avalanche.py @@ -1,474 +1,488 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the resolution of forks via avalanche.""" import random from typing import List, Dict from test_framework.key import ( ECKey, ECPubKey, ) from test_framework.mininode import P2PInterface, mininode_lock from test_framework.messages import ( AvalancheResponse, AvalancheVote, CInv, msg_avapoll, msg_tcpavaresponse, NODE_AVALANCHE, NODE_NETWORK, TCPAvalancheResponse, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, wait_until, ) AVALANCHE_MAX_PROOF_STAKES = 1000 BLOCK_ACCEPTED = 0 BLOCK_INVALID = 1 BLOCK_PARKED = 2 BLOCK_FORK = 3 BLOCK_UNKNOWN = -1 BLOCK_MISSING = -2 BLOCK_PENDING = -3 QUORUM_NODE_COUNT = 16 class TestNode(P2PInterface): def __init__(self): self.round = 0 self.avahello = None self.avaresponses = [] self.avapolls = [] super().__init__() def peer_connect(self, *args, **kwargs): create_conn = super().peer_connect(*args, **kwargs) # Save the nonce and extra entropy so they can be reused later. self.local_nonce = self.on_connection_send_msg.nNonce self.local_extra_entropy = self.on_connection_send_msg.nExtraEntropy return create_conn def on_version(self, message): super().on_version(message) # Save the nonce and extra entropy so they can be reused later. self.remote_nonce = message.nNonce self.remote_extra_entropy = message.nExtraEntropy def on_avaresponse(self, message): with mininode_lock: self.avaresponses.append(message.response) def on_avapoll(self, message): with mininode_lock: self.avapolls.append(message.poll) def on_avahello(self, message): with mininode_lock: assert(self.avahello is None) self.avahello = message def send_avaresponse(self, round, votes, privkey): response = AvalancheResponse(round, 0, votes) sig = privkey.sign_schnorr(response.get_hash()) msg = msg_tcpavaresponse() msg.response = TCPAvalancheResponse(response, sig) self.send_message(msg) def wait_for_avaresponse(self, timeout=5): wait_until( lambda: len(self.avaresponses) > 0, timeout=timeout, lock=mininode_lock) with mininode_lock: return self.avaresponses.pop(0) def send_poll(self, hashes): msg = msg_avapoll() msg.poll.round = self.round self.round += 1 for h in hashes: msg.poll.invs.append(CInv(2, h)) self.send_message(msg) def get_avapoll_if_available(self): with mininode_lock: return self.avapolls.pop(0) if len(self.avapolls) > 0 else None def wait_for_avahello(self, timeout=5): wait_until( lambda: self.avahello is not None, timeout=timeout, lock=mininode_lock) with mininode_lock: return self.avahello def get_stakes(coinbases: List[Dict], priv_key: str) -> List[Dict]: return [{ 'txid': coinbase['txid'], 'vout': coinbase['n'], 'amount': coinbase['value'], 'height': coinbase['height'], 'iscoinbase': True, 'privatekey': priv_key, } for coinbase in coinbases] class AvalancheTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [ ['-enableavalanche=1', '-avacooldown=0'], ['-enableavalanche=1', '-avacooldown=0', '-noparkdeepreorg', '-maxreorgdepth=-1']] self.supports_cli = False def run_test(self): node = self.nodes[0] self.log.info("Check the node is signalling the avalanche service.") assert_equal( int(node.getnetworkinfo()['localservices'], 16) & NODE_AVALANCHE, NODE_AVALANCHE) # Build a fake quorum of nodes. def get_node(): n = TestNode() node.add_p2p_connection( n, services=NODE_NETWORK | NODE_AVALANCHE) n.wait_for_verack() # Get our own node id so we can use it later. 
n.nodeid = node.getpeerinfo()[-1]['id'] return n def get_quorum(): return [get_node() for _ in range(0, QUORUM_NODE_COUNT)] # Pick on node from the quorum for polling. quorum = get_quorum() poll_node = quorum[0] # Generate many block and poll for them. addrkey0 = node.get_deterministic_priv_key() blocks = node.generatetoaddress(100, addrkey0.address) def get_coinbase(h): b = node.getblock(h, 2) return { 'height': b['height'], 'txid': b['tx'][0]['txid'], 'n': 0, 'value': b['tx'][0]['vout'][0]['value'], } coinbases = [get_coinbase(h) for h in blocks] fork_node = self.nodes[1] # Make sure the fork node has synced the blocks self.sync_blocks([node, fork_node]) # Get the key so we can verify signatures. avakey = ECPubKey() avakey.set(bytes.fromhex(node.getavalanchekey())) self.log.info("Poll for the chain tip...") best_block_hash = int(node.getbestblockhash(), 16) poll_node.send_poll([best_block_hash]) def assert_response(expected): response = poll_node.wait_for_avaresponse() r = response.response assert_equal(r.cooldown, 0) # Verify signature. assert avakey.verify_schnorr(response.sig, r.get_hash()) votes = r.votes assert_equal(len(votes), len(expected)) for i in range(0, len(votes)): assert_equal(repr(votes[i]), repr(expected[i])) assert_response([AvalancheVote(BLOCK_ACCEPTED, best_block_hash)]) self.log.info("Poll for a selection of blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(1), 16), int(node.getblockhash(10), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), int(node.getblockhash(96), 16), int(node.getblockhash(99), 16), int(node.getblockhash(100), 16), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes]) self.log.info( "Poll for a selection of blocks, but some are now invalid...") invalidated_block = node.getblockhash(76) node.invalidateblock(invalidated_block) # We need to send the coin to a new address in order to make sure we do # not regenerate the same block. 
node.generatetoaddress( 26, 'bchreg:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v7ej0fffv') node.reconsiderblock(invalidated_block) poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] + [AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[-3:]]) self.log.info("Poll for unknown blocks...") various_block_hashes = [ int(node.getblockhash(0), 16), int(node.getblockhash(25), 16), int(node.getblockhash(42), 16), various_block_hashes[5], various_block_hashes[6], various_block_hashes[7], random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), random.randrange(1 << 255, (1 << 256) - 1), ] poll_node.send_poll(various_block_hashes) assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] + [AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[3:6]] + [AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]]) self.log.info("Trigger polling from the node...") # duplicate the deterministic sig test from src/test/key_tests.cpp privkey = ECKey() privkey.set(bytes.fromhex( "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"), True) pubkey = privkey.get_pubkey() proof_sequence = 11 proof_expiration = 12 proof = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), [{ 'txid': coinbases[0]['txid'], 'vout': coinbases[0]['n'], 'amount': coinbases[0]['value'], 'height': coinbases[0]['height'], 'iscoinbase': True, 'privatekey': addrkey0.key, }]) # Activate the quorum. for n in quorum: success = node.addavalanchenode( n.nodeid, pubkey.get_bytes().hex(), proof) assert success is True self.log.info("Testing getavalanchepeerinfo...") avapeerinfo = node.getavalanchepeerinfo() # There is a single peer because all nodes share the same proof. assert_equal(len(avapeerinfo), 1) assert_equal(avapeerinfo[0]["peerid"], 0) assert_equal(avapeerinfo[0]["nodecount"], len(quorum)) # The first avalanche node index is 1, because 0 is self.nodes[1]. assert_equal(sorted(avapeerinfo[0]["nodes"]), list(range(1, QUORUM_NODE_COUNT + 1))) assert_equal(avapeerinfo[0]["sequence"], proof_sequence) assert_equal(avapeerinfo[0]["expiration"], proof_expiration) assert_equal(avapeerinfo[0]["master"], pubkey.get_bytes().hex()) assert_equal(avapeerinfo[0]["proof"], proof) assert_equal(len(avapeerinfo[0]["stakes"]), 1) assert_equal(avapeerinfo[0]["stakes"][0]["txid"], coinbases[0]['txid']) def can_find_block_in_poll(hash, resp=BLOCK_ACCEPTED): found_hash = False for n in quorum: poll = n.get_avapoll_if_available() # That node has not received a poll if poll is None: continue # We got a poll, check for the hash and repond votes = [] for inv in poll.invs: # Vote yes to everything r = BLOCK_ACCEPTED # Look for what we expect if inv.hash == hash: r = resp found_hash = True votes.append(AvalancheVote(r, inv.hash)) n.send_avaresponse(poll.round, votes, privkey) return found_hash # Now that we have a peer, we should start polling for the tip. hash_tip = int(node.getbestblockhash(), 16) wait_until(lambda: can_find_block_in_poll(hash_tip), timeout=5) # Make sure the fork node has synced the blocks self.sync_blocks([node, fork_node]) # Create a fork 2 blocks deep. This should trigger polling. fork_node.invalidateblock(fork_node.getblockhash(100)) fork_address = fork_node.get_deterministic_priv_key().address fork_node.generatetoaddress(2, fork_address) # Because the new tip is a deep reorg, the node will not accept it # right away, but poll for it. 
def parked_block(blockhash): for tip in node.getchaintips(): if tip["hash"] == blockhash: assert tip["status"] != "active" return tip["status"] == "parked" return False fork_tip = fork_node.getbestblockhash() wait_until(lambda: parked_block(fork_tip)) self.log.info("Answer all polls to finalize...") hash_to_find = int(fork_tip, 16) def has_accepted_new_tip(): can_find_block_in_poll(hash_to_find) return node.getbestblockhash() == fork_tip # Because everybody answers yes, the node will accept that block. wait_until(has_accepted_new_tip, timeout=15) assert_equal(node.getbestblockhash(), fork_tip) self.log.info("Answer all polls to park...") node.generate(1) tip_to_park = node.getbestblockhash() hash_to_find = int(tip_to_park, 16) assert(tip_to_park != fork_tip) def has_parked_new_tip(): can_find_block_in_poll(hash_to_find, BLOCK_PARKED) return node.getbestblockhash() == fork_tip # Because everybody answers no, the node will park that block. wait_until(has_parked_new_tip, timeout=15) assert_equal(node.getbestblockhash(), fork_tip) - # Restart the node and rebuild the quorum + # Restart the node + minchainwork = int(node.getblockchaininfo()["chainwork"], 16) + 1 self.restart_node(0, self.extra_args[0] + [ "-avaproof={}".format(proof), "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", + "-minimumchainwork=0x{:x}".format(minchainwork), ]) + self.log.info( + "The proof verification should be delayed until IBD is complete") + assert node.getblockchaininfo()["initialblockdownload"] is True + # Our proof cannot be verified during IBD, so we should have no peer + assert not node.getavalanchepeerinfo() + # Mining a few more blocks should cause us to leave IBD + node.generate(2) + # Our proof is now verified and our node is added as a peer + assert node.getblockchaininfo()["initialblockdownload"] is False + assert_equal(len(node.getavalanchepeerinfo()), 1) + + # Rebuild the quorum + self.log.info("Test the avahello signature") quorum = get_quorum() poll_node = quorum[0] - # Check the avahello is consistent avahello = poll_node.wait_for_avahello().hello avakey.set(bytes.fromhex(node.getavalanchekey())) assert avakey.verify_schnorr( avahello.sig, avahello.get_sighash(poll_node)) # Check the maximum number of stakes policy blocks = node.generatetoaddress(AVALANCHE_MAX_PROOF_STAKES + 1, addrkey0.address) too_many_coinbases = [get_coinbase(h) for h in blocks] too_many_stakes = get_stakes(too_many_coinbases, addrkey0.key) self.log.info( "A proof using the maximum number of stakes is accepted...") maximum_stakes = get_stakes(too_many_coinbases[:-1], addrkey0.key) good_proof = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), maximum_stakes) node.addavalanchenode( get_node().nodeid, pubkey.get_bytes().hex(), good_proof) self.log.info("A proof using too many stakes should be rejected...") bad_proof = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), too_many_stakes) assert_raises_rpc_error(-32602, "Avalanche proof has too many UTXOs", node.addavalanchenode, get_node().nodeid, pubkey.get_bytes().hex(), bad_proof) self.log.info("Bad proof should be rejected at startup") no_stake = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), []) dust = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), [{ 'txid': coinbases[0]['txid'], 'vout': coinbases[0]['n'], 'amount': '0', 'height': coinbases[0]['height'], 'iscoinbase': True, 'privatekey': addrkey0.key, }]) 
duplicate_stake = node.buildavalancheproof( proof_sequence, proof_expiration, pubkey.get_bytes().hex(), [{ 'txid': coinbases[0]['txid'], 'vout': coinbases[0]['n'], 'amount': coinbases[0]['value'], 'height': coinbases[0]['height'], 'iscoinbase': True, 'privatekey': addrkey0.key, }] * 2) bad_sig = ("0b000000000000000c0000000000000021030b4c866585dd868a9d62348" "a9cd008d6a312937048fff31670e7e920cfc7a7440105c5f72f5d6da3085" "583e75ee79340eb4eff208c89988e7ed0efb30b87298fa30000000000f20" "52a0100000003000000210227d85ba011276cf25b51df6a188b75e604b3" "8770a462b2d0e9fb2fc839ef5d3faf07f001dd38e9b4a43d07d5d449cc0" "f7d2888d96b82962b3ce516d1083c0e031773487fc3c4f2e38acd1db974" "1321b91a79b82d1c2cfd47793261e4ba003cf5") self.stop_node(0) def check_proof_init_error(proof, message): node.assert_start_raises_init_error( self.extra_args[0] + [ "-avaproof={}".format(proof), "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN", ], expected_msg="Error: " + message, ) check_proof_init_error(no_stake, "the avalanche proof has no stake") check_proof_init_error(dust, "the avalanche proof stake is too low") check_proof_init_error(duplicate_stake, "the avalanche proof has duplicated stake") check_proof_init_error(bad_sig, "the avalanche proof has invalid stake signatures") if __name__ == '__main__': AvalancheTest().main()
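The avahello signature checked in the test above covers a challenge both ends of a connection can compute: the delegation id followed by the connection nonces and extra entropy, fed in local-then-remote order by the sender (buildLocalSighash) and in remote-then-local order by the receiver (buildRemoteSighash). The sketch below demonstrates only that symmetry; combine() is a toy stand-in for CHashWriter and ConnSide a simplified view of the per-connection state, both introduced for this example.

// Sketch of the mirrored sighash construction behind the avahello check:
// each side feeds its own and the remote values in swapped order, so the
// sender's local challenge equals the receiver's remote challenge.
#include <cassert>
#include <cstdint>
#include <functional>

struct ConnSide {
    uint64_t delegationId; // simplified: the real code hashes a DelegationId
    uint64_t localNonce;
    uint64_t remoteNonce;
    uint64_t localEntropy;
    uint64_t remoteEntropy;
};

static uint64_t combine(uint64_t seed, uint64_t v) {
    // Toy stand-in for feeding values into CHashWriter.
    return seed ^ (std::hash<uint64_t>{}(v) + 0x9e3779b97f4a7c15ULL +
                   (seed << 6) + (seed >> 2));
}

// What the sender signs (buildLocalSighash): local values first.
static uint64_t localSighash(const ConnSide &c) {
    uint64_t h = combine(0, c.delegationId);
    h = combine(h, c.localNonce);
    h = combine(h, c.remoteNonce);
    h = combine(h, c.localEntropy);
    return combine(h, c.remoteEntropy);
}

// What the receiver expects (buildRemoteSighash): same fields, roles swapped.
static uint64_t remoteSighash(const ConnSide &c) {
    uint64_t h = combine(0, c.delegationId);
    h = combine(h, c.remoteNonce);
    h = combine(h, c.localNonce);
    h = combine(h, c.remoteEntropy);
    return combine(h, c.localEntropy);
}

int main() {
    // The same connection seen from both ends: nonces and entropy are swapped.
    ConnSide alice{42, 1, 2, 3, 4};
    ConnSide bob{42, 2, 1, 4, 3};
    // Alice's local challenge is exactly what Bob reconstructs as remote.
    assert(localSighash(alice) == remoteSighash(bob));
    assert(localSighash(bob) == remoteSighash(alice));
    return 0;
}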