diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h
--- a/src/avalanche/processor.h
+++ b/src/avalanche/processor.h
@@ -6,6 +6,7 @@
 #define BITCOIN_AVALANCHE_PROCESSOR_H
 
 #include <avalanche/node.h>
+#include <avalanche/proofcomparator.h>
 #include <avalanche/protocol.h>
 #include <blockindexworkcomparator.h>
 #include <eventloop.h>
@@ -91,6 +92,8 @@
 
 using BlockVoteMap =
     std::map<const CBlockIndex *, VoteRecord, CBlockIndexWorkComparator>;
+using ProofVoteMap = std::map<const std::shared_ptr<Proof>, VoteRecord,
+                              ProofSharedPointerComparator>;
 
 struct query_timeout {};
 
@@ -107,6 +110,11 @@
      */
     RWCollection<BlockVoteMap> blockVoteRecords;
 
+    /**
+     * Proofs to run avalanche on.
+     */
+    RWCollection<ProofVoteMap> proofsVoteRecords;
+
     /**
      * Keep track of peers and queries sent.
      */
@@ -176,6 +184,8 @@
     }
 
     bool addBlockToReconcile(const CBlockIndex *pindex);
+    void addProofToReconcile(const std::shared_ptr<Proof> &proof,
+                             bool isAccepted);
     bool isAccepted(const CBlockIndex *pindex) const;
     int getConfidence(const CBlockIndex *pindex) const;
 
diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp
--- a/src/avalanche/processor.cpp
+++ b/src/avalanche/processor.cpp
@@ -261,6 +261,15 @@
         .second;
 }
 
+void Processor::addProofToReconcile(const std::shared_ptr<Proof> &proof,
+                                    bool isAccepted) {
+    // TODO We don't want to accept an infinite number of conflicting proofs.
+    // They should be some rules to make them expensive and/or limited by
+    // design.
+    proofsVoteRecords.getWriteView()->insert(
+        std::make_pair(proof, VoteRecord(isAccepted)));
+}
+
 bool Processor::isAccepted(const CBlockIndex *pindex) const {
     auto r = blockVoteRecords.getReadView();
     auto it = r->find(pindex);
@@ -481,6 +490,18 @@
 std::vector<CInv> Processor::getInvsForNextPoll(bool forPoll) {
     std::vector<CInv> invs;
 
+    auto conflictingProofsReadView = proofsVoteRecords.getReadView();
+
+    auto pit = conflictingProofsReadView.begin();
+    // Clamp to AVALANCHE_MAX_ELEMENT_POLL - 1 so we're always able to poll
+    // for a new block. Since the proofs are sorted by score, the most
+    // valuable are voted first.
+    while (pit != conflictingProofsReadView.end() &&
+           invs.size() < AVALANCHE_MAX_ELEMENT_POLL - 1) {
+        invs.emplace_back(MSG_AVA_PROOF, pit->first->getId());
+        ++pit;
+    }
+
     // First remove all blocks that are not worth polling.
     {
         LOCK(cs_main);
diff --git a/src/avalanche/test/processor_tests.cpp b/src/avalanche/test/processor_tests.cpp
--- a/src/avalanche/test/processor_tests.cpp
+++ b/src/avalanche/test/processor_tests.cpp
@@ -18,6 +18,7 @@
 // when LookupBlockIndex is refactored out of validation
 #include <validation.h>
 
+#include <avalanche/test/util.h>
 #include <test/util/setup_common.h>
 
 #include <boost/test/unit_test.hpp>
@@ -982,4 +983,53 @@
     schedulerThread.join();
 }
 
+BOOST_AUTO_TEST_CASE(add_proof_to_reconcile) {
+    uint32_t score = MIN_VALID_PROOF_SCORE;
+
+    auto addProofToReconcile = [&](uint32_t proofScore) {
+        auto proof = std::make_shared<Proof>(buildRandomProof(proofScore));
+        m_processor->addProofToReconcile(proof, GetRandInt(1));
+        return proof;
+    };
+
+    for (size_t i = 0; i < AVALANCHE_MAX_ELEMENT_POLL - 1; i++) {
+        auto proof = addProofToReconcile(++score);
+
+        auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
+        BOOST_CHECK_EQUAL(invs.size(), i + 1);
+        BOOST_CHECK(invs.front().IsMsgProof());
+        BOOST_CHECK_EQUAL(invs.front().hash, proof->getId());
+    }
+
+    // From here a new proof is only polled if its score is in the top
+    // AVALANCHE_MAX_ELEMENT_POLL - 1
+    ProofId lastProofId;
+    for (size_t i = 0; i < 10; i++) {
+        auto proof = addProofToReconcile(++score);
+
+        auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
+        BOOST_CHECK_EQUAL(invs.size(), AVALANCHE_MAX_ELEMENT_POLL - 1);
+        BOOST_CHECK(invs.front().IsMsgProof());
+        BOOST_CHECK_EQUAL(invs.front().hash, proof->getId());
+
+        lastProofId = proof->getId();
+    }
+
+    for (size_t i = 0; i < 10; i++) {
+        auto proof = addProofToReconcile(--score);
+
+        auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
+        BOOST_CHECK_EQUAL(invs.size(), AVALANCHE_MAX_ELEMENT_POLL - 1);
+        BOOST_CHECK(invs.front().IsMsgProof());
+        BOOST_CHECK_EQUAL(invs.front().hash, lastProofId);
+    }
+
+    // The score is not high enough to get polled
+    auto proof = addProofToReconcile(--score);
+    auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
+    for (auto &inv : invs) {
+        BOOST_CHECK_NE(inv.hash, proof->getId());
+    }
+}
+
 BOOST_AUTO_TEST_SUITE_END()