Page MenuHomePhabricator

D9548.id28547.diff
No OneTemporary

D9548.id28547.diff

diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -150,6 +150,7 @@
dbwrapper_tests.cpp
denialofservice_tests.cpp
descriptor_tests.cpp
+ dnsseeds_tests.cpp
dstencode_tests.cpp
excessiveblock_tests.cpp
feerate_tests.cpp
@@ -196,7 +197,6 @@
script_standard_tests.cpp
script_tests.cpp
scriptnum_tests.cpp
- dnsseeds_tests.cpp
serialize_tests.cpp
settings_tests.cpp
sigcache_tests.cpp
@@ -211,6 +211,7 @@
torcontrol_tests.cpp
transaction_tests.cpp
txindex_tests.cpp
+ txrequest_tests.cpp
txvalidation_tests.cpp
txvalidationcache_tests.cpp
uint256_tests.cpp
diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp
new file mode 100644
--- /dev/null
+++ b/src/test/txrequest_tests.cpp
@@ -0,0 +1,732 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <txrequest.h>
+
+#include <test/util/setup_common.h>
+
+#include <boost/test/unit_test.hpp>
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup)
+
+namespace {
+
+constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
+constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
+constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
+constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};
+
+/** An Action is a function to call at a particular (simulated) timestamp. */
+using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
+
+/**
+ * Object that stores actions from multiple interleaved scenarios, and data
+ * shared across them.
+ *
+ * The Scenario below is used to fill this.
+ */
+struct Runner {
+ /** The TxRequestTracker being tested. */
+ TxRequestTracker txrequest;
+
+ /** List of actions to be executed (in order of increasing timestamp). */
+ std::vector<Action> actions;
+
+ /** Which node ids have been assigned already (to prevent reuse). */
+ std::set<NodeId> peerset;
+
+ /** Which txids have been assigned already (to prevent reuse). */
+ std::set<TxId> txidset;
+};
+
+std::chrono::microseconds RandomTime8s() {
+ return std::chrono::microseconds{1 + InsecureRandBits(23)};
+}
+std::chrono::microseconds RandomTime1y() {
+ return std::chrono::microseconds{1 + InsecureRandBits(45)};
+}
+
+/**
+ * A proxy for a Runner that helps build a sequence of consecutive test actions
+ * on a TxRequestTracker.
+ *
+ * Each Scenario is a proxy through which actions for the (sequential) execution
+ * of various tests are added to a Runner. The actions from multiple scenarios
+ * are then run concurrently, resulting in these tests being performed against a
+ * TxRequestTracker in parallel. Every test has its own unique txids and
+ * NodeIds which are not reused in other tests, and thus they should be
+ * independent from each other. Running them in parallel however means that we
+ * verify the behavior (w.r.t. one test's txids and NodeIds) even when the
+ * state of the data structure is more complicated due to the presence of other
+ * tests.
+ */
+class Scenario {
+ Runner &m_runner;
+ std::chrono::microseconds m_now;
+ std::string m_testname;
+
+public:
+ Scenario(Runner &runner, std::chrono::microseconds starttime)
+ : m_runner(runner), m_now(starttime) {}
+
+ /** Set a name for the current test, to give more clear error messages. */
+ void SetTestName(std::string testname) { m_testname = std::move(testname); }
+
+ /**
+ * Advance this Scenario's time; this affects the timestamps newly
+ * scheduled events get.
+ */
+ void AdvanceTime(std::chrono::microseconds amount) {
+ assert(amount.count() >= 0);
+ m_now += amount;
+ }
+
+ /** Schedule a ForgetTxId call at the Scheduler's current time. */
+ void ForgetTxId(const TxId &txid) {
+ auto &runner = m_runner;
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ runner.txrequest.ForgetTxId(txid);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+ /** Schedule a ReceivedInv call at the Scheduler's current time. */
+ void ReceivedInv(NodeId peer, const TxId &txid, bool pref,
+ std::chrono::microseconds reqtime) {
+ auto &runner = m_runner;
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ runner.txrequest.ReceivedInv(peer, txid, pref, reqtime);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+ /** Schedule a DisconnectedPeer call at the Scheduler's current time. */
+ void DisconnectedPeer(NodeId peer) {
+ auto &runner = m_runner;
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ runner.txrequest.DisconnectedPeer(peer);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+ /** Schedule a RequestedTx call at the Scheduler's current time. */
+ void RequestedTx(NodeId peer, const TxId &txid,
+ std::chrono::microseconds exptime) {
+ auto &runner = m_runner;
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ runner.txrequest.RequestedTx(peer, txid, exptime);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+ /** Schedule a ReceivedResponse call at the Scheduler's current time. */
+ void ReceivedResponse(NodeId peer, const TxId &txid) {
+ auto &runner = m_runner;
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ runner.txrequest.ReceivedResponse(peer, txid);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+ /**
+ * Schedule calls to verify the TxRequestTracker's state at the Scheduler's
+ * current time.
+ *
+ * @param peer The peer whose state will be inspected.
+ * @param expected The expected return value for GetRequestable(peer)
+ * @param candidates The expected return value of CountCandidates(peer)
+ * @param inflight The expected return value CountInFlight(peer)
+ * @param completed The expected return value of Count(peer), minus
+ * candidates and inflight.
+ * @param checkname An arbitrary string to include in error messages, for
+ * test identification.
+ * @param offset Offset with the current time to use (must be <= 0).
+ * This allows simulations of time going backwards (but note that the
+ * ordering of this event only follows the scenario's m_now).
+ */
+ void
+ Check(NodeId peer, const std::vector<TxId> &expected, size_t candidates,
+ size_t inflight, size_t completed, const std::string &checkname,
+ std::chrono::microseconds offset = std::chrono::microseconds{0}) {
+ const auto comment = m_testname + " " + checkname;
+ auto &runner = m_runner;
+ const auto now = m_now;
+ assert(offset.count() <= 0);
+ runner.actions.emplace_back(m_now, [=, &runner]() {
+ auto ret = runner.txrequest.GetRequestable(peer, now + offset);
+ runner.txrequest.SanityCheck();
+ runner.txrequest.PostGetRequestableSanityCheck(now + offset);
+ size_t total = candidates + inflight + completed;
+ size_t real_total = runner.txrequest.Count(peer);
+ size_t real_candidates = runner.txrequest.CountCandidates(peer);
+ size_t real_inflight = runner.txrequest.CountInFlight(peer);
+ BOOST_CHECK_MESSAGE(
+ real_total == total,
+ strprintf("[" + comment + "] total %i (%i expected)",
+ real_total, total));
+ BOOST_CHECK_MESSAGE(
+ real_inflight == inflight,
+ strprintf("[" + comment + "] inflight %i (%i expected)",
+ real_inflight, inflight));
+ BOOST_CHECK_MESSAGE(
+ real_candidates == candidates,
+ strprintf("[" + comment + "] candidates %i (%i expected)",
+ real_candidates, candidates));
+ BOOST_CHECK_MESSAGE(ret == expected,
+ "[" + comment + "] mismatching requestables");
+ });
+ }
+
+ /**
+ * Generate a random txid, whose priorities for certain peers are
+ * constrained.
+ *
+ * For example, NewTxId({{p1,p2,p3},{p2,p4,p5}}) will generate a txid T
+ * such that both:
+ * - priority(p1,T) > priority(p2,T) > priority(p3,T)
+ * - priority(p2,T) > priority(p4,T) > priority(p5,T)
+ * where priority is the predicted internal TxRequestTracker's priority,
+ * assuming all announcements are within the same preferredness class.
+ */
+ TxId NewTxId(const std::vector<std::vector<NodeId>> &orders = {}) {
+ TxId ret;
+ bool ok;
+ do {
+ ret = TxId(InsecureRand256());
+ ok = true;
+ for (const auto &order : orders) {
+ for (size_t pos = 1; pos < order.size(); ++pos) {
+ uint64_t prio_prev = m_runner.txrequest.ComputePriority(
+ ret, order[pos - 1], true);
+ uint64_t prio_cur = m_runner.txrequest.ComputePriority(
+ ret, order[pos], true);
+ if (prio_prev <= prio_cur) {
+ ok = false;
+ break;
+ }
+ }
+ if (!ok) {
+ break;
+ }
+ }
+ if (ok) {
+ ok = m_runner.txidset.insert(ret).second;
+ }
+ } while (!ok);
+ return ret;
+ }
+
+ /**
+ * Generate a new random NodeId to use as peer. The same NodeId is never
+ * returned twice (across all Scenarios combined).
+ */
+ NodeId NewPeer() {
+ bool ok;
+ NodeId ret;
+ do {
+ ret = InsecureRandBits(63);
+ ok = m_runner.peerset.insert(ret).second;
+ } while (!ok);
+ return ret;
+ }
+
+ std::chrono::microseconds Now() const { return m_now; }
+};
+
+/**
+ * Add to scenario a test with a single tx announced by a single peer.
+ *
+ * config is an integer in [0, 32), which controls which variant of the test is
+ * used.
+ */
+void BuildSingleTest(Scenario &scenario, int config) {
+ auto peer = scenario.NewPeer();
+ auto txid = scenario.NewTxId();
+ bool immediate = config & 1;
+ bool preferred = config & 2;
+ auto delay = immediate ? NO_TIME : RandomTime8s();
+
+ scenario.SetTestName(strprintf("Single(config=%i)", config));
+
+ // Receive an announcement, either immediately requestable or delayed.
+ scenario.ReceivedInv(peer, txid, preferred,
+ immediate ? MIN_TIME : scenario.Now() + delay);
+ if (immediate) {
+ scenario.Check(peer, {txid}, 1, 0, 0, "s1");
+ } else {
+ scenario.Check(peer, {}, 1, 0, 0, "s2");
+ scenario.AdvanceTime(delay - MICROSECOND);
+ scenario.Check(peer, {}, 1, 0, 0, "s3");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {txid}, 1, 0, 0, "s4");
+ }
+
+ if (config >> 3) { // We'll request the transaction
+ scenario.AdvanceTime(RandomTime8s());
+ auto expiry = RandomTime8s();
+ scenario.Check(peer, {txid}, 1, 0, 0, "s5");
+ scenario.RequestedTx(peer, txid, scenario.Now() + expiry);
+ scenario.Check(peer, {}, 0, 1, 0, "s6");
+
+ if ((config >> 3) == 1) { // The request will time out
+ scenario.AdvanceTime(expiry - MICROSECOND);
+ scenario.Check(peer, {}, 0, 1, 0, "s7");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {}, 0, 0, 0, "s8");
+ return;
+ } else {
+ scenario.AdvanceTime(
+ std::chrono::microseconds{InsecureRandRange(expiry.count())});
+ scenario.Check(peer, {}, 0, 1, 0, "s9");
+ if ((config >> 3) ==
+ 3) { // A response will arrive for the transaction
+ scenario.ReceivedResponse(peer, txid);
+ scenario.Check(peer, {}, 0, 0, 0, "s10");
+ return;
+ }
+ }
+ }
+
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ if (config & 4) {
+ // The peer will go offline
+ scenario.DisconnectedPeer(peer);
+ } else {
+ // The transaction is no longer needed
+ scenario.ForgetTxId(txid);
+ }
+ scenario.Check(peer, {}, 0, 0, 0, "s11");
+}
+
+/**
+ * Add to scenario a test with a single tx announced by two peers, to verify
+ * the right peer is selected for requests.
+ *
+ * config is an integer in [0, 32), which controls which variant of the test is
+ * used.
+ */
+void BuildPriorityTest(Scenario &scenario, int config) {
+ scenario.SetTestName(strprintf("Priority(config=%i)", config));
+
+ // Two peers. They will announce in order {peer1, peer2}.
+ auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
+ // Construct a transaction that under random rules would be preferred by
+ // peer2 or peer1, depending on configuration.
+ bool prio1 = config & 1;
+ auto txid = prio1 ? scenario.NewTxId({{peer1, peer2}})
+ : scenario.NewTxId({{peer2, peer1}});
+ bool pref1 = config & 2, pref2 = config & 4;
+
+ scenario.ReceivedInv(peer1, txid, pref1, MIN_TIME);
+ scenario.Check(peer1, {txid}, 1, 0, 0, "p1");
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ scenario.Check(peer1, {txid}, 1, 0, 0, "p2");
+ }
+
+ scenario.ReceivedInv(peer2, txid, pref2, MIN_TIME);
+ bool stage2_prio =
+ // At this point, peer2 will be given priority if:
+ // - It is preferred and peer1 is not
+ (pref2 && !pref1) ||
+ // - They're in the same preference class,
+ // and the randomized priority favors peer2 over peer1.
+ (pref1 == pref2 && !prio1);
+ NodeId priopeer = stage2_prio ? peer2 : peer1,
+ otherpeer = stage2_prio ? peer1 : peer2;
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
+ scenario.Check(priopeer, {txid}, 1, 0, 0, "p4");
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
+ scenario.Check(priopeer, {txid}, 1, 0, 0, "p6");
+
+ // We possibly request from the selected peer.
+ if (config & 8) {
+ scenario.RequestedTx(priopeer, txid, MAX_TIME);
+ scenario.Check(priopeer, {}, 0, 1, 0, "p7");
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ }
+
+ // The peer which was selected (or requested from) now goes offline, or a
+ // NOTFOUND is received from them.
+ if (config & 16) {
+ scenario.DisconnectedPeer(priopeer);
+ } else {
+ scenario.ReceivedResponse(priopeer, txid);
+ }
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8");
+ scenario.Check(otherpeer, {txid}, 1, 0, 0, "p9");
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+
+ // Now the other peer goes offline.
+ scenario.DisconnectedPeer(otherpeer);
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.Check(peer1, {}, 0, 0, 0, "p10");
+ scenario.Check(peer2, {}, 0, 0, 0, "p11");
+}
+
+/**
+ * Add to scenario a randomized test in which N peers announce the same
+ * transaction, to verify the order in which they are requested.
+ */
+void BuildBigPriorityTest(Scenario &scenario, int peers) {
+ scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));
+
+ // We will have N peers announce the same transaction.
+ std::map<NodeId, bool> preferred;
+ std::vector<NodeId> pref_peers, npref_peers;
+ // Some preferred, ...
+ int num_pref = InsecureRandRange(peers + 1);
+ // some not preferred.
+ int num_npref = peers - num_pref;
+ for (int i = 0; i < num_pref; ++i) {
+ pref_peers.push_back(scenario.NewPeer());
+ preferred[pref_peers.back()] = true;
+ }
+ for (int i = 0; i < num_npref; ++i) {
+ npref_peers.push_back(scenario.NewPeer());
+ preferred[npref_peers.back()] = false;
+ }
+ // Make a list of all peers, in order of intended request order
+ // (concatenation of pref_peers and npref_peers).
+ std::vector<NodeId> request_order;
+ for (int i = 0; i < num_pref; ++i) {
+ request_order.push_back(pref_peers[i]);
+ }
+ for (int i = 0; i < num_npref; ++i) {
+ request_order.push_back(npref_peers[i]);
+ }
+
+ // Determine the announcement order randomly.
+ std::vector<NodeId> announce_order = request_order;
+ Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx);
+
+ // Find a txid whose prioritization is consistent with the required
+ // ordering within pref_peers and within npref_peers.
+ auto txid = scenario.NewTxId({pref_peers, npref_peers});
+
+ // Decide reqtimes in opposite order of the expected request order. This
+ // means that as time passes we expect the to-be-requested-from-peer will
+ // change every time a subsequent reqtime is passed.
+ std::map<NodeId, std::chrono::microseconds> reqtimes;
+ auto reqtime = scenario.Now();
+ for (int i = peers - 1; i >= 0; --i) {
+ reqtime += RandomTime8s();
+ reqtimes[request_order[i]] = reqtime;
+ }
+
+ // Actually announce from all peers simultaneously (but in announce_order).
+ for (const auto peer : announce_order) {
+ scenario.ReceivedInv(peer, txid, preferred[peer], reqtimes[peer]);
+ }
+ for (const auto peer : announce_order) {
+ scenario.Check(peer, {}, 1, 0, 0, "b1");
+ }
+
+ // Let time pass and observe the to-be-requested-from peer change, from
+ // nonpreferred to preferred, and from high priority to low priority within
+ // each class.
+ for (int i = peers - 1; i >= 0; --i) {
+ scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() -
+ MICROSECOND);
+ scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(request_order[i], {txid}, 1, 0, 0, "b3");
+ }
+
+ // Peers now in random order go offline, or send NOTFOUNDs. At every point
+ // in time the new to-be-requested-from peer should be the best remaining
+ // one, so verify this after every response.
+ for (int i = 0; i < peers; ++i) {
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ const int pos = InsecureRandRange(request_order.size());
+ const auto peer = request_order[pos];
+ request_order.erase(request_order.begin() + pos);
+ if (InsecureRandBool()) {
+ scenario.DisconnectedPeer(peer);
+ scenario.Check(peer, {}, 0, 0, 0, "b4");
+ } else {
+ scenario.ReceivedResponse(peer, txid);
+ scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
+ }
+ if (request_order.size()) {
+ scenario.Check(request_order[0], {txid}, 1, 0, 0, "b6");
+ }
+ }
+
+ // Everything is gone in the end.
+ for (const auto peer : announce_order) {
+ scenario.Check(peer, {}, 0, 0, 0, "b7");
+ }
+}
+
+/**
+ * Add to scenario a test with one peer announcing two transactions, to verify
+ * they are fetched in announcement order.
+ *
+ * config is an integer in [0, 4), and selects the variant of the
+ * test.
+ */
+void BuildRequestOrderTest(Scenario &scenario, int config) {
+ scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));
+
+ auto peer = scenario.NewPeer();
+ auto txid1 = scenario.NewTxId();
+ auto txid2 = scenario.NewTxId();
+
+ auto reqtime2 = scenario.Now() + RandomTime8s();
+ auto reqtime1 = reqtime2 + RandomTime8s();
+
+ scenario.ReceivedInv(peer, txid1, config & 1, reqtime1);
+ // Simulate time going backwards by giving the second announcement an
+ // earlier reqtime.
+ scenario.ReceivedInv(peer, txid2, config & 2, reqtime2);
+
+ scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
+ scenario.Check(peer, {}, 2, 0, 0, "o1");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {txid2}, 2, 0, 0, "o2");
+ scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
+ scenario.Check(peer, {txid2}, 2, 0, 0, "o3");
+ scenario.AdvanceTime(MICROSECOND);
+ // Even with time going backwards in between announcements, the return value
+ // of GetRequestable is in announcement order.
+ scenario.Check(peer, {txid1, txid2}, 2, 0, 0, "o4");
+
+ scenario.DisconnectedPeer(peer);
+ scenario.Check(peer, {}, 0, 0, 0, "o5");
+}
+
+/** Add to scenario a test that exercises clocks that go backwards. */
+void BuildTimeBackwardsTest(Scenario &scenario) {
+ auto peer1 = scenario.NewPeer();
+ auto peer2 = scenario.NewPeer();
+ auto txid = scenario.NewTxId({{peer1, peer2}});
+
+ // Announce from peer2.
+ auto reqtime = scenario.Now() + RandomTime8s();
+ scenario.ReceivedInv(peer2, txid, true, reqtime);
+ scenario.Check(peer2, {}, 1, 0, 0, "r1");
+ scenario.AdvanceTime(reqtime - scenario.Now());
+ scenario.Check(peer2, {txid}, 1, 0, 0, "r2");
+ // Check that if the clock goes backwards by 1us, the transaction would stop
+ // being requested.
+ scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
+ // But it reverts to being requested if time goes forward again.
+ scenario.Check(peer2, {txid}, 1, 0, 0, "r4");
+
+ // Announce from peer1.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.ReceivedInv(peer1, txid, true, MAX_TIME);
+ scenario.Check(peer2, {txid}, 1, 0, 0, "r5");
+ scenario.Check(peer1, {}, 1, 0, 0, "r6");
+
+ // Request from peer1.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ auto expiry = scenario.Now() + RandomTime8s();
+ scenario.RequestedTx(peer1, txid, expiry);
+ scenario.Check(peer1, {}, 0, 1, 0, "r7");
+ scenario.Check(peer2, {}, 1, 0, 0, "r8");
+
+ // Expiration passes.
+ scenario.AdvanceTime(expiry - scenario.Now());
+ scenario.Check(peer1, {}, 0, 0, 1, "r9");
+ // Request goes back to peer2.
+ scenario.Check(peer2, {txid}, 1, 0, 0, "r10");
+ // Going back does not unexpire.
+ scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND);
+ scenario.Check(peer2, {txid}, 1, 0, 0, "r12", -MICROSECOND);
+
+ // Peer2 goes offline, meaning no viable announcements remain.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.DisconnectedPeer(peer2);
+ scenario.Check(peer1, {}, 0, 0, 0, "r13");
+ scenario.Check(peer2, {}, 0, 0, 0, "r14");
+}
+
+/**
+ * Add to scenario a test that involves RequestedTx() calls for txids not
+ * returned by GetRequestable.
+ */
+void BuildWeirdRequestsTest(Scenario &scenario) {
+ auto peer1 = scenario.NewPeer();
+ auto peer2 = scenario.NewPeer();
+ auto txid1 = scenario.NewTxId({{peer1, peer2}});
+ auto txid2 = scenario.NewTxId({{peer2, peer1}});
+
+ // Announce txid1 by peer1.
+ scenario.ReceivedInv(peer1, txid1, true, MIN_TIME);
+ scenario.Check(peer1, {txid1}, 1, 0, 0, "q1");
+
+ // Announce txid2 by peer2.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.ReceivedInv(peer2, txid2, true, MIN_TIME);
+ scenario.Check(peer1, {txid1}, 1, 0, 0, "q2");
+ scenario.Check(peer2, {txid2}, 1, 0, 0, "q3");
+
+ // We request txid2 from *peer1* - no effect.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.RequestedTx(peer1, txid2, MAX_TIME);
+ scenario.Check(peer1, {txid1}, 1, 0, 0, "q4");
+ scenario.Check(peer2, {txid2}, 1, 0, 0, "q5");
+
+ // Now request txid1 from peer1 - marks it as REQUESTED.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ auto expiryA = scenario.Now() + RandomTime8s();
+ scenario.RequestedTx(peer1, txid1, expiryA);
+ scenario.Check(peer1, {}, 0, 1, 0, "q6");
+ scenario.Check(peer2, {txid2}, 1, 0, 0, "q7");
+
+ // Request it a second time - nothing happens, as it's already REQUESTED.
+ auto expiryB = expiryA + RandomTime8s();
+ scenario.RequestedTx(peer1, txid1, expiryB);
+ scenario.Check(peer1, {}, 0, 1, 0, "q8");
+ scenario.Check(peer2, {txid2}, 1, 0, 0, "q9");
+
+ // Also announce txid1 from peer2 now, so that the txid isn't forgotten
+ // when the peer1 request expires.
+ scenario.ReceivedInv(peer2, txid1, true, MIN_TIME);
+ scenario.Check(peer1, {}, 0, 1, 0, "q10");
+ scenario.Check(peer2, {txid2}, 2, 0, 0, "q11");
+
+ // When reaching expiryA, it expires (not expiryB, which is later).
+ scenario.AdvanceTime(expiryA - scenario.Now());
+ scenario.Check(peer1, {}, 0, 0, 1, "q12");
+ scenario.Check(peer2, {txid2, txid1}, 2, 0, 0, "q13");
+
+ // Requesting it yet again from peer1 doesn't do anything, as it's already
+ // COMPLETED.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.RequestedTx(peer1, txid1, MAX_TIME);
+ scenario.Check(peer1, {}, 0, 0, 1, "q14");
+ scenario.Check(peer2, {txid2, txid1}, 2, 0, 0, "q15");
+
+ // Now announce txid2 from peer1.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.ReceivedInv(peer1, txid2, true, MIN_TIME);
+ scenario.Check(peer1, {}, 1, 0, 1, "q16");
+ scenario.Check(peer2, {txid2, txid1}, 2, 0, 0, "q17");
+
+ // And request it from peer1 (weird as peer2 has the preference).
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.RequestedTx(peer1, txid2, MAX_TIME);
+ scenario.Check(peer1, {}, 0, 1, 1, "q18");
+ scenario.Check(peer2, {txid1}, 2, 0, 0, "q19");
+
+ // If peer2 now (normally) requests txid2, the existing request by peer1
+ // becomes COMPLETED.
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ }
+ scenario.RequestedTx(peer2, txid2, MAX_TIME);
+ scenario.Check(peer1, {}, 0, 0, 2, "q20");
+ scenario.Check(peer2, {txid1}, 1, 1, 0, "q21");
+
+ // If peer2 goes offline, no viable announcements remain.
+ scenario.DisconnectedPeer(peer2);
+ scenario.Check(peer1, {}, 0, 0, 0, "q22");
+ scenario.Check(peer2, {}, 0, 0, 0, "q23");
+}
+
+void TestInterleavedScenarios() {
+ // Create a list of functions which add tests to scenarios.
+ std::vector<std::function<void(Scenario &)>> builders;
+ // Add instances of every test, for every configuration.
+ for (int n = 0; n < 64; ++n) {
+ builders.emplace_back([n](Scenario &scenario) {
+ BuildRequestOrderTest(scenario, n & 3);
+ });
+ builders.emplace_back(
+ [n](Scenario &scenario) { BuildSingleTest(scenario, n & 31); });
+ builders.emplace_back(
+ [n](Scenario &scenario) { BuildPriorityTest(scenario, n & 31); });
+ builders.emplace_back([n](Scenario &scenario) {
+ BuildBigPriorityTest(scenario, (n & 7) + 1);
+ });
+ builders.emplace_back(
+ [](Scenario &scenario) { BuildTimeBackwardsTest(scenario); });
+ builders.emplace_back(
+ [](Scenario &scenario) { BuildWeirdRequestsTest(scenario); });
+ }
+ // Randomly shuffle all those functions.
+ Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx);
+
+ Runner runner;
+ auto starttime = RandomTime1y();
+ // Construct many scenarios, and run (up to) 10 randomly-chosen tests
+ // consecutively in each.
+ while (builders.size()) {
+ // Introduce some variation in the start time of each scenario, so they
+ // don't all start off concurrently, but get a more random interleaving.
+ auto scenario_start =
+ starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
+ Scenario scenario(runner, scenario_start);
+ for (int j = 0; builders.size() && j < 10; ++j) {
+ builders.back()(scenario);
+ builders.pop_back();
+ }
+ }
+ // Sort all the actions from all those scenarios chronologically, resulting
+ // in the actions from distinct scenarios to become interleaved. Use
+ // stable_sort so that actions from one scenario aren't reordered w.r.t.
+ // each other.
+ std::stable_sort(
+ runner.actions.begin(), runner.actions.end(),
+ [](const Action &a1, const Action &a2) { return a1.first < a2.first; });
+
+ // Run all actions from all scenarios, in order.
+ for (auto &action : runner.actions) {
+ action.second();
+ }
+
+ BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
+}
+
+} // namespace
+
+BOOST_AUTO_TEST_CASE(TxRequestTest) {
+ for (int i = 0; i < 5; ++i) {
+ TestInterleavedScenarios();
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txrequest.h b/src/txrequest.h
--- a/src/txrequest.h
+++ b/src/txrequest.h
@@ -239,6 +239,21 @@
* and transaction hashes.
*/
size_t Size() const;
+
+ /** Access to the internal priority computation (testing only) */
+ uint64_t ComputePriority(const TxId &txid, NodeId peer,
+ bool preferred) const;
+
+ /** Run internal consistency check (testing only). */
+ void SanityCheck() const;
+
+ /**
+ * Run a time-dependent internal consistency check (testing only).
+ *
+ * This can only be called immediately after GetRequestable, with the same
+ * 'now' parameter.
+ */
+ void PostGetRequestableSanityCheck(std::chrono::microseconds now) const;
};
#endif // BITCOIN_TXREQUEST_H
diff --git a/src/txrequest.cpp b/src/txrequest.cpp
--- a/src/txrequest.cpp
+++ b/src/txrequest.cpp
@@ -279,6 +279,76 @@
size_t m_requested = 0;
};
+/** Per-txid statistics object. Only used for sanity checking. */
+struct TxIdInfo {
+ //! Number of CANDIDATE_DELAYED announcements for this txid.
+ size_t m_candidate_delayed = 0;
+ //! Number of CANDIDATE_READY announcements for this txid.
+ size_t m_candidate_ready = 0;
+ //! Number of CANDIDATE_BEST announcements for this txid (at most one).
+ size_t m_candidate_best = 0;
+ //! Number of REQUESTED announcements for this txid (at most one; mutually
+ //! exclusive with CANDIDATE_BEST).
+ size_t m_requested = 0;
+ //! The priority of the CANDIDATE_BEST announcement if one exists, or max()
+ //! otherwise.
+ Priority m_priority_candidate_best = std::numeric_limits<Priority>::max();
+ //! The highest priority of all CANDIDATE_READY announcements (or min() if
+ //! none exist).
+ Priority m_priority_best_candidate_ready =
+ std::numeric_limits<Priority>::min();
+ //! All peers we have an announcement for this txid for.
+ std::vector<NodeId> m_peers;
+};
+
+/** Compare two PeerInfo objects. Only used for sanity checking. */
+bool operator==(const PeerInfo &a, const PeerInfo &b) {
+ return std::tie(a.m_total, a.m_completed, a.m_requested) ==
+ std::tie(b.m_total, b.m_completed, b.m_requested);
+};
+
+/**
+ * (Re)compute the PeerInfo map from the index. Only used for sanity checking.
+ */
+std::unordered_map<NodeId, PeerInfo> RecomputePeerInfo(const Index &index) {
+ std::unordered_map<NodeId, PeerInfo> ret;
+ for (const Announcement &ann : index) {
+ PeerInfo &info = ret[ann.m_peer];
+ ++info.m_total;
+ info.m_requested += (ann.GetState() == State::REQUESTED);
+ info.m_completed += (ann.GetState() == State::COMPLETED);
+ }
+ return ret;
+}
+
+/** Compute the TxIdInfo map. Only used for sanity checking. */
+std::map<TxId, TxIdInfo> ComputeTxIdInfo(const Index &index,
+ const PriorityComputer &computer) {
+ std::map<TxId, TxIdInfo> ret;
+ for (const Announcement &ann : index) {
+ TxIdInfo &info = ret[ann.m_txid];
+ // Classify how many announcements of each state we have for this txid.
+ info.m_candidate_delayed +=
+ (ann.GetState() == State::CANDIDATE_DELAYED);
+ info.m_candidate_ready += (ann.GetState() == State::CANDIDATE_READY);
+ info.m_candidate_best += (ann.GetState() == State::CANDIDATE_BEST);
+ info.m_requested += (ann.GetState() == State::REQUESTED);
+ // And track the priority of the best CANDIDATE_READY/CANDIDATE_BEST
+ // announcements.
+ if (ann.GetState() == State::CANDIDATE_BEST) {
+ info.m_priority_candidate_best = computer(ann);
+ }
+ if (ann.GetState() == State::CANDIDATE_READY) {
+ info.m_priority_best_candidate_ready =
+ std::max(info.m_priority_best_candidate_ready, computer(ann));
+ }
+ // Also keep track of which peers this txid has an announcement for
+ // (so we can detect duplicates).
+ info.m_peers.push_back(ann.m_peer);
+ }
+ return ret;
+}
+
} // namespace
/** Actual implementation for TxRequestTracker's data structure. */
@@ -290,12 +360,74 @@
//! This tracker's priority computer.
const PriorityComputer m_computer;
- //! This tracker's main data structure.
+ //! This tracker's main data structure. See SanityCheck() for the invariants
+ //! that apply to it.
Index m_index;
//! Map with this tracker's per-peer statistics.
std::unordered_map<NodeId, PeerInfo> m_peerinfo;
+public:
+ void SanityCheck() const {
+ // Recompute m_peerinfo from m_index. This verifies the data in it as it
+ // should just be caching statistics on m_index. It also verifies the
+ // invariant that no PeerInfo objects with m_total==0 exist.
+ assert(m_peerinfo == RecomputePeerInfo(m_index));
+
+ // Calculate per-txid statistics from m_index, and validate
+ // invariants.
+ for (auto &item : ComputeTxIdInfo(m_index, m_computer)) {
+ TxIdInfo &info = item.second;
+
+ // Cannot have only COMPLETED peer (txid should have been forgotten
+ // already)
+ assert(info.m_candidate_delayed + info.m_candidate_ready +
+ info.m_candidate_best + info.m_requested >
+ 0);
+
+ // Can have at most 1 CANDIDATE_BEST/REQUESTED peer
+ assert(info.m_candidate_best + info.m_requested <= 1);
+
+ // If there are any CANDIDATE_READY announcements, there must be
+ // exactly one CANDIDATE_BEST or REQUESTED announcement.
+ if (info.m_candidate_ready > 0) {
+ assert(info.m_candidate_best + info.m_requested == 1);
+ }
+
+ // If there is both a CANDIDATE_READY and a CANDIDATE_BEST
+ // announcement, the CANDIDATE_BEST one must be at least as good
+ // (equal or higher priority) as the best CANDIDATE_READY.
+ if (info.m_candidate_ready && info.m_candidate_best) {
+ assert(info.m_priority_candidate_best >=
+ info.m_priority_best_candidate_ready);
+ }
+
+ // No txid can have been announced by the same peer twice.
+ std::sort(info.m_peers.begin(), info.m_peers.end());
+ assert(
+ std::adjacent_find(info.m_peers.begin(), info.m_peers.end()) ==
+ info.m_peers.end());
+ }
+ }
+
+ void PostGetRequestableSanityCheck(std::chrono::microseconds now) const {
+ for (const Announcement &ann : m_index) {
+ if (ann.IsWaiting()) {
+ // REQUESTED and CANDIDATE_DELAYED must have a time in the
+ // future (they should have been converted to
+ // COMPLETED/CANDIDATE_READY respectively).
+ assert(ann.m_time > now);
+ } else if (ann.IsSelectable()) {
+ // CANDIDATE_READY and CANDIDATE_BEST cannot have a time in the
+ // future (they should have remained CANDIDATE_DELAYED, or
+ // should have been converted back to it if time went
+ // backwards).
+ assert(ann.m_time <= now);
+ }
+ }
+ }
+
+private:
//! Wrapper around Index::...::erase that keeps m_peerinfo up to date.
template <typename Tag> Iter<Tag> Erase(Iter<Tag> it) {
auto peerit = m_peerinfo.find(it->m_peer);
@@ -709,6 +841,12 @@
//! Count how many announcements are being tracked in total across all peers
//! and transactions.
size_t Size() const { return m_index.size(); }
+
+ uint64_t ComputePriority(const TxId &txid, NodeId peer,
+ bool preferred) const {
+ // Return Priority as a uint64_t as Priority is internal.
+ return uint64_t{m_computer(txid, peer, preferred)};
+ }
};
TxRequestTracker::TxRequestTracker(bool deterministic)
@@ -734,6 +872,14 @@
size_t TxRequestTracker::Size() const {
return m_impl->Size();
}
+void TxRequestTracker::SanityCheck() const {
+ m_impl->SanityCheck();
+}
+
+void TxRequestTracker::PostGetRequestableSanityCheck(
+ std::chrono::microseconds now) const {
+ m_impl->PostGetRequestableSanityCheck(now);
+}
void TxRequestTracker::ReceivedInv(NodeId peer, const TxId &txid,
bool preferred,
@@ -754,3 +900,8 @@
TxRequestTracker::GetRequestable(NodeId peer, std::chrono::microseconds now) {
return m_impl->GetRequestable(peer, now);
}
+
+uint64_t TxRequestTracker::ComputePriority(const TxId &txid, NodeId peer,
+ bool preferred) const {
+ return m_impl->ComputePriority(txid, peer, preferred);
+}

File Metadata

Mime Type
text/plain
Expires
Sat, Mar 1, 11:45 (6 h, 47 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5187689
Default Alt Text
D9548.id28547.diff (37 KB)

Event Timeline