Page MenuHomePhabricator

D11612.id33961.diff
No OneTemporary

D11612.id33961.diff

diff --git a/src/net.h b/src/net.h
--- a/src/net.h
+++ b/src/net.h
@@ -657,6 +657,7 @@
RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
sharedProofs;
+ std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
std::atomic<bool> compactproofs_requested{false};
};
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -108,6 +108,12 @@
/** Minimum time between 2 successive getavaaddr messages from the same peer */
static constexpr std::chrono::minutes GETAVAADDR_INTERVAL{2};
+/**
+ * If no proof was requested from a compact proof message before this timeout
+ * expires, the proof radix tree can be cleaned up.
+ */
+static constexpr std::chrono::minutes AVALANCHE_AVAPROOFS_TIMEOUT{2};
+
struct DataRequestParameters {
/**
* Maximum number of in-flight data requests from a peer. It is not a hard
@@ -524,10 +530,9 @@
void UpdateAvalancheStatistics() const;
/**
- * Send a getavaaddr message to one of our avalanche outbounds if we are
- * missing good nodes.
+ * Process periodic avalanche network messaging and cleanups.
*/
- void MaybeRequestAvalancheNodes(CScheduler &scheduler) const;
+ void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
/**
* Get a shared pointer to the Peer object.
@@ -1678,24 +1683,35 @@
(pnode->IsManualConn() && (pnode->nServices & NODE_AVALANCHE));
}
-void PeerManagerImpl::MaybeRequestAvalancheNodes(CScheduler &scheduler) const {
+void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
+ const auto now = GetTime<std::chrono::seconds>();
+
+ std::vector<NodeId> avanode_outbound_ids;
+ m_connman.ForEachNode([&](CNode *pnode) {
+ // Build a list of the avalanche manual or outbound peers nodeids
+ if (isAvalancheOutboundOrManual(pnode)) {
+ avanode_outbound_ids.push_back(pnode->GetId());
+ }
+
+ // If the proof radix tree timed out, clean it up
+ if (pnode->m_proof_relay &&
+ now > (pnode->m_proof_relay->lastSharedProofsUpdate.load() +
+ AVALANCHE_AVAPROOFS_TIMEOUT)) {
+ pnode->m_proof_relay->sharedProofs = {};
+ }
+ });
+
+ Shuffle(avanode_outbound_ids.begin(), avanode_outbound_ids.end(),
+ FastRandomContext());
+
if (g_avalanche &&
(!g_avalanche->isQuorumEstablished() ||
g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
return pm.shouldRequestMoreNodes();
}))) {
- std::vector<NodeId> avanode_outbound_ids;
- m_connman.ForEachNode([&](CNode *pnode) {
- if (isAvalancheOutboundOrManual(pnode)) {
- avanode_outbound_ids.push_back(pnode->GetId());
- }
- });
-
// Randomly select an avalanche outbound peer to send the getavaaddr
// message to
if (!avanode_outbound_ids.empty()) {
- Shuffle(avanode_outbound_ids.begin(), avanode_outbound_ids.end(),
- FastRandomContext());
const NodeId avanodeId = avanode_outbound_ids.front();
m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
@@ -1716,9 +1732,9 @@
// Schedule next run for 2-5 minutes in the future.
// We add randomness on every cycle to avoid the possibility of P2P
// fingerprinting.
- const auto requestAvalancheNodesInteval = 2min + GetRandMillis(3min);
- scheduler.scheduleFromNow([&] { MaybeRequestAvalancheNodes(scheduler); },
- requestAvalancheNodesInteval);
+ const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
+ scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
+ avalanchePeriodicNetworkingInterval);
}
void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node,
@@ -2057,9 +2073,9 @@
AVALANCHE_STATISTICS_REFRESH_PERIOD);
// schedule next run for 2-5 minutes in the future
- const auto requestAvalancheNodesInteval = 2min + GetRandMillis(3min);
- scheduler.scheduleFromNow([&] { MaybeRequestAvalancheNodes(scheduler); },
- requestAvalancheNodesInteval);
+ const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
+ scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
+ avalanchePeriodicNetworkingInterval);
}
/**
@@ -5166,6 +5182,9 @@
return;
}
+ pfrom.m_proof_relay->lastSharedProofsUpdate =
+ GetTime<std::chrono::seconds>();
+
pfrom.m_proof_relay->sharedProofs =
g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
return pm.getShareableProofsSnapshot();
diff --git a/test/functional/abc_p2p_compactproofs.py b/test/functional/abc_p2p_compactproofs.py
--- a/test/functional/abc_p2p_compactproofs.py
+++ b/test/functional/abc_p2p_compactproofs.py
@@ -7,6 +7,7 @@
"""
import random
+import time
from test_framework.avatools import (
AvaP2PInterface,
@@ -28,6 +29,11 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import MAX_NODES, assert_equal, p2p_port
+# Timeout after which the proofs can be cleaned up
+AVALANCHE_AVAPROOFS_TIMEOUT = 2 * 60
+# Max interval between 2 periodic networking processing runs
+AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL = 5 * 60
+
class ProofStoreP2PInterface(AvaP2PInterface):
def __init__(self):
@@ -414,6 +420,28 @@
# All
check_received_proofs(range(numof_proof))
+ self.log.info(
+ "Check the node will not send the proofs if not requested before the timeout elapsed")
+
+ mocktime = int(time.time())
+ node.setmocktime(mocktime)
+
+ slow_peer = node.add_p2p_connection(ProofStoreP2PInterface())
+ _ = request_proofs(slow_peer)
+
+ # Elapse the timeout
+ mocktime += AVALANCHE_AVAPROOFS_TIMEOUT + 1
+ node.setmocktime(mocktime)
+ node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
+ slow_peer.sync_with_ping()
+
+ req = msg_avaproofsreq()
+ req.indices = range(numof_proof)
+ slow_peer.send_and_ping(req)
+
+ # Check we get no proof
+ assert_equal(len(slow_peer.get_proofs()), 0)
+
def test_compact_proofs_download_on_connect(self):
self.log.info(
"Check the node get compact proofs upon avalanche outbound discovery")

File Metadata

Mime Type
text/plain
Expires
Sat, Apr 26, 12:10 (2 h, 47 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5573523
Default Alt Text
D11612.id33961.diff (6 KB)

Event Timeline