F14865034
diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt
index 24000295e..c844bba56 100644
--- a/src/bench/CMakeLists.txt
+++ b/src/bench/CMakeLists.txt
@@ -1,101 +1,102 @@
# Copyright (c) 2018 The Bitcoin developers
project(bitcoin-bench)
set(BENCH_DATA_RAW_FILES
data/block413567.raw
)
# Process raw files.
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/data")
foreach(_raw_file ${BENCH_DATA_RAW_FILES})
string(APPEND
_generated_header_output
"${CMAKE_CURRENT_BINARY_DIR}/${_raw_file}" ".h"
)
list(APPEND BENCH_DATA_GENERATED_HEADERS ${_generated_header_output})
get_filename_component(_test_name ${_raw_file} NAME_WE)
add_custom_command(
OUTPUT "${_generated_header_output}"
COMMAND
"${Python_EXECUTABLE}"
"data/convert-raw-to-header.py"
"${_test_name}"
"${_raw_file}" > "${_generated_header_output}"
COMMENT "Transforming raw file ${_raw_file} into header"
MAIN_DEPENDENCY "${_raw_file}"
DEPENDS "data/convert-raw-to-header.py"
VERBATIM
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
)
endforeach()
add_executable(bitcoin-bench
addrman.cpp
base58.cpp
bench.cpp
bench_bitcoin.cpp
block_assemble.cpp
cashaddr.cpp
ccoins_caching.cpp
chacha_poly_aead.cpp
chacha20.cpp
checkblock.cpp
checkqueue.cpp
crypto_aes.cpp
crypto_hash.cpp
data.cpp
duplicate_inputs.cpp
examples.cpp
gcs_filter.cpp
hashpadding.cpp
lockedpool.cpp
mempool_eviction.cpp
mempool_stress.cpp
merkle_root.cpp
nanobench.cpp
+ peer_eviction.cpp
poly1305.cpp
prevector.cpp
rollingbloom.cpp
rpc_blockchain.cpp
rpc_mempool.cpp
util_time.cpp
verify_script.cpp
# Add the generated headers to trigger the conversion command
${BENCH_DATA_GENERATED_HEADERS}
)
target_link_libraries(bitcoin-bench testutil)
if(BUILD_BITCOIN_WALLET)
target_sources(bitcoin-bench
PRIVATE
coin_selection.cpp
wallet_balance.cpp
)
target_link_libraries(bitcoin-bench wallet)
endif()
include(InstallationHelper)
install_target(bitcoin-bench EXCLUDE_FROM_ALL)
include(TestSuite)
add_test_custom_target(bench-bitcoin
TEST_COMMAND
"$<TARGET_FILE:bitcoin-bench>"
-output_csv=BitcoinABC_Bench.csv
-output_json=BitcoinABC_Bench.json
CUSTOM_TARGET_ARGS
DEPENDS bitcoin-bench
USES_TERMINAL
)
set_property(DIRECTORY "${CMAKE_SOURCE_DIR}" APPEND PROPERTY ADDITIONAL_CLEAN_FILES
BitcoinABC_Bench.csv
BitcoinABC_Bench.json
)
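For context on the raw-to-header step above: convert-raw-to-header.py embeds each data/*.raw file into a generated header so the benchmark binary carries its test data. The sketch below shows roughly what such a header and its consumer could look like; the array name, byte values and helper are illustrative assumptions, not taken from the script or the repository.
// Editor's sketch, illustrative only: the rough shape of a header generated
// from data/block413567.raw and one hypothetical way a benchmark could wrap
// it. The symbol name and byte values are placeholders, not real output.
#include <cstdint>
#include <vector>
static const uint8_t block413567_raw[] = {
    // ... byte values emitted verbatim from data/block413567.raw ...
    0x00, 0x01, 0x02, // placeholder bytes
};
static std::vector<uint8_t> LoadBlock413567() {
    return {block413567_raw, block413567_raw + sizeof(block413567_raw)};
}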
diff --git a/src/bench/peer_eviction.cpp b/src/bench/peer_eviction.cpp
new file mode 100644
index 000000000..4c3c2e6db
--- /dev/null
+++ b/src/bench/peer_eviction.cpp
@@ -0,0 +1,154 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bench/bench.h>
+#include <net.h>
+#include <netaddress.h>
+#include <random.h>
+
+#include <test/util/net.h>
+#include <test/util/setup_common.h>
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+static void EvictionProtectionCommon(
+ benchmark::Bench &bench, int num_candidates,
+ std::function<void(NodeEvictionCandidate &)> candidate_setup_fn) {
+ using Candidates = std::vector<NodeEvictionCandidate>;
+ FastRandomContext random_context{true};
+ bench.warmup(100).epochIterations(1100);
+
+ Candidates candidates{
+ GetRandomNodeEvictionCandidates(num_candidates, random_context)};
+ for (auto &c : candidates) {
+ candidate_setup_fn(c);
+ }
+
+ std::vector<Candidates> copies{bench.epochs() * bench.epochIterations(),
+ candidates};
+ size_t i{0};
+ bench.run([&] {
+ ProtectEvictionCandidatesByRatio(copies.at(i));
+ ++i;
+ });
+}
+
+/* Benchmarks */
+
+static void EvictionProtection0Networks250Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 250 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ c.m_network = NET_IPV4;
+ });
+}
+
+static void EvictionProtection1Networks250Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 250 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = false;
+ // 110 Tor
+ if (c.id >= 130 && c.id < 240) {
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection2Networks250Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 250 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = false;
+ if (c.id >= 90 && c.id < 160) {
+ // 70 Tor
+ c.m_network = NET_ONION;
+ } else if (c.id >= 170 && c.id < 250) {
+ // 80 I2P
+ c.m_network = NET_I2P;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks050Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 50 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ // 2 localhost
+ c.m_is_local = (c.id == 28 || c.id == 47);
+ if (c.id >= 30 && c.id < 47) {
+ // 17 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 24 && c.id < 28) {
+ // 4 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks100Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 100 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ // 5 localhost
+ c.m_is_local = (c.id >= 55 && c.id < 60);
+ if (c.id >= 70 && c.id < 80) {
+ // 10 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 80 && c.id < 96) {
+ // 16 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks250Candidates(benchmark::Bench &bench) {
+ EvictionProtectionCommon(bench, 250 /* num_candidates */,
+ [](NodeEvictionCandidate &c) {
+ c.nTimeConnected = c.id;
+ // 20 localhost
+ c.m_is_local = (c.id >= 140 && c.id < 160);
+ if (c.id >= 170 && c.id < 180) {
+ // 10 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 190 && c.id < 240) {
+ // 50 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+// Candidate numbers used for the benchmarks:
+// - 50 candidates simulates a possible use of -maxconnections
+// - 100 candidates approximates an average Bitcoin Core node with default
+// settings
+// - 250 candidates is the number of peers reported by operators of busy Bitcoin
+// Core nodes
+
+// No disadvantaged networks, with 250 eviction candidates.
+BENCHMARK(EvictionProtection0Networks250Candidates);
+
+// 1 disadvantaged network (Tor) with 250 eviction candidates.
+BENCHMARK(EvictionProtection1Networks250Candidates);
+
+// 2 disadvantaged networks (I2P, Tor) with 250 eviction candidates.
+BENCHMARK(EvictionProtection2Networks250Candidates);
+
+// 3 disadvantaged networks (I2P/localhost/Tor) with 50/100/250 eviction
+// candidates.
+BENCHMARK(EvictionProtection3Networks050Candidates);
+BENCHMARK(EvictionProtection3Networks100Candidates);
+BENCHMARK(EvictionProtection3Networks250Candidates);
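One harness detail worth spelling out: EvictionProtectionCommon above pre-builds one copy of the candidate vector per measured invocation, so the copying never lands inside the timed lambda. The count below assumes nanobench's default of 11 epochs, which is not stated in the diff.
// Editor's sketch, not part of the diff: how many candidate-vector copies
// the benchmark pre-allocates. The epoch count of 11 is an assumption
// (nanobench's default); epochIterations(1100) comes from the code above.
#include <cstddef>
constexpr std::size_t kEpochs = 11;            // assumed nanobench default
constexpr std::size_t kEpochIterations = 1100; // bench.epochIterations(1100)
constexpr std::size_t kCopies = kEpochs * kEpochIterations;
static_assert(kCopies == 12100, "one pristine candidate vector per measured call");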
diff --git a/src/net.cpp b/src/net.cpp
index 38fbc75d5..723eaa32b 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1,3744 +1,3752 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
#include <net.h>
#include <avalanche/avalanche.h>
#include <banman.h>
#include <clientversion.h>
#include <compat.h>
#include <config.h>
#include <consensus/consensus.h>
#include <crypto/sha256.h>
#include <dnsseeds.h>
#include <i2p.h>
#include <netbase.h>
#include <node/ui_interface.h>
#include <protocol.h>
#include <random.h>
#include <scheduler.h>
#include <util/sock.h>
#include <util/strencodings.h>
#include <util/translation.h>
#ifdef WIN32
#include <cstring>
#else
#include <fcntl.h>
#endif
#ifdef USE_POLL
#include <poll.h>
#endif
#ifdef USE_UPNP
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
// The minimum supported miniUPnPc API version is set to 10. This keeps
// compatibility with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages.
static_assert(MINIUPNPC_API_VERSION >= 10,
"miniUPnPc API version >= 10 assumed");
#endif
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <unordered_map>
/** Maximum number of block-relay-only anchor connections */
static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;
static_assert(MAX_BLOCK_RELAY_ONLY_ANCHORS <=
static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS),
"MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed "
"MAX_BLOCK_RELAY_ONLY_CONNECTIONS.");
/** Anchor IP address database file name */
const char *const ANCHORS_DATABASE_FILENAME = "anchors.dat";
// How often to dump addresses to peers.dat
static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};
/**
* Number of DNS seeds to query when the number of connections is low.
*/
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3;
/**
* How long to delay before querying DNS seeds
*
* If we have more than THRESHOLD entries in addrman, then it's likely
* that we got those addresses from having previously connected to the P2P
* network, and that we'll be able to successfully reconnect to the P2P
* network via contacting one of them. So if that's the case, spend a
* little longer trying to connect to known peers before querying the
* DNS seeds.
*/
static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11};
static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5};
// "many" vs "few" peers
static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000;
/** The default timeframe for -maxuploadtarget. 1 day. */
static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24};
// We add a random period time (0 to 1 seconds) to feeler connections to prevent
// synchronization.
#define FEELER_SLEEP_WINDOW 1
/** Used to pass flags to the Bind() function */
enum BindFlags {
BF_NONE = 0,
BF_EXPLICIT = (1U << 0),
BF_REPORT_ERROR = (1U << 1),
/**
* Do not call AddLocal() for our special addresses, e.g., for incoming
* Tor connections, to prevent gossiping them over the network.
*/
BF_DONT_ADVERTISE = (1U << 2),
};
// The set of sockets cannot be modified while waiting
// The sleep time needs to be small to avoid new sockets stalling
static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50;
const std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
// SHA256("netgroup")[0:8]
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL;
// SHA256("localhostnonce")[0:8]
static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL;
// SHA256("localhostnonce")[8:16]
static const uint64_t RANDOMIZER_ID_EXTRAENTROPY = 0x94b05d41679a4ff7ULL;
// SHA256("addrcache")[0:8]
static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL;
//
// Global state variables
//
bool fDiscover = true;
bool fListen = true;
RecursiveMutex cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
void CConnman::AddAddrFetch(const std::string &strDest) {
LOCK(m_addr_fetches_mutex);
m_addr_fetches.push_back(strDest);
}
uint16_t GetListenPort() {
return uint16_t(gArgs.GetArg("-port", Params().GetDefaultPort()));
}
// find 'best' local address for a particular peer
bool GetLocal(CService &addr, const CNetAddr *paddrPeer) {
if (!fListen) {
return false;
}
int nBestScore = -1;
int nBestReachability = -1;
{
LOCK(cs_mapLocalHost);
for (const auto &entry : mapLocalHost) {
int nScore = entry.second.nScore;
int nReachability = entry.first.GetReachabilityFrom(paddrPeer);
if (nReachability > nBestReachability ||
(nReachability == nBestReachability && nScore > nBestScore)) {
addr = CService(entry.first, entry.second.nPort);
nBestReachability = nReachability;
nBestScore = nScore;
}
}
}
return nBestScore >= 0;
}
//! Convert the pnSeed6 array into usable address objects.
static std::vector<CAddress>
convertSeed6(const std::vector<SeedSpec6> &vSeedsIn) {
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps. Seed nodes are given
// a random 'last seen time' of between one and two weeks ago.
const int64_t nOneWeek = 7 * 24 * 60 * 60;
std::vector<CAddress> vSeedsOut;
vSeedsOut.reserve(vSeedsIn.size());
FastRandomContext rng;
for (const auto &seed_in : vSeedsIn) {
struct in6_addr ip;
memcpy(&ip, seed_in.addr, sizeof(ip));
CAddress addr(CService(ip, seed_in.port),
GetDesirableServiceFlags(NODE_NONE));
addr.nTime = GetTime() - rng.randrange(nOneWeek) - nOneWeek;
vSeedsOut.push_back(addr);
}
return vSeedsOut;
}
// Get best local address for a particular peer as a CAddress. Otherwise, return
// the unroutable 0.0.0.0 but filled in with the normal parameters, since the IP
// may be changed to a useful one by discovery.
CAddress GetLocalAddress(const CNetAddr *paddrPeer,
ServiceFlags nLocalServices) {
CAddress ret(CService(CNetAddr(), GetListenPort()), nLocalServices);
CService addr;
if (GetLocal(addr, paddrPeer)) {
ret = CAddress(addr, nLocalServices);
}
ret.nTime = GetAdjustedTime();
return ret;
}
static int GetnScore(const CService &addr) {
LOCK(cs_mapLocalHost);
if (mapLocalHost.count(addr) == 0) {
return 0;
}
return mapLocalHost[addr].nScore;
}
// Is our peer's addrLocal potentially useful as an external IP source?
bool IsPeerAddrLocalGood(CNode *pnode) {
CService addrLocal = pnode->GetAddrLocal();
return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() &&
IsReachable(addrLocal.GetNetwork());
}
std::optional<CAddress> GetLocalAddrForPeer(CNode *pnode) {
CAddress addrLocal =
GetLocalAddress(&pnode->addr, pnode->GetLocalServices());
if (gArgs.GetBoolArg("-addrmantest", false)) {
// use IPv4 loopback during addrmantest
addrLocal =
CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())),
pnode->GetLocalServices());
}
// If discovery is enabled, sometimes give our peer the address it
// tells us that it sees us as in case it has a better idea of our
// address than we do.
FastRandomContext rng;
if (IsPeerAddrLocalGood(pnode) &&
(!addrLocal.IsRoutable() ||
rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) {
addrLocal.SetIP(pnode->GetAddrLocal());
}
if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) {
LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n",
addrLocal.ToString(), pnode->GetId());
return addrLocal;
}
// Address is unroutable. Don't advertise.
return std::nullopt;
}
// Learn a new local address.
bool AddLocal(const CService &addr, int nScore) {
if (!addr.IsRoutable()) {
return false;
}
if (!fDiscover && nScore < LOCAL_MANUAL) {
return false;
}
if (!IsReachable(addr)) {
return false;
}
LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
{
LOCK(cs_mapLocalHost);
bool fAlready = mapLocalHost.count(addr) > 0;
LocalServiceInfo &info = mapLocalHost[addr];
if (!fAlready || nScore >= info.nScore) {
info.nScore = nScore + (fAlready ? 1 : 0);
info.nPort = addr.GetPort();
}
}
return true;
}
bool AddLocal(const CNetAddr &addr, int nScore) {
return AddLocal(CService(addr, GetListenPort()), nScore);
}
void RemoveLocal(const CService &addr) {
LOCK(cs_mapLocalHost);
LogPrintf("RemoveLocal(%s)\n", addr.ToString());
mapLocalHost.erase(addr);
}
void SetReachable(enum Network net, bool reachable) {
if (net == NET_UNROUTABLE || net == NET_INTERNAL) {
return;
}
LOCK(cs_mapLocalHost);
vfLimited[net] = !reachable;
}
bool IsReachable(enum Network net) {
LOCK(cs_mapLocalHost);
return !vfLimited[net];
}
bool IsReachable(const CNetAddr &addr) {
return IsReachable(addr.GetNetwork());
}
/** vote for a local address */
bool SeenLocal(const CService &addr) {
LOCK(cs_mapLocalHost);
if (mapLocalHost.count(addr) == 0) {
return false;
}
mapLocalHost[addr].nScore++;
return true;
}
/** check whether a given address is potentially local */
bool IsLocal(const CService &addr) {
LOCK(cs_mapLocalHost);
return mapLocalHost.count(addr) > 0;
}
CNode *CConnman::FindNode(const CNetAddr &ip) {
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (static_cast<CNetAddr>(pnode->addr) == ip) {
return pnode;
}
}
return nullptr;
}
CNode *CConnman::FindNode(const CSubNet &subNet) {
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (subNet.Match(static_cast<CNetAddr>(pnode->addr))) {
return pnode;
}
}
return nullptr;
}
CNode *CConnman::FindNode(const std::string &addrName) {
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (pnode->GetAddrName() == addrName) {
return pnode;
}
}
return nullptr;
}
CNode *CConnman::FindNode(const CService &addr) {
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (static_cast<CService>(pnode->addr) == addr) {
return pnode;
}
}
return nullptr;
}
bool CConnman::AlreadyConnectedToAddress(const CAddress &addr) {
return FindNode(static_cast<CNetAddr>(addr)) ||
FindNode(addr.ToStringIPPort());
}
bool CConnman::CheckIncomingNonce(uint64_t nonce) {
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() &&
pnode->GetLocalNonce() == nonce) {
return false;
}
}
return true;
}
/** Get the bind address for a socket as CAddress */
static CAddress GetBindAddress(SOCKET sock) {
CAddress addr_bind;
struct sockaddr_storage sockaddr_bind;
socklen_t sockaddr_bind_len = sizeof(sockaddr_bind);
if (sock != INVALID_SOCKET) {
if (!getsockname(sock, (struct sockaddr *)&sockaddr_bind,
&sockaddr_bind_len)) {
addr_bind.SetSockAddr((const struct sockaddr *)&sockaddr_bind);
} else {
LogPrint(BCLog::NET, "Warning: getsockname failed\n");
}
}
return addr_bind;
}
CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest,
bool fCountFailure, ConnectionType conn_type) {
assert(conn_type != ConnectionType::INBOUND);
if (pszDest == nullptr) {
if (IsLocal(addrConnect)) {
return nullptr;
}
// Look for an existing connection
CNode *pnode = FindNode(static_cast<CService>(addrConnect));
if (pnode) {
LogPrintf("Failed to open new connection, already connected\n");
return nullptr;
}
}
/// debug print
LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n",
pszDest ? pszDest : addrConnect.ToString(),
pszDest
? 0.0
: (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0);
// Resolve
const int default_port = Params().GetDefaultPort();
if (pszDest) {
std::vector<CService> resolved;
if (Lookup(pszDest, resolved, default_port,
fNameLookup && !HaveNameProxy(), 256) &&
!resolved.empty()) {
addrConnect =
CAddress(resolved[GetRand(resolved.size())], NODE_NONE);
if (!addrConnect.IsValid()) {
LogPrint(BCLog::NET,
"Resolver returned invalid address %s for %s\n",
addrConnect.ToString(), pszDest);
return nullptr;
}
// It is possible that we already have a connection to the IP/port
// pszDest resolved to. In that case, drop the connection that was
// just created, and return the existing CNode instead. Also store
// the name we used to connect in that CNode, so that future
// FindNode() calls to that name catch this early.
LOCK(cs_vNodes);
CNode *pnode = FindNode(static_cast<CService>(addrConnect));
if (pnode) {
pnode->MaybeSetAddrName(std::string(pszDest));
LogPrintf("Failed to open new connection, already connected\n");
return nullptr;
}
}
}
// Connect
bool connected = false;
std::unique_ptr<Sock> sock;
proxyType proxy;
CAddress addr_bind;
assert(!addr_bind.IsValid());
if (addrConnect.IsValid()) {
bool proxyConnectionFailed = false;
if (addrConnect.GetNetwork() == NET_I2P &&
m_i2p_sam_session.get() != nullptr) {
i2p::Connection conn;
if (m_i2p_sam_session->Connect(addrConnect, conn,
proxyConnectionFailed)) {
connected = true;
sock = std::move(conn.sock);
addr_bind = CAddress{conn.me, NODE_NONE};
}
} else if (GetProxy(addrConnect.GetNetwork(), proxy)) {
sock = CreateSock(proxy.proxy);
if (!sock) {
return nullptr;
}
connected = ConnectThroughProxy(
proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), *sock,
nConnectTimeout, proxyConnectionFailed);
} else {
// no proxy needed (none set for target network)
sock = CreateSock(addrConnect);
if (!sock) {
return nullptr;
}
connected =
ConnectSocketDirectly(addrConnect, *sock, nConnectTimeout,
conn_type == ConnectionType::MANUAL);
}
if (!proxyConnectionFailed) {
// If a connection to the node was attempted, and failure (if any)
// is not caused by a problem connecting to the proxy, mark this as
// an attempt.
addrman.Attempt(addrConnect, fCountFailure);
}
} else if (pszDest && GetNameProxy(proxy)) {
sock = CreateSock(proxy.proxy);
if (!sock) {
return nullptr;
}
std::string host;
int port = default_port;
SplitHostPort(std::string(pszDest), port, host);
bool proxyConnectionFailed;
connected = ConnectThroughProxy(proxy, host, port, *sock,
nConnectTimeout, proxyConnectionFailed);
}
if (!connected) {
return nullptr;
}
// Add node
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE)
.Write(id)
.Finalize();
uint64_t extra_entropy =
GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY)
.Write(id)
.Finalize();
if (!addr_bind.IsValid()) {
addr_bind = GetBindAddress(sock->Get());
}
CNode *pnode =
new CNode(id, nLocalServices, sock->Release(), addrConnect,
CalculateKeyedNetGroup(addrConnect), nonce, extra_entropy,
addr_bind, pszDest ? pszDest : "", conn_type,
/* inbound_onion */ false);
pnode->AddRef();
// We're making a new connection, harvest entropy from the time (and our
// peer count)
RandAddEvent(uint32_t(id));
return pnode;
}
void CNode::CloseSocketDisconnect() {
fDisconnect = true;
LOCK(cs_hSocket);
if (hSocket != INVALID_SOCKET) {
LogPrint(BCLog::NET, "disconnecting peer=%d\n", id);
CloseSocket(hSocket);
}
}
void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags &flags,
const CNetAddr &addr) const {
for (const auto &subnet : vWhitelistedRange) {
if (subnet.m_subnet.Match(addr)) {
NetPermissions::AddFlag(flags, subnet.m_flags);
}
}
}
std::string CNode::ConnectionTypeAsString() const {
switch (m_conn_type) {
case ConnectionType::INBOUND:
return "inbound";
case ConnectionType::MANUAL:
return "manual";
case ConnectionType::FEELER:
return "feeler";
case ConnectionType::OUTBOUND_FULL_RELAY:
return "outbound-full-relay";
case ConnectionType::BLOCK_RELAY:
return "block-relay-only";
case ConnectionType::ADDR_FETCH:
return "addr-fetch";
case ConnectionType::AVALANCHE_OUTBOUND:
return "avalanche";
} // no default case, so the compiler can warn about missing cases
assert(false);
}
std::string CNode::GetAddrName() const {
LOCK(cs_addrName);
return addrName;
}
void CNode::MaybeSetAddrName(const std::string &addrNameIn) {
LOCK(cs_addrName);
if (addrName.empty()) {
addrName = addrNameIn;
}
}
CService CNode::GetAddrLocal() const {
LOCK(cs_addrLocal);
return addrLocal;
}
void CNode::SetAddrLocal(const CService &addrLocalIn) {
LOCK(cs_addrLocal);
if (addrLocal.IsValid()) {
error("Addr local already set for node: %i. Refusing to change from %s "
"to %s",
id, addrLocal.ToString(), addrLocalIn.ToString());
} else {
addrLocal = addrLocalIn;
}
}
Network CNode::ConnectedThroughNetwork() const {
return m_inbound_onion ? NET_ONION : addr.GetNetClass();
}
void CNode::copyStats(CNodeStats &stats) {
stats.nodeid = this->GetId();
stats.nServices = nServices;
stats.addr = addr;
stats.addrBind = addrBind;
stats.m_network = ConnectedThroughNetwork();
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_filter);
stats.fRelayTxes = m_tx_relay->fRelayTxes;
} else {
stats.fRelayTxes = false;
}
stats.m_last_send = m_last_send;
stats.m_last_recv = m_last_recv;
stats.nLastTXTime = nLastTXTime;
stats.nLastProofTime = nLastProofTime;
stats.nLastBlockTime = nLastBlockTime;
stats.nTimeConnected = nTimeConnected;
stats.nTimeOffset = nTimeOffset;
stats.addrName = GetAddrName();
stats.nVersion = nVersion;
{
LOCK(cs_SubVer);
stats.cleanSubVer = cleanSubVer;
}
stats.fInbound = IsInboundConn();
stats.m_manual_connection = IsManualConn();
stats.m_bip152_highbandwidth_to = m_bip152_highbandwidth_to;
stats.m_bip152_highbandwidth_from = m_bip152_highbandwidth_from;
{
LOCK(cs_vSend);
stats.mapSendBytesPerMsgCmd = mapSendBytesPerMsgCmd;
stats.nSendBytes = nSendBytes;
}
{
LOCK(cs_vRecv);
stats.mapRecvBytesPerMsgCmd = mapRecvBytesPerMsgCmd;
stats.nRecvBytes = nRecvBytes;
}
stats.m_legacyWhitelisted = m_legacyWhitelisted;
stats.m_permissionFlags = m_permissionFlags;
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_feeFilter);
stats.minFeeFilter = m_tx_relay->minFeeFilter;
} else {
stats.minFeeFilter = Amount::zero();
}
stats.m_last_ping_time = m_last_ping_time;
stats.m_min_ping_time = m_min_ping_time;
// Leave string empty if addrLocal invalid (not filled in yet)
CService addrLocalUnlocked = GetAddrLocal();
stats.addrLocal =
addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : "";
stats.m_conn_type_string = ConnectionTypeAsString();
}
bool CNode::ReceiveMsgBytes(const Config &config, Span<const char> msg_bytes,
bool &complete) {
complete = false;
const auto time = GetTime<std::chrono::microseconds>();
LOCK(cs_vRecv);
m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time);
nRecvBytes += msg_bytes.size();
while (msg_bytes.size() > 0) {
// Absorb network data.
int handled = m_deserializer->Read(config, msg_bytes);
if (handled < 0) {
return false;
}
if (m_deserializer->Complete()) {
// decompose a transport agnostic CNetMessage from the deserializer
CNetMessage msg = m_deserializer->GetMessage(config, time);
// Store received bytes per message command to prevent a memory DoS;
// only allow valid commands.
mapMsgCmdSize::iterator i =
mapRecvBytesPerMsgCmd.find(msg.m_command);
if (i == mapRecvBytesPerMsgCmd.end()) {
i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER);
}
assert(i != mapRecvBytesPerMsgCmd.end());
i->second += msg.m_raw_message_size;
// push the message to the process queue,
vRecvMsg.push_back(std::move(msg));
complete = true;
}
}
return true;
}
int V1TransportDeserializer::readHeader(const Config &config,
Span<const char> msg_bytes) {
// copy data to temporary parsing buffer
uint32_t nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos;
uint32_t nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());
memcpy(&hdrbuf[nHdrPos], msg_bytes.data(), nCopy);
nHdrPos += nCopy;
// if header incomplete, exit
if (nHdrPos < CMessageHeader::HEADER_SIZE) {
return nCopy;
}
// deserialize to CMessageHeader
try {
hdrbuf >> hdr;
} catch (const std::exception &) {
return -1;
}
// Reject oversized messages
if (hdr.IsOversized(config)) {
LogPrint(BCLog::NET, "Oversized header detected\n");
return -1;
}
// switch state to reading message data
in_data = true;
return nCopy;
}
int V1TransportDeserializer::readData(Span<const char> msg_bytes) {
unsigned int nRemaining = hdr.nMessageSize - nDataPos;
unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());
if (vRecv.size() < nDataPos + nCopy) {
// Allocate up to 256 KiB ahead, but never more than the total message
// size.
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
}
hasher.Write(MakeUCharSpan(msg_bytes.first(nCopy)));
memcpy(&vRecv[nDataPos], msg_bytes.data(), nCopy);
nDataPos += nCopy;
return nCopy;
}
const uint256 &V1TransportDeserializer::GetMessageHash() const {
assert(Complete());
if (data_hash.IsNull()) {
hasher.Finalize(data_hash);
}
return data_hash;
}
CNetMessage
V1TransportDeserializer::GetMessage(const Config &config,
const std::chrono::microseconds time) {
// decompose a single CNetMessage from the TransportDeserializer
CNetMessage msg(std::move(vRecv));
// store state about valid header, netmagic and checksum
msg.m_valid_header = hdr.IsValid(config);
// FIXME Split CheckHeaderMagicAndCommand() into CheckHeaderMagic() and
// CheckCommand() to prevent the net magic check code duplication.
msg.m_valid_netmagic =
(memcmp(std::begin(hdr.pchMessageStart),
std::begin(config.GetChainParams().NetMagic()),
CMessageHeader::MESSAGE_START_SIZE) == 0);
uint256 hash = GetMessageHash();
// store command string, payload size
msg.m_command = hdr.GetCommand();
msg.m_message_size = hdr.nMessageSize;
msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;
// We just received a message off the wire, harvest entropy from the time
// (and the message checksum)
RandAddEvent(ReadLE32(hash.begin()));
msg.m_valid_checksum = (memcmp(hash.begin(), hdr.pchChecksum,
CMessageHeader::CHECKSUM_SIZE) == 0);
if (!msg.m_valid_checksum) {
LogPrint(
BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n",
SanitizeString(msg.m_command), msg.m_message_size,
HexStr(Span<uint8_t>(hash.begin(),
hash.begin() + CMessageHeader::CHECKSUM_SIZE)),
HexStr(hdr.pchChecksum));
}
// store receive time
msg.m_time = time;
// reset the network deserializer (prepare for the next message)
Reset();
return msg;
}
void V1TransportSerializer::prepareForTransport(const Config &config,
CSerializedNetMsg &msg,
std::vector<uint8_t> &header) {
// create dbl-sha256 checksum
uint256 hash = Hash(msg.data);
// create header
CMessageHeader hdr(config.GetChainParams().NetMagic(), msg.m_type.c_str(),
msg.data.size());
memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
// serialize header
header.reserve(CMessageHeader::HEADER_SIZE);
CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr};
}
size_t CConnman::SocketSendData(CNode &node) const {
size_t nSentSize = 0;
size_t nMsgCount = 0;
for (const auto &data : node.vSendMsg) {
assert(data.size() > node.nSendOffset);
int nBytes = 0;
{
LOCK(node.cs_hSocket);
if (node.hSocket == INVALID_SOCKET) {
break;
}
nBytes = send(
node.hSocket,
reinterpret_cast<const char *>(data.data()) + node.nSendOffset,
data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
}
if (nBytes == 0) {
// couldn't send anything at all
break;
}
if (nBytes < 0) {
// error
int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE &&
nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
LogPrint(BCLog::NET, "socket send error for peer=%d: %s\n",
node.GetId(), NetworkErrorString(nErr));
node.CloseSocketDisconnect();
}
break;
}
assert(nBytes > 0);
node.m_last_send = GetTime<std::chrono::seconds>();
node.nSendBytes += nBytes;
node.nSendOffset += nBytes;
nSentSize += nBytes;
if (node.nSendOffset != data.size()) {
// could not send full message; stop sending more
break;
}
node.nSendOffset = 0;
node.nSendSize -= data.size();
node.fPauseSend = node.nSendSize > nSendBufferMaxSize;
nMsgCount++;
}
node.vSendMsg.erase(node.vSendMsg.begin(),
node.vSendMsg.begin() + nMsgCount);
if (node.vSendMsg.empty()) {
assert(node.nSendOffset == 0);
assert(node.nSendSize == 0);
}
return nSentSize;
}
static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
return a.m_min_ping_time > b.m_min_ping_time;
}
static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
return a.nTimeConnected > b.nTimeConnected;
}
static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
return a.nKeyedNetGroup < b.nKeyedNetGroup;
}
static bool CompareNodeBlockTime(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
// There is a fall-through here because it is common for a node to have many
// peers which have not yet relayed a block.
if (a.nLastBlockTime != b.nLastBlockTime) {
return a.nLastBlockTime < b.nLastBlockTime;
}
if (a.fRelevantServices != b.fRelevantServices) {
return b.fRelevantServices;
}
return a.nTimeConnected > b.nTimeConnected;
}
static bool CompareNodeTXTime(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
// There is a fall-through here because it is common for a node to have more
// than a few peers that have not yet relayed txn.
if (a.nLastTXTime != b.nLastTXTime) {
return a.nLastTXTime < b.nLastTXTime;
}
if (a.fRelayTxes != b.fRelayTxes) {
return b.fRelayTxes;
}
if (a.fBloomFilter != b.fBloomFilter) {
return a.fBloomFilter;
}
return a.nTimeConnected > b.nTimeConnected;
}
static bool CompareNodeProofTime(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
// There is a fall-through here because it is common for a node to have more
// than a few peers that have not yet relayed proofs. This fallback is also
// used in the case avalanche is not enabled.
if (a.nLastProofTime != b.nLastProofTime) {
return a.nLastProofTime < b.nLastProofTime;
}
return a.nTimeConnected > b.nTimeConnected;
}
// Pick out the potential block-relay only peers, and sort them by last block
// time.
static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
if (a.fRelayTxes != b.fRelayTxes) {
return a.fRelayTxes;
}
if (a.nLastBlockTime != b.nLastBlockTime) {
return a.nLastBlockTime < b.nLastBlockTime;
}
if (a.fRelevantServices != b.fRelevantServices) {
return b.fRelevantServices;
}
return a.nTimeConnected > b.nTimeConnected;
}
static bool CompareNodeAvailabilityScore(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) {
// Equality can happen if the nodes have no score or it has not been
// computed yet.
if (a.availabilityScore != b.availabilityScore) {
return a.availabilityScore < b.availabilityScore;
}
return a.nTimeConnected > b.nTimeConnected;
}
/**
* Sort eviction candidates by network/localhost and connection uptime.
* Candidates near the beginning are more likely to be evicted, and those
* near the end are more likely to be protected, e.g. less likely to be evicted.
* - First, nodes that are not `is_local` and that do not belong to `network`,
* sorted by increasing uptime (from most recently connected to connected
* longer).
* - Then, nodes that are `is_local` or belong to `network`, sorted by
* increasing uptime.
*/
struct CompareNodeNetworkTime {
const bool m_is_local;
const Network m_network;
CompareNodeNetworkTime(bool is_local, Network network)
: m_is_local(is_local), m_network(network) {}
bool operator()(const NodeEvictionCandidate &a,
const NodeEvictionCandidate &b) const {
if (m_is_local && a.m_is_local != b.m_is_local) {
return b.m_is_local;
}
if ((a.m_network == m_network) != (b.m_network == m_network)) {
return b.m_network == m_network;
}
return a.nTimeConnected > b.nTimeConnected;
};
};
//! Sort an array by the specified comparator, then erase the last K elements
//! where predicate is true.
template <typename T, typename Comparator>
static void EraseLastKElements(
std::vector<T> &elements, Comparator comparator, size_t k,
std::function<bool(const NodeEvictionCandidate &)> predicate =
[](const NodeEvictionCandidate &n) { return true; }) {
std::sort(elements.begin(), elements.end(), comparator);
size_t eraseSize = std::min(k, elements.size());
elements.erase(
std::remove_if(elements.end() - eraseSize, elements.end(), predicate),
elements.end());
}
void ProtectEvictionCandidatesByRatio(
std::vector<NodeEvictionCandidate> &eviction_candidates) {
// Protect the half of the remaining nodes which have been connected the
// longest. This replicates the non-eviction implicit behavior, and
// precludes attacks that start later.
// To promote the diversity of our peer connections, reserve up to half of
// these protected spots for Tor/onion, localhost and I2P peers, even if
// they're not the longest uptime overall. This helps protect these
// higher-latency peers that tend to be otherwise disadvantaged under our
// eviction criteria.
const size_t initial_size = eviction_candidates.size();
const size_t total_protect_size{initial_size / 2};
// Disadvantaged networks to protect: I2P, localhost and Tor/onion. In case
// of equal counts, earlier array members have first opportunity to recover
// unused slots from the previous iteration.
struct Net {
bool is_local;
Network id;
size_t count;
};
std::array<Net, 3> networks{{{false, NET_I2P, 0},
{/* localhost */ true, NET_MAX, 0},
{false, NET_ONION, 0}}};
// Count and store the number of eviction candidates per network.
for (Net &n : networks) {
n.count = std::count_if(
eviction_candidates.cbegin(), eviction_candidates.cend(),
[&n](const NodeEvictionCandidate &c) {
return n.is_local ? c.m_is_local : c.m_network == n.id;
});
}
// Sort `networks` by ascending candidate count, to give networks having
// fewer candidates the first opportunity to recover unused protected slots
// from the previous iteration.
std::stable_sort(networks.begin(), networks.end(),
[](Net a, Net b) { return a.count < b.count; });
// Protect up to 25% of the eviction candidates by disadvantaged network.
const size_t max_protect_by_network{total_protect_size / 2};
size_t num_protected{0};
while (num_protected < max_protect_by_network) {
+ // Count the number of disadvantaged networks from which we have peers
+ // to protect.
+ auto num_networks = std::count_if(networks.begin(), networks.end(),
+ [](const Net &n) { return n.count; });
+ if (num_networks == 0) {
+ break;
+ }
const size_t disadvantaged_to_protect{max_protect_by_network -
num_protected};
- const size_t protect_per_network{
- std::max(disadvantaged_to_protect / networks.size(),
- static_cast<size_t>(1))};
+ const size_t protect_per_network{std::max(
+ disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
// Early exit flag if there are no remaining candidates by disadvantaged
// network.
bool protected_at_least_one{false};
- for (const Net &n : networks) {
+ for (Net &n : networks) {
if (n.count == 0) {
continue;
}
const size_t before = eviction_candidates.size();
EraseLastKElements(
eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
protect_per_network, [&n](const NodeEvictionCandidate &c) {
return n.is_local ? c.m_is_local : c.m_network == n.id;
});
const size_t after = eviction_candidates.size();
if (before > after) {
protected_at_least_one = true;
- num_protected += before - after;
+ const size_t delta{before - after};
+ num_protected += delta;
if (num_protected >= max_protect_by_network) {
break;
}
+ n.count -= delta;
}
}
if (!protected_at_least_one) {
break;
}
}
// Calculate how many we removed, and update our total number of peers that
// we want to protect based on uptime accordingly.
assert(num_protected == initial_size - eviction_candidates.size());
const size_t remaining_to_protect{total_protect_size - num_protected};
EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected,
remaining_to_protect);
}
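To make the ratios above concrete, here is the arithmetic for the 250-candidate benchmarks added in this revision, written out as an editor's sketch (not part of the diff); the constants mirror the code immediately above.
// Editor's sketch, not part of the diff: the protection arithmetic for
// EvictionProtection3Networks250Candidates, with compile-time checks.
#include <cstddef>
constexpr std::size_t initial_size = 250;                             // eviction candidates
constexpr std::size_t total_protect_size = initial_size / 2;          // 125: half are protected
constexpr std::size_t max_protect_by_network = total_protect_size / 2; // 62: at most a quarter by network
static_assert(total_protect_size == 125, "half of the candidates are protected");
static_assert(max_protect_by_network == 62, "up to a quarter protected by disadvantaged network");
// Candidate counts in that benchmark: I2P = 10, localhost = 20, Tor = 50.
// Pass 1: three networks have candidates, so 62 / 3 = 20 slots each,
//         protecting 10 + 20 + 20 = 50 peers.
// Pass 2: only Tor still has candidates; with the num_networks recount added
//         by this patch, the remaining 62 - 50 = 12 slots go to Tor at once.
// The final 125 - 62 = 63 protected slots are then filled by longest uptime.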
[[nodiscard]] std::optional<NodeId>
SelectNodeToEvict(std::vector<NodeEvictionCandidate> &&vEvictionCandidates) {
// Protect connections with certain characteristics
// Deterministically select 4 peers to protect by netgroup.
// An attacker cannot predict which netgroups will be protected
EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4);
// Protect the 8 nodes with the lowest minimum ping time.
// An attacker cannot manipulate this metric without physically moving nodes
// closer to the target.
EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8);
// Protect 4 nodes that most recently sent us novel transactions accepted
// into our mempool. An attacker cannot manipulate this metric without
// performing useful work.
EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
// Protect 4 nodes that most recently sent us novel proofs accepted
// into our proof pool. An attacker cannot manipulate this metric without
// performing useful work.
// TODO this filter must happen before the last tx time once avalanche is
// enabled for pre-consensus.
EraseLastKElements(vEvictionCandidates, CompareNodeProofTime, 4);
// Protect up to 8 non-tx-relay peers that have sent us novel blocks.
EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
[](const NodeEvictionCandidate &n) {
return !n.fRelayTxes && n.fRelevantServices;
});
// Protect 4 nodes that most recently sent us novel blocks.
// An attacker cannot manipulate this metric without performing useful work.
EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4);
// Protect up to 128 nodes that have the highest avalanche availability
// score.
EraseLastKElements(vEvictionCandidates, CompareNodeAvailabilityScore, 128,
[](NodeEvictionCandidate const &n) {
return n.availabilityScore > 0.;
});
// Protect some of the remaining eviction candidates by ratios of desirable
// or disadvantaged characteristics.
ProtectEvictionCandidatesByRatio(vEvictionCandidates);
if (vEvictionCandidates.empty()) {
return std::nullopt;
}
// If any remaining peers are preferred for eviction consider only them.
// This happens after the other preferences since if a peer is really the
// best by other criteria (esp relaying blocks)
// then we probably don't want to evict it no matter what.
if (std::any_of(
vEvictionCandidates.begin(), vEvictionCandidates.end(),
[](NodeEvictionCandidate const &n) { return n.prefer_evict; })) {
vEvictionCandidates.erase(
std::remove_if(
vEvictionCandidates.begin(), vEvictionCandidates.end(),
[](NodeEvictionCandidate const &n) { return !n.prefer_evict; }),
vEvictionCandidates.end());
}
// Identify the network group with the most connections and youngest member.
// (vEvictionCandidates is already sorted by reverse connect time)
uint64_t naMostConnections;
unsigned int nMostConnections = 0;
int64_t nMostConnectionsTime = 0;
std::map<uint64_t, std::vector<NodeEvictionCandidate>> mapNetGroupNodes;
for (const NodeEvictionCandidate &node : vEvictionCandidates) {
std::vector<NodeEvictionCandidate> &group =
mapNetGroupNodes[node.nKeyedNetGroup];
group.push_back(node);
const int64_t grouptime = group[0].nTimeConnected;
size_t group_size = group.size();
if (group_size > nMostConnections ||
(group_size == nMostConnections &&
grouptime > nMostConnectionsTime)) {
nMostConnections = group_size;
nMostConnectionsTime = grouptime;
naMostConnections = node.nKeyedNetGroup;
}
}
// Reduce to the network group with the most connections
vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
// Disconnect from the network group with the most connections
return vEvictionCandidates.front().id;
}
/** Try to find a connection to evict when the node is full.
* Extreme care must be taken to avoid opening the node to attacker
* triggered network partitioning.
* The strategy used here is to protect a small number of peers
* for each of several distinct characteristics which are difficult
* to forge. In order to partition a node the attacker must be
* simultaneously better at all of them than honest peers.
*/
bool CConnman::AttemptToEvictConnection() {
std::vector<NodeEvictionCandidate> vEvictionCandidates;
{
LOCK(cs_vNodes);
for (const CNode *node : vNodes) {
if (node->HasPermission(PF_NOBAN)) {
continue;
}
if (!node->IsInboundConn()) {
continue;
}
if (node->fDisconnect) {
continue;
}
bool peer_relay_txes = false;
bool peer_filter_not_null = false;
if (node->m_tx_relay != nullptr) {
LOCK(node->m_tx_relay->cs_filter);
peer_relay_txes = node->m_tx_relay->fRelayTxes;
peer_filter_not_null = node->m_tx_relay->pfilter != nullptr;
}
NodeEvictionCandidate candidate = {
node->GetId(),
node->nTimeConnected,
node->m_min_ping_time,
node->nLastBlockTime,
node->nLastProofTime,
node->nLastTXTime,
HasAllDesirableServiceFlags(node->nServices),
peer_relay_txes,
peer_filter_not_null,
node->nKeyedNetGroup,
node->m_prefer_evict,
node->addr.IsLocal(),
node->ConnectedThroughNetwork(),
node->m_avalanche_state
? node->m_avalanche_state->getAvailabilityScore()
: -std::numeric_limits<double>::infinity()};
vEvictionCandidates.push_back(candidate);
}
}
const std::optional<NodeId> node_id_to_evict =
SelectNodeToEvict(std::move(vEvictionCandidates));
if (!node_id_to_evict) {
return false;
}
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (pnode->GetId() == *node_id_to_evict) {
LogPrint(
BCLog::NET,
"selected %s connection for eviction peer=%d; disconnecting\n",
pnode->ConnectionTypeAsString(), pnode->GetId());
pnode->fDisconnect = true;
return true;
}
}
return false;
}
void CConnman::AcceptConnection(const ListenSocket &hListenSocket) {
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
SOCKET hSocket =
accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len);
CAddress addr;
if (hSocket == INVALID_SOCKET) {
const int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK) {
LogPrintf("socket error accept failed: %s\n",
NetworkErrorString(nErr));
}
return;
}
if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) {
LogPrintf("Warning: Unknown socket family\n");
}
const CAddress addr_bind = GetBindAddress(hSocket);
NetPermissionFlags permissionFlags = NetPermissionFlags::PF_NONE;
hListenSocket.AddSocketPermissionFlags(permissionFlags);
CreateNodeFromAcceptedSocket(hSocket, permissionFlags, addr_bind, addr);
}
void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
NetPermissionFlags permissionFlags,
const CAddress &addr_bind,
const CAddress &addr) {
int nInbound = 0;
int nMaxInbound = nMaxConnections - m_max_outbound;
AddWhitelistPermissionFlags(permissionFlags, addr);
bool legacyWhitelisted = false;
if (NetPermissions::HasFlag(permissionFlags,
NetPermissionFlags::PF_ISIMPLICIT)) {
NetPermissions::ClearFlag(permissionFlags, PF_ISIMPLICIT);
if (gArgs.GetBoolArg("-whitelistforcerelay",
DEFAULT_WHITELISTFORCERELAY)) {
NetPermissions::AddFlag(permissionFlags, PF_FORCERELAY);
}
if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) {
NetPermissions::AddFlag(permissionFlags, PF_RELAY);
}
NetPermissions::AddFlag(permissionFlags, PF_MEMPOOL);
NetPermissions::AddFlag(permissionFlags, PF_NOBAN);
legacyWhitelisted = true;
}
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->IsInboundConn()) {
nInbound++;
}
}
}
if (!fNetworkActive) {
LogPrint(BCLog::NET,
"connection from %s dropped: not accepting new connections\n",
addr.ToString());
CloseSocket(hSocket);
return;
}
if (!IsSelectableSocket(hSocket)) {
LogPrintf("connection from %s dropped: non-selectable socket\n",
addr.ToString());
CloseSocket(hSocket);
return;
}
// According to the internet TCP_NODELAY is not carried into accepted
// sockets on all platforms. Set it again here just to be sure.
SetSocketNoDelay(hSocket);
// Don't accept connections from banned peers.
bool banned = m_banman && m_banman->IsBanned(addr);
if (!NetPermissions::HasFlag(permissionFlags,
NetPermissionFlags::PF_NOBAN) &&
banned) {
LogPrint(BCLog::NET, "connection from %s dropped (banned)\n",
addr.ToString());
CloseSocket(hSocket);
return;
}
// Only accept connections from discouraged peers if our inbound slots
// aren't (almost) full.
bool discouraged = m_banman && m_banman->IsDiscouraged(addr);
if (!NetPermissions::HasFlag(permissionFlags,
NetPermissionFlags::PF_NOBAN) &&
nInbound + 1 >= nMaxInbound && discouraged) {
LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n",
addr.ToString());
CloseSocket(hSocket);
return;
}
if (nInbound >= nMaxInbound) {
if (!AttemptToEvictConnection()) {
// No connection to evict, disconnect the new connection
LogPrint(BCLog::NET, "failed to find an eviction candidate - "
"connection dropped (full)\n");
CloseSocket(hSocket);
return;
}
}
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE)
.Write(id)
.Finalize();
uint64_t extra_entropy =
GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY)
.Write(id)
.Finalize();
ServiceFlags nodeServices = nLocalServices;
if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) {
nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
}
const bool inbound_onion =
std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) !=
m_onion_binds.end();
CNode *pnode = new CNode(
id, nodeServices, hSocket, addr, CalculateKeyedNetGroup(addr), nonce,
extra_entropy, addr_bind, "", ConnectionType::INBOUND, inbound_onion);
pnode->AddRef();
pnode->m_permissionFlags = permissionFlags;
// If this flag is present, the user probably expects that RPC and QT report
// it as whitelisted (backward compatibility)
pnode->m_legacyWhitelisted = legacyWhitelisted;
pnode->m_prefer_evict = discouraged;
m_msgproc->InitializeNode(*config, pnode);
LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());
{
LOCK(cs_vNodes);
vNodes.push_back(pnode);
}
// We received a new connection, harvest entropy from the time (and our peer
// count)
RandAddEvent(uint32_t(id));
}
bool CConnman::AddConnection(const std::string &address,
ConnectionType conn_type) {
std::optional<int> max_connections;
switch (conn_type) {
case ConnectionType::INBOUND:
case ConnectionType::MANUAL:
return false;
case ConnectionType::OUTBOUND_FULL_RELAY:
max_connections = m_max_outbound_full_relay;
break;
case ConnectionType::BLOCK_RELAY:
max_connections = m_max_outbound_block_relay;
break;
// no limit for ADDR_FETCH because -seednode has no limit either
case ConnectionType::ADDR_FETCH:
break;
// no limit for FEELER connections since they're short-lived
case ConnectionType::FEELER:
break;
case ConnectionType::AVALANCHE_OUTBOUND:
max_connections = m_max_avalanche_outbound;
break;
} // no default case, so the compiler can warn about missing cases
// Count existing connections
int existing_connections = WITH_LOCK(
cs_vNodes, return std::count_if(
vNodes.begin(), vNodes.end(), [conn_type](CNode *node) {
return node->m_conn_type == conn_type;
}););
// Max connections of specified type already exist
if (max_connections != std::nullopt &&
existing_connections >= max_connections) {
return false;
}
// Max total outbound connections already exist
CSemaphoreGrant grant(*semOutbound, true);
if (!grant) {
return false;
}
OpenNetworkConnection(CAddress(), false, &grant, address.c_str(),
conn_type);
return true;
}
void CConnman::DisconnectNodes() {
{
LOCK(cs_vNodes);
if (!fNetworkActive) {
// Disconnect any connected nodes
for (CNode *pnode : vNodes) {
if (!pnode->fDisconnect) {
LogPrint(BCLog::NET,
"Network not active, dropping peer=%d\n",
pnode->GetId());
pnode->fDisconnect = true;
}
}
}
// Disconnect unused nodes
std::vector<CNode *> vNodesCopy = vNodes;
for (CNode *pnode : vNodesCopy) {
if (pnode->fDisconnect) {
// remove from vNodes
vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode),
vNodes.end());
// release outbound grant (if any)
pnode->grantOutbound.Release();
// close socket and cleanup
pnode->CloseSocketDisconnect();
// hold in disconnected pool until all refs are released
pnode->Release();
vNodesDisconnected.push_back(pnode);
}
}
}
{
// Delete disconnected nodes
std::list<CNode *> vNodesDisconnectedCopy = vNodesDisconnected;
for (CNode *pnode : vNodesDisconnectedCopy) {
// wait until threads are done using it
if (pnode->GetRefCount() <= 0) {
bool fDelete = false;
{
TRY_LOCK(pnode->cs_vSend, lockSend);
if (lockSend) {
fDelete = true;
}
}
if (fDelete) {
vNodesDisconnected.remove(pnode);
DeleteNode(pnode);
}
}
}
}
}
void CConnman::NotifyNumConnectionsChanged() {
size_t vNodesSize;
{
LOCK(cs_vNodes);
vNodesSize = vNodes.size();
}
if (vNodesSize != nPrevNodeCount) {
nPrevNodeCount = vNodesSize;
if (clientInterface) {
clientInterface->NotifyNumConnectionsChanged(vNodesSize);
}
}
}
bool CConnman::ShouldRunInactivityChecks(const CNode &node,
std::chrono::seconds now) const {
return std::chrono::seconds{node.nTimeConnected} + m_peer_connect_timeout <
now;
}
bool CConnman::InactivityCheck(const CNode &node) const {
// Tests that see disconnects after using mocktime can start nodes with a
// large timeout. For example, -peertimeout=999999999.
const auto now{GetTime<std::chrono::seconds>()};
const auto last_send{node.m_last_send.load()};
const auto last_recv{node.m_last_recv.load()};
if (!ShouldRunInactivityChecks(node, now)) {
return false;
}
if (last_recv.count() == 0 || last_send.count() == 0) {
LogPrint(BCLog::NET,
"socket no message in first %i seconds, %d %d peer=%d\n",
count_seconds(m_peer_connect_timeout), last_recv.count() != 0,
last_send.count() != 0, node.GetId());
return true;
}
if (now > last_send + TIMEOUT_INTERVAL) {
LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n",
count_seconds(now - last_send), node.GetId());
return true;
}
if (now > last_recv + TIMEOUT_INTERVAL) {
LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n",
count_seconds(now - last_recv), node.GetId());
return true;
}
if (!node.fSuccessfullyConnected) {
LogPrint(BCLog::NET, "version handshake timeout peer=%d\n",
node.GetId());
return true;
}
return false;
}
bool CConnman::GenerateSelectSet(std::set<SOCKET> &recv_set,
std::set<SOCKET> &send_set,
std::set<SOCKET> &error_set) {
for (const ListenSocket &hListenSocket : vhListenSocket) {
recv_set.insert(hListenSocket.socket);
}
{
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
// Implement the following logic:
// * If there is data to send, select() for sending data. As this
// only happens when optimistic write failed, we choose to first
// drain the write buffer in this case before receiving more. This
// avoids needlessly queueing received data, if the remote peer is
// not themselves receiving data. This means properly utilizing
// TCP flow control signalling.
// * Otherwise, if there is space left in the receive buffer,
// select() for receiving data.
// * Hand off all complete messages to the processor, to be handled
// without blocking here.
bool select_recv = !pnode->fPauseRecv;
bool select_send;
{
LOCK(pnode->cs_vSend);
select_send = !pnode->vSendMsg.empty();
}
LOCK(pnode->cs_hSocket);
if (pnode->hSocket == INVALID_SOCKET) {
continue;
}
error_set.insert(pnode->hSocket);
if (select_send) {
send_set.insert(pnode->hSocket);
continue;
}
if (select_recv) {
recv_set.insert(pnode->hSocket);
}
}
}
return !recv_set.empty() || !send_set.empty() || !error_set.empty();
}
#ifdef USE_POLL
void CConnman::SocketEvents(std::set<SOCKET> &recv_set,
std::set<SOCKET> &send_set,
std::set<SOCKET> &error_set) {
std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
if (!GenerateSelectSet(recv_select_set, send_select_set,
error_select_set)) {
interruptNet.sleep_for(
std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
return;
}
std::unordered_map<SOCKET, struct pollfd> pollfds;
for (SOCKET socket_id : recv_select_set) {
pollfds[socket_id].fd = socket_id;
pollfds[socket_id].events |= POLLIN;
}
for (SOCKET socket_id : send_select_set) {
pollfds[socket_id].fd = socket_id;
pollfds[socket_id].events |= POLLOUT;
}
for (SOCKET socket_id : error_select_set) {
pollfds[socket_id].fd = socket_id;
// These flags are ignored, but we set them for clarity
pollfds[socket_id].events |= POLLERR | POLLHUP;
}
std::vector<struct pollfd> vpollfds;
vpollfds.reserve(pollfds.size());
for (auto it : pollfds) {
vpollfds.push_back(std::move(it.second));
}
if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) <
0) {
return;
}
if (interruptNet) {
return;
}
for (struct pollfd pollfd_entry : vpollfds) {
if (pollfd_entry.revents & POLLIN) {
recv_set.insert(pollfd_entry.fd);
}
if (pollfd_entry.revents & POLLOUT) {
send_set.insert(pollfd_entry.fd);
}
if (pollfd_entry.revents & (POLLERR | POLLHUP)) {
error_set.insert(pollfd_entry.fd);
}
}
}
#else
void CConnman::SocketEvents(std::set<SOCKET> &recv_set,
std::set<SOCKET> &send_set,
std::set<SOCKET> &error_set) {
std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
if (!GenerateSelectSet(recv_select_set, send_select_set,
error_select_set)) {
interruptNet.sleep_for(
std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
return;
}
//
// Find which sockets have data to receive
//
struct timeval timeout;
timeout.tv_sec = 0;
// frequency to poll pnode->vSend
timeout.tv_usec = SELECT_TIMEOUT_MILLISECONDS * 1000;
fd_set fdsetRecv;
fd_set fdsetSend;
fd_set fdsetError;
FD_ZERO(&fdsetRecv);
FD_ZERO(&fdsetSend);
FD_ZERO(&fdsetError);
SOCKET hSocketMax = 0;
for (SOCKET hSocket : recv_select_set) {
FD_SET(hSocket, &fdsetRecv);
hSocketMax = std::max(hSocketMax, hSocket);
}
for (SOCKET hSocket : send_select_set) {
FD_SET(hSocket, &fdsetSend);
hSocketMax = std::max(hSocketMax, hSocket);
}
for (SOCKET hSocket : error_select_set) {
FD_SET(hSocket, &fdsetError);
hSocketMax = std::max(hSocketMax, hSocket);
}
int nSelect =
select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
if (interruptNet) {
return;
}
if (nSelect == SOCKET_ERROR) {
int nErr = WSAGetLastError();
LogPrintf("socket select error %s\n", NetworkErrorString(nErr));
for (unsigned int i = 0; i <= hSocketMax; i++) {
FD_SET(i, &fdsetRecv);
}
FD_ZERO(&fdsetSend);
FD_ZERO(&fdsetError);
if (!interruptNet.sleep_for(
std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS))) {
return;
}
}
for (SOCKET hSocket : recv_select_set) {
if (FD_ISSET(hSocket, &fdsetRecv)) {
recv_set.insert(hSocket);
}
}
for (SOCKET hSocket : send_select_set) {
if (FD_ISSET(hSocket, &fdsetSend)) {
send_set.insert(hSocket);
}
}
for (SOCKET hSocket : error_select_set) {
if (FD_ISSET(hSocket, &fdsetError)) {
error_set.insert(hSocket);
}
}
}
#endif
void CConnman::SocketHandler() {
std::set<SOCKET> recv_set, send_set, error_set;
SocketEvents(recv_set, send_set, error_set);
if (interruptNet) {
return;
}
//
// Accept new connections
//
for (const ListenSocket &hListenSocket : vhListenSocket) {
if (hListenSocket.socket != INVALID_SOCKET &&
recv_set.count(hListenSocket.socket) > 0) {
AcceptConnection(hListenSocket);
}
}
//
// Service each socket
//
std::vector<CNode *> vNodesCopy;
{
LOCK(cs_vNodes);
vNodesCopy = vNodes;
for (CNode *pnode : vNodesCopy) {
pnode->AddRef();
}
}
for (CNode *pnode : vNodesCopy) {
if (interruptNet) {
return;
}
//
// Receive
//
bool recvSet = false;
bool sendSet = false;
bool errorSet = false;
{
LOCK(pnode->cs_hSocket);
if (pnode->hSocket == INVALID_SOCKET) {
continue;
}
recvSet = recv_set.count(pnode->hSocket) > 0;
sendSet = send_set.count(pnode->hSocket) > 0;
errorSet = error_set.count(pnode->hSocket) > 0;
}
if (recvSet || errorSet) {
// typical socket buffer is 8K-64K
char pchBuf[0x10000];
int32_t nBytes = 0;
{
LOCK(pnode->cs_hSocket);
if (pnode->hSocket == INVALID_SOCKET) {
continue;
}
nBytes =
recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
}
if (nBytes > 0) {
bool notify = false;
if (!pnode->ReceiveMsgBytes(
*config, Span<const char>(pchBuf, nBytes), notify)) {
pnode->CloseSocketDisconnect();
}
RecordBytesRecv(nBytes);
if (notify) {
size_t nSizeAdded = 0;
auto it(pnode->vRecvMsg.begin());
for (; it != pnode->vRecvMsg.end(); ++it) {
// vRecvMsg contains only completed CNetMessage
// - the single possible partially deserialized message
// is held by the TransportDeserializer
nSizeAdded += it->m_raw_message_size;
}
{
LOCK(pnode->cs_vProcessMsg);
pnode->vProcessMsg.splice(pnode->vProcessMsg.end(),
pnode->vRecvMsg,
pnode->vRecvMsg.begin(), it);
pnode->nProcessQueueSize += nSizeAdded;
pnode->fPauseRecv =
pnode->nProcessQueueSize > nReceiveFloodSize;
}
WakeMessageHandler();
}
} else if (nBytes == 0) {
// socket closed gracefully
if (!pnode->fDisconnect) {
LogPrint(BCLog::NET, "socket closed for peer=%d\n",
pnode->GetId());
}
pnode->CloseSocketDisconnect();
} else if (nBytes < 0) {
// error
int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE &&
nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
if (!pnode->fDisconnect) {
LogPrint(BCLog::NET,
"socket recv error for peer=%d: %s\n",
pnode->GetId(), NetworkErrorString(nErr));
}
pnode->CloseSocketDisconnect();
}
}
}
//
// Send
//
if (sendSet) {
LOCK(pnode->cs_vSend);
size_t nBytes = SocketSendData(*pnode);
if (nBytes) {
RecordBytesSent(nBytes);
}
}
if (InactivityCheck(*pnode)) {
pnode->fDisconnect = true;
}
}
{
LOCK(cs_vNodes);
for (CNode *pnode : vNodesCopy) {
pnode->Release();
}
}
}
void CConnman::ThreadSocketHandler() {
while (!interruptNet) {
DisconnectNodes();
NotifyNumConnectionsChanged();
SocketHandler();
}
}
void CConnman::WakeMessageHandler() {
{
LOCK(mutexMsgProc);
fMsgProcWake = true;
}
condMsgProc.notify_one();
}
#ifdef USE_UPNP
static CThreadInterrupt g_upnp_interrupt;
static std::thread g_upnp_thread;
static void ThreadMapPort() {
std::string port = strprintf("%u", GetListenPort());
const char *multicastif = nullptr;
const char *minissdpdpath = nullptr;
struct UPNPDev *devlist = nullptr;
char lanaddr[64];
int error = 0;
#if MINIUPNPC_API_VERSION < 14
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
#else
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
#endif
struct UPNPUrls urls;
struct IGDdatas data;
int r;
r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
if (r == 1) {
if (fDiscover) {
char externalIPAddress[40];
r = UPNP_GetExternalIPAddress(
urls.controlURL, data.first.servicetype, externalIPAddress);
if (r != UPNPCOMMAND_SUCCESS) {
LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r);
} else {
if (externalIPAddress[0]) {
CNetAddr resolved;
if (LookupHost(externalIPAddress, resolved, false)) {
LogPrintf("UPnP: ExternalIPAddress = %s\n",
resolved.ToString());
AddLocal(resolved, LOCAL_UPNP);
}
} else {
LogPrintf("UPnP: GetExternalIPAddress failed.\n");
}
}
}
std::string strDesc = PACKAGE_NAME " " + FormatFullVersion();
do {
r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
port.c_str(), port.c_str(), lanaddr,
strDesc.c_str(), "TCP", 0, "0");
if (r != UPNPCOMMAND_SUCCESS) {
LogPrintf(
"AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
port, port, lanaddr, r, strupnperror(r));
} else {
LogPrintf("UPnP Port Mapping successful.\n");
}
} while (g_upnp_interrupt.sleep_for(std::chrono::minutes(20)));
r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype,
port.c_str(), "TCP", 0);
LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r);
freeUPNPDevlist(devlist);
devlist = nullptr;
FreeUPNPUrls(&urls);
} else {
LogPrintf("No valid UPnP IGDs found\n");
freeUPNPDevlist(devlist);
devlist = nullptr;
if (r != 0) {
FreeUPNPUrls(&urls);
}
}
}
void StartMapPort() {
if (!g_upnp_thread.joinable()) {
assert(!g_upnp_interrupt);
g_upnp_thread = std::thread(
(std::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort)));
}
}
void InterruptMapPort() {
if (g_upnp_thread.joinable()) {
g_upnp_interrupt();
}
}
void StopMapPort() {
if (g_upnp_thread.joinable()) {
g_upnp_thread.join();
g_upnp_interrupt.reset();
}
}
#else
void StartMapPort() {
// Intentionally left blank.
}
void InterruptMapPort() {
// Intentionally left blank.
}
void StopMapPort() {
// Intentionally left blank.
}
#endif
void CConnman::ThreadDNSAddressSeed() {
FastRandomContext rng;
std::vector<std::string> seeds =
GetRandomizedDNSSeeds(config->GetChainParams());
// Number of seeds left before testing if we have enough connections
int seeds_right_now = 0;
int found = 0;
if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) {
// When -forcednsseed is provided, query all.
seeds_right_now = seeds.size();
} else if (addrman.size() == 0) {
// If we have no known peers, query all.
// This will occur on the first run, or if peers.dat has been
// deleted.
seeds_right_now = seeds.size();
}
// goal: only query DNS seed if address need is acute
// * If we have a reasonable number of peers in addrman, spend
// some time trying them first. This improves user privacy by
// creating fewer identifying DNS requests, reduces trust by
// giving seeds less influence on the network topology, and
// reduces traffic to the seeds.
// * When querying DNS seeds, query a few at once; this ensures
// that we don't give DNS seeds the ability to eclipse nodes
// that query them.
// * If we continue having problems, eventually query all the
// DNS seeds, and if that fails too, also try the fixed seeds.
// (done in ThreadOpenConnections)
const std::chrono::seconds seeds_wait_time =
(addrman.size() >= DNSSEEDS_DELAY_PEER_THRESHOLD
? DNSSEEDS_DELAY_MANY_PEERS
: DNSSEEDS_DELAY_FEW_PEERS);
for (const std::string &seed : seeds) {
if (seeds_right_now == 0) {
seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE;
if (addrman.size() > 0) {
LogPrintf("Waiting %d seconds before querying DNS seeds.\n",
seeds_wait_time.count());
std::chrono::seconds to_wait = seeds_wait_time;
while (to_wait.count() > 0) {
// if sleeping for the MANY_PEERS interval, wake up
// early to see if we have enough peers and can stop
// this thread entirely, freeing up its resources
std::chrono::seconds w =
std::min(DNSSEEDS_DELAY_FEW_PEERS, to_wait);
if (!interruptNet.sleep_for(w)) {
return;
}
to_wait -= w;
int nRelevant = 0;
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->fSuccessfullyConnected &&
pnode->IsOutboundOrBlockRelayConn()) {
++nRelevant;
}
}
}
if (nRelevant >= 2) {
if (found > 0) {
LogPrintf("%d addresses found from DNS seeds\n",
found);
LogPrintf(
"P2P peers available. Finished DNS seeding.\n");
} else {
LogPrintf(
"P2P peers available. Skipped DNS seeding.\n");
}
return;
}
}
}
}
if (interruptNet) {
return;
}
// hold off on querying seeds if P2P network deactivated
if (!fNetworkActive) {
LogPrintf("Waiting for network to be reactivated before querying "
"DNS seeds.\n");
do {
if (!interruptNet.sleep_for(std::chrono::seconds{1})) {
return;
}
} while (!fNetworkActive);
}
LogPrintf("Loading addresses from DNS seed %s\n", seed);
if (HaveNameProxy()) {
AddAddrFetch(seed);
} else {
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
ServiceFlags requiredServiceBits =
GetDesirableServiceFlags(NODE_NONE);
std::string host = strprintf("x%x.%s", requiredServiceBits, seed);
CNetAddr resolveSource;
if (!resolveSource.SetInternal(host)) {
continue;
}
// Limits number of IPs learned from a DNS seed
unsigned int nMaxIPs = 256;
if (LookupHost(host, vIPs, nMaxIPs, true)) {
for (const CNetAddr &ip : vIPs) {
int nOneDay = 24 * 3600;
CAddress addr = CAddress(
CService(ip, config->GetChainParams().GetDefaultPort()),
requiredServiceBits);
// Use a random age between 3 and 7 days old.
addr.nTime =
GetTime() - 3 * nOneDay - rng.randrange(4 * nOneDay);
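// With rng.randrange(4 * nOneDay) in [0, 4 days), this places
// addr.nTime uniformly in the interval (now - 7 days, now - 3 days].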
vAdd.push_back(addr);
found++;
}
addrman.Add(vAdd, resolveSource);
} else {
// We now avoid directly using results from DNS seeds which do
// not support service bit filtering, instead using them as an
// addrfetch to get nodes with our desired service bits.
AddAddrFetch(seed);
}
}
--seeds_right_now;
}
LogPrintf("%d addresses found from DNS seeds\n", found);
}
void CConnman::DumpAddresses() {
int64_t nStart = GetTimeMillis();
CAddrDB adb(config->GetChainParams());
adb.Write(addrman);
LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
}
void CConnman::ProcessAddrFetch() {
std::string strDest;
{
LOCK(m_addr_fetches_mutex);
if (m_addr_fetches.empty()) {
return;
}
strDest = m_addr_fetches.front();
m_addr_fetches.pop_front();
}
CAddress addr;
CSemaphoreGrant grant(*semOutbound, true);
if (grant) {
OpenNetworkConnection(addr, false, &grant, strDest.c_str(),
ConnectionType::ADDR_FETCH);
}
}
bool CConnman::GetTryNewOutboundPeer() {
return m_try_another_outbound_peer;
}
void CConnman::SetTryNewOutboundPeer(bool flag) {
m_try_another_outbound_peer = flag;
LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n",
flag ? "true" : "false");
}
// Return the number of peers we have over our outbound connection limit.
// Exclude peers that are marked for disconnect, or are going to be disconnected
// soon (e.g. ADDR_FETCH and FEELER).
// Also exclude peers that haven't finished initial connection handshake yet (so
// that we don't decide we're over our desired connection limit, and then evict
// some peer that has finished the handshake).
int CConnman::GetExtraFullOutboundCount() {
int full_outbound_peers = 0;
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->fSuccessfullyConnected && !pnode->fDisconnect &&
pnode->IsFullOutboundConn()) {
++full_outbound_peers;
}
}
}
return std::max(full_outbound_peers - m_max_outbound_full_relay, 0);
}
int CConnman::GetExtraBlockRelayCount() {
int block_relay_peers = 0;
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->fSuccessfullyConnected && !pnode->fDisconnect &&
pnode->IsBlockOnlyConn()) {
++block_relay_peers;
}
}
}
return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
}
void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) {
// Connect to specific addresses
if (!connect.empty()) {
for (int64_t nLoop = 0;; nLoop++) {
ProcessAddrFetch();
for (const std::string &strAddr : connect) {
CAddress addr(CService(), NODE_NONE);
OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(),
ConnectionType::MANUAL);
for (int i = 0; i < 10 && i < nLoop; i++) {
if (!interruptNet.sleep_for(
std::chrono::milliseconds(500))) {
return;
}
}
}
if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) {
return;
}
}
}
// Initiate network connections
auto start = GetTime<std::chrono::microseconds>();
// Minimum time before next feeler connection (in microseconds).
auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL);
auto next_extra_block_relay =
PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
if (!add_fixed_seeds) {
LogPrintf("Fixed seeds are disabled\n");
}
while (!interruptNet) {
ProcessAddrFetch();
if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) {
return;
}
CSemaphoreGrant grant(*semOutbound);
if (interruptNet) {
return;
}
if (add_fixed_seeds && addrman.size() == 0) {
// When the node starts with an empty peers.dat, there are a few
// other sources of peers before we fall back to fixed seeds:
// -dnsseed, -seednode, -addnode. If none of those are available, we
// fall back to fixed seeds immediately; otherwise we allow 60 seconds
// for any of those sources to populate addrman.
bool add_fixed_seeds_now = false;
// It is cheapest to check if enough time has passed first.
if (GetTime<std::chrono::seconds>() >
start + std::chrono::minutes{1}) {
add_fixed_seeds_now = true;
LogPrintf("Adding fixed seeds as 60 seconds have passed and "
"addrman is empty\n");
}
// Checking !dnsseed is cheaper before locking 2 mutexes.
if (!add_fixed_seeds_now && !dnsseed) {
LOCK2(m_addr_fetches_mutex, cs_vAddedNodes);
if (m_addr_fetches.empty() && vAddedNodes.empty()) {
add_fixed_seeds_now = true;
LogPrintf(
"Adding fixed seeds as -dnsseed=0, -addnode is not "
"provided and all -seednode(s) attempted\n");
}
}
if (add_fixed_seeds_now) {
CNetAddr local;
local.SetInternal("fixedseeds");
addrman.Add(convertSeed6(config->GetChainParams().FixedSeeds()),
local);
add_fixed_seeds = false;
}
}
//
// Choose an address to connect to based on most recently seen
//
CAddress addrConnect;
// Only connect out to one peer per network group (/16 for IPv4).
int nOutboundFullRelay = 0;
int nOutboundBlockRelay = 0;
int nOutboundAvalanche = 0;
std::set<std::vector<uint8_t>> setConnected;
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
// Netgroups for inbound and manual peers are not excluded
// because our goal here is to not use multiple of our
// limited outbound slots on a single netgroup but inbound
// and manual peers do not use our outbound slots. Inbound
// peers also have the added issue that they could be attacker
// controlled and could be used to prevent us from connecting
// to particular hosts if we used them here.
switch (pnode->m_conn_type) {
    case ConnectionType::INBOUND:
    case ConnectionType::MANUAL:
        // Inbound and manual peers do not consume outbound slots,
        // so their netgroups are not reserved; skip to the next node.
        continue;
    case ConnectionType::AVALANCHE_OUTBOUND:
        nOutboundAvalanche++;
        break;
    case ConnectionType::OUTBOUND_FULL_RELAY:
        nOutboundFullRelay++;
        break;
    case ConnectionType::BLOCK_RELAY:
        nOutboundBlockRelay++;
        break;
    case ConnectionType::ADDR_FETCH:
    case ConnectionType::FEELER:
        break;
} // no default case, so the compiler can warn about missing
  // cases
setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
}
}
ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
auto now = GetTime<std::chrono::microseconds>();
bool anchor = false;
bool fFeeler = false;
// Determine what type of connection to open. Opening
// BLOCK_RELAY connections to addresses from anchors.dat gets the
// highest priority. Then we open AVALANCHE_OUTBOUND connection until we
// hit our avalanche outbound peer limit, which is 0 if avalanche is not
// enabled. We fallback to OUTBOUND_FULL_RELAY if the peer is not
// avalanche capable until we meet our full-relay capacity. Then we open
// BLOCK_RELAY connection until we hit our block-relay-only peer limit.
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
// try opening an additional OUTBOUND_FULL_RELAY connection. If none of
// these conditions are met, check to see if it's time to try an extra
// block-relay-only peer (to confirm our tip is current, see below) or
// the next_feeler timer to decide if we should open a FEELER.
if (!m_anchors.empty() &&
(nOutboundBlockRelay < m_max_outbound_block_relay)) {
conn_type = ConnectionType::BLOCK_RELAY;
anchor = true;
} else if (g_avalanche &&
(nOutboundAvalanche < m_max_avalanche_outbound)) {
conn_type = ConnectionType::AVALANCHE_OUTBOUND;
} else if (nOutboundFullRelay < m_max_outbound_full_relay) {
// OUTBOUND_FULL_RELAY
} else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
conn_type = ConnectionType::BLOCK_RELAY;
} else if (GetTryNewOutboundPeer()) {
// OUTBOUND_FULL_RELAY
} else if (now > next_extra_block_relay &&
m_start_extra_block_relay_peers) {
// Periodically connect to a peer (using regular outbound selection
// methodology from addrman) and stay connected long enough to sync
// headers, but not much else.
//
// Then disconnect the peer, if we haven't learned anything new.
//
// The idea is to make eclipse attacks very difficult to pull off,
// because every few minutes we're finding a new peer to learn
// headers from.
//
// This is similar to the logic for trying extra outbound
// (full-relay) peers, except:
// - we do this all the time on a poisson timer, rather than just
// when our tip is stale
// - we potentially disconnect our next-youngest block-relay-only
// peer, if our newest block-relay-only peer delivers a block more
// recently.
// See the eviction logic in net_processing.cpp.
//
// Because we can promote these connections to block-relay-only
// connections, they do not get their own ConnectionType enum
// (similar to how we deal with extra outbound peers).
next_extra_block_relay =
PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
conn_type = ConnectionType::BLOCK_RELAY;
} else if (now > next_feeler) {
next_feeler = PoissonNextSend(now, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
fFeeler = true;
} else {
// skip to next iteration of while loop
continue;
}
addrman.ResolveCollisions();
int64_t nANow = GetAdjustedTime();
int nTries = 0;
while (!interruptNet) {
if (anchor && !m_anchors.empty()) {
const CAddress addr = m_anchors.back();
m_anchors.pop_back();
if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) ||
!HasAllDesirableServiceFlags(addr.nServices) ||
setConnected.count(addr.GetGroup(addrman.m_asmap))) {
continue;
}
addrConnect = addr;
LogPrint(BCLog::NET,
"Trying to make an anchor connection to %s\n",
addrConnect.ToString());
break;
}
// If we didn't find an appropriate destination after trying 100
// addresses fetched from addrman, stop this loop, and let the outer
// loop run again (which sleeps, adds seed nodes, recalculates
// already-connected network ranges, ...) before trying new addrman
// addresses.
nTries++;
if (nTries > 100) {
break;
}
CAddrInfo addr;
if (fFeeler) {
// First, try to get a tried table collision address. This
// returns an empty (invalid) address if there are no collisions
// to try.
addr = addrman.SelectTriedCollision();
if (!addr.IsValid()) {
// No tried table collisions. Select a new table address
// for our feeler.
addr = addrman.Select(true);
} else if (AlreadyConnectedToAddress(addr)) {
// If test-before-evict logic would have us connect to a
// peer that we're already connected to, just mark that
// address as Good(). We won't be able to initiate the
// connection anyway, so this avoids inadvertently evicting
// a currently-connected peer.
addrman.Good(addr);
// Select a new table address for our feeler instead.
addr = addrman.Select(true);
}
} else {
// Not a feeler
addr = addrman.Select();
}
// Require outbound connections, other than feelers, to be to
// distinct network groups
if (!fFeeler &&
setConnected.count(addr.GetGroup(addrman.m_asmap))) {
break;
}
// if we selected an invalid or local address, restart
if (!addr.IsValid() || IsLocal(addr)) {
break;
}
if (!IsReachable(addr)) {
continue;
}
// only consider very recently tried nodes after 30 failed attempts
if (nANow - addr.nLastTry < 600 && nTries < 30) {
continue;
}
// for non-feelers, require all the services we'll want,
// for feelers, only require they be a full node (only because most
// SPV clients don't have a good address DB available)
if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) {
continue;
}
if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) {
continue;
}
// Do not allow non-default ports, unless 50 invalid addresses
// have already been selected. This is to prevent malicious peers
// from advertising themselves as a service on another host and
// port, causing a DoS attack as nodes around the network attempt
// to connect to it fruitlessly.
if (addr.GetPort() != config->GetChainParams().GetDefaultPort() &&
nTries < 50) {
continue;
}
// For avalanche peers, check they have the avalanche service bit
// set.
if (conn_type == ConnectionType::AVALANCHE_OUTBOUND &&
!(addr.nServices & NODE_AVALANCHE)) {
// If this peer is not suitable as an avalanche one, see if we
// can fall back to a non-avalanche full outbound.
if (nOutboundFullRelay >= m_max_outbound_full_relay) {
// Fallback is not possible, try another one
continue;
}
// Fallback is possible, update the connection type accordingly
conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
}
addrConnect = addr;
break;
}
if (addrConnect.IsValid()) {
if (fFeeler) {
// Add small amount of random noise before connection to avoid
// synchronization.
int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000);
if (!interruptNet.sleep_for(
std::chrono::milliseconds(randsleep))) {
return;
}
LogPrint(BCLog::NET, "Making feeler connection to %s\n",
addrConnect.ToString());
}
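// Note: the fCountFailure argument below is only true once we are
// already connected to at least a couple of distinct netgroups. This
// likely serves to avoid penalizing addrman entries for failed
// connection attempts while our own connectivity is the problem
// (a plausible rationale; it is not stated in the code itself).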
OpenNetworkConnection(addrConnect,
int(setConnected.size()) >=
std::min(nMaxConnections - 1, 2),
&grant, nullptr, conn_type);
}
}
}
std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const {
std::vector<CAddress> ret;
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->IsBlockOnlyConn()) {
ret.push_back(pnode->addr);
}
}
return ret;
}
std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() {
std::vector<AddedNodeInfo> ret;
std::list<std::string> lAddresses(0);
{
LOCK(cs_vAddedNodes);
ret.reserve(vAddedNodes.size());
std::copy(vAddedNodes.cbegin(), vAddedNodes.cend(),
std::back_inserter(lAddresses));
}
// Build a map of all already connected addresses (by IP:port and by name)
// to inbound/outbound and resolved CService
std::map<CService, bool> mapConnected;
std::map<std::string, std::pair<bool, CService>> mapConnectedByName;
{
LOCK(cs_vNodes);
for (const CNode *pnode : vNodes) {
if (pnode->addr.IsValid()) {
mapConnected[pnode->addr] = pnode->IsInboundConn();
}
std::string addrName = pnode->GetAddrName();
if (!addrName.empty()) {
mapConnectedByName[std::move(addrName)] =
std::make_pair(pnode->IsInboundConn(),
static_cast<const CService &>(pnode->addr));
}
}
}
for (const std::string &strAddNode : lAddresses) {
CService service(LookupNumeric(strAddNode, Params().GetDefaultPort()));
AddedNodeInfo addedNode{strAddNode, CService(), false, false};
if (service.IsValid()) {
// strAddNode is an IP:port
auto it = mapConnected.find(service);
if (it != mapConnected.end()) {
addedNode.resolvedAddress = service;
addedNode.fConnected = true;
addedNode.fInbound = it->second;
}
} else {
// strAddNode is a name
auto it = mapConnectedByName.find(strAddNode);
if (it != mapConnectedByName.end()) {
addedNode.resolvedAddress = it->second.second;
addedNode.fConnected = true;
addedNode.fInbound = it->second.first;
}
}
ret.emplace_back(std::move(addedNode));
}
return ret;
}
void CConnman::ThreadOpenAddedConnections() {
while (true) {
CSemaphoreGrant grant(*semAddnode);
std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
bool tried = false;
for (const AddedNodeInfo &info : vInfo) {
if (!info.fConnected) {
if (!grant.TryAcquire()) {
// If we've used up our semaphore and need a new one, let's
// not wait here since while we are waiting the
// addednodeinfo state might change.
break;
}
tried = true;
CAddress addr(CService(), NODE_NONE);
OpenNetworkConnection(addr, false, &grant,
info.strAddedNode.c_str(),
ConnectionType::MANUAL);
if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) {
return;
}
}
}
// Retry every 60 seconds if a connection was attempted, otherwise two
// seconds.
if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) {
return;
}
}
}
// If successful, this moves the passed grant to the constructed node.
void CConnman::OpenNetworkConnection(const CAddress &addrConnect,
bool fCountFailure,
CSemaphoreGrant *grantOutbound,
const char *pszDest,
ConnectionType conn_type) {
assert(conn_type != ConnectionType::INBOUND);
//
// Initiate outbound network connection
//
if (interruptNet) {
return;
}
if (!fNetworkActive) {
return;
}
if (!pszDest) {
bool banned_or_discouraged =
m_banman && (m_banman->IsDiscouraged(addrConnect) ||
m_banman->IsBanned(addrConnect));
if (IsLocal(addrConnect) || banned_or_discouraged ||
AlreadyConnectedToAddress(addrConnect)) {
return;
}
} else if (FindNode(std::string(pszDest))) {
return;
}
CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type);
if (!pnode) {
return;
}
if (grantOutbound) {
grantOutbound->MoveTo(pnode->grantOutbound);
}
m_msgproc->InitializeNode(*config, pnode);
{
LOCK(cs_vNodes);
vNodes.push_back(pnode);
}
}
void CConnman::ThreadMessageHandler() {
while (!flagInterruptMsgProc) {
std::vector<CNode *> vNodesCopy;
{
LOCK(cs_vNodes);
vNodesCopy = vNodes;
for (CNode *pnode : vNodesCopy) {
pnode->AddRef();
}
}
bool fMoreWork = false;
for (CNode *pnode : vNodesCopy) {
if (pnode->fDisconnect) {
continue;
}
// Receive messages
bool fMoreNodeWork = m_msgproc->ProcessMessages(
*config, pnode, flagInterruptMsgProc);
fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend);
if (flagInterruptMsgProc) {
return;
}
// Send messages
{
LOCK(pnode->cs_sendProcessing);
m_msgproc->SendMessages(*config, pnode);
}
if (flagInterruptMsgProc) {
return;
}
}
{
LOCK(cs_vNodes);
for (CNode *pnode : vNodesCopy) {
pnode->Release();
}
}
WAIT_LOCK(mutexMsgProc, lock);
if (!fMoreWork) {
condMsgProc.wait_until(lock,
std::chrono::steady_clock::now() +
std::chrono::milliseconds(100),
[this]() EXCLUSIVE_LOCKS_REQUIRED(
mutexMsgProc) { return fMsgProcWake; });
}
fMsgProcWake = false;
}
}
void CConnman::ThreadI2PAcceptIncoming() {
static constexpr auto err_wait_begin = 1s;
static constexpr auto err_wait_cap = 5min;
auto err_wait = err_wait_begin;
bool advertising_listen_addr = false;
i2p::Connection conn;
while (!interruptNet) {
if (!m_i2p_sam_session->Listen(conn)) {
if (advertising_listen_addr && conn.me.IsValid()) {
RemoveLocal(conn.me);
advertising_listen_addr = false;
}
interruptNet.sleep_for(err_wait);
if (err_wait < err_wait_cap) {
err_wait *= 2;
}
continue;
}
if (!advertising_listen_addr) {
AddLocal(conn.me, LOCAL_MANUAL);
advertising_listen_addr = true;
}
if (!m_i2p_sam_session->Accept(conn)) {
continue;
}
CreateNodeFromAcceptedSocket(
conn.sock->Release(), NetPermissionFlags::PF_NONE,
CAddress{conn.me, NODE_NONE}, CAddress{conn.peer, NODE_NONE});
}
}
bool CConnman::BindListenPort(const CService &addrBind, bilingual_str &strError,
NetPermissionFlags permissions) {
int nOne = 1;
// Create socket for listening for incoming connections
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) {
strError = strprintf(
Untranslated("Error: Bind address family for %s not supported"),
addrBind.ToString());
LogPrintf("%s\n", strError.original);
return false;
}
std::unique_ptr<Sock> sock = CreateSock(addrBind);
if (!sock) {
strError =
strprintf(Untranslated("Error: Couldn't open socket for incoming "
"connections (socket returned error %s)"),
NetworkErrorString(WSAGetLastError()));
LogPrintf("%s\n", strError.original);
return false;
}
// Allow binding if the port is still in TIME_WAIT state after
// the program was closed and restarted.
setsockopt(sock->Get(), SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne,
sizeof(int));
// Some systems don't have IPV6_V6ONLY but are always v6only; others do have
// the option and enable it by default or not. Try to enable it, if
// possible.
if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_V6ONLY,
(sockopt_arg_type)&nOne, sizeof(int));
#endif
#ifdef WIN32
int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED;
setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_PROTECTION_LEVEL,
(sockopt_arg_type)&nProtLevel, sizeof(int));
#endif
}
if (::bind(sock->Get(), (struct sockaddr *)&sockaddr, len) ==
SOCKET_ERROR) {
int nErr = WSAGetLastError();
if (nErr == WSAEADDRINUSE) {
strError = strprintf(_("Unable to bind to %s on this computer. %s "
"is probably already running."),
addrBind.ToString(), PACKAGE_NAME);
} else {
strError = strprintf(_("Unable to bind to %s on this computer "
"(bind returned error %s)"),
addrBind.ToString(), NetworkErrorString(nErr));
}
LogPrintf("%s\n", strError.original);
return false;
}
LogPrintf("Bound to %s\n", addrBind.ToString());
// Listen for incoming connections
if (listen(sock->Get(), SOMAXCONN) == SOCKET_ERROR) {
strError = strprintf(_("Error: Listening for incoming connections "
"failed (listen returned error %s)"),
NetworkErrorString(WSAGetLastError()));
LogPrintf("%s\n", strError.original);
return false;
}
vhListenSocket.push_back(ListenSocket(sock->Release(), permissions));
return true;
}
void Discover() {
if (!fDiscover) {
return;
}
#ifdef WIN32
// Get local host IP
char pszHostName[256] = "";
if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) {
std::vector<CNetAddr> vaddr;
if (LookupHost(pszHostName, vaddr, 0, true)) {
for (const CNetAddr &addr : vaddr) {
if (AddLocal(addr, LOCAL_IF)) {
LogPrintf("%s: %s - %s\n", __func__, pszHostName,
addr.ToString());
}
}
}
}
#elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
// Get local host ip
struct ifaddrs *myaddrs;
if (getifaddrs(&myaddrs) == 0) {
for (struct ifaddrs *ifa = myaddrs; ifa != nullptr;
ifa = ifa->ifa_next) {
if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 ||
strcmp(ifa->ifa_name, "lo") == 0 ||
strcmp(ifa->ifa_name, "lo0") == 0) {
continue;
}
if (ifa->ifa_addr->sa_family == AF_INET) {
struct sockaddr_in *s4 =
reinterpret_cast<struct sockaddr_in *>(ifa->ifa_addr);
CNetAddr addr(s4->sin_addr);
if (AddLocal(addr, LOCAL_IF)) {
LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name,
addr.ToString());
}
} else if (ifa->ifa_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *s6 =
reinterpret_cast<struct sockaddr_in6 *>(ifa->ifa_addr);
CNetAddr addr(s6->sin6_addr);
if (AddLocal(addr, LOCAL_IF)) {
LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name,
addr.ToString());
}
}
}
freeifaddrs(myaddrs);
}
#endif
}
void CConnman::SetNetworkActive(bool active) {
LogPrintf("%s: %s\n", __func__, active);
if (fNetworkActive == active) {
return;
}
fNetworkActive = active;
uiInterface.NotifyNetworkActiveChanged(fNetworkActive);
}
CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In,
bool network_active)
: config(&configIn), nSeed0(nSeed0In), nSeed1(nSeed1In) {
SetTryNewOutboundPeer(false);
Options connOptions;
Init(connOptions);
SetNetworkActive(network_active);
}
NodeId CConnman::GetNewNodeId() {
return nLastNodeId.fetch_add(1);
}
bool CConnman::Bind(const CService &addr, unsigned int flags,
NetPermissionFlags permissions) {
if (!(flags & BF_EXPLICIT) && !IsReachable(addr)) {
return false;
}
bilingual_str strError;
if (!BindListenPort(addr, strError, permissions)) {
if ((flags & BF_REPORT_ERROR) && clientInterface) {
clientInterface->ThreadSafeMessageBox(
strError, "", CClientUIInterface::MSG_ERROR);
}
return false;
}
if (addr.IsRoutable() && fDiscover && !(flags & BF_DONT_ADVERTISE) &&
!(permissions & PF_NOBAN)) {
AddLocal(addr, LOCAL_BIND);
}
return true;
}
bool CConnman::InitBinds(const std::vector<CService> &binds,
const std::vector<NetWhitebindPermissions> &whiteBinds,
const std::vector<CService> &onion_binds) {
bool fBound = false;
for (const auto &addrBind : binds) {
fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR),
NetPermissionFlags::PF_NONE);
}
for (const auto &addrBind : whiteBinds) {
fBound |= Bind(addrBind.m_service, (BF_EXPLICIT | BF_REPORT_ERROR),
addrBind.m_flags);
}
if (binds.empty() && whiteBinds.empty()) {
struct in_addr inaddr_any;
inaddr_any.s_addr = htonl(INADDR_ANY);
struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT;
fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE,
NetPermissionFlags::PF_NONE);
fBound |= Bind(CService(inaddr_any, GetListenPort()),
!fBound ? BF_REPORT_ERROR : BF_NONE,
NetPermissionFlags::PF_NONE);
}
for (const auto &addr_bind : onion_binds) {
fBound |= Bind(addr_bind, BF_EXPLICIT | BF_DONT_ADVERTISE,
NetPermissionFlags::PF_NONE);
}
return fBound;
}
bool CConnman::Start(CScheduler &scheduler, const Options &connOptions) {
Init(connOptions);
if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds,
connOptions.onion_binds)) {
if (clientInterface) {
clientInterface->ThreadSafeMessageBox(
_("Failed to listen on any port. Use -listen=0 if you want "
"this."),
"", CClientUIInterface::MSG_ERROR);
}
return false;
}
proxyType i2p_sam;
if (GetProxy(NET_I2P, i2p_sam)) {
m_i2p_sam_session = std::make_unique<i2p::sam::Session>(
GetDataDir() / "i2p_private_key", i2p_sam.proxy, &interruptNet);
}
for (const auto &strDest : connOptions.vSeedNodes) {
AddAddrFetch(strDest);
}
if (clientInterface) {
clientInterface->InitMessage(_("Loading P2P addresses...").translated);
}
// Load addresses from peers.dat
int64_t nStart = GetTimeMillis();
{
CAddrDB adb(config->GetChainParams());
if (adb.Read(addrman)) {
LogPrintf("Loaded %i addresses from peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
} else {
// Addrman can be in an inconsistent state after failure, reset it
addrman.Clear();
LogPrintf("Invalid or missing peers.dat; recreating\n");
DumpAddresses();
}
}
if (m_use_addrman_outgoing) {
// Load addresses from anchors.dat
m_anchors = ReadAnchors(config->GetChainParams(),
GetDataDir() / ANCHORS_DATABASE_FILENAME);
if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
}
LogPrintf(
"%i block-relay-only anchors will be tried for connections.\n",
m_anchors.size());
}
uiInterface.InitMessage(_("Starting network threads...").translated);
fAddressesInitialized = true;
if (semOutbound == nullptr) {
// initialize semaphore
semOutbound = std::make_unique<CSemaphore>(
std::min(m_max_outbound, nMaxConnections));
}
if (semAddnode == nullptr) {
// initialize semaphore
semAddnode = std::make_unique<CSemaphore>(nMaxAddnode);
}
//
// Start threads
//
assert(m_msgproc);
InterruptSocks5(false);
interruptNet.reset();
flagInterruptMsgProc = false;
{
LOCK(mutexMsgProc);
fMsgProcWake = false;
}
// Send and receive from sockets, accept connections
threadSocketHandler = std::thread(
&TraceThread<std::function<void()>>, "net",
std::function<void()>(std::bind(&CConnman::ThreadSocketHandler, this)));
if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)) {
LogPrintf("DNS seeding disabled\n");
} else {
threadDNSAddressSeed =
std::thread(&TraceThread<std::function<void()>>, "dnsseed",
std::function<void()>(
std::bind(&CConnman::ThreadDNSAddressSeed, this)));
}
// Initiate manual connections
threadOpenAddedConnections =
std::thread(&TraceThread<std::function<void()>>, "addcon",
std::function<void()>(std::bind(
&CConnman::ThreadOpenAddedConnections, this)));
if (connOptions.m_use_addrman_outgoing &&
!connOptions.m_specified_outgoing.empty()) {
if (clientInterface) {
clientInterface->ThreadSafeMessageBox(
_("Cannot provide specific connections and have addrman find "
"outgoing connections at the same."),
"", CClientUIInterface::MSG_ERROR);
}
return false;
}
if (connOptions.m_use_addrman_outgoing ||
!connOptions.m_specified_outgoing.empty()) {
threadOpenConnections =
std::thread(&TraceThread<std::function<void()>>, "opencon",
std::function<void()>(
std::bind(&CConnman::ThreadOpenConnections, this,
connOptions.m_specified_outgoing)));
}
// Process messages
threadMessageHandler =
std::thread(&TraceThread<std::function<void()>>, "msghand",
std::function<void()>(
std::bind(&CConnman::ThreadMessageHandler, this)));
if (connOptions.m_i2p_accept_incoming &&
m_i2p_sam_session.get() != nullptr) {
threadI2PAcceptIncoming =
std::thread(&TraceThread<std::function<void()>>, "i2paccept",
std::function<void()>(std::bind(
&CConnman::ThreadI2PAcceptIncoming, this)));
}
// Dump network addresses
scheduler.scheduleEvery(
[this]() {
this->DumpAddresses();
return true;
},
DUMP_PEERS_INTERVAL);
return true;
}
class CNetCleanup {
public:
CNetCleanup() {}
~CNetCleanup() {
#ifdef WIN32
// Shutdown Windows Sockets
WSACleanup();
#endif
}
};
static CNetCleanup instance_of_cnetcleanup;
void CConnman::Interrupt() {
{
LOCK(mutexMsgProc);
flagInterruptMsgProc = true;
}
condMsgProc.notify_all();
interruptNet();
InterruptSocks5(true);
if (semOutbound) {
for (int i = 0; i < m_max_outbound; i++) {
semOutbound->post();
}
}
if (semAddnode) {
for (int i = 0; i < nMaxAddnode; i++) {
semAddnode->post();
}
}
}
void CConnman::StopThreads() {
if (threadI2PAcceptIncoming.joinable()) {
threadI2PAcceptIncoming.join();
}
if (threadMessageHandler.joinable()) {
threadMessageHandler.join();
}
if (threadOpenConnections.joinable()) {
threadOpenConnections.join();
}
if (threadOpenAddedConnections.joinable()) {
threadOpenAddedConnections.join();
}
if (threadDNSAddressSeed.joinable()) {
threadDNSAddressSeed.join();
}
if (threadSocketHandler.joinable()) {
threadSocketHandler.join();
}
}
void CConnman::StopNodes() {
if (fAddressesInitialized) {
DumpAddresses();
fAddressesInitialized = false;
if (m_use_addrman_outgoing) {
// Anchor connections are only dumped during clean shutdown.
std::vector<CAddress> anchors_to_dump =
GetCurrentBlockRelayOnlyConns();
if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
}
DumpAnchors(config->GetChainParams(),
GetDataDir() / ANCHORS_DATABASE_FILENAME,
anchors_to_dump);
}
}
// Close sockets
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
pnode->CloseSocketDisconnect();
}
for (ListenSocket &hListenSocket : vhListenSocket) {
if (hListenSocket.socket != INVALID_SOCKET) {
if (!CloseSocket(hListenSocket.socket)) {
LogPrintf("CloseSocket(hListenSocket) failed with error %s\n",
NetworkErrorString(WSAGetLastError()));
}
}
}
// clean up some globals (to help leak detection)
for (CNode *pnode : vNodes) {
DeleteNode(pnode);
}
for (CNode *pnode : vNodesDisconnected) {
DeleteNode(pnode);
}
vNodes.clear();
vNodesDisconnected.clear();
vhListenSocket.clear();
semOutbound.reset();
semAddnode.reset();
}
void CConnman::DeleteNode(CNode *pnode) {
assert(pnode);
bool fUpdateConnectionTime = false;
m_msgproc->FinalizeNode(*config, *pnode, fUpdateConnectionTime);
if (fUpdateConnectionTime) {
addrman.Connected(pnode->addr);
}
delete pnode;
}
CConnman::~CConnman() {
Interrupt();
Stop();
}
void CConnman::SetServices(const CService &addr, ServiceFlags nServices) {
addrman.SetServices(addr, nServices);
}
void CConnman::MarkAddressGood(const CAddress &addr) {
addrman.Good(addr);
}
bool CConnman::AddNewAddresses(const std::vector<CAddress> &vAddr,
const CAddress &addrFrom, int64_t nTimePenalty) {
return addrman.Add(vAddr, addrFrom, nTimePenalty);
}
std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses,
size_t max_pct) {
std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct);
if (m_banman) {
addresses.erase(std::remove_if(addresses.begin(), addresses.end(),
[this](const CAddress &addr) {
return m_banman->IsDiscouraged(
addr) ||
m_banman->IsBanned(addr);
}),
addresses.end());
}
return addresses;
}
std::vector<CAddress>
CConnman::GetAddresses(CNode &requestor, size_t max_addresses, size_t max_pct) {
auto local_socket_bytes = requestor.addrBind.GetAddrBytes();
uint64_t cache_id =
GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE)
.Write(requestor.addr.GetNetwork())
.Write(local_socket_bytes.data(), local_socket_bytes.size())
.Finalize();
const auto current_time = GetTime<std::chrono::microseconds>();
auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
CachedAddrResponse &cache_entry = r.first->second;
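// emplace() does not overwrite an existing element, so cache_entry
// refers either to the cached response already stored for this
// requestor bucket or to a newly default-constructed one.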
// Newly created CachedAddrResponse entries have an expiration of 0.
if (cache_entry.m_cache_entry_expiration < current_time) {
cache_entry.m_addrs_response_cache =
GetAddresses(max_addresses, max_pct);
// Choosing a proper cache lifetime is a trade-off between the privacy
// leak minimization and the usefulness of ADDR responses to honest
// users.
//
// Longer cache lifetime makes it more difficult for an attacker to
// scrape enough AddrMan data to maliciously infer something useful. By
// the time an attacker scraped enough AddrMan records, most of the
// records should be old enough to not leak topology info by e.g.
// analyzing real-time changes in timestamps.
//
// It takes only several hundred requests to scrape everything from an
// AddrMan containing 100,000 nodes, so ~24 hours of cache lifetime
// indeed makes the data less inferable by the time most of it could be
// scraped (considering that timestamps are updated via ADDR
// self-announcements and when nodes communicate). We also should be
// robust to those attacks which may not require scraping the victim's
// *full* AddrMan (because even several timestamps of the same handful
// of nodes may leak privacy).
//
// On the other hand, longer cache lifetime makes ADDR responses
// outdated and less useful for an honest requestor, e.g. if most nodes
// in the ADDR response are no longer active.
//
// However, the churn in the network is known to be rather low. Since we
// consider nodes to be "terrible" (see IsTerrible()) if the timestamps
// are older than 30 days, max. 24 hours of "penalty" due to cache
// shouldn't make any meaningful difference in terms of the freshness of
// the response.
cache_entry.m_cache_entry_expiration =
current_time + std::chrono::hours(21) +
GetRandMillis(std::chrono::hours(6));
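// i.e. each cache entry lives for a roughly uniformly random duration
// in [21h, 27h), about 24 hours on average, matching the reasoning
// above.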
}
return cache_entry.m_addrs_response_cache;
}
bool CConnman::AddNode(const std::string &strNode) {
LOCK(cs_vAddedNodes);
for (const std::string &it : vAddedNodes) {
if (strNode == it) {
return false;
}
}
vAddedNodes.push_back(strNode);
return true;
}
bool CConnman::RemoveAddedNode(const std::string &strNode) {
LOCK(cs_vAddedNodes);
for (std::vector<std::string>::iterator it = vAddedNodes.begin();
it != vAddedNodes.end(); ++it) {
if (strNode == *it) {
vAddedNodes.erase(it);
return true;
}
}
return false;
}
size_t CConnman::GetNodeCount(NumConnections flags) {
LOCK(cs_vNodes);
// Shortcut if we want total
if (flags == CConnman::CONNECTIONS_ALL) {
return vNodes.size();
}
int nNum = 0;
for (const auto &pnode : vNodes) {
if (flags &
(pnode->IsInboundConn() ? CONNECTIONS_IN : CONNECTIONS_OUT)) {
nNum++;
}
}
return nNum;
}
void CConnman::GetNodeStats(std::vector<CNodeStats> &vstats) {
vstats.clear();
LOCK(cs_vNodes);
vstats.reserve(vNodes.size());
for (CNode *pnode : vNodes) {
vstats.emplace_back();
pnode->copyStats(vstats.back());
vstats.back().m_mapped_as = pnode->addr.GetMappedAS(addrman.m_asmap);
}
}
bool CConnman::DisconnectNode(const std::string &strNode) {
LOCK(cs_vNodes);
if (CNode *pnode = FindNode(strNode)) {
LogPrint(BCLog::NET,
"disconnect by address%s matched peer=%d; disconnecting\n",
(fLogIPs ? strprintf("=%s", strNode) : ""), pnode->GetId());
pnode->fDisconnect = true;
return true;
}
return false;
}
bool CConnman::DisconnectNode(const CSubNet &subnet) {
bool disconnected = false;
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (subnet.Match(pnode->addr)) {
LogPrint(BCLog::NET,
"disconnect by subnet%s matched peer=%d; disconnecting\n",
(fLogIPs ? strprintf("=%s", subnet.ToString()) : ""),
pnode->GetId());
pnode->fDisconnect = true;
disconnected = true;
}
}
return disconnected;
}
bool CConnman::DisconnectNode(const CNetAddr &addr) {
return DisconnectNode(CSubNet(addr));
}
bool CConnman::DisconnectNode(NodeId id) {
LOCK(cs_vNodes);
for (CNode *pnode : vNodes) {
if (id == pnode->GetId()) {
LogPrint(BCLog::NET, "disconnect by id peer=%d; disconnecting\n",
pnode->GetId());
pnode->fDisconnect = true;
return true;
}
}
return false;
}
void CConnman::RecordBytesRecv(uint64_t bytes) {
LOCK(cs_totalBytesRecv);
nTotalBytesRecv += bytes;
}
void CConnman::RecordBytesSent(uint64_t bytes) {
LOCK(cs_totalBytesSent);
nTotalBytesSent += bytes;
const auto now = GetTime<std::chrono::seconds>();
if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now) {
// timeframe expired, reset cycle
nMaxOutboundCycleStartTime = now;
nMaxOutboundTotalBytesSentInCycle = 0;
}
// TODO: exclude peers with download permission
nMaxOutboundTotalBytesSentInCycle += bytes;
}
uint64_t CConnman::GetMaxOutboundTarget() {
LOCK(cs_totalBytesSent);
return nMaxOutboundLimit;
}
std::chrono::seconds CConnman::GetMaxOutboundTimeframe() {
return MAX_UPLOAD_TIMEFRAME;
}
std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() {
LOCK(cs_totalBytesSent);
if (nMaxOutboundLimit == 0) {
return 0s;
}
if (nMaxOutboundCycleStartTime.count() == 0) {
return MAX_UPLOAD_TIMEFRAME;
}
const std::chrono::seconds cycleEndTime =
nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME;
const auto now = GetTime<std::chrono::seconds>();
return (cycleEndTime < now) ? 0s : cycleEndTime - now;
}
bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) {
LOCK(cs_totalBytesSent);
if (nMaxOutboundLimit == 0) {
return false;
}
if (historicalBlockServingLimit) {
// keep a large enough buffer to at least relay each block once.
const std::chrono::seconds timeLeftInCycle =
GetMaxOutboundTimeLeftInCycle();
const uint64_t buffer =
timeLeftInCycle / std::chrono::minutes{10} * ONE_MEGABYTE;
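// The buffer reserves ONE_MEGABYTE per remaining 10-minute block
// interval in the cycle, e.g. 5 hours left -> 30 * ONE_MEGABYTE held
// back from the upload target.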
if (buffer >= nMaxOutboundLimit ||
nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) {
return true;
}
} else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) {
return true;
}
return false;
}
uint64_t CConnman::GetOutboundTargetBytesLeft() {
LOCK(cs_totalBytesSent);
if (nMaxOutboundLimit == 0) {
return 0;
}
return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit)
? 0
: nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle;
}
uint64_t CConnman::GetTotalBytesRecv() {
LOCK(cs_totalBytesRecv);
return nTotalBytesRecv;
}
uint64_t CConnman::GetTotalBytesSent() {
LOCK(cs_totalBytesSent);
return nTotalBytesSent;
}
ServiceFlags CConnman::GetLocalServices() const {
return nLocalServices;
}
unsigned int CConnman::GetReceiveFloodSize() const {
return nReceiveFloodSize;
}
void CNode::AvalancheState::invsPolled(uint32_t count) {
invCounters += count;
}
void CNode::AvalancheState::invsVoted(uint32_t count) {
invCounters += uint64_t(count) << 32;
}
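// invCounters packs the number of invs polled into its lower 32 bits
// and the number of invs voted on into its upper 32 bits, so both
// values share a single atomic counter and are read and reset together
// in updateAvailabilityScore() below.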
void CNode::AvalancheState::updateAvailabilityScore() {
LOCK(cs_statistics);
uint64_t windowInvCounters = invCounters.exchange(0);
double previousScore = availabilityScore;
int64_t polls = windowInvCounters & std::numeric_limits<uint32_t>::max();
int64_t votes = windowInvCounters >> 32;
availabilityScore =
AVALANCHE_STATISTICS_DECAY_FACTOR * (2 * votes - polls) +
(1. - AVALANCHE_STATISTICS_DECAY_FACTOR) * previousScore;
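// This is an exponentially weighted moving average: the raw score for
// the window, 2 * votes - polls, is positive when the peer voted on
// more than half of the invs it was polled for, and
// AVALANCHE_STATISTICS_DECAY_FACTOR controls how quickly older windows
// fade out.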
}
double CNode::AvalancheState::getAvailabilityScore() const {
// The score is set atomically so there is no need to lock the statistics
// when reading.
return availabilityScore;
}
CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn,
const CAddress &addrIn, uint64_t nKeyedNetGroupIn,
uint64_t nLocalHostNonceIn, uint64_t nLocalExtraEntropyIn,
const CAddress &addrBindIn, const std::string &addrNameIn,
ConnectionType conn_type_in, bool inbound_onion)
: nTimeConnected(GetTimeSeconds()), addr(addrIn), addrBind(addrBindIn),
m_inbound_onion(inbound_onion), nKeyedNetGroup(nKeyedNetGroupIn),
// Don't relay addr messages to peers that we connect to as
// block-relay-only peers (to prevent adversaries from inferring these
// links from addr traffic).
id(idIn), nLocalHostNonce(nLocalHostNonceIn),
nLocalExtraEntropy(nLocalExtraEntropyIn), m_conn_type(conn_type_in),
nLocalServices(nLocalServicesIn) {
if (inbound_onion) {
assert(conn_type_in == ConnectionType::INBOUND);
}
hSocket = hSocketIn;
addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_tx_relay = std::make_unique<TxRelay>();
}
// Don't relay proofs if avalanche is disabled
if (isAvalancheEnabled(gArgs)) {
m_proof_relay = std::make_unique<ProofRelay>();
}
for (const std::string &msg : getAllNetMessageTypes()) {
mapRecvBytesPerMsgCmd[msg] = 0;
}
mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;
if (fLogIPs) {
LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id);
} else {
LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
}
m_deserializer = std::make_unique<V1TransportDeserializer>(
V1TransportDeserializer(GetConfig().GetChainParams().NetMagic(),
SER_NETWORK, INIT_PROTO_VERSION));
m_serializer =
std::make_unique<V1TransportSerializer>(V1TransportSerializer());
}
CNode::~CNode() {
CloseSocket(hSocket);
}
bool CConnman::NodeFullyConnected(const CNode *pnode) {
return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect;
}
void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) {
size_t nMessageSize = msg.data.size();
LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n",
SanitizeString(msg.m_type), nMessageSize, pnode->GetId());
// make sure we use the appropriate network transport format
std::vector<uint8_t> serializedHeader;
pnode->m_serializer->prepareForTransport(*config, msg, serializedHeader);
size_t nTotalSize = nMessageSize + serializedHeader.size();
size_t nBytesSent = 0;
{
LOCK(pnode->cs_vSend);
bool optimisticSend(pnode->vSendMsg.empty());
// log the total number of bytes sent per message type
pnode->mapSendBytesPerMsgCmd[msg.m_type] += nTotalSize;
pnode->nSendSize += nTotalSize;
if (pnode->nSendSize > nSendBufferMaxSize) {
pnode->fPauseSend = true;
}
pnode->vSendMsg.push_back(std::move(serializedHeader));
if (nMessageSize) {
pnode->vSendMsg.push_back(std::move(msg.data));
}
// If write queue empty, attempt "optimistic write"
if (optimisticSend) {
nBytesSent = SocketSendData(*pnode);
}
}
if (nBytesSent) {
RecordBytesSent(nBytesSent);
}
}
bool CConnman::ForNode(NodeId id, std::function<bool(CNode *pnode)> func) {
CNode *found = nullptr;
LOCK(cs_vNodes);
for (auto &&pnode : vNodes) {
if (pnode->GetId() == id) {
found = pnode;
break;
}
}
return found != nullptr && NodeFullyConnected(found) && func(found);
}
std::chrono::microseconds
CConnman::PoissonNextSendInbound(std::chrono::microseconds now,
std::chrono::seconds average_interval) {
if (m_next_send_inv_to_incoming.load() < now) {
// If this function were called from multiple threads simultaneously
// it would be possible that both update the next send variable, and
// return a different result to their caller. This is not possible in
// practice as only the net processing thread invokes this function.
m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval);
}
return m_next_send_inv_to_incoming;
}
std::chrono::microseconds
PoissonNextSend(std::chrono::microseconds now,
std::chrono::seconds average_interval) {
double unscaled = -log1p(GetRand(1ULL << 48) *
-0.0000000000000035527136788 /* -1/2^48 */);
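// GetRand(1ULL << 48) / 2^48 is approximately uniform on [0, 1), so
// -log1p(-u) = -ln(1 - u) is an Exponential(1) sample. Scaling it by
// average_interval below yields exponentially distributed gaps, i.e. a
// Poisson process of sends with the requested average interval.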
return now + std::chrono::duration_cast<std::chrono::microseconds>(
unscaled * average_interval + 0.5us);
}
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const {
return CSipHasher(nSeed0, nSeed1).Write(id);
}
uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const {
std::vector<uint8_t> vchNetGroup(ad.GetGroup(addrman.m_asmap));
return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP)
.Write(vchNetGroup.data(), vchNetGroup.size())
.Finalize();
}
/**
 * This function converts MaxBlockSize from bytes to MB with one
 * decimal digit of precision, rounded down.
 * E.g.
 * 1660000 -> 1.6
 * 2010000 -> 2.0
 * 1000000 -> 1.0
 * 230000 -> 0.2
 * 50000 -> 0.0
 *
 * NB: the behavior for EB < 1MB is not standardized yet; the function
 * still applies the same algorithm used for EB greater than or equal
 * to 1MB.
 */
std::string getSubVersionEB(uint64_t MaxBlockSize) {
// Prepare EB string we are going to add to SubVer:
// 1) translate from byte to MB and convert to string
// 2) limit the EB string to the first decimal digit (floored)
std::stringstream ebMBs;
ebMBs << (MaxBlockSize / (ONE_MEGABYTE / 10));
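// e.g. for MaxBlockSize = 32000000: 32000000 / (ONE_MEGABYTE / 10) =
// 320, and inserting the decimal point below yields "32.0" (assuming
// ONE_MEGABYTE is one million bytes, consistent with the examples
// above).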
std::string eb = ebMBs.str();
eb.insert(eb.size() - 1, ".", 1);
if (eb.substr(0, 1) == ".") {
eb = "0" + eb;
}
return eb;
}
std::string userAgent(const Config &config) {
// format excessive blocksize value
std::string eb = getSubVersionEB(config.GetMaxBlockSize());
std::vector<std::string> uacomments;
uacomments.push_back("EB" + eb);
// Comments are checked for char compliance at startup, it is safe to add
// them to the user agent string
for (const std::string &cmt : gArgs.GetArgs("-uacomment")) {
uacomments.push_back(cmt);
}
const std::string client_name = gArgs.GetArg("-uaclientname", CLIENT_NAME);
const std::string client_version =
gArgs.GetArg("-uaclientversion", FormatVersion(CLIENT_VERSION));
// Size compliance is checked at startup, it is safe to not check it again
return FormatUserAgent(client_name, client_version, uacomments);
}
diff --git a/src/test/net_peer_eviction_tests.cpp b/src/test/net_peer_eviction_tests.cpp
index 1f0021ca2..4355ec9ea 100644
--- a/src/test/net_peer_eviction_tests.cpp
+++ b/src/test/net_peer_eviction_tests.cpp
@@ -1,653 +1,622 @@
// Copyright (c) 2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <net.h>
#include <netaddress.h>
#include <test/util/net.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <functional>
#include <numeric>
#include <optional>
#include <unordered_set>
#include <vector>
BOOST_FIXTURE_TEST_SUITE(net_peer_eviction_tests, BasicTestingSetup)
-std::vector<NodeEvictionCandidate>
-GetRandomNodeEvictionCandidates(const int n_candidates,
- FastRandomContext &random_context) {
- std::vector<NodeEvictionCandidate> candidates;
- for (int id = 0; id < n_candidates; ++id) {
- candidates.push_back({
- /* id */ id,
- /* nTimeConnected */
- static_cast<int64_t>(random_context.randrange(100)),
- /* m_min_ping_time */
- std::chrono::microseconds{random_context.randrange(100)},
- /* nLastBlockTime */
- static_cast<int64_t>(random_context.randrange(100)),
- /* nLastProofTime */
- static_cast<int64_t>(random_context.randrange(100)),
- /* nLastTXTime */
- static_cast<int64_t>(random_context.randrange(100)),
- /* fRelevantServices */ random_context.randbool(),
- /* fRelayTxes */ random_context.randbool(),
- /* fBloomFilter */ random_context.randbool(),
- /* nKeyedNetGroup */ random_context.randrange(100),
- /* prefer_evict */ random_context.randbool(),
- /* m_is_local */ random_context.randbool(),
- /* m_network */
- ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
- /* availabilityScore */ double(random_context.randrange(-1)),
- });
- }
- return candidates;
-}
-
// Create `num_peers` random nodes, apply setup function `candidate_setup_fn`,
// call ProtectEvictionCandidatesByRatio() to apply protection logic, and then
// return true if all of `protected_peer_ids` and none of `unprotected_peer_ids`
// are protected from eviction, i.e. removed from the eviction candidates.
bool IsProtected(
int num_peers,
std::function<void(NodeEvictionCandidate &)> candidate_setup_fn,
const std::unordered_set<NodeId> &protected_peer_ids,
const std::unordered_set<NodeId> &unprotected_peer_ids,
FastRandomContext &random_context) {
std::vector<NodeEvictionCandidate> candidates{
GetRandomNodeEvictionCandidates(num_peers, random_context)};
for (NodeEvictionCandidate &candidate : candidates) {
candidate_setup_fn(candidate);
}
Shuffle(candidates.begin(), candidates.end(), random_context);
const size_t size{candidates.size()};
// Expect half the candidates will be protected.
const size_t expected{size - size / 2};
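// i.e. floor(size / 2) candidates are expected to be removed
// (protected), so when size is odd the larger "half" remains.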
ProtectEvictionCandidatesByRatio(candidates);
BOOST_CHECK_EQUAL(candidates.size(), expected);
size_t unprotected_count{0};
for (const NodeEvictionCandidate &candidate : candidates) {
if (protected_peer_ids.count(candidate.id)) {
// this peer should have been removed from the eviction candidates
BOOST_TEST_MESSAGE(strprintf(
"expected candidate to be protected: %d", candidate.id));
return false;
}
if (unprotected_peer_ids.count(candidate.id)) {
// this peer remains in the eviction candidates, as expected
++unprotected_count;
}
}
const bool is_protected{unprotected_count == unprotected_peer_ids.size()};
if (!is_protected) {
BOOST_TEST_MESSAGE(strprintf("unprotected: expected %d, actual %d",
unprotected_peer_ids.size(),
unprotected_count));
}
return is_protected;
}
BOOST_AUTO_TEST_CASE(peer_protection_test) {
FastRandomContext random_context{true};
int num_peers{12};
// Expect half of the peers with greatest uptime (the lowest nTimeConnected)
// to be protected from eviction.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = false;
c.m_network = NET_IPV4;
},
/* protected_peer_ids */ {0, 1, 2, 3, 4, 5},
/* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11}, random_context));
// Verify in the opposite direction.
BOOST_CHECK(IsProtected(
num_peers,
[num_peers](NodeEvictionCandidate &c) {
c.nTimeConnected = num_peers - c.id;
c.m_is_local = false;
c.m_network = NET_IPV6;
},
/* protected_peer_ids */ {6, 7, 8, 9, 10, 11},
/* unprotected_peer_ids */ {0, 1, 2, 3, 4, 5}, random_context));
// Test protection of onion, localhost, and I2P peers...
// Expect 1/4 onion peers to be protected from eviction,
// if no localhost or I2P peers.
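// With 12 candidates that works out to 12 / 4 = 3 reserved slots, exactly
// covering the onion ids {3, 8, 9}; the remaining protected slots go by
// uptime, which is random here, so unprotected_peer_ids is left empty.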
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.m_is_local = false;
c.m_network =
(c.id == 3 || c.id == 8 || c.id == 9) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {3, 8, 9},
/* unprotected_peer_ids */ {}, random_context));
// Expect 1/4 onion peers and 1/4 of the other peers to be protected,
// sorted by longest uptime (lowest nTimeConnected), if no localhost or I2P
// peers.
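// Five peers are onion here (ids 3, 8-11) but only three onion slots are
// available, so the three longest-uptime onion peers win them alongside the
// three longest-uptime IPv6 peers.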
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = false;
c.m_network = (c.id == 3 || c.id > 7) ? NET_ONION : NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 3, 8, 9},
/* unprotected_peer_ids */ {4, 5, 6, 7, 10, 11}, random_context));
// Expect 1/4 localhost peers to be protected from eviction,
// if no onion or I2P peers.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.m_is_local = (c.id == 1 || c.id == 9 || c.id == 11);
c.m_network = NET_IPV4;
},
/* protected_peer_ids */ {1, 9, 11},
/* unprotected_peer_ids */ {}, random_context));
// Expect 1/4 localhost peers and 1/4 of the other peers to be protected,
// sorted by longest uptime (lowest nTimeConnected), if no onion or I2P
// peers.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id > 6);
c.m_network = NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 7, 8, 9},
/* unprotected_peer_ids */ {3, 4, 5, 6, 10, 11}, random_context));
// Expect 1/4 I2P peers to be protected from eviction,
// if no onion or localhost peers.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.m_is_local = false;
c.m_network =
(c.id == 2 || c.id == 7 || c.id == 10) ? NET_I2P : NET_IPV4;
},
/* protected_peer_ids */ {2, 7, 10},
/* unprotected_peer_ids */ {}, random_context));
// Expect 1/4 I2P peers and 1/4 of the other peers to be protected,
// sorted by longest uptime (lowest nTimeConnected), if no onion or
// localhost peers.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = false;
c.m_network = (c.id == 4 || c.id > 8) ? NET_I2P : NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 4, 9, 10},
/* unprotected_peer_ids */ {3, 5, 6, 7, 8, 11}, random_context));
// Tests with 2 networks...
// Combined test: expect having 1 localhost and 1 onion peer out of 4 to
// protect 1 localhost, 0 onion and 1 other peer, sorted by longest uptime;
// stable sort breaks tie with array order of localhost first.
BOOST_CHECK(IsProtected(
4,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 4);
c.m_network = (c.id == 3) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {0, 4},
/* unprotected_peer_ids */ {1, 2}, random_context));
// Combined test: expect having 1 localhost and 1 onion peer out of 7 to
// protect 1 localhost, 0 onion, and 2 other peers (3 total), sorted by
// uptime; stable sort breaks tie with array order of localhost first.
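// Worked out: floor(7 / 2) = 3 peers are protected in total; the single
// reserved slot goes to the localhost peer (id 6) and the two longest-uptime
// peers (ids 0 and 1) take the rest.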
BOOST_CHECK(IsProtected(
7,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6);
c.m_network = (c.id == 5) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {0, 1, 6},
/* unprotected_peer_ids */ {2, 3, 4, 5}, random_context));
// Combined test: expect having 1 localhost and 1 onion peer out of 8 to
// protect 1 localhost, 1 onion and 2 other peers (4 total), sorted
// by uptime; stable sort breaks tie with array order of localhost first.
BOOST_CHECK(IsProtected(
8,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6);
c.m_network = (c.id == 5) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {0, 1, 5, 6},
/* unprotected_peer_ids */ {2, 3, 4, 7}, random_context));
// Combined test: expect having 3 localhost and 3 onion peers out of 12 to
// protect 2 localhost and 1 onion, plus 3 other peers, sorted by longest
// uptime; stable sort breaks ties with the array order of localhost first.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6 || c.id == 9 || c.id == 11);
c.m_network =
(c.id == 7 || c.id == 8 || c.id == 10) ? NET_ONION : NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 6, 7, 9},
/* unprotected_peer_ids */ {3, 4, 5, 8, 10, 11}, random_context));
// Combined test: expect having 4 localhost and 1 onion peer out of 12 to
// protect 2 localhost and 1 onion, plus 3 other peers, sorted by longest
// uptime.
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id > 4 && c.id < 9);
c.m_network = (c.id == 10) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {0, 1, 2, 5, 6, 10},
/* unprotected_peer_ids */ {3, 4, 7, 8, 9, 11}, random_context));
// Combined test: expect having 4 localhost and 2 onion peers out of 16 to
// protect 2 localhost and 2 onions, plus 4 other peers, sorted by longest
// uptime.
BOOST_CHECK(IsProtected(
16,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6 || c.id == 9 || c.id == 11 || c.id == 12);
c.m_network = (c.id == 8 || c.id == 10) ? NET_ONION : NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 3, 6, 8, 9, 10},
/* unprotected_peer_ids */ {4, 5, 7, 11, 12, 13, 14, 15},
random_context));
// Combined test: expect having 5 localhost and 1 onion peer out of 16 to
// protect 3 localhost (recovering the unused onion slot), 1 onion, and 4
// others, sorted by longest uptime.
BOOST_CHECK(IsProtected(
16,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id > 10);
c.m_network = (c.id == 10) ? NET_ONION : NET_IPV4;
},
/* protected_peer_ids */ {0, 1, 2, 3, 10, 11, 12, 13},
/* unprotected_peer_ids */ {4, 5, 6, 7, 8, 9, 14, 15}, random_context));
// Combined test: expect having 1 localhost and 4 onion peers out of 16 to
// protect 1 localhost and 3 onions (recovering the unused localhost slot),
// plus 4 others, sorted by longest uptime.
BOOST_CHECK(IsProtected(
16,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 15);
c.m_network = (c.id > 6 && c.id < 11) ? NET_ONION : NET_IPV6;
},
/* protected_peer_ids */ {0, 1, 2, 3, 7, 8, 9, 15},
/* unprotected_peer_ids */ {4, 5, 6, 10, 11, 12, 13, 14}, random_context));
// Combined test: expect having 2 onion and 4 I2P out of 12 peers to protect
// 2 onion (prioritized for having fewer candidates) and 1 I2P, plus 3
// others, sorted by longest uptime.
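// Note that ids only run 0-11 here, so the id == 12 branch below never
// matches and only ids {6, 9, 11} end up on I2P; the three reserved slots
// still split as two onion plus one I2P.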
BOOST_CHECK(IsProtected(
num_peers,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = false;
if (c.id == 8 || c.id == 10) {
c.m_network = NET_ONION;
} else if (c.id == 6 || c.id == 9 || c.id == 11 || c.id == 12) {
c.m_network = NET_I2P;
} else {
c.m_network = NET_IPV4;
}
},
/* protected_peer_ids */ {0, 1, 2, 6, 8, 10},
/* unprotected_peer_ids */ {3, 4, 5, 7, 9, 11}, random_context));
// Tests with 3 networks...
// Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 4
// to protect 1 I2P, 0 localhost, 0 onion and 1 other peer (2 total), sorted
// by longest uptime; stable sort breaks tie with array order of I2P first.
BOOST_CHECK(IsProtected(
4,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 3);
if (c.id == 4) {
c.m_network = NET_I2P;
} else if (c.id == 2) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV6;
}
},
/* protected_peer_ids */ {0, 4},
/* unprotected_peer_ids */ {1, 2}, random_context));
// Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 7
// to protect 1 I2P, 0 localhost, 0 onion and 2 other peers (3 total) sorted
// by longest uptime; stable sort breaks tie with array order of I2P first.
BOOST_CHECK(IsProtected(
7,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 4);
if (c.id == 6) {
c.m_network = NET_I2P;
} else if (c.id == 5) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV6;
}
},
/* protected_peer_ids */ {0, 1, 6},
/* unprotected_peer_ids */ {2, 3, 4, 5}, random_context));
// Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 8
// to protect 1 I2P, 1 localhost, 0 onion and 2 other peers (4 total) sorted
// by uptime; stable sort breaks tie with array order of I2P then localhost.
BOOST_CHECK(IsProtected(
8,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6);
if (c.id == 5) {
c.m_network = NET_I2P;
} else if (c.id == 4) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV6;
}
},
/* protected_peer_ids */ {0, 1, 5, 6},
/* unprotected_peer_ids */ {2, 3, 4, 7}, random_context));
// Combined test: expect having 4 localhost, 2 I2P, and 2 onion peers out of
// 16 to protect 1 localhost, 2 I2P, and 1 onion (4/16 total), plus 4 others
// for 8 total, sorted by longest uptime.
BOOST_CHECK(IsProtected(
16,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 6 || c.id > 11);
if (c.id == 7 || c.id == 11) {
c.m_network = NET_I2P;
} else if (c.id == 9 || c.id == 10) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV4;
}
},
/* protected_peer_ids */ {0, 1, 2, 3, 6, 7, 9, 11},
/* unprotected_peer_ids */ {4, 5, 8, 10, 12, 13, 14, 15},
random_context));
// Combined test: expect having 1 localhost, 8 I2P and 1 onion peer out of
// 24 to protect 1, 4, and 1 (6 total), plus 6 others for 12/24 total,
// sorted by longest uptime.
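// Worked out: 12 of the 24 are protected, 6 of them via the network
// reservation; with only one localhost and one onion candidate, I2P absorbs
// the four leftover reserved slots and ids 0-5 fill the uptime half.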
BOOST_CHECK(IsProtected(
24,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 12);
if (c.id > 14 && c.id < 23) { // 4 protected instead of usual 2
c.m_network = NET_I2P;
} else if (c.id == 23) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV6;
}
},
/* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 15, 16, 17, 18, 23},
/* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 13, 14, 19, 20, 21, 22},
random_context));
// Combined test: expect having 1 localhost, 3 I2P and 6 onion peers out of
// 24 to protect 1, 3, and 2 (6 total, I2P has fewer candidates and so gets
// the unused localhost slot), plus 6 others for 12/24 total, sorted by
// longest uptime.
BOOST_CHECK(IsProtected(
24,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 15);
if (c.id == 12 || c.id == 14 || c.id == 17) {
c.m_network = NET_I2P;
} else if (c.id > 17) { // 4 protected instead of usual 2
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV4;
}
},
/* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 14, 15, 17, 18, 19},
/* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 13, 16, 20, 21, 22, 23},
random_context));
// Combined test: expect having 1 localhost, 7 I2P and 4 onion peers out of
// 24 to protect 1 localhost, 2 I2P, and 3 onions (6 total), plus 6 others
// for 12/24 total, sorted by longest uptime.
BOOST_CHECK(IsProtected(
24,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id == 13);
if (c.id > 16) {
c.m_network = NET_I2P;
} else if (c.id == 12 || c.id == 14 || c.id == 15 || c.id == 16) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV6;
}
},
/* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 17, 18},
/* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 16, 19, 20, 21, 22, 23},
random_context));
// Combined test: expect having 8 localhost, 4 I2P, and 3 onion peers out of
// 24 to protect 2 of each (6 total), plus 6 others for 12/24 total, sorted
// by longest uptime.
BOOST_CHECK(IsProtected(
24,
[](NodeEvictionCandidate &c) {
c.nTimeConnected = c.id;
c.m_is_local = (c.id > 15);
if (c.id > 10 && c.id < 15) {
c.m_network = NET_I2P;
} else if (c.id > 6 && c.id < 10) {
c.m_network = NET_ONION;
} else {
c.m_network = NET_IPV4;
}
},
/* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 7, 8, 11, 12, 16, 17},
/* unprotected_peer_ids */
{6, 9, 10, 13, 14, 15, 18, 19, 20, 21, 22, 23}, random_context));
}
// Returns true if any of the node ids in node_ids are selected for eviction.
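// SelectNodeToEvict() returns std::nullopt when every candidate is protected;
// in that case no id counts as evicted and this helper returns false.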
bool IsEvicted(std::vector<NodeEvictionCandidate> candidates,
const std::unordered_set<NodeId> &node_ids,
FastRandomContext &random_context) {
Shuffle(candidates.begin(), candidates.end(), random_context);
const std::optional<NodeId> evicted_node_id =
SelectNodeToEvict(std::move(candidates));
if (!evicted_node_id) {
return false;
}
return node_ids.count(*evicted_node_id);
}
// Create number_of_nodes random nodes, apply setup function candidate_setup_fn,
// apply eviction logic and then return true if any of the node ids in node_ids
// are selected for eviction.
bool IsEvicted(const int number_of_nodes,
std::function<void(NodeEvictionCandidate &)> candidate_setup_fn,
const std::unordered_set<NodeId> &node_ids,
FastRandomContext &random_context) {
std::vector<NodeEvictionCandidate> candidates =
GetRandomNodeEvictionCandidates(number_of_nodes, random_context);
for (NodeEvictionCandidate &candidate : candidates) {
candidate_setup_fn(candidate);
}
return IsEvicted(candidates, node_ids, random_context);
}
BOOST_AUTO_TEST_CASE(node_eviction_test) {
FastRandomContext random_context{true};
for (int number_of_nodes = 0; number_of_nodes < 200; ++number_of_nodes) {
// Four nodes with the highest keyed netgroup values should be
// protected from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nKeyedNetGroup = number_of_nodes - candidate.id;
},
{0, 1, 2, 3}, random_context));
// Eight nodes with the lowest minimum ping time should be protected
// from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[](NodeEvictionCandidate &candidate) {
candidate.m_min_ping_time =
std::chrono::microseconds{candidate.id};
},
{0, 1, 2, 3, 4, 5, 6, 7}, random_context));
// Four nodes that most recently sent us novel transactions accepted
// into our mempool should be protected from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nLastTXTime = number_of_nodes - candidate.id;
},
{0, 1, 2, 3}, random_context));
// Four nodes that most recently sent us novel proofs accepted
// into our proof pool should be protected from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nLastProofTime = number_of_nodes - candidate.id;
},
{0, 1, 2, 3}, random_context));
// Up to eight non-tx-relay peers that most recently sent us novel
// blocks should be protected from eviction.
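// The lambda below marks ids 0-7 as non-tx-relay peers advertising relevant
// services, so all eight qualify for this protection.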
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nLastBlockTime = number_of_nodes - candidate.id;
if (candidate.id <= 7) {
candidate.fRelayTxes = false;
candidate.fRelevantServices = true;
}
},
{0, 1, 2, 3, 4, 5, 6, 7}, random_context));
// Four peers that most recently sent us novel blocks should be
// protected from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nLastBlockTime = number_of_nodes - candidate.id;
},
{0, 1, 2, 3}, random_context));
// Combination of the previous two tests.
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.nLastBlockTime = number_of_nodes - candidate.id;
if (candidate.id <= 7) {
candidate.fRelayTxes = false;
candidate.fRelevantServices = true;
}
},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, random_context));
// Combination of all tests above.
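// With every metric correlated with the id, the criteria end up protecting
// consecutive groups of ids: 4 + 8 + 4 + 4 + 4 = 24, i.e. ids 0 through 23.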
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
// 4 protected
candidate.nKeyedNetGroup = number_of_nodes - candidate.id;
// 8 protected
candidate.m_min_ping_time =
std::chrono::microseconds{candidate.id};
// 4 protected
candidate.nLastTXTime = number_of_nodes - candidate.id;
// 4 protected
candidate.nLastProofTime = number_of_nodes - candidate.id;
// 4 protected
candidate.nLastBlockTime = number_of_nodes - candidate.id;
},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
random_context));
// 128 peers with the highest availability score should be protected
// from eviction.
std::vector<NodeId> protectedNodes(128);
std::iota(protectedNodes.begin(), protectedNodes.end(), 0);
BOOST_CHECK(!IsEvicted(
number_of_nodes,
[number_of_nodes](NodeEvictionCandidate &candidate) {
candidate.availabilityScore =
double(number_of_nodes - candidate.id);
},
std::unordered_set<NodeId>(protectedNodes.begin(),
protectedNodes.end()),
random_context));
// An eviction is expected given >= 161 random eviction candidates.
// The eviction logic protects at most four peers by net group,
// eight by lowest ping time, four by last time of novel tx, four by
// last time of novel proof, up to eight non-tx-relay peers by last
// novel block time, four by last novel block time, and 128 more by
// avalanche availability score.
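// 4 + 8 + 4 + 4 + 8 + 4 + 128 = 160 protected at most, so the 161st random
// candidate guarantees that at least one peer remains eligible for eviction.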
if (number_of_nodes >= 161) {
BOOST_CHECK(SelectNodeToEvict(GetRandomNodeEvictionCandidates(
number_of_nodes, random_context)));
}
// No eviction is expected given <= 24 random eviction candidates.
// The eviction logic protects at least four peers by net group,
// eight by lowest ping time, four by last time of novel tx, four by
// last time of novel proof, four peers by last novel block time.
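// 4 + 8 + 4 + 4 + 4 = 24, so a set of up to 24 candidates can be protected
// in its entirety and SelectNodeToEvict() returns std::nullopt.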
if (number_of_nodes <= 24) {
BOOST_CHECK(!SelectNodeToEvict(GetRandomNodeEvictionCandidates(
number_of_nodes, random_context)));
}
// Cases left to test:
// * "If any remaining peers are preferred for eviction consider
// only them. [...]"
// * "Identify the network group with the most connections and
// youngest member. [...]"
}
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp
index c4c62c984..f1ec00e00 100644
--- a/src/test/util/net.cpp
+++ b/src/test/util/net.cpp
@@ -1,47 +1,81 @@
// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <test/util/net.h>
#include <chainparams.h>
#include <config.h>
#include <net.h>
+#include <span.h>
+
+#include <vector>
void ConnmanTestMsg::NodeReceiveMsgBytes(CNode &node,
Span<const char> msg_bytes,
bool &complete) const {
assert(node.ReceiveMsgBytes(*config, msg_bytes, complete));
if (complete) {
size_t nSizeAdded = 0;
auto it(node.vRecvMsg.begin());
for (; it != node.vRecvMsg.end(); ++it) {
// vRecvMsg contains only completed CNetMessage
// the single possible partially deserialized message is held by
// TransportDeserializer
nSizeAdded += it->m_raw_message_size;
}
{
LOCK(node.cs_vProcessMsg);
node.vProcessMsg.splice(node.vProcessMsg.end(), node.vRecvMsg,
node.vRecvMsg.begin(), it);
node.nProcessQueueSize += nSizeAdded;
node.fPauseRecv = node.nProcessQueueSize > nReceiveFloodSize;
}
}
}
bool ConnmanTestMsg::ReceiveMsgFrom(CNode &node,
CSerializedNetMsg &ser_msg) const {
std::vector<uint8_t> ser_msg_header;
node.m_serializer->prepareForTransport(*config, ser_msg, ser_msg_header);
bool complete;
NodeReceiveMsgBytes(
node, {(const char *)ser_msg_header.data(), ser_msg_header.size()},
complete);
NodeReceiveMsgBytes(
node, {(const char *)ser_msg.data.data(), ser_msg.data.size()},
complete);
return complete;
}
+
+std::vector<NodeEvictionCandidate>
+GetRandomNodeEvictionCandidates(const int n_candidates,
+ FastRandomContext &random_context) {
+ std::vector<NodeEvictionCandidate> candidates;
+ for (int id = 0; id < n_candidates; ++id) {
+ candidates.push_back({
+ /* id */ id,
+ /* nTimeConnected */
+ static_cast<int64_t>(random_context.randrange(100)),
+ /* m_min_ping_time */
+ std::chrono::microseconds{random_context.randrange(100)},
+ /* nLastBlockTime */
+ static_cast<int64_t>(random_context.randrange(100)),
+ /* nLastProofTime */
+ static_cast<int64_t>(random_context.randrange(100)),
+ /* nLastTXTime */
+ static_cast<int64_t>(random_context.randrange(100)),
+ /* fRelevantServices */ random_context.randbool(),
+ /* fRelayTxes */ random_context.randbool(),
+ /* fBloomFilter */ random_context.randbool(),
+ /* nKeyedNetGroup */ random_context.randrange(100),
+ /* prefer_evict */ random_context.randbool(),
+ /* m_is_local */ random_context.randbool(),
+ /* m_network */
+ ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
+ /* availabilityScore */ double(random_context.randrange(-1)),
+ });
+ }
+ return candidates;
+}
diff --git a/src/test/util/net.h b/src/test/util/net.h
index 9c9a97eb7..844645fe6 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -1,113 +1,117 @@
// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_TEST_UTIL_NET_H
#define BITCOIN_TEST_UTIL_NET_H
#include <compat.h>
#include <net.h>
#include <netaddress.h>
#include <util/sock.h>
#include <array>
#include <cassert>
#include <cstring>
#include <string>
struct ConnmanTestMsg : public CConnman {
using CConnman::CConnman;
void SetPeerConnectTimeout(std::chrono::seconds timeout) {
m_peer_connect_timeout = timeout;
}
void AddTestNode(CNode &node) {
LOCK(cs_vNodes);
vNodes.push_back(&node);
}
void ClearTestNodes() {
LOCK(cs_vNodes);
for (CNode *node : vNodes) {
delete node;
}
vNodes.clear();
}
void ProcessMessagesOnce(CNode &node) {
m_msgproc->ProcessMessages(*config, &node, flagInterruptMsgProc);
}
void NodeReceiveMsgBytes(CNode &node, Span<const char> msg_bytes,
bool &complete) const;
bool ReceiveMsgFrom(CNode &node, CSerializedNetMsg &ser_msg) const;
};
constexpr std::array<Network, 7> ALL_NETWORKS = {{
Network::NET_UNROUTABLE,
Network::NET_IPV4,
Network::NET_IPV6,
Network::NET_ONION,
Network::NET_I2P,
Network::NET_CJDNS,
Network::NET_INTERNAL,
}};
/**
* A mocked Sock alternative that returns statically provided data upon read
* and accepts but ignores all writes. The data to return is given to the
* constructor; once it is exhausted, further reads return EOF.
*/
class StaticContentsSock : public Sock {
public:
explicit StaticContentsSock(const std::string &contents)
: m_contents{contents}, m_consumed{0} {
// Just a dummy number that is not INVALID_SOCKET.
static_assert(INVALID_SOCKET != 1000);
m_socket = 1000;
}
~StaticContentsSock() override { Reset(); }
StaticContentsSock &operator=(Sock &&other) override {
assert(false && "Move of Sock into MockSock not allowed.");
return *this;
}
void Reset() override { m_socket = INVALID_SOCKET; }
ssize_t Send(const void *, size_t len, int) const override { return len; }
ssize_t Recv(void *buf, size_t len, int flags) const override {
const size_t consume_bytes{
std::min(len, m_contents.size() - m_consumed)};
std::memcpy(buf, m_contents.data() + m_consumed, consume_bytes);
if ((flags & MSG_PEEK) == 0) {
m_consumed += consume_bytes;
}
return consume_bytes;
}
int Connect(const sockaddr *, socklen_t) const override { return 0; }
int GetSockOpt(int level, int opt_name, void *opt_val,
socklen_t *opt_len) const override {
std::memset(opt_val, 0x0, *opt_len);
return 0;
}
bool Wait(std::chrono::milliseconds timeout, Event requested,
Event *occurred = nullptr) const override {
if (occurred != nullptr) {
*occurred = requested;
}
return true;
}
private:
const std::string m_contents;
mutable size_t m_consumed;
};
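+/**
+ * Create n_candidates eviction candidates with randomized field values, for
+ * use by the peer eviction unit tests and benchmarks.
+ */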
+std::vector<NodeEvictionCandidate>
+GetRandomNodeEvictionCandidates(int n_candidates,
+ FastRandomContext &random_context);
+
#endif // BITCOIN_TEST_UTIL_NET_H