Page Menu
Home
Phabricator
Search
Configure Global Search
Log In
Files
F14362791
D396.id986.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Size
79 KB
Subscribers
None
D396.id986.diff
View Options
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -237,7 +237,10 @@
// Requires cs_main.
CNodeState *State(NodeId pnode) {
std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
- if (it == mapNodeState.end()) return nullptr;
+ if (it == mapNodeState.end()) {
+ return nullptr;
+ }
+
return &it->second;
}
@@ -271,16 +274,17 @@
addrMe, nonce, userAgent(config),
nNodeStartingHeight, ::fRelayTxes));
- if (fLogIPs)
+ if (fLogIPs) {
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, "
"them=%s, peer=%d\n",
PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
addrYou.ToString(), nodeid);
- else
+ } else {
LogPrint(
"net",
"send version message: version %d, blocks=%d, us=%s, peer=%d\n",
PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+ }
}
void InitializeNode(const Config &config, CNode *pnode, CConnman &connman) {
@@ -294,7 +298,10 @@
std::forward_as_tuple(nodeid),
std::forward_as_tuple(addr, std::move(addrName)));
}
- if (!pnode->fInbound) PushNodeVersion(config, pnode, connman, GetTime());
+
+ if (!pnode->fInbound) {
+ PushNodeVersion(config, pnode, connman, GetTime());
+ }
}
void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) {
@@ -302,7 +309,9 @@
LOCK(cs_main);
CNodeState *state = State(nodeid);
- if (state->fSyncStarted) nSyncStarted--;
+ if (state->fSyncStarted) {
+ nSyncStarted--;
+ }
if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
fUpdateConnectionTime = true;
@@ -311,6 +320,7 @@
for (const QueuedBlock &entry : state->vBlocksInFlight) {
mapBlocksInFlight.erase(entry.hash);
}
+
EraseOrphansFor(nodeid);
nPreferredDownload -= state->fPreferredDownload;
nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
@@ -355,6 +365,7 @@
mapBlocksInFlight.erase(itInFlight);
return true;
}
+
return false;
}
@@ -394,13 +405,19 @@
// We're starting a block download (batch) from this peer.
state->nDownloadingSince = GetTimeMicros();
}
+
if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
nPeersWithValidatedDownloads++;
}
+
itInFlight = mapBlocksInFlight
.insert(std::make_pair(hash, std::make_pair(nodeid, it)))
.first;
- if (pit) *pit = &itInFlight->second.second;
+
+ if (pit) {
+ *pit = &itInFlight->second.second;
+ }
+
return true;
}
@@ -415,8 +432,9 @@
if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
if (state->pindexBestKnownBlock == nullptr ||
itOld->second->nChainWork >=
- state->pindexBestKnownBlock->nChainWork)
+ state->pindexBestKnownBlock->nChainWork) {
state->pindexBestKnownBlock = itOld->second;
+ }
state->hashLastUnknownBlock.SetNull();
}
}
@@ -433,8 +451,9 @@
if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == nullptr ||
- it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
+ it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
state->pindexBestKnownBlock = it->second;
+ }
} else {
// An unknown block was announced; just assume that the latest one is
// the best one.
@@ -498,11 +517,13 @@
// Requires cs_main
bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) {
if (state->pindexBestKnownBlock &&
- pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
+ pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
return true;
+ }
if (state->pindexBestHeaderSent &&
- pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
+ pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
return true;
+ }
return false;
}
@@ -534,7 +555,9 @@
std::vector<const CBlockIndex *> &vBlocks,
NodeId &nodeStaller,
const Consensus::Params &consensusParams) {
- if (count == 0) return;
+ if (count == 0) {
+ return;
+ }
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
@@ -561,7 +584,9 @@
// ancestor of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(
state->pindexLastCommonBlock, state->pindexBestKnownBlock);
- if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return;
+ if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
+ return;
+ }
std::vector<const CBlockIndex *> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
@@ -601,7 +626,9 @@
}
if (pindex->nStatus & BLOCK_HAVE_DATA ||
chainActive.Contains(pindex)) {
- if (pindex->nChainTx) state->pindexLastCommonBlock = pindex;
+ if (pindex->nChainTx) {
+ state->pindexLastCommonBlock = pindex;
+ }
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
@@ -630,7 +657,9 @@
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
- if (state == nullptr) return false;
+ if (state == nullptr) {
+ return false;
+ }
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight =
state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
@@ -638,8 +667,9 @@
? state->pindexLastCommonBlock->nHeight
: -1;
for (const QueuedBlock &queue : state->vBlocksInFlight) {
- if (queue.pindex)
+ if (queue.pindex) {
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
+ }
}
return true;
}
@@ -666,8 +696,14 @@
void AddToCompactExtraTransactions(const CTransactionRef &tx) {
size_t max_extra_txn = GetArg("-blockreconstructionextratxn",
DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
- if (max_extra_txn <= 0) return;
- if (!vExtraTxnForCompact.size()) vExtraTxnForCompact.resize(max_extra_txn);
+ if (max_extra_txn <= 0) {
+ return;
+ }
+
+ if (!vExtraTxnForCompact.size()) {
+ vExtraTxnForCompact.resize(max_extra_txn);
+ }
+
vExtraTxnForCompact[vExtraTxnForCompactIt] =
std::make_pair(tx->GetHash(), tx);
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
@@ -676,7 +712,9 @@
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
const uint256 &txid = tx->GetId();
- if (mapOrphanTransactions.count(txid)) return false;
+ if (mapOrphanTransactions.count(txid)) {
+ return false;
+ }
// Ignore big transactions, to avoid a send-big-orphans memory exhaustion
// attack. If a peer has a legitimate large transaction with a missing
@@ -709,12 +747,18 @@
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
std::map<uint256, COrphanTx>::iterator it =
mapOrphanTransactions.find(hash);
- if (it == mapOrphanTransactions.end()) return 0;
+ if (it == mapOrphanTransactions.end()) {
+ return 0;
+ }
for (const CTxIn &txin : it->second.tx->vin) {
auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
- if (itPrev == mapOrphanTransactionsByPrev.end()) continue;
+ if (itPrev == mapOrphanTransactionsByPrev.end()) {
+ continue;
+ }
itPrev->second.erase(it);
- if (itPrev->second.empty()) mapOrphanTransactionsByPrev.erase(itPrev);
+ if (itPrev->second.empty()) {
+ mapOrphanTransactionsByPrev.erase(itPrev);
+ }
}
mapOrphanTransactions.erase(it);
return 1;
@@ -730,9 +774,10 @@
nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
}
}
- if (nErased > 0)
+ if (nErased > 0) {
LogPrint("mempool", "Erased %d orphan tx from peer=%d\n", nErased,
peer);
+ }
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
@@ -759,17 +804,19 @@
// Sweep again 5 minutes after the next entry that expires in order to
// batch the linear scan.
nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
- if (nErased > 0)
+ if (nErased > 0) {
LogPrint("mempool", "Erased %d orphan tx due to expiration\n",
nErased);
+ }
}
while (mapOrphanTransactions.size() > nMaxOrphans) {
// Evict a random orphan:
uint256 randomhash = GetRandHash();
std::map<uint256, COrphanTx>::iterator it =
mapOrphanTransactions.lower_bound(randomhash);
- if (it == mapOrphanTransactions.end())
+ if (it == mapOrphanTransactions.end()) {
it = mapOrphanTransactions.begin();
+ }
EraseOrphanTx(it->first);
++nEvicted;
}
@@ -822,7 +869,9 @@
void PeerLogicValidation::SyncTransaction(const CTransaction &tx,
const CBlockIndex *pindex,
int nPosInBlock) {
- if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) return;
+ if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) {
+ return;
+ }
LOCK(cs_main);
@@ -830,7 +879,9 @@
// Which orphan pool entries must we evict?
for (size_t j = 0; j < tx.vin.size(); j++) {
auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
- if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
+ if (itByPrev == mapOrphanTransactionsByPrev.end()) {
+ continue;
+ }
for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end();
++mi) {
const CTransaction &orphanTx = *(*mi)->second.tx;
@@ -866,7 +917,9 @@
LOCK(cs_main);
static int nHighestFastAnnounce = 0;
- if (pindex->nHeight <= nHighestFastAnnounce) return;
+ if (pindex->nHeight <= nHighestFastAnnounce) {
+ return;
+ }
nHighestFastAnnounce = pindex->nHeight;
uint256 hashBlock(pblock->GetHash());
@@ -881,8 +934,9 @@
connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker,
&hashBlock](CNode *pnode) {
// TODO: Avoid the repeated-serialization here
- if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
+ if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) {
return;
+ }
ProcessBlockAvailability(pnode->GetId());
CNodeState &state = *State(pnode->GetId());
// If the peer has, or we announced to them the previous block already,
@@ -972,7 +1026,10 @@
MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman);
}
}
- if (it != mapBlockSource.end()) mapBlockSource.erase(it);
+
+ if (it != mapBlockSource.end()) {
+ mapBlockSource.erase(it);
+ }
}
//////////////////////////////////////////////////////////////////////////////
@@ -1070,11 +1127,15 @@
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway.
- if (pfrom->fPauseSend) break;
+ if (pfrom->fPauseSend) {
+ break;
+ }
const CInv &inv = *it;
{
- if (interruptMsgProc) return;
+ if (interruptMsgProc) {
+ return;
+ }
it++;
@@ -1153,12 +1214,14 @@
// Send block from disk
CBlock block;
if (!ReadBlockFromDisk(block, (*mi).second,
- consensusParams))
+ consensusParams)) {
assert(!"cannot load block from disk");
- if (inv.type == MSG_BLOCK)
+ }
+
+ if (inv.type == MSG_BLOCK) {
connman.PushMessage(
pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
- else if (inv.type == MSG_FILTERED_BLOCK) {
+ } else if (inv.type == MSG_FILTERED_BLOCK) {
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
{
@@ -1209,10 +1272,11 @@
pfrom, msgMaker.Make(nSendFlags,
NetMsgType::CMPCTBLOCK,
cmpctblock));
- } else
+ } else {
connman.PushMessage(
pfrom, msgMaker.Make(nSendFlags,
NetMsgType::BLOCK, block));
+ }
}
// Trigger the peer node to send a getblocks request for the
@@ -1261,8 +1325,9 @@
GetMainSignals().Inventory(inv.hash);
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
- inv.type == MSG_CMPCT_BLOCK)
+ inv.type == MSG_CMPCT_BLOCK) {
break;
+ }
}
}
@@ -1423,7 +1488,9 @@
return false;
}
- if (!vRecv.empty()) vRecv >> addrFrom >> nNonce;
+ if (!vRecv.empty()) {
+ vRecv >> addrFrom >> nNonce;
+ }
if (!vRecv.empty()) {
vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
cleanSubVer = SanitizeString(strSubVer);
@@ -1431,7 +1498,9 @@
if (!vRecv.empty()) {
vRecv >> nStartingHeight;
}
- if (!vRecv.empty()) vRecv >> fRelay;
+ if (!vRecv.empty()) {
+ vRecv >> fRelay;
+ }
// Disconnect if we connected to ourself
if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce)) {
LogPrintf("connected to self at %s, disconnecting\n",
@@ -1445,8 +1514,9 @@
}
// Be shy and don't send version until we hear
- if (pfrom->fInbound)
+ if (pfrom->fInbound) {
PushNodeVersion(config, pfrom, connman, GetAdjustedTime());
+ }
connman.PushMessage(
pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
@@ -1506,7 +1576,9 @@
}
std::string remoteAddr;
- if (fLogIPs) remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
+ if (fLogIPs) {
+ remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
+ }
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, "
"peer=%d%s\n",
@@ -1610,13 +1682,17 @@
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
for (CAddress &addr : vAddr) {
- if (interruptMsgProc) return true;
+ if (interruptMsgProc) {
+ return true;
+ }
- if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) {
continue;
+ }
- if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
+ if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
addr.nTime = nNow - 5 * 24 * 60 * 60;
+ }
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 &&
@@ -1625,11 +1701,17 @@
RelayAddress(addr, fReachable, connman);
}
// Do not store addresses outside our network
- if (fReachable) vAddrOk.push_back(addr);
+ if (fReachable) {
+ vAddrOk.push_back(addr);
+ }
}
connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
- if (vAddr.size() < 1000) pfrom->fGetAddr = false;
- if (pfrom->fOneShot) pfrom->fDisconnect = true;
+ if (vAddr.size() < 1000) {
+ pfrom->fGetAddr = false;
+ }
+ if (pfrom->fOneShot) {
+ pfrom->fDisconnect = true;
+ }
}
else if (strCommand == NetMsgType::SENDHEADERS) {
@@ -1719,22 +1801,24 @@
}
} else {
pfrom->AddInventoryKnown(inv);
- if (fBlocksOnly)
+ if (fBlocksOnly) {
LogPrint("net", "transaction (%s) inv sent in violation of "
"protocol peer=%d\n",
inv.hash.ToString(), pfrom->id);
- else if (!fAlreadyHave && !fImporting && !fReindex &&
- !IsInitialBlockDownload())
+ } else if (!fAlreadyHave && !fImporting && !fReindex &&
+ !IsInitialBlockDownload()) {
pfrom->AskFor(inv);
+ }
}
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
}
- if (!vToFetch.empty())
+ if (!vToFetch.empty()) {
connman.PushMessage(pfrom,
msgMaker.Make(NetMsgType::GETDATA, vToFetch));
+ }
}
else if (strCommand == NetMsgType::GETDATA) {
@@ -1790,7 +1874,9 @@
const CBlockIndex *pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
- if (pindex) pindex = chainActive.Next(pindex);
+ if (pindex) {
+ pindex = chainActive.Next(pindex);
+ }
int nLimit = 500;
LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n",
(pindex ? pindex->nHeight : -1),
@@ -1837,8 +1923,9 @@
std::shared_ptr<const CBlock> recent_block;
{
LOCK(cs_most_recent_block);
- if (most_recent_block_hash == req.blockhash)
+ if (most_recent_block_hash == req.blockhash) {
recent_block = most_recent_block;
+ }
// Unlock cs_most_recent_block to avoid cs_main lock inversion
}
if (recent_block) {
@@ -1902,12 +1989,16 @@
if (locator.IsNull()) {
// If locator is null, return the hashStop block
BlockMap::iterator mi = mapBlockIndex.find(hashStop);
- if (mi == mapBlockIndex.end()) return true;
+ if (mi == mapBlockIndex.end()) {
+ return true;
+ }
pindex = (*mi).second;
} else {
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex(chainActive, locator);
- if (pindex) pindex = chainActive.Next(pindex);
+ if (pindex) {
+ pindex = chainActive.Next(pindex);
+ }
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
@@ -1919,7 +2010,9 @@
hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex)) {
vHeaders.push_back(pindex->GetBlockHeader());
- if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) break;
+ if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
+ break;
+ }
}
// pindex can be nullptr either if we sent chainActive.Tip() OR
// if our peer has chainActive.Tip() (and thus we are sending an empty
@@ -1975,7 +2068,7 @@
&fMissingInputs, &lRemovedTxn)) {
mempool.check(pcoinsTip);
RelayTransaction(tx, connman);
- for (unsigned int i = 0; i < tx.vout.size(); i++) {
+ for (size_t i = 0; i < tx.vout.size(); i++) {
vWorkQueue.emplace_back(inv.hash, i);
}
@@ -2069,19 +2162,22 @@
for (const CTxIn &txin : tx.vin) {
CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
pfrom->AddInventoryKnown(_inv);
- if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
+ if (!AlreadyHave(_inv)) {
+ pfrom->AskFor(_inv);
+ }
}
AddOrphanTx(ptx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow
// unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max(
- (int64_t)0,
+ int64_t(0),
GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
- if (nEvicted > 0)
+ if (nEvicted > 0) {
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n",
nEvicted);
+ }
} else {
LogPrint("mempool",
"not keeping orphan with rejected parents %s\n",
@@ -2128,8 +2224,9 @@
}
}
- for (const CTransactionRef &removedTx : lRemovedTxn)
+ for (const CTransactionRef &removedTx : lRemovedTxn) {
AddToCompactExtraTransactions(removedTx);
+ }
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
@@ -2220,13 +2317,15 @@
mapBlocksInFlight.find(pindex->GetBlockHash());
bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
- if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ // Nothing to do here
return true;
+ }
if (pindex->nChainWork <=
chainActive.Tip()->nChainWork || // We know something better
- pindex->nTx !=
- 0) { // We had this block at some point, but pruned it
+ pindex->nTx != 0) {
+ // We had this block at some point, but pruned it
if (fAlreadyInFlight) {
// We requested this block for some reason, but our mempool
// will probably be useless so we just grab the block via
@@ -2245,8 +2344,9 @@
// If we're not close to tip yet, give up and let parallel block
// fetch work its magic.
if (!fAlreadyInFlight &&
- !CanDirectFetch(chainparams.GetConsensus()))
+ !CanDirectFetch(chainparams.GetConsensus())) {
return true;
+ }
CNodeState *nodestate = State(pfrom->GetId());
@@ -2304,8 +2404,9 @@
BlockTransactionsRequest req;
for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
- if (!partialBlock.IsTxAvailable(i))
+ if (!partialBlock.IsTxAvailable(i)) {
req.indexes.push_back(i);
+ }
}
if (req.indexes.empty()) {
// Dirty hack to jump to BLOCKTXN code (TODO: move
@@ -2363,15 +2464,17 @@
}
} // cs_main
- if (fProcessBLOCKTXN)
+ if (fProcessBLOCKTXN) {
return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
blockTxnMsg, nTimeReceived, chainparams,
connman, interruptMsgProc);
+ }
- if (fRevertToHeaderProcessing)
+ if (fRevertToHeaderProcessing) {
return ProcessMessage(config, pfrom, NetMsgType::HEADERS,
vHeadersMsg, nTimeReceived, chainparams,
connman, interruptMsgProc);
+ }
if (fBlockReconstructed) {
// If we got here, we were able to optimistically reconstruct a
@@ -2383,9 +2486,12 @@
}
bool fNewBlock = false;
ProcessNewBlock(config, pblock, true, &fNewBlock);
- if (fNewBlock) pfrom->nLastBlockTime = GetTime();
+ if (fNewBlock) {
+ pfrom->nLastBlockTime = GetTime();
+ }
- LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
+ // hold cs_main for CBlockIndex::IsValid()
+ LOCK(cs_main);
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
// Clear download state for this block, which is in process from
// some other peer. We do this after calling. ProcessNewBlock so
@@ -2478,7 +2584,9 @@
// force it to be processed, even if it would not be a candidate for
// new tip (missing previous block, chain not long enough, etc)
ProcessNewBlock(config, pblock, true, &fNewBlock);
- if (fNewBlock) pfrom->nLastBlockTime = GetTime();
+ if (fNewBlock) {
+ pfrom->nLastBlockTime = GetTime();
+ }
}
}
@@ -2704,7 +2812,9 @@
}
bool fNewBlock = false;
ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
- if (fNewBlock) pfrom->nLastBlockTime = GetTime();
+ if (fNewBlock) {
+ pfrom->nLastBlockTime = GetTime();
+ }
}
else if (strCommand == NetMsgType::GETADDR) {
@@ -2965,22 +3075,31 @@
//
bool fMoreWork = false;
- if (!pfrom->vRecvGetData.empty())
+ if (!pfrom->vRecvGetData.empty()) {
ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman,
interruptMsgProc);
+ }
- if (pfrom->fDisconnect) return false;
+ if (pfrom->fDisconnect) {
+ return false;
+ }
// this maintains the order of responses
- if (!pfrom->vRecvGetData.empty()) return true;
+ if (!pfrom->vRecvGetData.empty()) {
+ return true;
+ }
// Don't bother if send buffer is too full to respond anyway
- if (pfrom->fPauseSend) return false;
+ if (pfrom->fPauseSend) {
+ return false;
+ }
std::list<CNetMessage> msgs;
{
LOCK(pfrom->cs_vProcessMsg);
- if (pfrom->vProcessMsg.empty()) return false;
+ if (pfrom->vProcessMsg.empty()) {
+ return false;
+ }
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg,
pfrom->vProcessMsg.begin());
@@ -3033,7 +3152,9 @@
try {
fRet = ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime,
chainparams, connman, interruptMsgProc);
- if (interruptMsgProc) return false;
+ if (interruptMsgProc) {
+ return false;
+ }
if (!pfrom->vRecvGetData.empty()) fMoreWork = true;
} catch (const std::ios_base::failure &e) {
connman.PushMessage(
@@ -3091,586 +3212,595 @@
bool SendMessages(const Config &config, CNode *pto, CConnman &connman,
const std::atomic<bool> &interruptMsgProc) {
const Consensus::Params &consensusParams = Params().GetConsensus();
- {
- // Don't send anything until the version handshake is complete
- if (!pto->fSuccessfullyConnected || pto->fDisconnect) return true;
- // If we get here, the outgoing message serialization version is set and
- // can't change.
- const CNetMsgMaker msgMaker(pto->GetSendVersion());
+ // Don't send anything until the version handshake is complete
+ if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
+ return true;
+ }
- //
- // Message: ping
- //
- bool pingSend = false;
- if (pto->fPingQueued) {
- // RPC ping request by user
- pingSend = true;
- }
- if (pto->nPingNonceSent == 0 &&
- pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
- // Ping automatically sent as a latency probe & keepalive.
- pingSend = true;
- }
- if (pingSend) {
- uint64_t nonce = 0;
- while (nonce == 0) {
- GetRandBytes((unsigned char *)&nonce, sizeof(nonce));
- }
- pto->fPingQueued = false;
- pto->nPingUsecStart = GetTimeMicros();
- if (pto->nVersion > BIP0031_VERSION) {
- pto->nPingNonceSent = nonce;
- connman.PushMessage(pto,
- msgMaker.Make(NetMsgType::PING, nonce));
- } else {
- // Peer is too old to support ping command with nonce, pong will
- // never arrive.
- pto->nPingNonceSent = 0;
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
- }
+ // If we get here, the outgoing message serialization version is set and
+ // can't change.
+ const CNetMsgMaker msgMaker(pto->GetSendVersion());
+
+ //
+ // Message: ping
+ //
+ bool pingSend = false;
+ if (pto->fPingQueued) {
+ // RPC ping request by user
+ pingSend = true;
+ }
+ if (pto->nPingNonceSent == 0 &&
+ pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
+ // Ping automatically sent as a latency probe & keepalive.
+ pingSend = true;
+ }
+ if (pingSend) {
+ uint64_t nonce = 0;
+ while (nonce == 0) {
+ GetRandBytes((unsigned char *)&nonce, sizeof(nonce));
+ }
+ pto->fPingQueued = false;
+ pto->nPingUsecStart = GetTimeMicros();
+ if (pto->nVersion > BIP0031_VERSION) {
+ pto->nPingNonceSent = nonce;
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
+ } else {
+ // Peer is too old to support ping command with nonce, pong will
+ // never arrive.
+ pto->nPingNonceSent = 0;
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
}
+ }
- // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
- TRY_LOCK(cs_main, lockMain);
- if (!lockMain) return true;
+ // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
+ TRY_LOCK(cs_main, lockMain);
+ if (!lockMain) {
+ return true;
+ }
- if (SendRejectsAndCheckIfBanned(pto, connman)) return true;
- CNodeState &state = *State(pto->GetId());
+ if (SendRejectsAndCheckIfBanned(pto, connman)) {
+ return true;
+ }
+ CNodeState &state = *State(pto->GetId());
- // Address refresh broadcast
- int64_t nNow = GetTimeMicros();
- if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
- AdvertiseLocal(pto);
- pto->nNextLocalAddrSend =
- PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
- }
+ // Address refresh broadcast
+ int64_t nNow = GetTimeMicros();
+ if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
+ AdvertiseLocal(pto);
+ pto->nNextLocalAddrSend =
+ PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ }
- //
- // Message: addr
- //
- if (pto->nNextAddrSend < nNow) {
- pto->nNextAddrSend =
- PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
- std::vector<CAddress> vAddr;
- vAddr.reserve(pto->vAddrToSend.size());
- for (const CAddress &addr : pto->vAddrToSend) {
- if (!pto->addrKnown.contains(addr.GetKey())) {
- pto->addrKnown.insert(addr.GetKey());
- vAddr.push_back(addr);
- // receiver rejects addr messages larger than 1000
- if (vAddr.size() >= 1000) {
- connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
- vAddr.clear();
- }
+ //
+ // Message: addr
+ //
+ if (pto->nNextAddrSend < nNow) {
+ pto->nNextAddrSend =
+ PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
+ std::vector<CAddress> vAddr;
+ vAddr.reserve(pto->vAddrToSend.size());
+ for (const CAddress &addr : pto->vAddrToSend) {
+ if (!pto->addrKnown.contains(addr.GetKey())) {
+ pto->addrKnown.insert(addr.GetKey());
+ vAddr.push_back(addr);
+ // receiver rejects addr messages larger than 1000
+ if (vAddr.size() >= 1000) {
+ connman.PushMessage(pto,
+ msgMaker.Make(NetMsgType::ADDR, vAddr));
+ vAddr.clear();
}
}
- pto->vAddrToSend.clear();
- if (!vAddr.empty())
- connman.PushMessage(pto,
- msgMaker.Make(NetMsgType::ADDR, vAddr));
- // we only send the big addr message once
- if (pto->vAddrToSend.capacity() > 40)
- pto->vAddrToSend.shrink_to_fit();
- }
-
- // Start block sync
- if (pindexBestHeader == nullptr) pindexBestHeader = chainActive.Tip();
- bool fFetch = state.fPreferredDownload ||
- (nPreferredDownload == 0 && !pto->fClient &&
- !pto->fOneShot); // Download if this is a nice peer, or
- // we have no nice peers and this one
- // might do.
- if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
- // Only actively request headers from a single peer, unless we're
- // close to today.
- if ((nSyncStarted == 0 && fFetch) ||
- pindexBestHeader->GetBlockTime() >
- GetAdjustedTime() - 24 * 60 * 60) {
- state.fSyncStarted = true;
- nSyncStarted++;
- const CBlockIndex *pindexStart = pindexBestHeader;
- /* If possible, start at the block preceding the currently
- best known header. This ensures that we always get a
- non-empty list of headers back as long as the peer
- is up-to-date. With a non-empty response, we can initialise
- the peer's known best block. This wouldn't be possible
- if we requested starting at pindexBestHeader and
- got back an empty response. */
- if (pindexStart->pprev) pindexStart = pindexStart->pprev;
- LogPrint(
- "net",
- "initial getheaders (%d) to peer=%d (startheight:%d)\n",
- pindexStart->nHeight, pto->id, pto->nStartingHeight);
- connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::GETHEADERS,
- chainActive.GetLocator(pindexStart),
- uint256()));
- }
+ }
+ pto->vAddrToSend.clear();
+ if (!vAddr.empty()) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
}
- // Resend wallet transactions that haven't gotten in a block yet
- // Except during reindex, importing and IBD, when old wallet
- // transactions become unconfirmed and spams other nodes.
- if (!fReindex && !fImporting && !IsInitialBlockDownload()) {
- GetMainSignals().Broadcast(nTimeBestReceived, &connman);
+ // we only send the big addr message once
+ if (pto->vAddrToSend.capacity() > 40) {
+ pto->vAddrToSend.shrink_to_fit();
}
+ }
- //
- // Try sending block announcements via headers
- //
- {
- // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
- // list of block hashes we're relaying, and our peer wants
- // headers announcements, then find the first header
- // not yet known to our peer but would connect, and send.
- // If no header would connect, or if we have too many
- // blocks, or if the peer doesn't want headers, just
- // add all to the inv queue.
- LOCK(pto->cs_inventory);
- std::vector<CBlock> vHeaders;
- bool fRevertToInv =
- ((!state.fPreferHeaders &&
- (!state.fPreferHeaderAndIDs ||
- pto->vBlockHashesToAnnounce.size() > 1)) ||
- pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
- // last header queued for delivery
- const CBlockIndex *pBestIndex = nullptr;
- ProcessBlockAvailability(
- pto->id); // ensure pindexBestKnownBlock is up-to-date
-
- if (!fRevertToInv) {
- bool fFoundStartingHeader = false;
- // Try to find first header that our peer doesn't have, and
- // then send all headers past that one. If we come across any
- // headers that aren't on chainActive, give up.
- for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
- BlockMap::iterator mi = mapBlockIndex.find(hash);
- assert(mi != mapBlockIndex.end());
- const CBlockIndex *pindex = mi->second;
- if (chainActive[pindex->nHeight] != pindex) {
- // Bail out if we reorged away from this block
- fRevertToInv = true;
- break;
- }
- if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
- // This means that the list of blocks to announce don't
- // connect to each other.
- // This shouldn't really be possible to hit during
- // regular operation (because reorgs should take us to
- // a chain that has some block not on the prior chain,
- // which should be caught by the prior check), but one
- // way this could happen is by using invalidateblock /
- // reconsiderblock repeatedly on the tip, causing it to
- // be added multiple times to vBlockHashesToAnnounce.
- // Robustly deal with this rare situation by reverting
- // to an inv.
- fRevertToInv = true;
- break;
- }
- pBestIndex = pindex;
- if (fFoundStartingHeader) {
- // add this to the headers message
- vHeaders.push_back(pindex->GetBlockHeader());
- } else if (PeerHasHeader(&state, pindex)) {
- continue; // keep looking for the first new block
- } else if (pindex->pprev == nullptr ||
- PeerHasHeader(&state, pindex->pprev)) {
- // Peer doesn't have this header but they do have the
- // prior one.
- // Start sending headers.
- fFoundStartingHeader = true;
- vHeaders.push_back(pindex->GetBlockHeader());
- } else {
- // Peer doesn't have this header or the prior one --
- // nothing will connect, so bail out.
- fRevertToInv = true;
- break;
- }
+ // Start block sync
+ if (pindexBestHeader == nullptr) {
+ pindexBestHeader = chainActive.Tip();
+ }
+
+ // Download if this is a nice peer, or we have no nice peers and this one
+ // might do.
+ bool fFetch = state.fPreferredDownload ||
+ (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot);
+
+ if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
+ // Only actively request headers from a single peer, unless we're close
+ // to today.
+ if ((nSyncStarted == 0 && fFetch) ||
+ pindexBestHeader->GetBlockTime() >
+ GetAdjustedTime() - 24 * 60 * 60) {
+ state.fSyncStarted = true;
+ nSyncStarted++;
+ const CBlockIndex *pindexStart = pindexBestHeader;
+ /**
+ * If possible, start at the block preceding the currently best
+ * known header. This ensures that we always get a non-empty list of
+ * headers back as long as the peer is up-to-date. With a non-empty
+ * response, we can initialise the peer's known best block. This
+ * wouldn't be possible if we requested starting at pindexBestHeader
+ * and got back an empty response.
+ */
+ if (pindexStart->pprev) {
+ pindexStart = pindexStart->pprev;
+ }
+
+ LogPrint("net",
+ "initial getheaders (%d) to peer=%d (startheight:%d)\n",
+ pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ connman.PushMessage(
+ pto,
+ msgMaker.Make(NetMsgType::GETHEADERS,
+ chainActive.GetLocator(pindexStart), uint256()));
+ }
+ }
+
+ // Resend wallet transactions that haven't gotten in a block yet
+ // Except during reindex, importing and IBD, when old wallet transactions
+ // become unconfirmed and spam other nodes.
+ if (!fReindex && !fImporting && !IsInitialBlockDownload()) {
+ GetMainSignals().Broadcast(nTimeBestReceived, &connman);
+ }
+
+ //
+ // Try sending block announcements via headers
+ //
+ {
+ // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
+ // hashes we're relaying, and our peer wants headers announcements, then
+ // find the first header not yet known to our peer but that would connect,
+ // and send. If no header would connect, or if we have too many blocks,
+ // or if the peer doesn't want headers, just add all to the inv queue.
+ LOCK(pto->cs_inventory);
+ std::vector<CBlock> vHeaders;
+ bool fRevertToInv =
+ ((!state.fPreferHeaders &&
+ (!state.fPreferHeaderAndIDs ||
+ pto->vBlockHashesToAnnounce.size() > 1)) ||
+ pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
+ // last header queued for delivery
+ const CBlockIndex *pBestIndex = nullptr;
+ // ensure pindexBestKnownBlock is up-to-date
+ ProcessBlockAvailability(pto->id);
+
+ if (!fRevertToInv) {
+ bool fFoundStartingHeader = false;
+ // Try to find the first header that our peer doesn't have, and then
+ // send all headers past that one. If we come across any headers that
+ // aren't on chainActive, give up.
+ for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
+ BlockMap::iterator mi = mapBlockIndex.find(hash);
+ assert(mi != mapBlockIndex.end());
+ const CBlockIndex *pindex = mi->second;
+ if (chainActive[pindex->nHeight] != pindex) {
+ // Bail out if we reorged away from this block
+ fRevertToInv = true;
+ break;
+ }
+ if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
+ // This means that the list of blocks to announce don't
+ // connect to each other. This shouldn't really be possible
+ // to hit during regular operation (because reorgs should
+ // take us to a chain that has some block not on the prior
+ // chain, which should be caught by the prior check), but
+ // one way this could happen is by using invalidateblock /
+ // reconsiderblock repeatedly on the tip, causing it to be
+ // added multiple times to vBlockHashesToAnnounce. Robustly
+ // deal with this rare situation by reverting to an inv.
+ fRevertToInv = true;
+ break;
+ }
+ pBestIndex = pindex;
+ if (fFoundStartingHeader) {
+ // add this to the headers message
+ vHeaders.push_back(pindex->GetBlockHeader());
+ } else if (PeerHasHeader(&state, pindex)) {
+ // Keep looking for the first new block.
+ continue;
+ } else if (pindex->pprev == nullptr ||
+ PeerHasHeader(&state, pindex->pprev)) {
+ // Peer doesn't have this header but they do have the prior
+ // one.
+ // Start sending headers.
+ fFoundStartingHeader = true;
+ vHeaders.push_back(pindex->GetBlockHeader());
+ } else {
+ // Peer doesn't have this header or the prior one --
+ // nothing will connect, so bail out.
+ fRevertToInv = true;
+ break;
}
}
- if (!fRevertToInv && !vHeaders.empty()) {
- if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
- // We only send up to 1 block as header-and-ids, as
- // otherwise probably means we're doing an initial-ish-sync
- // or they're slow.
- LogPrint("net", "%s sending header-and-ids %s to peer=%d\n",
- __func__, vHeaders.front().GetHash().ToString(),
- pto->id);
+ }
+ if (!fRevertToInv && !vHeaders.empty()) {
+ if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
+ // We only send up to 1 block as header-and-ids, as otherwise it
+ // probably means we're doing an initial-ish-sync or they're
+ // slow.
+ LogPrint("net", "%s sending header-and-ids %s to peer=%d\n",
+ __func__, vHeaders.front().GetHash().ToString(),
+ pto->id);
- int nSendFlags = 0;
+ int nSendFlags = 0;
- bool fGotBlockFromCache = false;
- {
- LOCK(cs_most_recent_block);
- if (most_recent_block_hash ==
- pBestIndex->GetBlockHash()) {
- CBlockHeaderAndShortTxIDs cmpctblock(
- *most_recent_block);
- connman.PushMessage(
- pto, msgMaker.Make(nSendFlags,
- NetMsgType::CMPCTBLOCK,
- cmpctblock));
- fGotBlockFromCache = true;
- }
- }
- if (!fGotBlockFromCache) {
- CBlock block;
- bool ret = ReadBlockFromDisk(block, pBestIndex,
- consensusParams);
- assert(ret);
- CBlockHeaderAndShortTxIDs cmpctblock(block);
+ bool fGotBlockFromCache = false;
+ {
+ LOCK(cs_most_recent_block);
+ if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
+ CBlockHeaderAndShortTxIDs cmpctblock(
+ *most_recent_block);
connman.PushMessage(
pto,
msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
cmpctblock));
+ fGotBlockFromCache = true;
}
- state.pindexBestHeaderSent = pBestIndex;
- } else if (state.fPreferHeaders) {
- if (vHeaders.size() > 1) {
- LogPrint("net",
- "%s: %u headers, range (%s, %s), to peer=%d\n",
- __func__, vHeaders.size(),
- vHeaders.front().GetHash().ToString(),
- vHeaders.back().GetHash().ToString(), pto->id);
- } else {
- LogPrint("net", "%s: sending header %s to peer=%d\n",
- __func__,
- vHeaders.front().GetHash().ToString(),
- pto->id);
- }
+ }
+ if (!fGotBlockFromCache) {
+ CBlock block;
+ bool ret =
+ ReadBlockFromDisk(block, pBestIndex, consensusParams);
+ assert(ret);
+ CBlockHeaderAndShortTxIDs cmpctblock(block);
connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
- state.pindexBestHeaderSent = pBestIndex;
- } else
- fRevertToInv = true;
- }
- if (fRevertToInv) {
- // If falling back to using an inv, just try to inv the tip. The
- // last entry in vBlockHashesToAnnounce was our tip at some
- // point in the past.
- if (!pto->vBlockHashesToAnnounce.empty()) {
- const uint256 &hashToAnnounce =
- pto->vBlockHashesToAnnounce.back();
- BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
- assert(mi != mapBlockIndex.end());
- const CBlockIndex *pindex = mi->second;
-
- // Warn if we're announcing a block that is not on the main
- // chain. This should be very rare and could be optimized
- // out. Just log for now.
- if (chainActive[pindex->nHeight] != pindex) {
- LogPrint(
- "net",
- "Announcing block %s not on main chain (tip=%s)\n",
- hashToAnnounce.ToString(),
- chainActive.Tip()->GetBlockHash().ToString());
- }
+ pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
+ cmpctblock));
+ }
+ state.pindexBestHeaderSent = pBestIndex;
+ } else if (state.fPreferHeaders) {
+ if (vHeaders.size() > 1) {
+ LogPrint("net",
+ "%s: %u headers, range (%s, %s), to peer=%d\n",
+ __func__, vHeaders.size(),
+ vHeaders.front().GetHash().ToString(),
+ vHeaders.back().GetHash().ToString(), pto->id);
+ } else {
+ LogPrint("net", "%s: sending header %s to peer=%d\n",
+ __func__, vHeaders.front().GetHash().ToString(),
+ pto->id);
+ }
+ connman.PushMessage(
+ pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ state.pindexBestHeaderSent = pBestIndex;
+ } else {
+ fRevertToInv = true;
+ }
+ }
+ if (fRevertToInv) {
+ // If falling back to using an inv, just try to inv the tip. The
+ // last entry in vBlockHashesToAnnounce was our tip at some point in
+ // the past.
+ if (!pto->vBlockHashesToAnnounce.empty()) {
+ const uint256 &hashToAnnounce =
+ pto->vBlockHashesToAnnounce.back();
+ BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
+ assert(mi != mapBlockIndex.end());
+ const CBlockIndex *pindex = mi->second;
+
+ // Warn if we're announcing a block that is not on the main
+ // chain. This should be very rare and could be optimized out.
+ // Just log for now.
+ if (chainActive[pindex->nHeight] != pindex) {
+ LogPrint("net",
+ "Announcing block %s not on main chain (tip=%s)\n",
+ hashToAnnounce.ToString(),
+ chainActive.Tip()->GetBlockHash().ToString());
+ }
- // If the peer's chain has this block, don't inv it back.
- if (!PeerHasHeader(&state, pindex)) {
- pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
- LogPrint("net", "%s: sending inv peer=%d hash=%s\n",
- __func__, pto->id, hashToAnnounce.ToString());
- }
+ // If the peer's chain has this block, don't inv it back.
+ if (!PeerHasHeader(&state, pindex)) {
+ pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
+ LogPrint("net", "%s: sending inv peer=%d hash=%s\n",
+ __func__, pto->id, hashToAnnounce.ToString());
}
}
- pto->vBlockHashesToAnnounce.clear();
}
+ pto->vBlockHashesToAnnounce.clear();
+ }
- //
- // Message: inventory
- //
- std::vector<CInv> vInv;
- {
- LOCK(pto->cs_inventory);
- vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
- INVENTORY_BROADCAST_MAX));
+ //
+ // Message: inventory
+ //
+ std::vector<CInv> vInv;
+ {
+ LOCK(pto->cs_inventory);
+ vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
+ INVENTORY_BROADCAST_MAX));
- // Add blocks
- for (const uint256 &hash : pto->vInventoryBlockToSend) {
- vInv.push_back(CInv(MSG_BLOCK, hash));
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto,
- msgMaker.Make(NetMsgType::INV, vInv));
- vInv.clear();
- }
+ // Add blocks
+ for (const uint256 &hash : pto->vInventoryBlockToSend) {
+ vInv.push_back(CInv(MSG_BLOCK, hash));
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
}
- pto->vInventoryBlockToSend.clear();
+ }
+ pto->vInventoryBlockToSend.clear();
- // Check whether periodic sends should happen
- bool fSendTrickle = pto->fWhitelisted;
- if (pto->nNextInvSend < nNow) {
- fSendTrickle = true;
- // Use half the delay for outbound peers, as there is less
- // privacy concern for them.
- pto->nNextInvSend = PoissonNextSend(
- nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
- }
+ // Check whether periodic sends should happen
+ bool fSendTrickle = pto->fWhitelisted;
+ if (pto->nNextInvSend < nNow) {
+ fSendTrickle = true;
+ // Use half the delay for outbound peers, as there is less privacy
+ // concern for them.
+ pto->nNextInvSend = PoissonNextSend(
+ nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
+ }
- // Time to send but the peer has requested we not relay
- // transactions.
- if (fSendTrickle) {
- LOCK(pto->cs_filter);
- if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
+ // Time to send but the peer has requested we not relay transactions.
+ if (fSendTrickle) {
+ LOCK(pto->cs_filter);
+ if (!pto->fRelayTxes) {
+ pto->setInventoryTxToSend.clear();
}
+ }
- // Respond to BIP35 mempool requests
- if (fSendTrickle && pto->fSendMempool) {
- auto vtxinfo = mempool.infoAll();
- pto->fSendMempool = false;
- CAmount filterrate = 0;
- {
- LOCK(pto->cs_feeFilter);
- filterrate = pto->minFeeFilter;
- }
+ // Respond to BIP35 mempool requests
+ if (fSendTrickle && pto->fSendMempool) {
+ auto vtxinfo = mempool.infoAll();
+ pto->fSendMempool = false;
+ CAmount filterrate = 0;
+ {
+ LOCK(pto->cs_feeFilter);
+ filterrate = pto->minFeeFilter;
+ }
- LOCK(pto->cs_filter);
+ LOCK(pto->cs_filter);
- for (const auto &txinfo : vtxinfo) {
- const uint256 &txid = txinfo.tx->GetId();
- CInv inv(MSG_TX, txid);
- pto->setInventoryTxToSend.erase(txid);
- if (filterrate) {
- if (txinfo.feeRate.GetFeePerK() < filterrate) continue;
- }
- if (pto->pfilter) {
- if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx))
- continue;
+ for (const auto &txinfo : vtxinfo) {
+ const uint256 &txid = txinfo.tx->GetId();
+ CInv inv(MSG_TX, txid);
+ pto->setInventoryTxToSend.erase(txid);
+ if (filterrate) {
+ if (txinfo.feeRate.GetFeePerK() < filterrate) {
+ continue;
}
- pto->filterInventoryKnown.insert(txid);
- vInv.push_back(inv);
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::INV, vInv));
- vInv.clear();
+ }
+ if (pto->pfilter) {
+ if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) {
+ continue;
}
}
- pto->timeLastMempoolReq = GetTime();
- }
-
- // Determine transactions to relay
- if (fSendTrickle) {
- // Produce a vector with all candidates for sending
- std::vector<std::set<uint256>::iterator> vInvTx;
- vInvTx.reserve(pto->setInventoryTxToSend.size());
- for (std::set<uint256>::iterator it =
- pto->setInventoryTxToSend.begin();
- it != pto->setInventoryTxToSend.end(); it++) {
- vInvTx.push_back(it);
+ pto->filterInventoryKnown.insert(txid);
+ vInv.push_back(inv);
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto,
+ msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
}
- CAmount filterrate = 0;
- {
- LOCK(pto->cs_feeFilter);
- filterrate = pto->minFeeFilter;
+ }
+ pto->timeLastMempoolReq = GetTime();
+ }
+
+ // Determine transactions to relay
+ if (fSendTrickle) {
+ // Produce a vector with all candidates for sending
+ std::vector<std::set<uint256>::iterator> vInvTx;
+ vInvTx.reserve(pto->setInventoryTxToSend.size());
+ for (std::set<uint256>::iterator it =
+ pto->setInventoryTxToSend.begin();
+ it != pto->setInventoryTxToSend.end(); it++) {
+ vInvTx.push_back(it);
+ }
+ CAmount filterrate = 0;
+ {
+ LOCK(pto->cs_feeFilter);
+ filterrate = pto->minFeeFilter;
+ }
+ // Topologically and fee-rate sort the inventory we send for privacy
+ // and priority reasons. A heap is used so that not all items need
+ // sorting if only a few are being sent.
+ CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+ std::make_heap(vInvTx.begin(), vInvTx.end(),
+ compareInvMempoolOrder);
+ // No reason to drain out at many times the network's capacity,
+ // especially since we have many peers and some will draw much
+ // shorter delays.
+ unsigned int nRelayedTransactions = 0;
+ LOCK(pto->cs_filter);
+ while (!vInvTx.empty() &&
+ nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
+ // Fetch the top element from the heap
+ std::pop_heap(vInvTx.begin(), vInvTx.end(),
+ compareInvMempoolOrder);
+ std::set<uint256>::iterator it = vInvTx.back();
+ vInvTx.pop_back();
+ uint256 hash = *it;
+ // Remove it from the to-be-sent set
+ pto->setInventoryTxToSend.erase(it);
+ // Check if not in the filter already
+ if (pto->filterInventoryKnown.contains(hash)) {
+ continue;
}
- // Topologically and fee-rate sort the inventory we send for
- // privacy and priority reasons. A heap is used so that not all
- // items need sorting if only a few are being sent.
- CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
- std::make_heap(vInvTx.begin(), vInvTx.end(),
- compareInvMempoolOrder);
- // No reason to drain out at many times the network's capacity,
- // especially since we have many peers and some will draw much
- // shorter delays.
- unsigned int nRelayedTransactions = 0;
- LOCK(pto->cs_filter);
- while (!vInvTx.empty() &&
- nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
- // Fetch the top element from the heap
- std::pop_heap(vInvTx.begin(), vInvTx.end(),
- compareInvMempoolOrder);
- std::set<uint256>::iterator it = vInvTx.back();
- vInvTx.pop_back();
- uint256 hash = *it;
- // Remove it from the to-be-sent set
- pto->setInventoryTxToSend.erase(it);
- // Check if not in the filter already
- if (pto->filterInventoryKnown.contains(hash)) {
- continue;
- }
- // Not in the mempool anymore? don't bother sending it.
- auto txinfo = mempool.info(hash);
- if (!txinfo.tx) {
- continue;
- }
- if (filterrate &&
- txinfo.feeRate.GetFeePerK() < filterrate) {
- continue;
+ // Not in the mempool anymore? don't bother sending it.
+ auto txinfo = mempool.info(hash);
+ if (!txinfo.tx) {
+ continue;
+ }
+ if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
+ continue;
+ }
+ if (pto->pfilter &&
+ !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) {
+ continue;
+ }
+ // Send
+ vInv.push_back(CInv(MSG_TX, hash));
+ nRelayedTransactions++;
+ {
+ // Expire old relay messages
+ while (!vRelayExpiration.empty() &&
+ vRelayExpiration.front().first < nNow) {
+ mapRelay.erase(vRelayExpiration.front().second);
+ vRelayExpiration.pop_front();
}
- if (pto->pfilter &&
- !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx))
- continue;
- // Send
- vInv.push_back(CInv(MSG_TX, hash));
- nRelayedTransactions++;
- {
- // Expire old relay messages
- while (!vRelayExpiration.empty() &&
- vRelayExpiration.front().first < nNow) {
- mapRelay.erase(vRelayExpiration.front().second);
- vRelayExpiration.pop_front();
- }
- auto ret = mapRelay.insert(
- std::make_pair(hash, std::move(txinfo.tx)));
- if (ret.second) {
- vRelayExpiration.push_back(std::make_pair(
- nNow + 15 * 60 * 1000000, ret.first));
- }
- }
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::INV, vInv));
- vInv.clear();
+ auto ret = mapRelay.insert(
+ std::make_pair(hash, std::move(txinfo.tx)));
+ if (ret.second) {
+ vRelayExpiration.push_back(std::make_pair(
+ nNow + 15 * 60 * 1000000, ret.first));
}
- pto->filterInventoryKnown.insert(hash);
}
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto,
+ msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
+ }
+ pto->filterInventoryKnown.insert(hash);
}
}
- if (!vInv.empty())
- connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ }
+ if (!vInv.empty()) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ }
- // Detect whether we're stalling
- nNow = GetTimeMicros();
- if (state.nStallingSince &&
- state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
- // Stalling only triggers when the block download window cannot
- // move. During normal steady state, the download window should be
- // much larger than the to-be-downloaded set of blocks, so
- // disconnection should only happen during initial block download.
- LogPrintf("Peer=%d is stalling block download, disconnecting\n",
- pto->id);
+ // Detect whether we're stalling
+ nNow = GetTimeMicros();
+ if (state.nStallingSince &&
+ state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
+ // Stalling only triggers when the block download window cannot move.
+ // During normal steady state, the download window should be much larger
+ // than the to-be-downloaded set of blocks, so disconnection should only
+ // happen during initial block download.
+ LogPrintf("Peer=%d is stalling block download, disconnecting\n",
+ pto->id);
+ pto->fDisconnect = true;
+ return true;
+ }
+ // In case there is a block that has been in flight from this peer for 2 +
+ // 0.5 * N times the block interval (with N the number of peers from which
+ // we're downloading validated blocks), disconnect due to timeout. We
+ // compensate for other peers to prevent killing off peers due to our own
+ // downstream link being saturated. We only count validated in-flight blocks
+ // so peers can't advertise non-existing block hashes to unreasonably
+ // increase our timeout.
+ if (state.vBlocksInFlight.size() > 0) {
+ QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
+ int nOtherPeersWithValidatedDownloads =
+ nPeersWithValidatedDownloads -
+ (state.nBlocksInFlightValidHeaders > 0);
+ if (nNow > state.nDownloadingSince +
+ consensusParams.nPowTargetSpacing *
+ (BLOCK_DOWNLOAD_TIMEOUT_BASE +
+ BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
+ nOtherPeersWithValidatedDownloads)) {
+ LogPrintf("Timeout downloading block %s from peer=%d, "
+ "disconnecting\n",
+ queuedBlock.hash.ToString(), pto->id);
pto->fDisconnect = true;
return true;
}
- // In case there is a block that has been in flight from this peer for 2
- // + 0.5 * N times the block interval (with N the number of peers from
- // which we're downloading validated blocks), disconnect due to timeout.
- // We compensate for other peers to prevent killing off peers due to our
- // own downstream link being saturated. We only count validated
- // in-flight blocks so peers can't advertise non-existing block hashes
- // to unreasonably increase our timeout.
- if (state.vBlocksInFlight.size() > 0) {
- QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
- int nOtherPeersWithValidatedDownloads =
- nPeersWithValidatedDownloads -
- (state.nBlocksInFlightValidHeaders > 0);
- if (nNow > state.nDownloadingSince +
- consensusParams.nPowTargetSpacing *
- (BLOCK_DOWNLOAD_TIMEOUT_BASE +
- BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
- nOtherPeersWithValidatedDownloads)) {
- LogPrintf("Timeout downloading block %s from peer=%d, "
- "disconnecting\n",
- queuedBlock.hash.ToString(), pto->id);
- pto->fDisconnect = true;
- return true;
+ }
+
+ //
+ // Message: getdata (blocks)
+ //
+ std::vector<CInv> vGetData;
+ if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) &&
+ state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ std::vector<const CBlockIndex *> vToDownload;
+ NodeId staller = -1;
+ FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER -
+ state.nBlocksInFlight,
+ vToDownload, staller, consensusParams);
+ for (const CBlockIndex *pindex : vToDownload) {
+ uint32_t nFetchFlags =
+ GetFetchFlags(pto, pindex->pprev, consensusParams);
+ vGetData.push_back(
+ CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(),
+ consensusParams, pindex);
+ LogPrint("net", "Requesting block %s (%d) peer=%d\n",
+ pindex->GetBlockHash().ToString(), pindex->nHeight,
+ pto->id);
+ }
+ if (state.nBlocksInFlight == 0 && staller != -1) {
+ if (State(staller)->nStallingSince == 0) {
+ State(staller)->nStallingSince = nNow;
+ LogPrint("net", "Stall started peer=%d\n", staller);
}
}
+ }
- //
- // Message: getdata (blocks)
- //
- std::vector<CInv> vGetData;
- if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) &&
- state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- std::vector<const CBlockIndex *> vToDownload;
- NodeId staller = -1;
- FindNextBlocksToDownload(pto->GetId(),
- MAX_BLOCKS_IN_TRANSIT_PER_PEER -
- state.nBlocksInFlight,
- vToDownload, staller, consensusParams);
- for (const CBlockIndex *pindex : vToDownload) {
- uint32_t nFetchFlags =
- GetFetchFlags(pto, pindex->pprev, consensusParams);
- vGetData.push_back(
- CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(config, pto->GetId(),
- pindex->GetBlockHash(), consensusParams,
- pindex);
- LogPrint("net", "Requesting block %s (%d) peer=%d\n",
- pindex->GetBlockHash().ToString(), pindex->nHeight,
+ //
+ // Message: getdata (non-blocks)
+ //
+ while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) {
+ const CInv &inv = (*pto->mapAskFor.begin()).second;
+ if (!AlreadyHave(inv)) {
+ if (fDebug) {
+ LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(),
pto->id);
}
- if (state.nBlocksInFlight == 0 && staller != -1) {
- if (State(staller)->nStallingSince == 0) {
- State(staller)->nStallingSince = nNow;
- LogPrint("net", "Stall started peer=%d\n", staller);
- }
- }
- }
-
- //
- // Message: getdata (non-blocks)
- //
- while (!pto->mapAskFor.empty() &&
- (*pto->mapAskFor.begin()).first <= nNow) {
- const CInv &inv = (*pto->mapAskFor.begin()).second;
- if (!AlreadyHave(inv)) {
- if (fDebug)
- LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(),
- pto->id);
- vGetData.push_back(inv);
- if (vGetData.size() >= 1000) {
- connman.PushMessage(
- pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- vGetData.clear();
- }
- } else {
- // If we're not going to ask, don't expect a response.
- pto->setAskFor.erase(inv.hash);
+ vGetData.push_back(inv);
+ if (vGetData.size() >= 1000) {
+ connman.PushMessage(
+ pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ vGetData.clear();
}
- pto->mapAskFor.erase(pto->mapAskFor.begin());
+ } else {
+ // If we're not going to ask, don't expect a response.
+ pto->setAskFor.erase(inv.hash);
}
- if (!vGetData.empty())
- connman.PushMessage(pto,
- msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ pto->mapAskFor.erase(pto->mapAskFor.begin());
+ }
+ if (!vGetData.empty()) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
- //
- // Message: feefilter
- //
- // We don't want white listed peers to filter txs to us if we have
- // -whitelistforcerelay
- if (pto->nVersion >= FEEFILTER_VERSION &&
- GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
- !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay",
- DEFAULT_WHITELISTFORCERELAY))) {
- CAmount currentFilter =
- mempool
- .GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
- 1000000)
- .GetFeePerK();
- int64_t timeNow = GetTimeMicros();
- if (timeNow > pto->nextSendTimeFeeFilter) {
- static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
- static FeeFilterRounder filterRounder(default_feerate);
- CAmount filterToSend = filterRounder.round(currentFilter);
- // If we don't allow free transactions, then we always have a
- // fee filter of at least minRelayTxFee
- if (GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0)
- filterToSend =
- std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
- if (filterToSend != pto->lastSentFeeFilter) {
- connman.PushMessage(
- pto,
- msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
- pto->lastSentFeeFilter = filterToSend;
- }
- pto->nextSendTimeFeeFilter =
- PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
- }
- // If the fee filter has changed substantially and it's still more
- // than MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then
- // move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
- else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
- pto->nextSendTimeFeeFilter &&
- (currentFilter < 3 * pto->lastSentFeeFilter / 4 ||
- currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
- pto->nextSendTimeFeeFilter =
- timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
- }
+ //
+ // Message: feefilter
+ //
+ // We don't want white listed peers to filter txs to us if we have
+ // -whitelistforcerelay
+ if (pto->nVersion >= FEEFILTER_VERSION &&
+ GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
+ !(pto->fWhitelisted &&
+ GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
+ CAmount currentFilter =
+ mempool
+ .GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
+ 1000000)
+ .GetFeePerK();
+ int64_t timeNow = GetTimeMicros();
+ if (timeNow > pto->nextSendTimeFeeFilter) {
+ static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
+ static FeeFilterRounder filterRounder(default_feerate);
+ CAmount filterToSend = filterRounder.round(currentFilter);
+ // If we don't allow free transactions, then we always have a fee
+ // filter of at least minRelayTxFee
+ if (GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) {
+ filterToSend =
+ std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
+ }
+
+ if (filterToSend != pto->lastSentFeeFilter) {
+ connman.PushMessage(
+ pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
+ pto->lastSentFeeFilter = filterToSend;
+ }
+ pto->nextSendTimeFeeFilter =
+ PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
+ }
+ // If the fee filter has changed substantially and it's still more than
+ // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
+ // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
+ else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
+ pto->nextSendTimeFeeFilter &&
+ (currentFilter < 3 * pto->lastSentFeeFilter / 4 ||
+ currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
+ pto->nextSendTimeFeeFilter =
+ timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
}
}
return true;
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Mon, May 12, 01:49 (3 h, 40 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5777077
Default Alt Text
D396.id986.diff (79 KB)
Attached To
D396: A pack of nits in net_processing.cpp
Event Timeline
Log In to Comment