diff --git a/src/net_processing.cpp b/src/net_processing.cpp
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1244,233 +1244,233 @@
     connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
 }
 
-static void ProcessGetData(const Config &config, CNode *pfrom,
-                           CConnman *connman,
-                           const std::atomic<bool> &interruptMsgProc) {
+void static ProcessGetBlockData(const Config &config, CNode *pfrom,
+                                const CInv &inv, CConnman *connman,
+                                const std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
-    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
-    std::vector<CInv> vNotFound;
-    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+    LOCK(cs_main);
 
-    while (it != pfrom->vRecvGetData.end()) {
-        // Don't bother if send buffer is too full to respond anyway.
-        if (pfrom->fPauseSend) {
-            break;
-        }
+    bool send = false;
+    std::shared_ptr<const CBlock> a_recent_block;
+    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
+    {
+        LOCK(cs_most_recent_block);
+        a_recent_block = most_recent_block;
+        a_recent_compact_block = most_recent_compact_block;
+    }
 
-        const CInv &inv = *it;
-        {
-            if (interruptMsgProc) {
-                return;
+    {
+        BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+        if (mi != mapBlockIndex.end()) {
+            if (mi->second->nChainTx &&
+                !mi->second->IsValid(BlockValidity::SCRIPTS) &&
+                mi->second->IsValid(BlockValidity::TREE)) {
+                // If we have the block and all of its parents, but have
+                // not yet validated it, we might be in the middle of
+                // connecting it (ie in the unlock of cs_main before
+                // ActivateBestChain but after AcceptBlock). In this
+                // case, we need to run ActivateBestChain prior to
+                // checking the relay conditions below.
+                CValidationState dummy;
+                ActivateBestChain(config, dummy, a_recent_block);
             }
+        }
+    }
 
+    BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+    if (mi != mapBlockIndex.end()) {
+        send = BlockRequestAllowed(mi->second, consensusParams);
+        if (!send) {
+            LogPrint(BCLog::NET,
+                     "%s: ignoring request from peer=%i for old "
+                     "block that isn't in the main chain\n",
+                     __func__, pfrom->GetId());
+        }
+    }
+    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+    // Disconnect node in case we have reached the outbound limit
+    // for serving historical blocks.
+    // Never disconnect whitelisted nodes.
+    if (send && connman->OutboundTargetReached(true) &&
+        (((pindexBestHeader != nullptr) &&
+          (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() >
+           HISTORICAL_BLOCK_AGE)) ||
+         inv.type == MSG_FILTERED_BLOCK) &&
+        !pfrom->fWhitelisted) {
+        LogPrint(BCLog::NET,
+                 "historical block serving limit reached, "
+                 "disconnect peer=%d\n",
+                 pfrom->GetId());
 
-            it++;
+        // disconnect node
+        pfrom->fDisconnect = true;
+        send = false;
+    }
+    // Avoid leaking prune-height by never sending blocks below the
+    // NODE_NETWORK_LIMITED threshold.
+    // Add two blocks buffer extension for possible races
+    if (send && !pfrom->fWhitelisted &&
+        ((((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) ==
+            NODE_NETWORK_LIMITED) &&
+           ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
+           (chainActive.Tip()->nHeight - mi->second->nHeight >
+            (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
+        LogPrint(BCLog::NET,
+                 "Ignore block request below NODE_NETWORK_LIMITED "
+                 "threshold from peer=%d\n",
+                 pfrom->GetId());
 
-            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
-                inv.type == MSG_CMPCT_BLOCK) {
-                bool send = false;
-                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
-                std::shared_ptr<const CBlock> a_recent_block;
-                std::shared_ptr<const CBlockHeaderAndShortTxIDs>
-                    a_recent_compact_block;
-                {
-                    LOCK(cs_most_recent_block);
-                    a_recent_block = most_recent_block;
-                    a_recent_compact_block = most_recent_compact_block;
+        // disconnect node and prevent it from stalling (would
+        // otherwise wait for the missing block)
+        pfrom->fDisconnect = true;
+        send = false;
+    }
+    // Pruned nodes may have deleted the block, so check whether
+    // it's available before trying to send.
+    if (send && (mi->second->nStatus.hasData())) {
+        std::shared_ptr<const CBlock> pblock;
+        if (a_recent_block &&
+            a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
+            pblock = a_recent_block;
+        } else {
+            // Send block from disk
+            std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
+            if (!ReadBlockFromDisk(*pblockRead, (*mi).second, config))
+                assert(!"cannot load block from disk");
+            pblock = pblockRead;
+        }
+        if (inv.type == MSG_BLOCK) {
+            connman->PushMessage(pfrom,
+                                 msgMaker.Make(NetMsgType::BLOCK, *pblock));
+        } else if (inv.type == MSG_FILTERED_BLOCK) {
+            bool sendMerkleBlock = false;
+            CMerkleBlock merkleBlock;
+            {
+                LOCK(pfrom->cs_filter);
+                if (pfrom->pfilter) {
+                    sendMerkleBlock = true;
+                    merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
                 }
-                if (mi != mapBlockIndex.end()) {
-                    if (mi->second->nChainTx &&
-                        !mi->second->IsValid(BlockValidity::SCRIPTS) &&
-                        mi->second->IsValid(BlockValidity::TREE)) {
-                        // If we have the block and all of its parents, but have
-                        // not yet validated it, we might be in the middle of
-                        // connecting it (ie in the unlock of cs_main before
-                        // ActivateBestChain but after AcceptBlock). In this
-                        // case, we need to run ActivateBestChain prior to
-                        // checking the relay conditions below.
-                        CValidationState dummy;
-                        ActivateBestChain(config, dummy, a_recent_block);
-                    }
-                    send = BlockRequestAllowed(mi->second, consensusParams);
-                    if (!send) {
-                        LogPrint(BCLog::NET,
-                                 "%s: ignoring request from peer=%i for old "
-                                 "block that isn't in the main chain\n",
-                                 __func__, pfrom->GetId());
-                    }
+            }
+            if (sendMerkleBlock) {
+                connman->PushMessage(
+                    pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
+                // CMerkleBlock just contains hashes, so also push
+                // any transactions in the block the client did not
+                // see. This avoids hurting performance by
+                // pointlessly requiring a round-trip. Note that
+                // there is currently no way for a node to request
+                // any single transactions we didn't send here -
+                // they must either disconnect and retry or request
+                // the full block. Thus, the protocol spec specified
+                // allows for us to provide duplicate txn here,
+                // however we MUST always provide at least what the
+                // remote peer needs.
+                typedef std::pair<unsigned int, uint256> PairType;
+                for (PairType &pair : merkleBlock.vMatchedTxn) {
+                    connman->PushMessage(
+                        pfrom, msgMaker.Make(NetMsgType::TX,
+                                             *pblock->vtx[pair.first]));
                 }
+            }
+            // else
+            // no response
+        } else if (inv.type == MSG_CMPCT_BLOCK) {
+            // If a peer is asking for old blocks, we're almost
+            // guaranteed they won't have a useful mempool to match
+            // against a compact block, and we don't feel like
+            // constructing the object for them, so instead we
+            // respond with the full, non-compact block.
+            int nSendFlags = 0;
+            if (CanDirectFetch(consensusParams) &&
+                mi->second->nHeight >=
+                    chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
+                CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
+                connman->PushMessage(
+                    pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
+                                         cmpctblock));
+            } else {
+                connman->PushMessage(
+                    pfrom,
+                    msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
+            }
+        }
 
-                // Disconnect node in case we have reached the outbound limit
-                // for serving historical blocks.
-                // Never disconnect whitelisted nodes.
-                if (send && connman->OutboundTargetReached(true) &&
-                    (((pindexBestHeader != nullptr) &&
-                      (pindexBestHeader->GetBlockTime() -
-                           mi->second->GetBlockTime() >
-                       HISTORICAL_BLOCK_AGE)) ||
-                     inv.type == MSG_FILTERED_BLOCK) &&
-                    !pfrom->fWhitelisted) {
-                    LogPrint(BCLog::NET,
-                             "historical block serving limit reached, "
-                             "disconnect peer=%d\n",
-                             pfrom->GetId());
+        // Trigger the peer node to send a getblocks request for the
+        // next batch of inventory.
+        if (inv.hash == pfrom->hashContinue) {
+            // Bypass PushInventory, this must send even if
+            // redundant, and we want it right after the last block
+            // so they don't wait for other stuff first.
+            std::vector<CInv> vInv;
+            vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
+            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
+            pfrom->hashContinue.SetNull();
+        }
+    }
+}
 
-                    // disconnect node
-                    pfrom->fDisconnect = true;
-                    send = false;
-                }
-                // Avoid leaking prune-height by never sending blocks below the
-                // NODE_NETWORK_LIMITED threshold.
-                // Add two blocks buffer extension for possible races
-                if (send && !pfrom->fWhitelisted &&
-                    ((((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) ==
-                       NODE_NETWORK_LIMITED) &&
-                      ((pfrom->GetLocalServices() & NODE_NETWORK) !=
-                       NODE_NETWORK) &&
-                      (chainActive.Tip()->nHeight - mi->second->nHeight >
-                       (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
-                    LogPrint(BCLog::NET,
-                             "Ignore block request below NODE_NETWORK_LIMITED "
-                             "threshold from peer=%d\n",
-                             pfrom->GetId());
+static void ProcessGetData(const Config &config, CNode *pfrom,
+                           CConnman *connman,
+                           const std::atomic<bool> &interruptMsgProc) {
+    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
+    std::vector<CInv> vNotFound;
+    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+    {
+        LOCK(cs_main);
 
-                    // disconnect node and prevent it from stalling (would
-                    // otherwise wait for the missing block)
-                    pfrom->fDisconnect = true;
-                    send = false;
-                }
-                // Pruned nodes may have deleted the block, so check whether
-                // it's available before trying to send.
-                if (send && (mi->second->nStatus.hasData())) {
-                    std::shared_ptr<const CBlock> pblock;
-                    if (a_recent_block && a_recent_block->GetHash() ==
-                                              (*mi).second->GetBlockHash()) {
-                        pblock = a_recent_block;
-                    } else {
-                        // Send block from disk
-                        std::shared_ptr<CBlock> pblockRead =
-                            std::make_shared<CBlock>();
-                        if (!ReadBlockFromDisk(*pblockRead, (*mi).second,
-                                               config))
-                            assert(!"cannot load block from disk");
-                        pblock = pblockRead;
-                    }
+        while (it != pfrom->vRecvGetData.end() && it->type == MSG_TX) {
+            if (interruptMsgProc) {
+                return;
+            }
+            // Don't bother if send buffer is too full to respond anyway.
+            if (pfrom->fPauseSend) {
+                break;
+            }
 
-                    if (inv.type == MSG_BLOCK) {
-                        connman->PushMessage(
-                            pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
-                    } else if (inv.type == MSG_FILTERED_BLOCK) {
-                        bool sendMerkleBlock = false;
-                        CMerkleBlock merkleBlock;
-                        {
-                            LOCK(pfrom->cs_filter);
-                            if (pfrom->pfilter) {
-                                sendMerkleBlock = true;
-                                merkleBlock =
-                                    CMerkleBlock(*pblock, *pfrom->pfilter);
-                            }
-                        }
-                        if (sendMerkleBlock) {
-                            connman->PushMessage(
-                                pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK,
-                                                     merkleBlock));
-                            // CMerkleBlock just contains hashes, so also push
-                            // any transactions in the block the client did not
-                            // see. This avoids hurting performance by
-                            // pointlessly requiring a round-trip. Note that
-                            // there is currently no way for a node to request
-                            // any single transactions we didn't send here -
-                            // they must either disconnect and retry or request
-                            // the full block. Thus, the protocol spec specified
-                            // allows for us to provide duplicate txn here,
-                            // however we MUST always provide at least what the
-                            // remote peer needs.
-                            typedef std::pair<unsigned int, uint256> PairType;
-                            for (PairType &pair : merkleBlock.vMatchedTxn) {
-                                connman->PushMessage(
-                                    pfrom,
-                                    msgMaker.Make(NetMsgType::TX,
-                                                  *pblock->vtx[pair.first]));
-                            }
-                        }
-                        // else
-                        // no response
-                    } else if (inv.type == MSG_CMPCT_BLOCK) {
-                        // If a peer is asking for old blocks, we're almost
-                        // guaranteed they won't have a useful mempool to match
-                        // against a compact block, and we don't feel like
-                        // constructing the object for them, so instead we
-                        // respond with the full, non-compact block.
-                        int nSendFlags = 0;
-                        if (CanDirectFetch(consensusParams) &&
-                            mi->second->nHeight >=
-                                chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
-                            CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
-                            connman->PushMessage(
-                                pfrom, msgMaker.Make(nSendFlags,
-                                                     NetMsgType::CMPCTBLOCK,
-                                                     cmpctblock));
-                        } else {
-                            connman->PushMessage(
-                                pfrom,
-                                msgMaker.Make(nSendFlags, NetMsgType::BLOCK,
-                                              *pblock));
-                        }
-                    }
+            const CInv &inv = *it;
+            it++;
 
-                    // Trigger the peer node to send a getblocks request for the
-                    // next batch of inventory.
-                    if (inv.hash == pfrom->hashContinue) {
-                        // Bypass PushInventory, this must send even if
-                        // redundant, and we want it right after the last block
-                        // so they don't wait for other stuff first.
-                        std::vector<CInv> vInv;
-                        vInv.push_back(
-                            CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
-                        connman->PushMessage(
-                            pfrom, msgMaker.Make(NetMsgType::INV, vInv));
-                        pfrom->hashContinue.SetNull();
-                    }
-                }
-            } else if (inv.type == MSG_TX) {
-                // Send stream from relay memory
-                bool push = false;
-                auto mi = mapRelay.find(inv.hash);
-                int nSendFlags = 0;
-                if (mi != mapRelay.end()) {
+            // Send stream from relay memory
+            bool push = false;
+            auto mi = mapRelay.find(inv.hash);
+            int nSendFlags = 0;
+            if (mi != mapRelay.end()) {
+                connman->PushMessage(
+                    pfrom,
+                    msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+                push = true;
+            } else if (pfrom->timeLastMempoolReq) {
+                auto txinfo = g_mempool.info(inv.hash);
+                // To protect privacy, do not answer getdata using the
+                // mempool when that TX couldn't have been INVed in reply to
+                // a MEMPOOL request.
+                if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                     connman->PushMessage(
                         pfrom,
-                        msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+                        msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                     push = true;
-                } else if (pfrom->timeLastMempoolReq) {
-                    auto txinfo = g_mempool.info(inv.hash);
-                    // To protect privacy, do not answer getdata using the
-                    // mempool when that TX couldn't have been INVed in reply to
-                    // a MEMPOOL request.
-                    if (txinfo.tx &&
-                        txinfo.nTime <= pfrom->timeLastMempoolReq) {
-                        connman->PushMessage(
-                            pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX,
-                                                 *txinfo.tx));
-                        push = true;
-                    }
-                }
-                if (!push) {
-                    vNotFound.push_back(inv);
-                }
             }
+            if (!push) {
+                vNotFound.push_back(inv);
+            }
 
             // Track requests for our stuff.
             GetMainSignals().Inventory(inv.hash);
+        }
 
+        if (it != pfrom->vRecvGetData.end()) {
+            const CInv &inv = *it;
+            it++;
             if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
                 inv.type == MSG_CMPCT_BLOCK) {
-                break;
+                ProcessGetBlockData(config, pfrom, inv, connman,
+                                    interruptMsgProc);
             }
         }
-    }
+    } // release cs_main
 
     pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
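
Behaviourally, the refactor makes ProcessGetData serve the leading run of MSG_TX entries in vRecvGetData under cs_main, then hand at most one block-type inventory item to the new ProcessGetBlockData before erasing the processed entries; anything left over is picked up on the next pass. The following minimal standalone sketch illustrates only that dispatch order. It is not the net_processing code: InvType, Inv, ServeTx, ServeBlock, and ProcessGetDataSketch are simplified stand-ins invented for this example.

// Standalone illustration only -- simplified stand-ins, not the real
// Bitcoin types or the actual ProcessGetData implementation.
#include <deque>
#include <iostream>

enum class InvType { Tx, Block, FilteredBlock, CompactBlock };

struct Inv {
    InvType type;
    int id; // stand-in for the inventory hash
};

static bool IsBlockType(InvType t) {
    return t == InvType::Block || t == InvType::FilteredBlock ||
           t == InvType::CompactBlock;
}

static void ServeTx(const Inv &inv) {
    std::cout << "tx response for inv " << inv.id << "\n";
}

static void ServeBlock(const Inv &inv) {
    std::cout << "block response for inv " << inv.id << "\n";
}

// Mirrors the dispatch order of the refactored ProcessGetData: serve the
// leading run of transaction requests first, then consume at most one more
// entry, handing it to the block handler if it is a block-type request.
static void ProcessGetDataSketch(std::deque<Inv> &recvGetData) {
    auto it = recvGetData.begin();

    while (it != recvGetData.end() && it->type == InvType::Tx) {
        ServeTx(*it);
        ++it;
    }

    if (it != recvGetData.end()) {
        const Inv inv = *it;
        ++it;
        if (IsBlockType(inv.type)) {
            ServeBlock(inv);
        }
    }

    // Everything handled so far is dropped; the rest waits for the next pass.
    recvGetData.erase(recvGetData.begin(), it);
}

int main() {
    std::deque<Inv> queue{
        {InvType::Tx, 1}, {InvType::Tx, 2}, {InvType::Block, 3}, {InvType::Tx, 4}};
    ProcessGetDataSketch(queue);
    std::cout << queue.size() << " request(s) left for the next pass\n";
    return 0;
}

In this toy run the two leading transaction requests and the single block request are answered, while the trailing transaction request stays queued, which is the property the split is meant to guarantee: one expensive block response per pass cannot starve or reorder the cheap transaction responses queued behind it.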