Changeset View
Standalone View
src/net_processing.cpp
    // ... (first 4,903 lines not shown) ...
            pto->nPingNonceSent = 0;
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
        }
    }
    {
        LOCK(cs_main);
-       CNodeState &state = *State(pto->GetId());
+       CNodeState *state = State(pto->GetId());
        // Address refresh broadcast
        int64_t nNow = GetTimeMicros();
        auto current_time = GetTime<std::chrono::microseconds>();
        if (pto->IsAddrRelayPeer() &&
            !::ChainstateActive().IsInitialBlockDownload() &&
            pto->m_next_local_addr_send < current_time) {
        // ... (48 lines not shown) ...
        // Start block sync
        if (pindexBestHeader == nullptr) {
            pindexBestHeader = ::ChainActive().Tip();
        }
        // Download if this is a nice peer, or we have no nice peers and this
        // one might do.
-       bool fFetch = state.fPreferredDownload ||
+       bool fFetch = state->fPreferredDownload ||
                      (nPreferredDownload == 0 && !pto->fClient &&
                       !pto->IsAddrFetchConn());
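        // In other words: fetch blocks from this peer if it is a
        // preferred-download peer, or if we have no preferred-download peers
        // at all and this one at least serves blocks (!fClient) and is not a
        // short-lived addr-fetch connection.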
-       if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
+       if (!state->fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
            // Only actively request headers from a single peer, unless we're
            // close to today.
            if ((nSyncStarted == 0 && fFetch) ||
                pindexBestHeader->GetBlockTime() >
                    GetAdjustedTime() - 24 * 60 * 60) {
-               state.fSyncStarted = true;
-               state.nHeadersSyncTimeout =
+               state->fSyncStarted = true;
+               state->nHeadersSyncTimeout =
                    GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                    HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER *
                        (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) /
                        (consensusParams.nPowTargetSpacing);
                nSyncStarted++;
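                // Worked example for the timeout above (constant values taken
                // from upstream Bitcoin Core and assumed, not verified, for
                // this tree): with HEADERS_DOWNLOAD_TIMEOUT_BASE =
                // 15 * 60 * 1000000 (15 minutes in microseconds),
                // HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000 (1 ms per
                // expected header) and 600 s target spacing, a best header one
                // year behind (~31.5M s / 600 s ~= 52,560 headers) yields
                // roughly 15 minutes + 53 seconds before the sync times out.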
                const CBlockIndex *pindexStart = pindexBestHeader;
                /**
                 * If possible, start at the block preceding the currently best
                // ... (26 lines not shown) ...
            // hashes we're relaying, and our peer wants headers announcements,
            // then find the first header not yet known to our peer but that
            // would connect, and send. If no header would connect, or if we
            // have too many blocks, or if the peer doesn't want headers, just
            // add all to the inv queue.
            LOCK(pto->cs_inventory);
            std::vector<CBlock> vHeaders;
            bool fRevertToInv =
-               ((!state.fPreferHeaders &&
-                 (!state.fPreferHeaderAndIDs ||
+               ((!state->fPreferHeaders &&
+                 (!state->fPreferHeaderAndIDs ||
                  pto->vBlockHashesToAnnounce.size() > 1)) ||
                 pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
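            // Concretely: revert to an inv when the peer does not want header
            // announcements and either does not want header-and-ids or has
            // more than one block queued (header-and-ids can carry only one),
            // or when the backlog exceeds MAX_BLOCKS_TO_ANNOUNCE (8 in
            // upstream Bitcoin Core; assumed here).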
            // last header queued for delivery
            const CBlockIndex *pBestIndex = nullptr;
            // ensure pindexBestKnownBlock is up-to-date
            ProcessBlockAvailability(pto->GetId());
            if (!fRevertToInv) {
                // ... (22 lines not shown) ...
                        // situation by reverting to an inv.
                        fRevertToInv = true;
                        break;
                    }
                    pBestIndex = pindex;
                    if (fFoundStartingHeader) {
                        // add this to the headers message
                        vHeaders.push_back(pindex->GetBlockHeader());
-                   } else if (PeerHasHeader(&state, pindex)) {
+                   } else if (PeerHasHeader(state, pindex)) {
                        // Keep looking for the first new block.
                        continue;
                    } else if (pindex->pprev == nullptr ||
-                              PeerHasHeader(&state, pindex->pprev)) {
+                              PeerHasHeader(state, pindex->pprev)) {
                        // Peer doesn't have this header but they do have the
                        // prior one. Start sending headers.
                        fFoundStartingHeader = true;
                        vHeaders.push_back(pindex->GetBlockHeader());
                    } else {
                        // Peer doesn't have this header or the prior one --
                        // nothing will connect, so bail out.
                        fRevertToInv = true;
                        break;
                    }
                }
            }
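            // Net effect of the walk above: vHeaders is a contiguous run of
            // headers starting at the first one the peer lacks but whose
            // parent it already has; a reorged, non-connecting, or gapped
            // entry makes us fall back to an inv instead.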
            if (!fRevertToInv && !vHeaders.empty()) {
-               if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
+               if (vHeaders.size() == 1 && state->fPreferHeaderAndIDs) {
                    // We only send up to 1 block as header-and-ids, as more
                    // than that probably means we're doing an initial-ish-sync
                    // or they're slow.
                    LogPrint(BCLog::NET,
                             "%s sending header-and-ids %s to peer=%d\n",
                             __func__, vHeaders.front().GetHash().ToString(),
                             pto->GetId());
                    // ... (19 lines not shown) ...
                                                consensusParams);
                        assert(ret);
                        CBlockHeaderAndShortTxIDs cmpctblock(block);
                        m_connman.PushMessage(
                            pto,
                            msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                          cmpctblock));
                    }
-                   state.pindexBestHeaderSent = pBestIndex;
-               } else if (state.fPreferHeaders) {
+                   state->pindexBestHeaderSent = pBestIndex;
+               } else if (state->fPreferHeaders) {
                    if (vHeaders.size() > 1) {
                        LogPrint(BCLog::NET,
                                 "%s: %u headers, range (%s, %s), to peer=%d\n",
                                 __func__, vHeaders.size(),
                                 vHeaders.front().GetHash().ToString(),
                                 vHeaders.back().GetHash().ToString(),
                                 pto->GetId());
                    } else {
                        LogPrint(BCLog::NET,
                                 "%s: sending header %s to peer=%d\n", __func__,
                                 vHeaders.front().GetHash().ToString(),
                                 pto->GetId());
                    }
                    m_connman.PushMessage(
                        pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
-                   state.pindexBestHeaderSent = pBestIndex;
+                   state->pindexBestHeaderSent = pBestIndex;
                } else {
                    fRevertToInv = true;
                }
            }
            if (fRevertToInv) {
                // If falling back to using an inv, just try to inv the tip.
                // The last entry in vBlockHashesToAnnounce was our tip at some
                // point in the past.
                // ... (11 lines not shown) ...
                        LogPrint(
                            BCLog::NET,
                            "Announcing block %s not on main chain (tip=%s)\n",
                            hashToAnnounce.ToString(),
                            ::ChainActive().Tip()->GetBlockHash().ToString());
                    }
                    // If the peer's chain has this block, don't inv it back.
-                   if (!PeerHasHeader(&state, pindex)) {
+                   if (!PeerHasHeader(state, pindex)) {
                        pto->vInventoryBlockToSend.push_back(hashToAnnounce);
                        LogPrint(BCLog::NET,
                                 "%s: sending inv peer=%d hash=%s\n", __func__,
                                 pto->GetId(), hashToAnnounce.ToString());
                    }
                }
            }
            pto->vBlockHashesToAnnounce.clear();
        // ... (179 lines not shown) ...
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
        }
        // Detect whether we're stalling
        current_time = GetTime<std::chrono::microseconds>();
        // nNow is the current system time (GetTimeMicros is not mockable) and
        // should be replaced by the mockable current_time eventually
        nNow = GetTimeMicros();
-       if (state.nStallingSince &&
-           state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
+       if (state->nStallingSince &&
+           state->nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
            // Stalling only triggers when the block download window cannot
            // move. During normal steady state, the download window should be
            // much larger than the to-be-downloaded set of blocks, so
            // disconnection should only happen during initial block download.
            LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                      pto->GetId());
            pto->fDisconnect = true;
            return true;
        }
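        // Note on units for the check above: nNow is in microseconds, so the
        // 1000000 multiplier converts BLOCK_STALLING_TIMEOUT (in seconds; 2 s
        // in upstream Bitcoin Core, assumed here) to the same scale. A peer is
        // only dropped after it has kept the download window pinned for that
        // entire interval.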
        // In case there is a block that has been in flight from this peer for
        // 2 + 0.5 * N times the block interval (with N the number of peers from
        // which we're downloading validated blocks), disconnect due to timeout.
        // We compensate for other peers to prevent killing off peers due to our
        // own downstream link being saturated. We only count validated
        // in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
-       if (state.vBlocksInFlight.size() > 0) {
-           QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
+       if (state->vBlocksInFlight.size() > 0) {
+           QueuedBlock &queuedBlock = state->vBlocksInFlight.front();
            int nOtherPeersWithValidatedDownloads =
                nPeersWithValidatedDownloads -
-               (state.nBlocksInFlightValidHeaders > 0);
-           if (nNow > state.nDownloadingSince +
+               (state->nBlocksInFlightValidHeaders > 0);
+           if (nNow > state->nDownloadingSince +
                           consensusParams.nPowTargetSpacing *
                               (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                                BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                                    nOtherPeersWithValidatedDownloads)) {
                LogPrintf("Timeout downloading block %s from peer=%d, "
                          "disconnecting\n",
                          queuedBlock.hash.ToString(), pto->GetId());
                pto->fDisconnect = true;
                return true;
            }
        }
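        // Units for the deadline above: the two timeout constants are, per the
        // upstream definitions (assumed to hold here), expressed in millionths
        // of a block interval, so nPowTargetSpacing (in seconds) times the
        // constant lands directly in microseconds, matching nNow. The boolean
        // (state->nBlocksInFlightValidHeaders > 0) promotes to 0 or 1, which
        // subtracts this peer itself from the downloader count N.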
        // Check for headers sync timeouts
-       if (state.fSyncStarted &&
-           state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
+       if (state->fSyncStarted &&
+           state->nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
            // Detect whether this is a stalling initial-headers-sync peer
            if (pindexBestHeader->GetBlockTime() <=
                GetAdjustedTime() - 24 * 60 * 60) {
-               if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 &&
-                   (nPreferredDownload - state.fPreferredDownload >= 1)) {
+               if (nNow > state->nHeadersSyncTimeout && nSyncStarted == 1 &&
+                   (nPreferredDownload - state->fPreferredDownload >= 1)) {
                    // Disconnect a (non-whitelisted) peer if it is our only
                    // sync peer, and we have others we could be using instead.
                    // Note: If all our peers are inbound, then we won't
                    // disconnect our sync peer for stalling; we have bigger
                    // problems if we can't get any outbound peers.
                    if (!pto->HasPermission(PF_NOBAN)) {
                        LogPrintf("Timeout downloading headers from peer=%d, "
                                  "disconnecting\n",
                                  pto->GetId());
                        pto->fDisconnect = true;
                        return true;
                    } else {
                        LogPrintf(
                            "Timeout downloading headers from whitelisted "
                            "peer=%d, not disconnecting\n",
                            pto->GetId());
                        // Reset the headers sync state so that we have a chance
                        // to try downloading from a different peer. Note: this
                        // will also result in at least one more getheaders
                        // message being sent to this peer (eventually).
-                       state.fSyncStarted = false;
+                       state->fSyncStarted = false;
                        nSyncStarted--;
-                       state.nHeadersSyncTimeout = 0;
+                       state->nHeadersSyncTimeout = 0;
                    }
                }
            } else {
                // After we've caught up once, reset the timeout so we can't
                // trigger disconnect later.
-               state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
+               state->nHeadersSyncTimeout =
+                   std::numeric_limits<int64_t>::max();
            }
        }
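        // Note on the headers-sync check above: in
        // (nPreferredDownload - state->fPreferredDownload >= 1) the bool
        // promotes to 0 or 1, so the expression is true exactly when at least
        // one *other* peer is a preferred-download peer, i.e. we have a
        // plausible replacement before giving up on our only sync peer.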
        // Check that outbound peers have reasonable chains. GetTime() is used
        // by this anti-DoS logic so we can test it using mocktime.
        ConsiderEviction(*pto, GetTime());
        //
        // Message: getdata (blocks)
        //
        std::vector<CInv> vGetData;
        if (!pto->fClient &&
            ((fFetch && !pto->m_limited_node) ||
             !::ChainstateActive().IsInitialBlockDownload()) &&
-           state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+           state->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            std::vector<const CBlockIndex *> vToDownload;
            NodeId staller = -1;
            FindNextBlocksToDownload(pto->GetId(),
                                     MAX_BLOCKS_IN_TRANSIT_PER_PEER -
-                                        state.nBlocksInFlight,
+                                        state->nBlocksInFlight,
                                     vToDownload, staller, consensusParams);
            for (const CBlockIndex *pindex : vToDownload) {
                vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                MarkBlockAsInFlight(config, m_mempool, pto->GetId(),
                                    pindex->GetBlockHash(), consensusParams,
                                    pindex);
                LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                         pindex->GetBlockHash().ToString(), pindex->nHeight,
                         pto->GetId());
            }
-           if (state.nBlocksInFlight == 0 && staller != -1) {
+           if (state->nBlocksInFlight == 0 && staller != -1) {
                if (State(staller)->nStallingSince == 0) {
                    State(staller)->nStallingSince = nNow;
                    LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
                }
            }
        }
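        // Sketch of the flow above: FindNextBlocksToDownload fills vToDownload
        // with up to (MAX_BLOCKS_IN_TRANSIT_PER_PEER - nBlocksInFlight) blocks
        // this peer can usefully serve (the cap is 16 in upstream Bitcoin
        // Core; assumed here), and reports in `staller` the peer whose
        // in-flight block is holding the download window shut. That peer's
        // stall timer only starts once we have nothing left in flight from
        // the current peer.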
        //
        // Message: getdata (transactions)
        //
        // For robustness, expire old requests after a long timeout, so that we
        // can resume downloading transactions from a peer even if they were
        // unresponsive in the past. Eventually we should consider disconnecting
        // peers, but this is conservative.
-       if (state.m_tx_download.m_check_expiry_timer <= current_time) {
-           for (auto it = state.m_tx_download.m_tx_in_flight.begin();
-                it != state.m_tx_download.m_tx_in_flight.end();) {
+       if (state->m_tx_download.m_check_expiry_timer <= current_time) {
+           for (auto it = state->m_tx_download.m_tx_in_flight.begin();
+                it != state->m_tx_download.m_tx_in_flight.end();) {
                if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
                    LogPrint(BCLog::NET,
                             "timeout of inflight tx %s from peer=%d\n",
                             it->first.ToString(), pto->GetId());
-                   state.m_tx_download.m_tx_announced.erase(it->first);
-                   state.m_tx_download.m_tx_in_flight.erase(it++);
+                   state->m_tx_download.m_tx_announced.erase(it->first);
+                   state->m_tx_download.m_tx_in_flight.erase(it++);
                } else {
                    ++it;
                }
            }
            // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
            // so that we're not doing this for all peers at the same time.
-           state.m_tx_download.m_check_expiry_timer =
+           state->m_tx_download.m_check_expiry_timer =
                current_time + TX_EXPIRY_INTERVAL / 2 +
                GetRandMicros(TX_EXPIRY_INTERVAL);
        }
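        // Worked math for the reset above, assuming GetRandMicros(x) draws
        // uniformly from [0, x): the next sweep lands in
        // [now + TX_EXPIRY_INTERVAL / 2, now + 3 * TX_EXPIRY_INTERVAL / 2),
        // whose mean is now + TX_EXPIRY_INTERVAL -- hence "on average every
        // TX_EXPIRY_INTERVAL" -- while the jitter keeps the per-peer sweeps
        // from all firing at once.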
-       auto &tx_process_time = state.m_tx_download.m_tx_process_time;
+       auto &tx_process_time = state->m_tx_download.m_tx_process_time;
        while (!tx_process_time.empty() &&
               tx_process_time.begin()->first <= current_time &&
-              state.m_tx_download.m_tx_in_flight.size() <
+              state->m_tx_download.m_tx_in_flight.size() <
                   MAX_PEER_TX_IN_FLIGHT) {
            const TxId txid = tx_process_time.begin()->second;
            // Erase this entry from tx_process_time (it may be added back for
            // processing at a later time, see below)
            tx_process_time.erase(tx_process_time.begin());
            CInv inv(MSG_TX, txid);
            if (!AlreadyHaveTx(txid, m_mempool)) {
                // If this transaction was last requested more than 1 minute
                // ago, then request.
                const auto last_request_time = GetTxRequestTime(txid);
                if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
                    LogPrint(BCLog::NET, "Requesting %s peer=%d\n",
                             inv.ToString(), pto->GetId());
                    vGetData.push_back(inv);
                    if (vGetData.size() >= MAX_GETDATA_SZ) {
                        m_connman.PushMessage(
                            pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                        vGetData.clear();
                    }
                    UpdateTxRequestTime(txid, current_time);
-                   state.m_tx_download.m_tx_in_flight.emplace(txid,
-                                                              current_time);
+                   state->m_tx_download.m_tx_in_flight.emplace(txid,
+                                                               current_time);
                } else {
                    // This transaction is in flight from someone else; queue
                    // up processing to happen after the download times out
                    // (with a slight delay for inbound peers, to prefer
                    // requests to outbound peers).
                    const auto next_process_time = CalculateTxGetDataTime(
-                       txid, current_time, !state.fPreferredDownload);
+                       txid, current_time, !state->fPreferredDownload);
                    tx_process_time.emplace(next_process_time, txid);
                }
            } else {
                // We have already seen this transaction, no need to download.
-               state.m_tx_download.m_tx_announced.erase(txid);
-               state.m_tx_download.m_tx_in_flight.erase(txid);
+               state->m_tx_download.m_tx_announced.erase(txid);
+               state->m_tx_download.m_tx_in_flight.erase(txid);
            }
        }
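        // Summary of the scheduler above: due txids are popped from
        // tx_process_time; a txid we still lack and have not requested within
        // GETDATA_TX_INTERVAL is fetched, with at most MAX_PEER_TX_IN_FLIGHT
        // requests outstanding per peer (100 in upstream Bitcoin Core; assumed
        // here); a txid recently requested from someone else is requeued for
        // after that request should time out, with CalculateTxGetDataTime
        // adding an extra delay for non-preferred (typically inbound) peers;
        // and a txid we already have is simply dropped from the tracking maps.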
        if (!vGetData.empty()) {
            m_connman.PushMessage(pto,
                                  msgMaker.Make(NetMsgType::GETDATA, vGetData));
        }
    // ... (remaining 72 lines not shown) ...