diff --git a/src/blockencodings.h b/src/blockencodings.h
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -33,42 +33,44 @@
 public:
     // A BlockTransactionsRequest message
     uint256 blockhash;
-    std::vector<uint16_t> indexes;
+    std::vector<uint16_t> indices;
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(blockhash);
-        uint64_t indexes_size = (uint64_t)indexes.size();
-        READWRITE(COMPACTSIZE(indexes_size));
+        uint64_t indices_size = uint64_t(indices.size());
+        READWRITE(COMPACTSIZE(indices_size));
         if (ser_action.ForRead()) {
             size_t i = 0;
-            while (indexes.size() < indexes_size) {
-                indexes.resize(
-                    std::min((uint64_t)(1000 + indexes.size()), indexes_size));
-                for (; i < indexes.size(); i++) {
-                    uint64_t index = 0;
-                    READWRITE(COMPACTSIZE(index));
-                    if (index > std::numeric_limits<uint16_t>::max())
+            while (indices.size() < indices_size) {
+                indices.resize(
+                    std::min(uint64_t(1000 + indices.size()), indices_size));
+                for (; i < indices.size(); i++) {
+                    uint64_t n = 0;
+                    READWRITE(COMPACTSIZE(n));
+                    if (n > std::numeric_limits<uint16_t>::max()) {
                         throw std::ios_base::failure(
                             "index overflowed 16 bits");
-                    indexes[i] = index;
+                    }
+                    indices[i] = n;
                 }
             }
 
             uint16_t offset = 0;
-            for (size_t j = 0; j < indexes.size(); j++) {
-                if (uint64_t(indexes[j]) + uint64_t(offset) >
-                    std::numeric_limits<uint16_t>::max())
-                    throw std::ios_base::failure("indexes overflowed 16 bits");
-                indexes[j] = indexes[j] + offset;
-                offset = indexes[j] + 1;
+            for (auto &index : indices) {
+                if (uint64_t(index) + uint64_t(offset) >
+                    std::numeric_limits<uint16_t>::max()) {
+                    throw std::ios_base::failure("indices overflowed 16 bits");
+                }
+                index = index + offset;
+                offset = index + 1;
             }
         } else {
-            for (size_t i = 0; i < indexes.size(); i++) {
+            for (size_t i = 0; i < indices.size(); i++) {
                 uint64_t index =
-                    indexes[i] - (i == 0 ? 0 : (indexes[i - 1] + 1));
+                    indices[i] - (i == 0 ? 0 : (indices[i - 1] + 1));
                 READWRITE(COMPACTSIZE(index));
             }
         }
@@ -83,7 +85,7 @@
 
     BlockTransactions() {}
     BlockTransactions(const BlockTransactionsRequest &req)
-        : blockhash(req.blockhash), txn(req.indexes.size()) {}
+        : blockhash(req.blockhash), txn(req.indices.size()) {}
 
     ADD_SERIALIZE_METHODS;
 
@@ -96,12 +98,14 @@
             size_t i = 0;
             while (txn.size() < txn_size) {
                 txn.resize(std::min((uint64_t)(1000 + txn.size()), txn_size));
-                for (; i < txn.size(); i++)
+                for (; i < txn.size(); i++) {
                     READWRITE(REF(TransactionCompressor(txn[i])));
+                }
             }
         } else {
-            for (size_t i = 0; i < txn.size(); i++)
+            for (size_t i = 0; i < txn.size(); i++) {
                 READWRITE(REF(TransactionCompressor(txn[i])));
+            }
         }
     }
 };
@@ -120,8 +124,9 @@
     inline void SerializationOp(Stream &s, Operation ser_action) {
         uint64_t idx = index;
         READWRITE(COMPACTSIZE(idx));
-        if (idx > std::numeric_limits<uint16_t>::max())
+        if (idx > std::numeric_limits<uint16_t>::max()) {
             throw std::ios_base::failure("index overflowed 16-bits");
+        }
         index = idx;
         READWRITE(REF(TransactionCompressor(tx)));
     }
@@ -179,7 +184,7 @@
         if (ser_action.ForRead()) {
             size_t i = 0;
             while (shorttxids.size() < shorttxids_size) {
-                shorttxids.resize(std::min((uint64_t)(1000 + shorttxids.size()),
+                shorttxids.resize(std::min(uint64_t(1000 + shorttxids.size()),
                                            shorttxids_size));
                 for (; i < shorttxids.size(); i++) {
                     uint32_t lsb = 0;
@@ -193,9 +198,9 @@
                 }
             }
         } else {
-            for (size_t i = 0; i < shorttxids.size(); i++) {
-                uint32_t lsb = shorttxids[i] & 0xffffffff;
-                uint16_t msb = (shorttxids[i] >> 32) & 0xffff;
+            for (uint64_t shortid : shorttxids) {
+                uint32_t lsb = shortid & 0xffffffff;
+                uint16_t msb = (shortid >> 32) & 0xffff;
                 READWRITE(lsb);
                 READWRITE(msb);
             }
@@ -203,13 +208,15 @@
 
         READWRITE(prefilledtxn);
 
-        if (ser_action.ForRead()) FillShortTxIDSelector();
+        if (ser_action.ForRead()) {
+            FillShortTxIDSelector();
+        }
     }
 };
 
 class PartiallyDownloadedBlock {
 protected:
-    std::vector<CTransactionRef> txn_available;
+    std::vector<CTransactionRef> txns_available;
     size_t prefilled_count = 0, mempool_count = 0, extra_count = 0;
     CTxMemPool *pool;
     const Config *config;
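The first hunk above is the differential index encoding used by BlockTransactionsRequest::SerializationOp: each index goes over the wire as its gap from the previous index plus one, and the absolute positions are rebuilt on read by accumulating a running offset. A minimal standalone sketch of the transform (illustrative function names, no COMPACTSIZE framing and no 16-bit overflow checks):

#include <cstdint>
#include <vector>

// Encode absolute indices, e.g. {0, 1, 3, 4}, as the deltas {0, 0, 1, 0},
// mirroring the write branch of SerializationOp.
std::vector<uint64_t> EncodeDifferential(const std::vector<uint16_t> &indices) {
    std::vector<uint64_t> deltas;
    deltas.reserve(indices.size());
    for (size_t i = 0; i < indices.size(); i++) {
        deltas.push_back(indices[i] - (i == 0 ? 0 : (indices[i - 1] + 1)));
    }
    return deltas;
}

// Rebuild {0, 1, 3, 4} from {0, 0, 1, 0} by accumulating an offset, mirroring
// the read branch.
std::vector<uint16_t> DecodeDifferential(const std::vector<uint64_t> &deltas) {
    std::vector<uint16_t> indices;
    indices.reserve(deltas.size());
    uint16_t offset = 0;
    for (uint64_t delta : deltas) {
        uint16_t index = uint16_t(delta) + offset;
        indices.push_back(index);
        offset = index + 1;
    }
    return indices;
}

Because every delta is non-negative, the decoded indices come out strictly increasing, which is why the read path only needs to check the running sum against the 16-bit limit to reject malformed requests.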
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -48,35 +48,44 @@
 ReadStatus PartiallyDownloadedBlock::InitData(
     const CBlockHeaderAndShortTxIDs &cmpctblock,
-    const std::vector<std::pair<uint256, CTransactionRef>> &extra_txn) {
+    const std::vector<std::pair<uint256, CTransactionRef>> &extra_txns) {
     if (cmpctblock.header.IsNull() ||
-        (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
+        (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty())) {
         return READ_STATUS_INVALID;
+    }
 
     if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() >
-        config->GetMaxBlockSize() / MIN_TRANSACTION_SIZE)
+        config->GetMaxBlockSize() / MIN_TRANSACTION_SIZE) {
         return READ_STATUS_INVALID;
+    }
 
-    assert(header.IsNull() && txn_available.empty());
+    assert(header.IsNull() && txns_available.empty());
     header = cmpctblock.header;
-    txn_available.resize(cmpctblock.BlockTxCount());
+    txns_available.resize(cmpctblock.BlockTxCount());
 
     int32_t lastprefilledindex = -1;
     for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
-        if (cmpctblock.prefilledtxn[i].tx->IsNull()) return READ_STATUS_INVALID;
+        auto &prefilledtxn = cmpctblock.prefilledtxn[i];
+        if (prefilledtxn.tx->IsNull()) {
+            return READ_STATUS_INVALID;
+        }
 
         // index is a uint16_t, so can't overflow here.
-        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1;
-        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
+        lastprefilledindex += prefilledtxn.index + 1;
+        if (lastprefilledindex > std::numeric_limits<uint16_t>::max()) {
             return READ_STATUS_INVALID;
-        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
+        }
+
+        if (uint32_t(lastprefilledindex) > cmpctblock.shorttxids.size() + i) {
             // If we are inserting a tx at an index greater than our full list
             // of shorttxids plus the number of prefilled txn we've inserted,
             // then we have txn for which we have neither a prefilled txn or a
             // shorttxid!
             return READ_STATUS_INVALID;
         }
-        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
+
+        txns_available[lastprefilledindex] = prefilledtxn.tx;
     }
+
     prefilled_count = cmpctblock.prefilledtxn.size();
 
     // Calculate map of txids -> positions and check mempool to see what we have
@@ -87,8 +96,10 @@
         cmpctblock.shorttxids.size());
     uint16_t index_offset = 0;
     for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
-        while (txn_available[i + index_offset])
+        while (txns_available[i + index_offset]) {
             index_offset++;
+        }
+
         shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
         // To determine the chance that the number of entries in a bucket
         // exceeds N, we use the fact that the number of elements in a single
@@ -102,9 +113,11 @@
         // 16000, allowing 12 elements per bucket should only fail once per ~1
        // million block transfers (per peer and connection).
         if (shorttxids.bucket_size(
-                shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
+                shorttxids.bucket(cmpctblock.shorttxids[i])) > 12) {
             return READ_STATUS_FAILED;
+        }
     }
+
     // TODO: in the shortid-collision case, we should instead request both
     // transactions which collided. Falling back to full-block-request here is
     // overkill.
@@ -113,19 +126,18 @@
         return READ_STATUS_FAILED;
     }
 
-    std::vector<bool> have_txn(txn_available.size());
+    std::vector<bool> have_txn(txns_available.size());
     {
         LOCK(pool->cs);
         const std::vector<std::pair<uint256, CTxMemPool::txiter>> &vTxHashes =
             pool->vTxHashes;
-        for (size_t i = 0; i < vTxHashes.size(); i++) {
-            uint64_t shortid = cmpctblock.GetShortID(vTxHashes[i].first);
+        for (auto txHash : vTxHashes) {
+            uint64_t shortid = cmpctblock.GetShortID(txHash.first);
             std::unordered_map<uint64_t, uint16_t>::iterator idit =
                 shorttxids.find(shortid);
             if (idit != shorttxids.end()) {
                 if (!have_txn[idit->second]) {
-                    txn_available[idit->second] =
-                        vTxHashes[i].second->GetSharedTx();
+                    txns_available[idit->second] = txHash.second->GetSharedTx();
                     have_txn[idit->second] = true;
                     mempool_count++;
                 } else {
@@ -133,8 +145,8 @@
                     // request it. This should be rare enough that the extra
                     // bandwidth doesn't matter, but eating a round-trip due to
                     // FillBlock failure would be annoying.
-                    if (txn_available[idit->second]) {
-                        txn_available[idit->second].reset();
+                    if (txns_available[idit->second]) {
+                        txns_available[idit->second].reset();
                         mempool_count--;
                     }
                 }
@@ -142,17 +154,19 @@
             // Though ideally we'd continue scanning for the
             // two-txn-match-shortid case, the performance win of an early exit
             // here is too good to pass up and worth the extra risk.
-            if (mempool_count == shorttxids.size()) break;
+            if (mempool_count == shorttxids.size()) {
+                break;
+            }
         }
     }
 
-    for (size_t i = 0; i < extra_txn.size(); i++) {
-        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
+    for (auto &extra_txn : extra_txns) {
+        uint64_t shortid = cmpctblock.GetShortID(extra_txn.first);
         std::unordered_map<uint64_t, uint16_t>::iterator idit =
             shorttxids.find(shortid);
         if (idit != shorttxids.end()) {
             if (!have_txn[idit->second]) {
-                txn_available[idit->second] = extra_txn[i].second;
+                txns_available[idit->second] = extra_txn.second;
                 have_txn[idit->second] = true;
                 mempool_count++;
                 extra_count++;
@@ -161,12 +175,12 @@
                 // just request it. This should be rare enough that the extra
                 // bandwidth doesn't matter, but eating a round-trip due to
                 // FillBlock failure would be annoying. Note that we dont want
-                // duplication between extra_txn and mempool to trigger this
+                // duplication between extra_txns and mempool to trigger this
                 // case, so we compare hashes first.
-                if (txn_available[idit->second] &&
-                    txn_available[idit->second]->GetHash() !=
-                        extra_txn[i].second->GetHash()) {
-                    txn_available[idit->second].reset();
+                if (txns_available[idit->second] &&
+                    txns_available[idit->second]->GetHash() !=
+                        extra_txn.second->GetHash()) {
+                    txns_available[idit->second].reset();
                     mempool_count--;
                     extra_count--;
                 }
@@ -176,7 +190,9 @@
         // Though ideally we'd continue scanning for the two-txn-match-shortid
         // case, the performance win of an early exit here is too good to pass
        // up and worth the extra risk.
-        if (mempool_count == shorttxids.size()) break;
+        if (mempool_count == shorttxids.size()) {
+            break;
+        }
     }
 
     LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for "
@@ -189,8 +205,8 @@
 
 bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
     assert(!header.IsNull());
-    assert(index < txn_available.size());
-    return txn_available[index] ? true : false;
+    assert(index < txns_available.size());
+    return txns_available[index] ? true : false;
 }
 
 ReadStatus PartiallyDownloadedBlock::FillBlock(
@@ -198,23 +214,28 @@
     assert(!header.IsNull());
     uint256 hash = header.GetHash();
     block = header;
-    block.vtx.resize(txn_available.size());
+    block.vtx.resize(txns_available.size());
 
     size_t tx_missing_offset = 0;
-    for (size_t i = 0; i < txn_available.size(); i++) {
-        if (!txn_available[i]) {
-            if (vtx_missing.size() <= tx_missing_offset)
+    for (size_t i = 0; i < txns_available.size(); i++) {
+        auto &txn_available = txns_available[i];
+        if (!txn_available) {
+            if (vtx_missing.size() <= tx_missing_offset) {
                 return READ_STATUS_INVALID;
+            }
             block.vtx[i] = vtx_missing[tx_missing_offset++];
-        } else
-            block.vtx[i] = std::move(txn_available[i]);
+        } else {
+            block.vtx[i] = std::move(txn_available);
+        }
     }
 
     // Make sure we can't call FillBlock again.
     header.SetNull();
-    txn_available.clear();
+    txns_available.clear();
 
-    if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID;
+    if (vtx_missing.size() != tx_missing_offset) {
+        return READ_STATUS_INVALID;
+    }
 
     CValidationState state;
     if (!CheckBlock(*config, block, state)) {
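The bucket-size check in InitData is what backs the figure quoted in the comment above ("allowing 12 elements per bucket should only fail once per ~1 million block transfers"). A back-of-the-envelope check of that number, under the simplifying assumption that each of S short IDs lands independently in one of S buckets (binomial occupancy, the worst case for std::unordered_map's default max load factor of 1.0) and using a union bound over the S buckets; this is a sketch, not code from the patch:

#include <cmath>
#include <cstdio>

int main() {
    const int S = 16000; // assumed upper bound on transactions per block
    const int N = 12;    // per-bucket limit behind the READ_STATUS_FAILED check
    const double p = 1.0 / S;

    // P(Binomial(S, p) <= N), accumulated via the pmf recurrence.
    double pmf = std::pow(1.0 - p, S); // P(X = 0)
    double cdf = pmf;
    for (int k = 1; k <= N; k++) {
        pmf *= (double(S - k + 1) / k) * (p / (1.0 - p));
        cdf += pmf;
    }

    // Union bound over all S buckets; prints a value on the order of 1e-6,
    // i.e. roughly once per million block transfers.
    std::printf("P(some bucket > %d entries) <= %g\n", N, S * (1.0 - cdf));
    return 0;
}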
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1361,8 +1361,8 @@
                                   const BlockTransactionsRequest &req,
                                   CNode *pfrom, CConnman &connman) {
     BlockTransactions resp(req);
-    for (size_t i = 0; i < req.indexes.size(); i++) {
-        if (req.indexes[i] >= block.vtx.size()) {
+    for (size_t i = 0; i < req.indices.size(); i++) {
+        if (req.indices[i] >= block.vtx.size()) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "out-of-bound-tx-index");
             LogPrintf(
@@ -1370,7 +1370,7 @@
                 pfrom->id);
             return;
         }
-        resp.txn[i] = block.vtx[req.indexes[i]];
+        resp.txn[i] = block.vtx[req.indices[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
@@ -2399,7 +2399,7 @@
                     pfrom->id);
                 return true;
             } else if (status == READ_STATUS_FAILED) {
-                // Duplicate txindexes, the block is now in-flight, so
+                // Duplicate txindices, the block is now in-flight, so
                 // just request it.
                 std::vector<CInv> vInv(1);
                 vInv[0] =
@@ -2415,10 +2415,10 @@
                 BlockTransactionsRequest req;
                 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                     if (!partialBlock.IsTxAvailable(i)) {
-                        req.indexes.push_back(i);
+                        req.indices.push_back(i);
                     }
                 }
-                if (req.indexes.empty()) {
+                if (req.indices.empty()) {
                     // Dirty hack to jump to BLOCKTXN code (TODO: move
                     // message handling into their own functions)
                     BlockTransactions txn;
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -367,11 +367,11 @@
 BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) {
     BlockTransactionsRequest req1;
     req1.blockhash = InsecureRand256();
-    req1.indexes.resize(4);
-    req1.indexes[0] = 0;
-    req1.indexes[1] = 1;
-    req1.indexes[2] = 3;
-    req1.indexes[3] = 4;
+    req1.indices.resize(4);
+    req1.indices[0] = 0;
+    req1.indices[1] = 1;
+    req1.indices[2] = 3;
+    req1.indices[3] = 4;
 
     CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
     stream << req1;
@@ -380,11 +380,11 @@
     stream >> req2;
 
     BOOST_CHECK_EQUAL(req1.blockhash.ToString(), req2.blockhash.ToString());
-    BOOST_CHECK_EQUAL(req1.indexes.size(), req2.indexes.size());
-    BOOST_CHECK_EQUAL(req1.indexes[0], req2.indexes[0]);
-    BOOST_CHECK_EQUAL(req1.indexes[1], req2.indexes[1]);
-    BOOST_CHECK_EQUAL(req1.indexes[2], req2.indexes[2]);
-    BOOST_CHECK_EQUAL(req1.indexes[3], req2.indexes[3]);
+    BOOST_CHECK_EQUAL(req1.indices.size(), req2.indices.size());
+    BOOST_CHECK_EQUAL(req1.indices[0], req2.indices[0]);
+    BOOST_CHECK_EQUAL(req1.indices[1], req2.indices[1]);
+    BOOST_CHECK_EQUAL(req1.indices[2], req2.indices[2]);
+    BOOST_CHECK_EQUAL(req1.indices[3], req2.indices[3]);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
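A possible follow-up to TransactionsRequestSerializationTest (a sketch only, not part of this change, with a hypothetical test name) would pin down the wire format that the differential encoding produces: for the indices {0, 1, 3, 4} used above, the serialized request should be the 32-byte block hash followed by the compactsize count 4 and the deltas 0, 0, 1, 0, one byte each.

BOOST_AUTO_TEST_CASE(TransactionsRequestWireFormatTest) {
    BlockTransactionsRequest req;
    req.blockhash = InsecureRand256();
    req.indices = {0, 1, 3, 4};

    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << req;

    // 32-byte hash + 1-byte count + 4 one-byte differential indices.
    BOOST_CHECK_EQUAL(stream.size(), 37u);
    const uint8_t expected_tail[5] = {4, 0, 0, 1, 0};
    for (size_t i = 0; i < 5; i++) {
        BOOST_CHECK_EQUAL(uint8_t(stream[32 + i]), expected_tail[i]);
    }
}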