diff --git a/src/blockencodings.h b/src/blockencodings.h --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -33,7 +33,7 @@ public: // A BlockTransactionsRequest message uint256 blockhash; - std::vector<uint16_t> indices; + std::vector<uint32_t> indices; ADD_SERIALIZE_METHODS; @@ -50,19 +50,19 @@ for (; i < indices.size(); i++) { uint64_t n = 0; READWRITE(COMPACTSIZE(n)); - if (indices[i] > std::numeric_limits<uint16_t>::max()) { + if (n > std::numeric_limits<uint32_t>::max()) { throw std::ios_base::failure( - "index overflowed 16 bits"); + "index overflowed 32 bits"); } indices[i] = n; } } - uint16_t offset = 0; + uint32_t offset = 0; for (auto &index : indices) { if (uint64_t(index) + uint64_t(offset) > - std::numeric_limits<uint16_t>::max()) { - throw std::ios_base::failure("indices overflowed 16 bits"); + std::numeric_limits<uint32_t>::max()) { + throw std::ios_base::failure("indices overflowed 32 bits"); } index = index + offset; offset = index + 1; @@ -97,7 +97,7 @@ if (ser_action.ForRead()) { size_t i = 0; while (txn.size() < txn_size) { - txn.resize(std::min((uint64_t)(1000 + txn.size()), txn_size)); + txn.resize(std::min(uint64_t(1000 + txn.size()), txn_size)); for (; i < txn.size(); i++) { READWRITE(REF(TransactionCompressor(txn[i]))); } @@ -115,19 +115,19 @@ struct PrefilledTransaction { // Used as an offset since last prefilled tx in CBlockHeaderAndShortTxIDs, // as a proper transaction-in-block-index in PartiallyDownloadedBlock - uint16_t index; + uint32_t index; CTransactionRef tx; ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> inline void SerializationOp(Stream &s, Operation ser_action) { - uint64_t idx = index; - READWRITE(COMPACTSIZE(idx)); - if (idx > std::numeric_limits<uint16_t>::max()) { - throw std::ios_base::failure("index overflowed 16-bits"); + uint64_t n = index; + READWRITE(COMPACTSIZE(n)); + if (n > std::numeric_limits<uint32_t>::max()) { + throw std::ios_base::failure("index overflowed 32-bits"); } - index = idx; + index = n; READWRITE(REF(TransactionCompressor(tx))); } }; diff --git a/src/blockencodings.cpp 
b/src/blockencodings.cpp --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -62,16 +62,16 @@ header = cmpctblock.header; txns_available.resize(cmpctblock.BlockTxCount()); - int32_t lastprefilledindex = -1; + int64_t lastprefilledindex = -1; for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) { auto &prefilledtxn = cmpctblock.prefilledtxn[i]; if (prefilledtxn.tx->IsNull()) { return READ_STATUS_INVALID; } - // index is a uint16_t, so can't overflow here. + // index is a uint32_t, so can't overflow here. lastprefilledindex += prefilledtxn.index + 1; - if (lastprefilledindex > std::numeric_limits<uint16_t>::max()) { + if (lastprefilledindex > std::numeric_limits<uint32_t>::max()) { return READ_STATUS_INVALID; } @@ -92,9 +92,9 @@ // (or don't). Because well-formed cmpctblock messages will have a // (relatively) uniform distribution of short IDs, any highly-uneven // distribution of elements can be safely treated as a READ_STATUS_FAILED. - std::unordered_map<uint64_t, uint16_t> shorttxids( + std::unordered_map<uint64_t, uint32_t> shorttxids( cmpctblock.shorttxids.size()); - uint16_t index_offset = 0; + uint32_t index_offset = 0; for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) { while (txns_available[i + index_offset]) { index_offset++; } @@ -133,7 +133,7 @@ pool->vTxHashes; for (auto txHash : vTxHashes) { uint64_t shortid = cmpctblock.GetShortID(txHash.first); - std::unordered_map<uint64_t, uint16_t>::iterator idit = + std::unordered_map<uint64_t, uint32_t>::iterator idit = shorttxids.find(shortid); if (idit != shorttxids.end()) { if (!have_txn[idit->second]) { @@ -162,7 +162,7 @@ for (auto &extra_txn : extra_txns) { uint64_t shortid = cmpctblock.GetShortID(extra_txn.first); - std::unordered_map<uint64_t, uint16_t>::iterator idit = + std::unordered_map<uint64_t, uint32_t>::iterator idit = shorttxids.find(shortid); if (idit != shorttxids.end()) { if (!have_txn[idit->second]) { diff --git a/test/functional/abc-p2p-compactblocks.py --- a/test/functional/abc-p2p-compactblocks.py +++ b/test/functional/abc-p2p-compactblocks.py @@ -72,14 +72,14 @@ 
self.block_heights = {} self.tip = None self.blocks = {} - self.excessive_block_size = 100 * ONE_MEGABYTE + self.excessive_block_size = 16 * ONE_MEGABYTE self.extra_args = [['-norelaypriority', '-whitelist=127.0.0.1', - '-limitancestorcount=9999', - '-limitancestorsize=9999', - '-limitdescendantcount=9999', - '-limitdescendantsize=9999', - '-maxmempool=999', + '-limitancestorcount=999999', + '-limitancestorsize=999999', + '-limitdescendantcount=999999', + '-limitdescendantsize=999999', + '-maxmempool=99999', "-excessiveblocksize=%d" % self.excessive_block_size]] @@ -106,7 +106,7 @@ tx = create_transaction(spend_tx, n, b"", value, script) return tx - def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0): + def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0, extra_txns=0): if self.tip == None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 @@ -160,6 +160,10 @@ # Add the transaction to the block self.add_transactions_to_block(block, [tx]) + # Add transaction until we reach the expected transaction count + for _ in range(extra_txns): + self.add_transactions_to_block(block, [get_base_transaction()]) + # If we have a block size requirement, just fill # the block until we get there current_block_size = len(block.serialize()) @@ -321,9 +325,10 @@ cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b1.sha256) - # Send a bigger block + # Send a large block with numerous transactions. peer.clear_block_data() - b2 = block(2, spend=out[1], block_size=self.excessive_block_size) + b2 = block(2, spend=out[1], extra_txns=70000, + block_size=self.excessive_block_size - 1000) yield accepted() # Checks the node forwards it via compact block @@ -334,19 +339,26 @@ cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b2.sha256) + # In order to avoid having to resend a ton of transactions, we invalidate + # b2, which will send all its transactions in the mempool. 
+ node.invalidateblock(node.getbestblockhash()) + # Let's send a compact block and see if the node accepts it. - # First, we generate the block and send all transaction to the mempool - b3 = block(3, spend=out[2], block_size=8 * ONE_MEGABYTE) - for i in range(1, len(b3.vtx)): - node.sendrawtransaction(ToHex(b3.vtx[i]), True) + # Let's modify b2 and use it so that we can reuse the mempool. + tx = b2.vtx[0] + tx.vout.append(CTxOut(0, CScript([random.randint(0, 256), OP_RETURN]))) + tx.rehash() + b2.vtx[0] = tx + b2.hashMerkleRoot = b2.calc_merkle_root() + b2.solve() # Now we create the compact block and send it comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(b3) + comp_block.initialize_from_block(b2) peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) # Check that compact block is received properly - assert(int(node.getbestblockhash(), 16) == b3.sha256) + assert(int(node.getbestblockhash(), 16) == b2.sha256) if __name__ == '__main__':