diff --git a/src/bloom.cpp b/src/bloom.cpp
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -124,8 +124,8 @@
     // appear in a block
     if (isFull) return true;
     if (isEmpty) return false;
-    const uint256 &txid = tx.GetHash();
-    if (contains(txid)) fFound = true;
+    const uint256 &txhash = tx.GetHash();
+    if (contains(txhash)) fFound = true;
 
     for (unsigned int i = 0; i < tx.vout.size(); i++) {
         const CTxOut &txout = tx.vout[i];
@@ -143,14 +143,14 @@
             if (data.size() != 0 && contains(data)) {
                 fFound = true;
                 if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL)
-                    insert(COutPoint(txid, i));
+                    insert(COutPoint(txhash, i));
                 else if ((nFlags & BLOOM_UPDATE_MASK) ==
                          BLOOM_UPDATE_P2PUBKEY_ONLY) {
                     txnouttype type;
                     std::vector<std::vector<uint8_t>> vSolutions;
                     if (Solver(txout.scriptPubKey, type, vSolutions) &&
                         (type == TX_PUBKEY || type == TX_MULTISIG))
-                        insert(COutPoint(txid, i));
+                        insert(COutPoint(txhash, i));
                 }
                 break;
             }
diff --git a/src/chain.h b/src/chain.h
--- a/src/chain.h
+++ b/src/chain.h
@@ -128,7 +128,7 @@
     /**
      * Only first tx is coinbase, 2 <= coinbase input script length <= 100,
-     * transactions valid, no duplicate txids, sigops, size, merkle root.
+     * transactions valid, no duplicate txhashes, sigops, size, merkle root.
      * Implies all parents are at least TREE but not necessarily TRANSACTIONS.
      * When all parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will
      * be set.
diff --git a/src/coins.h b/src/coins.h
--- a/src/coins.h
+++ b/src/coins.h
@@ -292,7 +292,7 @@
 // (pre-BIP34) cases.
 void AddCoins(CCoinsViewCache &cache, const CTransaction &tx, int nHeight);
 
-//! Utility function to find any unspent output with a given txid.
-const Coin &AccessByTxid(const CCoinsViewCache &cache, const uint256 &txid);
+//! Utility function to find any unspent output with a given txhash.
+const Coin &AccessByTxhash(const CCoinsViewCache &cache, const TxHash &txhash);
 
 #endif // BITCOIN_COINS_H
diff --git a/src/coins.cpp b/src/coins.cpp
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -124,13 +124,13 @@
 void AddCoins(CCoinsViewCache &cache, const CTransaction &tx, int nHeight) {
     bool fCoinbase = tx.IsCoinBase();
-    const uint256 &txid = tx.GetHash();
+    const TxHash &txhash = tx.GetHash();
     for (size_t i = 0; i < tx.vout.size(); ++i) {
         // Pass fCoinbase as the possible_overwrite flag to AddCoin, in order to
         // correctly deal with the pre-BIP30 occurrances of duplicate coinbase
         // transactions.
-        cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase),
-                      fCoinbase);
+        cache.AddCoin(COutPoint(txhash, i),
+                      Coin(tx.vout[i], nHeight, fCoinbase), fCoinbase);
     }
 }
@@ -323,8 +323,8 @@
 static const size_t MAX_OUTPUTS_PER_TX =
     MAX_TX_SIZE / ::GetSerializeSize(CTxOut(), SER_NETWORK, PROTOCOL_VERSION);
 
-const Coin &AccessByTxid(const CCoinsViewCache &view, const uint256 &txid) {
-    COutPoint iter(txid, 0);
+const Coin &AccessByTxhash(const CCoinsViewCache &view, const TxHash &txhash) {
+    COutPoint iter(txhash, 0);
     while (iter.n < MAX_OUTPUTS_PER_TX) {
         const Coin &alternate = view.AccessCoin(iter);
         if (!alternate.IsSpent()) {
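
The new signatures above (AddCoins binding a TxHash reference, AccessByTxhash taking a TxHash) rely on a TxHash type that this patch does not itself define; it is assumed to exist elsewhere in the tree, presumably alongside the transaction primitives given the #include "primitives/transaction.h" added to policy/fees.h further down, and the fact that a const TxHash & is bound directly to tx.GetHash() suggests GetHash() already returns one. A minimal sketch of what such a wrapper might look like — illustration only, the real definition in the codebase may differ:

    #include "uint256.h"

    // Hypothetical sketch: a thin, type-safe wrapper distinguishing the
    // double-SHA256 hash of the complete transaction (TxHash) from other
    // uint256 values such as block hashes or transaction IDs.
    struct TxHash : public uint256 {
        TxHash() : uint256() {}
        explicit TxHash(const uint256 &b) : uint256(b) {}
    };

Because such a wrapper inherits uint256's comparison and serialization behaviour, existing std::map and std::set code keeps compiling after the key-type swap, which is why most hunks below only touch declarations, log strings, and comments.
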
diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp
--- a/src/consensus/merkle.cpp
+++ b/src/consensus/merkle.cpp
@@ -9,7 +9,7 @@
 /* WARNING! If you're reading this because you're learning about crypto
    and/or designing a new system that will use merkle trees, keep in mind
    that the following merkle tree algorithm has a serious flaw related to
-   duplicate txids, resulting in a vulnerability (CVE-2012-2459).
+   duplicate txhashes, resulting in a vulnerability (CVE-2012-2459).
 
    The reason is that if the number of hashes in the list at a given time
    is odd, the last one is duplicated before computing the next level (which
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp
--- a/src/merkleblock.cpp
+++ b/src/merkleblock.cpp
@@ -19,20 +19,20 @@
     vHashes.reserve(block.vtx.size());
 
     for (unsigned int i = 0; i < block.vtx.size(); i++) {
-        const uint256 &txid = block.vtx[i]->GetHash();
+        const uint256 &txhash = block.vtx[i]->GetHash();
         if (filter.IsRelevantAndUpdate(*block.vtx[i])) {
             vMatch.push_back(true);
-            vMatchedTxn.push_back(std::make_pair(i, txid));
+            vMatchedTxn.push_back(std::make_pair(i, txhash));
         } else
             vMatch.push_back(false);
-        vHashes.push_back(txid);
+        vHashes.push_back(txhash);
     }
 
     txn = CPartialMerkleTree(vHashes, vMatch);
 }
 
 CMerkleBlock::CMerkleBlock(const CBlock &block,
-                           const std::set<uint256> &txids) {
+                           const std::set<uint256> &txhashes) {
     header = block.GetBlockHeader();
 
     std::vector<bool> vMatch;
@@ -42,29 +42,29 @@
     vHashes.reserve(block.vtx.size());
 
     for (unsigned int i = 0; i < block.vtx.size(); i++) {
-        const uint256 &txid = block.vtx[i]->GetHash();
-        if (txids.count(txid))
+        const uint256 &txhash = block.vtx[i]->GetHash();
+        if (txhashes.count(txhash))
             vMatch.push_back(true);
         else
             vMatch.push_back(false);
-        vHashes.push_back(txid);
+        vHashes.push_back(txhash);
     }
 
     txn = CPartialMerkleTree(vHashes, vMatch);
 }
 
 uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos,
-                                     const std::vector<uint256> &vTxid) {
+                                     const std::vector<uint256> &vTxhash) {
     if (height == 0) {
         // hash at height 0 is the txids themself.
-        return vTxid[pos];
+        return vTxhash[pos];
     } else {
         // Calculate left hash.
-        uint256 left = CalcHash(height - 1, pos * 2, vTxid), right;
+        uint256 left = CalcHash(height - 1, pos * 2, vTxhash), right;
         // Calculate right hash if not beyond the end of the array - copy left
         // hash otherwise.
         if (pos * 2 + 1 < CalcTreeWidth(height - 1)) {
-            right = CalcHash(height - 1, pos * 2 + 1, vTxid);
+            right = CalcHash(height - 1, pos * 2 + 1, vTxhash);
         } else {
             right = left;
         }
@@ -74,9 +74,9 @@
 }
 
 void CPartialMerkleTree::TraverseAndBuild(int height, unsigned int pos,
-                                          const std::vector<uint256> &vTxid,
+                                          const std::vector<uint256> &vTxhash,
                                           const std::vector<bool> &vMatch) {
-    // Determine whether this node is the parent of at least one matched txid.
+    // Determine whether this node is the parent of at least one matched txhash.
     bool fParentOfMatch = false;
     for (unsigned int p = pos << height;
          p < (pos + 1) << height && p < nTransactions; p++)
@@ -85,12 +85,12 @@
     vBits.push_back(fParentOfMatch);
     if (height == 0 || !fParentOfMatch) {
         // If at height 0, or nothing interesting below, store hash and stop.
-        vHash.push_back(CalcHash(height, pos, vTxid));
+        vHash.push_back(CalcHash(height, pos, vTxhash));
     } else {
         // Otherwise, don't store any hash, but descend into the subtrees.
-        TraverseAndBuild(height - 1, pos * 2, vTxid, vMatch);
+        TraverseAndBuild(height - 1, pos * 2, vTxhash, vMatch);
         if (pos * 2 + 1 < CalcTreeWidth(height - 1))
-            TraverseAndBuild(height - 1, pos * 2 + 1, vTxid, vMatch);
+            TraverseAndBuild(height - 1, pos * 2 + 1, vTxhash, vMatch);
     }
 }
@@ -113,14 +113,14 @@
             return uint256();
         }
         const uint256 &hash = vHash[nHashUsed++];
-        // In case of height 0, we have a matched txid.
+        // In case of height 0, we have a matched txhash.
         if (height == 0 && fParentOfMatch) {
             vMatch.push_back(hash);
             vnIndex.push_back(pos);
         }
         return hash;
     } else {
-        // Otherwise, descend into the subtrees to extract matched txids and
+        // Otherwise, descend into the subtrees to extract matched txhashes and
         // hashes.
         uint256 left = TraverseAndExtract(height - 1, pos * 2, nBitsUsed,
                                           nHashUsed, vMatch, vnIndex),
@@ -141,9 +141,9 @@
     }
 }
 
-CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256> &vTxid,
+CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256> &vTxhash,
                                        const std::vector<bool> &vMatch)
-    : nTransactions(vTxid.size()), fBad(false) {
+    : nTransactions(vTxhash.size()), fBad(false) {
     // reset state
     vBits.clear();
     vHash.clear();
@@ -154,7 +154,7 @@
         nHeight++;
 
     // traverse the partial tree
-    TraverseAndBuild(nHeight, 0, vTxid, vMatch);
+    TraverseAndBuild(nHeight, 0, vTxhash, vMatch);
 }
 
 CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {}
@@ -167,7 +167,7 @@
     // Check for excessively high numbers of transactions.
     // FIXME: Track the maximum block size we've seen and use it here.
-    // There can never be more hashes provided than one for every txid.
+    // There can never be more hashes provided than one for every txhash.
     if (vHash.size() > nTransactions) return uint256();
     // There must be at least one bit per node in the partial tree, and at least
     // one node per hash.
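
The CVE-2012-2459 warning in merkle.cpp and the right = left branch in CPartialMerkleTree::CalcHash above both describe the same quirk: when a level of the tree has an odd number of nodes, the last hash is paired with itself. A standalone sketch of why that lets two different transaction lists produce the same root — not project code, and std::hash stands in for double-SHA256 purely to show the structure:

    #include <cassert>
    #include <functional>
    #include <string>
    #include <vector>

    // Stand-in combiner: in Bitcoin this would be SHA256d(left || right).
    static std::string HashPair(const std::string &l, const std::string &r) {
        return std::to_string(std::hash<std::string>{}(l + "|" + r));
    }

    // Merkle root with the "duplicate the last element when odd" rule.
    static std::string MerkleRoot(std::vector<std::string> level) {
        while (level.size() > 1) {
            if (level.size() % 2 == 1) level.push_back(level.back());
            std::vector<std::string> next;
            for (size_t i = 0; i < level.size(); i += 2)
                next.push_back(HashPair(level[i], level[i + 1]));
            level = next;
        }
        return level.empty() ? std::string() : level[0];
    }

    int main() {
        // {A, B, C} and {A, B, C, C} are different transaction lists, yet
        // they yield the same root -- the ambiguity behind CVE-2012-2459.
        assert(MerkleRoot({"A", "B", "C"}) == MerkleRoot({"A", "B", "C", "C"}));
        return 0;
    }

The consensus code guards against this by flagging a computed root as mutated when such a duplication is detected and rejecting the block, which is why the warning stays relevant after the rename.
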
diff --git a/src/miner.cpp b/src/miner.cpp
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -351,7 +351,7 @@
         Amount dummy;
         mempool.ApplyDeltas(iter->GetTx().GetHash(), dPriority, dummy);
         LogPrintf(
-            "priority %.1f fee %s txid %s\n", dPriority,
+            "priority %.1f fee %s txhash %s\n", dPriority,
             CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
             iter->GetTx().GetHash().ToString());
     }
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -53,9 +53,9 @@
     NodeId fromPeer;
     int64_t nTimeExpire;
 };
-std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
+std::map<TxHash, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
 std::map<COutPoint,
-         std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>>
+         std::set<std::map<TxHash, COrphanTx>::iterator, IteratorComparator>>
     mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
 void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -726,8 +726,8 @@
 bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
-    const uint256 &txid = tx->GetHash();
-    if (mapOrphanTransactions.count(txid)) {
+    const TxHash &txhash = tx->GetHash();
+    if (mapOrphanTransactions.count(txhash)) {
         return false;
     }
@@ -740,12 +740,12 @@
     unsigned int sz = GetTransactionSize(*tx);
     if (sz >= MAX_STANDARD_TX_SIZE) {
         LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n",
-                 sz, txid.ToString());
+                 sz, txhash.ToString());
         return false;
     }
 
     auto ret = mapOrphanTransactions.emplace(
-        txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
+        txhash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
     assert(ret.second);
     for (const CTxIn &txin : tx->vin) {
         mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
@@ -754,14 +754,13 @@
     AddToCompactExtraTransactions(tx);
 
     LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n",
-             txid.ToString(), mapOrphanTransactions.size(),
+             txhash.ToString(), mapOrphanTransactions.size(),
             mapOrphanTransactionsByPrev.size());
     return true;
 }
 
-static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
-    std::map<uint256, COrphanTx>::iterator it =
-        mapOrphanTransactions.find(hash);
+static int EraseOrphanTx(TxHash hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+    std::map<TxHash, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
     if (it == mapOrphanTransactions.end()) {
         return 0;
     }
@@ -781,10 +780,10 @@
 void EraseOrphansFor(NodeId peer) {
     int nErased = 0;
-    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
+    std::map<TxHash, COrphanTx>::iterator iter =
+        mapOrphanTransactions.begin();
     while (iter != mapOrphanTransactions.end()) {
         // Increment to avoid iterator becoming invalid.
-        std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
+        std::map<TxHash, COrphanTx>::iterator maybeErase = iter++;
         if (maybeErase->second.fromPeer == peer) {
             nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
         }
@@ -805,10 +804,10 @@
     int nErased = 0;
     int64_t nMinExpTime =
         nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
-    std::map<uint256, COrphanTx>::iterator iter =
+    std::map<TxHash, COrphanTx>::iterator iter =
         mapOrphanTransactions.begin();
     while (iter != mapOrphanTransactions.end()) {
-        std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
+        std::map<TxHash, COrphanTx>::iterator maybeErase = iter++;
         if (maybeErase->second.nTimeExpire <= nNow) {
             nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
         } else {
@@ -826,8 +825,8 @@
     }
     while (mapOrphanTransactions.size() > nMaxOrphans) {
         // Evict a random orphan:
-        uint256 randomhash = GetRandHash();
-        std::map<uint256, COrphanTx>::iterator it =
+        TxHash randomhash = GetRandHash();
+        std::map<TxHash, COrphanTx>::iterator it =
             mapOrphanTransactions.lower_bound(randomhash);
         if (it == mapOrphanTransactions.end()) {
             it = mapOrphanTransactions.begin();
         }
@@ -3587,9 +3586,9 @@
             LOCK(pto->cs_filter);
 
             for (const auto &txinfo : vtxinfo) {
-                const uint256 &txid = txinfo.tx->GetHash();
-                CInv inv(MSG_TX, txid);
-                pto->setInventoryTxToSend.erase(txid);
+                const TxHash &txhash = txinfo.tx->GetHash();
+                CInv inv(MSG_TX, txhash);
+                pto->setInventoryTxToSend.erase(txhash);
                 if (filterrate != 0) {
                     if (txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                 }
@@ -3600,7 +3599,7 @@
                         continue;
                     }
                 }
-                pto->filterInventoryKnown.insert(txid);
+                pto->filterInventoryKnown.insert(txhash);
                 vInv.push_back(inv);
                 if (vInv.size() == MAX_INV_SZ) {
                     connman.PushMessage(pto,
@@ -3644,7 +3643,7 @@
                                   compareInvMempoolOrder);
                 std::set<uint256>::iterator it = vInvTx.back();
                 vInvTx.pop_back();
-                uint256 hash = *it;
+                TxHash hash = *it;
                 // Remove it from the to-be-sent set
                 pto->setInventoryTxToSend.erase(it);
                 // Check if not in the filter already
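
One technique worth calling out in the net_processing.cpp hunks: LimitOrphanTxSize (the @@ -826 hunk) evicts a random orphan by drawing a random 256-bit value and calling lower_bound on the ordered map, wrapping to begin() when the draw lands past the last key; because the keys are hashes and therefore uniformly distributed, this is a cheap way to pick a roughly uniform victim without maintaining any extra index. A small self-contained illustration of the same pattern with plain integers (hypothetical names, not ABC code):

    #include <cstdio>
    #include <map>
    #include <random>
    #include <string>

    // Evict one entry chosen by probing an ordered map with a random key,
    // mirroring the lower_bound trick used in LimitOrphanTxSize. Selection is
    // biased toward entries that follow large key gaps, which is acceptable
    // when the keys themselves are uniformly distributed hashes.
    template <typename K, typename V>
    void EvictRandom(std::map<K, V> &m, const K &randomKey) {
        if (m.empty()) return;
        auto it = m.lower_bound(randomKey);
        if (it == m.end()) it = m.begin(); // wrap around, as net_processing does
        m.erase(it);
    }

    int main() {
        std::map<int, std::string> orphans{{1, "a"}, {5, "b"}, {9, "c"}};
        std::mt19937 rng{42};
        std::uniform_int_distribution<int> dist(0, 10);
        while (!orphans.empty()) EvictRandom(orphans, dist(rng));
        std::printf("all evicted\n");
        return 0;
    }

Switching the key type from uint256 to TxHash leaves the map's ordering untouched as long as TxHash compares exactly like the uint256 it wraps, so the eviction behaviour is unchanged by this patch.
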
diff --git a/src/policy/fees.h b/src/policy/fees.h
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -6,6 +6,7 @@
 #define BITCOIN_POLICYESTIMATOR_H
 
 #include "amount.h"
+#include "primitives/transaction.h"
 #include "random.h"
 #include "uint256.h"
 
@@ -236,7 +237,7 @@
                    bool validFeeEstimate);
 
     /** Remove a transaction from the mempool tracking stats*/
-    bool removeTx(uint256 hash);
+    bool removeTx(TxHash hash);
 
     /** Return a feerate estimate */
     CFeeRate estimateFee(int confTarget);
@@ -281,8 +282,8 @@
         TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
     };
 
-    // map of txids to information about that transaction
-    std::map<uint256, TxStatsInfo> mapMemPoolTxs;
+    // map of txhashes to information about that transaction
+    std::map<TxHash, TxStatsInfo> mapMemPoolTxs;
 
     /** Classes to track historical data on transaction confirmations */
     TxConfirmStats feeStats;
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -324,8 +324,8 @@
 // removed from the mempool for any reason are no longer tracked. Txs that were
 // part of a block have already been removed in processBlockTx to ensure they
 // are never double tracked, but it is of no harm to try to remove them again.
-bool CBlockPolicyEstimator::removeTx(uint256 hash) {
-    std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
+bool CBlockPolicyEstimator::removeTx(TxHash hash) {
+    std::map<TxHash, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
     if (pos == mapMemPoolTxs.end()) {
         return false;
     }
@@ -355,11 +355,11 @@
 void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry &entry,
                                                bool validFeeEstimate) {
     uint32_t txHeight = entry.GetHeight();
-    uint256 txid = entry.GetTx().GetHash();
-    if (mapMemPoolTxs.count(txid)) {
+    TxHash txhash = entry.GetTx().GetHash();
+    if (mapMemPoolTxs.count(txhash)) {
         LogPrint("estimatefee",
                  "Blockpolicy error mempool tx %s already being tracked\n",
-                 txid.ToString().c_str());
+                 txhash.ToString().c_str());
         return;
     }
@@ -383,8 +383,8 @@
     // Feerates are stored and reported as BCC-per-kb:
     CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
 
-    mapMemPoolTxs[txid].blockHeight = txHeight;
-    mapMemPoolTxs[txid].bucketIndex =
+    mapMemPoolTxs[txhash].blockHeight = txHeight;
+    mapMemPoolTxs[txhash].bucketIndex =
         feeStats.NewTx(txHeight, double(feeRate.GetFeePerK().GetSatoshis()));
 }
diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp
--- a/src/primitives/transaction.cpp
+++ b/src/primitives/transaction.cpp
@@ -123,10 +123,11 @@
 std::string CTransaction::ToString() const {
     std::string str;
-    str += strprintf("CTransaction(txid=%s, ver=%d, vin.size=%u, vout.size=%u, "
-                     "nLockTime=%u)\n",
-                     GetHash().ToString().substr(0, 10), nVersion, vin.size(),
-                     vout.size(), nLockTime);
+    str +=
+        strprintf("CTransaction(txhash=%s, ver=%d, vin.size=%u, vout.size=%u, "
+                  "nLockTime=%u)\n",
+                  GetHash().ToString().substr(0, 10), nVersion, vin.size(),
+                  vout.size(), nLockTime);
     for (unsigned int i = 0; i < vin.size(); i++)
         str += "    " + vin[i].ToString() + "\n";
     for (unsigned int i = 0; i < vout.size(); i++)
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -315,7 +315,7 @@
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         pblockindex = mapBlockIndex[hashBlock];
     } else {
-        const Coin &coin = AccessByTxid(*pcoinsTip, oneTxid);
+        const Coin &coin = AccessByTxhash(*pcoinsTip, oneTxid);
         if (!coin.IsSpent() && coin.GetHeight() > 0 &&
             int64_t(coin.GetHeight()) <= chainActive.Height()) {
             pblockindex = chainActive[coin.GetHeight()];
         }
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -150,7 +150,7 @@
             Coin &coin = result[COutPoint(txid, 0)];
             const Coin &entry =
                 (insecure_rand() % 500 == 0)
-                    ? AccessByTxid(*stack.back(), txid)
+                    ? AccessByTxhash(*stack.back(), txid)
                     : stack.back()->AccessCoin(COutPoint(txid, 0));
             BOOST_CHECK(coin == entry);
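
For readers following the gettxoutproof hunk in rawtransaction.cpp above: AccessByTxhash probes COutPoint(txhash, 0), COutPoint(txhash, 1), ... up to MAX_OUTPUTS_PER_TX and returns the first unspent coin it finds, so the lookup only succeeds while at least one output of the transaction is still unspent. A hedged usage sketch of the same pattern, assuming the usual surrounding globals (pcoinsTip, chainActive) from the codebase:

    // Sketch only: locate the block that created some still-unspent output of
    // txhash, exactly as the gettxoutproof fallback above does.
    const Coin &coin = AccessByTxhash(*pcoinsTip, txhash);
    if (!coin.IsSpent() && coin.GetHeight() > 0 &&
        int64_t(coin.GetHeight()) <= chainActive.Height()) {
        CBlockIndex *pindex = chainActive[coin.GetHeight()];
        // pindex is the index entry of the block holding that output. If every
        // output has been spent, this path cannot succeed and the caller must
        // supply the block hash explicitly instead.
    }

The linear probe is bounded by MAX_OUTPUTS_PER_TX (derived earlier in coins.cpp from MAX_TX_SIZE and the serialized size of an empty CTxOut), so a malformed argument cannot turn it into an unbounded scan.
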
diff --git a/src/txdb.h b/src/txdb.h
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -121,8 +121,8 @@
     bool ReadLastBlockFile(int &nFile);
     bool WriteReindexing(bool fReindex);
    bool ReadReindexing(bool &fReindex);
-    bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos);
-    bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos>> &list);
+    bool ReadTxIndex(const TxHash &txhash, CDiskTxPos &pos);
+    bool WriteTxIndex(const std::vector<std::pair<TxHash, CDiskTxPos>> &list);
     bool WriteFlag(const std::string &name, bool fValue);
     bool ReadFlag(const std::string &name, bool &fValue);
     bool LoadBlockIndexGuts(
diff --git a/src/txdb.cpp b/src/txdb.cpp
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -194,14 +194,14 @@
     return WriteBatch(batch, true);
 }
 
-bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
-    return Read(std::make_pair(DB_TXINDEX, txid), pos);
+bool CBlockTreeDB::ReadTxIndex(const TxHash &txhash, CDiskTxPos &pos) {
+    return Read(std::make_pair(DB_TXINDEX, txhash), pos);
 }
 
 bool CBlockTreeDB::WriteTxIndex(
-    const std::vector<std::pair<uint256, CDiskTxPos>> &vect) {
+    const std::vector<std::pair<TxHash, CDiskTxPos>> &vect) {
     CDBBatch batch(*this);
-    for (std::vector<std::pair<uint256, CDiskTxPos>>::const_iterator it =
+    for (std::vector<std::pair<TxHash, CDiskTxPos>>::const_iterator it =
              vect.begin();
          it != vect.end(); it++)
         batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
diff --git a/src/txmempool.h b/src/txmempool.h
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -232,7 +232,7 @@
 };
 
 // extracts a TxMemPoolEntry's transaction hash
-struct mempoolentry_txid {
+struct mempoolentry_txhash {
     typedef TxHash result_type;
     result_type operator()(const CTxMemPoolEntry &entry) const {
         return entry.GetTx().GetHash();
@@ -491,9 +491,9 @@
     typedef boost::multi_index_container<
         CTxMemPoolEntry,
         boost::multi_index::indexed_by<
-            // sorted by txid
+            // sorted by txhash
            boost::multi_index::hashed_unique<
-                mempoolentry_txid, SaltedTxidHasher>,
+                mempoolentry_txhash, SaltedTxidHasher>,
             // sorted by fee rate
             boost::multi_index::ordered_non_unique<
                 boost::multi_index::tag<descendant_score>,
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -467,7 +467,7 @@
 void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) {
     NotifyEntryRemoved(it->GetSharedTx(), reason);
-    const TxHash txid = it->GetTx().GetHash();
+    const TxHash txhash = it->GetTx().GetHash();
     for (const CTxIn &txin : it->GetTx().vin) {
         mapNextTx.erase(txin.prevout);
     }
@@ -488,7 +488,7 @@
     mapLinks.erase(it);
     mapTx.erase(it);
     nTransactionsUpdated++;
-    minerPolicyEstimator->removeTx(txid);
+    minerPolicyEstimator->removeTx(txhash);
 }
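
The mempoolentry_txid → mempoolentry_txhash rename in txmempool.h touches a boost::multi_index key extractor: a functor exposing a nested result_type and an operator() that pulls the index key out of the stored element, so the container indexes entries by their hash without storing the hash a second time. A minimal standalone illustration of the same pattern — toy types, not the real mempool index:

    #include <boost/multi_index/hashed_index.hpp>
    #include <boost/multi_index_container.hpp>
    #include <cassert>
    #include <string>

    struct Entry {
        std::string payload;
        // Stand-in for CTxMemPoolEntry::GetTx().GetHash().
        std::string GetKey() const { return payload.substr(0, 4); }
    };

    // Same shape as mempoolentry_txhash: result_type plus operator() over the
    // stored element type.
    struct entry_key {
        typedef std::string result_type;
        result_type operator()(const Entry &e) const { return e.GetKey(); }
    };

    typedef boost::multi_index_container<
        Entry, boost::multi_index::indexed_by<
                   boost::multi_index::hashed_unique<entry_key>>>
        EntrySet;

    int main() {
        EntrySet entries;
        entries.insert(Entry{"abcd-first"});
        assert(entries.find(std::string("abcd")) != entries.end());
        return 0;
    }

Since the rename does not change what the extractor returns, the hashed_unique index keeps exactly the same contents; the behaviour-relevant piece is SaltedTxidHasher, which continues to hash the same underlying 256-bit value whether it is labelled txid or txhash.
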
diff --git a/src/validation.cpp b/src/validation.cpp
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1079,7 +1079,7 @@
     // use coin database to locate block that contains transaction, and scan it
     if (fAllowSlow) {
-        const Coin &coin = AccessByTxid(*pcoinsTip, txhash);
+        const Coin &coin = AccessByTxhash(*pcoinsTip, txhash);
         if (!coin.IsSpent()) {
             pindexSlow = chainActive[coin.GetHeight()];
         }
@@ -1608,7 +1608,7 @@
     // this information only in undo records for the last spend of a
     // transactions' outputs. This implies that it must be present for some
     // other output of the same tx.
-    const Coin &alternate = AccessByTxid(view, out.hash);
+    const Coin &alternate = AccessByTxhash(view, out.hash);
     if (alternate.IsSpent()) {
         // Adding output for transaction without known metadata
         return DISCONNECT_FAILED;
     }
@@ -2018,7 +2018,7 @@
     CDiskTxPos pos(pindex->GetBlockPos(),
                    GetSizeOfCompactSize(block.vtx.size()));
-    std::vector<std::pair<uint256, CDiskTxPos>> vPos;
+    std::vector<std::pair<TxHash, CDiskTxPos>> vPos;
     vPos.reserve(block.vtx.size());
     blockundo.vtxundo.reserve(block.vtx.size() - 1);