diff --git a/src/miner.cpp b/src/miner.cpp
index a54eeead81..90cb97057f 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -1,701 +1,702 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "miner.h"

#include "amount.h"
#include "chain.h"
#include "chainparams.h"
#include "coins.h"
#include "config.h"
#include "consensus/activation.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "hash.h"
#include "net.h"
#include "policy/policy.h"
#include "pow.h"
#include "primitives/transaction.h"
#include "script/standard.h"
#include "timedata.h"
#include "txmempool.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validation.h"
#include "validationinterface.h"

#include
#include
#include
#include
#include

//////////////////////////////////////////////////////////////////////////////
//
// BitcoinMiner
//

//
// Unconfirmed transactions in the memory pool often depend on other
// transactions in the memory pool. When we select transactions from the
// pool, we select by highest priority or fee rate, so we might consider
// transactions that depend on transactions that aren't yet in the block.

uint64_t nLastBlockTx = 0;
uint64_t nLastBlockSize = 0;

int64_t UpdateTime(CBlockHeader *pblock, const Config &config,
                   const CBlockIndex *pindexPrev) {
    int64_t nOldTime = pblock->nTime;
    int64_t nNewTime =
        std::max(pindexPrev->GetMedianTimePast() + 1, GetAdjustedTime());

    if (nOldTime < nNewTime) {
        pblock->nTime = nNewTime;
    }

    const Consensus::Params &consensusParams =
        config.GetChainParams().GetConsensus();

    // Updating time can change work required on testnet:
    if (consensusParams.fPowAllowMinDifficultyBlocks) {
        pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, config);
    }

    return nNewTime - nOldTime;
}

static uint64_t ComputeMaxGeneratedBlockSize(const Config &config,
                                             const CBlockIndex *pindexPrev) {
    // Block resource limits
    // If -blockmaxsize is not given, limit to DEFAULT_MAX_GENERATED_BLOCK_SIZE
    // If only one is given, only restrict the specified resource.
    // If both are given, restrict both.
    uint64_t nMaxGeneratedBlockSize = DEFAULT_MAX_GENERATED_BLOCK_SIZE;
    if (gArgs.IsArgSet("-blockmaxsize")) {
        nMaxGeneratedBlockSize =
            gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE);
    }

    // Limit size to between 1K and MaxBlockSize-1K for sanity:
    nMaxGeneratedBlockSize =
        std::max(uint64_t(1000), std::min(config.GetMaxBlockSize() - 1000,
                                          nMaxGeneratedBlockSize));
    return nMaxGeneratedBlockSize;
}

BlockAssembler::BlockAssembler(const Config &_config) : config(&_config) {
    if (gArgs.IsArgSet("-blockmintxfee")) {
        Amount n = Amount::zero();
        ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n);
        blockMinFeeRate = CFeeRate(n);
    } else {
        blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB);
    }

    LOCK(cs_main);
    nMaxGeneratedBlockSize =
        ComputeMaxGeneratedBlockSize(*config, chainActive.Tip());
}
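The clamp above keeps the generated-block target inside [1000, GetMaxBlockSize() - 1000] no matter what -blockmaxsize is set to. A minimal standalone sketch of the same expression, with hypothetical values standing in for config.GetMaxBlockSize():

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Same clamp as ComputeMaxGeneratedBlockSize, isolated for illustration.
    static uint64_t Clamp(uint64_t requested, uint64_t maxBlockSize) {
        return std::max(uint64_t(1000),
                        std::min(maxBlockSize - 1000, requested));
    }

    int main() {
        // Assuming an 8MB consensus limit (8000000 bytes):
        assert(Clamp(0, 8000000) == 1000);          // too small -> floor
        assert(Clamp(2000000, 8000000) == 2000000); // in range -> unchanged
        assert(Clamp(9000000, 8000000) == 7999000); // too large -> ceiling
        return 0;
    }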
void BlockAssembler::resetBlock() {
    inBlock.clear();

    // Reserve space for coinbase tx.
    nBlockSize = 1000;
    nBlockSigOps = 100;

    // These counters do not include coinbase tx.
    nBlockTx = 0;
    nFees = Amount::zero();

    lastFewTxs = 0;
}

static const std::vector<uint8_t>
getExcessiveBlockSizeSig(const Config &config) {
    std::string cbmsg = "/EB" + getSubVersionEB(config.GetMaxBlockSize()) + "/";
    const char *cbcstr = cbmsg.c_str();
    std::vector<uint8_t> vec(cbcstr, cbcstr + cbmsg.size());
    return vec;
}

std::unique_ptr<CBlockTemplate>
BlockAssembler::CreateNewBlock(const CScript &scriptPubKeyIn) {
    int64_t nTimeStart = GetTimeMicros();

    resetBlock();

    pblocktemplate.reset(new CBlockTemplate());
    if (!pblocktemplate.get()) {
        return nullptr;
    }

    // Pointer for convenience.
    pblock = &pblocktemplate->block;

    // Add dummy coinbase tx as first transaction.
    pblock->vtx.emplace_back();
    // updated at end
    pblocktemplate->vTxFees.push_back(-SATOSHI);
    // updated at end
    pblocktemplate->vTxSigOpsCount.push_back(-1);

    LOCK2(cs_main, mempool.cs);
    CBlockIndex *pindexPrev = chainActive.Tip();
    nHeight = pindexPrev->nHeight + 1;

    const CChainParams &chainparams = config->GetChainParams();
    pblock->nVersion =
        ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
    // -regtest only: allow overriding block.nVersion with
    // -blockversion=N to test forking scenarios
    if (chainparams.MineBlocksOnDemand()) {
        pblock->nVersion = gArgs.GetArg("-blockversion", pblock->nVersion);
    }

    pblock->nTime = GetAdjustedTime();
    nMaxGeneratedBlockSize = ComputeMaxGeneratedBlockSize(*config, pindexPrev);

    nMedianTimePast = pindexPrev->GetMedianTimePast();
    nLockTimeCutoff =
        (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
            ? nMedianTimePast
            : pblock->GetBlockTime();

    addPriorityTxs();
    int nPackagesSelected = 0;
    int nDescendantsUpdated = 0;
    addPackageTxs(nPackagesSelected, nDescendantsUpdated);

    if (IsMagneticAnomalyEnabled(*config, pindexPrev)) {
        // If magnetic anomaly is enabled, we make sure transactions are
        // canonically ordered.
        std::sort(std::begin(pblock->vtx) + 1, std::end(pblock->vtx),
                  [](const std::shared_ptr<const CTransaction> &a,
                     const std::shared_ptr<const CTransaction> &b) -> bool {
                      return a->GetId() < b->GetId();
                  });
    }
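The sort above implements canonical (txid-lexicographic) ordering for everything after the coinbase; sorting from begin() + 1 is what keeps the coinbase pinned at index 0. A minimal sketch with hypothetical string "txids" in place of CTransactionRef and GetId():

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
        // Sorting from begin() + 1 leaves slot 0 untouched while everything
        // else becomes id-ordered, even if a child id sorts before its parent.
        std::vector<std::string> vtx = {"coinbase", "ffff", "0001"};
        std::sort(vtx.begin() + 1, vtx.end());
        assert(vtx[0] == "coinbase");
        assert(vtx[1] == "0001" && vtx[2] == "ffff");
        return 0;
    }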
    int64_t nTime1 = GetTimeMicros();

    nLastBlockTx = nBlockTx;
    nLastBlockSize = nBlockSize;

    // Create coinbase transaction.
    CMutableTransaction coinbaseTx;
    coinbaseTx.vin.resize(1);
    coinbaseTx.vin[0].prevout = COutPoint();
    coinbaseTx.vout.resize(1);
    coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
    coinbaseTx.vout[0].nValue =
        nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
    coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;

    // Make sure the coinbase is big enough.
    uint64_t coinbaseSize =
        ::GetSerializeSize(coinbaseTx, SER_NETWORK, PROTOCOL_VERSION);
    if (coinbaseSize < MIN_TX_SIZE) {
        coinbaseTx.vin[0].scriptSig
            << std::vector<uint8_t>(MIN_TX_SIZE - coinbaseSize - 1);
    }

    pblock->vtx[0] = MakeTransactionRef(coinbaseTx);
    pblocktemplate->vTxFees[0] = -1 * nFees;

    uint64_t nSerializeSize =
        GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION);

    LogPrintf("CreateNewBlock(): total size: %u txs: %u fees: %ld sigops %d\n",
              nSerializeSize, nBlockTx, nFees, nBlockSigOps);

    // Fill in header.
    pblock->hashPrevBlock = pindexPrev->GetBlockHash();
    UpdateTime(pblock, *config, pindexPrev);
    pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, *config);
    pblock->nNonce = 0;
    pblocktemplate->vTxSigOpsCount[0] = GetSigOpCountWithoutP2SH(
        *pblock->vtx[0], STANDARD_CHECKDATASIG_VERIFY_FLAGS);

    CValidationState state;
    BlockValidationOptions validationOptions(false, false);
    if (!TestBlockValidity(*config, state, *pblock, pindexPrev,
                           validationOptions)) {
        throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s",
                                           __func__,
                                           FormatStateMessage(state)));
    }
    int64_t nTime2 = GetTimeMicros();

    LogPrint(
        BCLog::BENCH,
        "CreateNewBlock() packages: %.2fms (%d packages, %d "
        "updated descendants), validity: %.2fms (total %.2fms)\n",
        0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated,
        0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));

    return std::move(pblocktemplate);
}
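The coinbase-padding branch above relies on script push encoding: streaming a std::vector<uint8_t> of N zero bytes (for N below the OP_PUSHDATA1 threshold of 76) appends one length byte plus N data bytes, so sizing the vector to MIN_TX_SIZE - coinbaseSize - 1 lands the serialized transaction exactly on MIN_TX_SIZE. A quick arithmetic check; the MIN_TX_SIZE value of 100 is an assumption here, the real constant lives in the consensus headers:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t MIN_TX_SIZE = 100; // assumed value
        uint64_t coinbaseSize = 83;       // example undersized coinbase
        // A vector of (100 - 83 - 1) = 16 zero bytes serializes as
        // 1 push-length byte + 16 data bytes = 17 bytes added:
        uint64_t padded = coinbaseSize + 1 + (MIN_TX_SIZE - coinbaseSize - 1);
        assert(padded == MIN_TX_SIZE);
        return 0;
    }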
bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter) {
    for (CTxMemPool::txiter parent : mempool.GetMemPoolParents(iter)) {
        if (!inBlock.count(parent)) {
            return true;
        }
    }
    return false;
}

void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries &testSet) {
    for (CTxMemPool::setEntries::iterator iit = testSet.begin();
         iit != testSet.end();) {
        // Only test txs not already in the block.
        if (inBlock.count(*iit)) {
            testSet.erase(iit++);
        } else {
            iit++;
        }
    }
}

bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOps) {
    auto blockSizeWithPackage = nBlockSize + packageSize;
    if (blockSizeWithPackage >= nMaxGeneratedBlockSize) {
        return false;
    }
    if (nBlockSigOps + packageSigOps >=
        GetMaxBlockSigOpsCount(blockSizeWithPackage)) {
        return false;
    }
    return true;
}

/**
 * Perform transaction-level checks before adding to block:
 * - Transaction finality (locktime)
 * - Serialized size (in case -blockmaxsize is in use)
 */
bool BlockAssembler::TestPackageTransactions(
    const CTxMemPool::setEntries &package) {
    uint64_t nPotentialBlockSize = nBlockSize;
    for (const CTxMemPool::txiter it : package) {
        CValidationState state;
        if (!ContextualCheckTransaction(*config, it->GetTx(), state, nHeight,
                                        nLockTimeCutoff, nMedianTimePast)) {
            return false;
        }

        uint64_t nTxSize =
            ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
        if (nPotentialBlockSize + nTxSize >= nMaxGeneratedBlockSize) {
            return false;
        }

        nPotentialBlockSize += nTxSize;
    }

    return true;
}

BlockAssembler::TestForBlockResult
BlockAssembler::TestForBlock(CTxMemPool::txiter it) {
    auto blockSizeWithTx =
        nBlockSize +
        ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
    if (blockSizeWithTx >= nMaxGeneratedBlockSize) {
        if (nBlockSize > nMaxGeneratedBlockSize - 100 || lastFewTxs > 50) {
            return TestForBlockResult::BlockFinished;
        }

        if (nBlockSize > nMaxGeneratedBlockSize - 1000) {
            lastFewTxs++;
        }

        return TestForBlockResult::TXCantFit;
    }

    auto maxBlockSigOps = GetMaxBlockSigOpsCount(blockSizeWithTx);
    if (nBlockSigOps + it->GetSigOpCount() >= maxBlockSigOps) {
        // If the block has room for no more sig ops then flag that the block
        // is finished.
        // TODO: We should consider adding another transaction that isn't very
        // dense in sigops instead of bailing out so easily.
        if (nBlockSigOps > maxBlockSigOps - 2) {
            return TestForBlockResult::BlockFinished;
        }

        // Otherwise attempt to find another tx with fewer sigops to put in
        // the block.
        return TestForBlockResult::TXCantFit;
    }

    // Must check that lock times are still valid. This can be removed once
    // MTP is always enforced as long as reorgs keep the mempool consistent.
    CValidationState state;
    if (!ContextualCheckTransaction(*config, it->GetTx(), state, nHeight,
                                    nLockTimeCutoff, nMedianTimePast)) {
        return TestForBlockResult::TXCantFit;
    }

    return TestForBlockResult::TXFits;
}

void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) {
    pblock->vtx.emplace_back(iter->GetSharedTx());
    pblocktemplate->vTxFees.push_back(iter->GetFee());
    pblocktemplate->vTxSigOpsCount.push_back(iter->GetSigOpCount());
    nBlockSize += iter->GetTxSize();
    ++nBlockTx;
    nBlockSigOps += iter->GetSigOpCount();
    nFees += iter->GetFee();
    inBlock.insert(iter);

    bool fPrintPriority =
        gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
    if (fPrintPriority) {
        double dPriority = iter->GetPriority(nHeight);
        Amount dummy;
        mempool.ApplyDeltas(iter->GetTx().GetId(), dPriority, dummy);
        LogPrintf(
            "priority %.1f fee %s txid %s\n", dPriority,
            CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
            iter->GetTx().GetId().ToString());
    }
}

int BlockAssembler::UpdatePackagesForAdded(
    const CTxMemPool::setEntries &alreadyAdded,
    indexed_modified_transaction_set &mapModifiedTx) {
    int nDescendantsUpdated = 0;
    for (const CTxMemPool::txiter it : alreadyAdded) {
        CTxMemPool::setEntries descendants;
        mempool.CalculateDescendants(it, descendants);
        // Insert all descendants (not yet in block) into the modified set.
        for (CTxMemPool::txiter desc : descendants) {
            if (alreadyAdded.count(desc)) {
                continue;
            }

            ++nDescendantsUpdated;
            modtxiter mit = mapModifiedTx.find(desc);
            if (mit == mapModifiedTx.end()) {
                CTxMemPoolModifiedEntry modEntry(desc);
                modEntry.nSizeWithAncestors -= it->GetTxSize();
+               modEntry.nBillableSizeWithAncestors -= it->GetTxBillableSize();
                modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
                modEntry.nSigOpCountWithAncestors -= it->GetSigOpCount();
                mapModifiedTx.insert(modEntry);
            } else {
                mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
            }
        }
    }

    return nDescendantsUpdated;
}

// Skip entries in mapTx that are already in a block or are present in
// mapModifiedTx (which implies that the mapTx ancestor state is stale due to
// ancestor inclusion in the block). Also skip transactions that we've already
// failed to add. This can happen if we consider a transaction in mapModifiedTx
// and it fails: we can then potentially consider it again while walking mapTx.
// It's currently guaranteed to fail again, but as a belt-and-suspenders check
// we put it in failedTx and avoid re-evaluation, since the re-evaluation would
// be using cached size/sigops/fee values that are not actually correct.
bool BlockAssembler::SkipMapTxEntry(
    CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx,
    CTxMemPool::setEntries &failedTx) {
    assert(it != mempool.mapTx.end());
    return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it);
}

void BlockAssembler::SortForBlock(
    const CTxMemPool::setEntries &package, CTxMemPool::txiter entry,
    std::vector<CTxMemPool::txiter> &sortedEntries) {
    // Sort package by ancestor count. If a transaction A depends on
    // transaction B, then A's ancestor count must be greater than B's. So
    // this is sufficient to validly order the transactions for block
    // inclusion.
    sortedEntries.clear();
    sortedEntries.insert(sortedEntries.begin(), package.begin(),
                         package.end());
    std::sort(sortedEntries.begin(), sortedEntries.end(),
              CompareTxIterByAncestorCount());
}
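Why ancestor count alone is enough in SortForBlock: within one package, every parent is an ancestor of its child, so the child's GetCountWithAncestors() is strictly larger. A minimal illustration with hypothetical counts (not the real CTxMemPoolEntry API):

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <vector>

    struct Entry {
        const char *name;
        unsigned countWithAncestors; // includes the entry itself
    };

    int main() {
        // grandparent(1) <- parent(2) <- child(3), inserted out of order:
        std::vector<Entry> pkg = {
            {"child", 3}, {"grandparent", 1}, {"parent", 2}};
        std::sort(pkg.begin(), pkg.end(), [](const Entry &a, const Entry &b) {
            return a.countWithAncestors < b.countWithAncestors;
        });
        // Parents now precede children, a valid in-block order:
        assert(std::strcmp(pkg[0].name, "grandparent") == 0);
        assert(std::strcmp(pkg[2].name, "child") == 0);
        return 0;
    }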
/**
 * addPackageTxs includes transactions paying a fee by ensuring that
 * the partial ordering of transactions is maintained. That is to say
 * children come after parents, despite having a potentially larger fee.
 * @param[out] nPackagesSelected    How many packages were selected
 * @param[out] nDescendantsUpdated  Number of descendant transactions updated
 */
void BlockAssembler::addPackageTxs(int &nPackagesSelected,
                                   int &nDescendantsUpdated) {
    // The selection algorithm orders the mempool based on the feerate of a
    // transaction including all unconfirmed ancestors. Since we don't remove
    // transactions from the mempool as we select them for block inclusion,
    // we need an alternate method of updating the feerate of a transaction
    // with its not-yet-selected ancestors as we go. This is accomplished by
    // walking the in-mempool descendants of selected transactions and storing
    // a temporary modified state in mapModifiedTxs. Each time through the
    // loop, we compare the best transaction in mapModifiedTxs with the next
    // transaction in the mempool to decide what transaction package to work
    // on next.

    // mapModifiedTx will store sorted packages after they are modified
    // because some of their txs are already in the block.
    indexed_modified_transaction_set mapModifiedTx;
    // Keep track of entries that failed inclusion, to avoid duplicate work.
    CTxMemPool::setEntries failedTx;

    // Start by adding all descendants of previously added txs to
    // mapModifiedTx and modifying them for their already included ancestors.
    UpdatePackagesForAdded(inBlock, mapModifiedTx);

    CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator
        mi = mempool.mapTx.get<ancestor_score>().begin();
    CTxMemPool::txiter iter;

    // Limit the number of attempts to add transactions to the block when it
    // is close to full; this is just a simple heuristic to finish quickly if
    // the mempool has a lot of entries.
    const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
    int64_t nConsecutiveFailed = 0;

    while (mi != mempool.mapTx.get<ancestor_score>().end() ||
           !mapModifiedTx.empty()) {
        // First try to find a new transaction in mapTx to evaluate.
        if (mi != mempool.mapTx.get<ancestor_score>().end() &&
            SkipMapTxEntry(mempool.mapTx.project<0>(mi), mapModifiedTx,
                           failedTx)) {
            ++mi;
            continue;
        }

        // Now that mi is not stale, determine which transaction to evaluate:
        // the next entry from mapTx, or the best from mapModifiedTx?
        bool fUsingModified = false;

        modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
        if (mi == mempool.mapTx.get<ancestor_score>().end()) {
            // We're out of entries in mapTx; use the entry from mapModifiedTx
            iter = modit->iter;
            fUsingModified = true;
        } else {
            // Try to compare the mapTx entry to the mapModifiedTx entry.
            iter = mempool.mapTx.project<0>(mi);
            if (modit != mapModifiedTx.get<ancestor_score>().end() &&
                CompareModifiedEntry()(*modit,
                                       CTxMemPoolModifiedEntry(iter))) {
                // The best entry in mapModifiedTx has higher score than the
                // one from mapTx. Switch which transaction (package) to
                // consider.
                iter = modit->iter;
                fUsingModified = true;
            } else {
                // Either no entry in mapModifiedTx, or it's worse than
                // mapTx. Increment mi for the next loop iteration.
                ++mi;
            }
        }

        // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
        // contain anything that is inBlock.
        assert(!inBlock.count(iter));

        uint64_t packageSize = iter->GetSizeWithAncestors();
        Amount packageFees = iter->GetModFeesWithAncestors();
        int64_t packageSigOps = iter->GetSigOpCountWithAncestors();
        if (fUsingModified) {
            packageSize = modit->nSizeWithAncestors;
            packageFees = modit->nModFeesWithAncestors;
            packageSigOps = modit->nSigOpCountWithAncestors;
        }

        if (packageFees < blockMinFeeRate.GetFee(packageSize)) {
            // Everything else we might consider has a lower fee rate
            return;
        }

        if (!TestPackage(packageSize, packageSigOps)) {
            if (fUsingModified) {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
                // next best entry on the next loop iteration
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }

            ++nConsecutiveFailed;

            if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES &&
                nBlockSize > nMaxGeneratedBlockSize - 1000) {
                // Give up if we're close to full and haven't succeeded in a
                // while.
                break;
            }
            continue;
        }

        CTxMemPool::setEntries ancestors;
        uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
        std::string dummy;
        mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit,
                                          nNoLimit, nNoLimit, dummy, false);

        onlyUnconfirmed(ancestors);
        ancestors.insert(iter);

        // Test if all tx's are Final.
        if (!TestPackageTransactions(ancestors)) {
            if (fUsingModified) {
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }
            continue;
        }

        // This transaction will make it in; reset the failed counter.
        nConsecutiveFailed = 0;

        // Package can be added. Sort the entries in a valid order.
        std::vector<CTxMemPool::txiter> sortedEntries;
        SortForBlock(ancestors, iter, sortedEntries);

        for (auto &entry : sortedEntries) {
            AddToBlock(entry);
            // Erase from the modified set, if present
            mapModifiedTx.erase(entry);
        }

        ++nPackagesSelected;

        // Update transactions that depend on each of these
        nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
    }
}
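The early return on the blockMinFeeRate check leans on the iteration order: entries come out of the ancestor_score index best-first, so once a package misses the floor nothing later can pass it. A small sketch of the fee floor itself, assuming CFeeRate's usual satoshis-per-1000-bytes semantics and a hypothetical 1000 sat/kB minimum:

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for CFeeRate::GetFee (satoshis per 1000 bytes).
    static int64_t GetFee(int64_t satPerK, int64_t sizeBytes) {
        return satPerK * sizeBytes / 1000;
    }

    int main() {
        const int64_t blockMinFeeRate = 1000; // assumed 1000 sat/kB
        // A 250-byte package must pay at least 250 sat to be considered:
        assert(GetFee(blockMinFeeRate, 250) == 250);
        return 0;
    }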
void BlockAssembler::addPriorityTxs() {
    // How much of the block should be dedicated to high-priority
    // transactions, included regardless of the fees they pay.
    if (config->GetBlockPriorityPercentage() == 0) {
        return;
    }

    uint64_t nBlockPrioritySize =
        nMaxGeneratedBlockSize * config->GetBlockPriorityPercentage() / 100;

    // This vector will be sorted into a priority queue:
    std::vector<TxCoinAgePriority> vecPriority;
    TxCoinAgePriorityCompare pricomparer;
    std::map<CTxMemPool::txiter, double, CompareCTxMemPoolIter> waitPriMap;
    typedef std::map<CTxMemPool::txiter, double,
                     CompareCTxMemPoolIter>::iterator waitPriIter;
    double actualPriority = -1;

    vecPriority.reserve(mempool.mapTx.size());
    for (CTxMemPool::indexed_transaction_set::iterator mi =
             mempool.mapTx.begin();
         mi != mempool.mapTx.end(); ++mi) {
        double dPriority = mi->GetPriority(nHeight);
        Amount dummy;
        mempool.ApplyDeltas(mi->GetTx().GetId(), dPriority, dummy);
        vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
    }
    std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);

    CTxMemPool::txiter iter;

    // Add a tx from priority queue to fill the part of block reserved to
    // priority transactions.
    while (!vecPriority.empty()) {
        iter = vecPriority.front().second;
        actualPriority = vecPriority.front().first;
        std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
        vecPriority.pop_back();

        // If tx already in block, skip.
        if (inBlock.count(iter)) {
            // Shouldn't happen for priority txs.
            assert(false);
            continue;
        }

        // If tx is dependent on other mempool txs which haven't yet been
        // included then put it in the waitSet.
        if (isStillDependent(iter)) {
            waitPriMap.insert(std::make_pair(iter, actualPriority));
            continue;
        }

        TestForBlockResult testResult = TestForBlock(iter);
        // Break if the block is completed
        if (testResult == TestForBlockResult::BlockFinished) {
            break;
        }

        // If this tx does not fit in the block, skip to next transaction.
        if (testResult != TestForBlockResult::TXFits) {
            continue;
        }

        AddToBlock(iter);

        // If now that this tx is added we've surpassed our desired priority
        // size, then we're done adding priority transactions.
        if (nBlockSize >= nBlockPrioritySize) {
            break;
        }

        // If we have dropped below the AllowFreeThreshold, then we're done
        // adding priority transactions.
        if (!AllowFree(actualPriority)) {
            break;
        }

        // This tx was successfully added, so add transactions that depend
        // on this one to the priority queue to try again.
        for (CTxMemPool::txiter child : mempool.GetMemPoolChildren(iter)) {
            waitPriIter wpiter = waitPriMap.find(child);
            if (wpiter == waitPriMap.end()) {
                continue;
            }

            vecPriority.push_back(TxCoinAgePriority(wpiter->second, child));
            std::push_heap(vecPriority.begin(), vecPriority.end(),
                           pricomparer);
            waitPriMap.erase(wpiter);
        }
    }
}

void IncrementExtraNonce(const Config &config, CBlock *pblock,
                         const CBlockIndex *pindexPrev,
                         unsigned int &nExtraNonce) {
    // Update nExtraNonce
    static uint256 hashPrevBlock;
    if (hashPrevBlock != pblock->hashPrevBlock) {
        nExtraNonce = 0;
        hashPrevBlock = pblock->hashPrevBlock;
    }

    ++nExtraNonce;
    // Height first in coinbase required for block.version=2
    unsigned int nHeight = pindexPrev->nHeight + 1;
    CMutableTransaction txCoinbase(*pblock->vtx[0]);
    txCoinbase.vin[0].scriptSig =
        (CScript() << nHeight << CScriptNum(nExtraNonce)
                   << getExcessiveBlockSizeSig(config)) +
        COINBASE_FLAGS;
    assert(txCoinbase.vin[0].scriptSig.size() <= MAX_COINBASE_SCRIPTSIG_SIZE);

    pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
    pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}
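IncrementExtraNonce keeps a function-local static of the last prev-block hash, so the extranonce restarts from 1 whenever mining moves to a new tip but keeps counting while regrinding the same template. A minimal model of just that reset logic, with a string standing in for uint256 (hypothetical types, not the real API):

    #include <cassert>
    #include <string>

    // Returns the extranonce to use for this mining attempt.
    unsigned int NextExtraNonce(const std::string &prevHash,
                                unsigned int &nExtraNonce) {
        static std::string lastPrevHash;
        if (lastPrevHash != prevHash) {
            nExtraNonce = 0; // new tip: restart the counter
            lastPrevHash = prevHash;
        }
        return ++nExtraNonce;
    }

    int main() {
        unsigned int n = 0;
        assert(NextExtraNonce("tipA", n) == 1);
        assert(NextExtraNonce("tipA", n) == 2);
        assert(NextExtraNonce("tipB", n) == 1); // reset on new tip
        return 0;
    }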
diff --git a/src/miner.h b/src/miner.h
index 030146cbc3..a1f4a2ad53 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -1,224 +1,227 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_MINER_H
#define BITCOIN_MINER_H

#include "primitives/block.h"
#include "txmempool.h"

#include "boost/multi_index/ordered_index.hpp"
#include "boost/multi_index_container.hpp"

#include
#include

class CBlockIndex;
class CChainParams;
class Config;
class CReserveKey;
class CScript;
class CWallet;

static const bool DEFAULT_PRINTPRIORITY = false;

struct CBlockTemplate {
    CBlock block;
    std::vector<Amount> vTxFees;
    std::vector<int64_t> vTxSigOpsCount;
};

// Container for tracking updates to ancestor feerate as we include (parent)
// transactions in a block
struct CTxMemPoolModifiedEntry {
    CTxMemPoolModifiedEntry(CTxMemPool::txiter entry) {
        iter = entry;
        nSizeWithAncestors = entry->GetSizeWithAncestors();
+       nBillableSizeWithAncestors = entry->GetBillableSizeWithAncestors();
        nModFeesWithAncestors = entry->GetModFeesWithAncestors();
        nSigOpCountWithAncestors = entry->GetSigOpCountWithAncestors();
    }

    CTxMemPool::txiter iter;
    uint64_t nSizeWithAncestors;
+   uint64_t nBillableSizeWithAncestors;
    Amount nModFeesWithAncestors;
    int64_t nSigOpCountWithAncestors;
};

/**
 * Comparator for CTxMemPool::txiter objects.
 * It simply compares the internal memory address of the CTxMemPoolEntry
 * object pointed to. This means it has no meaning, and is only useful for
 * using them as keys in other indexes.
 */
struct CompareCTxMemPoolIter {
    bool operator()(const CTxMemPool::txiter &a,
                    const CTxMemPool::txiter &b) const {
        return &(*a) < &(*b);
    }
};

struct modifiedentry_iter {
    typedef CTxMemPool::txiter result_type;
    result_type operator()(const CTxMemPoolModifiedEntry &entry) const {
        return entry.iter;
    }
};

// This matches the calculation in CompareTxMemPoolEntryByAncestorFee,
// except operating on CTxMemPoolModifiedEntry.
// TODO: refactor to avoid duplication of this logic.
struct CompareModifiedEntry {
    bool operator()(const CTxMemPoolModifiedEntry &a,
                    const CTxMemPoolModifiedEntry &b) const {
        double f1 = b.nSizeWithAncestors * (a.nModFeesWithAncestors / SATOSHI);
        double f2 = a.nSizeWithAncestors * (b.nModFeesWithAncestors / SATOSHI);
        if (f1 == f2) {
            return CTxMemPool::CompareIteratorByHash()(a.iter, b.iter);
        }
        return f1 > f2;
    }
};

// A comparator that sorts transactions based on number of ancestors.
// This is sufficient to sort an ancestor package in an order that is valid
// to appear in a block.
struct CompareTxIterByAncestorCount {
    bool operator()(const CTxMemPool::txiter &a,
                    const CTxMemPool::txiter &b) const {
        if (a->GetCountWithAncestors() != b->GetCountWithAncestors()) {
            return a->GetCountWithAncestors() < b->GetCountWithAncestors();
        }
        return CTxMemPool::CompareIteratorByHash()(a, b);
    }
};

typedef boost::multi_index_container<
    CTxMemPoolModifiedEntry,
    boost::multi_index::indexed_by<
        boost::multi_index::ordered_unique<modifiedentry_iter,
                                           CompareCTxMemPoolIter>,
        // sorted by modified ancestor fee rate
        boost::multi_index::ordered_non_unique<
            // Reuse same tag from CTxMemPool's similar index
            boost::multi_index::tag<ancestor_score>,
            boost::multi_index::identity<CTxMemPoolModifiedEntry>,
            CompareModifiedEntry>>>
    indexed_modified_transaction_set;

typedef indexed_modified_transaction_set::nth_index<0>::type::iterator
    modtxiter;
typedef indexed_modified_transaction_set::index<ancestor_score>::type::iterator
    modtxscoreiter;

struct update_for_parent_inclusion {
    update_for_parent_inclusion(CTxMemPool::txiter it) : iter(it) {}

    void operator()(CTxMemPoolModifiedEntry &e) {
        e.nModFeesWithAncestors -= iter->GetFee();
        e.nSizeWithAncestors -= iter->GetTxSize();
+       e.nBillableSizeWithAncestors -= iter->GetTxBillableSize();
        e.nSigOpCountWithAncestors -= iter->GetSigOpCount();
    }

    CTxMemPool::txiter iter;
};
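CompareModifiedEntry compares feerates without dividing: feeA/sizeA > feeB/sizeB is rewritten as sizeB*feeA > sizeA*feeB, which avoids precision loss on small quotients and is the same cross-multiplication trick the comment attributes to CompareTxMemPoolEntryByAncestorFee. A standalone check of the equivalence:

    #include <cassert>
    #include <cstdint>

    // True when package A's feerate beats package B's.
    static bool BetterFeeRate(int64_t feeA, uint64_t sizeA, int64_t feeB,
                              uint64_t sizeB) {
        // Cross-multiplied form of feeA / sizeA > feeB / sizeB.
        return double(sizeB) * feeA > double(sizeA) * feeB;
    }

    int main() {
        // 500 sat / 250 B = 2 sat/B beats 900 sat / 500 B = 1.8 sat/B:
        assert(BetterFeeRate(500, 250, 900, 500));
        assert(!BetterFeeRate(900, 500, 500, 250));
        return 0;
    }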
/** Generate a new block, without valid proof-of-work */
class BlockAssembler {
private:
    // The constructed block template
    std::unique_ptr<CBlockTemplate> pblocktemplate;
    // A convenience pointer that always refers to the CBlock in pblocktemplate
    CBlock *pblock;

    // Configuration parameters for the block size
    uint64_t nMaxGeneratedBlockSize;
    CFeeRate blockMinFeeRate;

    // Information on the current status of the block
    uint64_t nBlockSize;
    uint64_t nBlockTx;
    uint64_t nBlockSigOps;
    Amount nFees;
    CTxMemPool::setEntries inBlock;

    // Chain context for the block
    int nHeight;
    int64_t nLockTimeCutoff;
    int64_t nMedianTimePast;

    const Config *config;

    // Variables used for addPriorityTxs
    int lastFewTxs;

public:
    BlockAssembler(const Config &_config);

    /** Construct a new block template with coinbase to scriptPubKeyIn */
    std::unique_ptr<CBlockTemplate>
    CreateNewBlock(const CScript &scriptPubKeyIn);

    uint64_t GetMaxGeneratedBlockSize() const {
        return nMaxGeneratedBlockSize;
    }

private:
    // utility functions
    /** Clear the block's state and prepare for assembling a new block */
    void resetBlock();
    /** Add a tx to the block */
    void AddToBlock(CTxMemPool::txiter iter);

    // Methods for how to add transactions to a block.
    /** Add transactions based on tx "priority" */
    void addPriorityTxs();
    /**
     * Add transactions based on feerate including unconfirmed ancestors.
     * Increments nPackagesSelected / nDescendantsUpdated with corresponding
     * statistics from the package selection (for logging statistics).
     */
    void addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated);

    /** Enum for the results from TestForBlock */
    enum class TestForBlockResult : uint8_t {
        TXFits = 0,
        TXCantFit = 1,
        BlockFinished = 3,
    };

    // helper function for addPriorityTxs
    /** Test if tx will still "fit" in the block */
    TestForBlockResult TestForBlock(CTxMemPool::txiter iter);
    /** Test if tx still has unconfirmed parents not yet in block */
    bool isStillDependent(CTxMemPool::txiter iter);

    // helper functions for addPackageTxs()
    /** Remove confirmed (inBlock) entries from given set */
    void onlyUnconfirmed(CTxMemPool::setEntries &testSet);
    /** Test if a new package would "fit" in the block */
    bool TestPackage(uint64_t packageSize, int64_t packageSigOpsCost);
    /**
     * Perform checks on each transaction in a package:
     * locktime, serialized size (if necessary).
     * These checks should always succeed, and they're here only as an extra
     * check in case of suboptimal node configuration.
     */
    bool TestPackageTransactions(const CTxMemPool::setEntries &package);
    /**
     * Return true if given transaction from mapTx has already been evaluated,
     * or if the transaction's cached data in mapTx is incorrect.
     */
    bool SkipMapTxEntry(CTxMemPool::txiter it,
                        indexed_modified_transaction_set &mapModifiedTx,
                        CTxMemPool::setEntries &failedTx);
    /** Sort the package in an order that is valid to appear in a block */
    void SortForBlock(const CTxMemPool::setEntries &package,
                      CTxMemPool::txiter entry,
                      std::vector<CTxMemPool::txiter> &sortedEntries);
    /**
     * Add descendants of given transactions to mapModifiedTx with ancestor
     * state updated assuming given transactions are inBlock. Returns number
     * of updated descendants.
     */
    int UpdatePackagesForAdded(const CTxMemPool::setEntries &alreadyAdded,
                               indexed_modified_transaction_set &mapModifiedTx);
};

/** Modify the extranonce in a block */
void IncrementExtraNonce(const Config &config, CBlock *pblock,
                         const CBlockIndex *pindexPrev,
                         unsigned int &nExtraNonce);
int64_t UpdateTime(CBlockHeader *pblock, const Config &config,
                   const CBlockIndex *pindexPrev);

#endif // BITCOIN_MINER_H
#include "primitives/transaction.h" #include "hash.h" #include "tinyformat.h" #include "utilstrencodings.h" std::string COutPoint::ToString() const { return strprintf("COutPoint(%s, %u)", txid.ToString().substr(0, 10), n); } std::string CTxIn::ToString() const { std::string str; str += "CTxIn("; str += prevout.ToString(); if (prevout.IsNull()) { str += strprintf(", coinbase %s", HexStr(scriptSig)); } else { str += strprintf(", scriptSig=%s", HexStr(scriptSig).substr(0, 24)); } if (nSequence != SEQUENCE_FINAL) { str += strprintf(", nSequence=%u", nSequence); } str += ")"; return str; } std::string CTxOut::ToString() const { return strprintf("CTxOut(nValue=%d.%08d, scriptPubKey=%s)", nValue / COIN, (nValue % COIN) / SATOSHI, HexStr(scriptPubKey).substr(0, 30)); } CMutableTransaction::CMutableTransaction() : nVersion(CTransaction::CURRENT_VERSION), nLockTime(0) {} CMutableTransaction::CMutableTransaction(const CTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime) {} static uint256 ComputeCMutableTransactionHash(const CMutableTransaction &tx) { return SerializeHash(tx, SER_GETHASH, 0); } TxId CMutableTransaction::GetId() const { return TxId(ComputeCMutableTransactionHash(*this)); } TxHash CMutableTransaction::GetHash() const { return TxHash(ComputeCMutableTransactionHash(*this)); } uint256 CTransaction::ComputeHash() const { return SerializeHash(*this, SER_GETHASH, 0); } /** * For backward compatibility, the hash is initialized to 0. * TODO: remove the need for this default constructor entirely. */ CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0), hash() {} CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime), hash(ComputeHash()) {} CTransaction::CTransaction(CMutableTransaction &&tx) : nVersion(tx.nVersion), vin(std::move(tx.vin)), vout(std::move(tx.vout)), nLockTime(tx.nLockTime), hash(ComputeHash()) {} Amount CTransaction::GetValueOut() const { Amount nValueOut = Amount::zero(); for (std::vector::const_iterator it(vout.begin()); it != vout.end(); ++it) { nValueOut += it->nValue; if (!MoneyRange(it->nValue) || !MoneyRange(nValueOut)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } } return nValueOut; } double CTransaction::ComputePriority(double dPriorityInputs, unsigned int nTxSize) const { nTxSize = CalculateModifiedSize(nTxSize); if (nTxSize == 0) { return 0.0; } return dPriorityInputs / nTxSize; } unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const { // In order to avoid disincentivizing cleaning up the UTXO set we don't // count the constant overhead for each txin and up to 110 bytes of // scriptSig (which is enough to cover a compressed pubkey p2sh redemption) // for priority. Providing any more cleanup incentive than making additional // inputs free would risk encouraging people to create junk outputs to // redeem later. 
unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const {
    // In order to avoid disincentivizing cleaning up the UTXO set we don't
    // count the constant overhead for each txin and up to 110 bytes of
    // scriptSig (which is enough to cover a compressed pubkey p2sh
    // redemption) for priority. Providing any more cleanup incentive than
    // making additional inputs free would risk encouraging people to create
    // junk outputs to redeem later.
    if (nTxSize == 0) {
        nTxSize = GetTotalSize();
    }
    for (const auto &nVin : vin) {
        unsigned int offset =
            41U + std::min(110U, (unsigned int)nVin.scriptSig.size());
        if (nTxSize > offset) {
            nTxSize -= offset;
        }
    }
    return nTxSize;
}

+size_t CTransaction::GetBillableSize() const {
+    return ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
+}
+
unsigned int CTransaction::GetTotalSize() const {
    return ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
}

std::string CTransaction::ToString() const {
    std::string str;
    str += strprintf("CTransaction(txid=%s, ver=%d, vin.size=%u, vout.size=%u, "
                     "nLockTime=%u)\n",
                     GetId().ToString().substr(0, 10), nVersion, vin.size(),
                     vout.size(), nLockTime);
    for (const auto &nVin : vin) {
        str += "    " + nVin.ToString() + "\n";
    }
    for (const auto &nVout : vout) {
        str += "    " + nVout.ToString() + "\n";
    }
    return str;
}
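A quick arithmetic check of the modified-size rule above: each input sheds 41 bytes of fixed overhead plus up to 110 bytes of scriptSig. For a hypothetical 226-byte one-input transaction whose scriptSig is 107 bytes:

    #include <algorithm>
    #include <cassert>

    int main() {
        unsigned int nTxSize = 226;      // assumed serialized size
        unsigned int scriptSigLen = 107; // assumed scriptSig length
        unsigned int offset = 41U + std::min(110U, scriptSigLen);
        // offset = 41 + 107 = 148, so the size used for priority is 78:
        assert(nTxSize - offset == 78);
        return 0;
    }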
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index cadfdf96dd..daa18cbede 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -1,387 +1,391 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Copyright (c) 2017-2018 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_PRIMITIVES_TRANSACTION_H
#define BITCOIN_PRIMITIVES_TRANSACTION_H

#include "amount.h"
#include "feerate.h"
#include "primitives/txid.h"
#include "script/script.h"
#include "serialize.h"

static const int SERIALIZE_TRANSACTION = 0x00;

/**
 * An outpoint - a combination of a transaction hash and an index n into its
 * vout.
 */
class COutPoint {
private:
    TxId txid;
    uint32_t n;

public:
    COutPoint() : txid(), n(-1) {}
    COutPoint(uint256 txidIn, uint32_t nIn) : txid(TxId(txidIn)), n(nIn) {}

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        READWRITE(txid);
        READWRITE(n);
    }

    bool IsNull() const { return txid.IsNull() && n == uint32_t(-1); }

    const TxId &GetTxId() const { return txid; }
    uint32_t GetN() const { return n; }

    friend bool operator<(const COutPoint &a, const COutPoint &b) {
        int cmp = a.txid.Compare(b.txid);
        return cmp < 0 || (cmp == 0 && a.n < b.n);
    }

    friend bool operator==(const COutPoint &a, const COutPoint &b) {
        return (a.txid == b.txid && a.n == b.n);
    }

    friend bool operator!=(const COutPoint &a, const COutPoint &b) {
        return !(a == b);
    }

    std::string ToString() const;
};

/**
 * An input of a transaction. It contains the location of the previous
 * transaction's output that it claims and a signature that matches the
 * output's public key.
 */
class CTxIn {
public:
    COutPoint prevout;
    CScript scriptSig;
    uint32_t nSequence;

    /**
     * Setting nSequence to this value for every input in a transaction
     * disables nLockTime.
     */
    static const uint32_t SEQUENCE_FINAL = 0xffffffff;

    /* Below flags apply in the context of BIP 68 */
    /**
     * If this flag is set, CTxIn::nSequence is NOT interpreted as a relative
     * lock-time.
     */
    static const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31);

    /**
     * If CTxIn::nSequence encodes a relative lock-time and this flag is set,
     * the relative lock-time has units of 512 seconds, otherwise it specifies
     * blocks with a granularity of 1.
     */
    static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22);

    /**
     * If CTxIn::nSequence encodes a relative lock-time, this mask is applied
     * to extract that lock-time from the sequence field.
     */
    static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff;

    /**
     * In order to use the same number of bits to encode roughly the same
     * wall-clock duration, and because blocks are naturally limited to occur
     * every 600s on average, the minimum granularity for time-based relative
     * lock-time is fixed at 512 seconds. Converting from CTxIn::nSequence to
     * seconds is performed by multiplying by 512 = 2^9, or equivalently
     * shifting up by 9 bits.
     */
    static const int SEQUENCE_LOCKTIME_GRANULARITY = 9;

    CTxIn() { nSequence = SEQUENCE_FINAL; }

    explicit CTxIn(COutPoint prevoutIn, CScript scriptSigIn = CScript(),
                   uint32_t nSequenceIn = SEQUENCE_FINAL)
        : prevout(prevoutIn), scriptSig(scriptSigIn), nSequence(nSequenceIn) {}
    CTxIn(TxId prevTxId, uint32_t nOut, CScript scriptSigIn = CScript(),
          uint32_t nSequenceIn = SEQUENCE_FINAL)
        : CTxIn(COutPoint(prevTxId, nOut), scriptSigIn, nSequenceIn) {}

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        READWRITE(prevout);
        READWRITE(scriptSig);
        READWRITE(nSequence);
    }

    friend bool operator==(const CTxIn &a, const CTxIn &b) {
        return (a.prevout == b.prevout && a.scriptSig == b.scriptSig &&
                a.nSequence == b.nSequence);
    }

    friend bool operator!=(const CTxIn &a, const CTxIn &b) {
        return !(a == b);
    }

    std::string ToString() const;
};

/**
 * An output of a transaction. It contains the public key that the next input
 * must be able to sign with to claim it.
 */
class CTxOut {
public:
    Amount nValue;
    CScript scriptPubKey;

    CTxOut() { SetNull(); }

    CTxOut(Amount nValueIn, CScript scriptPubKeyIn)
        : nValue(nValueIn), scriptPubKey(scriptPubKeyIn) {}

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        READWRITE(nValue);
        READWRITE(scriptPubKey);
    }

    void SetNull() {
        nValue = -SATOSHI;
        scriptPubKey.clear();
    }

    bool IsNull() const { return nValue == -SATOSHI; }

    Amount GetDustThreshold(const CFeeRate &minRelayTxFee) const {
        /**
         * "Dust" is defined in terms of CTransaction::minRelayTxFee, which
         * has units satoshis-per-kilobyte. If you'd pay more than 1/3 in fees
         * to spend something, then we consider it dust. A typical spendable
         * non-segwit txout is 34 bytes big, and will need a CTxIn of at least
         * 148 bytes to spend: so dust is a spendable txout less than
         * 546*minRelayTxFee/1000 (in satoshis). A typical spendable segwit
         * txout is 31 bytes big, and will need a CTxIn of at least 67 bytes
         * to spend: so dust is a spendable txout less than
         * 294*minRelayTxFee/1000 (in satoshis).
         */
        if (scriptPubKey.IsUnspendable()) {
            return Amount::zero();
        }

        size_t nSize = GetSerializeSize(*this, SER_DISK, 0);
        // the 148 mentioned above
        nSize += (32 + 4 + 1 + 107 + 4);

        return 3 * minRelayTxFee.GetFee(nSize);
    }

    bool IsDust(const CFeeRate &minRelayTxFee) const {
        return (nValue < GetDustThreshold(minRelayTxFee));
    }

    friend bool operator==(const CTxOut &a, const CTxOut &b) {
        return (a.nValue == b.nValue && a.scriptPubKey == b.scriptPubKey);
    }

    friend bool operator!=(const CTxOut &a, const CTxOut &b) {
        return !(a == b);
    }

    std::string ToString() const;
};
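Plugging numbers into GetDustThreshold: a 34-byte txout plus the assumed 148-byte spending input gives 182 bytes, and 3 * 182 = 546, which is where the comment's 546*minRelayTxFee/1000 figure comes from. A check with a hypothetical 1000 sat/kB relay fee:

    #include <cassert>
    #include <cstdint>

    int main() {
        const int64_t minRelayFeePerK = 1000;         // assumed 1000 sat/kB
        int64_t nSize = 34 + 148;                     // txout + spending input
        int64_t fee = minRelayFeePerK * nSize / 1000; // = 182 sat
        assert(3 * fee == 546); // the dust threshold quoted in the comment
        return 0;
    }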
class CMutableTransaction;

/**
 * Basic transaction serialization format:
 * - int32_t nVersion
 * - std::vector<CTxIn> vin
 * - std::vector<CTxOut> vout
 * - uint32_t nLockTime
 */
template <typename Stream, typename TxType>
inline void UnserializeTransaction(TxType &tx, Stream &s) {
    s >> tx.nVersion;
    tx.vin.clear();
    tx.vout.clear();
    /* Try to read the vin. In case the dummy is there, this will be read as
     * an empty vector. */
    s >> tx.vin;
    /* We read a non-empty vin. Assume a normal vout follows. */
    s >> tx.vout;
    s >> tx.nLockTime;
}

template <typename Stream, typename TxType>
inline void SerializeTransaction(const TxType &tx, Stream &s) {
    s << tx.nVersion;
    s << tx.vin;
    s << tx.vout;
    s << tx.nLockTime;
}

/**
 * The basic transaction that is broadcasted on the network and contained in
 * blocks. A transaction can contain multiple inputs and outputs.
 */
class CTransaction {
public:
    // Default transaction version.
    static const int32_t CURRENT_VERSION = 2;

    // Changing the default transaction version requires a two step process:
    // first adapting relay policy by bumping MAX_STANDARD_VERSION, and then
    // later date bumping the default CURRENT_VERSION at which point both
    // CURRENT_VERSION and MAX_STANDARD_VERSION will be equal.
    static const int32_t MAX_STANDARD_VERSION = 2;

    // The local variables are made const to prevent unintended modification
    // without updating the cached hash value. However, CTransaction is not
    // actually immutable; deserialization and assignment are implemented,
    // and bypass the constness. This is safe, as they update the entire
    // structure, including the hash.
    const int32_t nVersion;
    const std::vector<CTxIn> vin;
    const std::vector<CTxOut> vout;
    const uint32_t nLockTime;

private:
    /** Memory only. */
    const uint256 hash;

    uint256 ComputeHash() const;

public:
    /** Construct a CTransaction that qualifies as IsNull() */
    CTransaction();

    /** Convert a CMutableTransaction into a CTransaction. */
    explicit CTransaction(const CMutableTransaction &tx);
    explicit CTransaction(CMutableTransaction &&tx);

    template <typename Stream> inline void Serialize(Stream &s) const {
        SerializeTransaction(*this, s);
    }

    /**
     * This deserializing constructor is provided instead of an Unserialize
     * method. Unserialize is not possible, since it would require
     * overwriting const fields.
     */
    template <typename Stream>
    CTransaction(deserialize_type, Stream &s)
        : CTransaction(CMutableTransaction(deserialize, s)) {}

    bool IsNull() const { return vin.empty() && vout.empty(); }

    const TxId GetId() const { return TxId(hash); }
    const TxHash GetHash() const { return TxHash(hash); }

    // Return sum of txouts.
    Amount GetValueOut() const;
    // GetValueIn() is a method on CCoinsViewCache, because
    // inputs must be known to compute value in.

    // Compute priority, given priority of inputs and (optionally) tx size
    double ComputePriority(double dPriorityInputs,
                           unsigned int nTxSize = 0) const;

    // Compute modified tx size for priority calculation (optionally given tx
    // size)
    unsigned int CalculateModifiedSize(unsigned int nTxSize = 0) const;

+   // Computes an adjusted tx size so that the UTXOs are billed partially
+   // upfront.
+   size_t GetBillableSize() const;
+
    /**
     * Get the total transaction size in bytes.
     * @return Total transaction size in bytes
     */
    unsigned int GetTotalSize() const;

    bool IsCoinBase() const {
        return (vin.size() == 1 && vin[0].prevout.IsNull());
    }

    friend bool operator==(const CTransaction &a, const CTransaction &b) {
        return a.hash == b.hash;
    }

    friend bool operator!=(const CTransaction &a, const CTransaction &b) {
        return a.hash != b.hash;
    }

    std::string ToString() const;
};

/**
 * A mutable version of CTransaction.
 */
class CMutableTransaction {
public:
    int32_t nVersion;
    std::vector<CTxIn> vin;
    std::vector<CTxOut> vout;
    uint32_t nLockTime;

    CMutableTransaction();
    CMutableTransaction(const CTransaction &tx);

    template <typename Stream> inline void Serialize(Stream &s) const {
        SerializeTransaction(*this, s);
    }

    template <typename Stream> inline void Unserialize(Stream &s) {
        UnserializeTransaction(*this, s);
    }

    template <typename Stream>
    CMutableTransaction(deserialize_type, Stream &s) {
        Unserialize(s);
    }
    /**
     * Compute the id and hash of this CMutableTransaction. This is computed
     * on the fly, as opposed to GetId() and GetHash() in CTransaction, which
     * use a cached result.
     */
    TxId GetId() const;
    TxHash GetHash() const;

    friend bool operator==(const CMutableTransaction &a,
                           const CMutableTransaction &b) {
        return a.GetId() == b.GetId();
    }
};

typedef std::shared_ptr<const CTransaction> CTransactionRef;
static inline CTransactionRef MakeTransactionRef() {
    return std::make_shared<const CTransaction>();
}
template <typename Tx>
static inline CTransactionRef MakeTransactionRef(Tx &&txIn) {
    return std::make_shared<const CTransaction>(std::forward<Tx>(txIn));
}

/** Precompute sighash midstate to avoid quadratic hashing */
struct PrecomputedTransactionData {
    uint256 hashPrevouts, hashSequence, hashOutputs;

    PrecomputedTransactionData()
        : hashPrevouts(), hashSequence(), hashOutputs() {}

    PrecomputedTransactionData(const PrecomputedTransactionData &txdata)
        : hashPrevouts(txdata.hashPrevouts), hashSequence(txdata.hashSequence),
          hashOutputs(txdata.hashOutputs) {}

    PrecomputedTransactionData(const CTransaction &tx);
};

#endif // BITCOIN_PRIMITIVES_TRANSACTION_H
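As introduced by this patch, GetBillableSize() is effectively a placeholder: it returns the same serialized size as GetTotalSize(), and the new comment signals that billing is meant to diverge later by charging part of the output cost upfront. A standalone mimic of today's relationship (the real methods live on CTransaction):

    #include <cassert>
    #include <cstddef>

    struct TxSizes {
        size_t serialized;
        size_t GetTotalSize() const { return serialized; }
        size_t GetBillableSize() const { return serialized; } // placeholder
    };

    int main() {
        TxSizes tx{226};
        // With this patch both calls report the same size; later changes can
        // make the billable size diverge from the serialized size.
        assert(tx.GetBillableSize() == tx.GetTotalSize());
        return 0;
    }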
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index d7f9dee665..9deb4e1f0a 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -1,780 +1,796 @@
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "policy/policy.h"
#include "txmempool.h"
#include "util.h"

#include "test/test_bitcoin.h"

#include
#include
#include
#include

BOOST_FIXTURE_TEST_SUITE(mempool_tests, TestingSetup)

BOOST_AUTO_TEST_CASE(TestPackageAccounting) {
    CTxMemPool testPool;
    TestMemPoolEntryHelper entry;
    CMutableTransaction parentOfAll;

    std::vector<COutPoint> outpoints;
    const size_t maxOutputs = 3;

    // Construct a parent for the rest of the chain
    parentOfAll.vin.resize(1);
    parentOfAll.vin[0].scriptSig = CScript();
    // Give us a couple outpoints so we can spend them
    for (size_t i = 0; i < maxOutputs; i++) {
        parentOfAll.vout.emplace_back(10 * SATOSHI, CScript() << OP_TRUE);
    }
    TxId parentOfAllId = parentOfAll.GetId();
    testPool.addUnchecked(parentOfAllId, entry.FromTx(parentOfAll));

    // Add some outpoints to the tracking vector
    for (size_t i = 0; i < maxOutputs; i++) {
        outpoints.emplace_back(COutPoint(parentOfAllId, i));
    }

    Amount totalFee = Amount::zero();
    size_t totalSize = CTransaction(parentOfAll).GetTotalSize();
+   size_t totalBillableSize = CTransaction(parentOfAll).GetBillableSize();

    // Generate 100 transactions
    for (size_t totalTransactions = 0; totalTransactions < 100;
         totalTransactions++) {
        CMutableTransaction mtx;

        uint64_t minAncestors = std::numeric_limits<uint64_t>::max();
        uint64_t maxAncestors = 0;
        Amount minFees = MAX_MONEY;
        Amount maxFees = Amount::zero();
        uint64_t minSize = std::numeric_limits<uint64_t>::max();
        uint64_t maxSize = 0;
+       uint64_t minBillableSize = std::numeric_limits<uint64_t>::max();
+       uint64_t maxBillableSize = 0;

        // Consume random inputs, but make sure we don't consume more than
        // available
        for (size_t input = std::min(InsecureRandRange(maxOutputs) + 1,
                                     uint64_t(outpoints.size()));
             input > 0; input--) {
            std::swap(outpoints[InsecureRandRange(outpoints.size())],
                      outpoints.back());
            mtx.vin.emplace_back(outpoints.back());
            outpoints.pop_back();

            // We don't know exactly how many ancestors this transaction has
            // due to possible duplicates. Calculate a valid range based on
            // parents.
            CTxMemPoolEntry parent =
                *testPool.mapTx.find(mtx.vin.back().prevout.GetTxId());

            minAncestors =
                std::min(minAncestors, parent.GetCountWithAncestors());
            maxAncestors += parent.GetCountWithAncestors();
            minFees = std::min(minFees, parent.GetModFeesWithAncestors());
            maxFees += parent.GetModFeesWithAncestors();
            minSize = std::min(minSize, parent.GetSizeWithAncestors());
            maxSize += parent.GetSizeWithAncestors();
+           minBillableSize = std::min(minBillableSize,
+                                      parent.GetBillableSizeWithAncestors());
+           maxBillableSize += parent.GetBillableSizeWithAncestors();
        }

        // Produce random number of outputs
        for (size_t output = InsecureRandRange(maxOutputs) + 1; output > 0;
             output--) {
            mtx.vout.emplace_back(10 * SATOSHI, CScript() << OP_TRUE);
        }

        CTransaction tx(mtx);
        TxId curId = tx.GetId();

        // Record the outputs
        for (size_t output = tx.vout.size(); output > 0; output--) {
            outpoints.emplace_back(COutPoint(curId, output));
        }

        Amount randFee = int64_t(InsecureRandRange(300)) * SATOSHI;

        testPool.addUnchecked(curId, entry.Fee(randFee).FromTx(tx));

        // Add this transaction to the totals.
        minAncestors += 1;
        maxAncestors += 1;
        minFees += randFee;
        maxFees += randFee;
        minSize += CTransaction(tx).GetTotalSize();
        maxSize += CTransaction(tx).GetTotalSize();
+       minBillableSize += CTransaction(tx).GetBillableSize();
+       maxBillableSize += CTransaction(tx).GetBillableSize();

        // Calculate overall values
        totalFee += randFee;
        totalSize += CTransaction(tx).GetTotalSize();
+       totalBillableSize += CTransaction(tx).GetBillableSize();
        CTxMemPoolEntry parentEntry = *testPool.mapTx.find(parentOfAllId);
        CTxMemPoolEntry latestEntry = *testPool.mapTx.find(curId);

        // Ensure values are within the expected ranges
        BOOST_CHECK(latestEntry.GetCountWithAncestors() >= minAncestors);
        BOOST_CHECK(latestEntry.GetCountWithAncestors() <= maxAncestors);

        BOOST_CHECK(latestEntry.GetSizeWithAncestors() >= minSize);
        BOOST_CHECK(latestEntry.GetSizeWithAncestors() <= maxSize);

+       BOOST_CHECK(latestEntry.GetBillableSizeWithAncestors() >=
+                   minBillableSize);
+       BOOST_CHECK(latestEntry.GetBillableSizeWithAncestors() <=
+                   maxBillableSize);
+
        BOOST_CHECK(latestEntry.GetModFeesWithAncestors() >= minFees);
        BOOST_CHECK(latestEntry.GetModFeesWithAncestors() <= maxFees);

        BOOST_CHECK_EQUAL(parentEntry.GetCountWithDescendants(),
                          testPool.mapTx.size());
        BOOST_CHECK_EQUAL(parentEntry.GetSizeWithDescendants(), totalSize);
+       BOOST_CHECK_EQUAL(parentEntry.GetBillableSizeWithDescendants(),
+                         totalBillableSize);
        BOOST_CHECK_EQUAL(parentEntry.GetModFeesWithDescendants(), totalFee);
    }
}
BOOST_AUTO_TEST_CASE(MempoolRemoveTest) {
    // Test CTxMemPool::remove functionality
    TestMemPoolEntryHelper entry;
    // Parent transaction with three children, and three grand-children:
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (int i = 0; i < 3; i++) {
        txParent.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[i].nValue = 33000 * SATOSHI;
    }

    CMutableTransaction txChild[3];
    for (int i = 0; i < 3; i++) {
        txChild[i].vin.resize(1);
        txChild[i].vin[0].scriptSig = CScript() << OP_11;
        txChild[i].vin[0].prevout = COutPoint(txParent.GetId(), i);
        txChild[i].vout.resize(1);
        txChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txChild[i].vout[0].nValue = 11000 * SATOSHI;
    }

    CMutableTransaction txGrandChild[3];
    for (int i = 0; i < 3; i++) {
        txGrandChild[i].vin.resize(1);
        txGrandChild[i].vin[0].scriptSig = CScript() << OP_11;
        txGrandChild[i].vin[0].prevout = COutPoint(txChild[i].GetId(), 0);
        txGrandChild[i].vout.resize(1);
        txGrandChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txGrandChild[i].vout[0].nValue = 11000 * SATOSHI;
    }

    CTxMemPool testPool;

    // Nothing in pool, remove should do nothing:
    unsigned int poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);

    // Just the parent:
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 1);

    // Parent, children, grandchildren:
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    for (int i = 0; i < 3; i++) {
        testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i]));
        testPool.addUnchecked(txGrandChild[i].GetId(),
                              entry.FromTx(txGrandChild[i]));
    }
    // Remove Child[0], GrandChild[0] should be removed:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 2);
    // ... make sure grandchild and child are gone:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txGrandChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);
    // Remove parent, all children/grandchildren should go:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 5);
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);

    // Add children and grandchildren, but NOT the parent (simulate the
    // parent being in a block)
    for (int i = 0; i < 3; i++) {
        testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i]));
        testPool.addUnchecked(txGrandChild[i].GetId(),
                              entry.FromTx(txGrandChild[i]));
    }

    // Now remove the parent, as might happen if a block-re-org occurs but
    // the parent cannot be put into the mempool (maybe because it is
    // non-standard):
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 6);
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);
}

BOOST_AUTO_TEST_CASE(MempoolClearTest) {
    // Test CTxMemPool::clear functionality
    TestMemPoolEntryHelper entry;
    // Create a transaction
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (int i = 0; i < 3; i++) {
        txParent.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[i].nValue = 33000 * SATOSHI;
    }

    CTxMemPool testPool;

    // Nothing in pool, clear should do nothing:
    testPool.clear();
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);

    // Add the transaction
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.mapTx.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 1UL);

    // CTxMemPool's members should be empty after a clear
    testPool.clear();
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.mapTx.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 0UL);
}
template <typename name>
void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder,
               std::string &&testcase) {
    BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size());
    typename CTxMemPool::indexed_transaction_set::index<name>::type::iterator
        it = pool.mapTx.get<name>().begin();
    int count = 0;
    for (; it != pool.mapTx.get<name>().end(); ++it, ++count) {
        BOOST_CHECK_MESSAGE(it->GetTx().GetId().ToString() ==
                                sortedOrder[count],
                            it->GetTx().GetId().ToString()
                                << " != " << sortedOrder[count] << " in test "
                                << testcase << ":" << count);
    }
}

BOOST_AUTO_TEST_CASE(MempoolIndexingTest) {
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;

    /* 3rd highest fee */
    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1));

    /* highest fee */
    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx2.vout[0].nValue = 2 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2));

    /* lowest fee */
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx3.vout[0].nValue = 5 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vout.resize(1);
    tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx4.vout[0].nValue = 6 * COIN;
    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vout.resize(1);
    tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx5.vout[0].nValue = 11 * COIN;
    entry.nTime = 1;
    entry.dPriority = 10.0;
    pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5));
    BOOST_CHECK_EQUAL(pool.size(), 5UL);

    std::vector<std::string> sortedOrder;
    sortedOrder.resize(5);
    sortedOrder[0] = tx3.GetId().ToString(); // 0
    sortedOrder[1] = tx5.GetId().ToString(); // 10000
    sortedOrder[2] = tx1.GetId().ToString(); // 10000
    sortedOrder[3] = tx4.GetId().ToString(); // 15000
    sortedOrder[4] = tx2.GetId().ToString(); // 20000
    CheckSort(pool, sortedOrder, "MempoolIndexingTest1");

    /* low fee but with high fee child */
    /* tx6 -> tx7 -> tx8, tx9 -> tx10 */
    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vout.resize(1);
    tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx6.vout[0].nValue = 20 * COIN;
    pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6));
    BOOST_CHECK_EQUAL(pool.size(), 6UL);
    // Check that at this point, tx6 is sorted low
    sortedOrder.insert(sortedOrder.begin(), tx6.GetId().ToString());
    CheckSort(pool, sortedOrder, "MempoolIndexingTest2");

    CTxMemPool::setEntries setAncestors;
    setAncestors.insert(pool.mapTx.find(tx6.GetId()));
    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(1);
    tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_11;
    tx7.vout.resize(2);
    tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    tx7.vout[1].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[1].nValue = 1 * COIN;

    CTxMemPool::setEntries setAncestorsCalculated;
    std::string dummy;
    BOOST_CHECK_EQUAL(
        pool.CalculateMemPoolAncestors(
            entry.Fee(2000000 * SATOSHI).FromTx(tx7), setAncestorsCalculated,
            100, 1000000, 1000, 1000000, dummy),
        true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx7.GetId(), entry.FromTx(tx7), setAncestors);
    BOOST_CHECK_EQUAL(pool.size(), 7UL);

    // Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ...
    sortedOrder.erase(sortedOrder.begin());
    sortedOrder.push_back(tx6.GetId().ToString());
    sortedOrder.push_back(tx7.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest3");

    /* low fee child of tx7 */
    CMutableTransaction tx8 = CMutableTransaction();
    tx8.vin.resize(1);
    tx8.vin[0].prevout = COutPoint(tx7.GetId(), 0);
    tx8.vin[0].scriptSig = CScript() << OP_11;
    tx8.vout.resize(1);
    tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx8.vout[0].nValue = 10 * COIN;
    setAncestors.insert(pool.mapTx.find(tx7.GetId()));
    pool.addUnchecked(tx8.GetId(),
                      entry.Fee(Amount::zero()).Time(2).FromTx(tx8),
                      setAncestors);

    // Now tx8 should be sorted low, but tx6/tx7 both high
    sortedOrder.insert(sortedOrder.begin(), tx8.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest4");

    /* low fee child of tx7 */
    CMutableTransaction tx9 = CMutableTransaction();
    tx9.vin.resize(1);
    tx9.vin[0].prevout = COutPoint(tx7.GetId(), 1);
    tx9.vin[0].scriptSig = CScript() << OP_11;
    tx9.vout.resize(1);
    tx9.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx9.vout[0].nValue = 1 * COIN;
    pool.addUnchecked(tx9.GetId(),
                      entry.Fee(Amount::zero()).Time(3).FromTx(tx9),
                      setAncestors);

    // tx9 should be sorted low
    BOOST_CHECK_EQUAL(pool.size(), 9UL);
    sortedOrder.insert(sortedOrder.begin(), tx9.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest5");

    std::vector<std::string> snapshotOrder = sortedOrder;

    setAncestors.insert(pool.mapTx.find(tx8.GetId()));
    setAncestors.insert(pool.mapTx.find(tx9.GetId()));
    /* tx10 depends on tx8 and tx9 and has a high fee */
    CMutableTransaction tx10 = CMutableTransaction();
    tx10.vin.resize(2);
    tx10.vin[0].prevout = COutPoint(tx8.GetId(), 0);
    tx10.vin[0].scriptSig = CScript() << OP_11;
    tx10.vin[1].prevout = COutPoint(tx9.GetId(), 0);
    tx10.vin[1].scriptSig = CScript() << OP_11;
    tx10.vout.resize(1);
    tx10.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx10.vout[0].nValue = 10 * COIN;

    setAncestorsCalculated.clear();
    BOOST_CHECK_EQUAL(pool.CalculateMemPoolAncestors(
                          entry.Fee(200000 * SATOSHI).Time(4).FromTx(tx10),
                          setAncestorsCalculated, 100, 1000000, 1000, 1000000,
                          dummy),
                      true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx10.GetId(), entry.FromTx(tx10), setAncestors);

    /**
     * tx8 and tx9 should both now be sorted higher
     * Final order after tx10 is added:
     *
     * tx3 = 0 (1)
     * tx5 = 10000 (1)
     * tx1 = 10000 (1)
     * tx4 = 15000 (1)
     * tx2 = 20000 (1)
     * tx9 = 200k (2 txs)
     * tx8 = 200k (2 txs)
     * tx10 = 200k (1 tx)
     * tx6 = 2.2M (5 txs)
     * tx7 = 2.2M (4 txs)
     */
    // take out tx9, tx8 from the beginning
    sortedOrder.erase(sortedOrder.begin(), sortedOrder.begin() + 2);
    sortedOrder.insert(sortedOrder.begin() + 5, tx9.GetId().ToString());
    sortedOrder.insert(sortedOrder.begin() + 6, tx8.GetId().ToString());
    // tx10 is just before tx6
    sortedOrder.insert(sortedOrder.begin() + 7, tx10.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest6");

    // there should be 10 transactions in the mempool
    BOOST_CHECK_EQUAL(pool.size(), 10UL);

    // Now try removing tx10 and verify the sort order returns to normal
    pool.removeRecursive(pool.mapTx.find(tx10.GetId())->GetTx());
    CheckSort<descendant_score>(pool, snapshotOrder, "MempoolIndexingTest7");

    pool.removeRecursive(pool.mapTx.find(tx9.GetId())->GetTx());
    pool.removeRecursive(pool.mapTx.find(tx8.GetId())->GetTx());
    /* Now check the sort on the mining score index.
* Final order should be: * * tx7 (2M) * tx2 (20k) * tx4 (15000) * tx1/tx5 (10000) * tx3/6 (0) * (Ties resolved by hash) */ sortedOrder.clear(); sortedOrder.push_back(tx7.GetId().ToString()); sortedOrder.push_back(tx2.GetId().ToString()); sortedOrder.push_back(tx4.GetId().ToString()); if (tx1.GetId() < tx5.GetId()) { sortedOrder.push_back(tx5.GetId().ToString()); sortedOrder.push_back(tx1.GetId().ToString()); } else { sortedOrder.push_back(tx1.GetId().ToString()); sortedOrder.push_back(tx5.GetId().ToString()); } if (tx3.GetId() < tx6.GetId()) { sortedOrder.push_back(tx6.GetId().ToString()); sortedOrder.push_back(tx3.GetId().ToString()); } else { sortedOrder.push_back(tx3.GetId().ToString()); sortedOrder.push_back(tx6.GetId().ToString()); } CheckSort(pool, sortedOrder, "MempoolIndexingTest8"); } BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; /* 3rd highest fee */ CMutableTransaction tx1 = CMutableTransaction(); tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx1.GetId(), entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1)); /* highest fee */ CMutableTransaction tx2 = CMutableTransaction(); tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx2.vout[0].nValue = 2 * COIN; pool.addUnchecked(tx2.GetId(), entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2)); uint64_t tx2Size = CTransaction(tx2).GetTotalSize(); /* lowest fee */ CMutableTransaction tx3 = CMutableTransaction(); tx3.vout.resize(1); tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx3.vout[0].nValue = 5 * COIN; pool.addUnchecked(tx3.GetId(), entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3)); /* 2nd highest fee */ CMutableTransaction tx4 = CMutableTransaction(); tx4.vout.resize(1); tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx4.vout[0].nValue = 6 * COIN; pool.addUnchecked(tx4.GetId(), entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4)); /* equal fee rate to tx1, but newer */ CMutableTransaction tx5 = CMutableTransaction(); tx5.vout.resize(1); tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx5.vout[0].nValue = 11 * COIN; pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5)); BOOST_CHECK_EQUAL(pool.size(), 5UL); std::vector sortedOrder; sortedOrder.resize(5); sortedOrder[0] = tx2.GetId().ToString(); // 20000 sortedOrder[1] = tx4.GetId().ToString(); // 15000 // tx1 and tx5 are both 10000 // Ties are broken by hash, not timestamp, so determine which hash comes // first. 
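// (A brief aside, hedged: TxId is a thin wrapper around uint256, and its
// operator< is a plain byte-wise comparison, so which of two txids sorts
// first depends only on the hash values themselves. That is why the expected
// order is computed at runtime below rather than hard-coded.)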
if (tx1.GetId() < tx5.GetId()) { sortedOrder[2] = tx1.GetId().ToString(); sortedOrder[3] = tx5.GetId().ToString(); } else { sortedOrder[2] = tx5.GetId().ToString(); sortedOrder[3] = tx1.GetId().ToString(); } sortedOrder[4] = tx3.GetId().ToString(); // 0 CheckSort(pool, sortedOrder, "MempoolAncestorIndexingTest1"); /* low fee parent with high fee child */ /* tx6 (0) -> tx7 (high) */ CMutableTransaction tx6 = CMutableTransaction(); tx6.vout.resize(1); tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx6.vout[0].nValue = 20 * COIN; uint64_t tx6Size = CTransaction(tx6).GetTotalSize(); pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6)); BOOST_CHECK_EQUAL(pool.size(), 6UL); // Ties are broken by hash if (tx3.GetId() < tx6.GetId()) { sortedOrder.push_back(tx6.GetId().ToString()); } else { sortedOrder.insert(sortedOrder.end() - 1, tx6.GetId().ToString()); } CheckSort(pool, sortedOrder, "MempoolAncestorIndexingTest2"); CMutableTransaction tx7 = CMutableTransaction(); tx7.vin.resize(1); tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0); tx7.vin[0].scriptSig = CScript() << OP_11; tx7.vout.resize(1); tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx7.vout[0].nValue = 10 * COIN; uint64_t tx7Size = CTransaction(tx7).GetTotalSize(); /* set the fee to just below tx2's feerate when including ancestor */ Amount fee = int64_t((20000 / tx2Size) * (tx7Size + tx6Size) - 1) * SATOSHI; // CTxMemPoolEntry entry7(tx7, fee, 2, 10.0, 1, true); pool.addUnchecked(tx7.GetId(), entry.Fee(Amount(fee)).FromTx(tx7)); BOOST_CHECK_EQUAL(pool.size(), 7UL); sortedOrder.insert(sortedOrder.begin() + 1, tx7.GetId().ToString()); CheckSort(pool, sortedOrder, "MempoolAncestorIndexingTest3"); /* after tx6 is mined, tx7 should move up in the sort */ std::vector vtx; vtx.push_back(MakeTransactionRef(tx6)); pool.removeForBlock(vtx, 1); sortedOrder.erase(sortedOrder.begin() + 1); // Ties are broken by hash if (tx3.GetId() < tx6.GetId()) { sortedOrder.pop_back(); } else { sortedOrder.erase(sortedOrder.end() - 2); } sortedOrder.insert(sortedOrder.begin(), tx7.GetId().ToString()); CheckSort(pool, sortedOrder, "MempoolAncestorIndexingTest4"); } BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; entry.dPriority = 10.0; Amount feeIncrement = MEMPOOL_FULL_FEE_INCREMENT.GetFeePerK(); CMutableTransaction tx1 = CMutableTransaction(); tx1.vin.resize(1); tx1.vin[0].scriptSig = CScript() << OP_1; tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx1.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx1, &pool)); CMutableTransaction tx2 = CMutableTransaction(); tx2.vin.resize(1); tx2.vin[0].scriptSig = CScript() << OP_2; tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL; tx2.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx2.GetId(), entry.Fee(5000 * SATOSHI).FromTx(tx2, &pool)); // should do nothing pool.TrimToSize(pool.DynamicMemoryUsage()); BOOST_CHECK(pool.exists(tx1.GetId())); BOOST_CHECK(pool.exists(tx2.GetId())); // should remove the lower-feerate transaction pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); BOOST_CHECK(pool.exists(tx1.GetId())); BOOST_CHECK(!pool.exists(tx2.GetId())); pool.addUnchecked(tx2.GetId(), entry.FromTx(tx2, &pool)); CMutableTransaction tx3 = CMutableTransaction(); tx3.vin.resize(1); tx3.vin[0].prevout = COutPoint(tx2.GetId(), 0); tx3.vin[0].scriptSig = CScript() << OP_2; tx3.vout.resize(1); tx3.vout[0].scriptPubKey = CScript() << OP_3 << 
OP_EQUAL;
    tx3.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(20000 * SATOSHI).FromTx(tx3, &pool));

    // tx3 should pay for tx2 (CPFP)
    pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
    BOOST_CHECK(!pool.exists(tx1.GetId()));
    BOOST_CHECK(pool.exists(tx2.GetId()));
    BOOST_CHECK(pool.exists(tx3.GetId()));

    // mempool is limited to tx1's size in memory usage, so nothing fits
    pool.TrimToSize(CTransaction(tx1).GetTotalSize());
    BOOST_CHECK(!pool.exists(tx1.GetId()));
    BOOST_CHECK(!pool.exists(tx2.GetId()));
    BOOST_CHECK(!pool.exists(tx3.GetId()));

    CFeeRate maxFeeRateRemoved(25000 * SATOSHI,
                               CTransaction(tx3).GetTotalSize() +
                                   CTransaction(tx2).GetTotalSize());
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      maxFeeRateRemoved.GetFeePerK() + feeIncrement);

    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vin.resize(2);
    tx4.vin[0].prevout = COutPoint();
    tx4.vin[0].scriptSig = CScript() << OP_4;
    tx4.vin[1].prevout = COutPoint();
    tx4.vin[1].scriptSig = CScript() << OP_4;
    tx4.vout.resize(2);
    tx4.vout[0].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
    tx4.vout[0].nValue = 10 * COIN;
    tx4.vout[1].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
    tx4.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vin.resize(2);
    tx5.vin[0].prevout = COutPoint(tx4.GetId(), 0);
    tx5.vin[0].scriptSig = CScript() << OP_4;
    tx5.vin[1].prevout = COutPoint();
    tx5.vin[1].scriptSig = CScript() << OP_5;
    tx5.vout.resize(2);
    tx5.vout[0].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
    tx5.vout[0].nValue = 10 * COIN;
    tx5.vout[1].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
    tx5.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vin.resize(2);
    tx6.vin[0].prevout = COutPoint(tx4.GetId(), 1);
    tx6.vin[0].scriptSig = CScript() << OP_4;
    tx6.vin[1].prevout = COutPoint();
    tx6.vin[1].scriptSig = CScript() << OP_6;
    tx6.vout.resize(2);
    tx6.vout[0].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
    tx6.vout[0].nValue = 10 * COIN;
    tx6.vout[1].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
    tx6.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(2);
    tx7.vin[0].prevout = COutPoint(tx5.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_5;
    tx7.vin[1].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[1].scriptSig = CScript() << OP_6;
    tx7.vout.resize(2);
    tx7.vout[0].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
    tx7.vout[1].nValue = 10 * COIN;

    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(7000 * SATOSHI).FromTx(tx4, &pool));
    pool.addUnchecked(tx5.GetId(),
                      entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    pool.addUnchecked(tx6.GetId(),
                      entry.Fee(1100 * SATOSHI).FromTx(tx6, &pool));
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    // We only require this to remove, at max, 2 txn, because it's not clear
    // what we're really optimizing for aside from that.
    pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
    BOOST_CHECK(pool.exists(tx4.GetId()));
    BOOST_CHECK(pool.exists(tx6.GetId()));
    BOOST_CHECK(!pool.exists(tx7.GetId()));

    if (!pool.exists(tx5.GetId()))
        pool.addUnchecked(tx5.GetId(),
                          entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    // should maximize mempool size by only removing 5/7
    pool.TrimToSize(pool.DynamicMemoryUsage() / 2);
    BOOST_CHECK(pool.exists(tx4.GetId()));
    BOOST_CHECK(!pool.exists(tx5.GetId()));
    BOOST_CHECK(pool.exists(tx6.GetId()));
    BOOST_CHECK(!pool.exists(tx7.GetId()));

    pool.addUnchecked(tx5.GetId(),
                      entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    std::vector<CTransactionRef> vtx;
    SetMockTime(42);
    SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      maxFeeRateRemoved.GetFeePerK() + feeIncrement);
    // ... we should keep the same min fee until we get a block
    pool.removeForBlock(vtx, 1);
    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE);
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 2);
    // ... then feerate should drop 1/2 each halflife

    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 2);
    BOOST_CHECK_EQUAL(
        pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(),
        (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 4);
    // ... with a 1/2 halflife when mempool is < 1/2 its target size

    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 2 +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 4);
    BOOST_CHECK_EQUAL(
        pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(),
        (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 8 + SATOSHI);
    // ... with a 1/4 halflife when mempool is < 1/4 its target size

    SetMockTime(0);
}

BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 06cf63316a..71375df5c4 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -1,758 +1,758 @@
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Copyright (c) 2017-2018 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "miner.h" #include "chainparams.h" #include "coins.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/tx_verify.h" #include "consensus/validation.h" #include "policy/policy.h" #include "pubkey.h" #include "script/standard.h" #include "txmempool.h" #include "uint256.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include "test/test_bitcoin.h" #include #include BOOST_FIXTURE_TEST_SUITE(miner_tests, TestingSetup) static CFeeRate blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB); static struct { uint8_t extranonce; uint32_t nonce; } blockinfo[] = { {4, 0xa4a3e223}, {2, 0x15c32f9e}, {1, 0x0375b547}, {1, 0x7004a8a5}, {2, 0xce440296}, {2, 0x52cfe198}, {1, 0x77a72cd0}, {2, 0xbb5d6f84}, {2, 0x83f30c2c}, {1, 0x48a73d5b}, {1, 0xef7dcd01}, {2, 0x6809c6c4}, {2, 0x0883ab3c}, {1, 0x087bbbe2}, {2, 0x2104a814}, {2, 0xdffb6daa}, {1, 0xee8a0a08}, {2, 0xba4237c1}, {1, 0xa70349dc}, {1, 0x344722bb}, {3, 0xd6294733}, {2, 0xec9f5c94}, {2, 0xca2fbc28}, {1, 0x6ba4f406}, {2, 0x015d4532}, {1, 0x6e119b7c}, {2, 0x43e8f314}, {2, 0x27962f38}, {2, 0xb571b51b}, {2, 0xb36bee23}, {2, 0xd17924a8}, {2, 0x6bc212d9}, {1, 0x630d4948}, {2, 0x9a4c4ebb}, {2, 0x554be537}, {1, 0xd63ddfc7}, {2, 0xa10acc11}, {1, 0x759a8363}, {2, 0xfb73090d}, {1, 0xe82c6a34}, {1, 0xe33e92d7}, {3, 0x658ef5cb}, {2, 0xba32ff22}, {5, 0x0227a10c}, {1, 0xa9a70155}, {5, 0xd096d809}, {1, 0x37176174}, {1, 0x830b8d0f}, {1, 0xc6e3910e}, {2, 0x823f3ca8}, {1, 0x99850849}, {1, 0x7521fb81}, {1, 0xaacaabab}, {1, 0xd645a2eb}, {5, 0x7aea1781}, {5, 0x9d6e4b78}, {1, 0x4ce90fd8}, {1, 0xabdc832d}, {6, 0x4a34f32a}, {2, 0xf2524c1c}, {2, 0x1bbeb08a}, {1, 0xad47f480}, {1, 0x9f026aeb}, {1, 0x15a95049}, {2, 0xd1cb95b2}, {2, 0xf84bbda5}, {1, 0x0fa62cd1}, {1, 0xe05f9169}, {1, 0x78d194a9}, {5, 0x3e38147b}, {5, 0x737ba0d4}, {1, 0x63378e10}, {1, 0x6d5f91cf}, {2, 0x88612eb8}, {2, 0xe9639484}, {1, 0xb7fabc9d}, {2, 0x19b01592}, {1, 0x5a90dd31}, {2, 0x5bd7e028}, {2, 0x94d00323}, {1, 0xa9b9c01a}, {1, 0x3a40de61}, {1, 0x56e7eec7}, {5, 0x859f7ef6}, {1, 0xfd8e5630}, {1, 0x2b0c9f7f}, {1, 0xba700e26}, {1, 0x7170a408}, {1, 0x70de86a8}, {1, 0x74d64cd5}, {1, 0x49e738a1}, {2, 0x6910b602}, {0, 0x643c565f}, {1, 0x54264b3f}, {2, 0x97ea6396}, {2, 0x55174459}, {2, 0x03e8779a}, {1, 0x98f34d8f}, {1, 0xc07b2b07}, {1, 0xdfe29668}, {1, 0x3141c7c1}, {1, 0xb3b595f4}, {1, 0x735abf08}, {5, 0x623bfbce}, {2, 0xd351e722}, {1, 0xf4ca48c9}, {1, 0x5b19c670}, {1, 0xa164bf0e}, {2, 0xbbbeb305}, {2, 0xfe1c810a}, }; CBlockIndex CreateBlockIndex(int nHeight) { CBlockIndex index; index.nHeight = nHeight; index.pprev = chainActive.Tip(); return index; } bool TestSequenceLocks(const CTransaction &tx, int flags) { LOCK(mempool.cs); return CheckSequenceLocks(tx, flags); } // Test suite for ancestor feerate transaction selection. // Implemented as an additional function, rather than a separate test case, to // allow reusing the blockchain created in CreateNewBlock_validity. // Note that this test assumes blockprioritypercentage is 0. void TestPackageSelection(Config &config, CScript scriptPubKey, std::vector &txFirst) { // Test the ancestor feerate transaction selection. TestMemPoolEntryHelper entry; // these 3 tests assume blockprioritypercentage is 0. config.SetBlockPriorityPercentage(0); // Test that a medium fee transaction will be selected after a higher fee // rate package with a low fee rate parent. 
CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout.resize(1); tx.vout[0].nValue = int64_t(5000000000LL - 1000) * SATOSHI; // This tx has a low fee: 1000 satoshis. // Save this txid for later use. TxId parentTxId = tx.GetId(); mempool.addUnchecked(parentTxId, entry.Fee(1000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a medium fee: 10000 satoshis. tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); tx.vout[0].nValue = int64_t(5000000000LL - 10000) * SATOSHI; TxId mediumFeeTxId = tx.GetId(); mempool.addUnchecked(mediumFeeTxId, entry.Fee(10000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a high fee, but depends on the first transaction. tx.vin[0].prevout = COutPoint(parentTxId, 0); // 50k satoshi fee. tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI; TxId highFeeTxId = tx.GetId(); mempool.addUnchecked(highFeeTxId, entry.Fee(50000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(false) .FromTx(tx)); std::unique_ptr pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[1]->GetId() == parentTxId); BOOST_CHECK(pblocktemplate->block.vtx[2]->GetId() == highFeeTxId); BOOST_CHECK(pblocktemplate->block.vtx[3]->GetId() == mediumFeeTxId); // Test that a package below the block min tx fee doesn't get included tx.vin[0].prevout = COutPoint(highFeeTxId, 0); // 0 fee. tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI; TxId freeTxId = tx.GetId(); mempool.addUnchecked(freeTxId, entry.Fee(Amount::zero()).FromTx(tx)); - size_t freeTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); + size_t freeTxSize = CTransaction(tx).GetBillableSize(); // Calculate a fee on child transaction that will put the package just // below the block min tx fee (assuming 1 child tx of the same size). Amount feeToUse = blockMinFeeRate.GetFee(2 * freeTxSize) - SATOSHI; tx.vin[0].prevout = COutPoint(freeTxId, 0); tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI - feeToUse; TxId lowFeeTxId = tx.GetId(); mempool.addUnchecked(lowFeeTxId, entry.Fee(feeToUse).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); // Verify that the free tx and the low fee tx didn't get selected. for (const auto &txn : pblocktemplate->block.vtx) { BOOST_CHECK(txn->GetId() != freeTxId); BOOST_CHECK(txn->GetId() != lowFeeTxId); } // Test that packages above the min relay fee do get included, even if one // of the transactions is below the min relay fee. Remove the low fee // transaction and replace with a higher fee transaction mempool.removeRecursive(CTransaction(tx)); // Now we should be just over the min relay fee. tx.vout[0].nValue -= 2 * SATOSHI; lowFeeTxId = tx.GetId(); mempool.addUnchecked(lowFeeTxId, entry.Fee(feeToUse + 2 * SATOSHI).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[4]->GetId() == freeTxId); BOOST_CHECK(pblocktemplate->block.vtx[5]->GetId() == lowFeeTxId); // Test that transaction selection properly updates ancestor fee // calculations as ancestor transactions get included in a block. Add a // 0-fee transaction that has 2 outputs. tx.vin[0].prevout = COutPoint(txFirst[2]->GetId(), 0); tx.vout.resize(2); tx.vout[0].nValue = int64_t(5000000000LL - 100000000) * SATOSHI; // 1BCC output. 
    tx.vout[1].nValue = 100000000 * SATOSHI;
    TxId freeTxId2 = tx.GetId();
    mempool.addUnchecked(
        freeTxId2, entry.Fee(Amount::zero()).SpendsCoinbase(true).FromTx(tx));

    // This tx can't be mined by itself.
    tx.vin[0].prevout = COutPoint(freeTxId2, 0);
    tx.vout.resize(1);
    feeToUse = blockMinFeeRate.GetFee(freeTxSize);
    tx.vout[0].nValue = int64_t(5000000000LL - 100000000) * SATOSHI - feeToUse;
    TxId lowFeeTxId2 = tx.GetId();
    mempool.addUnchecked(lowFeeTxId2,
                         entry.Fee(feeToUse).SpendsCoinbase(false).FromTx(tx));

    pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey);
    // Verify that this tx isn't selected.
    for (const auto &txn : pblocktemplate->block.vtx) {
        BOOST_CHECK(txn->GetId() != freeTxId2);
        BOOST_CHECK(txn->GetId() != lowFeeTxId2);
    }

    // This tx will be mineable, and should cause lowFeeTxId2 to be selected
    // as well.
    tx.vin[0].prevout = COutPoint(freeTxId2, 1);
    // 10k satoshi fee.
    tx.vout[0].nValue = (100000000 - 10000) * SATOSHI;
    mempool.addUnchecked(tx.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx));
    pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey);
    BOOST_CHECK(pblocktemplate->block.vtx[8]->GetId() == lowFeeTxId2);
}

void TestCoinbaseMessageEB(uint64_t eb, std::string cbmsg) {
    GlobalConfig config;
    config.SetMaxBlockSize(eb);

    CScript scriptPubKey =
        CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909"
                              "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112"
                              "de5c384df7ba0b8d578a4c702b6bf11d5f")
                  << OP_CHECKSIG;

    std::unique_ptr<CBlockTemplate> pblocktemplate =
        BlockAssembler(config).CreateNewBlock(scriptPubKey);

    CBlock *pblock = &pblocktemplate->block;

    // IncrementExtraNonce creates a valid coinbase and merkleRoot
    unsigned int extraNonce = 0;
    IncrementExtraNonce(config, pblock, chainActive.Tip(), extraNonce);
    unsigned int nHeight = chainActive.Tip()->nHeight + 1;
    std::vector<uint8_t> vec(cbmsg.begin(), cbmsg.end());
    BOOST_CHECK(pblock->vtx[0]->vin[0].scriptSig ==
                ((CScript() << nHeight << CScriptNum(extraNonce) << vec) +
                 COINBASE_FLAGS));
}

// Coinbase scriptSig has to contain the correct EB value, converted to MB
// and rounded down to the first decimal.
BOOST_AUTO_TEST_CASE(CheckCoinbase_EB) {
    TestCoinbaseMessageEB(1000001, "/EB1.0/");
    TestCoinbaseMessageEB(2000000, "/EB2.0/");
    TestCoinbaseMessageEB(8000000, "/EB8.0/");
    TestCoinbaseMessageEB(8320000, "/EB8.3/");
}

// NOTE: These tests rely on CreateNewBlock doing its own self-validation!
BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) {
    // Note that by default, these tests run with size accounting enabled.
    CScript scriptPubKey =
        CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909"
                              "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112"
                              "de5c384df7ba0b8d578a4c702b6bf11d5f")
                  << OP_CHECKSIG;
    std::unique_ptr<CBlockTemplate> pblocktemplate;
    CMutableTransaction tx, tx2;
    CScript script;
    uint256 hash;
    TestMemPoolEntryHelper entry;
    entry.nFee = 11 * SATOSHI;
    entry.dPriority = 111.0;
    entry.nHeight = 11;

    LOCK(cs_main);
    fCheckpointsEnabled = false;

    GlobalConfig config;

    // Simple block creation, nothing special yet:
    BOOST_CHECK(pblocktemplate =
                    BlockAssembler(config).CreateNewBlock(scriptPubKey));

    // We can't make transactions until we have inputs. Therefore, load 100
    // blocks :)
    int baseheight = 0;
    std::vector<CTransactionRef> txFirst;
    for (size_t i = 0; i < sizeof(blockinfo) / sizeof(*blockinfo); ++i) {
        // pointer for convenience.
CBlock *pblock = &pblocktemplate->block; pblock->nVersion = 1; pblock->nTime = chainActive.Tip()->GetMedianTimePast() + 1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); txCoinbase.vin[0].scriptSig.push_back(chainActive.Height()); // Ignore the (optional) segwit commitment added by CreateNewBlock (as // the hardcoded nonces don't account for this) txCoinbase.vout.resize(1); txCoinbase.vout[0].scriptPubKey = CScript(); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); if (txFirst.size() == 0) { baseheight = chainActive.Height(); } if (txFirst.size() < 4) { txFirst.push_back(pblock->vtx[0]); } pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); pblock->nNonce = blockinfo[i].nonce; std::shared_ptr shared_pblock = std::make_shared(*pblock); BOOST_CHECK(ProcessNewBlock(config, shared_pblock, true, nullptr)); pblock->hashPrevBlock = pblock->GetHash(); } // Just to make sure we can still make simple blocks. BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); const Amount BLOCKSUBSIDY = 50 * COIN; const Amount LOWFEE = CENT; const Amount HIGHFEE = COIN; const Amount HIGHERFEE = 4 * COIN; // block sigops > limit: 1000 CHECKMULTISIG + 1 tx.vin.resize(1); // NOTE: OP_NOP is used to force 20 SigOps for the CHECKMULTISIG tx.vin[0].scriptSig = CScript() << OP_0 << OP_0 << OP_0 << OP_NOP << OP_CHECKMULTISIG << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; // If we don't set the # of sig ops in the CTxMemPoolEntry, template // creation fails. mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout = COutPoint(hash, 0); } BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; // If we do set the # of sig ops in the CTxMemPoolEntry, template // creation passes. mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .SigOpsCost(80) .FromTx(tx)); tx.vin[0].prevout = COutPoint(hash, 0); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // block size > limit tx.vin[0].scriptSig = CScript(); // 18 * (520char + DROP) + OP_1 = 9433 bytes std::vector vchData(520); for (unsigned int i = 0; i < 18; ++i) { tx.vin[0].scriptSig << vchData << OP_DROP; } tx.vin[0].scriptSig << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 128; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout = COutPoint(hash, 0); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // Orphan in mempool, template creation fails. 
hash = tx.GetId(); mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Child with higher priority than parent. tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout = COutPoint(hash, 0); tx.vin.resize(2); tx.vin[1].scriptSig = CScript() << OP_1; tx.vin[1].prevout = COutPoint(txFirst[0]->GetId(), 0); // First txn output + fresh coinbase - new txn fee. tx.vout[0].nValue = tx.vout[0].nValue + BLOCKSUBSIDY - HIGHERFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHERFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // Coinbase in mempool, template creation fails. tx.vin.resize(1); tx.vin[0].prevout = COutPoint(); tx.vin[0].scriptSig = CScript() << OP_0 << OP_1; tx.vout[0].nValue = Amount::zero(); hash = tx.GetId(); // Give it a fee so it'll get mined. mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Invalid (pre-p2sh) txn in mempool, template creation fails. std::array times; for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. times[i] = chainActive.Tip() ->GetAncestor(chainActive.Tip()->nHeight - i) ->nTime; chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime = P2SH_ACTIVATION_TIME; } tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - LOWFEE; script = CScript() << OP_0; tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script)); hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout = COutPoint(hash, 0); tx.vin[0].scriptSig = CScript() << std::vector(script.begin(), script.end()); tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Restore the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime = times[i]; } // Double spend txn pair in mempool, template creation fails. tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vout[0].scriptPubKey = CScript() << OP_2; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Subsidy changing. int nHeight = chainActive.Height(); // Create an actual 209999-long block chain (without valid blocks). 
while (chainActive.Tip()->nHeight < 209999) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // Extend to a 210000-long block chain. while (chainActive.Tip()->nHeight < 210000) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // Delete the dummy blocks again. while (chainActive.Tip()->nHeight > nHeight) { CBlockIndex *del = chainActive.Tip(); chainActive.SetTip(del->pprev); pcoinsTip->SetBestBlock(del->pprev->GetBlockHash()); delete del->phashBlock; delete del; } // non-final txs in mempool SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); uint32_t flags = LOCKTIME_VERIFY_SEQUENCE | LOCKTIME_MEDIAN_TIME_PAST; // height map std::vector prevheights; // Relative height locked. tx.nVersion = 2; tx.vin.resize(1); prevheights.resize(1); // Only 1 transaction. tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; // txFirst[0] is the 2nd block tx.vin[0].nSequence = chainActive.Tip()->nHeight + 1; prevheights[0] = baseheight + 1; tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; tx.nLockTime = 0; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); { // Locktime passes. CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock( config, CTransaction(tx), state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass on 2nd block. BOOST_CHECK( SequenceLocks(CTransaction(tx), flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 2))); // Relative time locked. tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); // txFirst[1] is the 3rd block. tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | (((chainActive.Tip()->GetMedianTimePast() + 1 - chainActive[1]->GetMedianTimePast()) >> CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) + 1); prevheights[0] = baseheight + 2; hash = tx.GetId(); mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx)); { // Locktime passes. CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock( config, CTransaction(tx), state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } // Sequence locks pass 512 seconds later. BOOST_CHECK( SequenceLocks(CTransaction(tx), flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 1))); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Undo tricked MTP. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime -= 512; } // Absolute height locked. 
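    // (Editor's note on the encodings exercised above, hedged: under BIP68,
    // setting CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG makes the low 16 bits of
    // nSequence a relative *time* lock counted in 512-second units, that is
    // 1 << SEQUENCE_LOCKTIME_GRANULARITY with the granularity being 9. That
    // is why the relative-time case shifts the MTP delta right by the
    // granularity and adds 1, and why bumping every ancestor's nTime by
    // exactly 512 seconds satisfies the lock. Without the flag, the low 16
    // bits count *blocks*, as in the relative-height case. The two cases
    // that follow use absolute nLockTime instead.)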
    tx.vin[0].prevout = COutPoint(txFirst[2]->GetId(), 0);
    tx.vin[0].nSequence = CTxIn::SEQUENCE_FINAL - 1;
    prevheights[0] = baseheight + 3;
    tx.nLockTime = chainActive.Tip()->nHeight + 1;
    hash = tx.GetId();
    mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx));

    {
        // Locktime fails.
        CValidationState state;
        BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock(
            config, CTransaction(tx), state, flags));
        BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal");
    }

    // Sequence locks pass.
    BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags));

    {
        // Locktime passes on 2nd block.
        CValidationState state;
        int64_t nMedianTimePast = chainActive.Tip()->GetMedianTimePast();
        BOOST_CHECK(ContextualCheckTransaction(
            config, CTransaction(tx), state, chainActive.Tip()->nHeight + 2,
            nMedianTimePast, nMedianTimePast));
    }

    // Absolute time locked.
    tx.vin[0].prevout = COutPoint(txFirst[3]->GetId(), 0);
    tx.nLockTime = chainActive.Tip()->GetMedianTimePast();
    prevheights.resize(1);
    prevheights[0] = baseheight + 4;
    hash = tx.GetId();
    mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx));

    {
        // Locktime fails.
        CValidationState state;
        BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock(
            config, CTransaction(tx), state, flags));
        BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal");
    }

    // Sequence locks pass.
    BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags));

    {
        // Locktime passes 1 second later.
        CValidationState state;
        int64_t nMedianTimePast = chainActive.Tip()->GetMedianTimePast() + 1;
        BOOST_CHECK(ContextualCheckTransaction(
            config, CTransaction(tx), state, chainActive.Tip()->nHeight + 1,
            nMedianTimePast, nMedianTimePast));
    }

    // mempool-dependent transactions (not added)
    tx.vin[0].prevout = COutPoint(hash, 0);
    prevheights[0] = chainActive.Tip()->nHeight + 1;
    tx.nLockTime = 0;
    tx.vin[0].nSequence = 0;

    {
        // Locktime passes.
        CValidationState state;
        BOOST_CHECK(ContextualCheckTransactionForCurrentBlock(
            config, CTransaction(tx), state, flags));
    }

    // Sequence locks pass.
    BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags));
    tx.vin[0].nSequence = 1;
    // Sequence locks fail.
    BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags));
    tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG;
    // Sequence locks pass.
    BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags));
    tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1;
    // Sequence locks fail.
    BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags));

    pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey);
    BOOST_CHECK(pblocktemplate);

    // None of the absolute height/time locked txs should have made it into
    // the template because we still check IsFinalTx in CreateNewBlock, but
    // relative locked txs will if inconsistently added to the mempool. For
    // now these will still generate a valid template until the BIP68 soft
    // fork.
    BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 3UL);
    // However, if we advance height by 1 and time by 512, all of them should
    // be mined.
    for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) {
        // Trick the MedianTimePast.
chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } chainActive.Tip()->nHeight++; SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5UL); chainActive.Tip()->nHeight--; SetMockTime(0); mempool.clear(); TestPackageSelection(config, scriptPubKey, txFirst); fCheckpointsEnabled = true; } void CheckBlockMaxSize(const Config &config, uint64_t size, uint64_t expected) { gArgs.ForceSetArg("-blockmaxsize", std::to_string(size)); BlockAssembler ba(config); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), expected); } BOOST_AUTO_TEST_CASE(BlockAssembler_construction) { GlobalConfig config; // We are working on a fake chain and need to protect ourselves. LOCK(cs_main); // Test around historical 1MB (plus one byte because that's mandatory) config.SetMaxBlockSize(ONE_MEGABYTE + 1); CheckBlockMaxSize(config, 0, 1000); CheckBlockMaxSize(config, 1000, 1000); CheckBlockMaxSize(config, 1001, 1001); CheckBlockMaxSize(config, 12345, 12345); CheckBlockMaxSize(config, ONE_MEGABYTE - 1001, ONE_MEGABYTE - 1001); CheckBlockMaxSize(config, ONE_MEGABYTE - 1000, ONE_MEGABYTE - 1000); CheckBlockMaxSize(config, ONE_MEGABYTE - 999, ONE_MEGABYTE - 999); CheckBlockMaxSize(config, ONE_MEGABYTE, ONE_MEGABYTE - 999); // Test around default cap config.SetMaxBlockSize(DEFAULT_MAX_BLOCK_SIZE); // Now we can use the default max block size. CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 1001, DEFAULT_MAX_BLOCK_SIZE - 1001); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 1000, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 999, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE - 1000); // If the parameter is not specified, we use // DEFAULT_MAX_GENERATED_BLOCK_SIZE { gArgs.ClearArg("-blockmaxsize"); BlockAssembler ba(config); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), DEFAULT_MAX_GENERATED_BLOCK_SIZE); } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index a9b9e78996..8ec1b218b3 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -1,791 +1,811 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "data/tx_invalid.json.h" #include "data/tx_valid.json.h" #include "test/test_bitcoin.h" #include "chainparams.h" // For CChainParams #include "checkqueue.h" #include "clientversion.h" #include "config.h" #include "consensus/tx_verify.h" #include "consensus/validation.h" #include "core_io.h" #include "key.h" #include "keystore.h" #include "policy/policy.h" #include "script/script.h" #include "script/script_error.h" #include "script/sign.h" #include "script/standard.h" #include "test/jsonutil.h" #include "test/scriptflags.h" #include "utilstrencodings.h" #include "validation.h" #include #include #include #include #include typedef std::vector valtype; BOOST_FIXTURE_TEST_SUITE(transaction_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(tx_valid) { // Read tests from test/data/tx_valid.json // Format is an array of arrays // Inner arrays are either [ "comment" ] // or [[[prevout hash, prevout index, prevout scriptPubKey], [input 2], // ...],"], serializedTransaction, verifyFlags // ... 
where all scripts are stringified scripts.
    //
    // verifyFlags is a comma separated list of script verification flags to
    // apply, or "NONE"
    UniValue tests = read_json(
        std::string(json_tests::tx_valid,
                    json_tests::tx_valid + sizeof(json_tests::tx_valid)));

    ScriptError err;
    for (size_t idx = 0; idx < tests.size(); idx++) {
        UniValue test = tests[idx];
        std::string strTest = test.write();
        if (test[0].isArray()) {
            if (test.size() != 3 || !test[1].isStr() || !test[2].isStr()) {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            std::map<COutPoint, CScript> mapprevOutScriptPubKeys;
            std::map<COutPoint, Amount> mapprevOutValues;
            UniValue inputs = test[0].get_array();
            bool fValid = true;
            for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
                const UniValue &input = inputs[inpIdx];
                if (!input.isArray()) {
                    fValid = false;
                    break;
                }
                UniValue vinput = input.get_array();
                if (vinput.size() < 3 || vinput.size() > 4) {
                    fValid = false;
                    break;
                }
                COutPoint outpoint(uint256S(vinput[0].get_str()),
                                   vinput[1].get_int());
                mapprevOutScriptPubKeys[outpoint] =
                    ParseScript(vinput[2].get_str());
                if (vinput.size() >= 4) {
                    mapprevOutValues[outpoint] =
                        vinput[3].get_int64() * SATOSHI;
                }
            }
            if (!fValid) {
                BOOST_ERROR("Bad test: " << strTest);
                continue;
            }

            std::string transaction = test[1].get_str();
            CDataStream stream(ParseHex(transaction), SER_NETWORK,
                               PROTOCOL_VERSION);
            CTransaction tx(deserialize, stream);

            CValidationState state;
            BOOST_CHECK_MESSAGE(tx.IsCoinBase()
                                    ? CheckCoinbase(tx, state)
                                    : CheckRegularTransaction(tx, state),
                                strTest);
            BOOST_CHECK(state.IsValid());

            // Check that CheckCoinbase rejects non-coinbase transactions and
            // vice versa.
            BOOST_CHECK_MESSAGE(!(tx.IsCoinBase()
                                      ? CheckRegularTransaction(tx, state)
                                      : CheckCoinbase(tx, state)),
                                strTest);
            BOOST_CHECK(state.IsInvalid());

            PrecomputedTransactionData txdata(tx);
            for (size_t i = 0; i < tx.vin.size(); i++) {
                if (!mapprevOutScriptPubKeys.count(tx.vin[i].prevout)) {
                    BOOST_ERROR("Bad test: " << strTest);
                    break;
                }

                Amount amount = Amount::zero();
                if (mapprevOutValues.count(tx.vin[i].prevout)) {
                    amount = mapprevOutValues[tx.vin[i].prevout];
                }

                uint32_t verify_flags = ParseScriptFlags(test[2].get_str());
                BOOST_CHECK_MESSAGE(
                    VerifyScript(tx.vin[i].scriptSig,
                                 mapprevOutScriptPubKeys[tx.vin[i].prevout],
                                 verify_flags,
                                 TransactionSignatureChecker(&tx, i, amount,
                                                             txdata),
                                 &err),
                    strTest);
                BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK,
                                    ScriptErrorString(err));
            }
        }
    }
}

BOOST_AUTO_TEST_CASE(tx_invalid) {
    // Read tests from test/data/tx_invalid.json
    // Format is an array of arrays
    // Inner arrays are either [ "comment" ]
    // or [[[prevout hash, prevout index, prevout scriptPubKey], [input 2],
    // ...],"], serializedTransaction, verifyFlags
    // ... where all scripts are stringified scripts.
// // verifyFlags is a comma separated list of script verification flags to // apply, or "NONE" UniValue tests = read_json( std::string(json_tests::tx_invalid, json_tests::tx_invalid + sizeof(json_tests::tx_invalid))); ScriptError err; for (size_t idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); if (test[0].isArray()) { if (test.size() != 3 || !test[1].isStr() || !test[2].isStr()) { BOOST_ERROR("Bad test: " << strTest); continue; } std::map mapprevOutScriptPubKeys; std::map mapprevOutValues; UniValue inputs = test[0].get_array(); bool fValid = true; for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++) { const UniValue &input = inputs[inpIdx]; if (!input.isArray()) { fValid = false; break; } UniValue vinput = input.get_array(); if (vinput.size() < 3 || vinput.size() > 4) { fValid = false; break; } COutPoint outpoint(uint256S(vinput[0].get_str()), vinput[1].get_int()); mapprevOutScriptPubKeys[outpoint] = ParseScript(vinput[2].get_str()); if (vinput.size() >= 4) { mapprevOutValues[outpoint] = vinput[3].get_int64() * SATOSHI; } } if (!fValid) { BOOST_ERROR("Bad test: " << strTest); continue; } std::string transaction = test[1].get_str(); CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION); CTransaction tx(deserialize, stream); CValidationState state; fValid = CheckRegularTransaction(tx, state) && state.IsValid(); PrecomputedTransactionData txdata(tx); for (size_t i = 0; i < tx.vin.size() && fValid; i++) { if (!mapprevOutScriptPubKeys.count(tx.vin[i].prevout)) { BOOST_ERROR("Bad test: " << strTest); break; } Amount amount = Amount::zero(); if (0 != mapprevOutValues.count(tx.vin[i].prevout)) { amount = mapprevOutValues[tx.vin[i].prevout]; } uint32_t verify_flags = ParseScriptFlags(test[2].get_str()); fValid = VerifyScript( tx.vin[i].scriptSig, mapprevOutScriptPubKeys[tx.vin[i].prevout], verify_flags, TransactionSignatureChecker(&tx, i, amount, txdata), &err); } BOOST_CHECK_MESSAGE(!fValid, strTest); BOOST_CHECK_MESSAGE(err != SCRIPT_ERR_OK, ScriptErrorString(err)); } } } BOOST_AUTO_TEST_CASE(basic_transaction_tests) { // Random real transaction // (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436) uint8_t ch[] = { 0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6, 0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f, 0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6, 0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0, 0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2, 0x5b, 0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6, 0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9, 0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a, 0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9, 0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b, 0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef, 0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39, 0x97, 0x2d, 0x7b, 
0x88, 0xac, 0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93, 0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43, 0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00}; std::vector vch(ch, ch + sizeof(ch) - 1); CDataStream stream(vch, SER_DISK, CLIENT_VERSION); CMutableTransaction tx; stream >> tx; CValidationState state; BOOST_CHECK_MESSAGE(CheckRegularTransaction(CTransaction(tx), state) && state.IsValid(), "Simple deserialized transaction should be valid."); // Check that duplicate txins fail tx.vin.push_back(tx.vin[0]); BOOST_CHECK_MESSAGE(!CheckRegularTransaction(CTransaction(tx), state) || !state.IsValid(), "Transaction with duplicate txins should be invalid."); } // // Helper: create two dummy transactions, each with // two outputs. The first has 11 and 50 CENT outputs // paid to a TX_PUBKEY, the second 21 and 22 CENT outputs // paid to a TX_PUBKEYHASH. // static std::vector SetupDummyInputs(CBasicKeyStore &keystoreRet, CCoinsViewCache &coinsRet) { std::vector dummyTransactions; dummyTransactions.resize(2); // Add some keys to the keystore: CKey key[4]; for (int i = 0; i < 4; i++) { key[i].MakeNewKey(i % 2); keystoreRet.AddKey(key[i]); } // Create some dummy input transactions dummyTransactions[0].vout.resize(2); dummyTransactions[0].vout[0].nValue = 11 * CENT; dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG; dummyTransactions[0].vout[1].nValue = 50 * CENT; dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG; AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0); dummyTransactions[1].vout.resize(2); dummyTransactions[1].vout[0].nValue = 21 * CENT; dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(key[2].GetPubKey().GetID()); dummyTransactions[1].vout[1].nValue = 22 * CENT; dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(key[3].GetPubKey().GetID()); AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0); return dummyTransactions; } BOOST_AUTO_TEST_CASE(test_Get) { CBasicKeyStore keystore; CCoinsView coinsDummy; CCoinsViewCache coins(&coinsDummy); std::vector dummyTransactions = SetupDummyInputs(keystore, coins); CMutableTransaction t1; t1.vin.resize(3); t1.vin[0].prevout = COutPoint(dummyTransactions[0].GetId(), 1); t1.vin[0].scriptSig << std::vector(65, 0); t1.vin[1].prevout = COutPoint(dummyTransactions[1].GetId(), 0); t1.vin[1].scriptSig << std::vector(65, 0) << std::vector(33, 4); t1.vin[2].prevout = COutPoint(dummyTransactions[1].GetId(), 1); t1.vin[2].scriptSig << std::vector(65, 0) << std::vector(33, 4); t1.vout.resize(2); t1.vout[0].nValue = 90 * CENT; t1.vout[0].scriptPubKey << OP_1; BOOST_CHECK(AreInputsStandard(CTransaction(t1), coins)); BOOST_CHECK_EQUAL(coins.GetValueIn(CTransaction(t1)), (50 + 21 + 22) * CENT); } void CreateCreditAndSpend(const CKeyStore &keystore, const CScript &outscript, CTransactionRef &output, CMutableTransaction &input, bool success = true) { CMutableTransaction outputm; outputm.nVersion = 1; outputm.vin.resize(1); outputm.vin[0].prevout = COutPoint(); outputm.vin[0].scriptSig = CScript(); outputm.vout.resize(1); outputm.vout[0].nValue = SATOSHI; outputm.vout[0].scriptPubKey = outscript; CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION); ssout << outputm; ssout >> output; BOOST_CHECK_EQUAL(output->vin.size(), 1UL); BOOST_CHECK(output->vin[0] == outputm.vin[0]); BOOST_CHECK_EQUAL(output->vout.size(), 1UL); BOOST_CHECK(output->vout[0] == 
outputm.vout[0]); CMutableTransaction inputm; inputm.nVersion = 1; inputm.vin.resize(1); inputm.vin[0].prevout = COutPoint(output->GetId(), 0); inputm.vout.resize(1); inputm.vout[0].nValue = SATOSHI; inputm.vout[0].scriptPubKey = CScript(); bool ret = SignSignature(keystore, *output, inputm, 0, SigHashType().withForkId()); BOOST_CHECK_EQUAL(ret, success); CDataStream ssin(SER_NETWORK, PROTOCOL_VERSION); ssin << inputm; ssin >> input; BOOST_CHECK_EQUAL(input.vin.size(), 1UL); BOOST_CHECK(input.vin[0] == inputm.vin[0]); BOOST_CHECK_EQUAL(input.vout.size(), 1UL); BOOST_CHECK(input.vout[0] == inputm.vout[0]); } void CheckWithFlag(const CTransactionRef &output, const CMutableTransaction &input, int flags, bool success) { ScriptError error; CTransaction inputi(input); bool ret = VerifyScript( inputi.vin[0].scriptSig, output->vout[0].scriptPubKey, flags | SCRIPT_ENABLE_SIGHASH_FORKID, TransactionSignatureChecker(&inputi, 0, output->vout[0].nValue), &error); BOOST_CHECK_EQUAL(ret, success); } static CScript PushAll(const std::vector &values) { CScript result; for (const valtype &v : values) { if (v.size() == 0) { result << OP_0; } else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) { result << CScript::EncodeOP_N(v[0]); } else { result << v; } } return result; } void ReplaceRedeemScript(CScript &script, const CScript &redeemScript) { std::vector stack; EvalScript(stack, script, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker()); BOOST_CHECK(stack.size() > 0); stack.back() = std::vector(redeemScript.begin(), redeemScript.end()); script = PushAll(stack); } BOOST_AUTO_TEST_CASE(test_big_transaction) { CKey key; key.MakeNewKey(false); CBasicKeyStore keystore; keystore.AddKeyPubKey(key, key.GetPubKey()); CScript scriptPubKey = CScript() << ToByteVector(key.GetPubKey()) << OP_CHECKSIG; std::vector sigHashes; sigHashes.emplace_back(SIGHASH_NONE | SIGHASH_FORKID); sigHashes.emplace_back(SIGHASH_SINGLE | SIGHASH_FORKID); sigHashes.emplace_back(SIGHASH_ALL | SIGHASH_FORKID); sigHashes.emplace_back(SIGHASH_NONE | SIGHASH_FORKID | SIGHASH_ANYONECANPAY); sigHashes.emplace_back(SIGHASH_SINGLE | SIGHASH_FORKID | SIGHASH_ANYONECANPAY); sigHashes.emplace_back(SIGHASH_ALL | SIGHASH_FORKID | SIGHASH_ANYONECANPAY); CMutableTransaction mtx; mtx.nVersion = 1; // create a big transaction of 4500 inputs signed by the same key. 
const static size_t OUTPUT_COUNT = 4500; mtx.vout.reserve(OUTPUT_COUNT); for (size_t ij = 0; ij < OUTPUT_COUNT; ij++) { size_t i = mtx.vin.size(); uint256 prevId = uint256S( "0000000000000000000000000000000000000000000000000000000000000100"); COutPoint outpoint(prevId, i); mtx.vin.resize(mtx.vin.size() + 1); mtx.vin[i].prevout = outpoint; mtx.vin[i].scriptSig = CScript(); mtx.vout.emplace_back(1000 * SATOSHI, CScript() << OP_1); } // sign all inputs for (size_t i = 0; i < mtx.vin.size(); i++) { bool hashSigned = SignSignature(keystore, scriptPubKey, mtx, i, 1000 * SATOSHI, sigHashes.at(i % sigHashes.size())); BOOST_CHECK_MESSAGE(hashSigned, "Failed to sign test transaction"); } CTransaction tx(mtx); // check all inputs concurrently, with the cache PrecomputedTransactionData txdata(tx); boost::thread_group threadGroup; CCheckQueue scriptcheckqueue(128); CCheckQueueControl control(&scriptcheckqueue); for (int i = 0; i < 20; i++) { threadGroup.create_thread(boost::bind( &CCheckQueue::Thread, boost::ref(scriptcheckqueue))); } std::vector coins; for (size_t i = 0; i < mtx.vin.size(); i++) { CTxOut out; out.nValue = 1000 * SATOSHI; out.scriptPubKey = scriptPubKey; coins.emplace_back(std::move(out), 1, false); } for (size_t i = 0; i < mtx.vin.size(); i++) { std::vector vChecks; CTxOut &out = coins[tx.vin[i].prevout.GetN()].GetTxOut(); CScriptCheck check(out.scriptPubKey, out.nValue, tx, i, MANDATORY_SCRIPT_VERIFY_FLAGS, false, txdata); vChecks.push_back(CScriptCheck()); check.swap(vChecks.back()); control.Add(vChecks); } bool controlCheck = control.Wait(); BOOST_CHECK(controlCheck); threadGroup.interrupt_all(); threadGroup.join_all(); } BOOST_AUTO_TEST_CASE(test_witness) { CBasicKeyStore keystore, keystore2; CKey key1, key2, key3, key1L, key2L; CPubKey pubkey1, pubkey2, pubkey3, pubkey1L, pubkey2L; key1.MakeNewKey(true); key2.MakeNewKey(true); key3.MakeNewKey(true); key1L.MakeNewKey(false); key2L.MakeNewKey(false); pubkey1 = key1.GetPubKey(); pubkey2 = key2.GetPubKey(); pubkey3 = key3.GetPubKey(); pubkey1L = key1L.GetPubKey(); pubkey2L = key2L.GetPubKey(); keystore.AddKeyPubKey(key1, pubkey1); keystore.AddKeyPubKey(key2, pubkey2); keystore.AddKeyPubKey(key1L, pubkey1L); keystore.AddKeyPubKey(key2L, pubkey2L); CScript scriptPubkey1, scriptPubkey2, scriptPubkey1L, scriptPubkey2L, scriptMulti; scriptPubkey1 << ToByteVector(pubkey1) << OP_CHECKSIG; scriptPubkey2 << ToByteVector(pubkey2) << OP_CHECKSIG; scriptPubkey1L << ToByteVector(pubkey1L) << OP_CHECKSIG; scriptPubkey2L << ToByteVector(pubkey2L) << OP_CHECKSIG; std::vector oneandthree; oneandthree.push_back(pubkey1); oneandthree.push_back(pubkey3); scriptMulti = GetScriptForMultisig(2, oneandthree); keystore.AddCScript(scriptPubkey1); keystore.AddCScript(scriptPubkey2); keystore.AddCScript(scriptPubkey1L); keystore.AddCScript(scriptPubkey2L); keystore.AddCScript(scriptMulti); keystore2.AddCScript(scriptMulti); keystore2.AddKeyPubKey(key3, pubkey3); CTransactionRef output1, output2; CMutableTransaction input1, input2; SignatureData sigdata; // Normal pay-to-compressed-pubkey. 
CreateCreditAndSpend(keystore, scriptPubkey1, output1, input1); CreateCreditAndSpend(keystore, scriptPubkey2, output2, input2); CheckWithFlag(output1, input1, 0, true); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); CheckWithFlag(output1, input2, 0, false); CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false); CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false); // P2SH pay-to-compressed-pubkey. CreateCreditAndSpend(keystore, GetScriptForDestination(CScriptID(scriptPubkey1)), output1, input1); CreateCreditAndSpend(keystore, GetScriptForDestination(CScriptID(scriptPubkey2)), output2, input2); ReplaceRedeemScript(input2.vin[0].scriptSig, scriptPubkey1); CheckWithFlag(output1, input1, 0, true); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); CheckWithFlag(output1, input2, 0, true); CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false); CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false); // Normal pay-to-uncompressed-pubkey. CreateCreditAndSpend(keystore, scriptPubkey1L, output1, input1); CreateCreditAndSpend(keystore, scriptPubkey2L, output2, input2); CheckWithFlag(output1, input1, 0, true); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); CheckWithFlag(output1, input2, 0, false); CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false); CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false); // P2SH pay-to-uncompressed-pubkey. CreateCreditAndSpend(keystore, GetScriptForDestination(CScriptID(scriptPubkey1L)), output1, input1); CreateCreditAndSpend(keystore, GetScriptForDestination(CScriptID(scriptPubkey2L)), output2, input2); ReplaceRedeemScript(input2.vin[0].scriptSig, scriptPubkey1L); CheckWithFlag(output1, input1, 0, true); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); CheckWithFlag(output1, input2, 0, true); CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false); CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false); // Normal 2-of-2 multisig CreateCreditAndSpend(keystore, scriptMulti, output1, input1, false); CheckWithFlag(output1, input1, 0, false); CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false); CheckWithFlag(output2, input2, 0, false); BOOST_CHECK(*output1 == *output2); UpdateTransaction( input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker( &input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); // P2SH 2-of-2 multisig CreateCreditAndSpend(keystore, GetScriptForDestination(CScriptID(scriptMulti)), output1, input1, false); CheckWithFlag(output1, input1, 0, true); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, false); CreateCreditAndSpend(keystore2, GetScriptForDestination(CScriptID(scriptMulti)), output2, input2, false); CheckWithFlag(output2, input2, 0, true); CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false); BOOST_CHECK(*output1 == *output2); UpdateTransaction( input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker( &input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0))); CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true); 
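// Each keystore above holds only one of the two multisig keys, so neither
// partial signature validates on its own; CombineSignatures merges the two
// partial SignatureData sets into a complete 2-of-2 scriptSig, as exercised
// above. The merge step in isolation (an illustrative recombination using
// the same APIs as the call above; redundant here, since input1 has already
// been combined):
SignatureData combined = CombineSignatures(
    output1->vout[0].scriptPubKey,
    MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue),
    DataFromTransaction(input1, 0), DataFromTransaction(input2, 0));
// combined.scriptSig now carries both signatures for the 2-of-2 redeem.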
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true); } BOOST_AUTO_TEST_CASE(test_IsStandard) { LOCK(cs_main); CBasicKeyStore keystore; CCoinsView coinsDummy; CCoinsViewCache coins(&coinsDummy); std::vector dummyTransactions = SetupDummyInputs(keystore, coins); CMutableTransaction t; t.vin.resize(1); t.vin[0].prevout = COutPoint(dummyTransactions[0].GetId(), 1); t.vin[0].scriptSig << std::vector(65, 0); t.vout.resize(1); t.vout[0].nValue = 90 * CENT; CKey key; key.MakeNewKey(true); t.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID()); std::string reason; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // Check dust with default relay fee: Amount nDustThreshold = 3 * 182 * dustRelayFee.GetFeePerK() / 1000; BOOST_CHECK_EQUAL(nDustThreshold, 546 * SATOSHI); // dust: t.vout[0].nValue = nDustThreshold - SATOSHI; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); // not dust: t.vout[0].nValue = nDustThreshold; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // Check dust with odd relay fee to verify rounding: // nDustThreshold = 182 * 1234 / 1000 * 3 dustRelayFee = CFeeRate(1234 * SATOSHI); // dust: t.vout[0].nValue = (672 - 1) * SATOSHI; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); // not dust: t.vout[0].nValue = 672 * SATOSHI; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); dustRelayFee = CFeeRate(DUST_RELAY_TX_FEE); t.vout[0].scriptPubKey = CScript() << OP_1; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); // MAX_OP_RETURN_RELAY-byte TX_NULL_DATA (standard) t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("646578784062697477617463682e636f2092c558ed52c56d" "8dd14ca76226bc936a84820d898443873eb03d8854b21fa3" "952b99a2981873e74509281730d78a21786d34a38bd1ebab" "822fad42278f7f4420db6ab1fd2b6826148d4f73bb41ec2d" "40a6d5793d66e17074a0c56a8a7df21062308f483dd6e38d" "53609d350038df0a1b2a9ac8332016e0b904f66880dd0108" "81c4e8074cce8e4ad6c77cb3460e01bf0e7e811b5f945f83" "732ba6677520a893d75d9a966cb8f85dc301656b1635c631" "f5d00d4adf73f2dd112ca75cf19754651909becfbe65aed1" "3afb2ab8"); BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY, t.vout[0].scriptPubKey.size()); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // MAX_OP_RETURN_RELAY+1-byte TX_NULL_DATA (non-standard) t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("646578784062697477617463682e636f2092c558ed52c56d" "8dd14ca76226bc936a84820d898443873eb03d8854b21fa3" "952b99a2981873e74509281730d78a21786d34a38bd1ebab" "822fad42278f7f4420db6ab1fd2b6826148d4f73bb41ec2d" "40a6d5793d66e17074a0c56a8a7df21062308f483dd6e38d" "53609d350038df0a1b2a9ac8332016e0b904f66880dd0108" "81c4e8074cce8e4ad6c77cb3460e01bf0e7e811b5f945f83" "732ba6677520a893d75d9a966cb8f85dc301656b1635c631" "f5d00d4adf73f2dd112ca75cf19754651909becfbe65aed1" "3afb2ab800"); BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY + 1, t.vout[0].scriptPubKey.size()); BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); /** * Check when a custom value is used for -datacarriersize . 
*/ unsigned newMaxSize = 90; gArgs.ForceSetArg("-datacarriersize", std::to_string(newMaxSize)); // Max user provided payload size is standard t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548" "271967f1a67130b7105cd6a828e03909a67962e0ea1f61de" "b649f6bc3f4cef3877696e64657878"); BOOST_CHECK_EQUAL(t.vout[0].scriptPubKey.size(), newMaxSize); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // Max user provided payload size + 1 is non-standard t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548" "271967f1a67130b7105cd6a828e03909a67962e0ea1f61de" "b649f6bc3f4cef3877696e6465787800"); BOOST_CHECK_EQUAL(t.vout[0].scriptPubKey.size(), newMaxSize + 1); BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); // Clear custom configuration. gArgs.ClearArg("-datacarriersize"); // Data payload can be encoded in any way... t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex(""); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("00") << ParseHex("01"); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // OP_RESERVED *is* considered to be a PUSHDATA type opcode by IsPushOnly()! t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RESERVED << -1 << 0 << ParseHex("01") << 2 << 3 << 4 << 5 << 6 << 7 << 8 << 9 << 10 << 11 << 12 << 13 << 14 << 15 << 16; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); t.vout[0].scriptPubKey = CScript() << OP_RETURN << 0 << ParseHex("01") << 2 << ParseHex("fffffffffffffffffffffffffffffffffffff" "fffffffffffffffffffffffffffffffffff"); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // ...so long as it only contains PUSHDATA's t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RETURN; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); // TX_NULL_DATA w/o PUSHDATA t.vout.resize(1); t.vout[0].scriptPubKey = CScript() << OP_RETURN; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); // Only one TX_NULL_DATA permitted in all cases t.vout.resize(2); t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38"); t.vout[1].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38"); BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38"); t.vout[1].scriptPubKey = CScript() << OP_RETURN; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); t.vout[0].scriptPubKey = CScript() << OP_RETURN; t.vout[1].scriptPubKey = CScript() << OP_RETURN; BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); } BOOST_AUTO_TEST_CASE(txsize_activation_test) { const Config &config = GetConfig(); const int64_t magneticAnomalyActivationTime = config.GetChainParams().GetConsensus().magneticAnomalyActivationTime;
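// The two checks below pivot on the median-time-past argument: one second
// before the activation time the empty transaction is still accepted, and at
// the activation time it is rejected as bad-txns-undersize. A sketch of the
// gating predicate (hypothetical local helper for illustration; the real
// check lives behind IsMagneticAnomalyEnabled in consensus/activation.h):
auto isMagneticAnomalyActiveAt = [&](int64_t medianTimePast) {
    return medianTimePast >= magneticAnomalyActivationTime;
};
BOOST_CHECK(!isMagneticAnomalyActiveAt(magneticAnomalyActivationTime - 1));
BOOST_CHECK(isMagneticAnomalyActiveAt(magneticAnomalyActivationTime));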
// A minimally sized transaction. CTransaction minTx; CValidationState state; BOOST_CHECK(ContextualCheckTransaction(config, minTx, state, 1234, 5678, magneticAnomalyActivationTime - 1)); BOOST_CHECK(!ContextualCheckTransaction(config, minTx, state, 1234, 5678, magneticAnomalyActivationTime)); BOOST_CHECK_EQUAL(state.GetRejectCode(), REJECT_INVALID); BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-undersize"); } +BOOST_AUTO_TEST_CASE(tx_transaction_fee) { + std::vector<size_t> sizes = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512}; + for (size_t inputs : sizes) { + for (size_t outputs : sizes) { + CMutableTransaction mtx; + mtx.vin.resize(inputs); + mtx.vout.resize(outputs); + CTransaction tx(mtx); + auto txBillableSize = tx.GetBillableSize(); + auto txSize = tx.GetTotalSize(); + BOOST_CHECK(txBillableSize > 0); + if (inputs > outputs) { + BOOST_CHECK(txBillableSize <= txSize); + } else { + BOOST_CHECK(txBillableSize >= txSize); + } + } + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 0a72bdd674..6d52de8c81 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1,1382 +1,1404 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "txmempool.h" #include "chainparams.h" // for GetConsensus. #include "clientversion.h" #include "consensus/consensus.h" #include "consensus/tx_verify.h" #include "consensus/validation.h" #include "policy/fees.h" #include "policy/policy.h" #include "streams.h" #include "timedata.h" #include "util.h" #include "utilmoneystr.h" #include "utiltime.h" #include "validation.h" #include "version.h" #include <boost/range/adaptor/reversed.hpp> CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool _spendsCoinbase, int64_t _sigOpsCount, LockPoints lp) : tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), inChainInputValue(_inChainInputValue), spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount), lockPoints(lp) { nTxSize = tx->GetTotalSize(); + nTxBillableSize = tx->GetBillableSize(); nModSize = tx->CalculateModifiedSize(GetTxSize()); nUsageSize = RecursiveDynamicUsage(tx); nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); + nBillableSizeWithDescendants = GetTxBillableSize(); nModFeesWithDescendants = nFee; Amount nValueIn = tx->GetValueOut() + nFee; assert(inChainInputValue <= nValueIn); feeDelta = Amount::zero(); nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); + nBillableSizeWithAncestors = GetTxBillableSize(); nModFeesWithAncestors = nFee; nSigOpCountWithAncestors = sigOpCount; } CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry &other) { *this = other; } double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const { double deltaPriority = double((currentHeight - entryHeight) * (inChainInputValue / SATOSHI)) / nModSize; double dResult = entryPriority + deltaPriority; // This should only happen if it was called with a height below entry height if (dResult < 0) { dResult = 0; } return dResult; } void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) { lockPoints = lp; }
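// The new billable-size bookkeeping mirrors the existing serialized-size
// fields. The contract pinned down by the tx_transaction_fee test above is
// only relative: billable size never exceeds the serialized size when a
// transaction has more inputs than outputs, and is at least as large
// otherwise. Restated as a standalone predicate (hypothetical helper for
// illustration, not part of this change):
static bool BillableSizeInvariantHolds(const CTransaction &tx) {
    auto billable = tx.GetBillableSize();
    auto total = tx.GetTotalSize();
    return tx.vin.size() > tx.vout.size() ? billable <= total
                                          : billable >= total;
}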
// Update the given tx for any in-mempool descendants. // Assumes that setMemPoolChildren is correct for the given tx and all // descendants. void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude) { setEntries stageEntries, setAllDescendants; stageEntries = GetMemPoolChildren(updateIt); while (!stageEntries.empty()) { const txiter cit = *stageEntries.begin(); setAllDescendants.insert(cit); stageEntries.erase(cit); const setEntries &setChildren = GetMemPoolChildren(cit); for (const txiter childEntry : setChildren) { cacheMap::iterator cacheIt = cachedDescendants.find(childEntry); if (cacheIt != cachedDescendants.end()) { // We've already calculated this one, just add the entries for // this set but don't traverse again. for (const txiter cacheEntry : cacheIt->second) { setAllDescendants.insert(cacheEntry); } } else if (!setAllDescendants.count(childEntry)) { // Schedule for later processing stageEntries.insert(childEntry); } } } // setAllDescendants now contains all in-mempool descendants of updateIt. // Update and add to cached descendant map int64_t modifySize = 0; - Amount modifyFee = Amount::zero(); + int64_t modifyBillableSize = 0; int64_t modifyCount = 0; + Amount modifyFee = Amount::zero(); for (txiter cit : setAllDescendants) { if (!setExclude.count(cit->GetTx().GetId())) { modifySize += cit->GetTxSize(); + modifyBillableSize += cit->GetTxBillableSize(); modifyFee += cit->GetModifiedFee(); modifyCount++; cachedDescendants[updateIt].insert(cit); // Update ancestor state for each descendant mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), + updateIt->GetTxBillableSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount())); } } mapTx.modify(updateIt, - update_descendant_state(modifySize, modifyFee, modifyCount)); + update_descendant_state(modifySize, modifyBillableSize, + modifyFee, modifyCount)); } // txidsToUpdate is the set of transaction hashes from a disconnected block // which has been re-added to the mempool. For each entry, look for descendants // that are outside txidsToUpdate, and add fee/size information for such // descendants to the parent. For each such descendant, also update the ancestor // state to include the parent. void CTxMemPool::UpdateTransactionsFromBlock( const std::vector<TxId> &txidsToUpdate) { LOCK(cs); // For each entry in txidsToUpdate, store the set of in-mempool, but not // in-txidsToUpdate transactions, so that we don't have to recalculate // descendants when we come across a previously seen entry. cacheMap mapMemPoolDescendantsToUpdate; // Use a set for lookups into txidsToUpdate (these entries are already // accounted for in the state of their ancestors) std::set<uint256> setAlreadyIncluded(txidsToUpdate.begin(), txidsToUpdate.end()); // Iterate in reverse, so that whenever we are looking at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const TxId &txid : boost::adaptors::reverse(txidsToUpdate)) { // we cache the in-mempool children to avoid duplicate updates setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(txid); if (it == mapTx.end()) { continue; } auto iter = mapNextTx.lower_bound(COutPoint(txid, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx.
for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid; ++iter) { const TxId &childTxId = iter->second->GetId(); txiter childIter = mapTx.find(childTxId); assert(childIter != mapTx.end()); // We can skip updating entries we've encountered before or that are // in the block (which are already accounted for). if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childTxId)) { UpdateChild(it, childIter, true); UpdateParent(childIter, it, true); } } UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } bool CTxMemPool::CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const { LOCK(cs); setEntries parentHashes; const CTransaction &tx = entry.GetTx(); if (fSearchForParents) { // Get parents of this transaction that are in the mempool // GetMemPoolParents() is only valid for entries in the mempool, so we // iterate mapTx to find parents. for (const CTxIn &in : tx.vin) { txiter piter = mapTx.find(in.prevout.GetTxId()); if (piter == mapTx.end()) { continue; } parentHashes.insert(piter); if (parentHashes.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); return false; } } } else { // If we're not searching for parents, we require this to be an entry in // the mempool already. txiter it = mapTx.iterator_to(entry); parentHashes = GetMemPoolParents(it); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!parentHashes.empty()) { txiter stageit = *parentHashes.begin(); setAncestors.insert(stageit); parentHashes.erase(stageit); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf( "exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantSize); return false; } if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantCount); return false; } if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const setEntries &setMemPoolParents = GetMemPoolParents(stageit); for (const txiter &phash : setMemPoolParents) { // If this is a new ancestor, add it. if (setAncestors.count(phash) == 0) { parentHashes.insert(phash); } if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { setEntries parentIters = GetMemPoolParents(it); // add or remove this tx as a child of each parent for (txiter piter : parentIters) { UpdateChild(piter, it, add); } const int64_t updateCount = (add ? 
1 : -1); const int64_t updateSize = updateCount * it->GetTxSize(); + const int64_t updateBillableSize = updateCount * it->GetTxBillableSize(); const Amount updateFee = updateCount * it->GetModifiedFee(); for (txiter ancestorIt : setAncestors) { - mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, - updateCount)); + mapTx.modify(ancestorIt, + update_descendant_state(updateSize, updateBillableSize, + updateFee, updateCount)); } } void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) { int64_t updateCount = setAncestors.size(); int64_t updateSize = 0; - Amount updateFee = Amount::zero(); + int64_t updateBillableSize = 0; int64_t updateSigOpsCount = 0; + Amount updateFee = Amount::zero(); + for (txiter ancestorIt : setAncestors) { updateSize += ancestorIt->GetTxSize(); + updateBillableSize += ancestorIt->GetTxBillableSize(); updateFee += ancestorIt->GetModifiedFee(); updateSigOpsCount += ancestorIt->GetSigOpCount(); } - mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, + mapTx.modify(it, update_ancestor_state(updateSize, updateBillableSize, + updateFee, updateCount, updateSigOpsCount)); } void CTxMemPool::UpdateChildrenForRemoval(txiter it) { const setEntries &setMemPoolChildren = GetMemPoolChildren(it); for (txiter updateIt : setMemPoolChildren) { UpdateParent(updateIt, it, false); } } void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) { // For each entry, walk back all ancestors and decrement size associated // with this transaction. const uint64_t nNoLimit = std::numeric_limits::max(); if (updateDescendants) { // updateDescendants should be true whenever we're not recursively // removing a tx and all its descendants, eg when a transaction is // confirmed in a block. Here we only update statistics and not data in // mapLinks (which we need to preserve until we're finished with all // operations that need to traverse the mempool). for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self - int64_t modifySize = -((int64_t)removeIt->GetTxSize()); + int64_t modifySize = -int64_t(removeIt->GetTxSize()); + int64_t modifyBillableSize = + -int64_t(removeIt->GetTxBillableSize()); Amount modifyFee = -1 * removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCount(); for (txiter dit : setDescendants) { - mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, - -1, modifySigOps)); + mapTx.modify( + dit, update_ancestor_state(modifySize, modifyBillableSize, + modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. 
In this case, the set of // ancestors reachable via mapLinks will be the same as the set of // ancestors whose packages include this transaction, because when we // add a new transaction to the mempool in addUnchecked(), we assume it // has no children, and in the case of a reorg where that assumption is // false, the in-mempool children aren't linked to the in-block tx's // until UpdateTransactionsFromBlock() is called. So if we're being // called during a reorg, ie before UpdateTransactionsFromBlock() has // been called, then mapLinks[] will differ from the set of mempool // parents we'd calculate by searching, and it's important that we use // the mapLinks[] notion of ancestor transactions as the set of things // to update for removal. CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. UpdateAncestorsOf(false, removeIt, setAncestors); } // After updating all the ancestor sizes, we can now sever the link between // each transaction being removed and any mempool children (ie, update // setMemPoolParents for each direct child of a transaction being removed). for (txiter removeIt : entriesToRemove) { UpdateChildrenForRemoval(removeIt); } } void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, + int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount) { nSizeWithDescendants += modifySize; assert(int64_t(nSizeWithDescendants) > 0); + nBillableSizeWithDescendants += modifyBillableSize; + assert(int64_t(nBillableSizeWithDescendants) >= 0); nModFeesWithDescendants += modifyFee; nCountWithDescendants += modifyCount; assert(int64_t(nCountWithDescendants) > 0); } -void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee, - int64_t modifyCount, +void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, + int64_t modifyBillableSize, + Amount modifyFee, int64_t modifyCount, int modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); + nBillableSizeWithAncestors += modifyBillableSize; + assert(int64_t(nBillableSizeWithAncestors) >= 0); nModFeesWithAncestors += modifyFee; nCountWithAncestors += modifyCount; assert(int64_t(nCountWithAncestors) > 0); nSigOpCountWithAncestors += modifySigOps; assert(int(nSigOpCountWithAncestors) >= 0); } CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) { // lock free clear _clear(); // Sanity checks off by default for performance, because otherwise accepting // transactions becomes O(N^2) where N is the number of transactions in the // pool nCheckFrequency = 0; minerPolicyEstimator = new CBlockPolicyEstimator(); } CTxMemPool::~CTxMemPool() { delete minerPolicyEstimator; } bool CTxMemPool::isSpent(const COutPoint &outpoint) { LOCK(cs); return mapNextTx.count(outpoint); } unsigned int CTxMemPool::GetTransactionsUpdated() const { LOCK(cs); return nTransactionsUpdated; } void CTxMemPool::AddTransactionsUpdated(unsigned int n) { LOCK(cs); nTransactionsUpdated += n; } bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate) { NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks. 
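// Bookkeeping sketch for what follows: for a new entry e with in-mempool
// ancestor set A, UpdateAncestorsOf/UpdateEntryForAncestors apply, per
// ancestor a in A:
//   a's with-descendants (size, billableSize, fee, count) +=
//       (e.GetTxSize(), e.GetTxBillableSize(), e.GetModifiedFee(), 1)
// and seed e's ancestor totals with the sums over A, e.g.
//   e.nBillableSizeWithAncestors =
//       e.GetTxBillableSize() + sum over A of a.GetTxBillableSize(),
// which is exactly the billable-size plumbing this change threads through.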
LOCK(cs); indexed_transaction_set::iterator newit = mapTx.insert(entry).first; mapLinks.insert(make_pair(newit, TxLinks())); // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting into // mapTx. std::map::const_iterator pos = mapDeltas.find(hash); if (pos != mapDeltas.end()) { const TXModifier &deltas = pos->second; if (deltas.second != Amount::zero()) { mapTx.modify(newit, update_fee_delta(deltas.second)); } } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction &tx = newit->GetTx(); std::set setParentTransactions; for (const CTxIn &in : tx.vin) { mapNextTx.insert(std::make_pair(&in.prevout, &tx)); setParentTransactions.insert(in.prevout.GetTxId()); } // Don't bother worrying about child transactions of this one. Normal case // of a new transaction arriving is that there can't be any children, // because such children would be orphans. An exception to that is if a // transaction enters that used to be in a block. In that case, our // disconnect block logic will call UpdateTransactionsFromBlock to clean up // the mess we're leaving here. // Update ancestors with information about this tx for (const uint256 &phash : setParentTransactions) { txiter pit = mapTx.find(phash); if (pit != mapTx.end()) { UpdateParent(newit, pit, true); } } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); minerPolicyEstimator->processTransaction(entry, validFeeEstimate); vTxHashes.emplace_back(tx.GetHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; return true; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { NotifyEntryRemoved(it->GetSharedTx(), reason); const uint256 txid = it->GetTx().GetId(); for (const CTxIn &txin : it->GetTx().vin) { mapNextTx.erase(txin.prevout); } if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) { vTxHashes.shrink_to_fit(); } } else { vTxHashes.clear(); } totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children); mapLinks.erase(it); mapTx.erase(it); nTransactionsUpdated++; minerPolicyEstimator->removeTx(txid); } // Calculates descendants of entry that are not already in setDescendants, and // adds to setDescendants. Assumes entryit is already a tx in the mempool and // setMemPoolChildren is correct for tx and all descendants. Also assumes that // if an entry is in setDescendants already, then all in-mempool descendants of // it are already in setDescendants as well, so that we can save time by not // iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants) { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have // either already been walked, or will be walked in this iteration). 
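// Worked example: for an in-mempool chain A -> B -> C (B spends A, C spends
// B), CalculateDescendants(A, set) stages A, inserts it, stages its child B,
// inserts B, stages C, and terminates with set == {A, B, C}. If the caller
// passes a set that already contains B, the child scan skips B, so C is
// never staged either, matching the assumption documented above.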
while (!stage.empty()) { txiter it = *stage.begin(); setDescendants.insert(it); stage.erase(it); const setEntries &setChildren = GetMemPoolChildren(it); for (const txiter &childiter : setChildren) { if (!setDescendants.count(childiter)) { stage.insert(childiter); } } } } void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool. LOCK(cs); setEntries txToRemove; txiter origit = mapTx.find(origTx.GetId()); if (origit != mapTx.end()) { txToRemove.insert(origit); } else { // When recursively removing but origTx isn't in the mempool be sure to // remove any children that are in the pool. This can happen during // chain re-orgs if origTx isn't re-accepted into the mempool for any // reason. for (size_t i = 0; i < origTx.vout.size(); i++) { auto it = mapNextTx.find(COutPoint(origTx.GetId(), i)); if (it == mapNextTx.end()) { continue; } txiter nextit = mapTx.find(it->second->GetId()); assert(nextit != mapTx.end()); txToRemove.insert(nextit); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, reason); } void CTxMemPool::removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) { // Remove transactions spending a coinbase which are now immature and // no-longer-final transactions. LOCK(cs); setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction &tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); bool validLP = TestLockPointValidity(&lp); CValidationState state; if (!ContextualCheckTransactionForCurrentBlock(config, tx, state, flags) || !CheckSequenceLocks(tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be // invalid. So it's critical that we remove the tx and not depend on // the LockPoints. txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn &txin : tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { continue; } const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) { assert(!coin.IsSpent()); } if (coin.IsSpent() || (coin.IsCoinBase() && int64_t(nMemPoolHeight) - coin.GetHeight() < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively LOCK(cs); for (const CTxIn &txin : tx.vin) { auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetId()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner * fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector &vtx, unsigned int nBlockHeight) { LOCK(cs); DisconnectedBlockTransactions disconnectpool; disconnectpool.addForBlock(vtx); std::vector entries; for (const CTransactionRef &tx : boost::adaptors::reverse( disconnectpool.GetQueuedTx().get())) { uint256 txid = tx->GetId(); indexed_transaction_set::iterator i = mapTx.find(txid); if (i != mapTx.end()) { entries.push_back(&*i); } } // Before the txs in the new block have been removed from the mempool, // update policy estimates minerPolicyEstimator->processBlock(nBlockHeight, entries); for (const CTransactionRef &tx : boost::adaptors::reverse( disconnectpool.GetQueuedTx().get())) { txiter it = mapTx.find(tx->GetId()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetId()); } disconnectpool.clear(); lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapLinks.clear(); mapTx.clear(); mapNextTx.clear(); vTxHashes.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { if (nCheckFrequency == 0) { return; } if (GetRand(std::numeric_limits::max()) >= nCheckFrequency) { return; } LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast(pcoins)); const int64_t nSpendHeight = GetSpendHeight(mempoolDuplicate); LOCK(cs); std::list waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction &tx = it->GetTx(); txlinksMap::const_iterator linksiter = mapLinks.find(it); assert(linksiter != mapLinks.end()); const TxLinks &links = linksiter->second; innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children); bool fDependsWait = false; setEntries setParentCheck; int64_t parentSizes = 0; int64_t parentSigOpCount = 0; for (const CTxIn &txin : tx.vin) { // Check that every mempool transaction's inputs refer to available // coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { const CTransaction &tx2 = it2->GetTx(); assert(tx2.vout.size() > txin.prevout.GetN() && !tx2.vout[txin.prevout.GetN()].IsNull()); fDependsWait = true; if (setParentCheck.insert(it2).second) { parentSizes += it2->GetTxSize(); parentSigOpCount += it2->GetSigOpCount(); } } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } assert(setParentCheck == GetMemPoolParents(it)); // Verify ancestor state is correct. 
setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); Amount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCount(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCount(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCountWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPool::setEntries setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0)); int64_t childSizes = 0; for (; iter != mapNextTx.end() && iter->first->GetTxId() == it->GetTx().GetId(); ++iter) { txiter childit = mapTx.find(iter->second->GetId()); // mapNextTx points to in-mempool transactions assert(childit != mapTx.end()); if (setChildrenCheck.insert(childit).second) { childSizes += childit->GetTxSize(); } } assert(setChildrenCheck == GetMemPoolChildren(it)); // Also check to make sure size is greater than sum with immediate // children. Just a sanity check, not definitive that this calc is // correct... assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize()); if (fDependsWait) { waitingOnDependants.push_back(&(*it)); } else { CValidationState state; bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs( tx, state, mempoolDuplicate, nSpendHeight); assert(fCheckResult); UpdateCoins(mempoolDuplicate, tx, 1000000); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry *entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); CValidationState state; if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { bool fCheckResult = entry->GetTx().IsCoinBase() || Consensus::CheckTxInputs(entry->GetTx(), state, mempoolDuplicate, nSpendHeight); assert(fCheckResult); UpdateCoins(mempoolDuplicate, entry->GetTx(), 1000000); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { uint256 txid = it->second->GetId(); indexed_transaction_set::const_iterator it2 = mapTx.find(txid); const CTransaction &tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb) { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hasha); if (i == mapTx.end()) { return false; } indexed_transaction_set::const_iterator j = mapTx.find(hashb); if (j == mapTx.end()) { return true; } uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a, const CTxMemPool::indexed_transaction_set::const_iterator &b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return 
CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // namespace std::vector CTxMemPool::GetSortedDepthAndScore() const { std::vector iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } void CTxMemPool::queryHashes(std::vector &vtxid) { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetId()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), - CFeeRate(it->GetFee(), it->GetTxSize()), + CFeeRate(it->GetFee(), it->GetTxBillableSize()), it->GetModifiedFee() - it->GetFee()}; } std::vector CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return nullptr; } return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return TxMempoolInfo(); } return GetInfo(i); } CFeeRate CTxMemPool::estimateFee(int nBlocks) const { LOCK(cs); return minerPolicyEstimator->estimateFee(nBlocks); } CFeeRate CTxMemPool::estimateSmartFee(int nBlocks, int *answerFoundAtBlocks) const { LOCK(cs); return minerPolicyEstimator->estimateSmartFee(nBlocks, answerFoundAtBlocks, *this); } bool CTxMemPool::WriteFeeEstimates(CAutoFile &fileout) const { try { LOCK(cs); // version required to read: 0.13.99 or later fileout << 139900; // version that wrote the file fileout << CLIENT_VERSION; minerPolicyEstimator->Write(fileout); } catch (const std::exception &) { LogPrintf("CTxMemPool::WriteFeeEstimates(): unable to write policy " "estimator data (non-fatal)\n"); return false; } return true; } bool CTxMemPool::ReadFeeEstimates(CAutoFile &filein) { try { int nVersionRequired, nVersionThatWrote; filein >> nVersionRequired >> nVersionThatWrote; if (nVersionRequired > CLIENT_VERSION) { return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee " "estimate file", nVersionRequired); } LOCK(cs); minerPolicyEstimator->Read(filein, nVersionThatWrote); } catch (const std::exception &) { LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy " "estimator data (non-fatal)\n"); return false; } return true; } void CTxMemPool::PrioritiseTransaction(const uint256 hash, const std::string strHash, double dPriorityDelta, const Amount nFeeDelta) { { LOCK(cs); TXModifier &deltas = mapDeltas[hash]; deltas.first += dPriorityDelta; deltas.second += nFeeDelta; txiter it = mapTx.find(hash); if (it != mapTx.end()) { mapTx.modify(it, update_fee_delta(deltas.second)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, - update_descendant_state(0, nFeeDelta, 0)); + update_descendant_state(0, 0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors 
setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, - update_ancestor_state(0, nFeeDelta, 0, 0)); + update_ancestor_state(0, 0, nFeeDelta, 0, 0)); } } } LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash, dPriorityDelta, FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const { LOCK(cs); std::map<uint256, TXModifier>::const_iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) { return; } const TXModifier &deltas = pos->second; dPriorityDelta += deltas.first; nFeeDelta += deltas.second; } void CTxMemPool::ClearPrioritisation(const uint256 hash) { LOCK(cs); mapDeltas.erase(hash); } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (const CTxIn &in : tx.vin) { if (exists(in.prevout.GetTxId())) { return false; } } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an entry in the mempool exists, always return that one, as it's // guaranteed to never conflict with the underlying cache, and it cannot // have pruned entries (as it contains full transactions). First checking // the underlying cache risks returning a pruned entry instead. CTransactionRef ptx = mempool.get(outpoint.GetTxId()); if (ptx) { if (outpoint.GetN() < ptx->vout.size()) { coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false); return true; } return false; } return base->GetCoin(outpoint, coin) && !coin.IsSpent(); } bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const { return mempool.exists(outpoint) || base->HaveCoin(outpoint); } size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no // exact formula for boost::multi_index_container is implemented.
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void *)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); for (const txiter &it : stage) { removeUnchecked(it, reason); } } int CTxMemPool::Expire(int64_t time) { LOCK(cs); indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); setEntries toremove; while (it != mapTx.get().end() && it->GetTime() < time) { toremove.insert(mapTx.project<0>(it)); it++; } setEntries stage; for (txiter removeit : toremove) { CalculateDescendants(removeit, stage); } RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY); return stage.size(); } void CTxMemPool::LimitSize(size_t limit, unsigned long age) { int expired = Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector vNoSpendsRemaining; TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint &removed : vNoSpendsRemaining) { pcoinsTip->Uncache(removed); } } bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, bool validFeeEstimate) { LOCK(cs); setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); return addUnchecked(hash, entry, setAncestors, validFeeEstimate); } void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) { setEntries s; if (add && mapLinks[entry].children.insert(child).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].children.erase(child)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) { setEntries s; if (add && mapLinks[entry].parents.insert(parent).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].parents.erase(parent)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolParents(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.parents; } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolChildren(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.children; } CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { LOCK(cs); if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) { return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) { halflife /= 4; } else if (DynamicMemoryUsage() < sizelimit / 2) { halflife /= 2; } rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; } return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) { AssertLockHeld(cs); if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) { 
rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI; blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining) { LOCK(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(Amount::zero()); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); // We set the new mempool min fee to the feerate of the removed set, // plus the "minimum reasonable fee rate" (ie some value under which we // consider txn to have 0 fee). This way, we don't allow txn to enter // mempool with feerate equal to txn which were removed with no block in // between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += MEMPOOL_FULL_FEE_INCREMENT; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) { txn.push_back(iter->GetTx()); } } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction &tx : txn) { for (const CTxIn &txin : tx.vin) { if (exists(txin.prevout.GetTxId())) { continue; } if (!mapNextTx.count(txin.prevout)) { pvNoSpendsRemaining->push_back(txin.prevout); } } } } } if (maxFeeRateRemoved > CFeeRate(Amount::zero())) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } bool CTxMemPool::TransactionWithinChainLimit(const uint256 &txid, size_t chainLimit) const { LOCK(cs); auto it = mapTx.find(txid); return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit && it->GetCountWithDescendants() < chainLimit); } SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits::max())), k1(GetRand(std::numeric_limits::max())) {} /** Maximum bytes for transactions to store for processing during reorg */ static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE; void DisconnectedBlockTransactions::addForBlock( const std::vector &vtx) { for (const auto &tx : vtx) { // If we already added it, just skip. auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { continue; } // Insert the transaction into the pool. addTransaction(tx); // Fill in the set of parents. std::unordered_set parents; for (const CTxIn &in : tx->vin) { parents.insert(in.prevout.GetTxId()); } // In order to make sure we keep things in topological order, we check // if we already know of the parent of the current transaction. If so, // we remove them from the set and then add them back. while (parents.size() > 0) { std::unordered_set worklist( std::move(parents)); for (const TxId &txid : worklist) { // If we do not have that txid in the set, nothing needs to be // done. auto pit = queuedTx.find(txid); if (pit == queuedTx.end()) { continue; } // We have parent in our set, we reinsert them at the right // position. const CTransactionRef ptx = *pit; queuedTx.erase(pit); queuedTx.insert(ptx); // And we make sure ancestors are covered. for (const CTxIn &in : ptx->vin) { parents.insert(in.prevout.GetTxId()); } } } } // Keep the size under control. while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. 
auto it = queuedTx.get<insertion_order>().begin(); mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); removeEntry(it); } } void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector<TxId> txidsUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. for (const CTransactionRef &tx : boost::adaptors::reverse(queuedTx.get<insertion_order>())) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || tx->IsCoinBase() || !AcceptToMemoryPool(config, mempool, stateDummy, tx, false, nullptr, true)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG); } else if (mempool.exists(tx->GetId())) { txidsUpdate.push_back(tx->GetId()); } } queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. mempool.UpdateTransactionsFromBlock(txidsUpdate); // We also need to remove any now-immature transactions mempool.removeForReorg(config, pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions mempool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } diff --git a/src/txmempool.h b/src/txmempool.h index 302bd4af2e..c95de77a07 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,932 +1,951 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include "amount.h" #include "coins.h" #include "indirectmap.h" #include "primitives/transaction.h" #include "random.h" #include "sync.h" #include #include #include #include #include #include #include #include #include #include #include class CAutoFile; class CBlockIndex; class Config; inline double AllowFreeThreshold() { return (144 * COIN) / (250 * SATOSHI); } inline bool AllowFree(double dPriority) { // Large (in bytes) low-priority (new, small-coin) transactions need a fee. return dPriority > AllowFreeThreshold(); } /** * Fake height value used in Coins to signify they are only in the memory * pool (since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg.
CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. * * If updating the descendant state is skipped, we can mark the entry as * "dirty", and set nSizeWithDescendants/nModFeesWithDescendants to equal * nTxSize/nFee+feeDelta. (This can potentially happen during a reorg, where we * limit the amount of work we're willing to do to avoid consuming too much * CPU.) */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; + //!< ... and billable size for billing + size_t nTxBillableSize; //!< ... and modified size for priority size_t nModSize; //!< ... and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Priority when entering the mempool double entryPriority; //!< Chain height when entering the mempool unsigned int entryHeight; //!< Sum of all txin values that are already in blockchain Amount inChainInputValue; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. if nCountWithDescendants is 0, treat this entry as // dirty, and nSizeWithDescendants and nModFeesWithDescendants will not be // correct. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; + uint64_t nBillableSizeWithDescendants; + //!< ... and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; + uint64_t nBillableSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); CTxMemPoolEntry(const CTxMemPoolEntry &other); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update from entry * priority. Only inputs that were originally in-chain will age. 
*/ double GetPriority(unsigned int currentHeight) const; const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } + size_t GetTxBillableSize() const { return nTxBillableSize; } + int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state, if this entry is not dirty. - void UpdateDescendantState(int64_t modifySize, Amount modifyFee, - int64_t modifyCount); + void UpdateDescendantState(int64_t modifySize, int64_t modifyBillableSize, + Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state - void UpdateAncestorState(int64_t modifySize, Amount modifyFee, - int64_t modifyCount, int modifySigOps); + void UpdateAncestorState(int64_t modifySize, int64_t modifyBillableSize, + Amount modifyFee, int64_t modifyCount, + int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } + uint64_t GetBillableSizeWithDescendants() const { + return nBillableSizeWithDescendants; + } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } + uint64_t GetBillableSizeWithAncestors() const { + return nBillableSizeWithAncestors; + } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. 
struct update_descendant_state { - update_descendant_state(int64_t _modifySize, Amount _modifyFee, - int64_t _modifyCount) - : modifySize(_modifySize), modifyFee(_modifyFee), - modifyCount(_modifyCount) {} + update_descendant_state(int64_t _modifySize, int64_t _modifyBillableSize, + Amount _modifyFee, int64_t _modifyCount) + : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), + modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { - e.UpdateDescendantState(modifySize, modifyFee, modifyCount); + e.UpdateDescendantState(modifySize, modifyBillableSize, modifyFee, + modifyCount); } private: int64_t modifySize; + int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { - update_ancestor_state(int64_t _modifySize, Amount _modifyFee, - int64_t _modifyCount, int64_t _modifySigOpsCost) - : modifySize(_modifySize), modifyFee(_modifyFee), - modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} + update_ancestor_state(int64_t _modifySize, int64_t _modifyBillableSize, + Amount _modifyFee, int64_t _modifyCount, + int64_t _modifySigOpsCost) + : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), + modifyFee(_modifyFee), modifyCount(_modifyCount), + modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { - e.UpdateAncestorState(modifySize, modifyFee, modifyCount, - modifySigOpsCost); + e.UpdateAncestorState(modifySize, modifyBillableSize, modifyFee, + modifyCount, modifySigOpsCost); } private: int64_t modifySize; + int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; // extracts a transaction hash from CTxMempoolEntry or CTransactionRef struct mempoolentry_txid { typedef uint256 result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). */ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { bool fUseADescendants = UseDescendantScore(a); bool fUseBDescendants = UseDescendantScore(b); double aModFee = (fUseADescendants ? a.GetModFeesWithDescendants() : a.GetModifiedFee()) / SATOSHI; double aSize = fUseADescendants ? a.GetSizeWithDescendants() : a.GetTxSize(); double bModFee = (fUseBDescendants ? b.GetModFeesWithDescendants() : b.GetModifiedFee()) / SATOSHI; double bSize = fUseBDescendants ? b.GetSizeWithDescendants() : b.GetTxSize(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = aModFee * bSize; double f2 = aSize * bModFee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Calculate which score to use for an entry (avoiding division). 
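These functor structs exist because boost::multi_index exposes its elements as const through iterators; all mutation goes through modify() with a functor, which lets the container re-index the element if a sort key changed. A reduced, compilable illustration of the same pattern (Entry and update_size are illustrative stand-ins):

```cpp
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <iostream>

struct Entry {
    int id;
    long sizeWithDescendants;
};

struct update_size {
    explicit update_size(long delta) : delta(delta) {}
    void operator()(Entry &e) { e.sizeWithDescendants += delta; }
private:
    long delta;
};

using Index = boost::multi_index_container<
    Entry,
    boost::multi_index::indexed_by<boost::multi_index::hashed_unique<
        boost::multi_index::member<Entry, int, &Entry::id>>>>;

int main() {
    Index mapTx;
    mapTx.insert({1, 250});
    auto it = mapTx.find(1);
    // modify() applies the functor and re-indexes if needed; this is the
    // same pattern the mempool uses with update_descendant_state.
    mapTx.modify(it, update_size(+300));
    std::cout << mapTx.find(1)->sizeWithDescendants << "\n"; // 550
}
```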
bool UseDescendantScore(const CTxMemPoolEntry &a) const { double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); return f2 > f1; } }; /** \class CompareTxMemPoolEntryByScore * * Sort by score of entry ((fee+delta)/size) in descending order */ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetModifiedFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; class CompareTxMemPoolEntryByAncestorFee { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double aFees = a.GetModFeesWithAncestors() / SATOSHI; double aSize = a.GetSizeWithAncestors(); double bFees = b.GetModFeesWithAncestors() / SATOSHI; double bSize = b.GetSizeWithAncestors(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = aFees * bSize; double f2 = aSize * bFees; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct mining_score {}; struct ancestor_score {}; class CBlockPolicyEstimator; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //! Manually removed or unknown reason UNKNOWN = 0, //! Expired from mempool EXPIRY, //! Removed in size limiting SIZELIMIT, //! Removed for reorganization REORG, //! Removed for block BLOCK, //! Removed for conflict with in-block transaction CONFLICT, //! Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const uint256 &txid) const { return SipHashUint256(k0, k1, txid); } }; typedef std::pair TXModifier; /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. 
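All of these comparators rely on the cross-multiplication trick called out in the comments: for positive sizes, feeA/sizeA > feeB/sizeB exactly when feeA*sizeB > feeB*sizeA, so no division (and no divide-by-zero guard) is needed. A tiny self-contained demonstration:

```cpp
#include <cassert>

// Equivalent to feeA/sizeA > feeB/sizeB without dividing, valid for
// positive sizes.
bool HigherFeeRate(double feeA, double sizeA, double feeB, double sizeB) {
    return feeA * sizeB > feeB * sizeA;
}

int main() {
    // 500 sat / 250 B = 2 sat/B beats 300 sat / 200 B = 1.5 sat/B.
    assert(HigherFeeRate(500, 250, 300, 200));
    assert(!HigherFeeRate(300, 200, 500, 250));
}
```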
* * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - feerate [we use max(feerate of tx, feerate of tx with all descendants)] * - time in mempool * - mining score (feerate modified by any fee deltas from * PrioritiseTransaction) * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. * CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. * * Adding transactions from a disconnected block can be very time consuming, * because we don't have a way to limit the number of in-mempool descendants. 
To * bound CPU processing, we limit the amount of work we're willing to do to * properly update the descendant information for a tx being added from a * disconnected block. If we would exceed the limit, then we instead mark the * entry as "dirty", and set the feerate for sorting purposes to be equal the * feerate of the transaction without any descendants. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency; unsigned int nTransactionsUpdated; CBlockPolicyEstimator *minerPolicyEstimator; //!< sum of all mempool tx's virtual sizes. uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate); public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByEntryTime>, // sorted by score (for mining prioritization) boost::multi_index::ordered_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByScore>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; mutable CCriticalSection cs; indexed_transaction_set mapTx; typedef indexed_transaction_set::nth_index<0>::type::iterator txiter; //!< All tx hashes/entries in mapTx, in random order std::vector> vTxHashes; struct CompareIteratorByHash { bool operator()(const txiter &a, const txiter &b) const { return a->GetTx().GetId() < b->GetTx().GetId(); } }; typedef std::set setEntries; const setEntries &GetMemPoolParents(txiter entry) const; const setEntries &GetMemPoolChildren(txiter entry) const; private: typedef std::map cacheMap; struct TxLinks { setEntries parents; setEntries children; }; typedef std::map txlinksMap; txlinksMap mapLinks; void UpdateParent(txiter entry, txiter parent, bool add); void UpdateChild(txiter entry, txiter child, bool add); std::vector GetSortedDepthAndScore() const; public: indirectmap mapNextTx; std::map mapDeltas; /** Create a new CTxMemPool. */ CTxMemPool(); ~CTxMemPool(); /** * If sanity-checking is turned on, check makes sure the pool is consistent * (does not contain two transactions that spend the same inputs, all inputs * are in the mapNextTx array). If sanity-checking is turned off, check does * nothing. */ void check(const CCoinsViewCache *pcoins) const; void setSanityCheck(double dFrequency = 1.0) { nCheckFrequency = dFrequency * 4294967295.0; } // addUnchecked must updated state for all ancestors of a given transaction, // to track size/count of descendant transactions. First version of // addUnchecked can be used to have it call CalculateMemPoolAncestors(), and // then invoke the second version. 
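The nCheckFrequency comment ("n times in 2^32 we check") describes threshold sampling against a uniform 32-bit draw. A self-contained sketch of that mechanism; the RNG choice and loop are illustrative, not the node's internals:

```cpp
#include <cstdint>
#include <iostream>
#include <random>

int main() {
    double dFrequency = 0.25; // ask for a check ~25% of the time
    uint32_t nCheckFrequency = static_cast<uint32_t>(dFrequency * 4294967295.0);

    std::mt19937 rng(42);
    std::uniform_int_distribution<uint32_t> dist; // full 32-bit range
    int checks = 0, trials = 100000;
    for (int i = 0; i < trials; ++i) {
        if (dist(rng) < nCheckFrequency) {
            ++checks; // the real code would run the expensive pool check here
        }
    }
    std::cout << "checked " << checks << "/" << trials << " times\n"; // ~25000
}
```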
bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, bool validFeeEstimate = true); bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate = true); void removeRecursive( const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags); void removeConflicts(const CTransaction &tx); void removeForBlock(const std::vector &vtx, unsigned int nBlockHeight); void clear(); // lock free void _clear(); bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb); void queryHashes(std::vector &vtxid); bool isSpent(const COutPoint &outpoint); unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** * Check that none of this transactions inputs are in the mempool, and thus * the tx is not dependent on other mempool transactions to be included in a * block. */ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ void PrioritiseTransaction(const uint256 hash, const std::string strHash, double dPriorityDelta, const Amount nFeeDelta); void ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const; void ClearPrioritisation(const uint256 hash); public: /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector &txidsToUpdate); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true) const; /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. 
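A reduced sketch of the limit-bounded ancestor walk that the CalculateMemPoolAncestors() documentation above describes: walk in-mempool parents, aborting as soon as a count or size budget is exceeded. All types here are simplified stand-ins, and every parent is assumed to be present in the pool:

```cpp
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>

struct PoolTx { int64_t size; std::vector<std::string> parents; };

bool CalcAncestors(const std::map<std::string, PoolTx> &pool,
                   const std::string &txid, uint64_t limitCount,
                   uint64_t limitSize, std::set<std::string> &ancestors,
                   std::string &err) {
    std::vector<std::string> work = pool.at(txid).parents;
    uint64_t totalSize = pool.at(txid).size; // totals include the tx itself
    while (!work.empty()) {
        std::string cur = work.back();
        work.pop_back();
        if (!ancestors.insert(cur).second) continue; // already visited
        totalSize += pool.at(cur).size;
        if (ancestors.size() + 1 > limitCount) { err = "too many ancestors"; return false; }
        if (totalSize > limitSize) { err = "ancestor size too large"; return false; }
        for (const auto &p : pool.at(cur).parents) work.push_back(p);
    }
    return true;
}

int main() {
    std::map<std::string, PoolTx> pool = {
        {"a", {300, {}}}, {"b", {400, {"a"}}}, {"c", {500, {"b"}}}};
    std::set<std::string> anc;
    std::string err;
    // anc ends up holding {"a", "b"} under these generous limits.
    return CalcAncestors(pool, "c", 25, 101000, anc, err) ? 0 : 1;
}
```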
*/ void CalculateDescendants(txiter it, setEntries &setDescendants); /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. */ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Returns false if the transaction is in the mempool and not within the * chain limit specified. */ bool TransactionWithinChainLimit(const uint256 &txid, size_t chainLimit) const; unsigned long size() { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() { LOCK(cs); return totalTxSize; } bool exists(uint256 hash) const { LOCK(cs); return mapTx.count(hash) != 0; } bool exists(const COutPoint &outpoint) const { LOCK(cs); auto it = mapTx.find(outpoint.GetTxId()); return it != mapTx.end() && outpoint.GetN() < it->GetTx().vout.size(); } CTransactionRef get(const uint256 &hash) const; TxMempoolInfo info(const uint256 &hash) const; std::vector infoAll() const; /** * Estimate fee rate needed to get into the next nBlocks. If no answer can * be given at nBlocks, return an estimate at the lowest number of blocks * where one can be given. */ CFeeRate estimateSmartFee(int nBlocks, int *answerFoundAtBlocks = nullptr) const; /** Estimate fee rate needed to get into the next nBlocks */ CFeeRate estimateFee(int nBlocks) const; /** Write/Read estimates to disk */ bool WriteFeeEstimates(CAutoFile &fileout) const; bool ReadFeeEstimates(CAutoFile &filein); size_t DynamicMemoryUsage() const; boost::signals2::signal NotifyEntryAdded; boost::signals2::signal NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. */ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude); /** * Update ancestors of hash to add/remove it as a descendant transaction. */ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors); /** * For each transaction being removed, update ancestors and any direct * children. 
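The GetMinFee() behavior documented above is an exponential decay with the 12-hour ROLLING_FEE_HALFLIFE that snaps to zero once below half the increment. The real code decays in discrete update steps; the continuous form below, with made-up numbers, traces the same curve:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    const double HALFLIFE = 60 * 60 * 12; // 12 hours, as in the header
    double rollingMinFee = 1000.0;        // sat/kB after a trim event
    const double incrementalRelayFee = 100.0;

    for (double elapsed : {0.0, HALFLIFE, 2 * HALFLIFE, 5 * HALFLIFE}) {
        double fee = rollingMinFee * std::pow(2.0, -elapsed / HALFLIFE);
        if (fee < incrementalRelayFee / 2) {
            fee = 0; // cheaper to admit everything than track a tiny floor
        }
        std::printf("t=%6.0fh fee=%.1f sat/kB\n", elapsed / 3600, fee);
    }
}
```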
If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked( txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); }; /** * CCoinsView that brings transactions from a memorypool into view. * It does not check for spendings by memory pool transactions. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; }; // We want to sort transactions by coin age priority typedef std::pair TxCoinAgePriority; struct TxCoinAgePriorityCompare { bool operator()(const TxCoinAgePriority &a, const TxCoinAgePriority &b) { if (a.first == b.first) { // Reverse order to make sort less than return CompareTxMemPoolEntryByScore()(*(b.second), *(a.second)); } return a.first < b.first; } }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. */ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. 
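queuedTx pairs a hashed txid index with a sequenced index, so removeForBlock() gets O(1) lookups while updateMempoolForReorg() can walk blockchain order. A compilable miniature of that dual-index layout, with std::string standing in for CTransactionRef:

```cpp
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/identity.hpp>
#include <boost/multi_index/sequenced.hpp>
#include <iostream>
#include <string>

struct txid_index {};
struct insertion_order {};

using Queued = boost::multi_index_container<
    std::string,
    boost::multi_index::indexed_by<
        boost::multi_index::hashed_unique<
            boost::multi_index::tag<txid_index>,
            boost::multi_index::identity<std::string>>,
        boost::multi_index::sequenced<
            boost::multi_index::tag<insertion_order>>>>;

int main() {
    Queued queuedTx;
    queuedTx.insert("tx-from-newest-block");
    queuedTx.insert("tx-from-older-block");

    // removeForBlock-style erase by txid:
    auto &byTxid = queuedTx.get<txid_index>();
    auto it = byTxid.find("tx-from-newest-block");
    if (it != byTxid.end()) {
        byTxid.erase(it);
    }
    // updateMempoolForReorg-style traversal in insertion order:
    for (const auto &tx : queuedTx.get<insertion_order>()) {
        std::cout << tx << "\n";
    }
}
```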
// This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_container is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector<CTransactionRef> &vtx); // Remove entries based on txid_index, and update memory usage. void removeForBlock(const std::vector<CTransactionRef> &vtx) { // Short-circuit in the common case of a block being added to the tip if (queuedTx.empty()) { return; } for (auto const &tx : vtx) { auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { cachedInnerUsage -= RecursiveDynamicUsage(*it); queuedTx.erase(it); } } } // Remove an entry by insertion_order index, and update memory usage. void removeEntry(indexed_disconnected_transactions::index< insertion_order>::type::iterator entry) { cachedInnerUsage -= RecursiveDynamicUsage(*entry); queuedTx.get<insertion_order>().erase(entry); } void clear() { cachedInnerUsage = 0; queuedTx.clear(); } /** * Make mempool consistent after a reorg, by re-adding or recursively * erasing disconnected block transactions from the mempool, and also * removing any other transactions from the mempool that are no longer valid * given the new tip/height. * * Note: we assume that disconnectpool only contains transactions that are * NOT confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). * * Passing fAddToMempool=false will skip trying to add the transactions * back, and instead just erase from the mempool as needed. */ void updateMempoolForReorg(const Config &config, bool fAddToMempool); }; #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/validation.cpp b/src/validation.cpp index bfe31fccd6..5f0ccb2d68 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,5191 +1,5194 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php.
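The DynamicMemoryUsage() estimate above is plain arithmetic: one malloc-rounded node (a shared_ptr plus roughly six bookkeeping pointers) per queued entry, plus the cached recursive usage of the transaction bodies. A back-of-envelope version, with a simplified stand-in for memusage::MallocUsage:

```cpp
#include <cstddef>
#include <cstdio>
#include <memory>

// Crude allocator model (stand-in, not the real memusage::MallocUsage):
// round the request up to a 16-byte granule plus a header.
size_t MallocUsage(size_t alloc) {
    return (alloc + 15) / 16 * 16 + 16;
}

int main() {
    size_t perNode =
        MallocUsage(sizeof(std::shared_ptr<void>) + 6 * sizeof(void *));
    size_t queued = 1000;              // queuedTx.size()
    size_t cachedInnerUsage = 250000;  // e.g. the tx bodies themselves
    std::printf("estimated usage: %zu bytes\n",
                perNode * queued + cachedInnerUsage);
}
```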
#include "validation.h" #include "arith_uint256.h" #include "blockindexworkcomparator.h" #include "blockvalidity.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" #include "config.h" #include "consensus/activation.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/tx_verify.h" #include "consensus/validation.h" #include "fs.h" #include "hash.h" #include "init.h" #include "policy/fees.h" #include "policy/policy.h" #include "pow.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "random.h" #include "script/script.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "tinyformat.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "undo.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include "validationinterface.h" #include "warnings.h" #include #include #include #include #include #include #include #if defined(NDEBUG) #error "Bitcoin cannot be compiled without assertions." #endif /** * Global state */ CCriticalSection cs_main; BlockMap mapBlockIndex; CChain chainActive; CBlockIndex *pindexBestHeader = nullptr; CWaitableCriticalSection g_best_block_mutex; CConditionVariable g_best_block_cv; uint256 g_best_block; int nScriptCheckThreads = 0; std::atomic_bool fImporting(false); bool fReindex = false; bool fTxIndex = false; bool fHavePruned = false; bool fPruneMode = false; bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; bool fRequireStandard = true; bool fCheckBlockIndex = false; bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; uint256 hashAssumeValid; arith_uint256 nMinimumChainWork; Amount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CTxMemPool mempool; static void CheckBlockIndex(const Consensus::Params &consensusParams); /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; const std::string strMessageMagic = "Bitcoin Signed Message:\n"; // Internal stuff namespace { CBlockIndex *pindexBestInvalid; CBlockIndex *pindexBestParked; /** * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself * and all ancestors) and as good as our current tip or better. Entries may be * failed, though, and pruning nodes may be missing the data for the block. */ std::set setBlockIndexCandidates; /** * All pairs A->B, where A (or one of its ancestors) misses transactions, but B * has transactions. Pruned nodes may have entries where B is missing data. */ std::multimap mapBlocksUnlinked; CCriticalSection cs_LastBlockFile; std::vector vinfoBlockFile; int nLastBlockFile = 0; /** * Global flag to indicate we should check to see if there are block/undo files * that should be deleted. Set on startup or if we allocate more file space when * we're in prune mode. */ bool fCheckForPruning = false; /** * Every received block is assigned a unique and increasing identifier, so we * know which one to give priority in case of a fork. * Blocks loaded from disk are assigned id 0, so start the counter at 1. */ std::atomic nBlockSequenceId{1}; /** Decreasing counter (used by subsequent preciousblock calls). */ int32_t nBlockReverseSequenceId = -1; /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; /** Dirty block index entries. */ std::set setDirtyBlockIndex; /** Dirty block file entries. 
*/ std::set setDirtyFileInfo; } // namespace CBlockIndex *FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator) { // Find the first block the caller has in the main chain for (const uint256 &hash : locator.vHave) { BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = (*mi).second; if (chain.Contains(pindex)) { return pindex; } if (pindex->GetAncestor(chain.Height()) == chain.Tip()) { return chain.Tip(); } } } return chain.Genesis(); } std::unique_ptr pcoinsdbview; std::unique_ptr pcoinsTip; std::unique_ptr pblocktree; enum FlushStateMode { FLUSH_STATE_NONE, FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS }; // See definition for documentation static bool FlushStateToDisk(const CChainParams &chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight = 0); static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight); static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight); static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false); static uint32_t GetBlockScriptFlags(const Config &config, const CBlockIndex *pChainTip); bool TestLockPointValidity(const LockPoints *lp) { AssertLockHeld(cs_main); assert(lp); // If there are relative lock times then the maxInputBlock will be set // If there are no relative lock times, the LockPoints don't depend on the // chain if (lp->maxInputBlock) { // Check whether chainActive is an extension of the block at which the // LockPoints // calculation was valid. If not LockPoints are no longer valid if (!chainActive.Contains(lp->maxInputBlock)) { return false; } } // LockPoints still valid return true; } bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints *lp, bool useExistingLockPoints) { AssertLockHeld(cs_main); AssertLockHeld(mempool.cs); CBlockIndex *tip = chainActive.Tip(); CBlockIndex index; index.pprev = tip; // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate height based // locks because when SequenceLocks() is called within ConnectBlock(), the // height of the block *being* evaluated is what is used. Thus if we want to // know if a transaction can be part of the *next* block, we need to use one // more than chainActive.Height() index.nHeight = tip->nHeight + 1; std::pair lockPair; if (useExistingLockPoints) { assert(lp); lockPair.first = lp->height; lockPair.second = lp->time; } else { // pcoinsTip contains the UTXO set for chainActive.Tip() CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool); std::vector prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { const CTxIn &txin = tx.vin[txinIndex]; Coin coin; if (!viewMemPool.GetCoin(txin.prevout, coin)) { return error("%s: Missing input", __func__); } if (coin.GetHeight() == MEMPOOL_HEIGHT) { // Assume all mempool transaction confirm in the next block prevheights[txinIndex] = tip->nHeight + 1; } else { prevheights[txinIndex] = coin.GetHeight(); } } lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index); if (lp) { lp->height = lockPair.first; lp->time = lockPair.second; // Also store the hash of the block with the highest height of all // the blocks which have sequence locked prevouts. This hash needs // to still be on the chain for these LockPoint calculations to be // valid. 
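A minimal sketch of the height half of the BIP68 evaluation that CheckSequenceLocks() performs above: each input's confirmation height plus its relative lock must stay below the height being evaluated (tip + 1), and mempool parents are assumed to confirm at tip + 1. Time-based locks and the sequence-field bit encoding are deliberately omitted:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Simplified input: confirmation height plus a height-based relative lock.
struct In { int prevHeight; int relLockBlocks; };

bool SequenceLocksOk(const std::vector<In> &vin, int evalHeight) {
    int minHeight = -1; // analogue of lockPair.first
    for (const In &in : vin) {
        minHeight = std::max(minHeight, in.prevHeight + in.relLockBlocks - 1);
    }
    return minHeight < evalHeight; // analogue of EvaluateSequenceLocks
}

int main() {
    int tipHeight = 100;
    // A mempool parent is assumed confirmed at tip+1, so any non-zero
    // relative lock on it cannot be satisfied in block tip+1.
    std::vector<In> pending = {{tipHeight + 1, 1}};
    assert(!SequenceLocksOk(pending, tipHeight + 1));

    std::vector<In> ok = {{90, 5}}; // confirmed at 90, locked 5 blocks
    assert(SequenceLocksOk(ok, tipHeight + 1));
}
```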
// Note: It is impossible to correctly calculate a maxInputBlock if // any of the sequence locked inputs depend on unconfirmed txs, // except in the special case where the relative lock time/height is // 0, which is equivalent to no sequence lock. Since we assume input // height of tip+1 for mempool txs and test the resulting lockPair // from CalculateSequenceLocks against tip+1. We know // EvaluateSequenceLocks will fail if there was a non-zero sequence // lock on a mempool input, so we can use the return value of // CheckSequenceLocks to indicate the LockPoints validity. int maxInputHeight = 0; for (int height : prevheights) { // Can ignore mempool inputs since we'll fail if they had // non-zero locks. if (height != tip->nHeight + 1) { maxInputHeight = std::max(maxInputHeight, height); } } lp->maxInputBlock = tip->GetAncestor(maxInputHeight); } } return EvaluateSequenceLocks(index, lockPair); } /** Convert CValidationState to a human-readable message for logging */ std::string FormatStateMessage(const CValidationState &state) { return strprintf( "%s%s (code %i)", state.GetRejectReason(), state.GetDebugMessage().empty() ? "" : ", " + state.GetDebugMessage(), state.GetRejectCode()); } static bool IsCurrentForFeeEstimation() { AssertLockHeld(cs_main); if (IsInitialBlockDownload()) { return false; } if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) { return false; } if (chainActive.Height() < pindexBestHeader->nHeight - 1) { return false; } return true; } static bool IsMagneticAnomalyEnabledForCurrentBlock(const Config &config) { AssertLockHeld(cs_main); return IsMagneticAnomalyEnabled(config, chainActive.Tip()); } // Command-line argument "-replayprotectionactivationtime=" will // cause the node to switch to replay protected SigHash ForkID value when the // median timestamp of the previous 11 blocks is greater than or equal to // . Defaults to the pre-defined timestamp when not set. static bool IsReplayProtectionEnabled(const Config &config, int64_t nMedianTimePast) { return nMedianTimePast >= gArgs.GetArg( "-replayprotectionactivationtime", config.GetChainParams().GetConsensus().greatWallActivationTime); } static bool IsReplayProtectionEnabled(const Config &config, const CBlockIndex *pindexPrev) { if (pindexPrev == nullptr) { return false; } return IsReplayProtectionEnabled(config, pindexPrev->GetMedianTimePast()); } static bool IsReplayProtectionEnabledForCurrentBlock(const Config &config) { AssertLockHeld(cs_main); return IsReplayProtectionEnabled(config, chainActive.Tip()); } // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool // were somehow broken and returning the wrong scriptPubKeys static bool CheckInputsFromMempoolAndCache(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool &pool, const uint32_t flags, bool cacheSigStore, PrecomputedTransactionData &txdata) { AssertLockHeld(cs_main); // pool.cs should be locked already, but go ahead and re-take the lock here // to enforce that mempool doesn't change between when we check the view and // when we actually call through to CheckInputs LOCK(pool.cs); assert(!tx.IsCoinBase()); for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); // At this point we haven't actually checked if the coins are all // available (or shouldn't assume we have, since CheckInputs does). So // we just return failure if the inputs are not available here, and then // only have to check equivalence for available inputs. 
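The replay-protection gate above is a plain median-time-past comparison. A self-contained illustration of the MTP-versus-activation-time check; the timestamps are invented:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Median timestamp of the last 11 blocks.
int64_t MedianTimePast(std::vector<int64_t> last11) {
    std::sort(last11.begin(), last11.end());
    return last11[last11.size() / 2];
}

int main() {
    const int64_t activationTime = 1000000;
    std::vector<int64_t> times;
    for (int i = 0; i < 11; ++i) {
        times.push_back(activationTime - 5 + i); // 999995 .. 1000005
    }
    // Median is exactly activationTime, so the feature is considered active.
    assert(MedianTimePast(times) >= activationTime);
}
```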
if (coin.IsSpent()) { return false; } const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId()); if (txFrom) { assert(txFrom->GetId() == txin.prevout.GetTxId()); assert(txFrom->vout.size() > txin.prevout.GetN()); assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut()); } else { const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout); assert(!coinFromDisk.IsSpent()); assert(coinFromDisk.GetTxOut() == coin.GetTxOut()); } } return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata); } static bool AcceptToMemoryPoolWorker( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &ptx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const Amount nAbsurdFee, std::vector &coins_to_uncache) { AssertLockHeld(cs_main); const CTransaction &tx = *ptx; const TxId txid = tx.GetId(); // mempool "read lock" (held through // GetMainSignals().TransactionAddedToMempool()) LOCK(pool.cs); if (pfMissingInputs) { *pfMissingInputs = false; } // Coinbase is only valid in a block, not as a loose transaction. if (!CheckRegularTransaction(tx, state)) { // state filled in by CheckRegularTransaction. return false; } // Rather not work on nonstandard transactions (unless -testnet/-regtest) std::string reason; if (fRequireStandard && !IsStandardTx(tx, reason)) { return state.DoS(0, false, REJECT_NONSTANDARD, reason); } // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. CValidationState ctxState; if (!ContextualCheckTransactionForCurrentBlock( config, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) { // We copy the state from a dummy to ensure we don't increase the // ban score of peer for transaction that could be valid in the future. return state.DoS( 0, false, REJECT_NONSTANDARD, ctxState.GetRejectReason(), ctxState.CorruptionPossible(), ctxState.GetDebugMessage()); } // Is it already in the memory pool? if (pool.exists(txid)) { return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool"); } // Check for conflicts with in-memory transactions for (const CTxIn &txin : tx.vin) { auto itConflicting = pool.mapNextTx.find(txin.prevout); if (itConflicting != pool.mapNextTx.end()) { // Disable replacement feature for good return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict"); } } { CCoinsView dummy; CCoinsViewCache view(&dummy); Amount nValueIn = Amount::zero(); LockPoints lp; CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); view.SetBackend(viewMemPool); // Do all inputs exist? for (const CTxIn txin : tx.vin) { if (!pcoinsTip->HaveCoinInCache(txin.prevout)) { coins_to_uncache.push_back(txin.prevout); } if (!view.HaveCoin(txin.prevout)) { // Are inputs missing because we already have the tx? for (size_t out = 0; out < tx.vout.size(); out++) { // Optimistically just do efficient check of cache for // outputs. if (pcoinsTip->HaveCoinInCache(COutPoint(txid, out))) { return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known"); } } // Otherwise assume this might be an orphan tx for which we just // haven't seen parents yet. if (pfMissingInputs) { *pfMissingInputs = true; } // fMissingInputs and !state.IsInvalid() is used to detect this // condition, don't set state.Invalid() return false; } } // Are the actual inputs available? if (!view.HaveInputs(tx)) { return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent"); } // Bring the best block into scope. 
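The conflict scan above is cheap because the pool maintains mapNextTx, a map from every spent outpoint to its in-mempool spender, so detecting a double-spend is one lookup per input. A simplified standalone version with stand-in types:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using OutPoint = std::pair<std::string, uint32_t>; // (txid, output index)

int main() {
    std::map<OutPoint, std::string> mapNextTx; // outpoint -> spending txid
    mapNextTx[{"aaa", 0}] = "tx1";             // tx1 already spends aaa:0

    std::vector<OutPoint> newTxInputs = {{"aaa", 0}, {"bbb", 1}};
    for (const OutPoint &prevout : newTxInputs) {
        auto it = mapNextTx.find(prevout);
        if (it != mapNextTx.end()) {
            // The real code rejects here with REJECT_CONFLICT.
            std::cout << "txn-mempool-conflict with " << it->second << "\n";
            return 0;
        }
    }
    std::cout << "no conflicts\n";
}
```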
view.GetBestBlock(); nValueIn = view.GetValueIn(tx); // We have all inputs cached now, so switch back to dummy, so we don't // need to keep lock on mempool. view.SetBackend(dummy); // Only accept BIP68 sequence locked transactions that can be mined in // the next block; we don't want our mempool filled up with transactions // that can't be mined yet. Must keep pool.cs for this unless we change // CheckSequenceLocks to take a CoinsViewCache instead of create its // own. if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) { return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final"); } // Check for non-standard pay-to-script-hash in inputs if (fRequireStandard && !AreInputsStandard(tx, view)) { return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs"); } int64_t nSigOpsCount = GetTransactionSigOpCount(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS); Amount nValueOut = tx.GetValueOut(); Amount nFees = nValueIn - nValueOut; // nModifiedFees includes any fee deltas from PrioritiseTransaction Amount nModifiedFees = nFees; double nPriorityDummy = 0; pool.ApplyDeltas(txid, nPriorityDummy, nModifiedFees); Amount inChainInputValue; double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue); // Keep track of transactions that spend a coinbase, which we re-scan // during reorgs to ensure COINBASE_MATURITY is still met. bool fSpendsCoinbase = false; for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); if (coin.IsCoinBase()) { fSpendsCoinbase = true; break; } } CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, dPriority, chainActive.Height(), inChainInputValue, fSpendsCoinbase, nSigOpsCount, lp); unsigned int nSize = entry.GetTxSize(); // Check that the transaction doesn't have an excessive number of // sigops, making it impossible to mine. Since the coinbase transaction // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than // MAX_BLOCK_SIGOPS_PER_MB; we still consider this an invalid rather // than merely non-standard transaction. if (nSigOpsCount > MAX_STANDARD_TX_SIGOPS) { return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false, strprintf("%d", nSigOpsCount)); } CFeeRate minRelayTxFee = config.GetMinFeePerKB(); Amount mempoolRejectFee = pool.GetMinFee( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFee(nSize); if (mempoolRejectFee > Amount::zero() && nModifiedFees < mempoolRejectFee) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee)); } if (gArgs.GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) { // Require that free transactions have sufficient priority to be // mined in the next block. return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); } // Continuously rate-limit free (really, very-low-fee) transactions. // This mitigates 'penny-flooding' -- sending thousands of free // transactions just to be annoying or make others' transactions take // longer to confirm. 
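The fee gate in this stretch boils down to: add any PrioritiseTransaction delta to the raw fee, then require the result to clear the dynamic mempool minimum for the transaction's size. With illustrative numbers:

```cpp
#include <cstdint>
#include <cstdio>

// CFeeRate::GetFee analogue: a rate in sat/kB applied to a size in bytes.
int64_t GetFeeForSize(int64_t satsPerKB, int64_t sizeBytes) {
    return satsPerKB * sizeBytes / 1000;
}

int main() {
    int64_t nFees = 400, feeDelta = 200;       // satoshis
    int64_t nModifiedFees = nFees + feeDelta;  // deltas from ApplyDeltas
    int64_t nSize = 500;                       // bytes
    int64_t mempoolMinRate = 1000;             // sat/kB, from GetMinFee()

    int64_t mempoolRejectFee = GetFeeForSize(mempoolMinRate, nSize); // 500
    if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
        std::printf("reject: mempool min fee not met\n");
    } else {
        std::printf("accept: %lld >= %lld\n", (long long)nModifiedFees,
                    (long long)mempoolRejectFee);
    }
}
```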
if (fLimitFree && nModifiedFees < minRelayTxFee.GetFee(nSize)) { static CCriticalSection csFreeLimiter; static double dFreeCount; static int64_t nLastTime; int64_t nNow = GetTime(); LOCK(csFreeLimiter); // Use an exponentially decaying ~10-minute window: dFreeCount *= pow(1.0 - 1.0 / 600.0, double(nNow - nLastTime)); nLastTime = nNow; // -limitfreerelay unit is thousand-bytes-per-minute // At default rate it would take over a month to fill 1GB + + // NOTE: Use the actual size here, and not the fee size since this + // is counting real size for the rate limiter. if (dFreeCount + nSize >= gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction"); } LogPrint(BCLog::MEMPOOL, "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount + nSize); dFreeCount += nSize; } if (nAbsurdFee != Amount::zero() && nFees > nAbsurdFee) { return state.Invalid(false, REJECT_HIGHFEE, "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee)); } // Calculate in-mempool ancestors, up to a limit. CTxMemPool::setEntries setAncestors; size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000; size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000; std::string errString; if (!pool.CalculateMemPoolAncestors( entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString); } // Set extraFlags as a set of flags that needs to be activated. uint32_t extraFlags = SCRIPT_VERIFY_NONE; if (IsReplayProtectionEnabledForCurrentBlock(config)) { extraFlags |= SCRIPT_ENABLE_REPLAY_PROTECTION; } if (IsMagneticAnomalyEnabledForCurrentBlock(config)) { extraFlags |= SCRIPT_ENABLE_CHECKDATASIG; } // Check inputs based on the set of flags we activate. uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; if (!config.GetChainParams().RequireStandard()) { scriptVerifyFlags = SCRIPT_ENABLE_SIGHASH_FORKID | gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags); } // Make sure whatever we need to activate is actually activated. scriptVerifyFlags |= extraFlags; // Check against previous transactions. This is done last to help // prevent CPU exhaustion denial-of-service attacks. PrecomputedTransactionData txdata(tx); if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) { // State filled in by CheckInputs. return false; } // Check again against the current block tip's script verification flags // to cache our script execution flags. This is, of course, useless if // the next block has different script flags from the previous one, but // because the cache tracks script flags for us it will auto-invalidate // and we'll just have a few blocks of extra misses on soft-fork // activation. // // This is also useful in case of bugs in the standard flags that cause // transactions to pass as valid when they're actually invalid. For // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG // NOT scripts to pass, even though they were invalid. 
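The penny-flood limiter above keeps a byte counter that decays through an exponential ~10-minute window before each admission test. A self-contained version of the same decay-and-budget logic:

```cpp
#include <cmath>
#include <cstdio>

struct FreeLimiter {
    double dFreeCount = 0;
    long long nLastTime = 0;

    bool Allow(long long now, double sizeBytes, double limitBytes) {
        // Same decay as the code above: (1 - 1/600) per elapsed second.
        dFreeCount *= std::pow(1.0 - 1.0 / 600.0, double(now - nLastTime));
        nLastTime = now;
        if (dFreeCount + sizeBytes >= limitBytes) {
            return false; // "rate limited free transaction"
        }
        dFreeCount += sizeBytes;
        return true;
    }
};

int main() {
    // -limitfreerelay=15 maps to 15 * 10 * 1000 bytes, as in the code above.
    FreeLimiter lim;
    double budget = 15 * 10 * 1000;
    int accepted = 0;
    for (int t = 0; t < 1000; ++t) {
        if (lim.Allow(t, 1000, budget)) ++accepted; // one 1 kB free tx per second
    }
    std::printf("accepted %d of 1000 free transactions\n", accepted);
}
```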
// // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. uint32_t currentBlockScriptVerifyFlags = GetBlockScriptFlags(config, chainActive.Tip()); if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) { // If we're using promiscuousmempoolflags, we may hit this normally. // Check if current block has some flags that scriptVerifyFlags does // not before printing an ominous warning. if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) { return error( "%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against " "MANDATORY but not STANDARD flags %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS | extraFlags, true, false, txdata)) { return error( "%s: ConnectInputs failed against MANDATORY but not " "STANDARD flags due to promiscuous mempool %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } LogPrintf("Warning: -promiscuousmempool flags set to not include " "currently enforced soft forks, this may break mining or " "otherwise cause instability!\n"); } // This transaction should only count for fee estimation if // the node is not behind and it is not dependent on any other // transactions in the mempool. bool validForFeeEstimation = IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); // Store transaction in memory. pool.addUnchecked(txid, entry, setAncestors, validForFeeEstimation); // Trim mempool and check if tx was trimmed. if (!fOverrideMempoolLimit) { pool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); if (!pool.exists(txid)) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full"); } } } GetMainSignals().TransactionAddedToMempool(ptx); return true; } /** * (try to) add transaction to memory pool with a specified acceptance time. */ static bool AcceptToMemoryPoolWithTime( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit = false, const Amount nAbsurdFee = Amount::zero()) { std::vector coins_to_uncache; bool res = AcceptToMemoryPoolWorker( config, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache); if (!res) { for (const COutPoint &outpoint : coins_to_uncache) { pcoinsTip->Uncache(outpoint); } } // After we've (potentially) uncached entries, ensure our coins cache is // still within its size limits CValidationState stateDummy; FlushStateToDisk(config.GetChainParams(), stateDummy, FLUSH_STATE_PERIODIC); return res; } bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, bool fOverrideMempoolLimit, const Amount nAbsurdFee) { return AcceptToMemoryPoolWithTime(config, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee); } /** * Return transaction in txOut, and if it was found inside a block, its hash is * placed in hashBlock. 
*/ bool GetTransaction(const Config &config, const TxId &txid, CTransactionRef &txOut, uint256 &hashBlock, bool fAllowSlow) { CBlockIndex *pindexSlow = nullptr; LOCK(cs_main); CTransactionRef ptx = mempool.get(txid); if (ptx) { txOut = ptx; return true; } if (fTxIndex) { CDiskTxPos postx; if (pblocktree->ReadTxIndex(txid, postx)) { CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); if (file.IsNull()) { return error("%s: OpenBlockFile failed", __func__); } CBlockHeader header; try { file >> header; fseek(file.Get(), postx.nTxOffset, SEEK_CUR); file >> txOut; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } hashBlock = header.GetHash(); if (txOut->GetId() != txid) { return error("%s: txid mismatch", __func__); } return true; } } // use coin database to locate block that contains transaction, and scan it if (fAllowSlow) { const Coin &coin = AccessByTxid(*pcoinsTip, txid); if (!coin.IsSpent()) { pindexSlow = chainActive[coin.GetHeight()]; } } if (pindexSlow) { CBlock block; if (ReadBlockFromDisk(block, pindexSlow, config)) { for (const auto &tx : block.vtx) { if (tx->GetId() == txid) { txOut = tx; hashBlock = pindexSlow->GetBlockHash(); return true; } } } } return false; } ////////////////////////////////////////////////////////////////////////////// // // CBlock and CBlockIndex // static bool WriteBlockToDisk(const CBlock &block, CDiskBlockPos &pos, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("WriteBlockToDisk: OpenBlockFile failed"); } // Write index header unsigned int nSize = GetSerializeSize(fileout, block); fileout << FLATDATA(messageStart) << nSize; // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("WriteBlockToDisk: ftell failed"); } pos.nPos = (unsigned int)fileOutPos; fileout << block; return true; } bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos, const Config &config) { block.SetNull(); // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); } // Read block try { filein >> block; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); } return true; } bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex, const Config &config) { if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), config)) { return false; } if (block.GetHash() != pindex->GetBlockHash()) { return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() " "doesn't match index for %s at %s", pindex->ToString(), pindex->GetBlockPos().ToString()); } return true; } Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) { int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; // Force block reward to zero when right shift is undefined. if (halvings >= 64) { return Amount::zero(); } Amount nSubsidy = 50 * COIN; // Subsidy is cut in half every 210,000 blocks which will occur // approximately every 4 years. 
return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI; } bool IsInitialBlockDownload() { // Once this function has returned false, it must remain false. static std::atomic latchToFalse{false}; // Optimization: pre-test latch before taking the lock. if (latchToFalse.load(std::memory_order_relaxed)) { return false; } LOCK(cs_main); if (latchToFalse.load(std::memory_order_relaxed)) { return false; } if (fImporting || fReindex) { return true; } if (chainActive.Tip() == nullptr) { return true; } if (chainActive.Tip()->nChainWork < nMinimumChainWork) { return true; } if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) { return true; } LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); latchToFalse.store(true, std::memory_order_relaxed); return false; } CBlockIndex const *pindexBestForkTip = nullptr; CBlockIndex const *pindexBestForkBase = nullptr; static void AlertNotify(const std::string &strMessage) { uiInterface.NotifyAlertChanged(); std::string strCmd = gArgs.GetArg("-alertnotify", ""); if (strCmd.empty()) { return; } // Alert text should be plain ascii coming from a trusted source, but to be // safe we first strip anything not in safeChars, then add single quotes // around the whole string before passing it to the shell: std::string singleQuote("'"); std::string safeStatus = SanitizeString(strMessage); safeStatus = singleQuote + safeStatus + singleQuote; boost::replace_all(strCmd, "%s", safeStatus); std::thread t(runCommand, strCmd); // thread runs free t.detach(); } static void CheckForkWarningConditions() { AssertLockHeld(cs_main); // Before we get past initial download, we cannot reliably alert about forks // (we assume we don't get stuck on a fork before finishing our initial // sync) if (IsInitialBlockDownload()) { return; } // If our best fork is no longer within 72 blocks (+/- 12 hours if no one // mines it) of our head, drop it if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72) { pindexBestForkTip = nullptr; } if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6))) { if (!GetfLargeWorkForkFound() && pindexBestForkBase) { std::string warning = std::string("'Warning: Large-work fork detected, forking after " "block ") + pindexBestForkBase->phashBlock->ToString() + std::string("'"); AlertNotify(warning); } if (pindexBestForkTip && pindexBestForkBase) { LogPrintf("%s: Warning: Large fork found\n forking the " "chain at height %d (%s)\n lasting to height %d " "(%s).\nChain state database corruption likely.\n", __func__, pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(), pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString()); SetfLargeWorkForkFound(true); } else { LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks " "longer than our best chain.\nChain state database " "corruption likely.\n", __func__); SetfLargeWorkInvalidChainFound(true); } } else { SetfLargeWorkForkFound(false); SetfLargeWorkInvalidChainFound(false); } } static void CheckForkWarningConditionsOnNewFork(const CBlockIndex *pindexNewForkTip) { AssertLockHeld(cs_main); // If we are on a fork that is sufficiently large, set a warning flag. const CBlockIndex *pfork = chainActive.FindFork(pindexNewForkTip); // We define a condition where we should warn the user about as a fork of at // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines // it) of ours. 
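IsInitialBlockDownload() above uses a one-way latch with a lock-free pre-test: once the node has left IBD the answer is pinned to false, and steady-state callers never take cs_main. The pattern in isolation (the mutex and the syncing condition are simplified stand-ins):

```cpp
#include <atomic>
#include <mutex>

std::atomic<bool> latchToFalse{false};
std::mutex cs_main_sketch; // stand-in for cs_main

bool IsInitialBlockDownloadSketch(bool stillSyncing) {
    // Fast path: pre-test the latch without the lock.
    if (latchToFalse.load(std::memory_order_relaxed)) return false;
    std::lock_guard<std::mutex> lock(cs_main_sketch);
    // Re-test under the lock in case another thread latched meanwhile.
    if (latchToFalse.load(std::memory_order_relaxed)) return false;
    if (stillSyncing) return true; // importing / reindexing / low chainwork
    latchToFalse.store(true, std::memory_order_relaxed); // never unset
    return false;
}

int main() {
    IsInitialBlockDownloadSketch(true);  // still syncing: latch stays unset
    IsInitialBlockDownloadSketch(false); // sync done: latches to false
    // Latched: returns false without the lock, whatever the input says.
    return IsInitialBlockDownloadSketch(true) ? 1 : 0;
}
```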
We use 7 blocks rather arbitrarily as it represents just // under 10% of sustained network hash rate operating on the fork, or a // chain that is entirely longer than ours and invalid (note that this // should be detected by both). We define it this way because it allows us // to only store the highest fork tip (+ base) which meets the 7-block // condition and from this always have the most-likely-to-cause-warning fork. if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) && pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) && chainActive.Height() - pindexNewForkTip->nHeight < 72) { pindexBestForkTip = pindexNewForkTip; pindexBestForkBase = pfork; } CheckForkWarningConditions(); } static void InvalidChainFound(CBlockIndex *pindexNew) { if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) { pindexBestInvalid = pindexNew; } LogPrintf( "%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, log(pindexNew->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime())); CBlockIndex *tip = chainActive.Tip(); assert(tip); LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__, tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime())); } static void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (!state.CorruptionPossible()) { pindex->nStatus = pindex->nStatus.withFailed(); setDirtyBlockIndex.insert(pindex); InvalidChainFound(pindex); } } void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo, int nHeight) { // Mark inputs spent. if (tx.IsCoinBase()) { return; } txundo.vprevout.reserve(tx.vin.size()); for (const CTxIn &txin : tx.vin) { txundo.vprevout.emplace_back(); bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back()); assert(is_spent); } } void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo, int nHeight) { SpendCoins(view, tx, txundo, nHeight); AddCoins(view, tx, nHeight); } void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) { // Mark inputs spent. if (!tx.IsCoinBase()) { for (const CTxIn &txin : tx.vin) { bool is_spent = view.SpendCoin(txin.prevout); assert(is_spent); } } // Add outputs. AddCoins(view, tx, nHeight); } bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; return VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, txdata), &error); } int GetSpendHeight(const CCoinsViewCache &inputs) { LOCK(cs_main); CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; return pindexPrev->nHeight + 1; } bool CheckInputs(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, const uint32_t flags, bool sigCacheStore, bool scriptCacheStore, const PrecomputedTransactionData &txdata, std::vector<CScriptCheck> *pvChecks) { assert(!tx.IsCoinBase()); if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) { return false; } if (pvChecks) { pvChecks->reserve(tx.vin.size()); } // Consensus::CheckTxInputs above performs all the inexpensive checks. Only // if ALL inputs pass do we perform the expensive ECDSA signature checks. // This helps prevent CPU exhaustion attacks.
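// A minimal sketch of how a caller defers the expensive script checks via
// pvChecks instead of running them inline (this mirrors the pattern
// ConnectBlock below uses with the file-level scriptcheckqueue):
//
//     std::vector<CScriptCheck> vChecks;
//     CCheckQueueControl<CScriptCheck> control(&scriptcheckqueue);
//     if (CheckInputs(tx, state, view, true /* fScriptChecks */, flags,
//                     false, false, PrecomputedTransactionData(tx),
//                     &vChecks)) {
//         control.Add(vChecks);  // hand the checks to worker threads
//     }
//     bool fScriptsOk = control.Wait();  // join and collect the verdict
//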
// Skip script verification when connecting blocks under the assumedvalid // block. Assuming the assumedvalid block is valid this is safe because // block merkle hashes are still computed and checked. Of course, if an // assumed valid block is invalid due to false scriptSigs this optimization // would allow an invalid chain to be accepted. if (!fScriptChecks) { return true; } // First check if script executions have been cached with the same flags. // Note that this assumes that the inputs provided are correct (i.e. that // the transaction hash which is in tx's prevouts properly commits to the // scriptPubKey in the inputs view of that transaction). uint256 hashCacheEntry = GetScriptCacheKey(tx, flags); if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore)) { return true; } for (size_t i = 0; i < tx.vin.size(); i++) { const COutPoint &prevout = tx.vin[i].prevout; const Coin &coin = inputs.AccessCoin(prevout); assert(!coin.IsSpent()); // We very carefully only pass in things to CScriptCheck which are // clearly committed to by the tx's hash. This provides a sanity // check that our caching is not introducing consensus failures through // additional data in, e.g., the coins being spent being checked as a // part of CScriptCheck. const CScript &scriptPubKey = coin.GetTxOut().scriptPubKey; const Amount amount = coin.GetTxOut().nValue; // Verify signature CScriptCheck check(scriptPubKey, amount, tx, i, flags, sigCacheStore, txdata); if (pvChecks) { pvChecks->push_back(std::move(check)); } else if (!check()) { const bool hasNonMandatoryFlags = (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) != 0; if (hasNonMandatoryFlags) { // Check whether the failure was caused by a non-mandatory // script verification check, such as non-standard DER encodings // or non-null dummy arguments; if so, don't trigger DoS // protection to avoid splitting the network between upgraded // and non-upgraded nodes. // // We also check activating the monolith opcodes as it is a // strictly additive change and we would not like to ban some of // our peers that are ahead of us and consider the fork as // activated. CScriptCheck check2(scriptPubKey, amount, tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, sigCacheStore, txdata); if (check2()) { return state.Invalid( false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); } } // Failures of other flags indicate a transaction that is invalid in // new blocks, e.g. an invalid P2SH. We DoS ban such nodes as they // are not following the protocol. That said, during an upgrade, // careful thought should be given to the correct behavior - we // may want to continue peering with non-upgraded nodes even after // soft-fork super-majority signaling has occurred. return state.DoS( 100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); } } if (scriptCacheStore && !pvChecks) { // We executed all of the provided scripts, and were told to cache the // result. Do so now.
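// Note that hashCacheEntry commits to both the transaction and the verify
// flags (see GetScriptCacheKey above), so a success cached under one flag
// set is never reused under a different one.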
AddKeyInScriptCache(hashCacheEntry); } return true; } namespace { bool UndoWriteToDisk(const CBlockUndo &blockundo, CDiskBlockPos &pos, const uint256 &hashBlock, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Write index header unsigned int nSize = GetSerializeSize(fileout, blockundo); fileout << FLATDATA(messageStart) << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("%s: ftell failed", __func__); } pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; // calculate & write checksum CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); hasher << hashBlock; hasher << blockundo; fileout << hasher.GetHash(); return true; } bool UndoReadFromDisk(CBlockUndo &blockundo, const CDiskBlockPos &pos, const uint256 &hashBlock) { // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Read block uint256 hashChecksum; // We need a CHashVerifier as reserializing may lose data CHashVerifier<CAutoFile> verifier(&filein); try { verifier << hashBlock; verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } // Verify checksum if (hashChecksum != verifier.GetHash()) { return error("%s: Checksum mismatch", __func__); } return true; } /** Abort with a message */ bool AbortNode(const std::string &strMessage, const std::string &userMessage = "") { SetMiscWarning(strMessage); LogPrintf("*** %s\n", strMessage); uiInterface.ThreadSafeMessageBox( userMessage.empty() ? _("Error: A fatal internal error occurred, see " "debug.log for details") : userMessage, "", CClientUIInterface::MSG_ERROR); StartShutdown(); return false; } bool AbortNode(CValidationState &state, const std::string &strMessage, const std::string &userMessage = "") { AbortNode(strMessage, userMessage); return state.Error(strMessage); } } // namespace /** Restore the UTXO in a Coin at a given COutPoint. */ DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view, const COutPoint &out) { bool fClean = true; if (view.HaveCoin(out)) { // Overwriting transaction output. fClean = false; } if (undo.GetHeight() == 0) { // Missing undo metadata (height and coinbase). Older versions included // this information only in undo records for the last spend of a // transaction's outputs. This implies that it must be present for some // other output of the same tx. const Coin &alternate = AccessByTxid(view, out.GetTxId()); if (alternate.IsSpent()) { // Adding output for transaction without known metadata return DISCONNECT_FAILED; } // This is somewhat ugly, but hopefully its utility is limited. This is // only useful when working from legacy on-disk data. In any case, // putting the correct information in there doesn't hurt. const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(), alternate.IsCoinBase()); } // The potential_overwrite parameter to AddCoin is only allowed to be false // if we know for sure that the coin did not already exist in the cache. As // we have queried for that above using HaveCoin, we don't need to guess. // When fClean is false, a coin already existed and it is an overwrite. view.AddCoin(out, std::move(undo), !fClean); return fClean ?
DISCONNECT_OK : DISCONNECT_UNCLEAN; } /** * Undo the effects of this block (with given index) on the UTXO set represented * by coins. When FAILED is returned, view is left in an indeterminate state. */ static DisconnectResult DisconnectBlock(const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { CBlockUndo blockUndo; CDiskBlockPos pos = pindex->GetUndoPos(); if (pos.IsNull()) { error("DisconnectBlock(): no undo data available"); return DISCONNECT_FAILED; } if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) { error("DisconnectBlock(): failure reading undo data"); return DISCONNECT_FAILED; } return ApplyBlockUndo(blockUndo, block, pindex, view); } DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo, const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { bool fClean = true; if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { error("DisconnectBlock(): block and undo data inconsistent"); return DISCONNECT_FAILED; } // First, restore inputs. for (size_t i = 1; i < block.vtx.size(); i++) { const CTransaction &tx = *(block.vtx[i]); const CTxUndo &txundo = blockUndo.vtxundo[i - 1]; if (txundo.vprevout.size() != tx.vin.size()) { error("DisconnectBlock(): transaction and undo data inconsistent"); return DISCONNECT_FAILED; } for (size_t j = 0; j < tx.vin.size(); j++) { const COutPoint &out = tx.vin[j].prevout; const Coin &undo = txundo.vprevout[j]; DisconnectResult res = UndoCoinSpend(undo, view, out); if (res == DISCONNECT_FAILED) { return DISCONNECT_FAILED; } fClean = fClean && res != DISCONNECT_UNCLEAN; } } // Second, revert created outputs. for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; const TxId &txid = tx.GetId(); const bool is_coinbase = tx.IsCoinBase(); // Check that all outputs are available and match the outputs in the // block itself exactly. for (size_t o = 0; o < tx.vout.size(); o++) { if (tx.vout[o].scriptPubKey.IsUnspendable()) { continue; } COutPoint out(txid, o); Coin coin; bool is_spent = view.SpendCoin(out, &coin); if (!is_spent || tx.vout[o] != coin.GetTxOut() || uint32_t(pindex->nHeight) != coin.GetHeight() || is_coinbase != coin.IsCoinBase()) { // transaction output mismatch fClean = false; } } } // Move best block pointer to previous block. view.SetBestBlock(block.hashPrevBlock); return fClean ? 
DISCONNECT_OK : DISCONNECT_UNCLEAN; } static void FlushBlockFile(bool fFinalize = false) { LOCK(cs_LastBlockFile); CDiskBlockPos posOld(nLastBlockFile, 0); FILE *fileOld = OpenBlockFile(posOld); if (fileOld) { if (fFinalize) { TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); } FileCommit(fileOld); fclose(fileOld); } fileOld = OpenUndoFile(posOld); if (fileOld) { if (fFinalize) { TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); } FileCommit(fileOld); fclose(fileOld); } } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); static CCheckQueue<CScriptCheck> scriptcheckqueue(128); void ThreadScriptCheck() { RenameThread("bitcoin-scriptch"); scriptcheckqueue.Thread(); } // Protected by cs_main VersionBitsCache versionbitscache; int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev, const Consensus::Params &params) { int32_t nVersion = VERSIONBITS_TOP_BITS; return nVersion; } // Returns the script flags which should be checked for a given block static uint32_t GetBlockScriptFlags(const Config &config, const CBlockIndex *pChainTip) { AssertLockHeld(cs_main); const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); uint32_t flags = SCRIPT_VERIFY_NONE; // P2SH didn't become active until Apr 1 2012 if (pChainTip->GetMedianTimePast() >= P2SH_ACTIVATION_TIME) { flags |= SCRIPT_VERIFY_P2SH; } // Start enforcing the DERSIG (BIP66) rule. if ((pChainTip->nHeight + 1) >= consensusParams.BIP66Height) { flags |= SCRIPT_VERIFY_DERSIG; } // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule. if ((pChainTip->nHeight + 1) >= consensusParams.BIP65Height) { flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; } // Start enforcing CSV (BIP68, BIP112 and BIP113) rule. if ((pChainTip->nHeight + 1) >= consensusParams.CSVHeight) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } // If the UAHF is enabled, we start accepting replay protected txns if (IsUAHFenabled(config, pChainTip)) { flags |= SCRIPT_VERIFY_STRICTENC; flags |= SCRIPT_ENABLE_SIGHASH_FORKID; } // If the DAA HF is enabled, we start rejecting transactions that use a // high S in their signature. We also make sure that signatures that are // supposed to fail (for instance in multisig or other forms of smart // contracts) are null. if (IsDAAEnabled(config, pChainTip)) { flags |= SCRIPT_VERIFY_LOW_S; flags |= SCRIPT_VERIFY_NULLFAIL; } // When the magnetic anomaly fork is enabled, we start accepting // transactions using the OP_CHECKDATASIG opcode and its verify // alternative. We also start enforcing push-only signatures and // clean stack. if (IsMagneticAnomalyEnabled(config, pChainTip)) { flags |= SCRIPT_ENABLE_CHECKDATASIG; flags |= SCRIPT_VERIFY_SIGPUSHONLY; flags |= SCRIPT_VERIFY_CLEANSTACK; } // We make sure this node will have replay protection during the next hard // fork. if (IsReplayProtectionEnabled(config, pChainTip)) { flags |= SCRIPT_ENABLE_REPLAY_PROTECTION; } return flags; } static int64_t nTimeCheck = 0; static int64_t nTimeForks = 0; static int64_t nTimeVerify = 0; static int64_t nTimeConnect = 0; static int64_t nTimeIndex = 0; static int64_t nTimeCallbacks = 0; static int64_t nTimeTotal = 0; /** * Apply the effects of this block (with given index) on the UTXO set * represented by coins. Validity checks that depend on the UTXO set are also * done; ConnectBlock() can fail if those validity checks fail (among other * reasons).
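* For example, the body below fails with "bad-txns-inputs-missingorspent"
* when a transaction spends a missing or already-spent coin, and with
* "bad-cb-amount" when the coinbase pays out more than the subsidy plus
* fees.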
*/ static bool ConnectBlock(const Config &config, const CBlock &block, CValidationState &state, CBlockIndex *pindex, CCoinsViewCache &view, bool fJustCheck = false) { AssertLockHeld(cs_main); int64_t nTimeStart = GetTimeMicros(); // Check it again in case a previous version let a bad block in BlockValidationOptions validationOptions = BlockValidationOptions(!fJustCheck, !fJustCheck); if (!CheckBlock(config, block, state, validationOptions)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } // Verify that the view's current state corresponds to the previous block uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); // Special case for the genesis block, skipping connection of its // transactions (its coinbase is unspendable) const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); if (block.GetHash() == consensusParams.hashGenesisBlock) { if (!fJustCheck) { view.SetBestBlock(pindex->GetBlockHash()); } return true; } bool fScriptChecks = true; if (!hashAssumeValid.IsNull()) { // We've been configured with the hash of a block which has been // externally verified to have a valid history. A suitable default value // is included with the software and updated from time to time. Because // validity relative to a piece of software is an objective fact these // defaults can be easily reviewed. This setting doesn't force the // selection of any particular chain but makes validating some faster by // effectively caching the result of part of the verification. BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); if (it != mapBlockIndex.end()) { if (it->second->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->nChainWork >= nMinimumChainWork) { // This block is a member of the assumed verified chain and an // ancestor of the best header. The equivalent time check // discourages hashpower from extorting the network via DOS // attack into accepting an invalid block through telling users // they must manually set assumevalid. Requiring a software // change or burying the invalid block, regardless of the // setting, makes it hard to hide the implication of the demand. // This also avoids having release candidates that are hardly // doing any signature verification at all in testing without // having to artificially set the default assumed verified block // further back. The test against nMinimumChainWork prevents the // skipping when denied access to any chain at least as good as // the expected chain. fScriptChecks = (GetBlockProofEquivalentTime( *pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) <= 60 * 60 * 24 * 7 * 2); } } } int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001); // Do not allow blocks that contain transactions which 'overwrite' older // transactions, unless those are already completely spent. If such // overwrites are allowed, coinbases and transactions depending upon those // can be duplicated to remove the ability to spend the first instance -- // even after being sent to another address. See BIP30 and // http://r6.ca/blog/20120206T005236Z.html for more information. This logic // is not necessary for memory pool transactions, as AcceptToMemoryPool // already refuses previously-known transaction ids entirely. 
This rule was // originally applied to all blocks with a timestamp after March 15, 2012, // 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is // applied to all blocks except the two in the chain that violate it. This // prevents exploiting the issue against nodes during their initial block // download. bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock // invocations which don't // have a hash. !((pindex->nHeight == 91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763" "b1f4360639393e0e4c8e300e0caec")) || (pindex->nHeight == 91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f" "610ae9601ac046a38084ccb7cd721"))); // Once BIP34 activated it was not possible to create new duplicate // coinbases and thus other than starting with the 2 existing duplicate // coinbase pairs, not possible to create overwriting txs. But by the time // BIP34 activated, in each of the existing pairs the duplicate coinbase had // overwritten the first before the first had been spent. Since those // coinbases are sufficiently buried it's no longer possible to create // further duplicate transactions descending from the known pairs either. If // we're on the known chain at a height greater than where BIP34 activated, // we can save the db accesses needed for the BIP30 check. CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(consensusParams.BIP34Height); // Only continue to enforce if we're below BIP34 activation height or the // block hash at that height doesn't correspond. fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash)); if (fEnforceBIP30) { for (const auto &tx : block.vtx) { for (size_t o = 0; o < tx->vout.size(); o++) { if (view.HaveCoin(COutPoint(tx->GetId(), o))) { return state.DoS( 100, error("ConnectBlock(): tried to overwrite transaction"), REJECT_INVALID, "bad-txns-BIP30"); } } } } // Start enforcing BIP68 (sequence locks). int nLockTimeFlags = 0; if (pindex->nHeight >= consensusParams.CSVHeight) { nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; } const uint32_t flags = GetBlockScriptFlags(config, pindex->pprev); const bool fIsMagneticAnomalyEnabled = IsMagneticAnomalyEnabled(config, pindex->pprev); int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1; LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001); CBlockUndo blockundo; CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue : nullptr); std::vector<int> prevheights; Amount nFees = Amount::zero(); int nInputs = 0; // Sigops counting. We need to do it again because of P2SH. uint64_t nSigOpsCount = 0; const uint64_t currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); const uint64_t nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize); CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); std::vector<std::pair<uint256, CDiskTxPos>> vPos; vPos.reserve(block.vtx.size()); blockundo.vtxundo.reserve(block.vtx.size() - 1); for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; nInputs += tx.vin.size(); vPos.push_back(std::make_pair(tx.GetId(), pos)); pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); if (tx.IsCoinBase()) { // We've already checked for sigops count before P2SH in CheckBlock.
nSigOpsCount += GetSigOpCountWithoutP2SH(tx, flags); } if (fIsMagneticAnomalyEnabled || tx.IsCoinBase()) { AddCoins(view, tx, pindex->nHeight); } } for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; if (tx.IsCoinBase()) { continue; } if (!view.HaveInputs(tx)) { return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), REJECT_INVALID, "bad-txns-inputs-missingorspent"); } // Check that the transaction is BIP68 final. BIP68 lock checks (as // opposed to nLockTime checks) must be in ConnectBlock because they // require the UTXO set. prevheights.resize(tx.vin.size()); for (size_t j = 0; j < tx.vin.size(); j++) { prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight(); } if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) { return state.DoS( 100, error("%s: contains a non-BIP68-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); } // GetTransactionSigOpCount counts 2 types of sigops: // * legacy (always) // * p2sh (when P2SH enabled in flags and excludes coinbase) auto txSigOpsCount = GetTransactionSigOpCount(tx, view, flags); if (txSigOpsCount > MAX_TX_SIGOPS_COUNT) { return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops"); } nSigOpsCount += txSigOpsCount; if (nSigOpsCount > nMaxSigOpsCount) { return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); } Amount fee = view.GetValueIn(tx) - tx.GetValueOut(); nFees += fee; // Don't cache results if we're actually connecting blocks (still // consult the cache, though). bool fCacheResults = fJustCheck; std::vector<CScriptCheck> vChecks; if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, PrecomputedTransactionData(tx), &vChecks)) { return error("ConnectBlock(): CheckInputs on %s failed with %s", tx.GetId().ToString(), FormatStateMessage(state)); } control.Add(vChecks); blockundo.vtxundo.push_back(CTxUndo()); SpendCoins(view, tx, blockundo.vtxundo.back(), pindex->nHeight); if (!fIsMagneticAnomalyEnabled) { AddCoins(view, tx, pindex->nHeight); } } int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, " "%.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs - 1), nTimeConnect * 0.000001); Amount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, consensusParams); if (block.vtx[0]->GetValueOut() > blockReward) { return state.DoS(100, error("ConnectBlock(): coinbase pays too much " "(actual=%d vs limit=%d)", block.vtx[0]->GetValueOut(), blockReward), REJECT_INVALID, "bad-cb-amount"); } if (!control.Wait()) { return state.DoS(100, false, REJECT_INVALID, "blk-bad-inputs", false, "parallel script check failed"); } int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ?
0 : 0.001 * (nTime4 - nTime2) / (nInputs - 1), nTimeVerify * 0.000001); if (fJustCheck) { return true; } // Write undo information to disk if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BlockValidity::SCRIPTS)) { if (pindex->GetUndoPos().IsNull()) { CDiskBlockPos _pos; if (!FindUndoPos( state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) { return error("ConnectBlock(): FindUndoPos failed"); } if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), config.GetChainParams().DiskMagic())) { return AbortNode(state, "Failed to write undo data"); } // update nUndoPos in block index pindex->nUndoPos = _pos.nPos; pindex->nStatus = pindex->nStatus.withUndo(); } pindex->RaiseValidity(BlockValidity::SCRIPTS); setDirtyBlockIndex.insert(pindex); } if (fTxIndex && !pblocktree->WriteTxIndex(vPos)) { return AbortNode(state, "Failed to write transaction index"); } // add this block to the view's block chain view.SetBestBlock(pindex->GetBlockHash()); int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001); int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001); // If we just activated the replay protection with that block, it means // transactions in the mempool are now invalid. As a result, we need to // clear the mempool. if (IsReplayProtectionEnabled(config, pindex) && !IsReplayProtectionEnabled(config, pindex->pprev)) { mempool.clear(); } return true; } /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with if * they're too large, if it's been a while since the last write, or always and * in all cases if we're in prune mode and are deleting files. */ static bool FlushStateToDisk(const CChainParams &chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); LOCK(cs_main); static int64_t nLastWrite = 0; static int64_t nLastFlush = 0; static int64_t nLastSetChain = 0; std::set<int> setFilesToPrune; bool fFlushForPrune = false; bool fDoFullFlush = false; int64_t nNow = 0; try { { LOCK(cs_LastBlockFile); if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { if (nManualPruneHeight > 0) { FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); } else { FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); fCheckForPruning = false; } if (!setFilesToPrune.empty()) { fFlushForPrune = true; if (!fHavePruned) { pblocktree->WriteFlag("prunedblockfiles", true); fHavePruned = true; } } } nNow = GetTimeMicros(); // Avoid writing/flushing immediately after startup. if (nLastWrite == 0) { nLastWrite = nNow; } if (nLastFlush == 0) { nLastFlush = nNow; } if (nLastSetChain == 0) { nLastSetChain = nNow; } int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, // but we have time now (not in the middle of a block processing).
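// Illustrative arithmetic for the condition below (hypothetical numbers,
// assuming MAX_BLOCK_COINSDB_USAGE is 10 MiB): with nTotalSpace = 400 MiB,
// the flush triggers once cacheSize exceeds
// max(9 * 400 / 10, 400 - 10) = 390 MiB.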
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); // The cache is over the limit, we have to write now. bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; // It's been a while since we wrote the block index to disk. Do this // frequently, so we don't need to redownload after a crash. bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; // It's been very long since we flushed the cache. Do this // infrequently, to optimize cache usage. bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; // Combine all conditions that result in a full cache flush. fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; // Write blocks and block index to disk. if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index if (!CheckDiskSpace(0)) { return state.Error("out of disk space"); } // First make sure all block and undo data is flushed to disk. FlushBlockFile(); // Then update all block file information (which may refer to // block and undo files). { std::vector<std::pair<int, const CBlockFileInfo *>> vFiles; vFiles.reserve(setDirtyFileInfo.size()); for (int i : setDirtyFileInfo) { vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i])); } setDirtyFileInfo.clear(); std::vector<const CBlockIndex *> vBlocks; vBlocks.reserve(setDirtyBlockIndex.size()); for (const CBlockIndex *cbi : setDirtyBlockIndex) { vBlocks.push_back(cbi); } setDirtyBlockIndex.clear(); if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { return AbortNode( state, "Failed to write to block index database"); } } // Finally remove any pruned files if (fFlushForPrune) { UnlinkPrunedFiles(setFilesToPrune); } nLastWrite = nNow; } // Flush best chain related state. This can only be done if the // blocks / block index write was also done. if (fDoFullFlush) { // Typical Coin structures on disk are around 48 bytes in size. // Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is // already an overestimation, as most will delete an existing // entry or overwrite one. Still, use a conservative safety // factor of 2. if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) { return state.Error("out of disk space"); } // Flush the chainstate (which may refer to block index // entries). if (!pcoinsTip->Flush()) { return AbortNode(state, "Failed to write to coin database"); } nLastFlush = nNow; } } if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). GetMainSignals().SetBestChain(chainActive.GetLocator()); nLastSetChain = nNow; } } catch (const std::runtime_error &e) { return AbortNode( state, std::string("System error while flushing: ") + e.what()); } return true; } void FlushStateToDisk() { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS); } void PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); } /** * Update chainActive and related internal data structures when adding a new * block to the chain tip.
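* Besides moving the tip pointer, this bumps the mempool's
* transactions-updated counter, wakes any threads waiting on
* g_best_block_cv, and scans the last 100 blocks for unexpected version
* bits so the user can be warned about unknown rule changes.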
*/ static void UpdateTip(const Config &config, CBlockIndex *pindexNew) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); chainActive.SetTip(pindexNew); // New best block mempool.AddTransactionsUpdated(1); { LOCK(g_best_block_mutex); g_best_block = pindexNew->GetBlockHash(); g_best_block_cv.notify_all(); } static bool fWarned = false; std::vector<std::string> warningMessages; if (!IsInitialBlockDownload()) { int nUpgraded = 0; const CBlockIndex *pindex = chainActive.Tip(); // Check the version of the last 100 blocks to see if we need to // upgrade: for (int i = 0; i < 100 && pindex != nullptr; i++) { int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, consensusParams); if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) { ++nUpgraded; } pindex = pindex->pprev; } if (nUpgraded > 0) { warningMessages.push_back(strprintf( "%d of last 100 blocks have unexpected version", nUpgraded)); } if (nUpgraded > 100 / 2) { std::string strWarning = _("Warning: Unknown block versions being mined! It's possible " "unknown rules are in effect"); // notify GetWarnings(), called by Qt and the JSON-RPC code to warn // the user: SetMiscWarning(strWarning); if (!fWarned) { AlertNotify(strWarning); fWarned = true; } } } LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu " "date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion, log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0), (unsigned long)chainActive.Tip()->nChainTx, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(config.GetChainParams().TxData(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)), pcoinsTip->GetCacheSize()); if (!warningMessages.empty()) { LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", ")); } LogPrintf("\n"); } /** * Disconnect chainActive's tip. * After calling, the mempool will be in an inconsistent state, with * transactions from disconnected blocks being added to disconnectpool. You * should make the mempool consistent again by calling updateMempoolForReorg * with cs_main held. * * If disconnectpool is nullptr, then no disconnected transactions are added to * disconnectpool (note that the caller is responsible for mempool consistency * in any case). */ static bool DisconnectTip(const Config &config, CValidationState &state, DisconnectedBlockTransactions *disconnectpool) { CBlockIndex *pindexDelete = chainActive.Tip(); assert(pindexDelete); // Read block from disk. std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); CBlock &block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, config)) { return AbortNode(state, "Failed to read block"); } // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); { CCoinsViewCache view(pcoinsTip.get()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); } bool flushed = view.Flush(); assert(flushed); } LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); // Write the chain state to disk, if necessary.
if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } // If this block was deactivating the replay protection, then we need to // remove transactions that are replay protected from the mempool. There is // no easy way to do this so we'll just discard the whole mempool and then // add the transactions of the block we just disconnected back. // // If we are deactivating magnetic anomaly, we want to make sure we do not // have transactions in the mempool that use newly introduced opcodes. As a // result, we also clean up the mempool. if ((IsReplayProtectionEnabled(config, pindexDelete) && !IsReplayProtectionEnabled(config, pindexDelete->pprev)) || (IsMagneticAnomalyEnabled(config, pindexDelete) && !IsMagneticAnomalyEnabled(config, pindexDelete->pprev))) { LogPrint(BCLog::MEMPOOL, "Clearing mempool for reorg"); mempool.clear(); // While not strictly necessary, clearing the disconnect pool is also // beneficial so we don't try to reuse its content at the end of the // reorg, which we know will fail. if (disconnectpool) { disconnectpool->clear(); } } if (disconnectpool) { disconnectpool->addForBlock(block.vtx); } // Update chainActive and related variables. UpdateTip(config, pindexDelete->pprev); // Let wallets know transactions went from 1-confirmed to // 0-confirmed or conflicted: GetMainSignals().BlockDisconnected(pblock); return true; } static int64_t nTimeReadFromDisk = 0; static int64_t nTimeConnectTotal = 0; static int64_t nTimeFlush = 0; static int64_t nTimeChainState = 0; static int64_t nTimePostConnect = 0; struct PerBlockConnectTrace { CBlockIndex *pindex = nullptr; std::shared_ptr<const CBlock> pblock; std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs; PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {} }; /** * Used to track blocks whose transactions were applied to the UTXO state as a * part of a single ActivateBestChainStep call. * * This class also tracks transactions that are removed from the mempool as * conflicts (per block) and can be used to pass all those transactions through * SyncTransaction. * * This class assumes (and asserts) that the conflicted transactions for a given * block are added via mempool callbacks prior to the BlockConnected() * associated with those transactions. If any transactions are marked * conflicted, it is assumed that an associated block will always be added. * * This class is single-use; once you call GetBlocksConnected() you have to * throw it away and make a new one. */ class ConnectTrace { private: std::vector<PerBlockConnectTrace> blocksConnected; CTxMemPool &pool; public: ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { pool.NotifyEntryRemoved.connect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } ~ConnectTrace() { pool.NotifyEntryRemoved.disconnect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } void BlockConnected(CBlockIndex *pindex, std::shared_ptr<const CBlock> pblock) { assert(!blocksConnected.back().pindex); assert(pindex); assert(pblock); blocksConnected.back().pindex = pindex; blocksConnected.back().pblock = std::move(pblock); blocksConnected.emplace_back(); } std::vector<PerBlockConnectTrace> &GetBlocksConnected() { // We always keep one extra block at the end of our list because blocks // are added after all the conflicted transactions have been filled in. // Thus, the last entry should always be an empty one waiting for the // transactions from the next block. We pop the last entry here to make // sure the list we return is sane.
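// The asserts below re-check that the trailing sentinel entry is still
// empty before it is popped.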
assert(!blocksConnected.back().pindex); assert(blocksConnected.back().conflictedTxs->empty()); blocksConnected.pop_back(); return blocksConnected; } void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) { assert(!blocksConnected.back().pindex); if (reason == MemPoolRemovalReason::CONFLICT) { blocksConnected.back().conflictedTxs->emplace_back( std::move(txRemoved)); } } }; /** * Connect a new block to chainActive. pblock is either nullptr or a pointer to * a CBlock corresponding to pindexNew, to bypass loading it again from disk. * * The block is always added to connectTrace (either after loading from disk or * by copying pblock) - if that is not intended, care must be taken to remove * the last entry in blocksConnected in case of failure. */ static bool ConnectTip(const Config &config, CValidationState &state, CBlockIndex *pindexNew, const std::shared_ptr<const CBlock> &pblock, ConnectTrace &connectTrace, DisconnectedBlockTransactions &disconnectpool) { assert(pindexNew->pprev == chainActive.Tip()); // Read block from disk. int64_t nTime1 = GetTimeMicros(); std::shared_ptr<const CBlock> pthisBlock; if (!pblock) { std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>(); if (!ReadBlockFromDisk(*pblockNew, pindexNew, config)) { return AbortNode(state, "Failed to read block"); } pthisBlock = pblockNew; } else { pthisBlock = pblock; } const CBlock &blockConnecting = *pthisBlock; // Apply the block atomically to the chain state. int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; int64_t nTime3; LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); { CCoinsViewCache view(pcoinsTip.get()); bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { if (state.IsInvalid()) { InvalidBlockFound(pindexNew, state); } return error("ConnectTip(): ConnectBlock %s failed (%s)", pindexNew->GetBlockHash().ToString(), FormatStateMessage(state)); } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); bool flushed = view.Flush(); assert(flushed); } int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); // Remove conflicting transactions from the mempool. mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); disconnectpool.removeForBlock(blockConnecting.vtx); // Update chainActive & related variables. UpdateTip(config, pindexNew); int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); return true; } /** * Return the tip of the chain with the most work in it, that isn't known to be * invalid (it's however far from certain to be valid).
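* Candidates whose path back to the active chain turns out to be invalid,
* parked, or missing block data are erased from setBlockIndexCandidates,
* and the search restarts with the next-best candidate.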
*/ static CBlockIndex *FindMostWorkChain() { do { CBlockIndex *pindexNew = nullptr; // Find the best candidate header. { std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin(); if (it == setBlockIndexCandidates.rend()) { return nullptr; } pindexNew = *it; } const CBlockIndex *pindexFork = chainActive.FindFork(pindexNew); // Check whether all blocks on the path between the currently active // chain and the candidate are valid. Just going until the active chain // is an optimization, as we know all blocks in it are valid already. CBlockIndex *pindexTest = pindexNew; bool hasValidAncestor = true; while (hasValidAncestor && pindexTest && pindexTest != pindexFork) { assert(pindexTest->nChainTx || pindexTest->nHeight == 0); // Pruned nodes may have entries in setBlockIndexCandidates for // which block files have been deleted. Remove those as candidates // for the most work chain if we come across them; we can't switch // to a chain unless we have all the non-active-chain parent blocks. bool fInvalidChain = pindexTest->nStatus.isInvalid(); bool fParkedChain = pindexTest->nStatus.isOnParkedChain(); bool fMissingData = !pindexTest->nStatus.hasData(); if (!(fInvalidChain || fParkedChain || fMissingData)) { // The current block is acceptable, move to the parent, up to // the fork point. pindexTest = pindexTest->pprev; continue; } // Candidate chain is not usable (invalid, parked, or missing data) hasValidAncestor = false; setBlockIndexCandidates.erase(pindexTest); if (fInvalidChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) { pindexBestInvalid = pindexNew; } if (fParkedChain && (pindexBestParked == nullptr || pindexNew->nChainWork > pindexBestParked->nChainWork)) { pindexBestParked = pindexNew; } CBlockIndex *pindexFailed = pindexNew; // Remove the entire chain from the set. while (pindexTest != pindexFailed) { if (fInvalidChain || fParkedChain) { pindexFailed->nStatus = pindexFailed->nStatus.withFailedParent(fInvalidChain) .withParkedParent(fParkedChain); } else if (fMissingData) { // If we're missing data, then add back to // mapBlocksUnlinked, so that if the block arrives in the // future we can try adding to setBlockIndexCandidates // again. mapBlocksUnlinked.insert( std::make_pair(pindexFailed->pprev, pindexFailed)); } setBlockIndexCandidates.erase(pindexFailed); pindexFailed = pindexFailed->pprev; } if (fInvalidChain || fParkedChain) { // We discovered a new chain tip that is either parked or // invalid, we may want to warn. CheckForkWarningConditionsOnNewFork(pindexNew); } } // We found a candidate that has valid ancestors. This is our guy. if (hasValidAncestor) { return pindexNew; } } while (true); } /** * Delete all entries in setBlockIndexCandidates that are worse than the current * tip. */ static void PruneBlockIndexCandidates() { // Note that we can't delete the current block itself, as we may need to // return to it later in case a reorganization to a better block fails. std::set<CBlockIndex *, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin(); while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) { setBlockIndexCandidates.erase(it++); } // Either the current tip or a successor of it we're working towards is left // in setBlockIndexCandidates. assert(!setBlockIndexCandidates.empty()); } /** * Try to make some progress towards making pindexMostWork the active block. * pblock is either nullptr or a pointer to a CBlock corresponding to * pindexMostWork.
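* Blocks are connected in batches of at most 32 per iteration so that the
* caller's loop can periodically release cs_main between steps.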
*/ static bool ActivateBestChainStep(const Config &config, CValidationState &state, CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock, bool &fInvalidFound, ConnectTrace &connectTrace) { AssertLockHeld(cs_main); const CBlockIndex *pindexOldTip = chainActive.Tip(); const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); // Disconnect active blocks which are no longer in the best chain. bool fBlocksDisconnected = false; DisconnectedBlockTransactions disconnectpool; while (chainActive.Tip() && chainActive.Tip() != pindexFork) { if (!DisconnectTip(config, state, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, // just in case. Only remove from the mempool in this case. disconnectpool.updateMempoolForReorg(config, false); return false; } fBlocksDisconnected = true; } // Build list of new blocks to connect. std::vector<CBlockIndex *> vpindexToConnect; bool fContinue = true; int nHeight = pindexFork ? pindexFork->nHeight : -1; while (fContinue && nHeight != pindexMostWork->nHeight) { // Don't iterate the entire list of potential improvements toward the // best tip, as we likely only need a few blocks along the way. int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); vpindexToConnect.clear(); vpindexToConnect.reserve(nTargetHeight - nHeight); CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); while (pindexIter && pindexIter->nHeight != nHeight) { vpindexToConnect.push_back(pindexIter); pindexIter = pindexIter->pprev; } nHeight = nTargetHeight; // Connect new blocks. for (CBlockIndex *pindexConnect : boost::adaptors::reverse(vpindexToConnect)) { if (!ConnectTip(config, state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) { if (state.IsInvalid()) { // The block violates a consensus rule. if (!state.CorruptionPossible()) { InvalidChainFound(vpindexToConnect.back()); } state = CValidationState(); fInvalidFound = true; fContinue = false; break; } // A system error occurred (disk space, database error, ...). // Make the mempool consistent with the current tip, just in // case any observers try to use it before shutdown. disconnectpool.updateMempoolForReorg(config, false); return false; } else { PruneBlockIndexCandidates(); if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) { // We're in a better position than we were. Return // temporarily to release the lock. fContinue = false; break; } } } } if (fBlocksDisconnected) { // If any blocks were disconnected, disconnectpool may be non-empty. // Add any disconnected transactions back to the mempool. disconnectpool.updateMempoolForReorg(config, true); } mempool.check(pcoinsTip.get()); // Callbacks/notifications for a new best chain.
if (fInvalidFound) { CheckForkWarningConditionsOnNewFork(pindexMostWork); } else { CheckForkWarningConditions(); } return true; } static void NotifyHeaderTip() { bool fNotify = false; bool fInitialBlockDownload = false; static CBlockIndex *pindexHeaderOld = nullptr; CBlockIndex *pindexHeader = nullptr; { LOCK(cs_main); pindexHeader = pindexBestHeader; if (pindexHeader != pindexHeaderOld) { fNotify = true; fInitialBlockDownload = IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } } // Send block tip changed notifications without cs_main if (fNotify) { uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader); } } bool ActivateBestChain(const Config &config, CValidationState &state, std::shared_ptr<const CBlock> pblock) { // Note that while we're often called here from ProcessNewBlock, this is // far from a guarantee. Things in the P2P/RPC will often end up calling // us in the middle of ProcessNewBlock - do not assume pblock is set // sanely for performance or correctness! CBlockIndex *pindexMostWork = nullptr; CBlockIndex *pindexNewTip = nullptr; do { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } const CBlockIndex *pindexFork; bool fInitialDownload; { LOCK(cs_main); // Destructed before cs_main is unlocked. ConnectTrace connectTrace(mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == nullptr) { pindexMostWork = FindMostWorkChain(); } // Whether we have anything to do at all. if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) { return true; } bool fInvalidFound = false; std::shared_ptr<const CBlock> nullBlockPtr; if (!ActivateBestChainStep( config, state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) { return false; } if (fInvalidFound) { // Wipe cache, we may need another branch now. pindexMostWork = nullptr; } pindexNewTip = chainActive.Tip(); pindexFork = chainActive.FindFork(pindexOldTip); fInitialDownload = IsInitialBlockDownload(); for (const PerBlockConnectTrace &trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); GetMainSignals().BlockConnected(trace.pblock, trace.pindex, *trace.conflictedTxs); } } // When we reach this point, we switched to a new tip (stored in // pindexNewTip). // Notifications/callbacks that can run without cs_main // Notify external listeners about the new tip. GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); // Always notify the UI if a new block tip was connected if (pindexFork != pindexNewTip) { uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip); } } while (pindexNewTip != pindexMostWork); const CChainParams &params = config.GetChainParams(); CheckBlockIndex(params.GetConsensus()); // Write changes periodically to disk, after relay. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) { StartShutdown(); } return true; } bool PreciousBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { { LOCK(cs_main); if (pindex->nChainWork < chainActive.Tip()->nChainWork) { // Nothing to do, this block is not at the tip. return true; } if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) { // The chain has been extended since the last call, reset the // counter.
nBlockReverseSequenceId = -1; } nLastPreciousChainwork = chainActive.Tip()->nChainWork; setBlockIndexCandidates.erase(pindex); pindex->nSequenceId = nBlockReverseSequenceId; if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) { // We can't keep reducing the counter if somebody really wants to // call preciousblock 2**31-1 times on the same set of tips... nBlockReverseSequenceId--; } if (pindex->IsValid(BlockValidity::TRANSACTIONS) && pindex->nChainTx) { setBlockIndexCandidates.insert(pindex); PruneBlockIndexCandidates(); } } return ActivateBestChain(config, state); } bool InvalidateBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { AssertLockHeld(cs_main); // Mark the block itself as invalid. pindex->nStatus = pindex->nStatus.withFailed(); setDirtyBlockIndex.insert(pindex); DisconnectedBlockTransactions disconnectpool; while (chainActive.Contains(pindex)) { CBlockIndex *pindexWalk = chainActive.Tip(); pindexWalk->nStatus = pindexWalk->nStatus.withFailedParent(); setDirtyBlockIndex.insert(pindexWalk); // ActivateBestChain considers blocks already in chainActive // unconditionally valid already, so force disconnect away from it. if (!DisconnectTip(config, state, &disconnectpool)) { // It's probably hopeless to try to make the mempool consistent // here if DisconnectTip failed, but we can try. disconnectpool.updateMempoolForReorg(config, false); return false; } } // DisconnectTip will add transactions to disconnectpool; try to add these // back to the mempool. disconnectpool.updateMempoolForReorg(config, true); // The resulting new best tip may not be in setBlockIndexCandidates anymore, // so add it again. for (const std::pair<const uint256, CBlockIndex *> &it : mapBlockIndex) { CBlockIndex *i = it.second; if (i->IsValid(BlockValidity::TRANSACTIONS) && i->nChainTx && !setBlockIndexCandidates.value_comp()(i, chainActive.Tip())) { setBlockIndexCandidates.insert(i); } } InvalidChainFound(pindex); uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev); return true; } bool ResetBlockFailureFlags(CBlockIndex *pindex) { AssertLockHeld(cs_main); int nHeight = pindex->nHeight; // Remove the invalidity flag from this block and all its descendants. BlockMap::iterator it = mapBlockIndex.begin(); while (it != mapBlockIndex.end()) { if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { it->second->nStatus = it->second->nStatus.withClearedFailureFlags(); setDirtyBlockIndex.insert(it->second); if (it->second->IsValid(BlockValidity::TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) { setBlockIndexCandidates.insert(it->second); } if (it->second == pindexBestInvalid) { // Reset invalid block marker if it was pointing to one of // those. pindexBestInvalid = nullptr; } } it++; } // Remove the invalidity flag from all ancestors too.
while (pindex != nullptr) { if (pindex->nStatus.isInvalid()) { pindex->nStatus = pindex->nStatus.withClearedFailureFlags(); setDirtyBlockIndex.insert(pindex); } pindex = pindex->pprev; } return true; } static CBlockIndex *AddToBlockIndex(const CBlockHeader &block) { // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end()) { return it->second; } // Construct new block index object CBlockIndex *pindexNew = new CBlockIndex(block); assert(pindexNew); // We assign the sequence id to blocks only when the full data is available, // to avoid miners withholding blocks but broadcasting headers, to get a // competitive advantage. pindexNew->nSequenceId = 0; BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock); if (miPrev != mapBlockIndex.end()) { pindexNew->pprev = (*miPrev).second; pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->BuildSkip(); } pindexNew->nTimeReceived = GetTime(); pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BlockValidity::TREE); if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) { pindexBestHeader = pindexNew; } setDirtyBlockIndex.insert(pindexNew); return pindexNew; } /** * Mark a block as having its data received and checked (up to * BLOCK_VALID_TRANSACTIONS). */ bool ReceivedBlockTransactions(const CBlock &block, CValidationState &state, CBlockIndex *pindexNew, const CDiskBlockPos &pos) { pindexNew->nTx = block.vtx.size(); pindexNew->nChainTx = 0; pindexNew->nFile = pos.nFile; pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; pindexNew->nStatus = pindexNew->nStatus.withData(); pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) { // If pindexNew is the genesis block or all parents are // BLOCK_VALID_TRANSACTIONS. std::deque<CBlockIndex *> queue; queue.push_back(pindexNew); // Recursively process any descendant blocks that now may be eligible to // be connected. while (!queue.empty()) { CBlockIndex *pindex = queue.front(); queue.pop_front(); pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx; if (pindex->nSequenceId == 0) { // We assign a sequence id when transactions are received to // prevent a miner from being able to broadcast a block but not // its content. However, a sequence id may have been set // manually, for instance via PreciousBlock, in which case, we // don't need to assign one.
pindex->nSequenceId = nBlockSequenceId++; } if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { setBlockIndexCandidates.insert(pindex); } std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex); while (range.first != range.second) { std::multimap::iterator it = range.first; queue.push_back(it->second); range.first++; mapBlocksUnlinked.erase(it); } } } else if (pindexNew->pprev && pindexNew->pprev->IsValid(BlockValidity::TREE)) { mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); } return true; } static bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false) { LOCK(cs_LastBlockFile); unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } if (!fKnown) { while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { nFile++; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } } pos.nFile = nFile; pos.nPos = vinfoBlockFile[nFile].nSize; } if ((int)nFile != nLastBlockFile) { if (!fKnown) { LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); } FlushBlockFile(!fKnown); nLastBlockFile = nFile; } vinfoBlockFile[nFile].AddBlock(nHeight, nTime); if (fKnown) { vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize); } else { vinfoBlockFile[nFile].nSize += nAddSize; } if (!fKnown) { unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) { fCheckForPruning = true; } if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenBlockFile(pos); if (file) { LogPrintf( "Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else { return state.Error("out of disk space"); } } } setDirtyFileInfo.insert(nFile); return true; } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize) { pos.nFile = nFile; LOCK(cs_LastBlockFile); unsigned int nNewSize; pos.nPos = vinfoBlockFile[nFile].nUndoSize; nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize; setDirtyFileInfo.insert(nFile); unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) { fCheckForPruning = true; } if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else { return state.Error("out of disk space"); } } return true; } /** * Return true if the provided block header is valid. * Only verify PoW if blockValidationOptions is configured to do so. * This allows validation of headers on which the PoW hasn't been done. * For example: to validate template handed to mining software. * Do not call this for any check that depends on the context. 
* For context-dependent calls, see ContextualCheckBlockHeader. */ static bool CheckBlockHeader( const Config &config, const CBlockHeader &block, CValidationState &state, BlockValidationOptions validationOptions = BlockValidationOptions()) { // Check proof of work matches claimed amount if (validationOptions.shouldValidatePoW() && !CheckProofOfWork(block.GetHash(), block.nBits, config)) { return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed"); } return true; } bool CheckBlock(const Config &config, const CBlock &block, CValidationState &state, BlockValidationOptions validationOptions) { // These are checks that are independent of context. if (block.fChecked) { return true; } // Check that the header is valid (particularly PoW). This is mostly // redundant with the call in AcceptBlockHeader. if (!CheckBlockHeader(config, block, state, validationOptions)) { return false; } // Check the merkle root. if (validationOptions.shouldValidateMerkleRoot()) { bool mutated; uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated); if (block.hashMerkleRoot != hashMerkleRoot2) { return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch"); } // Check for merkle tree malleability (CVE-2012-2459): repeating // sequences of transactions in a block without affecting the merkle // root of a block, while still invalidating it. if (mutated) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction"); } } // All potential-corruption validation must be done before we do any // transaction validation, as otherwise we may mark the header as invalid // because we receive the wrong transactions for it. // First transaction must be coinbase. if (block.vtx.empty()) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase"); } // Size limits. auto nMaxBlockSize = config.GetMaxBlockSize(); // Bail early if there is no way this block is of reasonable size. if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); } auto currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); if (currentBlockSize > nMaxBlockSize) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); } // And a valid coinbase. if (!CheckCoinbase(*block.vtx[0], state)) { return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(), strprintf("Coinbase check failed (txid %s) %s", block.vtx[0]->GetId().ToString(), state.GetDebugMessage())); } // Keep track of the sigops count. uint64_t nSigOps = 0; auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize); // Check transactions auto txCount = block.vtx.size(); auto *tx = block.vtx[0].get(); size_t i = 0; while (true) { // Count the sigops for the current transaction. If the total sigops // count is too high, the block is invalid. nSigOps += GetSigOpCountWithoutP2SH(*tx, STANDARD_SCRIPT_VERIFY_FLAGS); if (nSigOps > nMaxSigOpsCount) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount"); } // Go to the next transaction. i++; // We reached the end of the block, success. if (i >= txCount) { break; } // Check that the transaction is valid. Because this check differs for // the coinbase, the loop is arranged so that this check only runs after // at least one increment.
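// (On the first pass, tx still points at the coinbase, which was already // vetted by CheckCoinbase above; each later pass re-points tx below and // applies the regular-transaction checks instead.)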
tx = block.vtx[i].get(); if (!CheckRegularTransaction(*tx, state)) { return state.Invalid( false, state.GetRejectCode(), state.GetRejectReason(), strprintf("Transaction check failed (txid %s) %s", tx->GetId().ToString(), state.GetDebugMessage())); } } if (validationOptions.shouldValidatePoW() && validationOptions.shouldValidateMerkleRoot()) { block.fChecked = true; } return true; } static bool CheckIndexAgainstCheckpoint(const CBlockIndex *pindexPrev, CValidationState &state, const CChainParams &chainparams, const uint256 &hash) { if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock) { return true; } int nHeight = pindexPrev->nHeight + 1; const CCheckpointData &checkpoints = chainparams.Checkpoints(); // Check that the block chain matches the known block chain up to a // checkpoint. if (!Checkpoints::CheckBlock(checkpoints, nHeight, hash)) { return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight), REJECT_CHECKPOINT, "checkpoint mismatch"); } // Don't accept any forks from the main chain prior to last checkpoint. // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in // our MapBlockIndex. CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints); if (pcheckpoint && nHeight < pcheckpoint->nHeight) { return state.DoS( 100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint"); } return true; } static bool ContextualCheckBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, const CBlockIndex *pindexPrev, int64_t nAdjustedTime) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; // Check proof of work if (block.nBits != GetNextWorkRequired(pindexPrev, &block, config)) { LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight); return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work"); } // Check timestamp against prev if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) { return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early"); } // Check timestamp if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) { return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future"); } // Reject outdated version blocks when 95% (75% on testnet) of the network // has upgraded: // check for version 2, 3 and 4 upgrades if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) || (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) || (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) { return state.Invalid( false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion), strprintf("rejected nVersion=0x%08x block", block.nVersion)); } return true; } bool ContextualCheckTransactionForCurrentBlock(const Config &config, const CTransaction &tx, CValidationState &state, int flags) { AssertLockHeld(cs_main); // By convention a negative value for flags indicates that the current // network-enforced consensus rules should be used. In a future soft-fork // scenario that would mean checking which rules would be enforced for the // next block and setting the appropriate flags. At the present time no // soft-forks are scheduled, so no flags are set. 
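// For illustration, a mempool-acceptance caller is expected to invoke this // as, e.g.: // ContextualCheckTransactionForCurrentBlock(config, tx, state, // STANDARD_LOCKTIME_VERIFY_FLAGS); // whereas a caller passing a negative value is clamped to zero below and // gets the bare consensus rules.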
flags = std::max(flags, 0); // ContextualCheckTransactionForCurrentBlock() uses chainActive.Height()+1 // to evaluate nLockTime because when IsFinalTx() is called within // CBlock::AcceptBlock(), the height of the block *being* evaluated is what // is used. Thus if we want to know if a transaction can be part of the // *next* block, we need to call ContextualCheckTransaction() with one more // than chainActive.Height(). const int nBlockHeight = chainActive.Height() + 1; // BIP113 will require that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. // When the next block is created its previous block will be the current // chain tip, so we use that to calculate the median time passed to // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set. const int64_t nMedianTimePast = chainActive.Tip() == nullptr ? 0 : chainActive.Tip()->GetMedianTimePast(); const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST) ? nMedianTimePast : GetAdjustedTime(); return ContextualCheckTransaction(config, tx, state, nBlockHeight, nLockTimeCutoff, nMedianTimePast); } static bool ContextualCheckBlock(const Config &config, const CBlock &block, CValidationState &state, const CBlockIndex *pindexPrev) { const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Start enforcing BIP113 (Median Time Past). int nLockTimeFlags = 0; if (nHeight >= consensusParams.CSVHeight) { nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST; } const int64_t nMedianTimePast = pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast(); const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST) ? nMedianTimePast : block.GetBlockTime(); const bool fIsMagneticAnomalyEnabled = IsMagneticAnomalyEnabled(config, pindexPrev); // Check that all transactions are finalized const CTransaction *prevTx = nullptr; for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; if (fIsMagneticAnomalyEnabled) { if (prevTx && (tx.GetId() < prevTx->GetId())) { return state.DoS( 100, false, REJECT_INVALID, "tx-ordering", false, strprintf("Transaction order is invalid (%s < %s)", tx.GetId().ToString(), prevTx->GetId().ToString())); } if (prevTx || !tx.IsCoinBase()) { prevTx = &tx; } } if (!ContextualCheckTransaction(config, tx, state, nHeight, nLockTimeCutoff, nMedianTimePast)) { // state set by ContextualCheckTransaction. return false; } } // Enforce rule that the coinbase starts with serialized block height if (nHeight >= consensusParams.BIP34Height) { CScript expect = CScript() << nHeight; if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase"); } } return true; } /** * If the provided block header is valid, add it to the block index. * * Returns true if the block is successfully added to the block index.
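* Also returns true when the header is already known and valid, in which * case *ppindex is simply pointed at the existing entry; returns false via * state when the header is invalid or builds on an invalid parent.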
*/ static bool AcceptBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, CBlockIndex **ppindex) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator miSelf = mapBlockIndex.find(hash); CBlockIndex *pindex = nullptr; if (hash != chainparams.GetConsensus().hashGenesisBlock) { if (miSelf != mapBlockIndex.end()) { // Block header is already known. pindex = miSelf->second; if (ppindex) { *ppindex = pindex; } if (pindex->nStatus.isInvalid()) { return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate"); } return true; } if (!CheckBlockHeader(config, block, state)) { return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } // Get prev block index CBlockIndex *pindexPrev = nullptr; BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); if (mi == mapBlockIndex.end()) { return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found"); } pindexPrev = (*mi).second; if (pindexPrev->nStatus.isInvalid()) { return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); } assert(pindexPrev); if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash)) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } } if (pindex == nullptr) { pindex = AddToBlockIndex(block); } if (ppindex) { *ppindex = pindex; } CheckBlockIndex(chainparams.GetConsensus()); return true; } // Exposed wrapper for AcceptBlockHeader bool ProcessNewBlockHeaders(const Config &config, const std::vector &headers, CValidationState &state, const CBlockIndex **ppindex, CBlockHeader *first_invalid) { if (first_invalid != nullptr) { first_invalid->SetNull(); } { LOCK(cs_main); for (const CBlockHeader &header : headers) { // Use a temp pindex instead of ppindex to avoid a const_cast CBlockIndex *pindex = nullptr; if (!AcceptBlockHeader(config, header, state, &pindex)) { if (first_invalid) { *first_invalid = header; } return false; } if (ppindex) { *ppindex = pindex; } } } NotifyHeaderTip(); return true; } /** * Store a block on disk. * * @param[in] config The global config. * @param[in-out] pblock The block we want to accept. * @param[out] ppindex The last new block index, only set if the block * was accepted. * @param[in] fRequested A boolean to indicate if this block was requested * from our peers. * @param[in] dbp If non-null, the disk position of the block. * @param[in-out] fNewBlock True if block was first received via this call. * @return True if the block is accepted as a valid block and written to disk. */ static bool AcceptBlock(const Config &config, const std::shared_ptr &pblock, CValidationState &state, CBlockIndex **ppindex, bool fRequested, const CDiskBlockPos *dbp, bool *fNewBlock) { AssertLockHeld(cs_main); const CBlock &block = *pblock; if (fNewBlock) { *fNewBlock = false; } CBlockIndex *pindexDummy = nullptr; CBlockIndex *&pindex = ppindex ? 
*ppindex : pindexDummy; if (!AcceptBlockHeader(config, block, state, &pindex)) { return false; } // Try to process all requested blocks that we don't have, but only // process an unrequested block if it's new and has enough work to // advance our tip, and isn't too many blocks ahead. bool fAlreadyHave = pindex->nStatus.hasData(); // Compare block header timestamps and received times of the block and the // chaintip. If they have the same chain height, use these diffs as a // tie-breaker, attempting to pick the more honestly-mined block. int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff()); int64_t chainTipTimeDiff = chainActive.Tip() ? std::llabs(chainActive.Tip()->GetReceivedTimeDiff()) : 0; bool isSameHeight = chainActive.Tip() && (pindex->nChainWork == chainActive.Tip()->nChainWork); if (isSameHeight) { LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, " "diff=%d\n", chainActive.Tip()->GetBlockHash().ToString(), chainTipTimeDiff); LogPrintf("New block timestamp-to-received-time difference: hash=%s, " "diff=%d\n", pindex->GetBlockHash().ToString(), newBlockTimeDiff); } bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); // Blocks that are too out-of-order needlessly limit the effectiveness of // pruning, because pruning will not delete block files that contain any // blocks which are too close in height to the tip. Apply this test // regardless of whether pruning is enabled; it should generally be safe to // not process unrequested blocks. bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP)); // TODO: Decouple this function from the block download logic by removing // fRequested // This requires some new chain datastructure to efficiently look up if a // block is in a chain leading to a candidate for best tip, despite not // being such a candidate itself. // TODO: deal better with return value and error conditions for duplicate // and unrequested blocks. if (fAlreadyHave) { return true; } // If we didn't ask for it: if (!fRequested) { // This is a previously-processed block that was pruned. if (pindex->nTx != 0) { return true; } // Don't process less-work chains. if (!fHasMoreWork) { return true; } // Block height is too high. if (fTooFarAhead) { return true; } } if (fNewBlock) { *fNewBlock = true; } if (!CheckBlock(config, block, state) || !ContextualCheckBlock(config, block, state, pindex->pprev)) { if (state.IsInvalid() && !state.CorruptionPossible()) { pindex->nStatus = pindex->nStatus.withFailed(); setDirtyBlockIndex.insert(pindex); } return error("%s: %s (block %s)", __func__, FormatStateMessage(state), block.GetHash().ToString()); } // Header is valid/has work and the merkle tree is good. // Relay now, but if it does not build on our best tip, let the // SendMessages loop relay it. 
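// (NewPoWValidBlock is the signal that lets the networking layer announce // this block to peers via compact blocks (BIP 152) before it is fully // connected; at this point the header, PoW, and transaction-level checks // have passed, but scripts have not been verified.)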
if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) { GetMainSignals().NewPoWValidBlock(pindex, pblock); } int nHeight = pindex->nHeight; const CChainParams &chainparams = config.GetChainParams(); // Write block to history file try { unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; if (dbp != nullptr) { blockPos = *dbp; } if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(), dbp != nullptr)) { return error("AcceptBlock(): FindBlockPos failed"); } if (dbp == nullptr) { if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { AbortNode(state, "Failed to write block"); } } if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("AcceptBlock(): ReceivedBlockTransactions failed"); } } catch (const std::runtime_error &e) { return AbortNode(state, std::string("System error: ") + e.what()); } if (fCheckForPruning) { // we just allocated more disk space for block files. FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_NONE); } return true; } bool ProcessNewBlock(const Config &config, const std::shared_ptr pblock, bool fForceProcessing, bool *fNewBlock) { { CBlockIndex *pindex = nullptr; if (fNewBlock) { *fNewBlock = false; } const CChainParams &chainparams = config.GetChainParams(); CValidationState state; // Ensure that CheckBlock() passes before calling AcceptBlock, as // belt-and-suspenders. bool ret = CheckBlock(config, *pblock, state); LOCK(cs_main); if (ret) { // Store to disk ret = AcceptBlock(config, pblock, state, &pindex, fForceProcessing, nullptr, fNewBlock); } CheckBlockIndex(chainparams.GetConsensus()); if (!ret) { GetMainSignals().BlockChecked(*pblock, state); return error("%s: AcceptBlock FAILED", __func__); } } NotifyHeaderTip(); // Only used to report errors, not invalidity - ignore it CValidationState state; if (!ActivateBestChain(config, state, pblock)) { return error("%s: ActivateBestChain failed", __func__); } return true; } bool TestBlockValidity(const Config &config, CValidationState &state, const CBlock &block, CBlockIndex *pindexPrev, BlockValidationOptions validationOptions) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); assert(pindexPrev && pindexPrev == chainActive.Tip()); if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash())) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } CCoinsViewCache viewNew(pcoinsTip.get()); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; // NOTE: CheckBlockHeader is called by CheckBlock if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state)); } if (!CheckBlock(config, block, state, validationOptions)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ContextualCheckBlock(config, block, state, pindexPrev)) { return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ConnectBlock(config, block, state, &indexDummy, viewNew, true)) { return false; } assert(state.IsValid()); return true; } /** * BLOCK PRUNING CODE */ /** * Calculate the amount of disk space the block & undo files currently use. 
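* (The total is simply nSize + nUndoSize summed over vinfoBlockFile; * FindFilesToPrune below compares it against the configured -prune target.)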
*/ static uint64_t CalculateCurrentUsage() { uint64_t retval = 0; for (const CBlockFileInfo &file : vinfoBlockFile) { retval += file.nSize + file.nUndoSize; } return retval; } /** * Prune a block file (modify associated database entries) */ void PruneOneBlockFile(const int fileNumber) { for (const std::pair &it : mapBlockIndex) { CBlockIndex *pindex = it.second; if (pindex->nFile == fileNumber) { pindex->nStatus = pindex->nStatus.withData(false).withUndo(false); pindex->nFile = 0; pindex->nDataPos = 0; pindex->nUndoPos = 0; setDirtyBlockIndex.insert(pindex); // Prune from mapBlocksUnlinked -- any block we prune would have // to be downloaded again in order to consider its chain, at which // point it would be considered as a candidate for // mapBlocksUnlinked or setBlockIndexCandidates. std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); while (range.first != range.second) { std::multimap::iterator _it = range.first; range.first++; if (_it->second == pindex) { mapBlocksUnlinked.erase(_it); } } } } vinfoBlockFile[fileNumber].SetNull(); setDirtyFileInfo.insert(fileNumber); } void UnlinkPrunedFiles(const std::set &setFilesToPrune) { for (const int i : setFilesToPrune) { CDiskBlockPos pos(i, 0); fs::remove(GetBlockPosFilename(pos, "blk")); fs::remove(GetBlockPosFilename(pos, "rev")); LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i); } } /** * Calculate the block/rev files to delete based on height specified by user * with RPC command pruneblockchain */ static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight) { assert(fPruneMode && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr) { return; } // last block to prune is the lesser of (user-specified height, // MIN_BLOCKS_TO_KEEP from the tip) unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP); int count = 0; for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); } /* This function is called from the RPC code for pruneblockchain */ void PruneBlockFilesManual(int nManualPruneHeight) { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight); } /** * Prune block and undo files (blk???.dat and undo???.dat) so that the disk * space used is less than a user-defined target. The user sets the target (in * MB) on the command line or in config file. This will be run on startup and * whenever new space is allocated in a block or undo file, staying below the * target. Changing back to unpruned requires a reindex (which in this case * means the blockchain must be re-downloaded.) * * Pruning functions are called from FlushStateToDisk when the global * fCheckForPruning flag has been set. Block and undo files are deleted in * lock-step (when blk00003.dat is deleted, so is rev00003.dat.). Pruning cannot * take place until the longest chain is at least a certain length (100000 on * mainnet, 1000 on testnet, 1000 on regtest). Pruning will never delete a block * within a defined distance (currently 288) from the active chain's tip. 
The * block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks * that were stored in the deleted files. A db flag records the fact that at * least some block files have been pruned. * * @param[out] setFilesToPrune The set of file indices that can be unlinked * will be returned */ static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight) { LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr || nPruneTarget == 0) { return; } if (uint64_t(chainActive.Tip()->nHeight) <= nPruneAfterHeight) { return; } unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files, // so we should leave a buffer under our target to account for another // allocation before the next pruning. uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; uint64_t nBytesToPrune; int count = 0; if (nCurrentUsage + nBuffer >= nPruneTarget) { for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; if (vinfoBlockFile[fileNumber].nSize == 0) { continue; } // are we below our target? if (nCurrentUsage + nBuffer < nPruneTarget) { break; } // don't prune files that could have a block within // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); // Queue up the files for removal setFilesToPrune.insert(fileNumber); nCurrentUsage -= nBytesToPrune; count++; } } LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB " "max_prune_height=%d removed %d blk/rev pairs\n", nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024, ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024, nLastBlockWeCanPrune, count); } bool CheckDiskSpace(uint64_t nAdditionalBytes) { uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) { return AbortNode("Disk space is low!", _("Error: Disk space is low!")); } return true; } static FILE *OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) { if (pos.IsNull()) { return nullptr; } fs::path path = GetBlockPosFilename(pos, prefix); fs::create_directories(path.parent_path()); FILE *file = fsbridge::fopen(path, "rb+"); if (!file && !fReadOnly) { file = fsbridge::fopen(path, "wb+"); } if (!file) { LogPrintf("Unable to open file %s\n", path.string()); return nullptr; } if (pos.nPos) { if (fseek(file, pos.nPos, SEEK_SET)) { LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); fclose(file); return nullptr; } } return file; } FILE *OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "blk", fReadOnly); } /** Open an undo file (rev?????.dat) */ static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "rev", fReadOnly); } fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) { return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); } CBlockIndex *InsertBlockIndex(uint256 hash) { if (hash.IsNull()) { return nullptr; } // Return existing BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { return (*mi).second; } // Create new CBlockIndex *pindexNew = new CBlockIndex(); if 
(!pindexNew) { throw std::runtime_error(std::string(__func__) + ": new CBlockIndex failed"); } mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); return pindexNew; } static bool LoadBlockIndexDB(const Config &config) { if (!pblocktree->LoadBlockIndexGuts(config, InsertBlockIndex)) { return false; } boost::this_thread::interruption_point(); // Calculate nChainWork std::vector> vSortedByHeight; vSortedByHeight.reserve(mapBlockIndex.size()); for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex)); } sort(vSortedByHeight.begin(), vSortedByHeight.end()); for (const std::pair &item : vSortedByHeight) { CBlockIndex *pindex = item.second; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); // We can link the chain of blocks for which we've received transactions // at some point. Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { if (pindex->pprev->nChainTx) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; } else { pindex->nChainTx = 0; mapBlocksUnlinked.insert( std::make_pair(pindex->pprev, pindex)); } } else { pindex->nChainTx = pindex->nTx; } } if (pindex->IsValid(BlockValidity::TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr)) { setBlockIndexCandidates.insert(pindex); } if (pindex->nStatus.isInvalid() && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork)) { pindexBestInvalid = pindex; } if (pindex->nStatus.isOnParkedChain() && (!pindexBestParked || pindex->nChainWork > pindexBestParked->nChainWork)) { pindexBestParked = pindex; } if (pindex->pprev) { pindex->BuildSkip(); } if (pindex->IsValid(BlockValidity::TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) { pindexBestHeader = pindex; } } // Load block file info pblocktree->ReadLastBlockFile(nLastBlockFile); vinfoBlockFile.resize(nLastBlockFile + 1); LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile); for (int nFile = 0; nFile <= nLastBlockFile; nFile++) { pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]); } LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString()); for (int nFile = nLastBlockFile + 1; true; nFile++) { CBlockFileInfo info; if (pblocktree->ReadBlockFileInfo(nFile, info)) { vinfoBlockFile.push_back(info); } else { break; } } // Check presence of blk files LogPrintf("Checking all blk files are present...\n"); std::set setBlkDataFiles; for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; if (pindex->nStatus.hasData()) { setBlkDataFiles.insert(pindex->nFile); } } for (const int i : setBlkDataFiles) { CDiskBlockPos pos(i, 0); if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION) .IsNull()) { return false; } } // Check whether we have ever pruned block & undo files pblocktree->ReadFlag("prunedblockfiles", fHavePruned); if (fHavePruned) { LogPrintf( "LoadBlockIndexDB(): Block files have previously been pruned\n"); } // Check whether we need to continue reindexing bool fReindexing = false; pblocktree->ReadReindexing(fReindexing); fReindex |= fReindexing; // Check whether we have a transaction index pblocktree->ReadFlag("txindex", fTxIndex); LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? 
"enabled" : "disabled"); return true; } bool LoadChainTip(const Config &config) { if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) { return true; } if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) { // In case we just added the genesis block, connect it now, so // that we always have a chainActive.Tip() when we return. LogPrintf("%s: Connecting genesis block...\n", __func__); CValidationState state; if (!ActivateBestChain(config, state)) { return false; } } // Load pointer to end of best chain BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); if (it == mapBlockIndex.end()) { return false; } chainActive.SetTip(it->second); PruneBlockIndexCandidates(); LogPrintf( "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n", chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(config.GetChainParams().TxData(), chainActive.Tip())); return true; } CVerifyDB::CVerifyDB() { uiInterface.ShowProgress(_("Verifying blocks..."), 0); } CVerifyDB::~CVerifyDB() { uiInterface.ShowProgress("", 100); } bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) { LOCK(cs_main); if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) { return true; } // Verify blocks in the best chain if (nCheckDepth <= 0) { // suffices until the year 19000 nCheckDepth = 1000000000; } if (nCheckDepth > chainActive.Height()) { nCheckDepth = chainActive.Height(); } nCheckLevel = std::max(0, std::min(4, nCheckLevel)); LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); CCoinsViewCache coins(coinsview); CBlockIndex *pindexState = chainActive.Tip(); CBlockIndex *pindexFailure = nullptr; int nGoodTransactions = 0; CValidationState state; int reportDone = 0; LogPrintf("[0%%]..."); for (CBlockIndex *pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { boost::this_thread::interruption_point(); int percentageDone = std::max( 1, std::min( 99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); if (reportDone < percentageDone / 10) { // report every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone); if (pindex->nHeight < chainActive.Height() - nCheckDepth) { break; } if (fPruneMode && !pindex->nStatus.hasData()) { // If pruning, only go back as far as we have data. 
LogPrintf("VerifyDB(): block verification stopping at height %d " "(pruning, no data)\n", pindex->nHeight); break; } CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(config, block, state)) { return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state)); } // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; CDiskBlockPos pos = pindex->GetUndoPos(); if (!pos.IsNull()) { if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) { return error( "VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } // check level 3: check for inconsistencies during memory-only // disconnect of tip blocks if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { return error("VerifyDB(): *** irrecoverable inconsistency in " "block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } pindexState = pindex->pprev; if (res == DISCONNECT_UNCLEAN) { nGoodTransactions = 0; pindexFailure = pindex; } else { nGoodTransactions += block.vtx.size(); } } if (ShutdownRequested()) { return true; } } if (pindexFailure) { return error("VerifyDB(): *** coin database inconsistencies found " "(last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); } // check level 4: try reconnecting blocks if (nCheckLevel >= 4) { CBlockIndex *pindex = pindexState; while (pindex != chainActive.Tip()) { boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } if (!ConnectBlock(config, block, state, pindex, coins)) { return error( "VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } LogPrintf("[DONE].\n"); LogPrintf("No coin database inconsistencies in last %i blocks (%i " "transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions); return true; } /** * Apply the effects of a block on the utxo cache, ignoring that it may already * have been applied. */ static bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &view, const Config &config) { // TODO: merge with ConnectBlock CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } for (const CTransactionRef &tx : block.vtx) { // Pass check = true as every addition may be an overwrite. 
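// (If the previous flush was interrupted partway through, some of this // block's outputs may already exist in the view; passing check = true // tolerates such overwrites instead of treating them as corruption.)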
AddCoins(view, *tx, pindex->nHeight, true); } for (const CTransactionRef &tx : block.vtx) { if (tx->IsCoinBase()) { continue; } for (const CTxIn &txin : tx->vin) { view.SpendCoin(txin.prevout); } } return true; } bool ReplayBlocks(const Config &config, CCoinsView *view) { LOCK(cs_main); CCoinsViewCache cache(view); std::vector<uint256> hashHeads = view->GetHeadBlocks(); if (hashHeads.empty()) { // We're already in a consistent state. return true; } if (hashHeads.size() != 2) { return error("ReplayBlocks(): unknown inconsistent state"); } uiInterface.ShowProgress(_("Replaying blocks..."), 0); LogPrintf("Replaying blocks\n"); // Old tip during the interrupted flush. const CBlockIndex *pindexOld = nullptr; // New tip during the interrupted flush. const CBlockIndex *pindexNew; // Latest block common to both the old and the new tip. const CBlockIndex *pindexFork = nullptr; if (mapBlockIndex.count(hashHeads[0]) == 0) { return error( "ReplayBlocks(): reorganization to unknown block requested"); } pindexNew = mapBlockIndex[hashHeads[0]]; if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. if (mapBlockIndex.count(hashHeads[1]) == 0) { return error( "ReplayBlocks(): reorganization from unknown block requested"); } pindexOld = mapBlockIndex[hashHeads[1]]; pindexFork = LastCommonAncestor(pindexOld, pindexNew); assert(pindexFork != nullptr); } // Rollback along the old branch. while (pindexOld != pindexFork) { if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. CBlock block; if (!ReadBlockFromDisk(block, pindexOld, config)) { return error("RollbackBlock(): ReadBlockFromDisk() failed at " "%d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); DisconnectResult res = DisconnectBlock(block, pindexOld, cache); if (res == DISCONNECT_FAILED) { return error( "RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO // was deleted, or an existing UTXO was overwritten. It corresponds // to cases where the block-to-be-disconnected never had all its // operations applied to the UTXO set. However, as both writing a // UTXO and deleting a UTXO are idempotent operations, the result is // still a version of the UTXO set with the effects of that block // undone. } pindexOld = pindexOld->pprev; } // Roll forward from the forking point to the new tip. int nForkHeight = pindexFork ?
pindexFork->nHeight : 0; for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) { const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight); LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight); if (!RollforwardBlock(pindex, cache, config)) { return false; } } cache.SetBestBlock(pindexNew->GetBlockHash()); cache.Flush(); uiInterface.ShowProgress("", 100); return true; } bool RewindBlockIndex(const Config &config) { LOCK(cs_main); const CChainParams &params = config.GetChainParams(); int nHeight = chainActive.Height() + 1; // nHeight is now the height of the first insufficiently-validated block, or // tipheight + 1 CValidationState state; CBlockIndex *pindex = chainActive.Tip(); while (chainActive.Height() >= nHeight) { if (fPruneMode && !chainActive.Tip()->nStatus.hasData()) { // If pruning, don't try rewinding past the HAVE_DATA point; since // older blocks can't be served anyway, there's no need to walk // further, and trying to DisconnectTip() will fail (and require a // needless reindex/redownload of the blockchain). break; } if (!DisconnectTip(config, state, nullptr)) { return error( "RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } } // Reduce validity flag and have-data flags. // We do this after actual disconnecting, otherwise we'll end up writing the // lack of data to disk before writing the chainstate, resulting in a // failure to continue if interrupted. for (const std::pair<const uint256, CBlockIndex *> &p : mapBlockIndex) { CBlockIndex *pindexIter = p.second; if (pindexIter->IsValid(BlockValidity::TRANSACTIONS) && pindexIter->nChainTx) { setBlockIndexCandidates.insert(pindexIter); } } if (chainActive.Tip() != nullptr) { // We can't prune block index candidates based on our tip if we have // no tip due to chainActive being empty! PruneBlockIndexCandidates(); CheckBlockIndex(params.GetConsensus()); // FlushStateToDisk can possibly read chainActive. Be conservative // and skip it here, we're about to -reindex-chainstate anyway, so // it'll get called a bunch real soon. if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { return false; } } return true; } // May NOT be used after any connections are up as much of the peer-processing // logic assumes a consistent block index state void UnloadBlockIndex() { LOCK(cs_main); setBlockIndexCandidates.clear(); chainActive.SetTip(nullptr); pindexBestInvalid = nullptr; pindexBestParked = nullptr; pindexBestHeader = nullptr; mempool.clear(); mapBlocksUnlinked.clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; nBlockSequenceId = 1; setDirtyBlockIndex.clear(); setDirtyFileInfo.clear(); versionbitscache.Clear(); for (BlockMap::value_type &entry : mapBlockIndex) { delete entry.second; } mapBlockIndex.clear(); fHavePruned = false; } bool LoadBlockIndex(const Config &config) { // Load block index from databases bool needs_init = fReindex; if (!fReindex) { bool ret = LoadBlockIndexDB(config); if (!ret) { return false; } needs_init = mapBlockIndex.empty(); } if (needs_init) { // Everything here is for *new* reindex/DBs. Thus, though // LoadBlockIndexDB may have set fReindex if we shut down // mid-reindex previously, we don't check fReindex and // instead only check it prior to LoadBlockIndexDB to set // needs_init.
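// (The txindex flag written below is read back by LoadBlockIndexDB on // later startups; changing -txindex afterwards therefore requires a // -reindex to rebuild the databases.)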
LogPrintf("Initializing databases...\n"); // Use the provided setting for -txindex in the new database fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX); pblocktree->WriteFlag("txindex", fTxIndex); } return true; } bool LoadGenesisBlock(const CChainParams &chainparams) { LOCK(cs_main); // Check whether we're already initialized by checking for genesis in // mapBlockIndex. Note that we can't use chainActive here, since it is // set based on the coins db, not the block index db, which is the only // thing loaded at this point. if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash())) { return true; } // Only add the genesis block if not reindexing (in which case we reuse the // one already on disk) try { CBlock &block = const_cast(chainparams.GenesisBlock()); // Start new block file unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; CValidationState state; if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0, block.GetBlockTime())) { return error("%s: FindBlockPos failed", __func__); } if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { return error("%s: writing genesis block to disk failed", __func__); } CBlockIndex *pindex = AddToBlockIndex(block); if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("%s: genesis block not accepted", __func__); } } catch (const std::runtime_error &e) { return error("%s: failed to write genesis block: %s", __func__, e.what()); } return true; } bool LoadExternalBlockFile(const Config &config, FILE *fileIn, CDiskBlockPos *dbp) { // Map of disk positions for blocks with unknown parent (only used for // reindex) static std::multimap mapBlocksUnknownParent; int64_t nStart = GetTimeMillis(); const CChainParams &chainparams = config.GetChainParams(); int nLoaded = 0; try { // This takes over fileIn and calls fclose() on it in the CBufferedFile // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there // so any transaction can fit in the buffer. CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK, CLIENT_VERSION); uint64_t nRewind = blkdat.GetPos(); while (!blkdat.eof()) { boost::this_thread::interruption_point(); blkdat.SetPos(nRewind); // Start one byte further next time, in case of failure. nRewind++; // Remove former limit. blkdat.SetLimit(); unsigned int nSize = 0; try { // Locate a header. uint8_t buf[CMessageHeader::MESSAGE_START_SIZE]; blkdat.FindByte(chainparams.DiskMagic()[0]); nRewind = blkdat.GetPos() + 1; blkdat >> FLATDATA(buf); if (memcmp(buf, std::begin(chainparams.DiskMagic()), CMessageHeader::MESSAGE_START_SIZE)) { continue; } // Read size. blkdat >> nSize; if (nSize < 80) { continue; } } catch (const std::exception &) { // No valid block header found; don't complain. 
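// (The header scan above walks the file one byte at a time looking for the // network's disk magic; hitting EOF or a short read throws, lands here, // and simply ends the import of this file.)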
break; } try { // read block uint64_t nBlockPos = blkdat.GetPos(); if (dbp) { dbp->nPos = nBlockPos; } blkdat.SetLimit(nBlockPos + nSize); blkdat.SetPos(nBlockPos); std::shared_ptr pblock = std::make_shared(); CBlock &block = *pblock; blkdat >> block; nRewind = blkdat.GetPos(); // detect out of order blocks, and store them for later uint256 hash = block.GetHash(); if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) { LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), block.hashPrevBlock.ToString()); if (dbp) { mapBlocksUnknownParent.insert( std::make_pair(block.hashPrevBlock, *dbp)); } continue; } // process in case the block isn't known yet if (mapBlockIndex.count(hash) == 0 || !mapBlockIndex[hash]->nStatus.hasData()) { LOCK(cs_main); CValidationState state; if (AcceptBlock(config, pblock, state, nullptr, true, dbp, nullptr)) { nLoaded++; } if (state.IsError()) { break; } } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) { LogPrint( BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight); } // Activate the genesis block so normal node progress can // continue if (hash == chainparams.GetConsensus().hashGenesisBlock) { CValidationState state; if (!ActivateBestChain(config, state)) { break; } } NotifyHeaderTip(); // Recursively process earlier encountered successors of this // block std::deque queue; queue.push_back(hash); while (!queue.empty()) { uint256 head = queue.front(); queue.pop_front(); std::pair::iterator, std::multimap::iterator> range = mapBlocksUnknownParent.equal_range(head); while (range.first != range.second) { std::multimap::iterator it = range.first; std::shared_ptr pblockrecursive = std::make_shared(); if (ReadBlockFromDisk(*pblockrecursive, it->second, config)) { LogPrint( BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(), head.ToString()); LOCK(cs_main); CValidationState dummy; if (AcceptBlock(config, pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) { nLoaded++; queue.push_back(pblockrecursive->GetHash()); } } range.first++; mapBlocksUnknownParent.erase(it); NotifyHeaderTip(); } } } catch (const std::exception &e) { LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what()); } } } catch (const std::runtime_error &e) { AbortNode(std::string("System error: ") + e.what()); } if (nLoaded > 0) { LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart); } return nLoaded > 0; } static void CheckBlockIndex(const Consensus::Params &consensusParams) { if (!fCheckBlockIndex) { return; } LOCK(cs_main); // During a reindex, we read the genesis block and call CheckBlockIndex // before ActivateBestChain, so we have the genesis block in mapBlockIndex // but no active chain. (A few of the tests when iterating the block tree // require that chainActive has been initialized.) if (chainActive.Height() < 0) { assert(mapBlockIndex.size() <= 1); return; } // Build forward-pointing map of the entire block tree. 
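// (mapBlockIndex only records backward pprev links; the multimap built // below inverts them into parent -> child edges so the depth-first walk // can descend from genesis.)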
std::multimap forward; for (const std::pair &it : mapBlockIndex) { forward.emplace(it.second->pprev, it.second); } assert(forward.size() == mapBlockIndex.size()); std::pair::iterator, std::multimap::iterator> rangeGenesis = forward.equal_range(nullptr); CBlockIndex *pindex = rangeGenesis.first->second; rangeGenesis.first++; // There is only one index entry with parent nullptr. assert(rangeGenesis.first == rangeGenesis.second); // Iterate over the entire block tree, using depth-first search. // Along the way, remember whether there are blocks on the path from genesis // block being explored which are the first to have certain properties. size_t nNodes = 0; int nHeight = 0; // Oldest ancestor of pindex which is invalid. CBlockIndex *pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is parked. CBlockIndex *pindexFirstParked = nullptr; // Oldest ancestor of pindex which does not have data available. CBlockIndex *pindexFirstMissing = nullptr; // Oldest ancestor of pindex for which nTx == 0. CBlockIndex *pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE // (regardless of being valid or not). CBlockIndex *pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS // (regardless of being valid or not). CBlockIndex *pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN // (regardless of being valid or not). CBlockIndex *pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS // (regardless of being valid or not). CBlockIndex *pindexFirstNotScriptsValid = nullptr; while (pindex != nullptr) { nNodes++; if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) { pindexFirstInvalid = pindex; } if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) { pindexFirstParked = pindex; } if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) { pindexFirstMissing = pindex; } if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) { pindexFirstNeverProcessed = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::TREE) { pindexFirstNotTreeValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) { pindexFirstNotTransactionsValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::CHAIN) { pindexFirstNotChainValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) { pindexFirstNotScriptsValid = pindex; } // Begin: actual consistency checks. if (pindex->pprev == nullptr) { // Genesis block checks. // Genesis block's hash must match. assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // The current active chain's genesis block must be this block. assert(pindex == chainActive.Genesis()); } if (pindex->nChainTx == 0) { // nSequenceId can't be set positive for blocks that aren't linked // (negative is used for preciousblock) assert(pindex->nSequenceId <= 0); } // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0 // (or VALID_TRANSACTIONS) if no pruning has occurred. 
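// Example of why the branches below differ: after pruning, a processed // block keeps nTx > 0 and BLOCK_VALID_TRANSACTIONS but loses HAVE_DATA, so // HAVE_DATA <=> nTx > 0 only holds on a node that has never pruned.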
if (!fHavePruned) { // If we've never pruned, then HAVE_DATA should be equivalent to nTx // > 0 assert(!pindex->nStatus.hasData() == (pindex->nTx == 0)); assert(pindexFirstMissing == pindexFirstNeverProcessed); } else if (pindex->nStatus.hasData()) { // If we have pruned, then we can only say that HAVE_DATA implies // nTx > 0 assert(pindex->nTx > 0); } if (pindex->nStatus.hasUndo()) { assert(pindex->nStatus.hasData()); } // This is pruning-independent. assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) == (pindex->nTx > 0)); // All parents having had data (at some point) is equivalent to all // parents being VALID_TRANSACTIONS, which is equivalent to nChainTx // being set. // nChainTx != 0 is used to signal that all parent blocks have been // processed (but may have been pruned). assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0)); // nHeight must be consistent. assert(pindex->nHeight == nHeight); // For every block except the genesis block, the chainwork must be // larger than the parent's. assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // The pskip pointer must point back for all but the first 2 blocks. assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // All mapBlockIndex entries must at least be TREE valid assert(pindexFirstNotTreeValid == nullptr); if (pindex->nStatus.getValidity() >= BlockValidity::TREE) { // TREE valid implies all parents are TREE valid assert(pindexFirstNotTreeValid == nullptr); } if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) { // CHAIN valid implies all parents are CHAIN valid assert(pindexFirstNotChainValid == nullptr); } if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) { // SCRIPTS valid implies all parents are SCRIPTS valid assert(pindexFirstNotScriptsValid == nullptr); } if (pindexFirstInvalid == nullptr) { // Checks for not-invalid blocks. // The failed mask cannot be set for blocks without invalid parents. assert(!pindex->nStatus.isInvalid()); } if (pindexFirstParked == nullptr) { // Checks for not-parked blocks. // The parked mask cannot be set for blocks without parked parents. assert(!pindex->nStatus.isOnParkedChain()); } if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) { if (pindexFirstInvalid == nullptr) { // If this block sorts at least as well as the current tip and // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates. chainActive.Tip() must also be there // even if some data has been pruned. if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) { assert(setBlockIndexCandidates.count(pindex)); } // If some parent is missing, then it could be that this block // was in setBlockIndexCandidates but had to be removed because // of the missing data. In this case it must be in // mapBlocksUnlinked -- see test below. } } else { // If this block sorts worse than the current tip or some ancestor's // block has never been seen, it cannot be in // setBlockIndexCandidates. assert(setBlockIndexCandidates.count(pindex) == 0); } // Check whether this block is in mapBlocksUnlinked.
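// (mapBlocksUnlinked holds parent -> child entries for blocks whose data // arrived before some ancestor's data; scan the range keyed by this // block's parent to see whether this block is recorded there.)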
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
            rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && pindex->nStatus.hasData() &&
            pindexFirstNeverProcessed != nullptr &&
            pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never
            // received, and it has no invalid parents, it must be in
            // mapBlocksUnlinked.
            assert(foundInUnlinked);
        }
        if (!pindex->nStatus.hasData()) {
            // Can't be in mapBlocksUnlinked if we don't HAVE_DATA.
            assert(!foundInUnlinked);
        }
        if (pindexFirstMissing == nullptr) {
            // We aren't missing data for any parent -- cannot be in
            // mapBlocksUnlinked.
            assert(!foundInUnlinked);
        }
        if (pindex->pprev && pindex->nStatus.hasData() &&
            pindexFirstNeverProcessed == nullptr &&
            pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents
            // at some point, but we're currently missing data for some
            // parent. We must have pruned.
            assert(fHavePruned);
            // This block may have entered mapBlocksUnlinked if:
            // - it has a descendant that at some point had more work than
            //   the tip, and
            // - we tried switching to that descendant but were missing data
            //   for some intermediate block between chainActive and the tip.
            // So if this block is itself better than chainActive.Tip() and it
            // wasn't in setBlockIndexCandidates, then it must be in
            // mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) &&
                setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // Perhaps too slow
        // assert(pindex->GetBlockHash() ==
        //        pindex->GetBlockHeader().GetHash());
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
            range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node. Move upwards until we reach a node of which
        // we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the
            // corresponding variable.
            if (pindex == pindexFirstInvalid) {
                pindexFirstInvalid = nullptr;
            }
            if (pindex == pindexFirstParked) {
                pindexFirstParked = nullptr;
            }
            if (pindex == pindexFirstMissing) {
                pindexFirstMissing = nullptr;
            }
            if (pindex == pindexFirstNeverProcessed) {
                pindexFirstNeverProcessed = nullptr;
            }
            if (pindex == pindexFirstNotTreeValid) {
                pindexFirstNotTreeValid = nullptr;
            }
            if (pindex == pindexFirstNotTransactionsValid) {
                pindexFirstNotTransactionsValid = nullptr;
            }
            if (pindex == pindexFirstNotChainValid) {
                pindexFirstNotChainValid = nullptr;
            }
            if (pindex == pindexFirstNotScriptsValid) {
                pindexFirstNotScriptsValid = nullptr;
            }
            // Find our parent.
            CBlockIndex *pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                      std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                // Our parent must have at least the node we're coming from as
                // child.
                assert(rangePar.first != rangePar.second);
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
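            // If the parent has another child after the one we just visited,
            // move to that sibling; otherwise unwind one level and continue
            // the search from the parent.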
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}

std::string CBlockFileInfo::ToString() const {
    return strprintf(
        "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
        nBlocks, nSize, nHeightFirst, nHeightLast,
        DateTimeStrFormat("%Y-%m-%d", nTimeFirst),
        DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}

CBlockFileInfo *GetBlockFileInfo(size_t n) {
    return &vinfoBlockFile.at(n);
}

static const uint64_t MEMPOOL_DUMP_VERSION = 1;

bool LoadMempool(const Config &config) {
    int64_t nExpiryTimeout =
        gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf(
            "Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    int64_t count = 0;
    int64_t skipped = 0;
    int64_t failed = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }

        uint64_t num;
        file >> num;
        double prioritydummy = 0;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            Amount amountdelta = nFeeDelta * SATOSHI;
            if (amountdelta != Amount::zero()) {
                mempool.PrioritiseTransaction(tx->GetId(),
                                              tx->GetId().ToString(),
                                              prioritydummy, amountdelta);
            }
            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                AcceptToMemoryPoolWithTime(config, mempool, state, tx, true,
                                           nullptr, nTime);
                if (state.IsValid()) {
                    ++count;
                } else {
                    ++failed;
                }
            } else {
                ++skipped;
            }

            if (ShutdownRequested()) {
                return false;
            }
        }
        std::map<uint256, Amount> mapDeltas;
        file >> mapDeltas;

        for (const auto &i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.first.ToString(),
                                          prioritydummy, i.second);
        }
    } catch (const std::exception &e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing "
                  "anyway.\n",
                  e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i successes, %i "
              "failed, %i expired\n",
              count, failed, skipped);
    return true;
}

void DumpMempool(void) {
    int64_t start = GetTimeMicros();

    std::map<uint256, Amount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

    {
        LOCK(mempool.cs);
        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second.second;
        }

        vinfo = mempool.infoAll();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE *filestr =
            fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
        if (!filestr) {
            return;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << uint64_t(vinfo.size());
        for (const auto &i : vinfo) {
            file << *(i.tx);
            file << int64_t(i.nTime);
            file << i.nFeeDelta;
            mapDeltas.erase(i.tx->GetId());
        }

        file << mapDeltas;
        FileCommit(file.Get());
        file.fclose();
        RenameOver(GetDataDir() / "mempool.dat.new",
                   GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n",
                  (mid - start) * 0.000001, (last - mid) * 0.000001);
    } catch (const std::exception &e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n",
                  e.what());
    }
}
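// For reference, the mempool.dat layout written by DumpMempool and read back
// by LoadMempool is, in serialization order:
//   uint64_t version  -- must equal MEMPOOL_DUMP_VERSION
//   uint64_t count    -- number of transaction records that follow
//   count records of  { transaction, int64_t acceptance time,
//                       int64_t fee delta in satoshis }
//   a txid -> fee delta map for prioritised transactions that are not
//   currently in the pool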
//! Guess how far we are in the verification process at the given block index.
double GuessVerificationProgress(const ChainTxData &data,
                                 CBlockIndex *pindex) {
    if (pindex == nullptr) {
        return 0.0;
    }

    int64_t nNow = time(nullptr);

    double fTxTotal;
    if (pindex->nChainTx <= data.nTxCount) {
        fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
    } else {
        fTxTotal =
            pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
    }

    return pindex->nChainTx / fTxTotal;
}

class CMainCleanup {
public:
    CMainCleanup() {}
    ~CMainCleanup() {
        // block headers
        for (const std::pair<const uint256, CBlockIndex *> &it :
             mapBlockIndex) {
            delete it.second;
        }
        mapBlockIndex.clear();
    }
} instance_of_cmaincleanup;
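// Worked example for GuessVerificationProgress above, using made-up numbers:
// with data = {nTime = 1.5e9, nTxCount = 250e6, dTxRate = 3.5} and a tip
// whose nChainTx = 200e6 (<= nTxCount), the expected total is
//   fTxTotal = 250e6 + (nNow - 1.5e9) * 3.5
// and the function returns 200e6 / fTxTotal, which approaches 1.0 as the
// tip's nChainTx catches up with the extrapolated transaction count.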