diff --git a/doc/release-notes.md b/doc/release-notes.md
index 302189fc5..f2b2d4e27 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,9 +1,10 @@
Bitcoin ABC version 0.17.3 is now available from:
This release includes the following features and fixes:
- Remove the bip9params configuration.
- Remove the bip9_softforks result from the getblockchaininfo RPC call.
- Remove the rules, vbavailable and vbrequired result from the getblocktemplate RPC call.
- Remove the rules argument from the getblocktemplate RPC call.
+ - Log difference between block header time and received time when competing blocks are received for the same chain height.
diff --git a/src/validation.cpp b/src/validation.cpp
index b2e87b840..95ef866f6 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,5543 +1,5541 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Copyright (c) 2017-2018 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "validation.h"
#include "arith_uint256.h"
#include "blockindexworkcomparator.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "config.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "fs.h"
#include "hash.h"
#include "init.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "pow.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "random.h"
#include "script/script.h"
#include "script/scriptcache.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "timedata.h"
#include "tinyformat.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "undo.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "warnings.h"
#include <atomic>
#include <cmath>
#include <set>
#include <utility>
#include <vector>

#include <boost/algorithm/string/replace.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
#error "Bitcoin cannot be compiled without assertions."
#endif
/**
* Global state
*/
CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = nullptr;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;
Amount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CTxMemPool mempool;
static void CheckBlockIndex(const Consensus::Params &consensusParams);
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
const std::string strMessageMagic = "Bitcoin Signed Message:\n";
// Internal stuff
namespace {
CBlockIndex *pindexBestInvalid;
/**
* The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself
* and all ancestors) and as good as our current tip or better. Entries may be
* failed, though, and pruning nodes may be missing the data for the block.
*/
std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
/**
* All pairs A->B, where A (or one of its ancestors) misses transactions, but B
* has transactions. Pruned nodes may have entries where B is missing data.
*/
std::multimap<CBlockIndex *, CBlockIndex *> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/**
* Global flag to indicate we should check to see if there are block/undo files
* that should be deleted. Set on startup or if we allocate more file space when
* we're in prune mode.
*/
bool fCheckForPruning = false;
/**
* Every received block is assigned a unique and increasing identifier, so we
* know which one to give priority in case of a fork.
*/
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
int32_t nBlockSequenceId = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
/** Dirty block index entries. */
std::set<CBlockIndex *> setDirtyBlockIndex;
/** Dirty block file entries. */
std::set<int> setDirtyFileInfo;
} // namespace
CBlockIndex *FindForkInGlobalIndex(const CChain &chain,
const CBlockLocator &locator) {
// Find the first block the caller has in the main chain
for (const uint256 &hash : locator.vHave) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end()) {
CBlockIndex *pindex = (*mi).second;
if (chain.Contains(pindex)) {
return pindex;
}
if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
return chain.Tip();
}
}
}
return chain.Genesis();
}
CCoinsViewCache *pcoinsTip = nullptr;
CBlockTreeDB *pblocktree = nullptr;
enum FlushStateMode {
FLUSH_STATE_NONE,
FLUSH_STATE_IF_NEEDED,
FLUSH_STATE_PERIODIC,
FLUSH_STATE_ALWAYS
};
// See definition for documentation
static bool FlushStateToDisk(const CChainParams &chainParams,
CValidationState &state, FlushStateMode mode,
int nManualPruneHeight = 0);
static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
int nManualPruneHeight);
static void FindFilesToPrune(std::set<int> &setFilesToPrune,
uint64_t nPruneAfterHeight);
static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
static uint32_t GetBlockScriptFlags(const Config &config,
const CBlockIndex *pChainTip);
static bool IsFinalTx(const CTransaction &tx, int nBlockHeight,
int64_t nBlockTime) {
if (tx.nLockTime == 0) {
return true;
}
int64_t lockTime = tx.nLockTime;
int64_t lockTimeLimit =
(lockTime < LOCKTIME_THRESHOLD) ? nBlockHeight : nBlockTime;
if (lockTime < lockTimeLimit) {
return true;
}
for (const auto &txin : tx.vin) {
if (txin.nSequence != CTxIn::SEQUENCE_FINAL) {
return false;
}
}
return true;
}
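// Worked example for IsFinalTx (illustrative, not in the original source):
// LOCKTIME_THRESHOLD is 500000000, so nLockTime = 400000 is interpreted as a
// block height while nLockTime = 1500000000 is interpreted as a Unix time.
// A transaction with nLockTime = 400000 is final once evaluated at
// nBlockHeight 400001, or at any height if every input's nSequence equals
// SEQUENCE_FINAL (0xffffffff), which disables nLockTime entirely.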
/**
* Calculates the block height and previous block's median time past at
* which the transaction will be considered final in the context of BIP 68.
* Also removes from the vector of input heights any entries which did not
* correspond to sequence locked inputs as they do not affect the calculation.
*/
static std::pair<int, int64_t>
CalculateSequenceLocks(const CTransaction &tx, int flags,
std::vector<int> *prevHeights,
const CBlockIndex &block) {
assert(prevHeights->size() == tx.vin.size());
// Will be set to the equivalent height- and time-based nLockTime
// values that would be necessary to satisfy all relative lock-
// time constraints given our view of block chain history.
// The semantics of nLockTime are the last invalid height/time, so
// use -1 to have the effect of any height or time being valid.
int nMinHeight = -1;
int64_t nMinTime = -1;
// tx.nVersion is signed integer so requires cast to unsigned otherwise
// we would be doing a signed comparison and half the range of nVersion
// wouldn't support BIP 68.
bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2 &&
flags & LOCKTIME_VERIFY_SEQUENCE;
// Do not enforce sequence numbers as a relative lock time
// unless we have been instructed to
if (!fEnforceBIP68) {
return std::make_pair(nMinHeight, nMinTime);
}
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
const CTxIn &txin = tx.vin[txinIndex];
// Sequence numbers with the most significant bit set are not
// treated as relative lock-times, nor are they given any
// consensus-enforced meaning at this point.
if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
// The height of this input is not relevant for sequence locks
(*prevHeights)[txinIndex] = 0;
continue;
}
int nCoinHeight = (*prevHeights)[txinIndex];
if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight - 1, 0))
->GetMedianTimePast();
// NOTE: Subtract 1 to maintain nLockTime semantics.
// BIP 68 relative lock times have the semantics of calculating the
// first block or time at which the transaction would be valid. When
// calculating the effective block time or height for the entire
// transaction, we switch to using the semantics of nLockTime which
// is the last invalid block time or height. Thus we subtract 1 from
// the calculated time or height.
// Time-based relative lock-times are measured from the smallest
// allowed timestamp of the block containing the txout being spent,
// which is the median time past of the block prior.
nMinTime = std::max(
nMinTime,
nCoinTime +
(int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK)
<< CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) -
1);
} else {
nMinHeight = std::max(
nMinHeight,
nCoinHeight +
(int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
}
}
return std::make_pair(nMinHeight, nMinTime);
}
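// Worked example for CalculateSequenceLocks (illustrative): with
// SEQUENCE_LOCKTIME_GRANULARITY = 9, a time-based nSequence of
// (SEQUENCE_LOCKTIME_TYPE_FLAG | 3) encodes 3 << 9 = 1536 seconds. If the
// spent coin was confirmed at height 1000, nMinTime becomes the median time
// past of block 999 plus 1536 - 1 seconds. A height-based nSequence of 10 on
// the same coin would instead set nMinHeight to 1000 + 10 - 1 = 1009.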
static bool EvaluateSequenceLocks(const CBlockIndex &block,
std::pair<int, int64_t> lockPair) {
assert(block.pprev);
int64_t nBlockTime = block.pprev->GetMedianTimePast();
if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime) {
return false;
}
return true;
}
bool SequenceLocks(const CTransaction &tx, int flags,
std::vector<int> *prevHeights, const CBlockIndex &block) {
return EvaluateSequenceLocks(
block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints *lp) {
AssertLockHeld(cs_main);
assert(lp);
// If there are relative lock times then the maxInputBlock will be set
// If there are no relative lock times, the LockPoints don't depend on the
// chain
if (lp->maxInputBlock) {
// Check whether chainActive is an extension of the block at which the
// LockPoints
// calculation was valid. If not LockPoints are no longer valid
if (!chainActive.Contains(lp->maxInputBlock)) {
return false;
}
}
// LockPoints still valid
return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints *lp,
bool useExistingLockPoints) {
AssertLockHeld(cs_main);
AssertLockHeld(mempool.cs);
CBlockIndex *tip = chainActive.Tip();
CBlockIndex index;
index.pprev = tip;
// CheckSequenceLocks() uses chainActive.Height()+1 to evaluate height based
// locks because when SequenceLocks() is called within ConnectBlock(), the
// height of the block *being* evaluated is what is used. Thus if we want to
// know if a transaction can be part of the *next* block, we need to use one
// more than chainActive.Height()
index.nHeight = tip->nHeight + 1;
std::pair<int, int64_t> lockPair;
if (useExistingLockPoints) {
assert(lp);
lockPair.first = lp->height;
lockPair.second = lp->time;
} else {
// pcoinsTip contains the UTXO set for chainActive.Tip()
CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
std::vector<int> prevheights;
prevheights.resize(tx.vin.size());
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
const CTxIn &txin = tx.vin[txinIndex];
Coin coin;
if (!viewMemPool.GetCoin(txin.prevout, coin)) {
return error("%s: Missing input", __func__);
}
if (coin.GetHeight() == MEMPOOL_HEIGHT) {
// Assume all mempool transactions confirm in the next block.
prevheights[txinIndex] = tip->nHeight + 1;
} else {
prevheights[txinIndex] = coin.GetHeight();
}
}
lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
if (lp) {
lp->height = lockPair.first;
lp->time = lockPair.second;
// Also store the hash of the block with the highest height of all
// the blocks which have sequence locked prevouts. This hash needs
// to still be on the chain for these LockPoint calculations to be
// valid.
// Note: It is impossible to correctly calculate a maxInputBlock if
// any of the sequence locked inputs depend on unconfirmed txs,
// except in the special case where the relative lock time/height is
// 0, which is equivalent to no sequence lock. Since we assume input
// height of tip+1 for mempool txs and test the resulting lockPair
// from CalculateSequenceLocks against tip+1, we know
// EvaluateSequenceLocks will fail if there was a non-zero sequence
// lock on a mempool input, so we can use the return value of
// CheckSequenceLocks to indicate the LockPoints validity.
int maxInputHeight = 0;
for (int height : prevheights) {
// Can ignore mempool inputs since we'll fail if they had
// non-zero locks.
if (height != tip->nHeight + 1) {
maxInputHeight = std::max(maxInputHeight, height);
}
}
lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
}
}
return EvaluateSequenceLocks(index, lockPair);
}
uint64_t GetSigOpCountWithoutP2SH(const CTransaction &tx, uint32_t flags) {
uint64_t nSigOps = 0;
for (const auto &txin : tx.vin) {
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
for (const auto &txout : tx.vout) {
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
uint64_t GetP2SHSigOpCount(const CTransaction &tx, const CCoinsViewCache &view,
uint32_t flags) {
if ((flags & SCRIPT_VERIFY_P2SH) == 0 || tx.IsCoinBase()) {
return 0;
}
uint64_t nSigOps = 0;
for (auto &i : tx.vin) {
const CTxOut &prevout = view.GetOutputFor(i);
if (prevout.scriptPubKey.IsPayToScriptHash()) {
nSigOps += prevout.scriptPubKey.GetSigOpCount(i.scriptSig);
}
}
return nSigOps;
}
uint64_t GetTransactionSigOpCount(const CTransaction &tx,
const CCoinsViewCache &view, uint32_t flags) {
return GetSigOpCountWithoutP2SH(tx, flags) +
GetP2SHSigOpCount(tx, view, flags);
}
static bool CheckTransactionCommon(const CTransaction &tx,
CValidationState &state,
bool fCheckDuplicateInputs) {
// Basic checks that don't depend on any context
if (tx.vin.empty()) {
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
}
if (tx.vout.empty()) {
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
}
// Size limit
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
}
// Check for negative or overflow output values
Amount nValueOut(0);
for (const auto &txout : tx.vout) {
if (txout.nValue < Amount(0)) {
return state.DoS(100, false, REJECT_INVALID,
"bad-txns-vout-negative");
}
if (txout.nValue > MAX_MONEY) {
return state.DoS(100, false, REJECT_INVALID,
"bad-txns-vout-toolarge");
}
nValueOut += txout.nValue;
if (!MoneyRange(nValueOut)) {
return state.DoS(100, false, REJECT_INVALID,
"bad-txns-txouttotal-toolarge");
}
}
if (GetSigOpCountWithoutP2SH(tx, STANDARD_CHECKDATASIG_VERIFY_FLAGS) >
MAX_TX_SIGOPS_COUNT) {
return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops");
}
// Check for duplicate inputs - note that this check is slow so we skip it
// in CheckBlock
if (fCheckDuplicateInputs) {
std::set<COutPoint> vInOutPoints;
for (const auto &txin : tx.vin) {
if (!vInOutPoints.insert(txin.prevout).second) {
return state.DoS(100, false, REJECT_INVALID,
"bad-txns-inputs-duplicate");
}
}
}
return true;
}
bool CheckCoinbase(const CTransaction &tx, CValidationState &state,
bool fCheckDuplicateInputs) {
if (!tx.IsCoinBase()) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false,
"first tx is not coinbase");
}
if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) {
// CheckTransactionCommon fills in the state.
return false;
}
if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
}
return true;
}
bool CheckRegularTransaction(const CTransaction &tx, CValidationState &state,
bool fCheckDuplicateInputs) {
if (tx.IsCoinBase()) {
return state.DoS(100, false, REJECT_INVALID, "bad-tx-coinbase");
}
if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) {
// CheckTransactionCommon fills in the state.
return false;
}
for (const auto &txin : tx.vin) {
if (txin.prevout.IsNull()) {
return state.DoS(10, false, REJECT_INVALID,
"bad-txns-prevout-null");
}
}
return true;
}
static void LimitMempoolSize(CTxMemPool &pool, size_t limit,
unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0) {
LogPrint(BCLog::MEMPOOL,
"Expired %i transactions from the memory pool\n", expired);
}
std::vector<COutPoint> vNoSpendsRemaining;
pool.TrimToSize(limit, &vNoSpendsRemaining);
for (const COutPoint &removed : vNoSpendsRemaining) {
pcoinsTip->Uncache(removed);
}
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state) {
return strprintf(
"%s%s (code %i)", state.GetRejectReason(),
state.GetDebugMessage().empty() ? "" : ", " + state.GetDebugMessage(),
state.GetRejectCode());
}
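// Example output for FormatStateMessage (illustrative): a state with reject
// reason "bad-txns-in-belowout", debug message
// "value in (0.5) < value out (1.0)" and reject code REJECT_INVALID formats
// as "bad-txns-in-belowout, value in (0.5) < value out (1.0) (code 16)",
// assuming REJECT_INVALID == 0x10.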
static bool IsCurrentForFeeEstimation() {
AssertLockHeld(cs_main);
if (IsInitialBlockDownload()) {
return false;
}
if (chainActive.Tip()->GetBlockTime() <
(GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) {
return false;
}
if (chainActive.Height() < pindexBestHeader->nHeight - 1) {
return false;
}
return true;
}
static bool IsUAHFenabled(const Config &config, int nHeight) {
return nHeight >= config.GetChainParams().GetConsensus().uahfHeight;
}
bool IsUAHFenabled(const Config &config, const CBlockIndex *pindexPrev) {
if (pindexPrev == nullptr) {
return false;
}
return IsUAHFenabled(config, pindexPrev->nHeight);
}
static bool IsDAAEnabled(const Config &config, int nHeight) {
return nHeight >= config.GetChainParams().GetConsensus().daaHeight;
}
bool IsDAAEnabled(const Config &config, const CBlockIndex *pindexPrev) {
if (pindexPrev == nullptr) {
return false;
}
return IsDAAEnabled(config, pindexPrev->nHeight);
}
static bool IsMagneticAnomalyEnabled(const Config &config,
int64_t nMedianTimePast) {
return nMedianTimePast >= gArgs.GetArg("-magneticanomalyactivationtime",
config.GetChainParams()
.GetConsensus()
.magneticAnomalyActivationTime);
}
bool IsMagneticAnomalyEnabled(const Config &config,
const CBlockIndex *pindexPrev) {
if (pindexPrev == nullptr) {
return false;
}
return IsMagneticAnomalyEnabled(config, pindexPrev->GetMedianTimePast());
}
static bool IsReplayProtectionEnabled(const Config &config,
int64_t nMedianTimePast) {
return nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
config.GetChainParams()
.GetConsensus()
.magneticAnomalyActivationTime);
}
static bool IsReplayProtectionEnabled(const Config &config,
const CBlockIndex *pindexPrev) {
if (pindexPrev == nullptr) {
return false;
}
return IsReplayProtectionEnabled(config, pindexPrev->GetMedianTimePast());
}
static bool IsReplayProtectionEnabledForCurrentBlock(const Config &config) {
AssertLockHeld(cs_main);
return IsReplayProtectionEnabled(config, chainActive.Tip());
}
/**
* Make mempool consistent after a reorg, by re-adding or recursively erasing
* disconnected block transactions from the mempool, and also removing any other
* transactions from the mempool that are no longer valid given the new
* tip/height.
*
* Note: we assume that disconnectpool only contains transactions that are NOT
* confirmed in the current chain nor already in the mempool (otherwise,
* in-mempool descendants of such transactions would be removed).
*
* Passing fAddToMempool=false will skip trying to add the transactions back,
* and instead just erase from the mempool as needed.
*/
void UpdateMempoolForReorg(const Config &config,
DisconnectedBlockTransactions &disconnectpool,
bool fAddToMempool) {
AssertLockHeld(cs_main);
std::vector<TxId> vHashUpdate;
// disconnectpool's insertion_order index sorts the entries from oldest to
// newest, but the oldest entry will be the last tx from the latest mined
// block that was disconnected.
// Iterate disconnectpool in reverse, so that we add transactions back to
// the mempool starting with the earliest transaction that had been
// previously seen in a block.
auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
if (!fAddToMempool || (*it)->IsCoinBase() ||
!AcceptToMemoryPool(config, mempool, stateDummy, *it, false,
nullptr, true)) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
} else if (mempool.exists((*it)->GetId())) {
vHashUpdate.push_back((*it)->GetId());
}
++it;
}
disconnectpool.queuedTx.clear();
// AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
// no in-mempool children, which is generally not true when adding
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in the
// disconnectpool that were added back and cleans up the mempool state.
mempool.UpdateTransactionsFromBlock(vHashUpdate);
// We also need to remove any now-immature transactions
mempool.removeForReorg(config, pcoinsTip, chainActive.Tip()->nHeight + 1,
STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
LimitMempoolSize(
mempool,
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
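// Illustrative ordering sketch for UpdateMempoolForReorg (not in the original
// source): if blocks B1 then B2 (the tip) were disconnected, reverse iteration
// over the insertion_order index visits B1's transactions in block order
// before B2's, so a parent mined before its child is offered back to the
// mempool first and the child is not spuriously treated as an orphan.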
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
static bool
CheckInputsFromMempoolAndCache(const CTransaction &tx, CValidationState &state,
const CCoinsViewCache &view, CTxMemPool &pool,
const uint32_t flags, bool cacheSigStore,
PrecomputedTransactionData &txdata) {
AssertLockHeld(cs_main);
// pool.cs should be locked already, but go ahead and re-take the lock here
// to enforce that mempool doesn't change between when we check the view and
// when we actually call through to CheckInputs
LOCK(pool.cs);
assert(!tx.IsCoinBase());
for (const CTxIn &txin : tx.vin) {
const Coin &coin = view.AccessCoin(txin.prevout);
// At this point we haven't actually checked if the coins are all
// available (or shouldn't assume we have, since CheckInputs does). So
// we just return failure if the inputs are not available here, and then
// only have to check equivalence for available inputs.
if (coin.IsSpent()) {
return false;
}
const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
if (txFrom) {
assert(txFrom->GetHash() == txin.prevout.GetTxId());
assert(txFrom->vout.size() > txin.prevout.GetN());
assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
} else {
const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
assert(!coinFromDisk.IsSpent());
assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
}
}
return CheckInputs(tx, state, view, true, flags, cacheSigStore, true,
txdata);
}
static bool AcceptToMemoryPoolWorker(
const Config &config, CTxMemPool &pool, CValidationState &state,
const CTransactionRef &ptx, bool fLimitFree, bool *pfMissingInputs,
int64_t nAcceptTime, bool fOverrideMempoolLimit, const Amount nAbsurdFee,
std::vector<COutPoint> &coins_to_uncache) {
AssertLockHeld(cs_main);
const CTransaction &tx = *ptx;
const TxId txid = tx.GetId();
if (pfMissingInputs) {
*pfMissingInputs = false;
}
// Coinbase is only valid in a block, not as a loose transaction.
if (!CheckRegularTransaction(tx, state, true)) {
// state filled in by CheckRegularTransaction.
return false;
}
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
if (fRequireStandard && !IsStandardTx(tx, reason)) {
return state.DoS(0, false, REJECT_NONSTANDARD, reason);
}
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
CValidationState ctxState;
if (!ContextualCheckTransactionForCurrentBlock(
config, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) {
// We copy the state from a dummy to ensure we don't increase the ban
// score of the peer for a transaction that could be valid in the future.
return state.DoS(
0, false, REJECT_NONSTANDARD, ctxState.GetRejectReason(),
ctxState.CorruptionPossible(), ctxState.GetDebugMessage());
}
// Is it already in the memory pool?
if (pool.exists(txid)) {
return state.Invalid(false, REJECT_ALREADY_KNOWN,
"txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
{
// Protect pool.mapNextTx
LOCK(pool.cs);
for (const CTxIn &txin : tx.vin) {
auto itConflicting = pool.mapNextTx.find(txin.prevout);
if (itConflicting != pool.mapNextTx.end()) {
// Disable replacement feature for good
return state.Invalid(false, REJECT_CONFLICT,
"txn-mempool-conflict");
}
}
}
{
CCoinsView dummy;
CCoinsViewCache view(&dummy);
Amount nValueIn(0);
LockPoints lp;
{
LOCK(pool.cs);
CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
view.SetBackend(viewMemPool);
// Do we already have it?
for (size_t out = 0; out < tx.vout.size(); out++) {
COutPoint outpoint(txid, out);
bool had_coin_in_cache = pcoinsTip->HaveCoinInCache(outpoint);
if (view.HaveCoin(outpoint)) {
if (!had_coin_in_cache) {
coins_to_uncache.push_back(outpoint);
}
return state.Invalid(false, REJECT_ALREADY_KNOWN,
"txn-already-known");
}
}
// Do all inputs exist?
for (const CTxIn txin : tx.vin) {
if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
if (!view.HaveCoin(txin.prevout)) {
if (pfMissingInputs) {
*pfMissingInputs = true;
}
// fMissingInputs and !state.IsInvalid() are used to detect this
// condition, so don't set state.Invalid().
return false;
}
}
// Are the actual inputs available?
if (!view.HaveInputs(tx)) {
return state.Invalid(false, REJECT_DUPLICATE,
"bad-txns-inputs-spent");
}
// Bring the best block into scope.
view.GetBestBlock();
nValueIn = view.GetValueIn(tx);
// We have all inputs cached now, so switch back to dummy, so we
// don't need to keep lock on mempool.
view.SetBackend(dummy);
// Only accept BIP68 sequence locked transactions that can be mined
// in the next block; we don't want our mempool filled up with
// transactions that can't be mined yet. Must keep pool.cs for this
// unless we change CheckSequenceLocks to take a CoinsViewCache
// instead of create its own.
if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
return state.DoS(0, false, REJECT_NONSTANDARD,
"non-BIP68-final");
}
}
// Check for non-standard pay-to-script-hash in inputs
if (fRequireStandard && !AreInputsStandard(tx, view)) {
return state.Invalid(false, REJECT_NONSTANDARD,
"bad-txns-nonstandard-inputs");
}
int64_t nSigOpsCount =
GetTransactionSigOpCount(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
Amount nValueOut = tx.GetValueOut();
Amount nFees = nValueIn - nValueOut;
// nModifiedFees includes any fee deltas from PrioritiseTransaction
Amount nModifiedFees = nFees;
double nPriorityDummy = 0;
pool.ApplyDeltas(txid, nPriorityDummy, nModifiedFees);
Amount inChainInputValue;
double dPriority =
view.GetPriority(tx, chainActive.Height(), inChainInputValue);
// Keep track of transactions that spend a coinbase, which we re-scan
// during reorgs to ensure COINBASE_MATURITY is still met.
bool fSpendsCoinbase = false;
for (const CTxIn &txin : tx.vin) {
const Coin &coin = view.AccessCoin(txin.prevout);
if (coin.IsCoinBase()) {
fSpendsCoinbase = true;
break;
}
}
CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, dPriority,
chainActive.Height(), inChainInputValue,
fSpendsCoinbase, nSigOpsCount, lp);
unsigned int nSize = entry.GetTxSize();
// Check that the transaction doesn't have an excessive number of
// sigops, making it impossible to mine. Since the coinbase transaction
// itself can contain sigops, MAX_STANDARD_TX_SIGOPS is less than
// MAX_BLOCK_SIGOPS_PER_MB; we still consider such a transaction invalid
// rather than merely non-standard.
if (nSigOpsCount > MAX_STANDARD_TX_SIGOPS) {
return state.DoS(0, false, REJECT_NONSTANDARD,
"bad-txns-too-many-sigops", false,
strprintf("%d", nSigOpsCount));
}
CFeeRate minRelayTxFee = config.GetMinFeePerKB();
Amount mempoolRejectFee =
pool.GetMinFee(
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
1000000)
.GetFee(nSize);
if (mempoolRejectFee > Amount(0) && nModifiedFees < mempoolRejectFee) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
"mempool min fee not met", false,
strprintf("%d < %d", nFees, mempoolRejectFee));
}
if (gArgs.GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) &&
nModifiedFees < minRelayTxFee.GetFee(nSize) &&
!AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
// Require that free transactions have sufficient priority to be
// mined in the next block.
return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
"insufficient priority");
}
// Continuously rate-limit free (really, very-low-fee) transactions.
// This mitigates 'penny-flooding' -- sending thousands of free
// transactions just to be annoying or make others' transactions take
// longer to confirm.
if (fLimitFree && nModifiedFees < minRelayTxFee.GetFee(nSize)) {
static CCriticalSection csFreeLimiter;
static double dFreeCount;
static int64_t nLastTime;
int64_t nNow = GetTime();
LOCK(csFreeLimiter);
// Use an exponentially decaying ~10-minute window:
dFreeCount *= pow(1.0 - 1.0 / 600.0, double(nNow - nLastTime));
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
if (dFreeCount + nSize >=
gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 *
1000) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
"rate limited free transaction");
}
LogPrint(BCLog::MEMPOOL, "Rate limit dFreeCount: %g => %g\n",
dFreeCount, dFreeCount + nSize);
dFreeCount += nSize;
}
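// Back-of-envelope check of the limiter above (illustrative):
// dFreeCount decays by a factor of (1 - 1/600) per second, giving a
// ~10-minute window, and the cap is -limitfreerelay * 10 * 1000 bytes.
// With -limitfreerelay=15 that is a 150,000-byte burst allowance, i.e.
// roughly 15 kB of free transactions per minute sustained, matching the
// thousand-bytes-per-minute unit.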
if (nAbsurdFee != Amount(0) && nFees > nAbsurdFee) {
return state.Invalid(false, REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
}
// Calculate in-mempool ancestors, up to a limit.
CTxMemPool::setEntries setAncestors;
size_t nLimitAncestors =
gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
size_t nLimitAncestorSize =
gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
1000;
size_t nLimitDescendants =
gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
size_t nLimitDescendantSize =
gArgs.GetArg("-limitdescendantsize",
DEFAULT_DESCENDANT_SIZE_LIMIT) *
1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(
entry, setAncestors, nLimitAncestors, nLimitAncestorSize,
nLimitDescendants, nLimitDescendantSize, errString)) {
return state.DoS(0, false, REJECT_NONSTANDARD,
"too-long-mempool-chain", false, errString);
}
// Set extraFlags as a set of flags that needs to be activated.
uint32_t extraFlags = SCRIPT_VERIFY_NONE;
if (IsReplayProtectionEnabledForCurrentBlock(config)) {
extraFlags |= SCRIPT_ENABLE_REPLAY_PROTECTION;
}
// Check inputs based on the set of flags we activate.
uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
if (!config.GetChainParams().RequireStandard()) {
scriptVerifyFlags =
SCRIPT_ENABLE_SIGHASH_FORKID |
gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
}
// Make sure whatever we need to activate is actually activated.
scriptVerifyFlags |= extraFlags;
// Check against previous transactions. This is done last to help
// prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false,
txdata)) {
// State filled in by CheckInputs.
return false;
}
// Check again against the current block tip's script verification flags
// to cache our script execution flags. This is, of course, useless if
// the next block has different script flags from the previous one, but
// because the cache tracks script flags for us it will auto-invalidate
// and we'll just have a few blocks of extra misses on soft-fork
// activation.
//
// This is also useful in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
// NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks (using TestBlockValidity), however allowing such
// transactions into the mempool can be exploited as a DoS attack.
uint32_t currentBlockScriptVerifyFlags =
GetBlockScriptFlags(config, chainActive.Tip());
if (!CheckInputsFromMempoolAndCache(tx, state, view, pool,
currentBlockScriptVerifyFlags, true,
txdata)) {
// If we're using promiscuousmempoolflags, we may hit this normally.
// Check if current block has some flags that scriptVerifyFlags does
// not before printing an ominous warning.
if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
return error(
"%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against "
"MANDATORY but not STANDARD flags %s, %s",
__func__, txid.ToString(), FormatStateMessage(state));
}
if (!CheckInputs(tx, state, view, true,
MANDATORY_SCRIPT_VERIFY_FLAGS | extraFlags, true,
false, txdata)) {
return error(
"%s: ConnectInputs failed against MANDATORY but not "
"STANDARD flags due to promiscuous mempool %s, %s",
__func__, txid.ToString(), FormatStateMessage(state));
}
LogPrintf("Warning: -promiscuousmempool flags set to not include "
"currently enforced soft forks, this may break mining or "
"otherwise cause instability!\n");
}
// This transaction should only count for fee estimation if
// the node is not behind and it is not dependent on any other
// transactions in the mempool.
bool validForFeeEstimation =
IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
// Store transaction in memory.
pool.addUnchecked(txid, entry, setAncestors, validForFeeEstimation);
// Trim mempool and check if tx was trimmed.
if (!fOverrideMempoolLimit) {
LimitMempoolSize(
pool,
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 *
60);
if (!pool.exists(txid)) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
"mempool full");
}
}
}
GetMainSignals().TransactionAddedToMempool(ptx);
return true;
}
/**
* (try to) add transaction to memory pool with a specified acceptance time.
*/
static bool AcceptToMemoryPoolWithTime(const Config &config, CTxMemPool &pool,
CValidationState &state,
const CTransactionRef &tx,
bool fLimitFree, bool *pfMissingInputs,
int64_t nAcceptTime,
bool fOverrideMempoolLimit = false,
const Amount nAbsurdFee = Amount(0)) {
std::vector<COutPoint> coins_to_uncache;
bool res = AcceptToMemoryPoolWorker(
config, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime,
fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache);
if (!res) {
for (const COutPoint &outpoint : coins_to_uncache) {
pcoinsTip->Uncache(outpoint);
}
}
// After we've (potentially) uncached entries, ensure our coins cache is
// still within its size limits
CValidationState stateDummy;
FlushStateToDisk(config.GetChainParams(), stateDummy, FLUSH_STATE_PERIODIC);
return res;
}
bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
CValidationState &state, const CTransactionRef &tx,
bool fLimitFree, bool *pfMissingInputs,
bool fOverrideMempoolLimit, const Amount nAbsurdFee) {
return AcceptToMemoryPoolWithTime(config, pool, state, tx, fLimitFree,
pfMissingInputs, GetTime(),
fOverrideMempoolLimit, nAbsurdFee);
}
/**
* Return transaction in txOut, and if it was found inside a block, its hash is
* placed in hashBlock.
*/
bool GetTransaction(const Config &config, const TxId &txid,
CTransactionRef &txOut, uint256 &hashBlock,
bool fAllowSlow) {
CBlockIndex *pindexSlow = nullptr;
LOCK(cs_main);
CTransactionRef ptx = mempool.get(txid);
if (ptx) {
txOut = ptx;
return true;
}
if (fTxIndex) {
CDiskTxPos postx;
if (pblocktree->ReadTxIndex(txid, postx)) {
CAutoFile file(OpenBlockFile(postx, true), SER_DISK,
CLIENT_VERSION);
if (file.IsNull()) {
return error("%s: OpenBlockFile failed", __func__);
}
CBlockHeader header;
try {
file >> header;
fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (const std::exception &e) {
return error("%s: Deserialize or I/O error - %s", __func__,
e.what());
}
hashBlock = header.GetHash();
if (txOut->GetId() != txid) {
return error("%s: txid mismatch", __func__);
}
return true;
}
}
// use coin database to locate block that contains transaction, and scan it
if (fAllowSlow) {
const Coin &coin = AccessByTxid(*pcoinsTip, txid);
if (!coin.IsSpent()) {
pindexSlow = chainActive[coin.GetHeight()];
}
}
if (pindexSlow) {
CBlock block;
if (ReadBlockFromDisk(block, pindexSlow, config)) {
for (const auto &tx : block.vtx) {
if (tx->GetId() == txid) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
static bool WriteBlockToDisk(const CBlock &block, CDiskBlockPos &pos,
const CMessageHeader::MessageMagic &messageStart) {
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull()) {
return error("WriteBlockToDisk: OpenBlockFile failed");
}
// Write index header
unsigned int nSize = GetSerializeSize(fileout, block);
fileout << FLATDATA(messageStart) << nSize;
// Write block
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0) {
return error("WriteBlockToDisk: ftell failed");
}
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
return true;
}
bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos,
const Config &config) {
block.SetNull();
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
return error("ReadBlockFromDisk: OpenBlockFile failed for %s",
pos.ToString());
}
// Read block
try {
filein >> block;
} catch (const std::exception &e) {
return error("%s: Deserialize or I/O error - %s at %s", __func__,
e.what(), pos.ToString());
}
// Check the header
if (!CheckProofOfWork(block.GetHash(), block.nBits, config)) {
return error("ReadBlockFromDisk: Errors in block header at %s",
pos.ToString());
}
return true;
}
bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
const Config &config) {
if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), config)) {
return false;
}
if (block.GetHash() != pindex->GetBlockHash()) {
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() "
"doesn't match index for %s at %s",
pindex->ToString(), pindex->GetBlockPos().ToString());
}
return true;
}
Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
// Force block reward to zero when right shift is undefined.
if (halvings >= 64) {
return Amount(0);
}
Amount nSubsidy = 50 * COIN;
// Subsidy is cut in half every 210,000 blocks which will occur
// approximately every 4 years.
return Amount(nSubsidy.GetSatoshis() >> halvings);
}
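// Worked example for GetBlockSubsidy (illustrative): with
// nSubsidyHalvingInterval = 210000, height 209999 still pays 50 COIN,
// height 210000 pays 25 COIN (halvings = 1), and height 420000 pays
// 12.5 COIN. From halvings >= 64 onward the right shift would be undefined
// behaviour on a 64-bit amount, so the subsidy is forced to zero instead.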
bool IsInitialBlockDownload() {
// Once this function has returned false, it must remain false.
static std::atomic<bool> latchToFalse{false};
// Optimization: pre-test latch before taking the lock.
if (latchToFalse.load(std::memory_order_relaxed)) {
return false;
}
LOCK(cs_main);
if (latchToFalse.load(std::memory_order_relaxed)) {
return false;
}
if (fImporting || fReindex) {
return true;
}
if (chainActive.Tip() == nullptr) {
return true;
}
if (chainActive.Tip()->nChainWork < nMinimumChainWork) {
return true;
}
if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
return true;
}
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
latchToFalse.store(true, std::memory_order_relaxed);
return false;
}
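// Illustrative note on the latch idiom above: the relaxed pre-test lets
// steady-state callers skip cs_main entirely once the latch is set, and the
// second load under the lock re-checks so a thread that raced past the first
// test does not repeat the tip checks after another thread has already
// latched. Once this function returns false, it stays false for the life of
// the process.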
CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
static void AlertNotify(const std::string &strMessage) {
uiInterface.NotifyAlertChanged();
std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) {
return;
}
// Alert text should be plain ascii coming from a trusted source, but to be
// safe we first strip anything not in safeChars, then add single quotes
// around the whole string before passing it to the shell:
std::string singleQuote("'");
std::string safeStatus = SanitizeString(strMessage);
safeStatus = singleQuote + safeStatus + singleQuote;
boost::replace_all(strCmd, "%s", safeStatus);
// thread runs free
boost::thread t(runCommand, strCmd);
}
static void CheckForkWarningConditions() {
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before finishing our initial
// sync)
if (IsInitialBlockDownload()) {
return;
}
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one
// mines it) of our head, drop it
if (pindexBestForkTip &&
chainActive.Height() - pindexBestForkTip->nHeight >= 72) {
pindexBestForkTip = nullptr;
}
if (pindexBestForkTip ||
(pindexBestInvalid &&
pindexBestInvalid->nChainWork >
chainActive.Tip()->nChainWork +
(GetBlockProof(*chainActive.Tip()) * 6))) {
if (!GetfLargeWorkForkFound() && pindexBestForkBase) {
std::string warning =
std::string("'Warning: Large-work fork detected, forking after "
"block ") +
pindexBestForkBase->phashBlock->ToString() + std::string("'");
AlertNotify(warning);
}
if (pindexBestForkTip && pindexBestForkBase) {
LogPrintf("%s: Warning: Large valid fork found\n forking the "
"chain at height %d (%s)\n lasting to height %d "
"(%s).\nChain state database corruption likely.\n",
__func__, pindexBestForkBase->nHeight,
pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight,
pindexBestForkTip->phashBlock->ToString());
SetfLargeWorkForkFound(true);
} else {
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
"longer than our best chain.\nChain state database "
"corruption likely.\n",
__func__);
SetfLargeWorkInvalidChainFound(true);
}
} else {
SetfLargeWorkForkFound(false);
SetfLargeWorkInvalidChainFound(false);
}
}
static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip) {
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex *pfork = pindexNewForkTip;
CBlockIndex *plonger = chainActive.Tip();
while (pfork && pfork != plonger) {
while (plonger && plonger->nHeight > pfork->nHeight) {
plonger = plonger->pprev;
}
if (pfork == plonger) {
break;
}
pfork = pfork->pprev;
}
// We define a condition that we should warn the user about: a fork of at
// least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
// it) of ours. We use 7 blocks rather arbitrarily as it represents just
// under 10% of sustained network hash rate operating on the fork, or a
// chain that is entirely longer than ours and invalid (note that this
// should be detected by both). We define it this way because it allows us
// to only store the highest fork tip (+ base) which meets the 7-block
// condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip ||
(pindexBestForkTip &&
pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
pindexNewForkTip->nChainWork - pfork->nChainWork >
(GetBlockProof(*pfork) * 7) &&
chainActive.Height() - pindexNewForkTip->nHeight < 72) {
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
}
CheckForkWarningConditions();
}
static void InvalidChainFound(CBlockIndex *pindexNew) {
if (!pindexBestInvalid ||
pindexNew->nChainWork > pindexBestInvalid->nChainWork) {
pindexBestInvalid = pindexNew;
}
LogPrintf(
"%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble()) / log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.Tip();
assert(tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n",
__func__, tip->GetBlockHash().ToString(), chainActive.Height(),
log(tip->nChainWork.getdouble()) / log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
CheckForkWarningConditions();
}
static void InvalidBlockFound(CBlockIndex *pindex,
const CValidationState &state) {
if (!state.CorruptionPossible()) {
pindex->nStatus = pindex->nStatus.withFailed();
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
}
}
void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight) {
// Mark inputs spent.
if (tx.IsCoinBase()) {
return;
}
txundo.vprevout.reserve(tx.vin.size());
for (const CTxIn &txin : tx.vin) {
txundo.vprevout.emplace_back();
bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
assert(is_spent);
}
}
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight) {
SpendCoins(view, tx, txundo, nHeight);
AddCoins(view, tx, nHeight);
}
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) {
// Mark inputs spent.
if (!tx.IsCoinBase()) {
for (const CTxIn &txin : tx.vin) {
bool is_spent = view.SpendCoin(txin.prevout);
assert(is_spent);
}
}
// Add outputs.
AddCoins(view, tx, nHeight);
}
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
return VerifyScript(scriptSig, scriptPubKey, nFlags,
CachingTransactionSignatureChecker(ptxTo, nIn, amount,
cacheStore, txdata),
&error);
}
int GetSpendHeight(const CCoinsViewCache &inputs) {
LOCK(cs_main);
CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction &tx, CValidationState &state,
const CCoinsViewCache &inputs, int nSpendHeight) {
// This doesn't trigger the DoS code on purpose; if it did, it would make it
// easier for an attacker to attempt to split the network.
if (!inputs.HaveInputs(tx)) {
return state.Invalid(false, 0, "", "Inputs unavailable");
}
Amount nValueIn(0);
Amount nFees(0);
for (const auto &in : tx.vin) {
const COutPoint &prevout = in.prevout;
const Coin &coin = inputs.AccessCoin(prevout);
assert(!coin.IsSpent());
// If prev is coinbase, check that it's matured
if (coin.IsCoinBase()) {
if (nSpendHeight - coin.GetHeight() < COINBASE_MATURITY) {
return state.Invalid(
false, REJECT_INVALID,
"bad-txns-premature-spend-of-coinbase",
strprintf("tried to spend coinbase at depth %d",
nSpendHeight - coin.GetHeight()));
}
}
// Check for negative or overflow input values
nValueIn += coin.GetTxOut().nValue;
if (!MoneyRange(coin.GetTxOut().nValue) || !MoneyRange(nValueIn)) {
return state.DoS(100, false, REJECT_INVALID,
"bad-txns-inputvalues-outofrange");
}
}
if (nValueIn < tx.GetValueOut()) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout",
false, strprintf("value in (%s) < value out (%s)",
FormatMoney(nValueIn),
FormatMoney(tx.GetValueOut())));
}
// Tally transaction fees
Amount nTxFee = nValueIn - tx.GetValueOut();
if (nTxFee < Amount(0)) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
}
nFees += nTxFee;
if (!MoneyRange(nFees)) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
}
return true;
}
} // namespace Consensus
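// Illustrative numbers for Consensus::CheckTxInputs: with
// COINBASE_MATURITY = 100, a coinbase coin created at height 1000 is
// spendable from height 1100 onward; a spend attempted at nSpendHeight 1099
// fails with "bad-txns-premature-spend-of-coinbase" at depth 99.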
bool CheckInputs(const CTransaction &tx, CValidationState &state,
const CCoinsViewCache &inputs, bool fScriptChecks,
const uint32_t flags, bool sigCacheStore,
bool scriptCacheStore,
const PrecomputedTransactionData &txdata,
std::vector<CScriptCheck> *pvChecks) {
assert(!tx.IsCoinBase());
if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) {
return false;
}
if (pvChecks) {
pvChecks->reserve(tx.vin.size());
}
// The first loop above does all the inexpensive checks. Only if ALL inputs
// pass do we perform expensive ECDSA signature checks. Helps prevent CPU
// exhaustion attacks.
// Skip script verification when connecting blocks under the assumedvalid
// block. Assuming the assumedvalid block is valid, this is safe because
// block merkle hashes are still computed and checked. Of course, if an
// assumed valid block is invalid due to false scriptSigs, this optimization
// would allow an invalid chain to be accepted.
if (!fScriptChecks) {
return true;
}
// First check if script executions have been cached with the same flags.
// Note that this assumes that the inputs provided are correct (ie that the
// transaction hash which is in tx's prevouts properly commits to the
// scriptPubKey in the inputs view of that transaction).
uint256 hashCacheEntry = GetScriptCacheKey(tx, flags);
if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore)) {
return true;
}
for (size_t i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const Coin &coin = inputs.AccessCoin(prevout);
assert(!coin.IsSpent());
// We very carefully only pass in things to CScriptCheck which are
// clearly committed to by the transaction's hash. This provides a sanity
// check that our caching is not introducing consensus failures through
// additional data in, eg, the coins being spent being checked as a part
// of CScriptCheck.
const CScript &scriptPubKey = coin.GetTxOut().scriptPubKey;
const Amount amount = coin.GetTxOut().nValue;
// Verify signature
CScriptCheck check(scriptPubKey, amount, tx, i, flags, sigCacheStore,
txdata);
if (pvChecks) {
pvChecks->push_back(std::move(check));
} else if (!check()) {
const bool hasNonMandatoryFlags =
(flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) != 0;
if (hasNonMandatoryFlags) {
// Check whether the failure was caused by a non-mandatory
// script verification check, such as non-standard DER encodings
// or non-null dummy arguments; if so, don't trigger DoS
// protection to avoid splitting the network between upgraded
// and non-upgraded nodes.
//
// We also check activating the monolith opcodes as it is a
// strictly additive change and we would not like to ban some of
// our peers that are ahead of us and are considering the fork
// as activated.
CScriptCheck check2(scriptPubKey, amount, tx, i,
flags &
~STANDARD_NOT_MANDATORY_VERIFY_FLAGS,
sigCacheStore, txdata);
if (check2()) {
return state.Invalid(
false, REJECT_NONSTANDARD,
strprintf("non-mandatory-script-verify-flag (%s)",
ScriptErrorString(check.GetScriptError())));
}
}
// Failures of other flags indicate a transaction that is invalid in
// new blocks, e.g. an invalid P2SH. We DoS ban such nodes as they
// are not following the protocol. That said, during an upgrade
// careful thought should be taken as to the correct behavior - we
// may want to continue peering with non-upgraded nodes even after
// soft-fork super-majority signaling has occurred.
return state.DoS(
100, false, REJECT_INVALID,
strprintf("mandatory-script-verify-flag-failed (%s)",
ScriptErrorString(check.GetScriptError())));
}
}
if (scriptCacheStore && !pvChecks) {
// We executed all of the provided scripts, and were told to cache the
// result. Do so now.
AddKeyInScriptCache(hashCacheEntry);
}
return true;
}
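// Illustrative flag interaction for CheckInputs: a script that fails only a
// flag in STANDARD_NOT_MANDATORY_VERIFY_FLAGS (e.g. one of the non-standard
// DER-encoding or non-null-dummy checks mentioned above) is re-run by check2
// with those flags masked off; if it then passes, the result is a non-DoS
// REJECT_NONSTANDARD rejection. A failure that persists under mandatory
// flags instead yields a DoS score of 100 with REJECT_INVALID.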
namespace {
bool UndoWriteToDisk(const CBlockUndo &blockundo, CDiskBlockPos &pos,
const uint256 &hashBlock,
const CMessageHeader::MessageMagic &messageStart) {
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Write index header
unsigned int nSize = GetSerializeSize(fileout, blockundo);
fileout << FLATDATA(messageStart) << nSize;
// Write undo data
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0) {
return error("%s: ftell failed", __func__);
}
pos.nPos = (unsigned int)fileOutPos;
fileout << blockundo;
// calculate & write checksum
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << blockundo;
fileout << hasher.GetHash();
return true;
}
bool UndoReadFromDisk(CBlockUndo &blockundo, const CDiskBlockPos &pos,
const uint256 &hashBlock) {
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Read block
uint256 hashChecksum;
// We need a CHashVerifier as reserializing may lose data
CHashVerifier<CAutoFile> verifier(&filein);
try {
verifier << hashBlock;
verifier >> blockundo;
filein >> hashChecksum;
} catch (const std::exception &e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
// Verify checksum
if (hashChecksum != verifier.GetHash()) {
return error("%s: Checksum mismatch", __func__);
}
return true;
}
/** Abort with a message */
bool AbortNode(const std::string &strMessage,
const std::string &userMessage = "") {
SetMiscWarning(strMessage);
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see "
"debug.log for details")
: userMessage,
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool AbortNode(CValidationState &state, const std::string &strMessage,
const std::string &userMessage = "") {
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
} // namespace
/** Restore the UTXO in a Coin at a given COutPoint. */
DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view,
const COutPoint &out) {
bool fClean = true;
if (view.HaveCoin(out)) {
// Overwriting transaction output.
fClean = false;
}
if (undo.GetHeight() == 0) {
// Missing undo metadata (height and coinbase). Older versions included
// this information only in the undo record for the last spend of a
// transaction's outputs. This implies that it must be present for some
// other output of the same tx.
const Coin &alternate = AccessByTxid(view, out.GetTxId());
if (alternate.IsSpent()) {
// Adding output for transaction without known metadata
return DISCONNECT_FAILED;
}
// This is somewhat ugly, but hopefully utility is limited. This is only
// useful when working from legacy on-disk data. In any case, putting
// the correct information in there doesn't hurt.
const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                alternate.IsCoinBase());
}
// The potential_overwrite parameter to AddCoin is only allowed to be false
// if we know for sure that the coin did not already exist in the cache. As
// we have queried for that above using HaveCoin, we don't need to guess.
// When fClean is false, a coin already existed and it is an overwrite.
view.AddCoin(out, std::move(undo), !fClean);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/**
* Undo the effects of this block (with given index) on the UTXO set represented
* by coins. When FAILED is returned, view is left in an indeterminate state.
*/
static DisconnectResult DisconnectBlock(const CBlock &block,
const CBlockIndex *pindex,
CCoinsViewCache &view) {
CBlockUndo blockUndo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (pos.IsNull()) {
error("DisconnectBlock(): no undo data available");
return DISCONNECT_FAILED;
}
if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) {
error("DisconnectBlock(): failure reading undo data");
return DISCONNECT_FAILED;
}
return ApplyBlockUndo(blockUndo, block, pindex, view);
}
DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo,
const CBlock &block, const CBlockIndex *pindex,
CCoinsViewCache &view) {
bool fClean = true;
if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
error("DisconnectBlock(): block and undo data inconsistent");
return DISCONNECT_FAILED;
}
// First, restore inputs.
for (size_t i = 1; i < block.vtx.size(); i++) {
const CTransaction &tx = *(block.vtx[i]);
const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
if (txundo.vprevout.size() != tx.vin.size()) {
error("DisconnectBlock(): transaction and undo data inconsistent");
return DISCONNECT_FAILED;
}
for (size_t j = 0; j < tx.vin.size(); j++) {
const COutPoint &out = tx.vin[j].prevout;
const Coin &undo = txundo.vprevout[j];
DisconnectResult res = UndoCoinSpend(undo, view, out);
if (res == DISCONNECT_FAILED) {
return DISCONNECT_FAILED;
}
fClean = fClean && res != DISCONNECT_UNCLEAN;
}
}
// Second, revert created outputs.
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
uint256 txid = tx.GetId();
// Check that all outputs are available and match the outputs in the
// block itself exactly.
for (size_t o = 0; o < tx.vout.size(); o++) {
if (tx.vout[o].scriptPubKey.IsUnspendable()) {
continue;
}
COutPoint out(txid, o);
Coin coin;
bool is_spent = view.SpendCoin(out, &coin);
if (!is_spent || tx.vout[o] != coin.GetTxOut()) {
// transaction output mismatch
fClean = false;
}
}
}
// Move best block pointer to previous block.
view.SetBestBlock(block.hashPrevBlock);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
static void FlushBlockFile(bool fFinalize = false) {
LOCK(cs_LastBlockFile);
CDiskBlockPos posOld(nLastBlockFile, 0);
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize) {
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
}
FileCommit(fileOld);
fclose(fileOld);
}
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize) {
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
}
FileCommit(fileOld);
fclose(fileOld);
}
}
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
unsigned int nAddSize);
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void ThreadScriptCheck() {
RenameThread("bitcoin-scriptch");
scriptcheckqueue.Thread();
}
// Protected by cs_main
VersionBitsCache versionbitscache;
int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
                            const Consensus::Params &params) {
int32_t nVersion = VERSIONBITS_TOP_BITS;
return nVersion;
}
// Returns the script flags which should be checked for a given block
static uint32_t GetBlockScriptFlags(const Config &config,
const CBlockIndex *pChainTip) {
AssertLockHeld(cs_main);
const Consensus::Params &consensusParams =
config.GetChainParams().GetConsensus();
uint32_t flags = SCRIPT_VERIFY_NONE;
// P2SH didn't become active until Apr 1 2012
if (pChainTip->GetMedianTimePast() >= P2SH_ACTIVATION_TIME) {
flags |= SCRIPT_VERIFY_P2SH;
}
// Start enforcing the DERSIG (BIP66) rule.
if ((pChainTip->nHeight + 1) >= consensusParams.BIP66Height) {
flags |= SCRIPT_VERIFY_DERSIG;
}
// Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
if ((pChainTip->nHeight + 1) >= consensusParams.BIP65Height) {
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
// Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
if ((pChainTip->nHeight + 1) >= consensusParams.CSVHeight) {
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
}
// If the UAHF is enabled, we start accepting replay protected txns
if (IsUAHFenabled(config, pChainTip)) {
flags |= SCRIPT_VERIFY_STRICTENC;
flags |= SCRIPT_ENABLE_SIGHASH_FORKID;
}
    // If the DAA HF is enabled, we start rejecting transactions that use a
    // high S in their signatures. We also make sure that signatures that are
    // supposed to fail (for instance in multisig or other forms of smart
    // contracts) are null.
if (IsDAAEnabled(config, pChainTip)) {
flags |= SCRIPT_VERIFY_LOW_S;
flags |= SCRIPT_VERIFY_NULLFAIL;
}
// We make sure this node will have replay protection during the next hard
// fork.
if (IsReplayProtectionEnabled(config, pChainTip)) {
flags |= SCRIPT_ENABLE_REPLAY_PROTECTION;
}
return flags;
}
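// Note that the accumulation above is purely additive: every activation
// branch only ORs in more SCRIPT_VERIFY_* bits, and since each activation
// condition keeps holding for descendants of the block that triggered it, the
// flag set for a given chain only grows as the tip advances. An illustrative
// sketch of how a caller can inspect the result:
//
//   uint32_t flags = GetBlockScriptFlags(config, chainActive.Tip());
//   if (flags & SCRIPT_ENABLE_SIGHASH_FORKID) {
//       // UAHF replay-protected signature hashing is in force.
//   }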
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
/**
* Apply the effects of this block (with given index) on the UTXO set
* represented by coins. Validity checks that depend on the UTXO set are also
* done; ConnectBlock() can fail if those validity checks fail (among other
* reasons).
*/
static bool ConnectBlock(const Config &config, const CBlock &block,
CValidationState &state, CBlockIndex *pindex,
CCoinsViewCache &view, bool fJustCheck = false) {
AssertLockHeld(cs_main);
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
BlockValidationOptions validationOptions =
BlockValidationOptions(!fJustCheck, !fJustCheck);
if (!CheckBlock(config, block, state, validationOptions)) {
return error("%s: Consensus::CheckBlock: %s", __func__,
FormatStateMessage(state));
}
// Verify that the view's current state corresponds to the previous block
uint256 hashPrevBlock =
pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its
// transactions (its coinbase is unspendable)
const Consensus::Params &consensusParams =
config.GetChainParams().GetConsensus();
if (block.GetHash() == consensusParams.hashGenesisBlock) {
if (!fJustCheck) {
view.SetBestBlock(pindex->GetBlockHash());
}
return true;
}
bool fScriptChecks = true;
if (!hashAssumeValid.IsNull()) {
// We've been configured with the hash of a block which has been
// externally verified to have a valid history. A suitable default value
// is included with the software and updated from time to time. Because
// validity relative to a piece of software is an objective fact these
// defaults can be easily reviewed. This setting doesn't force the
// selection of any particular chain but makes validating some faster by
// effectively caching the result of part of the verification.
BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
if (it != mapBlockIndex.end()) {
if (it->second->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->nChainWork >= nMinimumChainWork) {
// This block is a member of the assumed verified chain and an
// ancestor of the best header. The equivalent time check
// discourages hashpower from extorting the network via DOS
// attack into accepting an invalid block through telling users
// they must manually set assumevalid. Requiring a software
// change or burying the invalid block, regardless of the
// setting, makes it hard to hide the implication of the demand.
// This also avoids having release candidates that are hardly
// doing any signature verification at all in testing without
// having to artificially set the default assumed verified block
// further back. The test against nMinimumChainWork prevents the
// skipping when denied access to any chain at least as good as
// the expected chain.
fScriptChecks =
(GetBlockProofEquivalentTime(
*pindexBestHeader, *pindex, *pindexBestHeader,
consensusParams) <= 60 * 60 * 24 * 7 * 2);
}
}
}
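    // For scale: 60 * 60 * 24 * 7 * 2 is 1,209,600 seconds, i.e. two weeks.
    // Script checks are thus only skipped for blocks buried more than two
    // weeks of proof-equivalent time below the best known header.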
int64_t nTime1 = GetTimeMicros();
nTimeCheck += nTime1 - nTimeStart;
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n",
0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
// Do not allow blocks that contain transactions which 'overwrite' older
// transactions, unless those are already completely spent. If such
// overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance --
// even after being sent to another address. See BIP30 and
// http://r6.ca/blog/20120206T005236Z.html for more information. This logic
// is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely. This rule was
// originally applied to all blocks with a timestamp after March 15, 2012,
// 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is
// applied to all blocks except the two in the chain that violate it. This
// prevents exploiting the issue against nodes during their initial block
// download.
bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock
// invocations which don't
// have a hash.
!((pindex->nHeight == 91842 &&
pindex->GetBlockHash() ==
uint256S("0x00000000000a4d0a398161ffc163c503763"
"b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight == 91880 &&
pindex->GetBlockHash() ==
uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
"610ae9601ac046a38084ccb7cd721")));
// Once BIP34 activated it was not possible to create new duplicate
// coinbases and thus other than starting with the 2 existing duplicate
// coinbase pairs, not possible to create overwriting txs. But by the time
// BIP34 activated, in each of the existing pairs the duplicate coinbase had
// overwritten the first before the first had been spent. Since those
    // coinbases are sufficiently buried it's no longer possible to create
// further duplicate transactions descending from the known pairs either. If
// we're on the known chain at height greater than where BIP34 activated, we
// can save the db accesses needed for the BIP30 check.
CBlockIndex *pindexBIP34height =
pindex->pprev->GetAncestor(consensusParams.BIP34Height);
// Only continue to enforce if we're below BIP34 activation height or the
// block hash at that height doesn't correspond.
fEnforceBIP30 =
fEnforceBIP30 &&
(!pindexBIP34height ||
!(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
if (fEnforceBIP30) {
for (const auto &tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
return state.DoS(
100,
error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
}
}
}
// Start enforcing BIP68 (sequence locks).
int nLockTimeFlags = 0;
if (pindex->nHeight >= consensusParams.CSVHeight) {
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
const uint32_t flags = GetBlockScriptFlags(config, pindex->pprev);
const bool fIsMagneticAnomalyEnabled =
IsMagneticAnomalyEnabled(config, pindex->pprev);
int64_t nTime2 = GetTimeMicros();
nTimeForks += nTime2 - nTime1;
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n",
0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
CBlockUndo blockundo;
    CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
                                                           : nullptr);
    std::vector<int> prevheights;
Amount nFees(0);
int nInputs = 0;
// Sigops counting. We need to do it again because of P2SH.
uint64_t nSigOpsCount = 0;
const uint64_t currentBlockSize =
::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
const uint64_t nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);
CDiskTxPos pos(pindex->GetBlockPos(),
GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos>> vPos;
vPos.reserve(block.vtx.size());
blockundo.vtxundo.reserve(block.vtx.size() - 1);
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
nInputs += tx.vin.size();
vPos.push_back(std::make_pair(tx.GetId(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
if (tx.IsCoinBase()) {
// We've already checked for sigops count before P2SH in CheckBlock.
nSigOpsCount += GetSigOpCountWithoutP2SH(tx, flags);
}
if (fIsMagneticAnomalyEnabled || tx.IsCoinBase()) {
AddCoins(view, tx, pindex->nHeight);
}
}
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
if (tx.IsCoinBase()) {
continue;
}
if (!view.HaveInputs(tx)) {
return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
REJECT_INVALID, "bad-txns-inputs-missingorspent");
}
        // Check that the transaction is BIP68-final. BIP68 lock checks (as
        // opposed to nLockTime checks) must be in ConnectBlock because they
        // require the UTXO set.
prevheights.resize(tx.vin.size());
for (size_t j = 0; j < tx.vin.size(); j++) {
prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
return state.DoS(
100,
error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
// GetTransactionSigOpCount counts 2 types of sigops:
// * legacy (always)
// * p2sh (when P2SH enabled in flags and excludes coinbase)
auto txSigOpsCount = GetTransactionSigOpCount(tx, view, flags);
if (txSigOpsCount > MAX_TX_SIGOPS_COUNT) {
return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops");
}
nSigOpsCount += txSigOpsCount;
if (nSigOpsCount > nMaxSigOpsCount) {
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
}
Amount fee = view.GetValueIn(tx) - tx.GetValueOut();
nFees += fee;
// Don't cache results if we're actually connecting blocks (still
// consult the cache, though).
bool fCacheResults = fJustCheck;
        std::vector<CScriptCheck> vChecks;
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults,
fCacheResults, PrecomputedTransactionData(tx),
&vChecks)) {
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetId().ToString(), FormatStateMessage(state));
}
control.Add(vChecks);
blockundo.vtxundo.push_back(CTxUndo());
SpendCoins(view, tx, blockundo.vtxundo.back(), pindex->nHeight);
if (!fIsMagneticAnomalyEnabled) {
AddCoins(view, tx, pindex->nHeight);
}
}
int64_t nTime3 = GetTimeMicros();
nTimeConnect += nTime3 - nTime2;
LogPrint(BCLog::BENCH,
" - Connect %u transactions: %.2fms (%.3fms/tx, "
"%.3fms/txin) [%.2fs]\n",
(unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2),
0.001 * (nTime3 - nTime2) / block.vtx.size(),
nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs - 1),
nTimeConnect * 0.000001);
Amount blockReward =
nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
if (block.vtx[0]->GetValueOut() > blockReward) {
return state.DoS(100, error("ConnectBlock(): coinbase pays too much "
"(actual=%d vs limit=%d)",
block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
}
if (!control.Wait()) {
return state.DoS(100, false, REJECT_INVALID, "blk-bad-inputs", false,
"parallel script check failed");
}
int64_t nTime4 = GetTimeMicros();
nTimeVerify += nTime4 - nTime2;
LogPrint(BCLog::BENCH,
" - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n",
nInputs - 1, 0.001 * (nTime4 - nTime2),
nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs - 1),
nTimeVerify * 0.000001);
if (fJustCheck) {
return true;
}
// Write undo information to disk
if (pindex->GetUndoPos().IsNull() ||
!pindex->IsValid(BlockValidity::SCRIPTS)) {
if (pindex->GetUndoPos().IsNull()) {
CDiskBlockPos _pos;
if (!FindUndoPos(
state, pindex->nFile, _pos,
::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) +
40)) {
return error("ConnectBlock(): FindUndoPos failed");
}
if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
config.GetChainParams().DiskMagic())) {
return AbortNode(state, "Failed to write undo data");
}
// update nUndoPos in block index
pindex->nUndoPos = _pos.nPos;
pindex->nStatus = pindex->nStatus.withUndo();
}
pindex->RaiseValidity(BlockValidity::SCRIPTS);
setDirtyBlockIndex.insert(pindex);
}
if (fTxIndex && !pblocktree->WriteTxIndex(vPos)) {
return AbortNode(state, "Failed to write transaction index");
}
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros();
nTimeIndex += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n",
0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
int64_t nTime6 = GetTimeMicros();
nTimeCallbacks += nTime6 - nTime5;
LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n",
0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
    // If we just activated the replay protection with that block, it means
    // transactions in the mempool are now invalid. As a result, we need to
    // clear the mempool.
if (IsReplayProtectionEnabled(config, pindex) &&
!IsReplayProtectionEnabled(config, pindex->pprev)) {
mempool.clear();
}
return true;
}
/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with:
 * if the caches are too large, if it's been a while since the last write, or
 * unconditionally in some cases, such as when we're in prune mode and are
 * deleting files.
 */
static bool FlushStateToDisk(const CChainParams &chainparams,
CValidationState &state, FlushStateMode mode,
int nManualPruneHeight) {
int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
LOCK(cs_main);
static int64_t nLastWrite = 0;
static int64_t nLastFlush = 0;
static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
bool fFlushForPrune = false;
bool fDoFullFlush = false;
int64_t nNow = 0;
try {
{
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
!fReindex) {
if (nManualPruneHeight > 0) {
FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
} else {
FindFilesToPrune(setFilesToPrune,
chainparams.PruneAfterHeight());
fCheckForPruning = false;
}
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
if (!fHavePruned) {
pblocktree->WriteFlag("prunedblockfiles", true);
fHavePruned = true;
}
}
}
nNow = GetTimeMicros();
// Avoid writing/flushing immediately after startup.
if (nLastWrite == 0) {
nLastWrite = nNow;
}
if (nLastFlush == 0) {
nLastFlush = nNow;
}
if (nLastSetChain == 0) {
nLastSetChain = nNow;
}
int64_t nMempoolSizeMax =
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
            int64_t nTotalSpace =
                nCoinCacheUsage +
                std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
// The cache is large and we're within 10% and 10 MiB of the limit,
// but we have time now (not in the middle of a block processing).
bool fCacheLarge =
mode == FLUSH_STATE_PERIODIC &&
cacheSize > std::max((9 * nTotalSpace) / 10,
nTotalSpace -
MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
// The cache is over the limit, we have to write now.
bool fCacheCritical =
mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
// It's been a while since we wrote the block index to disk. Do this
// frequently, so we don't need to redownload after a crash.
bool fPeriodicWrite =
mode == FLUSH_STATE_PERIODIC &&
nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
// It's been very long since we flushed the cache. Do this
// infrequently, to optimize cache usage.
bool fPeriodicFlush =
mode == FLUSH_STATE_PERIODIC &&
nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
// Combine all conditions that result in a full cache flush.
fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge ||
fCacheCritical || fPeriodicFlush || fFlushForPrune;
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Depend on nMinDiskSpace to ensure we can write block index
if (!CheckDiskSpace(0)) {
return state.Error("out of disk space");
}
// First make sure all block and undo data is flushed to disk.
FlushBlockFile();
// Then update all block file information (which may refer to
// block and undo files).
{
                std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
vFiles.reserve(setDirtyFileInfo.size());
for (int i : setDirtyFileInfo) {
vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i]));
}
setDirtyFileInfo.clear();
                std::vector<const CBlockIndex *> vBlocks;
vBlocks.reserve(setDirtyBlockIndex.size());
for (const CBlockIndex *cbi : setDirtyBlockIndex) {
vBlocks.push_back(cbi);
}
setDirtyBlockIndex.clear();
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
vBlocks)) {
return AbortNode(
state, "Failed to write to block index database");
}
}
// Finally remove any pruned files
if (fFlushForPrune) {
UnlinkPrunedFiles(setFilesToPrune);
}
nLastWrite = nNow;
}
// Flush best chain related state. This can only be done if the
// blocks / block index write was also done.
if (fDoFullFlush) {
// Typical Coin structures on disk are around 48 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is
// already an overestimation, as most will delete an existing
// entry or overwrite one. Still, use a conservative safety
// factor of 2.
if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) {
return state.Error("out of disk space");
}
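                // For illustration: the factors above amount to
                // 48 * 2 * 2 = 192 bytes of required free space per cached
                // coin, so a cache of one million entries demands roughly
                // 192 MB of free disk space before the flush proceeds.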
// Flush the chainstate (which may refer to block index
// entries).
if (!pcoinsTip->Flush()) {
return AbortNode(state, "Failed to write to coin database");
}
nLastFlush = nNow;
}
}
if (fDoFullFlush ||
((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) &&
nNow >
nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
// Update best block in wallet (so we can detect restored wallets).
GetMainSignals().SetBestChain(chainActive.GetLocator());
nLastSetChain = nNow;
}
} catch (const std::runtime_error &e) {
return AbortNode(
state, std::string("System error while flushing: ") + e.what());
}
return true;
}
void FlushStateToDisk() {
CValidationState state;
const CChainParams &chainparams = Params();
FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS);
}
void PruneAndFlush() {
CValidationState state;
fCheckForPruning = true;
const CChainParams &chainparams = Params();
FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE);
}
/**
* Update chainActive and related internal data structures when adding a new
* block to the chain tip.
*/
static void UpdateTip(const Config &config, CBlockIndex *pindexNew) {
const Consensus::Params &consensusParams =
config.GetChainParams().GetConsensus();
chainActive.SetTip(pindexNew);
// New best block
mempool.AddTransactionsUpdated(1);
cvBlockChange.notify_all();
static bool fWarned = false;
    std::vector<std::string> warningMessages;
if (!IsInitialBlockDownload()) {
int nUpgraded = 0;
const CBlockIndex *pindex = chainActive.Tip();
// Check the version of the last 100 blocks to see if we need to
// upgrade:
for (int i = 0; i < 100 && pindex != nullptr; i++) {
int32_t nExpectedVersion =
ComputeBlockVersion(pindex->pprev, consensusParams);
if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION &&
(pindex->nVersion & ~nExpectedVersion) != 0) {
++nUpgraded;
}
pindex = pindex->pprev;
}
if (nUpgraded > 0) {
warningMessages.push_back(strprintf(
"%d of last 100 blocks have unexpected version", nUpgraded));
}
if (nUpgraded > 100 / 2) {
std::string strWarning =
_("Warning: Unknown block versions being mined! It's possible "
"unknown rules are in effect");
// notify GetWarnings(), called by Qt and the JSON-RPC code to warn
// the user:
SetMiscWarning(strWarning);
if (!fWarned) {
AlertNotify(strWarning);
fWarned = true;
}
}
}
LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu "
"date='%s' progress=%f cache=%.1fMiB(%utxo)",
__func__, chainActive.Tip()->GetBlockHash().ToString(),
chainActive.Height(), chainActive.Tip()->nVersion,
log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0),
(unsigned long)chainActive.Tip()->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(config.GetChainParams().TxData(),
chainActive.Tip()),
pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)),
pcoinsTip->GetCacheSize());
if (!warningMessages.empty()) {
LogPrintf(" warning='%s'",
boost::algorithm::join(warningMessages, ", "));
}
LogPrintf("\n");
}
/**
* Disconnect chainActive's tip.
* After calling, the mempool will be in an inconsistent state, with
* transactions from disconnected blocks being added to disconnectpool. You
 * should make the mempool consistent again by calling UpdateMempoolForReorg,
 * with cs_main held.
*
* If disconnectpool is nullptr, then no disconnected transactions are added to
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
static bool DisconnectTip(const Config &config, CValidationState &state,
DisconnectedBlockTransactions *disconnectpool) {
CBlockIndex *pindexDelete = chainActive.Tip();
assert(pindexDelete);
// Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock &block = *pblock;
if (!ReadBlockFromDisk(block, pindexDelete, config)) {
return AbortNode(state, "Failed to read block");
}
// Apply the block atomically to the chain state.
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) {
return error("DisconnectTip(): DisconnectBlock %s failed",
pindexDelete->GetBlockHash().ToString());
}
bool flushed = view.Flush();
assert(flushed);
}
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
(GetTimeMicros() - nStart) * 0.001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(config.GetChainParams(), state,
FLUSH_STATE_IF_NEEDED)) {
return false;
}
// If this block was deactivating the replay protection, then we need to
// remove transactions that are replay protected from the mempool. There is
// no easy way to do this so we'll just discard the whole mempool and then
// add the transaction of the block we just disconnected back.
if (IsReplayProtectionEnabled(config, pindexDelete) &&
!IsReplayProtectionEnabled(config, pindexDelete->pprev)) {
LogPrint(BCLog::MEMPOOL, "Clearing mempool for reorg");
mempool.clear();
// While not strictly necessary, clearing the disconnect pool is also
// beneficial so we don't try to reuse its content at the end of the
// reorg, which we know will fail.
if (disconnectpool) {
disconnectpool->clear();
}
}
if (disconnectpool) {
// Save transactions to re-add to mempool at end of reorg
for (const auto &tx : boost::adaptors::reverse(block.vtx)) {
disconnectpool->addTransaction(tx);
}
while (disconnectpool->DynamicMemoryUsage() >
MAX_DISCONNECTED_TX_POOL_SIZE) {
// Drop the earliest entry, and remove its children from the
// mempool.
auto it = disconnectpool->queuedTx.get().begin();
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
disconnectpool->removeEntry(it);
}
}
// Update chainActive and related variables.
UpdateTip(config, pindexDelete->pprev);
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
GetMainSignals().BlockDisconnected(pblock);
return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
struct PerBlockConnectTrace {
CBlockIndex *pindex = nullptr;
    std::shared_ptr<const CBlock> pblock;
    std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
    PerBlockConnectTrace()
        : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
};
/**
* Used to track blocks whose transactions were applied to the UTXO state as a
* part of a single ActivateBestChainStep call.
*
* This class also tracks transactions that are removed from the mempool as
* conflicts (per block) and can be used to pass all those transactions through
* SyncTransaction.
*
* This class assumes (and asserts) that the conflicted transactions for a given
* block are added via mempool callbacks prior to the BlockConnected()
* associated with those transactions. If any transactions are marked
* conflicted, it is assumed that an associated block will always be added.
*
* This class is single-use, once you call GetBlocksConnected() you have to
* throw it away and make a new one.
*/
class ConnectTrace {
private:
    std::vector<PerBlockConnectTrace> blocksConnected;
CTxMemPool &pool;
public:
ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
pool.NotifyEntryRemoved.connect(
boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
}
~ConnectTrace() {
pool.NotifyEntryRemoved.disconnect(
boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
}
    void BlockConnected(CBlockIndex *pindex,
                        std::shared_ptr<const CBlock> pblock) {
assert(!blocksConnected.back().pindex);
assert(pindex);
assert(pblock);
blocksConnected.back().pindex = pindex;
blocksConnected.back().pblock = std::move(pblock);
blocksConnected.emplace_back();
}
    std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
// We always keep one extra block at the end of our list because blocks
// are added after all the conflicted transactions have been filled in.
// Thus, the last entry should always be an empty one waiting for the
// transactions from the next block. We pop the last entry here to make
// sure the list we return is sane.
assert(!blocksConnected.back().pindex);
assert(blocksConnected.back().conflictedTxs->empty());
blocksConnected.pop_back();
return blocksConnected;
}
void NotifyEntryRemoved(CTransactionRef txRemoved,
MemPoolRemovalReason reason) {
assert(!blocksConnected.back().pindex);
if (reason == MemPoolRemovalReason::CONFLICT) {
blocksConnected.back().conflictedTxs->emplace_back(
std::move(txRemoved));
}
}
};
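// A sketch of the intended ConnectTrace lifecycle, mirroring what
// ActivateBestChain does further below (illustrative only):
//
//   ConnectTrace connectTrace(mempool); // destructed before cs_main unlocks
//   ConnectTip(config, state, pindexNew, pblock, connectTrace,
//              disconnectpool);
//   for (const PerBlockConnectTrace &trace :
//        connectTrace.GetBlocksConnected()) {
//       GetMainSignals().BlockConnected(trace.pblock, trace.pindex,
//                                       *trace.conflictedTxs);
//   }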
/**
* Connect a new block to chainActive. pblock is either nullptr or a pointer to
* a CBlock corresponding to pindexNew, to bypass loading it again from disk.
*
* The block is always added to connectTrace (either after loading from disk or
* by copying pblock) - if that is not intended, care must be taken to remove
* the last entry in blocksConnected in case of failure.
*/
static bool ConnectTip(const Config &config, CValidationState &state,
CBlockIndex *pindexNew,
                       const std::shared_ptr<const CBlock> &pblock,
ConnectTrace &connectTrace,
DisconnectedBlockTransactions &disconnectpool) {
assert(pindexNew->pprev == chainActive.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
if (!ReadBlockFromDisk(*pblockNew, pindexNew, config)) {
return AbortNode(state, "Failed to read block");
}
pthisBlock = pblockNew;
} else {
pthisBlock = pblock;
}
const CBlock &blockConnecting = *pthisBlock;
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros();
nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
(nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view);
GetMainSignals().BlockChecked(blockConnecting, state);
if (!rv) {
if (state.IsInvalid()) {
InvalidBlockFound(pindexNew, state);
}
return error("ConnectTip(): ConnectBlock %s failed (%s)",
pindexNew->GetBlockHash().ToString(),
FormatStateMessage(state));
}
nTime3 = GetTimeMicros();
nTimeConnectTotal += nTime3 - nTime2;
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n",
(nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
bool flushed = view.Flush();
assert(flushed);
}
int64_t nTime4 = GetTimeMicros();
nTimeFlush += nTime4 - nTime3;
LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n",
(nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(config.GetChainParams(), state,
FLUSH_STATE_IF_NEEDED)) {
return false;
}
int64_t nTime5 = GetTimeMicros();
nTimeChainState += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n",
(nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
disconnectpool.removeForBlock(blockConnecting.vtx);
// Update chainActive & related variables.
UpdateTip(config, pindexNew);
int64_t nTime6 = GetTimeMicros();
nTimePostConnect += nTime6 - nTime5;
nTimeTotal += nTime6 - nTime1;
LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n",
(nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n",
(nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
return true;
}
/**
* Return the tip of the chain with the most work in it, that isn't known to be
* invalid (it's however far from certain to be valid).
*/
static CBlockIndex *FindMostWorkChain() {
do {
CBlockIndex *pindexNew = nullptr;
// Find the best candidate header.
{
            std::set<CBlockIndex *,
                     CBlockIndexWorkComparator>::reverse_iterator it =
                setBlockIndexCandidates.rbegin();
if (it == setBlockIndexCandidates.rend()) {
return nullptr;
}
pindexNew = *it;
}
// Check whether all blocks on the path between the currently active
// chain and the candidate are valid. Just going until the active chain
// is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
// for the most work chain if we come across them; we can't switch
// to a chain unless we have all the non-active-chain parent blocks.
bool fInvalidChain = pindexTest->nStatus.isInvalid();
bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
bool fMissingData = !pindexTest->nStatus.hasData();
if (fInvalidChain || fParkedChain || fMissingData) {
                // Candidate chain is not usable (invalid, parked, or missing
                // data).
if (fInvalidChain &&
(pindexBestInvalid == nullptr ||
pindexNew->nChainWork > pindexBestInvalid->nChainWork)) {
pindexBestInvalid = pindexNew;
}
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
while (pindexTest != pindexFailed) {
if (fInvalidChain || fParkedChain) {
pindexFailed->nStatus =
pindexFailed->nStatus
.withFailedParent(fInvalidChain)
.withParkedParent(fParkedChain);
} else if (fMissingData) {
// If we're missing data, then add back to
// mapBlocksUnlinked, so that if the block arrives in
// the future we can try adding to
// setBlockIndexCandidates again.
mapBlocksUnlinked.insert(
std::make_pair(pindexFailed->pprev, pindexFailed));
}
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
setBlockIndexCandidates.erase(pindexTest);
fInvalidAncestor = true;
break;
}
pindexTest = pindexTest->pprev;
}
if (!fInvalidAncestor) {
return pindexNew;
}
} while (true);
}
/**
* Delete all entries in setBlockIndexCandidates that are worse than the current
* tip.
*/
static void PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to
// return to it later in case a reorganization to a better block fails.
    std::set<CBlockIndex *, CBlockIndexWorkComparator>::iterator it =
        setBlockIndexCandidates.begin();
while (it != setBlockIndexCandidates.end() &&
setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left
// in setBlockIndexCandidates.
assert(!setBlockIndexCandidates.empty());
}
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either nullptr or a pointer to a CBlock corresponding to
* pindexMostWork.
*/
static bool ActivateBestChainStep(const Config &config, CValidationState &state,
CBlockIndex *pindexMostWork,
                                  const std::shared_ptr<const CBlock> &pblock,
bool &fInvalidFound,
ConnectTrace &connectTrace) {
AssertLockHeld(cs_main);
const CBlockIndex *pindexOldTip = chainActive.Tip();
const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
DisconnectedBlockTransactions disconnectpool;
while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
if (!DisconnectTip(config, state, &disconnectpool)) {
// This is likely a fatal error, but keep the mempool consistent,
// just in case. Only remove from the mempool in this case.
UpdateMempoolForReorg(config, disconnectpool, false);
return false;
}
fBlocksDisconnected = true;
}
// Build list of new blocks to connect.
    std::vector<CBlockIndex *> vpindexToConnect;
bool fContinue = true;
int nHeight = pindexFork ? pindexFork->nHeight : -1;
while (fContinue && nHeight != pindexMostWork->nHeight) {
// Don't iterate the entire list of potential improvements toward the
// best tip, as we likely only need a few blocks along the way.
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
vpindexToConnect.clear();
vpindexToConnect.reserve(nTargetHeight - nHeight);
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
while (pindexIter && pindexIter->nHeight != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
nHeight = nTargetHeight;
// Connect new blocks.
for (CBlockIndex *pindexConnect :
boost::adaptors::reverse(vpindexToConnect)) {
if (!ConnectTip(config, state, pindexConnect,
pindexConnect == pindexMostWork
? pblock
: std::shared_ptr(),
connectTrace, disconnectpool)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (!state.CorruptionPossible()) {
InvalidChainFound(vpindexToConnect.back());
}
state = CValidationState();
fInvalidFound = true;
fContinue = false;
break;
}
// A system error occurred (disk space, database error, ...).
// Make the mempool consistent with the current tip, just in
// case any observers try to use it before shutdown.
UpdateMempoolForReorg(config, disconnectpool, false);
return false;
} else {
PruneBlockIndexCandidates();
if (!pindexOldTip ||
chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return
// temporarily to release the lock.
fContinue = false;
break;
}
}
}
}
if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non-empty.
        // Add any disconnected transactions back to the mempool.
UpdateMempoolForReorg(config, disconnectpool, true);
}
mempool.check(pcoinsTip);
// Callbacks/notifications for a new best chain.
if (fInvalidFound) {
CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
} else {
CheckForkWarningConditions();
}
return true;
}
static void NotifyHeaderTip() {
bool fNotify = false;
bool fInitialBlockDownload = false;
static CBlockIndex *pindexHeaderOld = nullptr;
CBlockIndex *pindexHeader = nullptr;
{
LOCK(cs_main);
pindexHeader = pindexBestHeader;
if (pindexHeader != pindexHeaderOld) {
fNotify = true;
fInitialBlockDownload = IsInitialBlockDownload();
pindexHeaderOld = pindexHeader;
}
}
// Send block tip changed notifications without cs_main
if (fNotify) {
uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
}
}
bool ActivateBestChain(const Config &config, CValidationState &state,
                       std::shared_ptr<const CBlock> pblock) {
// Note that while we're often called here from ProcessNewBlock, this is
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
// sanely for performance or correctness!
CBlockIndex *pindexMostWork = nullptr;
CBlockIndex *pindexNewTip = nullptr;
do {
boost::this_thread::interruption_point();
if (ShutdownRequested()) {
break;
}
const CBlockIndex *pindexFork;
bool fInitialDownload;
{
LOCK(cs_main);
// Destructed before cs_main is unlocked.
ConnectTrace connectTrace(mempool);
CBlockIndex *pindexOldTip = chainActive.Tip();
if (pindexMostWork == nullptr) {
pindexMostWork = FindMostWorkChain();
}
// Whether we have anything to do at all.
if (pindexMostWork == nullptr ||
pindexMostWork == chainActive.Tip()) {
return true;
}
bool fInvalidFound = false;
            std::shared_ptr<const CBlock> nullBlockPtr;
if (!ActivateBestChainStep(
config, state, pindexMostWork,
pblock &&
pblock->GetHash() == pindexMostWork->GetBlockHash()
? pblock
: nullBlockPtr,
fInvalidFound, connectTrace)) {
return false;
}
if (fInvalidFound) {
// Wipe cache, we may need another branch now.
pindexMostWork = nullptr;
}
pindexNewTip = chainActive.Tip();
pindexFork = chainActive.FindFork(pindexOldTip);
fInitialDownload = IsInitialBlockDownload();
for (const PerBlockConnectTrace &trace :
connectTrace.GetBlocksConnected()) {
assert(trace.pblock && trace.pindex);
GetMainSignals().BlockConnected(trace.pblock, trace.pindex,
*trace.conflictedTxs);
}
}
// When we reach this point, we switched to a new tip (stored in
// pindexNewTip).
// Notifications/callbacks that can run without cs_main
// Notify external listeners about the new tip.
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
fInitialDownload);
// Always notify the UI if a new block tip was connected
if (pindexFork != pindexNewTip) {
uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
}
} while (pindexNewTip != pindexMostWork);
const CChainParams ¶ms = config.GetChainParams();
CheckBlockIndex(params.GetConsensus());
// Write changes periodically to disk, after relay.
if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) {
return false;
}
int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
if (nStopAtHeight && pindexNewTip &&
pindexNewTip->nHeight >= nStopAtHeight) {
StartShutdown();
}
return true;
}
bool PreciousBlock(const Config &config, CValidationState &state,
CBlockIndex *pindex) {
{
LOCK(cs_main);
if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
// Nothing to do, this block is not at the tip.
return true;
}
if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
// The chain has been extended since the last call, reset the
// counter.
nBlockReverseSequenceId = -1;
}
nLastPreciousChainwork = chainActive.Tip()->nChainWork;
setBlockIndexCandidates.erase(pindex);
pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
// We can't keep reducing the counter if somebody really wants to
// call preciousblock 2**31-1 times on the same set of tips...
nBlockReverseSequenceId--;
}
if (pindex->IsValid(BlockValidity::TRANSACTIONS) && pindex->nChainTx) {
setBlockIndexCandidates.insert(pindex);
PruneBlockIndexCandidates();
}
}
return ActivateBestChain(config, state);
}
bool InvalidateBlock(const Config &config, CValidationState &state,
CBlockIndex *pindex) {
AssertLockHeld(cs_main);
// Mark the block itself as invalid.
pindex->nStatus = pindex->nStatus.withFailed();
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
DisconnectedBlockTransactions disconnectpool;
while (chainActive.Contains(pindex)) {
CBlockIndex *pindexWalk = chainActive.Tip();
pindexWalk->nStatus = pindexWalk->nStatus.withFailedParent();
setDirtyBlockIndex.insert(pindexWalk);
setBlockIndexCandidates.erase(pindexWalk);
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(config, state, &disconnectpool)) {
// It's probably hopeless to try to make the mempool consistent
// here if DisconnectTip failed, but we can try.
UpdateMempoolForReorg(config, disconnectpool, false);
return false;
}
}
// DisconnectTip will add transactions to disconnectpool; try to add these
// back to the mempool.
UpdateMempoolForReorg(config, disconnectpool, true);
// The resulting new best tip may not be in setBlockIndexCandidates anymore,
// so add it again.
    for (const std::pair<const uint256, CBlockIndex *> &it : mapBlockIndex) {
CBlockIndex *i = it.second;
if (i->IsValid(BlockValidity::TRANSACTIONS) && i->nChainTx &&
!setBlockIndexCandidates.value_comp()(i, chainActive.Tip())) {
setBlockIndexCandidates.insert(i);
}
}
InvalidChainFound(pindex);
uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
return true;
}
bool ResetBlockFailureFlags(CBlockIndex *pindex) {
AssertLockHeld(cs_main);
int nHeight = pindex->nHeight;
// Remove the invalidity flag from this block and all its descendants.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (!it->second->IsValid() &&
it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus = it->second->nStatus.withClearedFailureFlags();
setDirtyBlockIndex.insert(it->second);
if (it->second->IsValid(BlockValidity::TRANSACTIONS) &&
it->second->nChainTx &&
setBlockIndexCandidates.value_comp()(chainActive.Tip(),
it->second)) {
setBlockIndexCandidates.insert(it->second);
}
if (it->second == pindexBestInvalid) {
// Reset invalid block marker if it was pointing to one of
// those.
pindexBestInvalid = nullptr;
}
}
it++;
}
// Remove the invalidity flag from all ancestors too.
while (pindex != nullptr) {
if (pindex->nStatus.isInvalid()) {
pindex->nStatus = pindex->nStatus.withClearedFailureFlags();
setDirtyBlockIndex.insert(pindex);
}
pindex = pindex->pprev;
}
return true;
}
static CBlockIndex *AddToBlockIndex(const CBlockHeader &block) {
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end()) {
return it->second;
}
// Construct new block index object
CBlockIndex *pindexNew = new CBlockIndex(block);
assert(pindexNew);
// We assign the sequence id to blocks only when the full data is available,
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
pindexNew->nSequenceId = 0;
BlockMap::iterator mi =
mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
if (miPrev != mapBlockIndex.end()) {
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
pindexNew->BuildSkip();
}
pindexNew->nTimeReceived = GetTime();
pindexNew->nTimeMax =
(pindexNew->pprev
? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
: pindexNew->nTime);
pindexNew->nChainWork =
(pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
GetBlockProof(*pindexNew);
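    // nChainWork is cumulative: each block contributes GetBlockProof(), which
    // is roughly 2^256 / (target + 1) expected hashes. This is why the
    // comparison just below (and chain selection generally) prefers the
    // header chain with the most total work rather than simply the longest
    // one.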
pindexNew->RaiseValidity(BlockValidity::TREE);
if (pindexBestHeader == nullptr ||
pindexBestHeader->nChainWork < pindexNew->nChainWork) {
pindexBestHeader = pindexNew;
}
setDirtyBlockIndex.insert(pindexNew);
return pindexNew;
}
/**
* Mark a block as having its data received and checked (up to
* BLOCK_VALID_TRANSACTIONS).
*/
bool ReceivedBlockTransactions(const CBlock &block, CValidationState &state,
CBlockIndex *pindexNew,
const CDiskBlockPos &pos) {
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus = pindexNew->nStatus.withData();
pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) {
// If pindexNew is the genesis block or all parents are
// BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex *> queue;
queue.push_back(pindexNew);
// Recursively process any descendant blocks that now may be eligible to
// be connected.
while (!queue.empty()) {
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->nChainTx =
(pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
{
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
if (chainActive.Tip() == nullptr ||
!setBlockIndexCandidates.value_comp()(pindex,
chainActive.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
            std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                      std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
                    range.first;
queue.push_back(it->second);
range.first++;
mapBlocksUnlinked.erase(it);
}
}
} else if (pindexNew->pprev &&
pindexNew->pprev->IsValid(BlockValidity::TREE)) {
mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
}
return true;
}
static bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos,
unsigned int nAddSize, unsigned int nHeight,
uint64_t nTime, bool fKnown = false) {
LOCK(cs_LastBlockFile);
unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
if (!fKnown) {
while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
nFile++;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
}
pos.nFile = nFile;
pos.nPos = vinfoBlockFile[nFile].nSize;
}
if ((int)nFile != nLastBlockFile) {
if (!fKnown) {
LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
vinfoBlockFile[nLastBlockFile].ToString());
}
FlushBlockFile(!fKnown);
nLastBlockFile = nFile;
}
vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (fKnown) {
vinfoBlockFile[nFile].nSize =
std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
} else {
vinfoBlockFile[nFile].nSize += nAddSize;
}
if (!fKnown) {
unsigned int nOldChunks =
(pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
unsigned int nNewChunks =
(vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) /
BLOCKFILE_CHUNK_SIZE;
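        // The (x + CHUNK_SIZE - 1) / CHUNK_SIZE pattern is integer ceiling
        // division: block files grow in BLOCKFILE_CHUNK_SIZE increments, and
        // whenever the file crosses into a new chunk the extra space is
        // pre-allocated below, presumably to limit filesystem fragmentation.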
if (nNewChunks > nOldChunks) {
if (fPruneMode) {
fCheckForPruning = true;
}
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
if (file) {
LogPrintf(
"Pre-allocating up to position 0x%x in blk%05u.dat\n",
nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos,
nNewChunks * BLOCKFILE_CHUNK_SIZE -
pos.nPos);
fclose(file);
}
} else {
return state.Error("out of disk space");
}
}
}
setDirtyFileInfo.insert(nFile);
return true;
}
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
unsigned int nAddSize) {
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
pos.nPos = vinfoBlockFile[nFile].nUndoSize;
nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
setDirtyFileInfo.insert(nFile);
unsigned int nOldChunks =
(pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
unsigned int nNewChunks =
(nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (fPruneMode) {
fCheckForPruning = true;
}
if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenUndoFile(pos);
if (file) {
LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n",
nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos,
nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
} else {
return state.Error("out of disk space");
}
}
return true;
}
/**
* Return true if the provided block header is valid.
* Only verify PoW if blockValidationOptions is configured to do so.
* This allows validation of headers on which the PoW hasn't been done.
* For example: to validate template handed to mining software.
* Do not call this for any check that depends on the context.
 * For context-dependent calls, see ContextualCheckBlockHeader.
*/
static bool CheckBlockHeader(
const Config &config, const CBlockHeader &block, CValidationState &state,
BlockValidationOptions validationOptions = BlockValidationOptions()) {
// Check proof of work matches claimed amount
if (validationOptions.shouldValidatePoW() &&
!CheckProofOfWork(block.GetHash(), block.nBits, config)) {
return state.DoS(50, false, REJECT_INVALID, "high-hash", false,
"proof of work failed");
}
return true;
}
bool CheckBlock(const Config &config, const CBlock &block,
CValidationState &state,
BlockValidationOptions validationOptions) {
// These are checks that are independent of context.
if (block.fChecked) {
return true;
}
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(config, block, state, validationOptions)) {
return false;
}
// Check the merkle root.
if (validationOptions.shouldValidateMerkleRoot()) {
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2) {
return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot",
true, "hashMerkleRoot mismatch");
}
// Check for merkle tree malleability (CVE-2012-2459): repeating
// sequences of transactions in a block without affecting the merkle
// root of a block, while still invalidating it.
if (mutated) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate",
true, "duplicate transaction");
}
}
// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.
// First transaction must be coinbase.
if (block.vtx.empty()) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false,
"first tx is not coinbase");
}
// Size limits.
auto nMaxBlockSize = config.GetMaxBlockSize();
// Bail early if there is no way this block is of reasonable size.
if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
"size limits failed");
}
auto currentBlockSize =
::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
if (currentBlockSize > nMaxBlockSize) {
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
"size limits failed");
}
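    // The first check above is a cheap lower bound: every transaction
    // serializes to at least MIN_TRANSACTION_SIZE bytes, so a block whose
    // transaction count alone already implies an oversized block can be
    // rejected without paying for the full serialization done by the second
    // check.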
// And a valid coinbase.
if (!CheckCoinbase(*block.vtx[0], state, false)) {
return state.Invalid(false, state.GetRejectCode(),
state.GetRejectReason(),
strprintf("Coinbase check failed (txid %s) %s",
block.vtx[0]->GetId().ToString(),
state.GetDebugMessage()));
}
// Keep track of the sigops count.
uint64_t nSigOps = 0;
auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);
// Check transactions
auto txCount = block.vtx.size();
auto *tx = block.vtx[0].get();
size_t i = 0;
while (true) {
        // Count the sigops for the current transaction. If the total sigops
        // count is too high, the block is invalid.
nSigOps += GetSigOpCountWithoutP2SH(*tx, STANDARD_SCRIPT_VERIFY_FLAGS);
if (nSigOps > nMaxSigOpsCount) {
return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops",
false, "out-of-bounds SigOpCount");
}
// Go to the next transaction.
i++;
// We reached the end of the block, success.
if (i >= txCount) {
break;
}
        // Check that the transaction is valid. Because this check differs for
        // the coinbase, the loop is arranged such that it only runs after at
        // least one increment.
tx = block.vtx[i].get();
if (!CheckRegularTransaction(*tx, state, false)) {
return state.Invalid(
false, state.GetRejectCode(), state.GetRejectReason(),
strprintf("Transaction check failed (txid %s) %s",
tx->GetId().ToString(), state.GetDebugMessage()));
}
}
if (validationOptions.shouldValidatePoW() &&
validationOptions.shouldValidateMerkleRoot()) {
block.fChecked = true;
}
return true;
}
static bool CheckIndexAgainstCheckpoint(const CBlockIndex *pindexPrev,
CValidationState &state,
const CChainParams &chainparams,
const uint256 &hash) {
if (*pindexPrev->phashBlock ==
chainparams.GetConsensus().hashGenesisBlock) {
return true;
}
int nHeight = pindexPrev->nHeight + 1;
const CCheckpointData &checkpoints = chainparams.Checkpoints();
// Check that the block chain matches the known block chain up to a
// checkpoint.
if (!Checkpoints::CheckBlock(checkpoints, nHeight, hash)) {
return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d",
__func__, nHeight),
REJECT_CHECKPOINT, "checkpoint mismatch");
}
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in
// our MapBlockIndex.
CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints);
if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
return state.DoS(
100,
error("%s: forked chain older than last checkpoint (height %d)",
__func__, nHeight),
REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
}
return true;
}
static bool ContextualCheckBlockHeader(const Config &config,
const CBlockHeader &block,
CValidationState &state,
const CBlockIndex *pindexPrev,
int64_t nAdjustedTime) {
const Consensus::Params &consensusParams =
config.GetChainParams().GetConsensus();
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, config)) {
LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false,
"incorrect proof of work");
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
return state.Invalid(false, REJECT_INVALID, "time-too-old",
"block's timestamp is too early");
}
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
return state.Invalid(false, REJECT_INVALID, "time-too-new",
"block timestamp too far in the future");
}
// Reject outdated version blocks when 95% (75% on testnet) of the network
// has upgraded:
// check for version 2, 3 and 4 upgrades
if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
return state.Invalid(
false, REJECT_OBSOLETE,
strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
}
return true;
}
bool ContextualCheckTransaction(const Config &config, const CTransaction &tx,
CValidationState &state, int nHeight,
int64_t nLockTimeCutoff) {
if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
// While this is only one transaction, we use txns in the error to
// ensure continuity with other clients.
return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false,
"non-final transaction");
}
return true;
}
bool ContextualCheckTransactionForCurrentBlock(const Config &config,
const CTransaction &tx,
CValidationState &state,
int flags) {
AssertLockHeld(cs_main);
// By convention a negative value for flags indicates that the current
// network-enforced consensus rules should be used. In a future soft-fork
// scenario that would mean checking which rules would be enforced for the
// next block and setting the appropriate flags. At the present time no
// soft-forks are scheduled, so no flags are set.
flags = std::max(flags, 0);
// ContextualCheckTransactionForCurrentBlock() uses chainActive.Height()+1
// to evaluate nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being* evaluated is what
// is used. Thus if we want to know if a transaction can be part of the
// *next* block, we need to call ContextualCheckTransaction() with one more
// than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
// BIP113 will require that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
// ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
? chainActive.Tip()->GetMedianTimePast()
: GetAdjustedTime();
return ContextualCheckTransaction(config, tx, state, nBlockHeight,
nLockTimeCutoff);
}
static bool ContextualCheckBlock(const Config &config, const CBlock &block,
CValidationState &state,
const CBlockIndex *pindexPrev) {
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
const Consensus::Params &consensusParams =
config.GetChainParams().GetConsensus();
// Start enforcing BIP113 (Median Time Past).
int nLockTimeFlags = 0;
if (nHeight >= consensusParams.CSVHeight) {
nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
}
const int64_t nMedianTimePast =
pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
? nMedianTimePast
: block.GetBlockTime();
const bool fIsMagneticAnomalyEnabled =
IsMagneticAnomalyEnabled(config, pindexPrev);
// Check that all transactions are finalized
const CTransaction *prevTx = nullptr;
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
if (fIsMagneticAnomalyEnabled) {
if (prevTx && (tx.GetId() < prevTx->GetId())) {
return state.DoS(
100, false, REJECT_INVALID, "tx-ordering", false,
strprintf("Transaction order is invalid (%s < %s)",
tx.GetId().ToString(),
prevTx->GetId().ToString()));
}
if (prevTx || !tx.IsCoinBase()) {
prevTx = &tx;
}
}
if (!ContextualCheckTransaction(config, tx, state, nHeight,
nLockTimeCutoff)) {
// state set by ContextualCheckTransaction.
return false;
}
}
// Enforce rule that the coinbase starts with serialized block height
if (nHeight >= consensusParams.BIP34Height) {
CScript expect = CScript() << nHeight;
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(),
block.vtx[0]->vin[0].scriptSig.begin())) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false,
"block height mismatch in coinbase");
}
}
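    // Worked example (editorial): at height 300000 (0x0493e0) the coinbase
    // scriptSig must begin with the bytes 03 e0 93 04, i.e. a 3-byte push of
    // the height serialized in little-endian order, exactly what
    // CScript() << 300000 produces.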
return true;
}
/**
* If the provided block header is valid, add it to the block index.
*
 * Returns true if the block is successfully added to the block index.
*/
static bool AcceptBlockHeader(const Config &config, const CBlockHeader &block,
CValidationState &state, CBlockIndex **ppindex) {
AssertLockHeld(cs_main);
const CChainParams &chainparams = config.GetChainParams();
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = nullptr;
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
if (miSelf != mapBlockIndex.end()) {
// Block header is already known.
pindex = miSelf->second;
if (ppindex) {
*ppindex = pindex;
}
if (pindex->nStatus.isInvalid()) {
return state.Invalid(error("%s: block %s is marked invalid",
__func__, hash.ToString()),
0, "duplicate");
}
return true;
}
if (!CheckBlockHeader(config, block, state)) {
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__,
hash.ToString(), FormatStateMessage(state));
}
// Get prev block index
CBlockIndex *pindexPrev = nullptr;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end()) {
return state.DoS(10, error("%s: prev block not found", __func__), 0,
"prev-blk-not-found");
}
pindexPrev = (*mi).second;
if (pindexPrev->nStatus.isInvalid()) {
return state.DoS(100, error("%s: prev block invalid", __func__),
REJECT_INVALID, "bad-prevblk");
}
assert(pindexPrev);
if (fCheckpointsEnabled &&
!CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams,
hash)) {
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__,
state.GetRejectReason().c_str());
}
if (!ContextualCheckBlockHeader(config, block, state, pindexPrev,
GetAdjustedTime())) {
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
__func__, hash.ToString(), FormatStateMessage(state));
}
}
if (pindex == nullptr) {
pindex = AddToBlockIndex(block);
}
if (ppindex) {
*ppindex = pindex;
}
CheckBlockIndex(chainparams.GetConsensus());
return true;
}
// Exposed wrapper for AcceptBlockHeader
bool ProcessNewBlockHeaders(const Config &config,
                            const std::vector<CBlockHeader> &headers,
CValidationState &state,
const CBlockIndex **ppindex) {
{
LOCK(cs_main);
for (const CBlockHeader &header : headers) {
// Use a temp pindex instead of ppindex to avoid a const_cast
CBlockIndex *pindex = nullptr;
if (!AcceptBlockHeader(config, header, state, &pindex)) {
return false;
}
if (ppindex) {
*ppindex = pindex;
}
}
}
NotifyHeaderTip();
return true;
}
/**
* Store a block on disk.
*
* @param[in] config The global config.
 * @param[in,out] pblock The block we want to accept.
* @param[out] ppindex The last new block index, only set if the block
* was accepted.
* @param[in] fRequested A boolean to indicate if this block was requested
* from our peers.
* @param[in] dbp If non-null, the disk position of the block.
 * @param[in,out] fNewBlock True if block was first received via this call.
* @return True if the block is accepted as a valid block and written to disk.
*/
static bool AcceptBlock(const Config &config,
                        const std::shared_ptr<const CBlock> &pblock,
CValidationState &state, CBlockIndex **ppindex,
bool fRequested, const CDiskBlockPos *dbp,
bool *fNewBlock) {
AssertLockHeld(cs_main);
const CBlock &block = *pblock;
if (fNewBlock) {
*fNewBlock = false;
}
CBlockIndex *pindexDummy = nullptr;
CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
if (!AcceptBlockHeader(config, block, state, &pindex)) {
return false;
}
// Try to process all requested blocks that we don't have, but only
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus.hasData();
// Compare block header timestamps and received times of the block and the
// chaintip. If they have the same chain height, use these diffs as a
// tie-breaker, attempting to pick the more honestly-mined block.
int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
int64_t chainTipTimeDiff =
chainActive.Tip() ? std::llabs(chainActive.Tip()->GetReceivedTimeDiff())
: 0;
-    bool isSameHeightAndMoreHonestlyMined =
-        chainActive.Tip() &&
-        (pindex->nChainWork == chainActive.Tip()->nChainWork) &&
-        (newBlockTimeDiff < chainTipTimeDiff);
-    if (isSameHeightAndMoreHonestlyMined) {
+    bool isSameHeight = chainActive.Tip() &&
+                        (pindex->nChainWork == chainActive.Tip()->nChainWork);
+    if (isSameHeight) {
LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
"diff=%d\n",
chainActive.Tip()->GetBlockHash().ToString(),
chainTipTimeDiff);
LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
"diff=%d\n",
pindex->GetBlockHash().ToString(), newBlockTimeDiff);
}
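    // Illustrative log output when two blocks compete at the same height
    // (hashes and values are made up):
    //   Chain tip timestamp-to-received-time difference: hash=00000000...19ec, diff=2
    //   New block timestamp-to-received-time difference: hash=00000000...82fa, diff=37
    // A smaller diff means the header timestamp more closely matched the time
    // the block actually arrived, hinting at more honest timestamping.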
bool fHasMoreWork =
(chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork
: true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
bool fTooFarAhead =
(pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
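    // e.g. with MIN_BLOCKS_TO_KEEP at its current value of 288 (see the
    // pruning notes further below), an unrequested block more than 288 blocks
    // beyond the tip is ignored for now rather than stored out of order.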
    // TODO: Decouple this function from the block download logic by removing
    // fRequested.
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
if (fAlreadyHave) {
return true;
}
// If we didn't ask for it:
if (!fRequested) {
// This is a previously-processed block that was pruned.
if (pindex->nTx != 0) {
return true;
}
// Don't process less-work chains.
if (!fHasMoreWork) {
return true;
}
// Block height is too high.
if (fTooFarAhead) {
return true;
}
}
if (fNewBlock) {
*fNewBlock = true;
}
if (!CheckBlock(config, block, state) ||
!ContextualCheckBlock(config, block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus = pindex->nStatus.withFailed();
setDirtyBlockIndex.insert(pindex);
}
return error("%s: %s (block %s)", __func__, FormatStateMessage(state),
block.GetHash().ToString());
}
// Header is valid/has work and the merkle tree is good.
// Relay now, but if it does not build on our best tip, let the
// SendMessages loop relay it.
if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) {
GetMainSignals().NewPoWValidBlock(pindex, pblock);
}
int nHeight = pindex->nHeight;
const CChainParams &chainparams = config.GetChainParams();
// Write block to history file
try {
unsigned int nBlockSize =
::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
if (dbp != nullptr) {
blockPos = *dbp;
}
if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight,
block.GetBlockTime(), dbp != nullptr)) {
return error("AcceptBlock(): FindBlockPos failed");
}
if (dbp == nullptr) {
if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
AbortNode(state, "Failed to write block");
}
}
if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) {
return error("AcceptBlock(): ReceivedBlockTransactions failed");
}
} catch (const std::runtime_error &e) {
return AbortNode(state, std::string("System error: ") + e.what());
}
if (fCheckForPruning) {
// we just allocated more disk space for block files.
FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_NONE);
}
return true;
}
bool ProcessNewBlock(const Config &config,
                     const std::shared_ptr<const CBlock> pblock,
bool fForceProcessing, bool *fNewBlock) {
{
CBlockIndex *pindex = nullptr;
if (fNewBlock) {
*fNewBlock = false;
}
const CChainParams &chainparams = config.GetChainParams();
CValidationState state;
// Ensure that CheckBlock() passes before calling AcceptBlock, as
// belt-and-suspenders.
bool ret = CheckBlock(config, *pblock, state);
LOCK(cs_main);
if (ret) {
// Store to disk
ret = AcceptBlock(config, pblock, state, &pindex, fForceProcessing,
nullptr, fNewBlock);
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
return error("%s: AcceptBlock FAILED", __func__);
}
}
NotifyHeaderTip();
// Only used to report errors, not invalidity - ignore it
CValidationState state;
if (!ActivateBestChain(config, state, pblock)) {
return error("%s: ActivateBestChain failed", __func__);
}
return true;
}
bool TestBlockValidity(const Config &config, CValidationState &state,
const CBlock &block, CBlockIndex *pindexPrev,
BlockValidationOptions validationOptions) {
AssertLockHeld(cs_main);
const CChainParams &chainparams = config.GetChainParams();
assert(pindexPrev && pindexPrev == chainActive.Tip());
if (fCheckpointsEnabled &&
!CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams,
block.GetHash())) {
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__,
state.GetRejectReason().c_str());
}
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
indexDummy.nHeight = pindexPrev->nHeight + 1;
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(config, block, state, pindexPrev,
GetAdjustedTime())) {
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
FormatStateMessage(state));
}
if (!CheckBlock(config, block, state, validationOptions)) {
return error("%s: Consensus::CheckBlock: %s", __func__,
FormatStateMessage(state));
}
if (!ContextualCheckBlock(config, block, state, pindexPrev)) {
return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
FormatStateMessage(state));
}
if (!ConnectBlock(config, block, state, &indexDummy, viewNew, true)) {
return false;
}
assert(state.IsValid());
return true;
}
/**
* BLOCK PRUNING CODE
*/
/**
* Calculate the amount of disk space the block & undo files currently use.
*/
static uint64_t CalculateCurrentUsage() {
uint64_t retval = 0;
for (const CBlockFileInfo &file : vinfoBlockFile) {
retval += file.nSize + file.nUndoSize;
}
return retval;
}
/**
* Prune a block file (modify associated database entries)
*/
void PruneOneBlockFile(const int fileNumber) {
    for (const std::pair<const uint256, CBlockIndex *> &it : mapBlockIndex) {
CBlockIndex *pindex = it.second;
if (pindex->nFile == fileNumber) {
pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
pindex->nFile = 0;
pindex->nDataPos = 0;
pindex->nUndoPos = 0;
setDirtyBlockIndex.insert(pindex);
// Prune from mapBlocksUnlinked -- any block we prune would have
// to be downloaded again in order to consider its chain, at which
// point it would be considered as a candidate for
// mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                      std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                range = mapBlocksUnlinked.equal_range(pindex->pprev);
while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
                    range.first;
range.first++;
if (_it->second == pindex) {
mapBlocksUnlinked.erase(_it);
}
}
}
}
vinfoBlockFile[fileNumber].SetNull();
setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
for (const int i : setFilesToPrune) {
CDiskBlockPos pos(i, 0);
fs::remove(GetBlockPosFilename(pos, "blk"));
fs::remove(GetBlockPosFilename(pos, "rev"));
LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i);
}
}
/**
* Calculate the block/rev files to delete based on height specified by user
* with RPC command pruneblockchain
*/
static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
int nManualPruneHeight) {
assert(fPruneMode && nManualPruneHeight > 0);
LOCK2(cs_main, cs_LastBlockFile);
if (chainActive.Tip() == nullptr) {
return;
}
// last block to prune is the lesser of (user-specified height,
// MIN_BLOCKS_TO_KEEP from the tip)
unsigned int nLastBlockWeCanPrune =
std::min((unsigned)nManualPruneHeight,
chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
int count = 0;
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
if (vinfoBlockFile[fileNumber].nSize == 0 ||
vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
continue;
}
PruneOneBlockFile(fileNumber);
setFilesToPrune.insert(fileNumber);
count++;
}
LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
nLastBlockWeCanPrune, count);
}
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(int nManualPruneHeight) {
CValidationState state;
const CChainParams &chainparams = Params();
FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight);
}
/**
* Prune block and undo files (blk???.dat and undo???.dat) so that the disk
* space used is less than a user-defined target. The user sets the target (in
* MB) on the command line or in config file. This will be run on startup and
* whenever new space is allocated in a block or undo file, staying below the
* target. Changing back to unpruned requires a reindex (which in this case
* means the blockchain must be re-downloaded.)
*
* Pruning functions are called from FlushStateToDisk when the global
* fCheckForPruning flag has been set. Block and undo files are deleted in
* lock-step (when blk00003.dat is deleted, so is rev00003.dat.). Pruning cannot
* take place until the longest chain is at least a certain length (100000 on
* mainnet, 1000 on testnet, 1000 on regtest). Pruning will never delete a block
* within a defined distance (currently 288) from the active chain's tip. The
* block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks
* that were stored in the deleted files. A db flag records the fact that at
* least some block files have been pruned.
*
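 * For example (illustrative): with -prune=550 the target is 550 MiB. Once the
 * blk/rev files, plus a one-chunk allocation buffer, reach that target, whole
 * files whose newest block is more than MIN_BLOCKS_TO_KEEP below the tip are
 * queued for deletion.
 *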
* @param[out] setFilesToPrune The set of file indices that can be unlinked
* will be returned
*/
static void FindFilesToPrune(std::set<int> &setFilesToPrune,
uint64_t nPruneAfterHeight) {
LOCK2(cs_main, cs_LastBlockFile);
if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
return;
}
if (uint64_t(chainActive.Tip()->nHeight) <= nPruneAfterHeight) {
return;
}
unsigned int nLastBlockWeCanPrune =
chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files,
// so we should leave a buffer under our target to account for another
// allocation before the next pruning.
uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
uint64_t nBytesToPrune;
int count = 0;
if (nCurrentUsage + nBuffer >= nPruneTarget) {
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
vinfoBlockFile[fileNumber].nUndoSize;
if (vinfoBlockFile[fileNumber].nSize == 0) {
continue;
}
// are we below our target?
if (nCurrentUsage + nBuffer < nPruneTarget) {
break;
}
// don't prune files that could have a block within
// MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
continue;
}
PruneOneBlockFile(fileNumber);
// Queue up the files for removal
setFilesToPrune.insert(fileNumber);
nCurrentUsage -= nBytesToPrune;
count++;
}
}
LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
"max_prune_height=%d removed %d blk/rev pairs\n",
nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
nLastBlockWeCanPrune, count);
}
bool CheckDiskSpace(uint64_t nAdditionalBytes) {
uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available;
// Check for nMinDiskSpace bytes (currently 50MB)
if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) {
return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
}
return true;
}
static FILE *OpenDiskFile(const CDiskBlockPos &pos, const char *prefix,
bool fReadOnly) {
if (pos.IsNull()) {
return nullptr;
}
fs::path path = GetBlockPosFilename(pos, prefix);
fs::create_directories(path.parent_path());
FILE *file = fsbridge::fopen(path, "rb+");
if (!file && !fReadOnly) {
file = fsbridge::fopen(path, "wb+");
}
if (!file) {
LogPrintf("Unable to open file %s\n", path.string());
return nullptr;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
LogPrintf("Unable to seek to position %u of %s\n", pos.nPos,
path.string());
fclose(file);
return nullptr;
}
}
return file;
}
FILE *OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "blk", fReadOnly);
}
/** Open an undo file (rev?????.dat) */
static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "rev", fReadOnly);
}
fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) {
return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex *InsertBlockIndex(uint256 hash) {
if (hash.IsNull()) {
return nullptr;
}
// Return existing
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end()) {
return (*mi).second;
}
// Create new
CBlockIndex *pindexNew = new CBlockIndex();
if (!pindexNew) {
throw std::runtime_error(std::string(__func__) +
": new CBlockIndex failed");
}
mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
static bool LoadBlockIndexDB(const Config &config) {
if (!pblocktree->LoadBlockIndexGuts(config, InsertBlockIndex)) {
return false;
}
boost::this_thread::interruption_point();
// Calculate nChainWork
    std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
    for (const std::pair<const uint256, CBlockIndex *> &item : mapBlockIndex) {
CBlockIndex *pindex = item.second;
vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
}
sort(vSortedByHeight.begin(), vSortedByHeight.end());
    for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
CBlockIndex *pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
GetBlockProof(*pindex);
pindex->nTimeMax =
(pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
: pindex->nTime);
// We can link the chain of blocks for which we've received transactions
// at some point. Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
if (pindex->pprev) {
if (pindex->pprev->nChainTx) {
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
} else {
pindex->nChainTx = 0;
mapBlocksUnlinked.insert(
std::make_pair(pindex->pprev, pindex));
}
} else {
pindex->nChainTx = pindex->nTx;
}
}
if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
(pindex->nChainTx || pindex->pprev == nullptr)) {
setBlockIndexCandidates.insert(pindex);
}
if (pindex->nStatus.isInvalid() &&
(!pindexBestInvalid ||
pindex->nChainWork > pindexBestInvalid->nChainWork)) {
pindexBestInvalid = pindex;
}
if (pindex->pprev) {
pindex->BuildSkip();
}
if (pindex->IsValid(BlockValidity::TREE) &&
(pindexBestHeader == nullptr ||
CBlockIndexWorkComparator()(pindexBestHeader, pindex))) {
pindexBestHeader = pindex;
}
}
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
vinfoBlockFile.resize(nLastBlockFile + 1);
LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
}
LogPrintf("%s: last block file info: %s\n", __func__,
vinfoBlockFile[nLastBlockFile].ToString());
for (int nFile = nLastBlockFile + 1; true; nFile++) {
CBlockFileInfo info;
if (pblocktree->ReadBlockFileInfo(nFile, info)) {
vinfoBlockFile.push_back(info);
} else {
break;
}
}
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<const uint256, CBlockIndex *> &item : mapBlockIndex) {
CBlockIndex *pindex = item.second;
if (pindex->nStatus.hasData()) {
setBlkDataFiles.insert(pindex->nFile);
}
}
for (const int i : setBlkDataFiles) {
CDiskBlockPos pos(i, 0);
if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)
.IsNull()) {
return false;
}
}
// Check whether we have ever pruned block & undo files
pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
if (fHavePruned) {
LogPrintf(
"LoadBlockIndexDB(): Block files have previously been pruned\n");
}
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
fReindex |= fReindexing;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__,
fTxIndex ? "enabled" : "disabled");
return true;
}
void LoadChainTip(const CChainParams &chainparams) {
if (chainActive.Tip() &&
chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) {
return;
}
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end()) {
return;
}
chainActive.SetTip(it->second);
PruneBlockIndexCandidates();
LogPrintf(
"Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
}
CVerifyDB::CVerifyDB() {
uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}
CVerifyDB::~CVerifyDB() {
uiInterface.ShowProgress("", 100);
}
bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
int nCheckLevel, int nCheckDepth) {
LOCK(cs_main);
if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) {
return true;
}
// Verify blocks in the best chain
if (nCheckDepth <= 0) {
// suffices until the year 19000
nCheckDepth = 1000000000;
}
if (nCheckDepth > chainActive.Height()) {
nCheckDepth = chainActive.Height();
}
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
nCheckLevel);
CCoinsViewCache coins(coinsview);
CBlockIndex *pindexState = chainActive.Tip();
CBlockIndex *pindexFailure = nullptr;
int nGoodTransactions = 0;
CValidationState state;
int reportDone = 0;
LogPrintf("[0%%]...");
for (CBlockIndex *pindex = chainActive.Tip(); pindex && pindex->pprev;
pindex = pindex->pprev) {
boost::this_thread::interruption_point();
int percentageDone = std::max(
1, std::min(
99,
(int)(((double)(chainActive.Height() - pindex->nHeight)) /
(double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone / 10) {
// report every 10% step
LogPrintf("[%d%%]...", percentageDone);
reportDone = percentageDone / 10;
}
uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
if (pindex->nHeight < chainActive.Height() - nCheckDepth) {
break;
}
if (fPruneMode && !pindex->nStatus.hasData()) {
// If pruning, only go back as far as we have data.
LogPrintf("VerifyDB(): block verification stopping at height %d "
"(pruning, no data)\n",
pindex->nHeight);
break;
}
CBlock block;
// check level 0: read from disk
if (!ReadBlockFromDisk(block, pindex, config)) {
return error(
"VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(config, block, state)) {
return error("%s: *** found bad block at %d, hash=%s (%s)\n",
__func__, pindex->nHeight,
pindex->GetBlockHash().ToString(),
FormatStateMessage(state));
}
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (!pos.IsNull()) {
if (!UndoReadFromDisk(undo, pos,
pindex->pprev->GetBlockHash())) {
return error(
"VerifyDB(): *** found bad undo data at %d, hash=%s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
}
// check level 3: check for inconsistencies during memory-only
// disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState &&
(coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <=
nCoinCacheUsage) {
assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
return error("VerifyDB(): *** irrecoverable inconsistency in "
"block data at %d, hash=%s",
pindex->nHeight,
pindex->GetBlockHash().ToString());
}
pindexState = pindex->pprev;
if (res == DISCONNECT_UNCLEAN) {
nGoodTransactions = 0;
pindexFailure = pindex;
} else {
nGoodTransactions += block.vtx.size();
}
}
if (ShutdownRequested()) {
return true;
}
}
if (pindexFailure) {
return error("VerifyDB(): *** coin database inconsistencies found "
"(last %i blocks, %i good transactions before that)\n",
chainActive.Height() - pindexFailure->nHeight + 1,
nGoodTransactions);
}
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
uiInterface.ShowProgress(
_("Verifying blocks..."),
std::max(1,
std::min(99,
100 - (int)(((double)(chainActive.Height() -
pindex->nHeight)) /
(double)nCheckDepth * 50))));
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex, config)) {
return error(
"VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
if (!ConnectBlock(config, block, state, pindex, coins)) {
return error(
"VerifyDB(): *** found unconnectable block at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
}
LogPrintf("[DONE].\n");
LogPrintf("No coin database inconsistencies in last %i blocks (%i "
"transactions)\n",
chainActive.Height() - pindexState->nHeight, nGoodTransactions);
return true;
}
/**
* Apply the effects of a block on the utxo cache, ignoring that it may already
* have been applied.
*/
static bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &view,
const Config &config) {
// TODO: merge with ConnectBlock
CBlock block;
if (!ReadBlockFromDisk(block, pindex, config)) {
return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
for (const CTransactionRef &tx : block.vtx) {
// Pass check = true as every addition may be an overwrite.
AddCoins(view, *tx, pindex->nHeight, true);
}
for (const CTransactionRef &tx : block.vtx) {
if (tx->IsCoinBase()) {
continue;
}
for (const CTxIn &txin : tx->vin) {
view.SpendCoin(txin.prevout);
}
}
return true;
}
bool ReplayBlocks(const Config &config, CCoinsView *view) {
LOCK(cs_main);
CCoinsViewCache cache(view);
    std::vector<uint256> hashHeads = view->GetHeadBlocks();
if (hashHeads.empty()) {
// We're already in a consistent state.
return true;
}
if (hashHeads.size() != 2) {
return error("ReplayBlocks(): unknown inconsistent state");
}
uiInterface.ShowProgress(_("Replaying blocks..."), 0);
LogPrintf("Replaying blocks\n");
// Old tip during the interrupted flush.
const CBlockIndex *pindexOld = nullptr;
// New tip during the interrupted flush.
const CBlockIndex *pindexNew;
// Latest block common to both the old and the new tip.
const CBlockIndex *pindexFork = nullptr;
if (mapBlockIndex.count(hashHeads[0]) == 0) {
return error(
"ReplayBlocks(): reorganization to unknown block requested");
}
pindexNew = mapBlockIndex[hashHeads[0]];
if (!hashHeads[1].IsNull()) {
// The old tip is allowed to be 0, indicating it's the first flush.
if (mapBlockIndex.count(hashHeads[1]) == 0) {
return error(
"ReplayBlocks(): reorganization from unknown block requested");
}
pindexOld = mapBlockIndex[hashHeads[1]];
pindexFork = LastCommonAncestor(pindexOld, pindexNew);
assert(pindexFork != nullptr);
}
// Rollback along the old branch.
while (pindexOld != pindexFork) {
if (pindexOld->nHeight > 0) {
// Never disconnect the genesis block.
CBlock block;
if (!ReadBlockFromDisk(block, pindexOld, config)) {
return error("RollbackBlock(): ReadBlockFromDisk() failed at "
"%d, hash=%s",
pindexOld->nHeight,
pindexOld->GetBlockHash().ToString());
}
LogPrintf("Rolling back %s (%i)\n",
pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
if (res == DISCONNECT_FAILED) {
return error(
"RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
}
// If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO
// was deleted, or an existing UTXO was overwritten. It corresponds
            // to cases where the block-to-be-disconnected never had all its
// operations applied to the UTXO set. However, as both writing a
// UTXO and deleting a UTXO are idempotent operations, the result is
// still a version of the UTXO set with the effects of that block
// undone.
}
pindexOld = pindexOld->pprev;
}
// Roll forward from the forking point to the new tip.
int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
++nHeight) {
const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
LogPrintf("Rolling forward %s (%i)\n",
pindex->GetBlockHash().ToString(), nHeight);
if (!RollforwardBlock(pindex, cache, config)) {
return false;
}
}
cache.SetBestBlock(pindexNew->GetBlockHash());
cache.Flush();
uiInterface.ShowProgress("", 100);
return true;
}
bool RewindBlockIndex(const Config &config) {
LOCK(cs_main);
    const CChainParams &params = config.GetChainParams();
int nHeight = chainActive.Height() + 1;
// nHeight is now the height of the first insufficiently-validated block, or
// tipheight + 1
CValidationState state;
CBlockIndex *pindex = chainActive.Tip();
while (chainActive.Height() >= nHeight) {
if (fPruneMode && !chainActive.Tip()->nStatus.hasData()) {
// If pruning, don't try rewinding past the HAVE_DATA point; since
// older blocks can't be served anyway, there's no need to walk
// further, and trying to DisconnectTip() will fail (and require a
// needless reindex/redownload of the blockchain).
break;
}
if (!DisconnectTip(config, state, nullptr)) {
return error(
"RewindBlockIndex: unable to disconnect block at height %i",
pindex->nHeight);
}
// Occasionally flush state to disk.
if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) {
return false;
}
}
// Reduce validity flag and have-data flags.
// We do this after actual disconnecting, otherwise we'll end up writing the
// lack of data to disk before writing the chainstate, resulting in a
// failure to continue if interrupted.
    for (const std::pair<const uint256, CBlockIndex *> &p : mapBlockIndex) {
CBlockIndex *pindexIter = p.second;
if (pindexIter->IsValid(BlockValidity::TRANSACTIONS) &&
pindexIter->nChainTx) {
setBlockIndexCandidates.insert(pindexIter);
}
}
PruneBlockIndexCandidates();
CheckBlockIndex(params.GetConsensus());
if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) {
return false;
}
return true;
}
// May NOT be used after any connections are up as much of the peer-processing
// logic assumes a consistent block index state
void UnloadBlockIndex() {
LOCK(cs_main);
setBlockIndexCandidates.clear();
chainActive.SetTip(nullptr);
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
mempool.clear();
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
nBlockSequenceId = 1;
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
versionbitscache.Clear();
for (BlockMap::value_type &entry : mapBlockIndex) {
delete entry.second;
}
mapBlockIndex.clear();
fHavePruned = false;
}
bool LoadBlockIndex(const Config &config) {
// Load block index from databases
if (!fReindex && !LoadBlockIndexDB(config)) {
return false;
}
return true;
}
bool InitBlockIndex(const Config &config) {
LOCK(cs_main);
// Check whether we're already initialized
if (chainActive.Genesis() != nullptr) {
return true;
}
// Use the provided setting for -txindex in the new database
fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX);
pblocktree->WriteFlag("txindex", fTxIndex);
LogPrintf("Initializing databases...\n");
// Only add the genesis block if not reindexing (in which case we reuse the
// one already on disk)
if (!fReindex) {
try {
const CChainParams &chainparams = config.GetChainParams();
            CBlock &block = const_cast<CBlock &>(chainparams.GenesisBlock());
// Start new block file
unsigned int nBlockSize =
::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
CValidationState state;
if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0,
block.GetBlockTime())) {
return error("LoadBlockIndex(): FindBlockPos failed");
}
if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
return error(
"LoadBlockIndex(): writing genesis block to disk failed");
}
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) {
return error("LoadBlockIndex(): genesis block not accepted");
}
} catch (const std::runtime_error &e) {
return error(
"LoadBlockIndex(): failed to initialize block database: %s",
e.what());
}
}
return true;
}
bool LoadExternalBlockFile(const Config &config, FILE *fileIn,
CDiskBlockPos *dbp) {
// Map of disk positions for blocks with unknown parent (only used for
// reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
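    // e.g. if, during -reindex, block B is encountered before its parent A,
    // B's disk position is parked here under A's hash and B is accepted as
    // soon as A itself has been processed (see the queue-draining loop below).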
int64_t nStart = GetTimeMillis();
const CChainParams &chainparams = config.GetChainParams();
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile
// destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
// so any transaction can fit in the buffer.
CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
boost::this_thread::interruption_point();
blkdat.SetPos(nRewind);
// Start one byte further next time, in case of failure.
nRewind++;
// Remove former limit.
blkdat.SetLimit();
unsigned int nSize = 0;
try {
// Locate a header.
uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
blkdat.FindByte(chainparams.DiskMagic()[0]);
nRewind = blkdat.GetPos() + 1;
blkdat >> FLATDATA(buf);
if (memcmp(buf, std::begin(chainparams.DiskMagic()),
CMessageHeader::MESSAGE_START_SIZE)) {
continue;
}
// Read size.
blkdat >> nSize;
if (nSize < 80) {
continue;
}
} catch (const std::exception &) {
// No valid block header found; don't complain.
break;
}
try {
// read block
uint64_t nBlockPos = blkdat.GetPos();
if (dbp) {
dbp->nPos = nBlockPos;
}
blkdat.SetLimit(nBlockPos + nSize);
blkdat.SetPos(nBlockPos);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock &block = *pblock;
blkdat >> block;
nRewind = blkdat.GetPos();
// detect out of order blocks, and store them for later
uint256 hash = block.GetHash();
if (hash != chainparams.GetConsensus().hashGenesisBlock &&
mapBlockIndex.find(block.hashPrevBlock) ==
mapBlockIndex.end()) {
LogPrint(BCLog::REINDEX,
"%s: Out of order block %s, parent %s not known\n",
__func__, hash.ToString(),
block.hashPrevBlock.ToString());
if (dbp) {
mapBlocksUnknownParent.insert(
std::make_pair(block.hashPrevBlock, *dbp));
}
continue;
}
// process in case the block isn't known yet
if (mapBlockIndex.count(hash) == 0 ||
!mapBlockIndex[hash]->nStatus.hasData()) {
LOCK(cs_main);
CValidationState state;
if (AcceptBlock(config, pblock, state, nullptr, true, dbp,
nullptr)) {
nLoaded++;
}
if (state.IsError()) {
break;
}
} else if (hash !=
chainparams.GetConsensus().hashGenesisBlock &&
mapBlockIndex[hash]->nHeight % 1000 == 0) {
LogPrint(
BCLog::REINDEX,
"Block Import: already had block %s at height %d\n",
hash.ToString(), mapBlockIndex[hash]->nHeight);
}
// Activate the genesis block so normal node progress can
// continue
if (hash == chainparams.GetConsensus().hashGenesisBlock) {
CValidationState state;
if (!ActivateBestChain(config, state)) {
break;
}
}
NotifyHeaderTip();
// Recursively process earlier encountered successors of this
// block
                std::deque<uint256> queue;
queue.push_back(hash);
while (!queue.empty()) {
uint256 head = queue.front();
queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator,
                              std::multimap<uint256, CDiskBlockPos>::iterator>
                        range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it =
                            range.first;
                        std::shared_ptr<CBlock> pblockrecursive =
                            std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second,
config)) {
LogPrint(
BCLog::REINDEX,
"%s: Processing out of order child %s of %s\n",
__func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
CValidationState dummy;
if (AcceptBlock(config, pblockrecursive, dummy,
nullptr, true, &it->second,
nullptr)) {
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
}
}
range.first++;
mapBlocksUnknownParent.erase(it);
NotifyHeaderTip();
}
}
} catch (const std::exception &e) {
LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
e.what());
}
}
} catch (const std::runtime_error &e) {
AbortNode(std::string("System error: ") + e.what());
}
if (nLoaded > 0) {
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
GetTimeMillis() - nStart);
}
return nLoaded > 0;
}
static void CheckBlockIndex(const Consensus::Params &consensusParams) {
if (!fCheckBlockIndex) {
return;
}
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex
// before ActivateBestChain, so we have the genesis block in mapBlockIndex
// but no active chain. (A few of the tests when iterating the block tree
// require that chainActive has been initialized.)
if (chainActive.Height() < 0) {
assert(mapBlockIndex.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex *, CBlockIndex *> forward;
    for (const std::pair<const uint256, CBlockIndex *> &it : mapBlockIndex) {
forward.emplace(it.second->pprev, it.second);
}
assert(forward.size() == mapBlockIndex.size());
    std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
              std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
        rangeGenesis = forward.equal_range(nullptr);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
// There is only one index entry with parent nullptr.
assert(rangeGenesis.first == rangeGenesis.second);
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from genesis
// block being explored which are the first to have certain properties.
size_t nNodes = 0;
int nHeight = 0;
// Oldest ancestor of pindex which is invalid.
CBlockIndex *pindexFirstInvalid = nullptr;
// Oldest ancestor of pindex which is parked.
CBlockIndex *pindexFirstParked = nullptr;
// Oldest ancestor of pindex which does not have data available.
CBlockIndex *pindexFirstMissing = nullptr;
// Oldest ancestor of pindex for which nTx == 0.
CBlockIndex *pindexFirstNeverProcessed = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTreeValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotChainValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotScriptsValid = nullptr;
while (pindex != nullptr) {
nNodes++;
if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
pindexFirstInvalid = pindex;
}
if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
pindexFirstParked = pindex;
}
if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) {
pindexFirstMissing = pindex;
}
if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
pindexFirstNeverProcessed = pindex;
}
if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TREE) {
pindexFirstNotTreeValid = pindex;
}
if (pindex->pprev != nullptr &&
pindexFirstNotTransactionsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
pindexFirstNotTransactionsValid = pindex;
}
if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
pindexFirstNotChainValid = pindex;
}
if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
pindexFirstNotScriptsValid = pindex;
}
// Begin: actual consistency checks.
if (pindex->pprev == nullptr) {
// Genesis block checks.
// Genesis block's hash must match.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
// The current active chain's genesis block must be this block.
assert(pindex == chainActive.Genesis());
}
if (pindex->nChainTx == 0) {
// nSequenceId can't be set positive for blocks that aren't linked
// (negative is used for preciousblock)
assert(pindex->nSequenceId <= 0);
}
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
// not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
// (or VALID_TRANSACTIONS) if no pruning has occurred.
if (!fHavePruned) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx
// > 0
assert(!pindex->nStatus.hasData() == (pindex->nTx == 0));
assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else if (pindex->nStatus.hasData()) {
// If we have pruned, then we can only say that HAVE_DATA implies
// nTx > 0
assert(pindex->nTx > 0);
}
if (pindex->nStatus.hasUndo()) {
assert(pindex->nStatus.hasData());
}
// This is pruning-independent.
assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) ==
(pindex->nTx > 0));
// All parents having had data (at some point) is equivalent to all
// parents being VALID_TRANSACTIONS, which is equivalent to nChainTx
// being set.
// nChainTx != 0 is used to signal that all parent blocks have been
// processed (but may have been pruned).
assert((pindexFirstNeverProcessed != nullptr) ==
(pindex->nChainTx == 0));
assert((pindexFirstNotTransactionsValid != nullptr) ==
(pindex->nChainTx == 0));
// nHeight must be consistent.
assert(pindex->nHeight == nHeight);
// For every block except the genesis block, the chainwork must be
// larger than the parent's.
assert(pindex->pprev == nullptr ||
pindex->nChainWork >= pindex->pprev->nChainWork);
// The pskip pointer must point back for all but the first 2 blocks.
assert(nHeight < 2 ||
(pindex->pskip && (pindex->pskip->nHeight < nHeight)));
// All mapBlockIndex entries must at least be TREE valid
assert(pindexFirstNotTreeValid == nullptr);
if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
// TREE valid implies all parents are TREE valid
assert(pindexFirstNotTreeValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
// CHAIN valid implies all parents are CHAIN valid
assert(pindexFirstNotChainValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
// SCRIPTS valid implies all parents are SCRIPTS valid
assert(pindexFirstNotScriptsValid == nullptr);
}
if (pindexFirstInvalid == nullptr) {
// Checks for not-invalid blocks.
// The failed mask cannot be set for blocks without invalid parents.
assert(!pindex->nStatus.isInvalid());
}
        if (pindexFirstParked == nullptr) {
            // Checks for not-parked blocks.
            // The parked mask cannot be set for blocks without parked
            // parents.
}
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) &&
pindexFirstNeverProcessed == nullptr) {
if (pindexFirstInvalid == nullptr) {
// If this block sorts at least as good as the current tip and
// is valid and we have all data for its parents, it must be in
// setBlockIndexCandidates. chainActive.Tip() must also be there
// even if some data has been pruned.
if (pindexFirstMissing == nullptr ||
pindex == chainActive.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block
// was in setBlockIndexCandidates but had to be removed because
// of the missing data. In this case it must be in
// mapBlocksUnlinked -- see test below.
}
} else {
// If this block sorts worse than the current tip or some ancestor's
// block has never been seen, it cannot be in
// setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
            rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
if (rangeUnlinked.first->second == pindex) {
foundInUnlinked = true;
break;
}
rangeUnlinked.first++;
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed != nullptr &&
pindexFirstInvalid == nullptr) {
// If this block has block data available, some parent was never
// received, and has no invalid parents, it must be in
// mapBlocksUnlinked.
assert(foundInUnlinked);
}
if (!pindex->nStatus.hasData()) {
// Can't be in mapBlocksUnlinked if we don't HAVE_DATA
assert(!foundInUnlinked);
}
if (pindexFirstMissing == nullptr) {
// We aren't missing data for any parent -- cannot be in
// mapBlocksUnlinked.
assert(!foundInUnlinked);
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed == nullptr &&
pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents
// at some point, but we're currently missing data for some parent.
// We must have pruned.
assert(fHavePruned);
// This block may have entered mapBlocksUnlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between chainActive and the
// tip.
            // So if this block is itself better than chainActive.Tip() and it
            // wasn't in setBlockIndexCandidates, then it must be in
            // mapBlocksUnlinked.
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) &&
setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
}
}
// Perhaps too slow
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
// End: actual consistency checks.
// Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
            range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
pindex = range.first->second;
nHeight++;
continue;
}
// This is a leaf node. Move upwards until we reach a node of which we
// have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the
// corresponding variable.
if (pindex == pindexFirstInvalid) {
pindexFirstInvalid = nullptr;
}
if (pindex == pindexFirstParked) {
pindexFirstParked = nullptr;
}
if (pindex == pindexFirstMissing) {
pindexFirstMissing = nullptr;
}
if (pindex == pindexFirstNeverProcessed) {
pindexFirstNeverProcessed = nullptr;
}
if (pindex == pindexFirstNotTreeValid) {
pindexFirstNotTreeValid = nullptr;
}
if (pindex == pindexFirstNotTransactionsValid) {
pindexFirstNotTransactionsValid = nullptr;
}
if (pindex == pindexFirstNotChainValid) {
pindexFirstNotChainValid = nullptr;
}
if (pindex == pindexFirstNotScriptsValid) {
pindexFirstNotScriptsValid = nullptr;
}
// Find our parent.
CBlockIndex *pindexPar = pindex->pprev;
// Find which child we just visited.
            std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                      std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                rangePar = forward.equal_range(pindexPar);
while (rangePar.first->second != pindex) {
// Our parent must have at least the node we're coming from as
// child.
assert(rangePar.first != rangePar.second);
rangePar.first++;
}
// Proceed to the next one.
rangePar.first++;
if (rangePar.first != rangePar.second) {
// Move to the sibling.
pindex = rangePar.first->second;
break;
} else {
// Move up further.
pindex = pindexPar;
nHeight--;
continue;
}
}
}
// Check that we actually traversed the entire map.
assert(nNodes == forward.size());
}
std::string CBlockFileInfo::ToString() const {
return strprintf(
"CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
nBlocks, nSize, nHeightFirst, nHeightLast,
DateTimeStrFormat("%Y-%m-%d", nTimeFirst),
DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
CBlockFileInfo *GetBlockFileInfo(size_t n) {
return &vinfoBlockFile.at(n);
}
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
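// On-disk layout of mempool.dat (version 1), as read below in LoadMempool()
// and written by DumpMempool(): a uint64 version, a uint64 transaction count,
// then for each transaction its serialized form, an int64 entry time and an
// int64 fee delta, followed by a map of outstanding fee deltas.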
bool LoadMempool(const Config &config) {
int64_t nExpiryTimeout =
gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
LogPrintf(
"Failed to open mempool file from disk. Continuing anyway.\n");
return false;
}
int64_t count = 0;
int64_t skipped = 0;
int64_t failed = 0;
int64_t nNow = GetTime();
try {
uint64_t version;
file >> version;
if (version != MEMPOOL_DUMP_VERSION) {
return false;
}
uint64_t num;
file >> num;
double prioritydummy = 0;
while (num--) {
CTransactionRef tx;
int64_t nTime;
int64_t nFeeDelta;
file >> tx;
file >> nTime;
file >> nFeeDelta;
Amount amountdelta(nFeeDelta);
if (amountdelta != Amount(0)) {
mempool.PrioritiseTransaction(tx->GetId(),
tx->GetId().ToString(),
prioritydummy, amountdelta);
}
CValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
AcceptToMemoryPoolWithTime(config, mempool, state, tx, true,
nullptr, nTime);
if (state.IsValid()) {
++count;
} else {
++failed;
}
} else {
++skipped;
}
if (ShutdownRequested()) {
return false;
}
}
        std::map<uint256, Amount> mapDeltas;
file >> mapDeltas;
for (const auto &i : mapDeltas) {
mempool.PrioritiseTransaction(i.first, i.first.ToString(),
prioritydummy, i.second);
}
} catch (const std::exception &e) {
LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing "
"anyway.\n",
e.what());
return false;
}
LogPrintf("Imported mempool transactions from disk: %i successes, %i "
"failed, %i expired\n",
count, failed, skipped);
return true;
}
void DumpMempool(void) {
int64_t start = GetTimeMicros();
    std::map<uint256, Amount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;
{
LOCK(mempool.cs);
for (const auto &i : mempool.mapDeltas) {
mapDeltas[i.first] = i.second.second;
}
vinfo = mempool.infoAll();
}
int64_t mid = GetTimeMicros();
try {
FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
if (!filestr) {
return;
}
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
uint64_t version = MEMPOOL_DUMP_VERSION;
file << version;
file << uint64_t(vinfo.size());
for (const auto &i : vinfo) {
file << *(i.tx);
file << int64_t(i.nTime);
file << i.nFeeDelta;
mapDeltas.erase(i.tx->GetId());
}
file << mapDeltas;
FileCommit(file.Get());
file.fclose();
RenameOver(GetDataDir() / "mempool.dat.new",
GetDataDir() / "mempool.dat");
int64_t last = GetTimeMicros();
LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n",
(mid - start) * 0.000001, (last - mid) * 0.000001);
} catch (const std::exception &e) {
LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
}
}
//! Guess how far we are in the verification process at the given block index
double GuessVerificationProgress(const ChainTxData &data, CBlockIndex *pindex) {
if (pindex == nullptr) {
return 0.0;
}
int64_t nNow = time(nullptr);
double fTxTotal;
if (pindex->nChainTx <= data.nTxCount) {
fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
} else {
fTxTotal =
pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
}
return pindex->nChainTx / fTxTotal;
}
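// Illustrative arithmetic (editorial): if the tip's nChainTx is 250 million
// and fTxTotal extrapolates to 500 million transactions, the function above
// reports a progress of 0.5.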
class CMainCleanup {
public:
CMainCleanup() {}
~CMainCleanup() {
// block headers
        for (const std::pair<const uint256, CBlockIndex *> &it :
             mapBlockIndex) {
delete it.second;
}
mapBlockIndex.clear();
}
} instance_of_cmaincleanup;