diff --git a/src/index/base.cpp b/src/index/base.cpp index 48d110e04..3c6c60fe8 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -1,390 +1,396 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include // For g_chainman #include #include constexpr char DB_BEST_BLOCK = 'B'; constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds template static void FatalError(const char *fmt, const Args &...args) { std::string strMessage = tfm::format(fmt, args...); SetMiscWarning(Untranslated(strMessage)); LogPrintf("*** %s\n", strMessage); AbortError(_("A fatal internal error occurred, see debug.log for details")); StartShutdown(); } BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) : CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate) {} bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const { bool success = Read(DB_BEST_BLOCK, locator); if (!success) { locator.SetNull(); } return success; } void BaseIndex::DB::WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator) { batch.Write(DB_BEST_BLOCK, locator); } BaseIndex::~BaseIndex() { Interrupt(); Stop(); } bool BaseIndex::Init() { CBlockLocator locator; if (!GetDB().ReadBestBlock(locator)) { locator.SetNull(); } LOCK(cs_main); + CChain &active_chain = m_chainstate->m_chain; if (locator.IsNull()) { m_best_block_index = nullptr; } else { - m_best_block_index = g_chainman.m_blockman.FindForkInGlobalIndex( - ::ChainActive(), locator); + m_best_block_index = m_chainstate->m_blockman.FindForkInGlobalIndex( + active_chain, locator); } - m_synced = m_best_block_index.load() == ::ChainActive().Tip(); + m_synced = m_best_block_index.load() == active_chain.Tip(); if (!m_synced) { bool prune_violation = false; if (!m_best_block_index) { // index is not built yet // make sure we have all block data back to the genesis - const CBlockIndex *block = ::ChainActive().Tip(); + const CBlockIndex *block = active_chain.Tip(); while (block->pprev && block->pprev->nStatus.hasData()) { block = block->pprev; } - prune_violation = block != ::ChainActive().Genesis(); + prune_violation = block != active_chain.Genesis(); } // in case the index has a best block set and is not fully synced // check if we have the required blocks to continue building the index else { const CBlockIndex *block_to_test = m_best_block_index.load(); - if (!ChainActive().Contains(block_to_test)) { + if (!active_chain.Contains(block_to_test)) { // if the bestblock is not part of the mainchain, find the fork // and make sure we have all data down to the fork - block_to_test = ::ChainActive().FindFork(block_to_test); + block_to_test = active_chain.FindFork(block_to_test); } - const CBlockIndex *block = ::ChainActive().Tip(); + const CBlockIndex *block = active_chain.Tip(); prune_violation = true; // check backwards from the tip if we have all block data until we // reach the indexes bestblock while (block_to_test && block && block->nStatus.hasData()) { if (block_to_test == block) { prune_violation = false; break; } // block->pprev must exist at this point, since block_to_test is // part of the chain and thus must be encountered when going // backwards from the tip assert(block->pprev); block = block->pprev; } } if 
(prune_violation) { // throw error and graceful shutdown if we can't build the index FatalError("%s: %s best block of the index goes beyond pruned " "data. Please disable the index or reindex (which will " "download the whole blockchain again)", __func__, GetName()); return false; } } return true; } -static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev) +static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev, + CChain &chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (!pindex_prev) { - return ::ChainActive().Genesis(); + return chain.Genesis(); } - const CBlockIndex *pindex = ::ChainActive().Next(pindex_prev); + const CBlockIndex *pindex = chain.Next(pindex_prev); if (pindex) { return pindex; } - return ::ChainActive().Next(::ChainActive().FindFork(pindex_prev)); + return chain.Next(chain.FindFork(pindex_prev)); } void BaseIndex::ThreadSync() { const CBlockIndex *pindex = m_best_block_index.load(); if (!m_synced) { auto &consensus_params = GetConfig().GetChainParams().GetConsensus(); int64_t last_log_time = 0; int64_t last_locator_write_time = 0; while (true) { if (m_interrupt) { m_best_block_index = pindex; // No need to handle errors in Commit. If it fails, the error // will be already be logged. The best way to recover is to // continue, as index cannot be corrupted by a missed commit to // disk for an advanced index state. Commit(); return; } { LOCK(cs_main); - const CBlockIndex *pindex_next = NextSyncBlock(pindex); + const CBlockIndex *pindex_next = + NextSyncBlock(pindex, m_chainstate->m_chain); if (!pindex_next) { m_best_block_index = pindex; m_synced = true; // No need to handle errors in Commit. See rationale above. Commit(); break; } if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { FatalError( "%s: Failed to rewind index %s to a previous chain tip", __func__, GetName()); return; } pindex = pindex_next; } int64_t current_time = GetTime(); if (last_log_time + SYNC_LOG_INTERVAL < current_time) { LogPrintf("Syncing %s with block chain from height %d\n", GetName(), pindex->nHeight); last_log_time = current_time; } if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) { m_best_block_index = pindex; last_locator_write_time = current_time; // No need to handle errors in Commit. See rationale above. Commit(); } CBlock block; if (!ReadBlockFromDisk(block, pindex, consensus_params)) { FatalError("%s: Failed to read block %s from disk", __func__, pindex->GetBlockHash().ToString()); return; } if (!WriteBlock(block, pindex)) { FatalError("%s: Failed to write block %s to index database", __func__, pindex->GetBlockHash().ToString()); return; } } } if (pindex) { LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight); } else { LogPrintf("%s is enabled\n", GetName()); } } bool BaseIndex::Commit() { CDBBatch batch(GetDB()); if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) { return error("%s: Failed to commit latest %s state", __func__, GetName()); } return true; } bool BaseIndex::CommitInternal(CDBBatch &batch) { LOCK(cs_main); - GetDB().WriteBestBlock(batch, - ::ChainActive().GetLocator(m_best_block_index)); + GetDB().WriteBestBlock( + batch, m_chainstate->m_chain.GetLocator(m_best_block_index)); return true; } bool BaseIndex::Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip) { assert(current_tip == m_best_block_index); assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); // In the case of a reorg, ensure persisted block locator is not stale. 
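// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] NextSyncBlock()
// earlier in this hunk picks the next block for ThreadSync() to index:
// genesis when nothing is indexed yet, the successor on the active chain
// otherwise, and the block after the fork point when the previously indexed
// block was reorged out. A minimal, self-contained model of that selection
// (all type and function names here are invented for illustration):
#include <cstddef>
#include <vector>

struct ToyBlock {
    int height;
    const ToyBlock *pprev;
};

// `active` is the toy active chain ordered by height; `prev` is the last
// indexed block (possibly on a stale branch). Returns the next block to
// index, or nullptr when the index is caught up.
const ToyBlock *ToyNextSyncBlock(const std::vector<const ToyBlock *> &active,
                                 const ToyBlock *prev) {
    if (!prev) {
        return active.empty() ? nullptr : active.front(); // start at genesis
    }
    // Walk back from `prev` until we reach a block that is still on the
    // active chain; that block is the fork point (or `prev` itself if it
    // was not reorged out).
    const ToyBlock *fork = prev;
    while (fork && (static_cast<size_t>(fork->height) >= active.size() ||
                    active[static_cast<size_t>(fork->height)] != fork)) {
        fork = fork->pprev;
    }
    const size_t next_height =
        fork ? static_cast<size_t>(fork->height) + 1 : 0;
    return next_height < active.size() ? active[next_height] : nullptr;
}
// ---------------------------------------------------------------------------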
// Pruning has a minimum of 288 blocks-to-keep and getting the index // out of sync may be possible but a user's fault. // In case we reorg beyond the pruned depth, ReadBlockFromDisk would // throw and lead to a graceful shutdown. m_best_block_index = new_tip; if (!Commit()) { // If commit fails, revert the best block index to avoid corruption. m_best_block_index = current_tip; return false; } return true; } void BaseIndex::BlockConnected(const std::shared_ptr &block, const CBlockIndex *pindex) { if (!m_synced) { return; } const CBlockIndex *best_block_index = m_best_block_index.load(); if (!best_block_index) { if (pindex->nHeight != 0) { FatalError("%s: First block connected is not the genesis block " "(height=%d)", __func__, pindex->nHeight); return; } } else { // Ensure block connects to an ancestor of the current best block. This // should be the case most of the time, but may not be immediately after // the sync thread catches up and sets m_synced. Consider the case // where there is a reorg and the blocks on the stale branch are in the // ValidationInterface queue backlog even after the sync thread has // caught up to the new chain tip. In this unlikely event, log a warning // and let the queue clear. if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) { LogPrintf("%s: WARNING: Block %s does not connect to an ancestor " "of known best chain (tip=%s); not updating index\n", __func__, pindex->GetBlockHash().ToString(), best_block_index->GetBlockHash().ToString()); return; } if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) { FatalError("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName()); return; } } if (WriteBlock(*block, pindex)) { m_best_block_index = pindex; } else { FatalError("%s: Failed to write block %s to index", __func__, pindex->GetBlockHash().ToString()); return; } } void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) { if (!m_synced) { return; } const BlockHash &locator_tip_hash = locator.vHave.front(); const CBlockIndex *locator_tip_index; { LOCK(cs_main); locator_tip_index = - g_chainman.m_blockman.LookupBlockIndex(locator_tip_hash); + m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash); } if (!locator_tip_index) { FatalError("%s: First block (hash=%s) in locator was not found", __func__, locator_tip_hash.ToString()); return; } // This checks that ChainStateFlushed callbacks are received after // BlockConnected. The check may fail immediately after the sync thread // catches up and sets m_synced. Consider the case where there is a reorg // and the blocks on the stale branch are in the ValidationInterface queue // backlog even after the sync thread has caught up to the new chain tip. In // this unlikely event, log a warning and let the queue clear. const CBlockIndex *best_block_index = m_best_block_index.load(); if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) { LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known " "best chain (tip=%s); not writing index locator\n", __func__, locator_tip_hash.ToString(), best_block_index->GetBlockHash().ToString()); return; } // No need to handle errors in Commit. If it fails, the error will // already be logged. The best way to recover is to continue, as index // cannot be corrupted by a missed commit to disk for an advanced index // state.
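// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] BlockConnected()
// and ChainStateFlushed() above guard against stale notifications with the
// same test: walk the index's best block back to the candidate's height and
// compare pointers. A simplified, self-contained version of that ancestor
// test (names invented; the real CBlockIndex::GetAncestor() uses skip
// pointers rather than a linear walk):
struct MiniBlockIndex {
    int nHeight{0};
    const MiniBlockIndex *pprev{nullptr};

    const MiniBlockIndex *GetAncestor(int height) const {
        const MiniBlockIndex *walk = this;
        while (walk && walk->nHeight > height) {
            walk = walk->pprev;
        }
        return walk;
    }
};

// True if `candidate` lies on the chain leading to `tip`.
bool IsOnChainOf(const MiniBlockIndex *tip, const MiniBlockIndex *candidate) {
    return tip && candidate &&
           tip->GetAncestor(candidate->nHeight) == candidate;
}
// ---------------------------------------------------------------------------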
Commit(); } bool BaseIndex::BlockUntilSyncedToCurrentChain() const { AssertLockNotHeld(cs_main); if (!m_synced) { return false; } { // Skip the queue-draining stuff if we know we're caught up with // ::ChainActive().Tip(). LOCK(cs_main); - const CBlockIndex *chain_tip = ::ChainActive().Tip(); + const CBlockIndex *chain_tip = m_chainstate->m_chain.Tip(); const CBlockIndex *best_block_index = m_best_block_index.load(); if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) { return true; } } LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName()); SyncWithValidationInterfaceQueue(); return true; } void BaseIndex::Interrupt() { m_interrupt(); } -void BaseIndex::Start() { +void BaseIndex::Start(CChainState &active_chainstate) { + assert(std::addressof(::ChainstateActive()) == + std::addressof(active_chainstate)); + m_chainstate = &active_chainstate; // Need to register this ValidationInterface before running Init(), so that // callbacks are not missed if Init sets m_synced to true. RegisterValidationInterface(this); if (!Init()) { FatalError("%s: %s failed to initialize", __func__, GetName()); return; } m_thread_sync = std::thread(&TraceThread>, GetName(), std::bind(&BaseIndex::ThreadSync, this)); } void BaseIndex::Stop() { UnregisterValidationInterface(this); if (m_thread_sync.joinable()) { m_thread_sync.join(); } } IndexSummary BaseIndex::GetSummary() const { IndexSummary summary{}; summary.name = GetName(); summary.synced = m_synced; summary.best_block_height = m_best_block_index ? m_best_block_index.load()->nHeight : 0; return summary; } diff --git a/src/index/base.h b/src/index/base.h index b533bd0be..34440adc4 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -1,133 +1,136 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_INDEX_BASE_H #define BITCOIN_INDEX_BASE_H #include #include #include #include #include class CBlockIndex; +class CChainState; struct IndexSummary { std::string name; bool synced{false}; int best_block_height{0}; }; /** * Base class for indices of blockchain data. This implements * CValidationInterface and ensures blocks are indexed sequentially according * to their position in the active chain. */ class BaseIndex : public CValidationInterface { protected: /** * The database stores a block locator of the chain the database is synced * to so that the TxIndex can efficiently determine the point it last * stopped at. A locator is used instead of a simple hash of the chain tip * because blocks and block index entries may not be flushed to disk until * after this database is updated. */ class DB : public CDBWrapper { public: DB(const fs::path &path, size_t n_cache_size, bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false); /// Read block locator of the chain that the txindex is in sync with. bool ReadBestBlock(CBlockLocator &locator) const; /// Write block locator of the chain that the txindex is in sync with. void WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator); }; private: /// Whether the index is in sync with the main chain. The flag is flipped /// from false to true once, after which point this starts processing /// ValidationInterface notifications to stay in sync. std::atomic m_synced{false}; /// The last block in the chain that the index is in sync with. 
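// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] m_synced and
// m_best_block_index (declared just below) are std::atomic because the sync
// thread updates them while validation-callback and RPC threads read them
// without holding cs_main. A stripped-down model of that publish/read
// pattern (all names invented for illustration):
#include <atomic>

struct ToyIndexState {
    std::atomic<bool> synced{false};
    std::atomic<const int *> best_block{nullptr}; // stand-in for CBlockIndex*

    void PublishProgress(const int *block_height, bool caught_up) {
        best_block.store(block_height); // readers see a consistent pointer
        if (caught_up) {
            synced.store(true); // flipped from false to true exactly once
        }
    }

    // Safe to call from any thread; no lock is needed for these two fields.
    int BestHeightOrZero() const {
        const int *height = best_block.load();
        return height ? *height : 0;
    }
};
// ---------------------------------------------------------------------------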
std::atomic m_best_block_index{nullptr}; std::thread m_thread_sync; CThreadInterrupt m_interrupt; /// Sync the index with the block index starting from the current best /// block. Intended to be run in its own thread, m_thread_sync, and can be /// interrupted with m_interrupt. Once the index gets in sync, the m_synced /// flag is set and the BlockConnected ValidationInterface callback takes /// over and the sync thread exits. void ThreadSync(); /// Write the current index state (eg. chain block locator and /// subclass-specific items) to disk. /// /// Recommendations for error handling: /// If called on a successor of the previous committed best block in the /// index, the index can continue processing without risk of corruption, /// though the index state will need to catch up from further behind on /// reboot. If the new state is not a successor of the previous state (due /// to a chain reorganization), the index must halt until Commit succeeds or /// else it could end up getting corrupted. bool Commit(); protected: + CChainState *m_chainstate{nullptr}; + void BlockConnected(const std::shared_ptr &block, const CBlockIndex *pindex) override; void ChainStateFlushed(const CBlockLocator &locator) override; const CBlockIndex *CurrentIndex() { return m_best_block_index.load(); }; /// Initialize internal state from the database and block index. virtual bool Init(); /// Write update index entries for a newly connected block. virtual bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) { return true; } /// Virtual method called internally by Commit that can be overridden to /// atomically commit more index state. virtual bool CommitInternal(CDBBatch &batch); /// Rewind index to an earlier chain tip during a chain reorg. The tip must /// be an ancestor of the current best block. virtual bool Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip); virtual DB &GetDB() const = 0; /// Get the name of the index for display in logs. virtual const char *GetName() const = 0; public: /// Destructor interrupts sync thread if running and blocks until it exits. virtual ~BaseIndex(); /// Blocks the current thread until the index is caught up to the current /// state of the block chain. This only blocks if the index has gotten in /// sync once and only needs to process blocks in the ValidationInterface /// queue. If the index is catching up from far behind, this method does /// not block and immediately returns false. bool BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(::cs_main); void Interrupt(); /// Start initializes the sync state and registers the instance as a /// ValidationInterface so that it stays in sync with blockchain updates. - void Start(); + void Start(CChainState &active_chainstate); /// Stops the instance from staying in sync with blockchain updates. void Stop(); /// Get a summary of the index and its state. IndexSummary GetSummary() const; }; #endif // BITCOIN_INDEX_BASE_H diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index 552fb8d84..777be7a53 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -1,522 +1,522 @@ // Copyright (c) 2020-2021 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
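// ---------------------------------------------------------------------------
// [Editor's note -- illustrative, not part of this patch] With the base.h
// change above, callers now hand the active chainstate to an index instead
// of the index reaching for the ::ChainActive()/g_chainman globals. A
// hypothetical call site (the real ones are in the init.cpp hunk at the end
// of this patch, which is truncated here) would presumably look like:
//
//     g_txindex->Start(::ChainstateActive());
//
// For now Start() also asserts that the chainstate passed in is the
// currently active one, so this is a plumbing step toward supporting
// multiple chainstates rather than a behavior change.
// ---------------------------------------------------------------------------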
#include #include #include #include #include #include #include #include #include #include #include #include static constexpr char DB_BLOCK_HASH = 's'; static constexpr char DB_BLOCK_HEIGHT = 't'; static constexpr char DB_MUHASH = 'M'; namespace { struct DBVal { uint256 muhash; uint64_t transaction_output_count; uint64_t bogo_size; Amount total_amount; Amount total_subsidy; Amount total_unspendable_amount; Amount total_prevout_spent_amount; Amount total_new_outputs_ex_coinbase_amount; Amount total_coinbase_amount; Amount total_unspendables_genesis_block; Amount total_unspendables_bip30; Amount total_unspendables_scripts; Amount total_unspendables_unclaimed_rewards; SERIALIZE_METHODS(DBVal, obj) { READWRITE(obj.muhash); READWRITE(obj.transaction_output_count); READWRITE(obj.bogo_size); READWRITE(obj.total_amount); READWRITE(obj.total_subsidy); READWRITE(obj.total_unspendable_amount); READWRITE(obj.total_prevout_spent_amount); READWRITE(obj.total_new_outputs_ex_coinbase_amount); READWRITE(obj.total_coinbase_amount); READWRITE(obj.total_unspendables_genesis_block); READWRITE(obj.total_unspendables_bip30); READWRITE(obj.total_unspendables_scripts); READWRITE(obj.total_unspendables_unclaimed_rewards); } }; struct DBHeightKey { int height; explicit DBHeightKey(int height_in) : height(height_in) {} template void Serialize(Stream &s) const { ser_writedata8(s, DB_BLOCK_HEIGHT); ser_writedata32be(s, height); } template void Unserialize(Stream &s) { char prefix{static_cast(ser_readdata8(s))}; if (prefix != DB_BLOCK_HEIGHT) { throw std::ios_base::failure( "Invalid format for coinstatsindex DB height key"); } height = ser_readdata32be(s); } }; struct DBHashKey { BlockHash block_hash; explicit DBHashKey(const BlockHash &hash_in) : block_hash(hash_in) {} SERIALIZE_METHODS(DBHashKey, obj) { char prefix{DB_BLOCK_HASH}; READWRITE(prefix); if (prefix != DB_BLOCK_HASH) { throw std::ios_base::failure( "Invalid format for coinstatsindex DB hash key"); } READWRITE(obj.block_hash); } }; }; // namespace std::unique_ptr g_coin_stats_index; CoinStatsIndex::CoinStatsIndex(size_t n_cache_size, bool f_memory, bool f_wipe) { fs::path path{GetDataDir() / "indexes" / "coinstats"}; fs::create_directories(path); m_db = std::make_unique(path / "db", n_cache_size, f_memory, f_wipe); } bool CoinStatsIndex::WriteBlock(const CBlock &block, const CBlockIndex *pindex) { CBlockUndo block_undo; const Amount block_subsidy{ GetBlockSubsidy(pindex->nHeight, Params().GetConsensus())}; m_total_subsidy += block_subsidy; // Ignore genesis block if (pindex->nHeight > 0) { if (!UndoReadFromDisk(block_undo, pindex)) { return false; } std::pair read_out; if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) { return false; } BlockHash expected_block_hash{pindex->pprev->GetBlockHash()}; if (read_out.first != expected_block_hash) { if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { return error("%s: previous block header belongs to unexpected " "block %s; expected %s", __func__, read_out.first.ToString(), expected_block_hash.ToString()); } } // TODO: Deduplicate BIP30 related code bool is_bip30_block{ (pindex->nHeight == 91722 && pindex->GetBlockHash() == BlockHash{uint256S("0x00000000000271a2dc26e7667f8419f2e15416dc" "6955e5a6c6cdf3f2574dd08e")}) || (pindex->nHeight == 91812 && pindex->GetBlockHash() == BlockHash{uint256S("0x00000000000af0aed4792b1acee3d966af36cf5d" "ef14935db8de83d6f9306f2f")})}; // Add the new utxos created from the block for (size_t i = 0; i < block.vtx.size(); ++i) { const auto &tx{block.vtx.at(i)}; // Skip 
duplicate txid coinbase transactions (BIP30). if (is_bip30_block && tx->IsCoinBase()) { m_total_unspendable_amount += block_subsidy; m_total_unspendables_bip30 += block_subsidy; continue; } for (uint32_t j = 0; j < tx->vout.size(); ++j) { const CTxOut &out{tx->vout[j]}; Coin coin{out, static_cast(pindex->nHeight), tx->IsCoinBase()}; COutPoint outpoint{tx->GetId(), j}; // Skip unspendable coins if (coin.GetTxOut().scriptPubKey.IsUnspendable()) { m_total_unspendable_amount += coin.GetTxOut().nValue; m_total_unspendables_scripts += coin.GetTxOut().nValue; continue; } m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin))); if (tx->IsCoinBase()) { m_total_coinbase_amount += coin.GetTxOut().nValue; } else { m_total_new_outputs_ex_coinbase_amount += coin.GetTxOut().nValue; } ++m_transaction_output_count; m_total_amount += coin.GetTxOut().nValue; m_bogo_size += GetBogoSize(coin.GetTxOut().scriptPubKey); } // The coinbase tx has no undo data since no former output is spent if (!tx->IsCoinBase()) { const auto &tx_undo{block_undo.vtxundo.at(i - 1)}; for (size_t j = 0; j < tx_undo.vprevout.size(); ++j) { Coin coin{tx_undo.vprevout[j]}; COutPoint outpoint{tx->vin[j].prevout.GetTxId(), tx->vin[j].prevout.GetN()}; m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin))); m_total_prevout_spent_amount += coin.GetTxOut().nValue; --m_transaction_output_count; m_total_amount -= coin.GetTxOut().nValue; m_bogo_size -= GetBogoSize(coin.GetTxOut().scriptPubKey); } } } } else { // genesis block m_total_unspendable_amount += block_subsidy; m_total_unspendables_genesis_block += block_subsidy; } // If spent prevouts + block subsidy are still a higher amount than // new outputs + coinbase + current unspendable amount this means // the miner did not claim the full block reward. Unclaimed block // rewards are also unspendable. 
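// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] A worked example
// of the unclaimed-reward computation just below, with made-up round
// numbers. Suppose that over all blocks processed so far:
//   prevouts spent                 = 1,000 coins
//   total subsidy                  =   150 coins
//   new outputs (ex coinbase)      =   990 coins
//   coinbase outputs               =   140 coins
//   already-counted unspendables   =    15 coins
// Then (1000 + 150) - (990 + 140 + 15) = 5 coins were never claimed by any
// miner, and they are folded into the unspendable totals. The formula as a
// self-contained function (plain int64_t in place of Amount):
#include <cstdint>

int64_t UnclaimedRewards(int64_t prevout_spent, int64_t subsidy,
                         int64_t new_outputs_ex_coinbase, int64_t coinbase,
                         int64_t unspendable) {
    return (prevout_spent + subsidy) -
           (new_outputs_ex_coinbase + coinbase + unspendable);
}
// ---------------------------------------------------------------------------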
const Amount unclaimed_rewards{ (m_total_prevout_spent_amount + m_total_subsidy) - (m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount)}; m_total_unspendable_amount += unclaimed_rewards; m_total_unspendables_unclaimed_rewards += unclaimed_rewards; std::pair value; value.first = pindex->GetBlockHash(); value.second.transaction_output_count = m_transaction_output_count; value.second.bogo_size = m_bogo_size; value.second.total_amount = m_total_amount; value.second.total_subsidy = m_total_subsidy; value.second.total_unspendable_amount = m_total_unspendable_amount; value.second.total_prevout_spent_amount = m_total_prevout_spent_amount; value.second.total_new_outputs_ex_coinbase_amount = m_total_new_outputs_ex_coinbase_amount; value.second.total_coinbase_amount = m_total_coinbase_amount; value.second.total_unspendables_genesis_block = m_total_unspendables_genesis_block; value.second.total_unspendables_bip30 = m_total_unspendables_bip30; value.second.total_unspendables_scripts = m_total_unspendables_scripts; value.second.total_unspendables_unclaimed_rewards = m_total_unspendables_unclaimed_rewards; uint256 out; m_muhash.Finalize(out); value.second.muhash = out; CDBBatch batch(*m_db); batch.Write(DBHeightKey(pindex->nHeight), value); batch.Write(DB_MUHASH, m_muhash); return m_db->WriteBatch(batch); } static bool CopyHeightIndexToHashIndex(CDBIterator &db_it, CDBBatch &batch, const std::string &index_name, int start_height, int stop_height) { DBHeightKey key{start_height}; db_it.Seek(key); for (int height = start_height; height <= stop_height; ++height) { if (!db_it.GetKey(key) || key.height != height) { return error("%s: unexpected key in %s: expected (%c, %d)", __func__, index_name, DB_BLOCK_HEIGHT, height); } std::pair value; if (!db_it.GetValue(value)) { return error("%s: unable to read value in %s at key (%c, %d)", __func__, index_name, DB_BLOCK_HEIGHT, height); } batch.Write(DBHashKey(value.first), std::move(value.second)); db_it.Next(); } return true; } bool CoinStatsIndex::Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip) { assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); CDBBatch batch(*m_db); std::unique_ptr db_it(m_db->NewIterator()); // During a reorg, we need to copy all hash digests for blocks that are // getting disconnected from the height index to the hash index so we can // still find them when the height index entries are overwritten. if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip->nHeight, current_tip->nHeight)) { return false; } if (!m_db->WriteBatch(batch)) { return false; } { LOCK(cs_main); - CBlockIndex *iter_tip{g_chainman.m_blockman.LookupBlockIndex( + CBlockIndex *iter_tip{m_chainstate->m_blockman.LookupBlockIndex( current_tip->GetBlockHash())}; const auto &consensus_params{Params().GetConsensus()}; do { CBlock block; if (!ReadBlockFromDisk(block, iter_tip, consensus_params)) { return error("%s: Failed to read block %s from disk", __func__, iter_tip->GetBlockHash().ToString()); } ReverseBlock(block, iter_tip); iter_tip = iter_tip->GetAncestor(iter_tip->nHeight - 1); } while (new_tip != iter_tip); } return BaseIndex::Rewind(current_tip, new_tip); } static bool LookUpOne(const CDBWrapper &db, const CBlockIndex *block_index, DBVal &result) { // First check if the result is stored under the height index and the value // there matches the block hash. This should be the case if the block is on // the active chain. 
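// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] The lookup below
// tries the height-keyed entry first and, if the hash there does not match,
// falls back to the hash-keyed copy that CoinStatsIndex::Rewind() writes for
// reorged-out blocks. A self-contained model of that two-step lookup using
// std::map in place of the LevelDB wrapper (names invented):
#include <map>
#include <optional>
#include <string>

struct ToyHeightEntry {
    std::string block_hash; // stand-in for BlockHash
    int stats{0};           // stand-in for DBVal
};

std::optional<int> ToyLookUpOne(const std::map<int, ToyHeightEntry> &by_height,
                                const std::map<std::string, int> &by_hash,
                                int height, const std::string &hash) {
    // Fast path: the block still sits at this height on the active chain.
    if (auto it = by_height.find(height);
        it != by_height.end() && it->second.block_hash == hash) {
        return it->second.stats;
    }
    // Slow path: the block was reorged out; its stats were copied under a
    // hash key before the height entry was overwritten.
    if (auto it = by_hash.find(hash); it != by_hash.end()) {
        return it->second;
    }
    return std::nullopt;
}
// ---------------------------------------------------------------------------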
std::pair read_out; if (!db.Read(DBHeightKey(block_index->nHeight), read_out)) { return false; } if (read_out.first == block_index->GetBlockHash()) { result = std::move(read_out.second); return true; } // If value at the height index corresponds to an different block, the // result will be stored in the hash index. return db.Read(DBHashKey(block_index->GetBlockHash()), result); } bool CoinStatsIndex::LookUpStats(const CBlockIndex *block_index, CCoinsStats &coins_stats) const { DBVal entry; if (!LookUpOne(*m_db, block_index, entry)) { return false; } coins_stats.hashSerialized = entry.muhash; coins_stats.nTransactionOutputs = entry.transaction_output_count; coins_stats.nBogoSize = entry.bogo_size; coins_stats.nTotalAmount = entry.total_amount; coins_stats.total_subsidy = entry.total_subsidy; coins_stats.total_unspendable_amount = entry.total_unspendable_amount; coins_stats.total_prevout_spent_amount = entry.total_prevout_spent_amount; coins_stats.total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; coins_stats.total_coinbase_amount = entry.total_coinbase_amount; coins_stats.total_unspendables_genesis_block = entry.total_unspendables_genesis_block; coins_stats.total_unspendables_bip30 = entry.total_unspendables_bip30; coins_stats.total_unspendables_scripts = entry.total_unspendables_scripts; coins_stats.total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; return true; } bool CoinStatsIndex::Init() { if (!m_db->Read(DB_MUHASH, m_muhash)) { // Check that the cause of the read failure is that the key does not // exist. Any other errors indicate database corruption or a disk // failure, and starting the index would cause further corruption. if (m_db->Exists(DB_MUHASH)) { return error( "%s: Cannot read current %s state; index may be corrupted", __func__, GetName()); } } if (BaseIndex::Init()) { const CBlockIndex *pindex{CurrentIndex()}; if (pindex) { DBVal entry; if (!LookUpOne(*m_db, pindex, entry)) { return false; } m_transaction_output_count = entry.transaction_output_count; m_bogo_size = entry.bogo_size; m_total_amount = entry.total_amount; m_total_subsidy = entry.total_subsidy; m_total_unspendable_amount = entry.total_unspendable_amount; m_total_prevout_spent_amount = entry.total_prevout_spent_amount; m_total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; m_total_coinbase_amount = entry.total_coinbase_amount; m_total_unspendables_genesis_block = entry.total_unspendables_genesis_block; m_total_unspendables_bip30 = entry.total_unspendables_bip30; m_total_unspendables_scripts = entry.total_unspendables_scripts; m_total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; } return true; } return false; } // Reverse a single block as part of a reorg bool CoinStatsIndex::ReverseBlock(const CBlock &block, const CBlockIndex *pindex) { CBlockUndo block_undo; std::pair read_out; const Amount block_subsidy{ GetBlockSubsidy(pindex->nHeight, Params().GetConsensus())}; m_total_subsidy -= block_subsidy; // Ignore genesis block if (pindex->nHeight > 0) { if (!UndoReadFromDisk(block_undo, pindex)) { return false; } if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) { return false; } BlockHash expected_block_hash{pindex->pprev->GetBlockHash()}; if (read_out.first != expected_block_hash) { if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { return error("%s: previous block header belongs to unexpected " "block %s; expected %s", __func__, read_out.first.ToString(), 
expected_block_hash.ToString()); } } } // Remove the new UTXOs that were created from the block for (size_t i = 0; i < block.vtx.size(); ++i) { const auto &tx{block.vtx.at(i)}; for (uint32_t j = 0; j < tx->vout.size(); ++j) { const CTxOut &out{tx->vout[j]}; COutPoint outpoint{tx->GetId(), j}; Coin coin{out, static_cast(pindex->nHeight), tx->IsCoinBase()}; // Skip unspendable coins if (coin.GetTxOut().scriptPubKey.IsUnspendable()) { m_total_unspendable_amount -= coin.GetTxOut().nValue; m_total_unspendables_scripts -= coin.GetTxOut().nValue; continue; } m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin))); if (tx->IsCoinBase()) { m_total_coinbase_amount -= coin.GetTxOut().nValue; } else { m_total_new_outputs_ex_coinbase_amount -= coin.GetTxOut().nValue; } --m_transaction_output_count; m_total_amount -= coin.GetTxOut().nValue; m_bogo_size -= GetBogoSize(coin.GetTxOut().scriptPubKey); } // The coinbase tx has no undo data since no former output is spent if (!tx->IsCoinBase()) { const auto &tx_undo{block_undo.vtxundo.at(i - 1)}; for (size_t j = 0; j < tx_undo.vprevout.size(); ++j) { Coin coin{tx_undo.vprevout[j]}; COutPoint outpoint{tx->vin[j].prevout.GetTxId(), tx->vin[j].prevout.GetN()}; m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin))); m_total_prevout_spent_amount -= coin.GetTxOut().nValue; m_transaction_output_count++; m_total_amount += coin.GetTxOut().nValue; m_bogo_size += GetBogoSize(coin.GetTxOut().scriptPubKey); } } } const Amount unclaimed_rewards{ (m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount) - (m_total_prevout_spent_amount + m_total_subsidy)}; m_total_unspendable_amount -= unclaimed_rewards; m_total_unspendables_unclaimed_rewards -= unclaimed_rewards; // Check that the rolled back internal values are consistent with the DB // read out uint256 out; m_muhash.Finalize(out); Assert(read_out.second.muhash == out); Assert(m_transaction_output_count == read_out.second.transaction_output_count); Assert(m_total_amount == read_out.second.total_amount); Assert(m_bogo_size == read_out.second.bogo_size); Assert(m_total_subsidy == read_out.second.total_subsidy); Assert(m_total_unspendable_amount == read_out.second.total_unspendable_amount); Assert(m_total_prevout_spent_amount == read_out.second.total_prevout_spent_amount); Assert(m_total_new_outputs_ex_coinbase_amount == read_out.second.total_new_outputs_ex_coinbase_amount); Assert(m_total_coinbase_amount == read_out.second.total_coinbase_amount); Assert(m_total_unspendables_genesis_block == read_out.second.total_unspendables_genesis_block); Assert(m_total_unspendables_bip30 == read_out.second.total_unspendables_bip30); Assert(m_total_unspendables_scripts == read_out.second.total_unspendables_scripts); Assert(m_total_unspendables_unclaimed_rewards == read_out.second.total_unspendables_unclaimed_rewards); return m_db->Write(DB_MUHASH, m_muhash); } diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp index 21cd2bc76..9e62b19e8 100644 --- a/src/index/txindex.cpp +++ b/src/index/txindex.cpp @@ -1,266 +1,266 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
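// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch]
// CoinStatsIndex::ReverseBlock() in the hunk above works because the
// coin-set commitment supports both Insert and Remove: rolling a block back
// removes the outputs it created and re-inserts the outputs it spent, in any
// order. A toy stand-in built on XOR shows the same add/remove symmetry (the
// real index uses MuHash3072, which multiplies and divides in a large
// modular group instead of XORing):
#include <cstdint>
#include <functional>
#include <string>

class ToySetHash {
public:
    void Insert(const std::string &element) { m_acc ^= Mix(element); }
    void Remove(const std::string &element) { m_acc ^= Mix(element); }
    uint64_t Finalize() const { return m_acc; }

private:
    static uint64_t Mix(const std::string &element) {
        return std::hash<std::string>{}(element);
    }
    uint64_t m_acc{0};
};
// Inserting and later removing the same element restores the previous
// digest, which is exactly what a block rollback relies on.
// ---------------------------------------------------------------------------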
#include #include #include #include #include #include #include #include #include constexpr char DB_BEST_BLOCK = 'B'; constexpr char DB_TXINDEX = 't'; constexpr char DB_TXINDEX_BLOCK = 'T'; std::unique_ptr g_txindex; /** Access to the txindex database (indexes/txindex/) */ class TxIndex::DB : public BaseIndex::DB { public: explicit DB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false); /// Read the disk location of the transaction data with the given ID. /// Returns false if the transaction ID is not indexed. bool ReadTxPos(const TxId &txid, CDiskTxPos &pos) const; /// Write a batch of transaction positions to the DB. bool WriteTxs(const std::vector> &v_pos); /// Migrate txindex data from the block tree DB, where it may be for older /// nodes that have not been upgraded yet to the new database. bool MigrateData(CBlockTreeDB &block_tree_db, const CBlockLocator &best_locator); }; TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) : BaseIndex::DB(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe) {} bool TxIndex::DB::ReadTxPos(const TxId &txid, CDiskTxPos &pos) const { return Read(std::make_pair(DB_TXINDEX, txid), pos); } bool TxIndex::DB::WriteTxs( const std::vector> &v_pos) { CDBBatch batch(*this); for (const auto &tuple : v_pos) { batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second); } return WriteBatch(batch); } /* * Safely persist a transfer of data from the old txindex database to the new * one, and compact the range of keys updated. This is used internally by * MigrateData. */ static void WriteTxIndexMigrationBatches(CDBWrapper &newdb, CDBWrapper &olddb, CDBBatch &batch_newdb, CDBBatch &batch_olddb, const std::pair &begin_key, const std::pair &end_key) { // Sync new DB changes to disk before deleting from old DB. newdb.WriteBatch(batch_newdb, /*fSync=*/true); olddb.WriteBatch(batch_olddb); olddb.CompactRange(begin_key, end_key); batch_newdb.Clear(); batch_olddb.Clear(); } bool TxIndex::DB::MigrateData(CBlockTreeDB &block_tree_db, const CBlockLocator &best_locator) { // The prior implementation of txindex was always in sync with block index // and presence was indicated with a boolean DB flag. If the flag is set, // this means the txindex from a previous version is valid and in sync with // the chain tip. The first step of the migration is to unset the flag and // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the // index entries are copied over in batches to the new database. Finally, // DB_TXINDEX_BLOCK is erased from the old database and the block hash is // written to the new database. // // Unsetting the boolean flag ensures that if the node is downgraded to a // previous version, it will not see a corrupted, partially migrated index // -- it will see that the txindex is disabled. When the node is upgraded // again, the migration will pick up where it left off and sync to the block // with hash DB_TXINDEX_BLOCK. bool f_legacy_flag = false; block_tree_db.ReadFlag("txindex", f_legacy_flag); if (f_legacy_flag) { if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) { return error("%s: cannot write block indicator", __func__); } if (!block_tree_db.WriteFlag("txindex", false)) { return error("%s: cannot write block index db flag", __func__); } } CBlockLocator locator; if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) { return true; } int64_t count = 0; uiInterface.InitMessage(_("Upgrading txindex database").translated); LogPrintf("Upgrading txindex database... 
[0%%]\n"); uiInterface.ShowProgress(_("Upgrading txindex database").translated, 0, true); int report_done = 0; const size_t batch_size = 1 << 24; // 16 MiB CDBBatch batch_newdb(*this); CDBBatch batch_olddb(block_tree_db); std::pair key; std::pair begin_key{DB_TXINDEX, TxId()}; std::pair prev_key = begin_key; bool interrupted = false; std::unique_ptr cursor(block_tree_db.NewIterator()); for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) { if (ShutdownRequested()) { interrupted = true; break; } if (!cursor->GetKey(key)) { return error("%s: cannot get key from valid cursor", __func__); } if (key.first != DB_TXINDEX) { break; } // Log progress every 10%. if (++count % 256 == 0) { // Since txids are uniformly random and traversed in increasing // order, the high 16 bits of the ID can be used to estimate the // current progress. const TxId &txid = key.second; uint32_t high_nibble = (static_cast(*(txid.begin() + 0)) << 8) + (static_cast(*(txid.begin() + 1)) << 0); int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5); uiInterface.ShowProgress(_("Upgrading txindex database").translated, percentage_done, true); if (report_done < percentage_done / 10) { LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done); report_done = percentage_done / 10; } } CDiskTxPos value; if (!cursor->GetValue(value)) { return error("%s: cannot parse txindex record", __func__); } batch_newdb.Write(key, value); batch_olddb.Erase(key); if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) { // NOTE: it's OK to delete the key pointed at by the current DB // cursor while iterating because LevelDB iterators are guaranteed // to provide a consistent view of the underlying data, like a // lightweight snapshot. WriteTxIndexMigrationBatches(*this, block_tree_db, batch_newdb, batch_olddb, prev_key, key); prev_key = key; } } // If these final DB batches complete the migration, write the best block // hash marker to the new database and delete from the old one. This signals // that the former is fully caught up to that point in the blockchain and // that all txindex entries have been removed from the latter. if (!interrupted) { batch_olddb.Erase(DB_TXINDEX_BLOCK); batch_newdb.Write(DB_BEST_BLOCK, locator); } WriteTxIndexMigrationBatches(*this, block_tree_db, batch_newdb, batch_olddb, begin_key, key); if (interrupted) { LogPrintf("[CANCELLED].\n"); return false; } uiInterface.ShowProgress("", 100, false); LogPrintf("[DONE].\n"); return true; } TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe) : m_db(std::make_unique(n_cache_size, f_memory, f_wipe)) {} TxIndex::~TxIndex() {} bool TxIndex::Init() { LOCK(cs_main); // Attempt to migrate txindex from the old database to the new one. Even if // chain_tip is null, the node could be reindexing and we still want to // delete txindex records in the old database. - if (!m_db->MigrateData(*pblocktree, ::ChainActive().GetLocator())) { + if (!m_db->MigrateData(*pblocktree, m_chainstate->m_chain.GetLocator())) { return false; } return BaseIndex::Init(); } bool TxIndex::WriteBlock(const CBlock &block, const CBlockIndex *pindex) { // Exclude genesis block transaction because outputs are not spendable. 
if (pindex->nHeight == 0) { return true; } CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); std::vector> vPos; vPos.reserve(block.vtx.size()); for (const auto &tx : block.vtx) { vPos.emplace_back(tx->GetId(), pos); pos.nTxOffset += ::GetSerializeSize(*tx, CLIENT_VERSION); } return m_db->WriteTxs(vPos); } BaseIndex::DB &TxIndex::GetDB() const { return *m_db; } bool TxIndex::FindTx(const TxId &txid, BlockHash &block_hash, CTransactionRef &tx) const { CDiskTxPos postx; if (!m_db->ReadTxPos(txid, postx)) { return false; } CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); if (file.IsNull()) { return error("%s: OpenBlockFile failed", __func__); } CBlockHeader header; try { file >> header; if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) { return error("%s: fseek(...) failed", __func__); } file >> tx; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } if (tx->GetId() != txid) { return error("%s: txid mismatch", __func__); } block_hash = header.GetHash(); return true; } diff --git a/src/init.cpp b/src/init.cpp index c68254c7a..f47b599d2 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1,3157 +1,3157 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include #endif #include #include #include #include #include #include // For AVALANCHE_LEGACY_PROOF_DEFAULT #include #include // For AVALANCHE_VOTE_STALE_* #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
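// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch] The txindex
// migration loop earlier in this patch estimates its progress from the key
// under the cursor: txids are effectively uniformly distributed and LevelDB
// visits them in ascending key order, so the first two serialized bytes of
// the current txid, divided by 65536, approximate the fraction already
// migrated. A self-contained version of that estimate:
#include <cstdint>

// `first_byte` and `second_byte` are the first two serialized bytes of the
// txid at the cursor (the bytes that determine the iteration order).
int ToyMigrationPercent(uint8_t first_byte, uint8_t second_byte) {
    const uint32_t high_16_bits =
        (static_cast<uint32_t>(first_byte) << 8) | second_byte;
    return static_cast<int>(high_16_bits * 100.0 / 65536.0 + 0.5);
}
// Example: a txid whose serialized key starts with 0x80 0x00 reports
// roughly 50% complete.
// ---------------------------------------------------------------------------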