diff --git a/src/index/base.cpp b/src/index/base.cpp
index afd3ba38f..bb71ae38c 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -1,426 +1,431 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chain.h>
#include <chainparams.h>
#include <common/args.h>
#include <config.h>
#include <index/base.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <node/database_args.h>
#include <node/ui_interface.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For Chainstate
#include <warnings.h>
#include <functional>
constexpr uint8_t DB_BEST_BLOCK{'B'};
constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
template <typename... Args>
static void FatalError(const char *fmt, const Args &...args) {
std::string strMessage = tfm::format(fmt, args...);
SetMiscWarning(Untranslated(strMessage));
LogPrintf("*** %s\n", strMessage);
AbortError(_("A fatal internal error occurred, see debug.log for details"));
StartShutdown();
}
BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory,
bool f_wipe, bool f_obfuscate)
: CDBWrapper{DBParams{.path = path,
.cache_bytes = n_cache_size,
.memory_only = f_memory,
.wipe_data = f_wipe,
.obfuscate = f_obfuscate,
.options = [] {
DBOptions options;
node::ReadDatabaseArgs(gArgs, options);
return options;
}()}} {}
bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const {
bool success = Read(DB_BEST_BLOCK, locator);
if (!success) {
locator.SetNull();
}
return success;
}
void BaseIndex::DB::WriteBestBlock(CDBBatch &batch,
const CBlockLocator &locator) {
batch.Write(DB_BEST_BLOCK, locator);
}
BaseIndex::~BaseIndex() {
Interrupt();
Stop();
}
bool BaseIndex::Init() {
CBlockLocator locator;
if (!GetDB().ReadBestBlock(locator)) {
locator.SetNull();
}
LOCK(cs_main);
CChain &active_chain = m_chainstate->m_chain;
if (locator.IsNull()) {
SetBestBlockIndex(nullptr);
} else {
SetBestBlockIndex(m_chainstate->FindForkInGlobalIndex(locator));
}
+
+ // Note: this will latch to true immediately if the user starts up with an
+ // empty datadir and an index enabled. If this is the case, indexation will
+ // happen solely via `BlockConnected` signals until, possibly, the next
+ // restart.
m_synced = m_best_block_index.load() == active_chain.Tip();
if (!m_synced) {
bool prune_violation = false;
if (!m_best_block_index) {
// index is not built yet
// make sure we have all block data back to the genesis
prune_violation =
m_chainstate->m_blockman.GetFirstStoredBlock(
*active_chain.Tip()) != active_chain.Genesis();
}
// in case the index has a best block set and is not fully synced
// check if we have the required blocks to continue building the index
else {
const CBlockIndex *block_to_test = m_best_block_index.load();
if (!active_chain.Contains(block_to_test)) {
// if the bestblock is not part of the mainchain, find the fork
// and make sure we have all data down to the fork
block_to_test = active_chain.FindFork(block_to_test);
}
const CBlockIndex *block = active_chain.Tip();
prune_violation = true;
// check backwards from the tip if we have all block data until we
// reach the index's bestblock
while (block_to_test && block && block->nStatus.hasData()) {
if (block_to_test == block) {
prune_violation = false;
break;
}
// block->pprev must exist at this point, since block_to_test is
// part of the chain and thus must be encountered when going
// backwards from the tip
assert(block->pprev);
block = block->pprev;
}
}
if (prune_violation) {
return InitError(strprintf(
Untranslated("%s best block of the index goes beyond pruned "
"data. Please disable the index or reindex (which "
"will download the whole blockchain again)"),
GetName()));
}
}
return true;
}
static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev,
CChain &chain)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
if (!pindex_prev) {
return chain.Genesis();
}
const CBlockIndex *pindex = chain.Next(pindex_prev);
if (pindex) {
return pindex;
}
return chain.Next(chain.FindFork(pindex_prev));
}
void BaseIndex::ThreadSync() {
const CBlockIndex *pindex = m_best_block_index.load();
if (!m_synced) {
int64_t last_log_time = 0;
int64_t last_locator_write_time = 0;
while (true) {
if (m_interrupt) {
SetBestBlockIndex(pindex);
// No need to handle errors in Commit. If it fails, the error
// will already be logged. The best way to recover is to
// continue, as the index cannot be corrupted by a missed commit to
// disk for an advanced index state.
Commit();
return;
}
{
LOCK(cs_main);
const CBlockIndex *pindex_next =
NextSyncBlock(pindex, m_chainstate->m_chain);
if (!pindex_next) {
SetBestBlockIndex(pindex);
m_synced = true;
// No need to handle errors in Commit. See rationale above.
Commit();
break;
}
if (pindex_next->pprev != pindex &&
!Rewind(pindex, pindex_next->pprev)) {
FatalError(
"%s: Failed to rewind index %s to a previous chain tip",
__func__, GetName());
return;
}
pindex = pindex_next;
}
int64_t current_time = GetTime();
if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
LogPrintf("Syncing %s with block chain from height %d\n",
GetName(), pindex->nHeight);
last_log_time = current_time;
}
if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL <
current_time) {
SetBestBlockIndex(pindex->pprev);
last_locator_write_time = current_time;
// No need to handle errors in Commit. See rationale above.
Commit();
}
CBlock block;
if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) {
FatalError("%s: Failed to read block %s from disk", __func__,
pindex->GetBlockHash().ToString());
return;
}
if (!WriteBlock(block, pindex)) {
FatalError("%s: Failed to write block %s to index database",
__func__, pindex->GetBlockHash().ToString());
return;
}
}
}
if (pindex) {
LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
} else {
LogPrintf("%s is enabled\n", GetName());
}
}
bool BaseIndex::Commit() {
CDBBatch batch(GetDB());
if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) {
return error("%s: Failed to commit latest %s state", __func__,
GetName());
}
return true;
}
bool BaseIndex::CommitInternal(CDBBatch &batch) {
LOCK(cs_main);
// Don't commit anything if we haven't indexed any block yet
// (this could happen if init is interrupted).
if (m_best_block_index == nullptr) {
return false;
}
GetDB().WriteBestBlock(batch, GetLocator(m_best_block_index));
return true;
}
bool BaseIndex::Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) {
assert(current_tip == m_best_block_index);
assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
// In the case of a reorg, ensure persisted block locator is not stale.
// Pruning has a minimum of 288 blocks-to-keep and getting the index
// out of sync may be possible but a user's fault.
// In case we reorg beyond the pruned depth, ReadBlockFromDisk would
// throw and lead to a graceful shutdown
SetBestBlockIndex(new_tip);
if (!Commit()) {
// If commit fails, revert the best block index to avoid corruption.
SetBestBlockIndex(current_tip);
return false;
}
return true;
}
void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) {
if (!m_synced) {
return;
}
const CBlockIndex *best_block_index = m_best_block_index.load();
if (!best_block_index) {
if (pindex->nHeight != 0) {
FatalError("%s: First block connected is not the genesis block "
"(height=%d)",
__func__, pindex->nHeight);
return;
}
} else {
// Ensure block connects to an ancestor of the current best block. This
// should be the case most of the time, but may not be immediately after
// the sync thread catches up and sets m_synced. Consider the case
// where there is a reorg and the blocks on the stale branch are in the
// ValidationInterface queue backlog even after the sync thread has
// caught up to the new chain tip. In this unlikely event, log a warning
// and let the queue clear.
if (best_block_index->GetAncestor(pindex->nHeight - 1) !=
pindex->pprev) {
LogPrintf("%s: WARNING: Block %s does not connect to an ancestor "
"of known best chain (tip=%s); not updating index\n",
__func__, pindex->GetBlockHash().ToString(),
best_block_index->GetBlockHash().ToString());
return;
}
if (best_block_index != pindex->pprev &&
!Rewind(best_block_index, pindex->pprev)) {
FatalError("%s: Failed to rewind index %s to a previous chain tip",
__func__, GetName());
return;
}
}
if (WriteBlock(*block, pindex)) {
// Setting the best block index is intentionally the last step of this
// function, so BlockUntilSyncedToCurrentChain callers waiting for the
// best block index to be updated can rely on the block being fully
// processed, and the index object being safe to delete.
SetBestBlockIndex(pindex);
} else {
FatalError("%s: Failed to write block %s to index", __func__,
pindex->GetBlockHash().ToString());
return;
}
}
void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) {
if (!m_synced) {
return;
}
const BlockHash &locator_tip_hash = locator.vHave.front();
const CBlockIndex *locator_tip_index;
{
LOCK(cs_main);
locator_tip_index =
m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
}
if (!locator_tip_index) {
FatalError("%s: First block (hash=%s) in locator was not found",
__func__, locator_tip_hash.ToString());
return;
}
// This checks that ChainStateFlushed callbacks are received after
// BlockConnected. The check may fail immediately after the sync thread
// catches up and sets m_synced. Consider the case where there is a reorg
// and the blocks on the stale branch are in the ValidationInterface queue
// backlog even after the sync thread has caught up to the new chain tip. In
// this unlikely event, log a warning and let the queue clear.
const CBlockIndex *best_block_index = m_best_block_index.load();
if (best_block_index->GetAncestor(locator_tip_index->nHeight) !=
locator_tip_index) {
LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known "
"best chain (tip=%s); not writing index locator\n",
__func__, locator_tip_hash.ToString(),
best_block_index->GetBlockHash().ToString());
return;
}
// No need to handle errors in Commit. If it fails, the error will
// already be logged. The best way to recover is to continue, as the index
// cannot be corrupted by a missed commit to disk for an advanced index
// state.
Commit();
}
bool BaseIndex::BlockUntilSyncedToCurrentChain() const {
AssertLockNotHeld(cs_main);
if (!m_synced) {
return false;
}
{
// Skip the queue-draining stuff if we know we're caught up with
// m_chain.Tip().
LOCK(cs_main);
const CBlockIndex *chain_tip = m_chainstate->m_chain.Tip();
const CBlockIndex *best_block_index = m_best_block_index.load();
if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
return true;
}
}
LogPrintf("%s: %s is catching up on block notifications\n", __func__,
GetName());
SyncWithValidationInterfaceQueue();
return true;
}
void BaseIndex::Interrupt() {
m_interrupt();
}
bool BaseIndex::Start(Chainstate &active_chainstate) {
m_chainstate = &active_chainstate;
// Need to register this ValidationInterface before running Init(), so that
// callbacks are not missed if Init sets m_synced to true.
RegisterValidationInterface(this);
if (!Init()) {
return false;
}
m_thread_sync =
std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
return true;
}
void BaseIndex::Stop() {
UnregisterValidationInterface(this);
if (m_thread_sync.joinable()) {
m_thread_sync.join();
}
}
IndexSummary BaseIndex::GetSummary() const {
IndexSummary summary{};
summary.name = GetName();
summary.synced = m_synced;
summary.best_block_height =
m_best_block_index ? m_best_block_index.load()->nHeight : 0;
return summary;
}
void BaseIndex::SetBestBlockIndex(const CBlockIndex *block) {
assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
if (AllowPrune() && block) {
node::PruneLockInfo prune_lock;
prune_lock.height_first = block->nHeight;
WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(
GetName(), prune_lock));
}
// Intentionally set m_best_block_index as the last step in this function,
// after updating prune locks above, and after making any other references
// to *this, so the BlockUntilSyncedToCurrentChain function (which checks
// m_best_block_index as an optimization) can be used to wait for the last
// BlockConnected notification and safely assume that prune locks are
// updated and that the index object is safe to delete.
m_best_block_index = block;
}
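For context, here is a minimal usage sketch of the lifecycle implemented above. It is not part of the patch: the g_example_index pointer and the wrapper function names are hypothetical. A caller starts the index against the active chainstate, waits for it to catch up before serving queries, and interrupts/stops it on shutdown.

// Sketch only (assumption): g_example_index points at some concrete
// BaseIndex subclass; the wrapper functions below are illustrative.
#include <index/base.h>
#include <memory>

static std::unique_ptr<BaseIndex> g_example_index;

bool StartExampleIndex(Chainstate &active_chainstate) {
    // Start() registers the ValidationInterface and spawns ThreadSync().
    return g_example_index->Start(active_chainstate);
}

bool WaitForExampleIndex() {
    // Returns false while the initial sync is still far behind; otherwise
    // drains the ValidationInterface queue so reads observe the current tip.
    return g_example_index->BlockUntilSyncedToCurrentChain();
}

void StopExampleIndex() {
    // Interrupt() asks ThreadSync() to exit at its next loop iteration;
    // Stop() unregisters the ValidationInterface and joins the thread.
    g_example_index->Interrupt();
    g_example_index->Stop();
}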
diff --git a/src/index/base.h b/src/index/base.h
index b5900fadb..dcfa598a5 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -1,140 +1,144 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INDEX_BASE_H
#define BITCOIN_INDEX_BASE_H
#include <dbwrapper.h>
#include <threadinterrupt.h>
#include <validationinterface.h>
class CBlock;
class CBlockIndex;
class Chainstate;
struct IndexSummary {
std::string name;
bool synced{false};
int best_block_height{0};
};
/**
* Base class for indices of blockchain data. This implements
* CValidationInterface and ensures blocks are indexed sequentially according
* to their position in the active chain.
*/
class BaseIndex : public CValidationInterface {
protected:
/**
* The database stores a block locator of the chain the database is synced
* to so that the TxIndex can efficiently determine the point it last
* stopped at. A locator is used instead of a simple hash of the chain tip
* because blocks and block index entries may not be flushed to disk until
* after this database is updated.
*/
class DB : public CDBWrapper {
public:
DB(const fs::path &path, size_t n_cache_size, bool f_memory = false,
bool f_wipe = false, bool f_obfuscate = false);
/// Read block locator of the chain that the index is in sync with.
bool ReadBestBlock(CBlockLocator &locator) const;
/// Write block locator of the chain that the index is in sync with.
void WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator);
};
private:
/// Whether the index is in sync with the main chain. The flag is flipped
/// from false to true once, after which point this starts processing
/// ValidationInterface notifications to stay in sync.
+ ///
+ /// Note that this will latch to true *immediately* upon startup if
+ /// `m_chainstate->m_chain` is empty, which will be the case upon startup
+ /// with an empty datadir if, e.g., `-txindex=1` is specified.
std::atomic<bool> m_synced{false};
/// The last block in the chain that the index is in sync with.
std::atomic<const CBlockIndex *> m_best_block_index{nullptr};
std::thread m_thread_sync;
CThreadInterrupt m_interrupt;
/// Sync the index with the block index starting from the current best
/// block. Intended to be run in its own thread, m_thread_sync, and can be
/// interrupted with m_interrupt. Once the index gets in sync, the m_synced
/// flag is set and the BlockConnected ValidationInterface callback takes
/// over and the sync thread exits.
void ThreadSync();
/// Write the current index state (eg. chain block locator and
/// subclass-specific items) to disk.
///
/// Recommendations for error handling:
/// If called on a successor of the previous committed best block in the
/// index, the index can continue processing without risk of corruption,
/// though the index state will need to catch up from further behind on
/// reboot. If the new state is not a successor of the previous state (due
/// to a chain reorganization), the index must halt until Commit succeeds or
/// else it could end up getting corrupted.
bool Commit();
virtual bool AllowPrune() const = 0;
protected:
Chainstate *m_chainstate{nullptr};
void BlockConnected(const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) override;
void ChainStateFlushed(const CBlockLocator &locator) override;
const CBlockIndex *CurrentIndex() { return m_best_block_index.load(); };
/// Initialize internal state from the database and block index.
[[nodiscard]] virtual bool Init();
/// Write index entries for a newly connected block.
virtual bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) {
return true;
}
/// Virtual method called internally by Commit that can be overridden to
/// atomically commit more index state.
virtual bool CommitInternal(CDBBatch &batch);
/// Rewind index to an earlier chain tip during a chain reorg. The tip must
/// be an ancestor of the current best block.
virtual bool Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip);
virtual DB &GetDB() const = 0;
/// Get the name of the index for display in logs.
virtual const char *GetName() const = 0;
/// Update the internal best block index as well as the prune lock.
void SetBestBlockIndex(const CBlockIndex *block);
public:
/// Destructor interrupts sync thread if running and blocks until it exits.
virtual ~BaseIndex();
/// Blocks the current thread until the index is caught up to the current
/// state of the block chain. This only blocks if the index has gotten in
/// sync once and only needs to process blocks in the ValidationInterface
/// queue. If the index is catching up from far behind, this method does
/// not block and immediately returns false.
bool BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(::cs_main);
void Interrupt();
/// Start initializes the sync state and registers the instance as a
/// ValidationInterface so that it stays in sync with blockchain updates.
[[nodiscard]] bool Start(Chainstate &active_chainstate);
/// Stops the instance from staying in sync with blockchain updates.
void Stop();
/// Get a summary of the index and its state.
IndexSummary GetSummary() const;
};
#endif // BITCOIN_INDEX_BASE_H
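To illustrate the protected interface declared above, a minimal subclass sketch follows. Assumptions: the DummyIndex name, the DB_BLOCK_HEIGHT key prefix, and the hash-to-height payload are invented for illustration and are not part of this change. The sketch provides the required GetDB/GetName/AllowPrune overrides, writes one entry per connected block in WriteBlock, and delegates CommitInternal to the base class so the best-block locator is committed in the same batch as any subclass state.

// Sketch only: a hypothetical minimal index built on BaseIndex.
#include <chain.h>
#include <dbwrapper.h>
#include <index/base.h>
#include <memory>
#include <utility>

class DummyIndex final : public BaseIndex {
private:
    // Hypothetical key prefix for per-block entries.
    static constexpr uint8_t DB_BLOCK_HEIGHT{'h'};
    std::unique_ptr<BaseIndex::DB> m_db;

    bool AllowPrune() const override { return false; }

protected:
    bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override {
        // Only pindex is used here: store block hash -> height.
        CDBBatch batch(*m_db);
        batch.Write(std::make_pair(DB_BLOCK_HEIGHT, pindex->GetBlockHash()),
                    pindex->nHeight);
        return m_db->WriteBatch(batch);
    }

    bool CommitInternal(CDBBatch &batch) override {
        // A subclass could append its own entries to `batch` here; delegating
        // to the base class writes the best-block locator atomically with them.
        return BaseIndex::CommitInternal(batch);
    }

    BaseIndex::DB &GetDB() const override { return *m_db; }
    const char *GetName() const override { return "dummyindex"; }

public:
    DummyIndex(const fs::path &path, size_t n_cache_size)
        : m_db(std::make_unique<BaseIndex::DB>(path, n_cache_size)) {}
};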