diff --git a/src/index/base.cpp b/src/index/base.cpp
index 8be490411..08a3656e6 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -1,438 +1,440 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chain.h>
#include <chainparams.h>
#include <common/args.h>
#include <config.h>
#include <index/base.h>
#include <interfaces/chain.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <node/context.h>
#include <node/database_args.h>
#include <node/ui_interface.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For Chainstate
#include <warnings.h>
#include <functional>
+#include <string>
+#include <utility>
constexpr uint8_t DB_BEST_BLOCK{'B'};
constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
template <typename... Args>
static void FatalError(const char *fmt, const Args &...args) {
std::string strMessage = tfm::format(fmt, args...);
SetMiscWarning(Untranslated(strMessage));
LogPrintf("*** %s\n", strMessage);
AbortError(_("A fatal internal error occurred, see debug.log for details"));
StartShutdown();
}
BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory,
bool f_wipe, bool f_obfuscate)
: CDBWrapper{DBParams{.path = path,
.cache_bytes = n_cache_size,
.memory_only = f_memory,
.wipe_data = f_wipe,
.obfuscate = f_obfuscate,
.options = [] {
DBOptions options;
node::ReadDatabaseArgs(gArgs, options);
return options;
}()}} {}
bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const {
bool success = Read(DB_BEST_BLOCK, locator);
if (!success) {
locator.SetNull();
}
return success;
}
void BaseIndex::DB::WriteBestBlock(CDBBatch &batch,
const CBlockLocator &locator) {
batch.Write(DB_BEST_BLOCK, locator);
}
-BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain)
- : m_chain{std::move(chain)} {}
+BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
+ : m_chain{std::move(chain)}, m_name{std::move(name)} {}
BaseIndex::~BaseIndex() {
Interrupt();
Stop();
}
bool BaseIndex::Init() {
CBlockLocator locator;
if (!GetDB().ReadBestBlock(locator)) {
locator.SetNull();
}
LOCK(cs_main);
CChain &active_chain = m_chainstate->m_chain;
if (locator.IsNull()) {
SetBestBlockIndex(nullptr);
} else {
SetBestBlockIndex(m_chainstate->FindForkInGlobalIndex(locator));
}
// Note: this will latch to true immediately if the user starts up with an
// empty datadir and an index enabled. If this is the case, indexation will
// happen solely via `BlockConnected` signals until, possibly, the next
// restart.
m_synced = m_best_block_index.load() == active_chain.Tip();
if (!m_synced) {
bool prune_violation = false;
if (!m_best_block_index) {
// index is not built yet
// make sure we have all block data back to the genesis
prune_violation =
m_chainstate->m_blockman.GetFirstStoredBlock(
*active_chain.Tip()) != active_chain.Genesis();
}
// in case the index has a best block set and is not fully synced
// check if we have the required blocks to continue building the index
else {
const CBlockIndex *block_to_test = m_best_block_index.load();
if (!active_chain.Contains(block_to_test)) {
// if the bestblock is not part of the mainchain, find the fork
// and make sure we have all data down to the fork
block_to_test = active_chain.FindFork(block_to_test);
}
const CBlockIndex *block = active_chain.Tip();
prune_violation = true;
// check backwards from the tip if we have all block data until we
// reach the index's best block
while (block_to_test && block && block->nStatus.hasData()) {
if (block_to_test == block) {
prune_violation = false;
break;
}
// block->pprev must exist at this point, since block_to_test is
// part of the chain and thus must be encountered when going
// backwards from the tip
assert(block->pprev);
block = block->pprev;
}
}
if (prune_violation) {
return InitError(strprintf(
Untranslated("%s best block of the index goes beyond pruned "
"data. Please disable the index or reindex (which "
"will download the whole blockchain again)"),
GetName()));
}
}
return true;
}
static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev,
CChain &chain)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
if (!pindex_prev) {
return chain.Genesis();
}
const CBlockIndex *pindex = chain.Next(pindex_prev);
if (pindex) {
return pindex;
}
return chain.Next(chain.FindFork(pindex_prev));
}
void BaseIndex::ThreadSync() {
const CBlockIndex *pindex = m_best_block_index.load();
if (!m_synced) {
int64_t last_log_time = 0;
int64_t last_locator_write_time = 0;
while (true) {
if (m_interrupt) {
SetBestBlockIndex(pindex);
// No need to handle errors in Commit. If it fails, the error
// will already be logged. The best way to recover is to
// continue, as the index cannot be corrupted by a missed commit to
// disk for an advanced index state.
Commit();
return;
}
{
LOCK(cs_main);
const CBlockIndex *pindex_next =
NextSyncBlock(pindex, m_chainstate->m_chain);
if (!pindex_next) {
SetBestBlockIndex(pindex);
m_synced = true;
// No need to handle errors in Commit. See rationale above.
Commit();
break;
}
if (pindex_next->pprev != pindex &&
!Rewind(pindex, pindex_next->pprev)) {
FatalError(
"%s: Failed to rewind index %s to a previous chain tip",
__func__, GetName());
return;
}
pindex = pindex_next;
}
CBlock block;
if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) {
FatalError("%s: Failed to read block %s from disk", __func__,
pindex->GetBlockHash().ToString());
return;
}
if (!WriteBlock(block, pindex)) {
FatalError("%s: Failed to write block %s to index database",
__func__, pindex->GetBlockHash().ToString());
return;
}
int64_t current_time = GetTime();
if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
LogPrintf("Syncing %s with block chain from height %d\n",
GetName(), pindex->nHeight);
last_log_time = current_time;
}
if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL <
current_time) {
SetBestBlockIndex(pindex->pprev);
last_locator_write_time = current_time;
// No need to handle errors in Commit. See rationale above.
Commit();
}
}
}
if (pindex) {
LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
} else {
LogPrintf("%s is enabled\n", GetName());
}
}
bool BaseIndex::Commit() {
CDBBatch batch(GetDB());
if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) {
return error("%s: Failed to commit latest %s state", __func__,
GetName());
}
return true;
}
bool BaseIndex::CommitInternal(CDBBatch &batch) {
LOCK(cs_main);
// Don't commit anything if we haven't indexed any block yet
// (this could happen if init is interrupted).
if (m_best_block_index == nullptr) {
return false;
}
GetDB().WriteBestBlock(batch, GetLocator(m_best_block_index));
return true;
}
bool BaseIndex::Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) {
assert(current_tip == m_best_block_index);
assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
// In the case of a reorg, ensure persisted block locator is not stale.
// Pruning has a minimum of 288 blocks-to-keep and getting the index
// out of sync may be possible, but would be the user's fault.
// In case we reorg beyond the pruned depth, ReadBlockFromDisk would
// throw and lead to a graceful shutdown
SetBestBlockIndex(new_tip);
if (!Commit()) {
// If commit fails, revert the best block index to avoid corruption.
SetBestBlockIndex(current_tip);
return false;
}
return true;
}
void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) {
if (!m_synced) {
return;
}
const CBlockIndex *best_block_index = m_best_block_index.load();
if (!best_block_index) {
if (pindex->nHeight != 0) {
FatalError("%s: First block connected is not the genesis block "
"(height=%d)",
__func__, pindex->nHeight);
return;
}
} else {
// Ensure block connects to an ancestor of the current best block. This
// should be the case most of the time, but may not be immediately after
// the sync thread catches up and sets m_synced. Consider the case
// where there is a reorg and the blocks on the stale branch are in the
// ValidationInterface queue backlog even after the sync thread has
// caught up to the new chain tip. In this unlikely event, log a warning
// and let the queue clear.
if (best_block_index->GetAncestor(pindex->nHeight - 1) !=
pindex->pprev) {
LogPrintf("%s: WARNING: Block %s does not connect to an ancestor "
"of known best chain (tip=%s); not updating index\n",
__func__, pindex->GetBlockHash().ToString(),
best_block_index->GetBlockHash().ToString());
return;
}
if (best_block_index != pindex->pprev &&
!Rewind(best_block_index, pindex->pprev)) {
FatalError("%s: Failed to rewind index %s to a previous chain tip",
__func__, GetName());
return;
}
}
if (WriteBlock(*block, pindex)) {
// Setting the best block index is intentionally the last step of this
// function, so BlockUntilSyncedToCurrentChain callers waiting for the
// best block index to be updated can rely on the block being fully
// processed, and the index object being safe to delete.
SetBestBlockIndex(pindex);
} else {
FatalError("%s: Failed to write block %s to index", __func__,
pindex->GetBlockHash().ToString());
return;
}
}
void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) {
if (!m_synced) {
return;
}
const BlockHash &locator_tip_hash = locator.vHave.front();
const CBlockIndex *locator_tip_index;
{
LOCK(cs_main);
locator_tip_index =
m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
}
if (!locator_tip_index) {
FatalError("%s: First block (hash=%s) in locator was not found",
__func__, locator_tip_hash.ToString());
return;
}
// This checks that ChainStateFlushed callbacks are received after
// BlockConnected. The check may fail immediately after the sync thread
// catches up and sets m_synced. Consider the case where there is a reorg
// and the blocks on the stale branch are in the ValidationInterface queue
// backlog even after the sync thread has caught up to the new chain tip. In
// this unlikely event, log a warning and let the queue clear.
const CBlockIndex *best_block_index = m_best_block_index.load();
if (best_block_index->GetAncestor(locator_tip_index->nHeight) !=
locator_tip_index) {
LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known "
"best chain (tip=%s); not writing index locator\n",
__func__, locator_tip_hash.ToString(),
best_block_index->GetBlockHash().ToString());
return;
}
// No need to handle errors in Commit. If it fails, the error will
// already be logged. The best way to recover is to continue, as the index
// cannot be corrupted by a missed commit to disk for an advanced index
// state.
Commit();
}
bool BaseIndex::BlockUntilSyncedToCurrentChain() const {
AssertLockNotHeld(cs_main);
if (!m_synced) {
return false;
}
{
// Skip the queue-draining stuff if we know we're caught up with
// m_chain.Tip().
LOCK(cs_main);
const CBlockIndex *chain_tip = m_chainstate->m_chain.Tip();
const CBlockIndex *best_block_index = m_best_block_index.load();
if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
return true;
}
}
LogPrintf("%s: %s is catching up on block notifications\n", __func__,
GetName());
SyncWithValidationInterfaceQueue();
return true;
}
void BaseIndex::Interrupt() {
m_interrupt();
}
bool BaseIndex::Start() {
// m_chainstate member gives indexing code access to node internals. It is
// removed in a follow-up
m_chainstate = &m_chain->context()->chainman->ActiveChainstate();
// Need to register this ValidationInterface before running Init(), so that
// callbacks are not missed if Init sets m_synced to true.
RegisterValidationInterface(this);
if (!Init()) {
return false;
}
m_thread_sync =
std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
return true;
}
void BaseIndex::Stop() {
UnregisterValidationInterface(this);
if (m_thread_sync.joinable()) {
m_thread_sync.join();
}
}
IndexSummary BaseIndex::GetSummary() const {
IndexSummary summary{};
summary.name = GetName();
summary.synced = m_synced;
summary.best_block_height =
m_best_block_index ? m_best_block_index.load()->nHeight : 0;
return summary;
}
void BaseIndex::SetBestBlockIndex(const CBlockIndex *block) {
assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
if (AllowPrune() && block) {
node::PruneLockInfo prune_lock;
prune_lock.height_first = block->nHeight;
WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(
GetName(), prune_lock));
}
// Intentionally set m_best_block_index as the last step in this function,
// after updating prune locks above, and after making any other references
// to *this, so the BlockUntilSyncedToCurrentChain function (which checks
// m_best_block_index as an optimization) can be used to wait for the last
// BlockConnected notification and safely assume that prune locks are
// updated and that the index object is safe to delete.
m_best_block_index = block;
}
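
Illustrative sketch, not part of this patch: with the new constructor signature a subclass passes its display name up to BaseIndex once, instead of overriding a virtual GetName(). The class DummyIndex, the "indexes/dummy" path, and the cache-size parameter are made-up names for the example.

#include <common/args.h>
#include <index/base.h>

#include <memory>
#include <utility>

class DummyIndex final : public BaseIndex {
    std::unique_ptr<BaseIndex::DB> m_db;

    bool AllowPrune() const override { return false; }

protected:
    BaseIndex::DB &GetDB() const override { return *m_db; }

public:
    DummyIndex(std::unique_ptr<interfaces::Chain> chain, size_t n_cache_size)
        // The display name is now passed to the base class once, instead of
        // being returned from a virtual GetName() override.
        : BaseIndex(std::move(chain), "dummy index"),
          m_db(std::make_unique<BaseIndex::DB>(
              gArgs.GetDataDirNet() / "indexes" / "dummy", n_cache_size)) {}
};
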
diff --git a/src/index/base.h b/src/index/base.h
index 4663b614d..cc586b4f1 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -1,147 +1,150 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INDEX_BASE_H
#define BITCOIN_INDEX_BASE_H
#include <dbwrapper.h>
#include <interfaces/chain.h>
#include <threadinterrupt.h>
#include <validationinterface.h>
+#include <string>
+
class CBlock;
class CBlockIndex;
class Chainstate;
struct IndexSummary {
std::string name;
bool synced{false};
int best_block_height{0};
};
/**
* Base class for indices of blockchain data. This implements
* CValidationInterface and ensures blocks are indexed sequentially according
* to their position in the active chain.
*/
class BaseIndex : public CValidationInterface {
protected:
/**
* The database stores a block locator of the chain the database is synced
* to so that the TxIndex can efficiently determine the point it last
* stopped at. A locator is used instead of a simple hash of the chain tip
* because blocks and block index entries may not be flushed to disk until
* after this database is updated.
*/
class DB : public CDBWrapper {
public:
DB(const fs::path &path, size_t n_cache_size, bool f_memory = false,
bool f_wipe = false, bool f_obfuscate = false);
/// Read block locator of the chain that the index is in sync with.
bool ReadBestBlock(CBlockLocator &locator) const;
/// Write block locator of the chain that the index is in sync with.
void WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator);
};
private:
/// Whether the index is in sync with the main chain. The flag is flipped
/// from false to true once, after which point this starts processing
/// ValidationInterface notifications to stay in sync.
///
/// Note that this will latch to true *immediately* upon startup if
/// `m_chainstate->m_chain` is empty, which will be the case upon startup
/// with an empty datadir if, e.g., `-txindex=1` is specified.
std::atomic<bool> m_synced{false};
/// The last block in the chain that the index is in sync with.
std::atomic<const CBlockIndex *> m_best_block_index{nullptr};
std::thread m_thread_sync;
CThreadInterrupt m_interrupt;
/// Sync the index with the block index starting from the current best
/// block. Intended to be run in its own thread, m_thread_sync, and can be
/// interrupted with m_interrupt. Once the index gets in sync, the m_synced
/// flag is set and the BlockConnected ValidationInterface callback takes
/// over and the sync thread exits.
void ThreadSync();
/// Write the current index state (e.g. chain block locator and
/// subclass-specific items) to disk.
///
/// Recommendations for error handling:
/// If called on a successor of the previous committed best block in the
/// index, the index can continue processing without risk of corruption,
/// though the index state will need to catch up from further behind on
/// reboot. If the new state is not a successor of the previous state (due
/// to a chain reorganization), the index must halt until Commit succeeds or
/// else it could end up getting corrupted.
bool Commit();
virtual bool AllowPrune() const = 0;
protected:
std::unique_ptr<interfaces::Chain> m_chain;
Chainstate *m_chainstate{nullptr};
+ const std::string m_name;
void BlockConnected(const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) override;
void ChainStateFlushed(const CBlockLocator &locator) override;
const CBlockIndex *CurrentIndex() { return m_best_block_index.load(); };
/// Initialize internal state from the database and block index.
[[nodiscard]] virtual bool Init();
/// Write update index entries for a newly connected block.
virtual bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) {
return true;
}
/// Virtual method called internally by Commit that can be overridden to
/// atomically commit more index state.
virtual bool CommitInternal(CDBBatch &batch);
/// Rewind index to an earlier chain tip during a chain reorg. The tip must
/// be an ancestor of the current best block.
virtual bool Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip);
virtual DB &GetDB() const = 0;
/// Get the name of the index for display in logs.
- virtual const char *GetName() const = 0;
+ const std::string &GetName() const LIFETIMEBOUND { return m_name; }
/// Update the internal best block index as well as the prune lock.
void SetBestBlockIndex(const CBlockIndex *block);
public:
- BaseIndex(std::unique_ptr<interfaces::Chain> chain);
+ BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name);
/// Destructor interrupts sync thread if running and blocks until it exits.
virtual ~BaseIndex();
/// Blocks the current thread until the index is caught up to the current
/// state of the block chain. This only blocks if the index has gotten in
/// sync once and only needs to process blocks in the ValidationInterface
/// queue. If the index is catching up from far behind, this method does
/// not block and immediately returns false.
bool BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(::cs_main);
void Interrupt();
/// Start initializes the sync state and registers the instance as a
/// ValidationInterface so that it stays in sync with blockchain updates.
[[nodiscard]] bool Start();
/// Stops the instance from staying in sync with blockchain updates.
void Stop();
/// Get a summary of the index and its state.
IndexSummary GetSummary() const;
};
#endif // BITCOIN_INDEX_BASE_H
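
A hedged usage sketch, assuming the index was constructed and started elsewhere: GetName() now returns a reference to the stored m_name, and GetSummary() copies it into IndexSummary, so a caller can log an index's state as below. The helper name LogIndexSummary is an assumption for the example.

#include <index/txindex.h>
#include <logging.h>

// Hypothetical helper, not present in the tree.
static void LogIndexSummary() {
    if (!g_txindex) {
        return;
    }
    const IndexSummary summary = g_txindex->GetSummary();
    LogPrintf("%s: synced=%d best_block_height=%d\n", summary.name,
              summary.synced, summary.best_block_height);
}
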
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index be3ad03c8..5d56fddfc 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -1,516 +1,517 @@
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <common/args.h>
#include <dbwrapper.h>
#include <index/blockfilterindex.h>
#include <node/blockstorage.h>
#include <primitives/blockhash.h>
#include <util/fs_helpers.h>
#include <validation.h>
#include <map>
/**
* The index database stores three items for each block: the disk location of
* the encoded filter, its dSHA256 hash, and the header. Those belonging to
* blocks on the active chain are indexed by height, and those belonging to
* blocks that have been reorganized out of the active chain are indexed by
* block hash. This ensures that filter data for any block that becomes part of
* the active chain can always be retrieved, alleviating timing concerns.
*
* The filters themselves are stored in flat files and referenced by the LevelDB
* entries. This minimizes the amount of data written to LevelDB and keeps the
* database values constant size. The disk location of the next block filter to
* be written (represented as a FlatFilePos) is stored under the DB_FILTER_POS
* key.
*
* Keys for the height index have the type [DB_BLOCK_HEIGHT, uint32 (BE)]. The
* height is represented as big-endian so that sequential reads of filters by
* height are fast. Keys for the hash index have the type [DB_BLOCK_HASH,
* uint256].
*/
constexpr uint8_t DB_BLOCK_HASH{'s'};
constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
constexpr uint8_t DB_FILTER_POS{'P'};
// 16 MiB
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000;
/** The pre-allocation chunk size for fltr?????.dat files */
// 1 MiB
constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000;
/**
* Maximum size of the cfheaders cache.
* We have a limit to prevent a bug in filling this cache
* potentially turning into an OOM. At 2000 entries, this cache
* is big enough for a 2,000,000-block chain, which
* should be enough until ~2047.
*/
constexpr size_t CF_HEADERS_CACHE_MAX_SZ{2000};
namespace {
struct DBVal {
uint256 hash;
uint256 header;
FlatFilePos pos;
SERIALIZE_METHODS(DBVal, obj) { READWRITE(obj.hash, obj.header, obj.pos); }
};
struct DBHeightKey {
int height;
DBHeightKey() : height(0) {}
explicit DBHeightKey(int height_in) : height(height_in) {}
template <typename Stream> void Serialize(Stream &s) const {
ser_writedata8(s, DB_BLOCK_HEIGHT);
ser_writedata32be(s, height);
}
template <typename Stream> void Unserialize(Stream &s) {
const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure(
"Invalid format for block filter index DB height key");
}
height = ser_readdata32be(s);
}
};
struct DBHashKey {
BlockHash hash;
explicit DBHashKey(const BlockHash &hash_in) : hash(hash_in) {}
SERIALIZE_METHODS(DBHashKey, obj) {
uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure(
"Invalid format for block filter index DB hash key");
}
READWRITE(obj.hash);
}
};
}; // namespace
static std::map<BlockFilterType, BlockFilterIndex> g_filter_indexes;
BlockFilterIndex::BlockFilterIndex(std::unique_ptr<interfaces::Chain> chain,
BlockFilterType filter_type,
size_t n_cache_size, bool f_memory,
bool f_wipe)
- : BaseIndex(std::move(chain)), m_filter_type(filter_type) {
+ : BaseIndex(std::move(chain),
+ BlockFilterTypeName(filter_type) + " block filter index"),
+ m_filter_type(filter_type) {
const std::string &filter_name = BlockFilterTypeName(filter_type);
if (filter_name.empty()) {
throw std::invalid_argument("unknown filter_type");
}
fs::path path =
gArgs.GetDataDirNet() / "indexes" / "blockfilter" / filter_name;
fs::create_directories(path);
- m_name = filter_name + " block filter index";
m_db = std::make_unique<BaseIndex::DB>(path / "db", n_cache_size, f_memory,
f_wipe);
m_filter_fileseq = std::make_unique<FlatFileSeq>(std::move(path), "fltr",
FLTR_FILE_CHUNK_SIZE);
}
bool BlockFilterIndex::Init() {
if (!m_db->Read(DB_FILTER_POS, m_next_filter_pos)) {
// Check that the cause of the read failure is that the key does not
// exist. Any other errors indicate database corruption or a disk
// failure, and starting the index would cause further corruption.
if (m_db->Exists(DB_FILTER_POS)) {
return error(
"%s: Cannot read current %s state; index may be corrupted",
__func__, GetName());
}
// If the DB_FILTER_POS is not set, then initialize to the first
// location.
m_next_filter_pos.nFile = 0;
m_next_filter_pos.nPos = 0;
}
return BaseIndex::Init();
}
bool BlockFilterIndex::CommitInternal(CDBBatch &batch) {
const FlatFilePos &pos = m_next_filter_pos;
// Flush current filter file to disk.
AutoFile file{m_filter_fileseq->Open(pos)};
if (file.IsNull()) {
return error("%s: Failed to open filter file %d", __func__, pos.nFile);
}
if (!FileCommit(file.Get())) {
return error("%s: Failed to commit filter file %d", __func__,
pos.nFile);
}
batch.Write(DB_FILTER_POS, pos);
return BaseIndex::CommitInternal(batch);
}
bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos &pos,
BlockFilter &filter) const {
AutoFile filein{m_filter_fileseq->Open(pos, true)};
if (filein.IsNull()) {
return false;
}
BlockHash block_hash;
std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
filter =
BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter));
} catch (const std::exception &e) {
return error("%s: Failed to deserialize block filter from disk: %s",
__func__, e.what());
}
return true;
}
size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos &pos,
const BlockFilter &filter) {
assert(filter.GetFilterType() == GetFilterType());
size_t data_size =
GetSerializeSize(filter.GetBlockHash(), CLIENT_VERSION) +
GetSerializeSize(filter.GetEncodedFilter(), CLIENT_VERSION);
// If writing the filter would overflow the file, flush and move to the next
// one.
if (pos.nPos + data_size > MAX_FLTR_FILE_SIZE) {
AutoFile last_file{m_filter_fileseq->Open(pos)};
if (last_file.IsNull()) {
LogPrintf("%s: Failed to open filter file %d\n", __func__,
pos.nFile);
return 0;
}
if (!TruncateFile(last_file.Get(), pos.nPos)) {
LogPrintf("%s: Failed to truncate filter file %d\n", __func__,
pos.nFile);
return 0;
}
if (!FileCommit(last_file.Get())) {
LogPrintf("%s: Failed to commit filter file %d\n", __func__,
pos.nFile);
return 0;
}
pos.nFile++;
pos.nPos = 0;
}
// Pre-allocate sufficient space for filter data.
bool out_of_space;
m_filter_fileseq->Allocate(pos, data_size, out_of_space);
if (out_of_space) {
LogPrintf("%s: out of disk space\n", __func__);
return 0;
}
AutoFile fileout{m_filter_fileseq->Open(pos)};
if (fileout.IsNull()) {
LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
return 0;
}
fileout << filter.GetBlockHash() << filter.GetEncodedFilter();
return data_size;
}
bool BlockFilterIndex::WriteBlock(const CBlock &block,
const CBlockIndex *pindex) {
CBlockUndo block_undo;
uint256 prev_header;
if (pindex->nHeight > 0) {
if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) {
return false;
}
std::pair<BlockHash, DBVal> read_out;
if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) {
return false;
}
BlockHash expected_block_hash = pindex->pprev->GetBlockHash();
if (read_out.first != expected_block_hash) {
return error("%s: previous block header belongs to unexpected "
"block %s; expected %s",
__func__, read_out.first.ToString(),
expected_block_hash.ToString());
}
prev_header = read_out.second.header;
}
BlockFilter filter(m_filter_type, block, block_undo);
size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter);
if (bytes_written == 0) {
return false;
}
std::pair<BlockHash, DBVal> value;
value.first = pindex->GetBlockHash();
value.second.hash = filter.GetHash();
value.second.header = filter.ComputeHeader(prev_header);
value.second.pos = m_next_filter_pos;
if (!m_db->Write(DBHeightKey(pindex->nHeight), value)) {
return false;
}
m_next_filter_pos.nPos += bytes_written;
return true;
}
static bool CopyHeightIndexToHashIndex(CDBIterator &db_it, CDBBatch &batch,
const std::string &index_name,
int start_height, int stop_height) {
DBHeightKey key(start_height);
db_it.Seek(key);
for (int height = start_height; height <= stop_height; ++height) {
if (!db_it.GetKey(key) || key.height != height) {
return error("%s: unexpected key in %s: expected (%c, %d)",
__func__, index_name, DB_BLOCK_HEIGHT, height);
}
std::pair<BlockHash, DBVal> value;
if (!db_it.GetValue(value)) {
return error("%s: unable to read value in %s at key (%c, %d)",
__func__, index_name, DB_BLOCK_HEIGHT, height);
}
batch.Write(DBHashKey(value.first), std::move(value.second));
db_it.Next();
}
return true;
}
bool BlockFilterIndex::Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) {
assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
CDBBatch batch(*m_db);
std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());
// During a reorg, we need to copy all filters for blocks that are getting
// disconnected from the height index to the hash index so we can still find
// them when the height index entries are overwritten.
if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip->nHeight,
current_tip->nHeight)) {
return false;
}
// The latest filter position gets written in Commit by the call to
// BaseIndex::Rewind. But since this creates new references to the filter,
// the position should get updated here atomically as well in case Commit
// fails.
batch.Write(DB_FILTER_POS, m_next_filter_pos);
if (!m_db->WriteBatch(batch)) {
return false;
}
return BaseIndex::Rewind(current_tip, new_tip);
}
static bool LookupOne(const CDBWrapper &db, const CBlockIndex *block_index,
DBVal &result) {
// First check if the result is stored under the height index and the value
// there matches the block hash. This should be the case if the block is on
// the active chain.
std::pair<BlockHash, DBVal> read_out;
if (!db.Read(DBHeightKey(block_index->nHeight), read_out)) {
return false;
}
if (read_out.first == block_index->GetBlockHash()) {
result = std::move(read_out.second);
return true;
}
// If the value at the height index corresponds to a different block, the
// result will be stored in the hash index.
return db.Read(DBHashKey(block_index->GetBlockHash()), result);
}
static bool LookupRange(CDBWrapper &db, const std::string &index_name,
int start_height, const CBlockIndex *stop_index,
std::vector<DBVal> &results) {
if (start_height < 0) {
return error("%s: start height (%d) is negative", __func__,
start_height);
}
if (start_height > stop_index->nHeight) {
return error("%s: start height (%d) is greater than stop height (%d)",
__func__, start_height, stop_index->nHeight);
}
size_t results_size =
static_cast<size_t>(stop_index->nHeight - start_height + 1);
std::vector<std::pair<BlockHash, DBVal>> values(results_size);
DBHeightKey key(start_height);
std::unique_ptr<CDBIterator> db_it(db.NewIterator());
db_it->Seek(DBHeightKey(start_height));
for (int height = start_height; height <= stop_index->nHeight; ++height) {
if (!db_it->Valid() || !db_it->GetKey(key) || key.height != height) {
return false;
}
size_t i = static_cast<size_t>(height - start_height);
if (!db_it->GetValue(values[i])) {
return error("%s: unable to read value in %s at key (%c, %d)",
__func__, index_name, DB_BLOCK_HEIGHT, height);
}
db_it->Next();
}
results.resize(results_size);
// Iterate backwards through block indexes collecting results in order to
// access the block hash of each entry in case we need to look it up in the
// hash index.
for (const CBlockIndex *block_index = stop_index;
block_index && block_index->nHeight >= start_height;
block_index = block_index->pprev) {
BlockHash block_hash = block_index->GetBlockHash();
size_t i = static_cast<size_t>(block_index->nHeight - start_height);
if (block_hash == values[i].first) {
results[i] = std::move(values[i].second);
continue;
}
if (!db.Read(DBHashKey(block_hash), results[i])) {
return error("%s: unable to read value in %s at key (%c, %s)",
__func__, index_name, DB_BLOCK_HASH,
block_hash.ToString());
}
}
return true;
}
bool BlockFilterIndex::LookupFilter(const CBlockIndex *block_index,
BlockFilter &filter_out) const {
DBVal entry;
if (!LookupOne(*m_db, block_index, entry)) {
return false;
}
return ReadFilterFromDisk(entry.pos, filter_out);
}
bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex *block_index,
uint256 &header_out) {
LOCK(m_cs_headers_cache);
bool is_checkpoint{block_index->nHeight % CFCHECKPT_INTERVAL == 0};
if (is_checkpoint) {
// Try to find the block in the headers cache if this is a checkpoint
// height.
auto header = m_headers_cache.find(block_index->GetBlockHash());
if (header != m_headers_cache.end()) {
header_out = header->second;
return true;
}
}
DBVal entry;
if (!LookupOne(*m_db, block_index, entry)) {
return false;
}
if (is_checkpoint && m_headers_cache.size() < CF_HEADERS_CACHE_MAX_SZ) {
// Add to the headers cache if this is a checkpoint height.
m_headers_cache.emplace(block_index->GetBlockHash(), entry.header);
}
header_out = entry.header;
return true;
}
bool BlockFilterIndex::LookupFilterRange(
int start_height, const CBlockIndex *stop_index,
std::vector<BlockFilter> &filters_out) const {
std::vector<DBVal> entries;
if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) {
return false;
}
filters_out.resize(entries.size());
auto filter_pos_it = filters_out.begin();
for (const auto &entry : entries) {
if (!ReadFilterFromDisk(entry.pos, *filter_pos_it)) {
return false;
}
++filter_pos_it;
}
return true;
}
bool BlockFilterIndex::LookupFilterHashRange(
int start_height, const CBlockIndex *stop_index,
std::vector<uint256> &hashes_out) const
{
std::vector<DBVal> entries;
if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) {
return false;
}
hashes_out.clear();
hashes_out.reserve(entries.size());
for (const auto &entry : entries) {
hashes_out.push_back(entry.hash);
}
return true;
}
BlockFilterIndex *GetBlockFilterIndex(BlockFilterType filter_type) {
auto it = g_filter_indexes.find(filter_type);
return it != g_filter_indexes.end() ? &it->second : nullptr;
}
void ForEachBlockFilterIndex(std::function<void(BlockFilterIndex &)> fn) {
for (auto &entry : g_filter_indexes) {
fn(entry.second);
}
}
bool InitBlockFilterIndex(
std::function<std::unique_ptr<interfaces::Chain>()> make_chain,
BlockFilterType filter_type, size_t n_cache_size, bool f_memory,
bool f_wipe) {
auto result = g_filter_indexes.emplace(
std::piecewise_construct, std::forward_as_tuple(filter_type),
std::forward_as_tuple(make_chain(), filter_type, n_cache_size, f_memory,
f_wipe));
return result.second;
}
bool DestroyBlockFilterIndex(BlockFilterType filter_type) {
return g_filter_indexes.erase(filter_type);
}
void DestroyAllBlockFilterIndexes() {
g_filter_indexes.clear();
}
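
To make the height-key layout described in the comment at the top of this file concrete, here is an illustrative snippet. It is only usable inside this translation unit, since DBHeightKey lives in an anonymous namespace, and the CDataStream usage is an assumption for the example.

CDataStream ss(SER_DISK, CLIENT_VERSION);
ss << DBHeightKey(1000);
// ss now holds five bytes: the prefix 't' (DB_BLOCK_HEIGHT) followed by
// 0x00 0x00 0x03 0xe8, i.e. the height 1000 in big-endian order. Because the
// height is big-endian, LevelDB's lexicographic key order matches height
// order, which keeps sequential filter reads by height fast.
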
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index 0a971feff..123e483df 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -1,117 +1,113 @@
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INDEX_BLOCKFILTERINDEX_H
#define BITCOIN_INDEX_BLOCKFILTERINDEX_H
#include <blockfilter.h>
#include <chain.h>
#include <flatfile.h>
#include <index/base.h>
#include <util/hasher.h>
static const char *const DEFAULT_BLOCKFILTERINDEX = "0";
/** Interval between compact filter checkpoints. See BIP 157. */
static constexpr int CFCHECKPT_INTERVAL = 1000;
/**
* BlockFilterIndex is used to store and retrieve block filters, hashes, and
* headers for a range of blocks by height. An index is constructed for each
* supported filter type with its own database (ie. filter data for different
* types are stored in separate databases).
*
* This index is used to serve BIP 157 net requests.
*/
class BlockFilterIndex final : public BaseIndex {
private:
BlockFilterType m_filter_type;
- std::string m_name;
std::unique_ptr<BaseIndex::DB> m_db;
FlatFilePos m_next_filter_pos;
std::unique_ptr<FlatFileSeq> m_filter_fileseq;
bool ReadFilterFromDisk(const FlatFilePos &pos, BlockFilter &filter) const;
size_t WriteFilterToDisk(FlatFilePos &pos, const BlockFilter &filter);
Mutex m_cs_headers_cache;
/**
* Cache of block hash to filter header, to avoid disk access when
* responding to getcfcheckpt.
*/
std::unordered_map<BlockHash, uint256, FilterHeaderHasher>
m_headers_cache GUARDED_BY(m_cs_headers_cache);
bool AllowPrune() const override { return true; }
protected:
bool Init() override;
bool CommitInternal(CDBBatch &batch) override;
bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override;
bool Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) override;
BaseIndex::DB &GetDB() const override { return *m_db; }
-
- const char *GetName() const override { return m_name.c_str(); }
-
public:
/** Constructs the index, which becomes available to be queried. */
explicit BlockFilterIndex(std::unique_ptr<interfaces::Chain> chain,
BlockFilterType filter_type, size_t n_cache_size,
bool f_memory = false, bool f_wipe = false);
BlockFilterType GetFilterType() const { return m_filter_type; }
/** Get a single filter by block. */
bool LookupFilter(const CBlockIndex *block_index,
BlockFilter &filter_out) const;
/** Get a single filter header by block. */
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out)
EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache);
/** Get a range of filters between two heights on a chain. */
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index,
std::vector<BlockFilter> &filters_out) const;
/** Get a range of filter hashes between two heights on a chain. */
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index,
std::vector<uint256> &hashes_out) const;
};
/**
* Get a block filter index by type. Returns nullptr if index has not been
* initialized or was already destroyed.
*/
BlockFilterIndex *GetBlockFilterIndex(BlockFilterType filter_type);
/** Iterate over all running block filter indexes, invoking fn on each. */
void ForEachBlockFilterIndex(std::function<void(BlockFilterIndex &)> fn);
/**
* Initialize a block filter index for the given type if one does not already
* exist. Returns true if a new index is created and false if one has already
* been initialized.
*/
bool InitBlockFilterIndex(
std::function<std::unique_ptr<interfaces::Chain>()> make_chain,
BlockFilterType filter_type, size_t n_cache_size, bool f_memory = false,
bool f_wipe = false);
/**
* Destroy the block filter index with the given type. Returns false if no such
* index exists. This just releases the allocated memory and closes the database
* connection, it does not delete the index data.
*/
bool DestroyBlockFilterIndex(BlockFilterType filter_type);
/** Destroy all open block filter indexes. */
void DestroyAllBlockFilterIndexes();
#endif // BITCOIN_INDEX_BLOCKFILTERINDEX_H
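
A hedged sketch of how BIP 157 serving code might query the index through the public API declared above; the helper name GetBasicFilter, its return type, and the include list are assumptions for the example, not code from this patch.

#include <blockfilter.h>
#include <chain.h>
#include <index/blockfilterindex.h>

#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical helper: returns the encoded basic filter for a block, or
// nullopt if the index is unavailable or has no entry for this block.
static std::optional<std::vector<uint8_t>>
GetBasicFilter(const CBlockIndex *block_index) {
    BlockFilterIndex *index = GetBlockFilterIndex(BlockFilterType::BASIC);
    if (!index) {
        return std::nullopt;
    }
    BlockFilter filter;
    if (!index->LookupFilter(block_index, filter)) {
        return std::nullopt;
    }
    return filter.GetEncodedFilter();
}
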
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index 50d4e2548..063e3dce0 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -1,555 +1,555 @@
// Copyright (c) 2020-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <index/coinstatsindex.h>
#include <chainparams.h>
#include <coins.h>
#include <common/args.h>
#include <consensus/amount.h>
#include <crypto/muhash.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <primitives/blockhash.h>
#include <serialize.h>
#include <txdb.h>
#include <undo.h>
#include <util/check.h>
#include <validation.h>
using kernel::CCoinsStats;
using kernel::GetBogoSize;
using kernel::TxOutSer;
static constexpr uint8_t DB_BLOCK_HASH{'s'};
static constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
static constexpr uint8_t DB_MUHASH{'M'};
namespace {
struct DBVal {
uint256 muhash;
uint64_t transaction_output_count;
uint64_t bogo_size;
Amount total_amount;
Amount total_subsidy;
Amount total_unspendable_amount;
Amount total_prevout_spent_amount;
Amount total_new_outputs_ex_coinbase_amount;
Amount total_coinbase_amount;
Amount total_unspendables_genesis_block;
Amount total_unspendables_bip30;
Amount total_unspendables_scripts;
Amount total_unspendables_unclaimed_rewards;
SERIALIZE_METHODS(DBVal, obj) {
READWRITE(obj.muhash);
READWRITE(obj.transaction_output_count);
READWRITE(obj.bogo_size);
READWRITE(obj.total_amount);
READWRITE(obj.total_subsidy);
READWRITE(obj.total_unspendable_amount);
READWRITE(obj.total_prevout_spent_amount);
READWRITE(obj.total_new_outputs_ex_coinbase_amount);
READWRITE(obj.total_coinbase_amount);
READWRITE(obj.total_unspendables_genesis_block);
READWRITE(obj.total_unspendables_bip30);
READWRITE(obj.total_unspendables_scripts);
READWRITE(obj.total_unspendables_unclaimed_rewards);
}
};
struct DBHeightKey {
int height;
explicit DBHeightKey(int height_in) : height(height_in) {}
template <typename Stream> void Serialize(Stream &s) const {
ser_writedata8(s, DB_BLOCK_HEIGHT);
ser_writedata32be(s, height);
}
template <typename Stream> void Unserialize(Stream &s) {
const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure(
"Invalid format for coinstatsindex DB height key");
}
height = ser_readdata32be(s);
}
};
struct DBHashKey {
BlockHash block_hash;
explicit DBHashKey(const BlockHash &hash_in) : block_hash(hash_in) {}
SERIALIZE_METHODS(DBHashKey, obj) {
uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure(
"Invalid format for coinstatsindex DB hash key");
}
READWRITE(obj.block_hash);
}
};
}; // namespace
std::unique_ptr<CoinStatsIndex> g_coin_stats_index;
CoinStatsIndex::CoinStatsIndex(std::unique_ptr<interfaces::Chain> chain,
size_t n_cache_size, bool f_memory, bool f_wipe)
- : BaseIndex(std::move(chain)) {
+ : BaseIndex(std::move(chain), "coinstatsindex") {
fs::path path{gArgs.GetDataDirNet() / "indexes" / "coinstats"};
fs::create_directories(path);
m_db = std::make_unique<CoinStatsIndex::DB>(path / "db", n_cache_size,
f_memory, f_wipe);
}
bool CoinStatsIndex::WriteBlock(const CBlock &block,
const CBlockIndex *pindex) {
CBlockUndo block_undo;
const Amount block_subsidy{
GetBlockSubsidy(pindex->nHeight, Params().GetConsensus())};
m_total_subsidy += block_subsidy;
// Ignore genesis block
if (pindex->nHeight > 0) {
if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) {
return false;
}
std::pair<BlockHash, DBVal> read_out;
if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) {
return false;
}
BlockHash expected_block_hash{pindex->pprev->GetBlockHash()};
if (read_out.first != expected_block_hash) {
LogPrintf("WARNING: previous block header belongs to unexpected "
"block %s; expected %s\n",
read_out.first.ToString(),
expected_block_hash.ToString());
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
return error("%s: previous block header not found; expected %s",
__func__, expected_block_hash.ToString());
}
}
// TODO: Deduplicate BIP30 related code
bool is_bip30_block{
(pindex->nHeight == 91722 &&
pindex->GetBlockHash() ==
BlockHash{uint256S("0x00000000000271a2dc26e7667f8419f2e15416dc"
"6955e5a6c6cdf3f2574dd08e")}) ||
(pindex->nHeight == 91812 &&
pindex->GetBlockHash() ==
BlockHash{uint256S("0x00000000000af0aed4792b1acee3d966af36cf5d"
"ef14935db8de83d6f9306f2f")})};
// Add the new utxos created from the block
for (size_t i = 0; i < block.vtx.size(); ++i) {
const auto &tx{block.vtx.at(i)};
// Skip duplicate txid coinbase transactions (BIP30).
if (is_bip30_block && tx->IsCoinBase()) {
m_total_unspendable_amount += block_subsidy;
m_total_unspendables_bip30 += block_subsidy;
continue;
}
for (uint32_t j = 0; j < tx->vout.size(); ++j) {
const CTxOut &out{tx->vout[j]};
Coin coin{out, static_cast<uint32_t>(pindex->nHeight),
tx->IsCoinBase()};
COutPoint outpoint{tx->GetId(), j};
// Skip unspendable coins
if (coin.GetTxOut().scriptPubKey.IsUnspendable()) {
m_total_unspendable_amount += coin.GetTxOut().nValue;
m_total_unspendables_scripts += coin.GetTxOut().nValue;
continue;
}
m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin)));
if (tx->IsCoinBase()) {
m_total_coinbase_amount += coin.GetTxOut().nValue;
} else {
m_total_new_outputs_ex_coinbase_amount +=
coin.GetTxOut().nValue;
}
++m_transaction_output_count;
m_total_amount += coin.GetTxOut().nValue;
m_bogo_size += GetBogoSize(coin.GetTxOut().scriptPubKey);
}
// The coinbase tx has no undo data since no former output is spent
if (!tx->IsCoinBase()) {
const auto &tx_undo{block_undo.vtxundo.at(i - 1)};
for (size_t j = 0; j < tx_undo.vprevout.size(); ++j) {
Coin coin{tx_undo.vprevout[j]};
COutPoint outpoint{tx->vin[j].prevout.GetTxId(),
tx->vin[j].prevout.GetN()};
m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin)));
m_total_prevout_spent_amount += coin.GetTxOut().nValue;
--m_transaction_output_count;
m_total_amount -= coin.GetTxOut().nValue;
m_bogo_size -= GetBogoSize(coin.GetTxOut().scriptPubKey);
}
}
}
} else {
// genesis block
m_total_unspendable_amount += block_subsidy;
m_total_unspendables_genesis_block += block_subsidy;
}
// If spent prevouts + block subsidy are still a higher amount than
// new outputs + coinbase + current unspendable amount, this means
// the miner did not claim the full block reward. Unclaimed block
// rewards are also unspendable.
const Amount unclaimed_rewards{
(m_total_prevout_spent_amount + m_total_subsidy) -
(m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount +
m_total_unspendable_amount)};
m_total_unspendable_amount += unclaimed_rewards;
m_total_unspendables_unclaimed_rewards += unclaimed_rewards;
std::pair<BlockHash, DBVal> value;
value.first = pindex->GetBlockHash();
value.second.transaction_output_count = m_transaction_output_count;
value.second.bogo_size = m_bogo_size;
value.second.total_amount = m_total_amount;
value.second.total_subsidy = m_total_subsidy;
value.second.total_unspendable_amount = m_total_unspendable_amount;
value.second.total_prevout_spent_amount = m_total_prevout_spent_amount;
value.second.total_new_outputs_ex_coinbase_amount =
m_total_new_outputs_ex_coinbase_amount;
value.second.total_coinbase_amount = m_total_coinbase_amount;
value.second.total_unspendables_genesis_block =
m_total_unspendables_genesis_block;
value.second.total_unspendables_bip30 = m_total_unspendables_bip30;
value.second.total_unspendables_scripts = m_total_unspendables_scripts;
value.second.total_unspendables_unclaimed_rewards =
m_total_unspendables_unclaimed_rewards;
uint256 out;
m_muhash.Finalize(out);
value.second.muhash = out;
// Intentionally do not update DB_MUHASH here so it stays in sync with
// DB_BEST_BLOCK, and the index is not corrupted if there is an unclean
// shutdown.
return m_db->Write(DBHeightKey(pindex->nHeight), value);
}
static bool CopyHeightIndexToHashIndex(CDBIterator &db_it, CDBBatch &batch,
const std::string &index_name,
int start_height, int stop_height) {
DBHeightKey key{start_height};
db_it.Seek(key);
for (int height = start_height; height <= stop_height; ++height) {
if (!db_it.GetKey(key) || key.height != height) {
return error("%s: unexpected key in %s: expected (%c, %d)",
__func__, index_name, DB_BLOCK_HEIGHT, height);
}
std::pair<BlockHash, DBVal> value;
if (!db_it.GetValue(value)) {
return error("%s: unable to read value in %s at key (%c, %d)",
__func__, index_name, DB_BLOCK_HEIGHT, height);
}
batch.Write(DBHashKey(value.first), std::move(value.second));
db_it.Next();
}
return true;
}
bool CoinStatsIndex::Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) {
assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
CDBBatch batch(*m_db);
std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());
// During a reorg, we need to copy all hash digests for blocks that are
// getting disconnected from the height index to the hash index so we can
// still find them when the height index entries are overwritten.
if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip->nHeight,
current_tip->nHeight)) {
return false;
}
if (!m_db->WriteBatch(batch)) {
return false;
}
{
LOCK(cs_main);
const CBlockIndex *iter_tip{m_chainstate->m_blockman.LookupBlockIndex(
current_tip->GetBlockHash())};
do {
CBlock block;
if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *iter_tip)) {
return error("%s: Failed to read block %s from disk", __func__,
iter_tip->GetBlockHash().ToString());
}
ReverseBlock(block, iter_tip);
iter_tip = iter_tip->GetAncestor(iter_tip->nHeight - 1);
} while (new_tip != iter_tip);
}
return BaseIndex::Rewind(current_tip, new_tip);
}
static bool LookUpOne(const CDBWrapper &db, const CBlockIndex *block_index,
DBVal &result) {
// First check if the result is stored under the height index and the value
// there matches the block hash. This should be the case if the block is on
// the active chain.
std::pair<BlockHash, DBVal> read_out;
if (!db.Read(DBHeightKey(block_index->nHeight), read_out)) {
return false;
}
if (read_out.first == block_index->GetBlockHash()) {
result = std::move(read_out.second);
return true;
}
// If the value at the height index corresponds to a different block, the
// result will be stored in the hash index.
return db.Read(DBHashKey(block_index->GetBlockHash()), result);
}
std::optional<CCoinsStats>
CoinStatsIndex::LookUpStats(const CBlockIndex *block_index) const {
CCoinsStats stats{Assert(block_index)->nHeight,
block_index->GetBlockHash()};
stats.index_used = true;
DBVal entry;
if (!LookUpOne(*m_db, block_index, entry)) {
return std::nullopt;
}
stats.hashSerialized = entry.muhash;
stats.nTransactionOutputs = entry.transaction_output_count;
stats.nBogoSize = entry.bogo_size;
stats.nTotalAmount = entry.total_amount;
stats.total_subsidy = entry.total_subsidy;
stats.total_unspendable_amount = entry.total_unspendable_amount;
stats.total_prevout_spent_amount = entry.total_prevout_spent_amount;
stats.total_new_outputs_ex_coinbase_amount =
entry.total_new_outputs_ex_coinbase_amount;
stats.total_coinbase_amount = entry.total_coinbase_amount;
stats.total_unspendables_genesis_block =
entry.total_unspendables_genesis_block;
stats.total_unspendables_bip30 = entry.total_unspendables_bip30;
stats.total_unspendables_scripts = entry.total_unspendables_scripts;
stats.total_unspendables_unclaimed_rewards =
entry.total_unspendables_unclaimed_rewards;
return stats;
}
bool CoinStatsIndex::Init() {
if (!m_db->Read(DB_MUHASH, m_muhash)) {
// Check that the cause of the read failure is that the key does not
// exist. Any other errors indicate database corruption or a disk
// failure, and starting the index would cause further corruption.
if (m_db->Exists(DB_MUHASH)) {
return error(
"%s: Cannot read current %s state; index may be corrupted",
__func__, GetName());
}
}
if (!BaseIndex::Init()) {
return false;
}
const CBlockIndex *pindex{CurrentIndex()};
if (pindex) {
DBVal entry;
if (!LookUpOne(*m_db, pindex, entry)) {
return error(
"%s: Cannot read current %s state; index may be corrupted",
__func__, GetName());
}
uint256 out;
m_muhash.Finalize(out);
if (entry.muhash != out) {
return error(
"%s: Cannot read current %s state; index may be corrupted",
__func__, GetName());
}
m_transaction_output_count = entry.transaction_output_count;
m_bogo_size = entry.bogo_size;
m_total_amount = entry.total_amount;
m_total_subsidy = entry.total_subsidy;
m_total_unspendable_amount = entry.total_unspendable_amount;
m_total_prevout_spent_amount = entry.total_prevout_spent_amount;
m_total_new_outputs_ex_coinbase_amount =
entry.total_new_outputs_ex_coinbase_amount;
m_total_coinbase_amount = entry.total_coinbase_amount;
m_total_unspendables_genesis_block =
entry.total_unspendables_genesis_block;
m_total_unspendables_bip30 = entry.total_unspendables_bip30;
m_total_unspendables_scripts = entry.total_unspendables_scripts;
m_total_unspendables_unclaimed_rewards =
entry.total_unspendables_unclaimed_rewards;
}
return true;
}
bool CoinStatsIndex::CommitInternal(CDBBatch &batch) {
// DB_MUHASH should always be committed in a batch together with
// DB_BEST_BLOCK to prevent an inconsistent state of the DB.
batch.Write(DB_MUHASH, m_muhash);
return BaseIndex::CommitInternal(batch);
}
// Reverse a single block as part of a reorg
bool CoinStatsIndex::ReverseBlock(const CBlock &block,
const CBlockIndex *pindex) {
CBlockUndo block_undo;
std::pair<BlockHash, DBVal> read_out;
const Amount block_subsidy{
GetBlockSubsidy(pindex->nHeight, Params().GetConsensus())};
m_total_subsidy -= block_subsidy;
// Ignore genesis block
if (pindex->nHeight > 0) {
if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) {
return false;
}
if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) {
return false;
}
BlockHash expected_block_hash{pindex->pprev->GetBlockHash()};
if (read_out.first != expected_block_hash) {
LogPrintf("WARNING: previous block header belongs to unexpected "
"block %s; expected %s\n",
read_out.first.ToString(),
expected_block_hash.ToString());
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
return error("%s: previous block header not found; expected %s",
__func__, expected_block_hash.ToString());
}
}
}
// Remove the new UTXOs that were created from the block
for (size_t i = 0; i < block.vtx.size(); ++i) {
const auto &tx{block.vtx.at(i)};
for (uint32_t j = 0; j < tx->vout.size(); ++j) {
const CTxOut &out{tx->vout[j]};
COutPoint outpoint{tx->GetId(), j};
Coin coin{out, static_cast<uint32_t>(pindex->nHeight),
tx->IsCoinBase()};
// Skip unspendable coins
if (coin.GetTxOut().scriptPubKey.IsUnspendable()) {
m_total_unspendable_amount -= coin.GetTxOut().nValue;
m_total_unspendables_scripts -= coin.GetTxOut().nValue;
continue;
}
m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin)));
if (tx->IsCoinBase()) {
m_total_coinbase_amount -= coin.GetTxOut().nValue;
} else {
m_total_new_outputs_ex_coinbase_amount -=
coin.GetTxOut().nValue;
}
--m_transaction_output_count;
m_total_amount -= coin.GetTxOut().nValue;
m_bogo_size -= GetBogoSize(coin.GetTxOut().scriptPubKey);
}
// The coinbase tx has no undo data since no former output is spent
if (!tx->IsCoinBase()) {
const auto &tx_undo{block_undo.vtxundo.at(i - 1)};
for (size_t j = 0; j < tx_undo.vprevout.size(); ++j) {
Coin coin{tx_undo.vprevout[j]};
COutPoint outpoint{tx->vin[j].prevout.GetTxId(),
tx->vin[j].prevout.GetN()};
m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin)));
m_total_prevout_spent_amount -= coin.GetTxOut().nValue;
m_transaction_output_count++;
m_total_amount += coin.GetTxOut().nValue;
m_bogo_size += GetBogoSize(coin.GetTxOut().scriptPubKey);
}
}
}
const Amount unclaimed_rewards{
(m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount +
m_total_unspendable_amount) -
(m_total_prevout_spent_amount + m_total_subsidy)};
m_total_unspendable_amount -= unclaimed_rewards;
m_total_unspendables_unclaimed_rewards -= unclaimed_rewards;
// Check that the rolled back internal values are consistent with the DB
// read out
uint256 out;
m_muhash.Finalize(out);
Assert(read_out.second.muhash == out);
Assert(m_transaction_output_count ==
read_out.second.transaction_output_count);
Assert(m_total_amount == read_out.second.total_amount);
Assert(m_bogo_size == read_out.second.bogo_size);
Assert(m_total_subsidy == read_out.second.total_subsidy);
Assert(m_total_unspendable_amount ==
read_out.second.total_unspendable_amount);
Assert(m_total_prevout_spent_amount ==
read_out.second.total_prevout_spent_amount);
Assert(m_total_new_outputs_ex_coinbase_amount ==
read_out.second.total_new_outputs_ex_coinbase_amount);
Assert(m_total_coinbase_amount == read_out.second.total_coinbase_amount);
Assert(m_total_unspendables_genesis_block ==
read_out.second.total_unspendables_genesis_block);
Assert(m_total_unspendables_bip30 ==
read_out.second.total_unspendables_bip30);
Assert(m_total_unspendables_scripts ==
read_out.second.total_unspendables_scripts);
Assert(m_total_unspendables_unclaimed_rewards ==
read_out.second.total_unspendables_unclaimed_rewards);
return true;
}
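
As a worked example of the unclaimed-rewards accounting performed in WriteBlock above (all numbers hypothetical): if, across the blocks processed so far, prevouts worth 1000 coins were spent and 500 coins of subsidy were created, while the index has counted 990 coins of non-coinbase outputs, 505 coins of coinbase outputs and 4 coins of unspendable outputs, then (1000 + 500) - (990 + 505 + 4) = 1 coin of subsidy was never claimed by any miner. That 1 coin is added to both m_total_unspendable_amount and m_total_unspendables_unclaimed_rewards, and ReverseBlock undoes the same computation with the signs flipped.
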
diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h
index 7923b2c70..a67752b5e 100644
--- a/src/index/coinstatsindex.h
+++ b/src/index/coinstatsindex.h
@@ -1,72 +1,69 @@
// Copyright (c) 2020-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INDEX_COINSTATSINDEX_H
#define BITCOIN_INDEX_COINSTATSINDEX_H
#include <chain.h>
#include <crypto/muhash.h>
#include <flatfile.h>
#include <index/base.h>
#include <kernel/coinstats.h>
struct Amount;
static constexpr bool DEFAULT_COINSTATSINDEX{false};
/**
* CoinStatsIndex maintains statistics on the UTXO set.
*/
class CoinStatsIndex final : public BaseIndex {
private:
- std::string m_name;
std::unique_ptr<BaseIndex::DB> m_db;
MuHash3072 m_muhash;
uint64_t m_transaction_output_count{0};
uint64_t m_bogo_size{0};
Amount m_total_amount{Amount::zero()};
Amount m_total_subsidy{Amount::zero()};
Amount m_total_unspendable_amount{Amount::zero()};
Amount m_total_prevout_spent_amount{Amount::zero()};
Amount m_total_new_outputs_ex_coinbase_amount{Amount::zero()};
Amount m_total_coinbase_amount{Amount::zero()};
Amount m_total_unspendables_genesis_block{Amount::zero()};
Amount m_total_unspendables_bip30{Amount::zero()};
Amount m_total_unspendables_scripts{Amount::zero()};
Amount m_total_unspendables_unclaimed_rewards{Amount::zero()};
bool ReverseBlock(const CBlock &block, const CBlockIndex *pindex);
bool AllowPrune() const override { return true; }
protected:
bool Init() override;
bool CommitInternal(CDBBatch &batch) override;
bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override;
bool Rewind(const CBlockIndex *current_tip,
const CBlockIndex *new_tip) override;
BaseIndex::DB &GetDB() const override { return *m_db; }
- const char *GetName() const override { return "coinstatsindex"; }
-
public:
// Constructs the index, which becomes available to be queried.
explicit CoinStatsIndex(std::unique_ptr<interfaces::Chain> chain,
size_t n_cache_size, bool f_memory = false,
bool f_wipe = false);
// Look up stats for a specific block using CBlockIndex
std::optional<kernel::CCoinsStats>
LookUpStats(const CBlockIndex *block_index) const;
};
/// The global UTXO set hash object.
extern std::unique_ptr<CoinStatsIndex> g_coin_stats_index;
#endif // BITCOIN_INDEX_COINSTATSINDEX_H
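
A hedged usage sketch for the public LookUpStats API above; the helper name LogUtxoSetStats, the chosen log fields, and the include list are assumptions for the example.

#include <index/coinstatsindex.h>
#include <logging.h>

#include <optional>

// Hypothetical helper, not present in the tree.
static void LogUtxoSetStats(const CBlockIndex *block_index) {
    if (!g_coin_stats_index) {
        return;
    }
    const std::optional<kernel::CCoinsStats> stats =
        g_coin_stats_index->LookUpStats(block_index);
    if (!stats) {
        return;
    }
    LogPrintf("UTXO set at %s: muhash=%s outputs=%d\n",
              block_index->GetBlockHash().ToString(),
              stats->hashSerialized.ToString(), stats->nTransactionOutputs);
}
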
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 8a2c16501..2453d8ecc 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -1,104 +1,104 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <index/txindex.h>
#include <chain.h>
#include <common/args.h>
#include <index/disktxpos.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <validation.h>
constexpr uint8_t DB_TXINDEX{'t'};
std::unique_ptr<TxIndex> g_txindex;
/** Access to the txindex database (indexes/txindex/) */
class TxIndex::DB : public BaseIndex::DB {
public:
explicit DB(size_t n_cache_size, bool f_memory = false,
bool f_wipe = false);
/// Read the disk location of the transaction data with the given ID.
/// Returns false if the transaction ID is not indexed.
bool ReadTxPos(const TxId &txid, CDiskTxPos &pos) const;
/// Write a batch of transaction positions to the DB.
bool WriteTxs(const std::vector<std::pair<TxId, CDiskTxPos>> &v_pos);
};
TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe)
: BaseIndex::DB(gArgs.GetDataDirNet() / "indexes" / "txindex", n_cache_size,
f_memory, f_wipe) {}
bool TxIndex::DB::ReadTxPos(const TxId &txid, CDiskTxPos &pos) const {
return Read(std::make_pair(DB_TXINDEX, txid), pos);
}
bool TxIndex::DB::WriteTxs(
const std::vector<std::pair<TxId, CDiskTxPos>> &v_pos) {
CDBBatch batch(*this);
for (const auto &tuple : v_pos) {
batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
}
return WriteBatch(batch);
}
TxIndex::TxIndex(std::unique_ptr<interfaces::Chain> chain, size_t n_cache_size,
bool f_memory, bool f_wipe)
- : BaseIndex(std::move(chain)),
+ : BaseIndex(std::move(chain), "txindex"),
m_db(std::make_unique<TxIndex::DB>(n_cache_size, f_memory, f_wipe)) {}
TxIndex::~TxIndex() {}
bool TxIndex::WriteBlock(const CBlock &block, const CBlockIndex *pindex) {
// Exclude genesis block transaction because outputs are not spendable.
if (pindex->nHeight == 0) {
return true;
}
CDiskTxPos pos(WITH_LOCK(::cs_main, return pindex->GetBlockPos()),
GetSizeOfCompactSize(block.vtx.size()));
std::vector<std::pair<TxId, CDiskTxPos>> vPos;
vPos.reserve(block.vtx.size());
for (const auto &tx : block.vtx) {
vPos.emplace_back(tx->GetId(), pos);
pos.nTxOffset += ::GetSerializeSize(*tx, CLIENT_VERSION);
}
return m_db->WriteTxs(vPos);
}
BaseIndex::DB &TxIndex::GetDB() const {
return *m_db;
}
bool TxIndex::FindTx(const TxId &txid, BlockHash &block_hash,
CTransactionRef &tx) const {
CDiskTxPos postx;
if (!m_db->ReadTxPos(txid, postx)) {
return false;
}
CAutoFile file(m_chainstate->m_blockman.OpenBlockFile(postx, true),
SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
return error("%s: OpenBlockFile failed", __func__);
}
CBlockHeader header;
try {
file >> header;
if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) {
return error("%s: fseek(...) failed", __func__);
}
file >> tx;
} catch (const std::exception &e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
if (tx->GetId() != txid) {
return error("%s: txid mismatch", __func__);
}
block_hash = header.GetHash();
return true;
}
diff --git a/src/index/txindex.h b/src/index/txindex.h
index 34ac5ade4..44961fc67 100644
--- a/src/index/txindex.h
+++ b/src/index/txindex.h
@@ -1,62 +1,60 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INDEX_TXINDEX_H
#define BITCOIN_INDEX_TXINDEX_H
#include <index/base.h>
#include <memory>
struct BlockHash;
struct TxId;
static constexpr bool DEFAULT_TXINDEX{false};
/**
* TxIndex is used to look up transactions included in the blockchain by ID.
* The index is written to a LevelDB database and records the filesystem
* location of each transaction by transaction ID.
*/
class TxIndex final : public BaseIndex {
protected:
class DB;
private:
const std::unique_ptr<DB> m_db;
bool AllowPrune() const override { return false; }
protected:
bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override;
BaseIndex::DB &GetDB() const override;
- const char *GetName() const override { return "txindex"; }
-
public:
/// Constructs the index, which becomes available to be queried.
explicit TxIndex(std::unique_ptr<interfaces::Chain> chain,
size_t n_cache_size, bool f_memory = false,
bool f_wipe = false);
// Destructor is declared because this class contains a unique_ptr to an
// incomplete type.
virtual ~TxIndex() override;
/// Look up a transaction by identifier.
///
/// @param[in] txid The ID of the transaction to be returned.
/// @param[out] block_hash The hash of the block the transaction is found
/// in.
/// @param[out] tx The transaction itself.
/// @return true if transaction is found, false otherwise
bool FindTx(const TxId &txid, BlockHash &block_hash,
CTransactionRef &tx) const;
};
/// The global transaction index, used in GetTransaction. May be null.
extern std::unique_ptr<TxIndex> g_txindex;
#endif // BITCOIN_INDEX_TXINDEX_H
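
A hedged usage sketch for FindTx; the helper name LookupTransaction, its error handling, and the include paths are assumptions for the example.

#include <index/txindex.h>
#include <primitives/blockhash.h>
#include <primitives/transaction.h>
#include <primitives/txid.h>

// Hypothetical helper, not present in the tree: returns the transaction with
// the given ID, or nullptr if the index is disabled or has no entry.
static CTransactionRef LookupTransaction(const TxId &txid) {
    if (!g_txindex) {
        return nullptr;
    }
    BlockHash block_hash;
    CTransactionRef tx;
    if (!g_txindex->FindTx(txid, block_hash, tx)) {
        return nullptr;
    }
    // block_hash now identifies the block in which the transaction was found.
    return tx;
}
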
