diff --git a/src/index/base.cpp b/src/index/base.cpp
index c6d619dd2..d86fcd807 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -1,399 +1,396 @@
// Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include // For CChainState
#include
#include

using node::ReadBlockFromDisk;

constexpr char DB_BEST_BLOCK = 'B';

constexpr int64_t SYNC_LOG_INTERVAL = 30;           // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds

template <typename... Args>
static void FatalError(const char *fmt, const Args &...args) {
    std::string strMessage = tfm::format(fmt, args...);
    SetMiscWarning(Untranslated(strMessage));
    LogPrintf("*** %s\n", strMessage);
    AbortError(_("A fatal internal error occurred, see debug.log for details"));
    StartShutdown();
}

BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory,
                  bool f_wipe, bool f_obfuscate)
    : CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate) {}

bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const {
    bool success = Read(DB_BEST_BLOCK, locator);
    if (!success) {
        locator.SetNull();
    }
    return success;
}

void BaseIndex::DB::WriteBestBlock(CDBBatch &batch,
                                   const CBlockLocator &locator) {
    batch.Write(DB_BEST_BLOCK, locator);
}

BaseIndex::~BaseIndex() {
    Interrupt();
    Stop();
}

bool BaseIndex::Init() {
    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain &active_chain = m_chainstate->m_chain;
    if (locator.IsNull()) {
        m_best_block_index = nullptr;
    } else {
        m_best_block_index = m_chainstate->FindForkInGlobalIndex(locator);
    }
    m_synced = m_best_block_index.load() == active_chain.Tip();
    if (!m_synced) {
        bool prune_violation = false;
        if (!m_best_block_index) {
            // index is not built yet
            // make sure we have all block data back to the genesis
-            const CBlockIndex *block = active_chain.Tip();
-            while (block->pprev && block->pprev->nStatus.hasData()) {
-                block = block->pprev;
-            }
-            prune_violation = block != active_chain.Genesis();
+            prune_violation = node::GetFirstStoredBlock(active_chain.Tip()) !=
+                              active_chain.Genesis();
        }
        // in case the index has a best block set and is not fully synced
        // check if we have the required blocks to continue building the index
        else {
            const CBlockIndex *block_to_test = m_best_block_index.load();
            if (!active_chain.Contains(block_to_test)) {
                // if the best block is not part of the main chain, find the
                // fork and make sure we have all data down to the fork
                block_to_test = active_chain.FindFork(block_to_test);
            }
            const CBlockIndex *block = active_chain.Tip();
            prune_violation = true;
            // check backwards from the tip if we have all block data until
            // we reach the index's best block
            while (block_to_test && block && block->nStatus.hasData()) {
                if (block_to_test == block) {
                    prune_violation = false;
                    break;
                }
                // block->pprev must exist at this point, since block_to_test
                // is part of the chain and thus must be encountered when
                // going backwards from the tip
                assert(block->pprev);
                block = block->pprev;
            }
        }
        if (prune_violation) {
            // throw error and graceful shutdown if we can't build the index
            FatalError("%s: %s best block of the index goes beyond pruned "
                       "data. Please disable the index or reindex (which will "
                       "download the whole blockchain again)",
                       __func__, GetName());
            return false;
        }
    }
    return true;
}
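
The refactor above replaces an inline back-walk with node::GetFirstStoredBlock(). A minimal standalone sketch of that walk and the prune-violation check it feeds (not part of the diff; the Node type and the five-block toy chain are made up for illustration):

// Editor's sketch, illustrative only: step back from the tip while the
// parent still has block data; a prune violation exists iff the walk
// stops short of genesis.
#include <cassert>
#include <vector>

struct Node {
    Node *pprev{nullptr};
    bool has_data{true};
};

const Node *FirstStoredBlock(const Node *tip) {
    assert(tip);
    while (tip->pprev && tip->pprev->has_data) {
        tip = tip->pprev;
    }
    return tip;
}

int main() {
    std::vector<Node> chain(5);
    for (size_t i = 1; i < chain.size(); ++i) {
        chain[i].pprev = &chain[i - 1];
    }
    const Node *genesis = &chain[0];
    const Node *tip = &chain[4];
    assert(FirstStoredBlock(tip) == genesis); // nothing pruned: no violation
    chain[1].has_data = false;                // prune an early block
    assert(FirstStoredBlock(tip) != genesis); // now a prune violation
}
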
static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev,
                                        CChain &chain)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);

    if (!pindex_prev) {
        return chain.Genesis();
    }

    const CBlockIndex *pindex = chain.Next(pindex_prev);
    if (pindex) {
        return pindex;
    }

    return chain.Next(chain.FindFork(pindex_prev));
}

void BaseIndex::ThreadSync() {
    const CBlockIndex *pindex = m_best_block_index.load();
    if (!m_synced) {
        auto &consensus_params = GetConfig().GetChainParams().GetConsensus();

        int64_t last_log_time = 0;
        int64_t last_locator_write_time = 0;
        while (true) {
            if (m_interrupt) {
                m_best_block_index = pindex;
                // No need to handle errors in Commit. If it fails, the error
                // will already be logged. The best way to recover is to
                // continue, as the index cannot be corrupted by a missed
                // commit to disk for an advanced index state.
                Commit();
                return;
            }

            {
                LOCK(cs_main);
                const CBlockIndex *pindex_next =
                    NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    m_best_block_index = pindex;
                    m_synced = true;
                    // No need to handle errors in Commit. See rationale above.
                    Commit();
                    break;
                }
                if (pindex_next->pprev != pindex &&
                    !Rewind(pindex, pindex_next->pprev)) {
                    FatalError(
                        "%s: Failed to rewind index %s to a previous chain tip",
                        __func__, GetName());
                    return;
                }
                pindex = pindex_next;
            }

            int64_t current_time = GetTime();
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL <
                current_time) {
                m_best_block_index = pindex;
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }

            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
                FatalError("%s: Failed to read block %s from disk", __func__,
                           pindex->GetBlockHash().ToString());
                return;
            }
            if (!WriteBlock(block, pindex)) {
                FatalError("%s: Failed to write block %s to index database",
                           __func__, pindex->GetBlockHash().ToString());
                return;
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}

bool BaseIndex::Commit() {
    CDBBatch batch(GetDB());
    if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) {
        return error("%s: Failed to commit latest %s state", __func__,
                     GetName());
    }
    return true;
}

bool BaseIndex::CommitInternal(CDBBatch &batch) {
    LOCK(cs_main);
    // Don't commit anything if we haven't indexed any block yet
    // (this could happen if init is interrupted).
    if (m_best_block_index == nullptr) {
        return false;
    }
    GetDB().WriteBestBlock(
        batch, m_chainstate->m_chain.GetLocator(m_best_block_index));
    return true;
}

bool BaseIndex::Rewind(const CBlockIndex *current_tip,
                       const CBlockIndex *new_tip) {
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    // In the case of a reorg, ensure persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep, so getting the index out
    // of sync may be possible, but it is the user's fault.
    // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
    // throw and lead to a graceful shutdown.
    m_best_block_index = new_tip;
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        m_best_block_index = current_tip;
        return false;
    }

    return true;
}
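
ThreadSync above interleaves three concerns: advancing one block per iteration, logging at most every SYNC_LOG_INTERVAL seconds, and persisting the locator at most every SYNC_LOCATOR_WRITE_INTERVAL seconds. A compilable sketch of just that throttling skeleton (illustrative; the step/do_log/do_commit callables are stand-ins, and std::time replaces GetTime()):

// Editor's sketch, illustrative only: do cheap work every iteration,
// but only log or persist when the corresponding interval has elapsed.
#include <cstdint>
#include <ctime>
#include <functional>

void ThrottledLoop(std::function<bool()> step, std::function<void()> do_log,
                   std::function<void()> do_commit) {
    constexpr int64_t kLogInterval = 30;    // seconds, as in the diff
    constexpr int64_t kCommitInterval = 30; // seconds, as in the diff
    int64_t last_log = 0;
    int64_t last_commit = 0;
    while (step()) {
        const int64_t now = std::time(nullptr);
        if (last_log + kLogInterval < now) {
            do_log();
            last_log = now;
        }
        if (last_commit + kCommitInterval < now) {
            do_commit();
            last_commit = now;
        }
    }
}

int main() {
    int remaining = 3;
    ThrottledLoop([&] { return remaining-- > 0; }, [] {}, [] {});
}
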
void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock> &block,
                               const CBlockIndex *pindex) {
    if (!m_synced) {
        return;
    }

    const CBlockIndex *best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        if (pindex->nHeight != 0) {
            FatalError("%s: First block connected is not the genesis block "
                       "(height=%d)",
                       __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block.
        // This should be the case most of the time, but may not be
        // immediately after the sync thread catches up and sets m_synced.
        // Consider the case where there is a reorg and the blocks on the
        // stale branch are in the ValidationInterface queue backlog even
        // after the sync thread has caught up to the new chain tip. In this
        // unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) !=
            pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor "
                      "of known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        if (best_block_index != pindex->pprev &&
            !Rewind(best_block_index, pindex->pprev)) {
            FatalError("%s: Failed to rewind index %s to a previous chain tip",
                       __func__, GetName());
            return;
        }
    }

    if (WriteBlock(*block, pindex)) {
        m_best_block_index = pindex;
    } else {
        FatalError("%s: Failed to write block %s to index", __func__,
                   pindex->GetBlockHash().ToString());
        return;
    }
}

void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) {
    if (!m_synced) {
        return;
    }

    const BlockHash &locator_tip_hash = locator.vHave.front();
    const CBlockIndex *locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index =
            m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalError("%s: First block (hash=%s) in locator was not found",
                   __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after
    // BlockConnected. The check may fail immediately after the sync thread
    // catches up and sets m_synced. Consider the case where there is a reorg
    // and the blocks on the stale branch are in the ValidationInterface
    // queue backlog even after the sync thread has caught up to the new
    // chain tip. In this unlikely event, log a warning and let the queue
    // clear.
    const CBlockIndex *best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) !=
        locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known "
                  "best chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will
    // already be logged. The best way to recover is to continue, as the
    // index cannot be corrupted by a missed commit to disk for an advanced
    // index state.
    Commit();
}

bool BaseIndex::BlockUntilSyncedToCurrentChain() const {
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip the queue-draining stuff if we know we're caught up with
        // m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex *chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex *best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    LogPrintf("%s: %s is catching up on block notifications\n", __func__,
              GetName());
    SyncWithValidationInterfaceQueue();
    return true;
}
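
The three callbacks above all reduce to one question: is a given block an ancestor of the index's best block? A toy sketch of that test (illustrative; CBlockIndex::GetAncestor is backed by a skip list, the linear walk here is only for clarity):

// Editor's sketch, illustrative only: a block "connects" iff its parent
// is an ancestor of the current best block at the right height.
#include <cassert>
#include <vector>

struct Idx {
    Idx *pprev{nullptr};
    int height{0};
};

const Idx *GetAncestor(const Idx *from, int height) {
    while (from && from->height > height) {
        from = from->pprev;
    }
    return (from && from->height == height) ? from : nullptr;
}

int main() {
    std::vector<Idx> chain(4);
    for (size_t i = 1; i < chain.size(); ++i) {
        chain[i].pprev = &chain[i - 1];
        chain[i].height = int(i);
    }
    Idx stale{&chain[1], 2}; // a fork block at height 2, off the best chain
    const Idx *best = &chain[3];
    assert(GetAncestor(best, 2) == &chain[2]); // on the best chain: connects
    assert(GetAncestor(best, 2) != &stale);    // stale branch: warn and skip
}
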
void BaseIndex::Interrupt() {
    m_interrupt();
}

void BaseIndex::Start(CChainState &active_chainstate) {
    m_chainstate = &active_chainstate;
    // Need to register this ValidationInterface before running Init(), so
    // that callbacks are not missed if Init sets m_synced to true.
    RegisterValidationInterface(this);
    if (!Init()) {
        FatalError("%s: %s failed to initialize", __func__, GetName());
        return;
    }

    m_thread_sync =
        std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
}

void BaseIndex::Stop() {
    UnregisterValidationInterface(this);

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}

IndexSummary BaseIndex::GetSummary() const {
    IndexSummary summary{};
    summary.name = GetName();
    summary.synced = m_synced;
    summary.best_block_height =
        m_best_block_index ? m_best_block_index.load()->nHeight : 0;
    return summary;
}
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index d63e39fae..b018b2e32 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -1,923 +1,933 @@
// Copyright (c) 2011-2022 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace node {
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fPruneMode = false;
uint64_t nPruneTarget = 0;

static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();

std::vector<CBlockIndex *> BlockManager::GetAllBlockIndices() {
    AssertLockHeld(cs_main);
    std::vector<CBlockIndex *> rv;
    rv.reserve(m_block_index.size());
    for (auto &[_, block_index] : m_block_index) {
        rv.push_back(&block_index);
    }
    return rv;
}

CBlockIndex *BlockManager::LookupBlockIndex(const BlockHash &hash) {
    AssertLockHeld(cs_main);
    BlockMap::iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

const CBlockIndex *BlockManager::LookupBlockIndex(const BlockHash &hash) const {
    AssertLockHeld(cs_main);
    BlockMap::const_iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

CBlockIndex *BlockManager::AddToBlockIndex(const CBlockHeader &block,
                                           CBlockIndex *&best_header) {
    AssertLockHeld(cs_main);

    const auto [mi, inserted] =
        m_block_index.try_emplace(block.GetHash(), block);
    if (!inserted) {
        return &mi->second;
    }
    CBlockIndex *pindexNew = &(*mi).second;

    // We assign the sequence id to blocks only when the full data is
    // available, to avoid miners withholding blocks but broadcasting
    // headers, to get a competitive advantage.
    pindexNew->nSequenceId = 0;

    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end()) {
        pindexNew->pprev = &(*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nTimeReceived = GetTime();
    pindexNew->nTimeMax =
        (pindexNew->pprev
             ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
             : pindexNew->nTime);
    pindexNew->nChainWork =
        (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
        GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BlockValidity::TREE);
    if (best_header == nullptr ||
        best_header->nChainWork < pindexNew->nChainWork) {
        best_header = pindexNew;
    }

    m_dirty_blockindex.insert(pindexNew);
    return pindexNew;
}
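
AddToBlockIndex fills nChainWork and nTimeMax incrementally from the parent entry, so both can be computed in one pass as headers arrive. A small sketch of that accumulation pattern (illustrative; uint64_t stands in for arith_uint256 and the Hdr type is made up):

// Editor's sketch, illustrative only: cumulative work is parent's work
// plus this block's own proof; nTimeMax is a running maximum, which
// matters because block timestamps are not monotonic.
#include <algorithm>
#include <cassert>
#include <cstdint>

struct Hdr {
    Hdr *pprev{nullptr};
    uint64_t own_work{0};
    uint64_t chain_work{0};
    int64_t time{0};
    int64_t time_max{0};
};

void Accumulate(Hdr &h) {
    h.chain_work = (h.pprev ? h.pprev->chain_work : 0) + h.own_work;
    h.time_max = h.pprev ? std::max(h.pprev->time_max, h.time) : h.time;
}

int main() {
    Hdr a{nullptr, 10, 0, 100, 0};
    Hdr b{&a, 10, 0, 90, 0}; // earlier timestamp than its parent
    Accumulate(a);
    Accumulate(b);
    assert(b.chain_work == 20);
    assert(b.time_max == 100); // running max, not the block's own time
}
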
void BlockManager::PruneOneBlockFile(const int fileNumber) {
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (auto &entry : m_block_index) {
        CBlockIndex *pindex = &entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            m_dirty_blockindex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at
            // which point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
                    range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    m_blockfile_info[fileNumber].SetNull();
    m_dirty_fileinfo.insert(fileNumber);
}

void BlockManager::FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                          int nManualPruneHeight,
                                          int chain_tip_height) {
    assert(fPruneMode && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0) {
        return;
    }

    // last block to prune is the lesser of (user-specified height,
    // MIN_BLOCKS_TO_KEEP from the tip)
    unsigned int nLastBlockWeCanPrune{
        std::min((unsigned)nManualPruneHeight,
                 chain_tip_height - MIN_BLOCKS_TO_KEEP)};
    int count = 0;
    for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
        if (m_blockfile_info[fileNumber].nSize == 0 ||
            m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
            continue;
        }
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
              nLastBlockWeCanPrune, count);
}
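
FindFilesToPruneManual clamps the user-requested height so the last MIN_BLOCKS_TO_KEEP blocks below the tip always survive. Worked numbers (illustrative values only):

// Editor's sketch, illustrative only: the clamp applied to a manual
// pruneblockchain request.
#include <algorithm>
#include <cassert>

int main() {
    const int kMinBlocksToKeep = 288; // MIN_BLOCKS_TO_KEEP in the tree
    const int tip_height = 800000;
    const int requested = 799990; // user asks to prune almost to the tip
    const int last_prunable =
        std::min(requested, tip_height - kMinBlocksToKeep);
    assert(last_prunable == 799712); // 800000 - 288 wins over the request
}
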
void BlockManager::FindFilesToPrune(std::set<int> &setFilesToPrune,
                                    uint64_t nPruneAfterHeight,
                                    int chain_tip_height, int prune_height,
                                    bool is_ibd) {
    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if (uint64_t(chain_tip_height) <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = std::min(
        prune_height,
        chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for
    // files, so we should leave a buffer under our target to account for
    // another allocation before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high
        // dbcache values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a
        // re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
            nBytesToPrune = m_blockfile_info[fileNumber].nSize +
                            m_blockfile_info[fileNumber].nUndoSize;

            if (m_blockfile_info[fileNumber].nSize == 0) {
                continue;
            }

            // are we below our target?
            if (nCurrentUsage + nBuffer < nPruneTarget) {
                break;
            }

            // don't prune files that could have a block within
            // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (m_blockfile_info[fileNumber].nHeightLast >
                nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE,
             "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
             "max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
             nLastBlockWeCanPrune, count);
}

CBlockIndex *BlockManager::InsertBlockIndex(const BlockHash &hash) {
    AssertLockHeld(cs_main);

    if (hash.IsNull()) {
        return nullptr;
    }

    const auto [mi, inserted] = m_block_index.try_emplace(hash);
    CBlockIndex *pindex = &(*mi).second;
    if (inserted) {
        pindex->phashBlock = &((*mi).first);
    }
    return pindex;
}

bool BlockManager::LoadBlockIndex(const Consensus::Params &params) {
    AssertLockHeld(cs_main);
    if (!m_block_tree_db->LoadBlockIndexGuts(
            params, [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(
                        cs_main) { return this->InsertBlockIndex(hash); })) {
        return false;
    }

    // Calculate nChainWork
    std::vector<CBlockIndex *> vSortedByHeight{GetAllBlockIndices()};
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
              CBlockIndexHeightOnlyComparator());

    for (CBlockIndex *pindex : vSortedByHeight) {
        if (ShutdownRequested()) {
            return false;
        }
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
                             GetBlockProof(*pindex);
        pindex->nTimeMax =
            (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
                           : pindex->nTime);

        // We can link the chain of blocks for which we've received
        // transactions at some point, or blocks that are assumed-valid on
        // the basis of snapshot load (see PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (!pindex->UpdateChainStats() && pindex->pprev) {
                m_blocks_unlinked.insert(
                    std::make_pair(pindex->pprev, pindex));
            }
        }

        if (!pindex->nStatus.hasFailed() && pindex->pprev &&
            pindex->pprev->nStatus.hasFailed()) {
            pindex->nStatus = pindex->nStatus.withFailedParent();
            m_dirty_blockindex.insert(pindex);
        }

        if (pindex->pprev) {
            pindex->BuildSkip();
        }
    }

    return true;
}
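
FindFilesToPrune only acts once usage plus a one-allocation buffer crosses the target, and widens that buffer by 10% of the target during IBD so prune events (and the chainstate flushes they force) stay rare. A sketch of that trigger arithmetic (illustrative; the 550 MiB target and 500 MiB usage are made-up numbers):

// Editor's sketch, illustrative only: the prune trigger condition, with
// the IBD headroom from the diff.
#include <cassert>
#include <cstdint>

bool ShouldPrune(uint64_t usage, uint64_t target, bool is_ibd) {
    // One blk chunk (16 MiB) + one rev chunk (1 MiB), as in the diff.
    uint64_t buffer = 0x1000000 + 0x100000;
    if (is_ibd) {
        buffer += target / 10;
    }
    return usage + buffer >= target;
}

int main() {
    const uint64_t target = 550ull << 20; // a 550 MiB prune target
    assert(!ShouldPrune(500ull << 20, target, /*is_ibd=*/false));
    // The same usage trips the check under IBD's widened buffer:
    assert(ShouldPrune(500ull << 20, target, /*is_ibd=*/true));
}
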
void BlockManager::Unload() {
    m_blocks_unlinked.clear();
    m_block_index.clear();
    m_blockfile_info.clear();
    m_last_blockfile = 0;
    m_dirty_blockindex.clear();
    m_dirty_fileinfo.clear();
    m_have_pruned = false;
}

bool BlockManager::WriteBlockIndexDB() {
    std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
    vFiles.reserve(m_dirty_fileinfo.size());
    for (int i : m_dirty_fileinfo) {
        vFiles.push_back(std::make_pair(i, &m_blockfile_info[i]));
    }
    m_dirty_fileinfo.clear();

    std::vector<const CBlockIndex *> vBlocks;
    vBlocks.reserve(m_dirty_blockindex.size());
    for (const CBlockIndex *cbi : m_dirty_blockindex) {
        vBlocks.push_back(cbi);
    }
    m_dirty_blockindex.clear();

    if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
        return false;
    }
    return true;
}

bool BlockManager::LoadBlockIndexDB() {
    if (!LoadBlockIndex(::Params().GetConsensus())) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
    m_blockfile_info.resize(m_last_blockfile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
    for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__,
              m_blockfile_info[m_last_blockfile].ToString());
    for (int nFile = m_last_blockfile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            m_blockfile_info.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const auto &[_, block_index] : m_block_index) {
        if (block_index.nStatus.hasData()) {
            setBlkDataFiles.insert(block_index.nFile);
        }
    }
    for (const int i : setBlkDataFiles) {
        FlatFilePos pos(i, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)
                .IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
    if (m_have_pruned) {
        LogPrintf(
            "LoadBlockIndexDB(): Block files have previously been pruned\n");
    }

    // Check whether we need to continue reindexing
    if (m_block_tree_db->IsReindexing()) {
        fReindex = true;
    }

    return true;
}

const CBlockIndex *
BlockManager::GetLastCheckpoint(const CCheckpointData &data) {
    const MapCheckpoints &checkpoints = data.mapCheckpoints;

    for (const MapCheckpoints::value_type &i : reverse_iterate(checkpoints)) {
        const BlockHash &hash = i.second;
        const CBlockIndex *pindex = LookupBlockIndex(hash);
        if (pindex) {
            return pindex;
        }
    }

    return nullptr;
}

bool BlockManager::IsBlockPruned(const CBlockIndex *pblockindex) {
    AssertLockHeld(::cs_main);
    return (m_have_pruned && !pblockindex->nStatus.hasData() &&
            pblockindex->nTx > 0);
}

+const CBlockIndex *GetFirstStoredBlock(const CBlockIndex *start_block) {
+    AssertLockHeld(::cs_main);
+    assert(start_block);
+    const CBlockIndex *last_block = start_block;
+    while (last_block->pprev && (last_block->pprev->nStatus.hasData())) {
+        last_block = last_block->pprev;
+    }
+    return last_block;
+}
+
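
IsBlockPruned, just above the new GetFirstStoredBlock, combines three conditions. A truth-table sketch (illustrative; IsPruned is a free-function stand-in for the member):

// Editor's sketch, illustrative only: a block counts as pruned only if
// pruning has ever happened, its data is gone, and we once had it
// (nTx > 0, i.e. it was validated rather than being a bare header).
#include <cassert>

bool IsPruned(bool have_pruned, bool has_data, unsigned num_tx) {
    return have_pruned && !has_data && num_tx > 0;
}

int main() {
    assert(IsPruned(true, false, 2000));   // validated, data deleted
    assert(!IsPruned(true, false, 0));     // header-only: never had data
    assert(!IsPruned(false, false, 2000)); // node has never pruned
    assert(!IsPruned(true, true, 2000));   // data still on disk
}
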
// If we're using -prune with -reindex, then delete block files that will be
// ignored by the reindex. Since reindexing works by starting at block file 0
// and looping until a blockfile is missing, do the same here to delete any
// later block files after a gap. Also delete all rev files since they'll be
// rewritten by the reindex anyway. This ensures that m_blockfile_info is in
// sync with what's actually on disk by the time we start downloading, so
// that pruning works correctly.
void CleanupBlockRevFiles() {
    std::map<std::string, fs::path> mapBlockFiles;

    // Glob all blk?????.dat and rev?????.dat files from the blocks
    // directory. Remove the rev files immediately and insert the blk file
    // paths into an ordered map keyed by block file index.
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for "
              "-reindex with -prune\n");
    for (const auto &file : fs::directory_iterator{gArgs.GetBlocksDirPath()}) {
        const std::string path = fs::PathToString(file.path().filename());
        if (fs::is_regular_file(file) && path.length() == 12 &&
            path.substr(8, 4) == ".dat") {
            if (path.substr(0, 3) == "blk") {
                mapBlockFiles[path.substr(3, 5)] = file.path();
            } else if (path.substr(0, 3) == "rev") {
                remove(file.path());
            }
        }
    }

    // Remove all block files that aren't part of a contiguous set starting
    // at zero by walking the ordered map (keys are block file indices) by
    // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
    // start removing block files.
    int contiguousCounter = 0;
    for (const auto &item : mapBlockFiles) {
        if (atoi(item.first) == contiguousCounter) {
            contiguousCounter++;
            continue;
        }
        remove(item.second);
    }
}

CBlockFileInfo *BlockManager::GetBlockFileInfo(size_t n) {
    LOCK(cs_LastBlockFile);

    return &m_blockfile_info.at(n);
}

static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos,
                            const BlockHash &hashBlock,
                            const CMessageHeader::MessageMagic &messageStart) {
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
    fileout << messageStart << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("%s: ftell failed", __func__);
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}

bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) {
    const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};

    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }

    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Read block
    uint256 hashChecksum;
    // We need a CHashVerifier as reserializing may lose data
    CHashVerifier<CAutoFile> verifier(&filein);
    try {
        verifier << pindex->pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    } catch (const std::exception &e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash()) {
        return error("%s: Checksum mismatch", __func__);
    }

    return true;
}

void BlockManager::FlushUndoFile(int block_file, bool finalize) {
    FlatFilePos undo_pos_old(block_file,
                             m_blockfile_info[block_file].nUndoSize);
    if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
        AbortNode("Flushing undo file to disk failed. This is likely the "
                  "result of an I/O error.");
    }
}
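
CleanupBlockRevFiles keeps only the contiguous run of block files starting at zero. A sketch of that counter walk (illustrative; std::string paths stand in for fs::path and the file set is made up):

// Editor's sketch, illustrative only: keys arrive sorted, a counter
// tracks the expected next index, and everything after the first gap
// gets removed.
#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
    std::map<std::string, std::string> files{
        {"00000", "blk00000.dat"},
        {"00001", "blk00001.dat"},
        {"00003", "blk00003.dat"}, // gap: 00002 is missing
    };
    std::vector<std::string> removed;
    int contiguous = 0;
    for (const auto &[key, path] : files) {
        if (std::stoi(key) == contiguous) {
            ++contiguous;
            continue;
        }
        removed.push_back(path); // would be fs::remove() in the real code
    }
    assert(removed == std::vector<std::string>{"blk00003.dat"});
}
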
void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) {
    LOCK(cs_LastBlockFile);
    FlatFilePos block_pos_old(m_last_blockfile,
                              m_blockfile_info[m_last_blockfile].nSize);
    if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
        AbortNode("Flushing block file to disk failed. This is likely the "
                  "result of an I/O error.");
    }
    // We do not always flush the undo file, as the chain tip may be lagging
    // behind the incoming blocks, e.g. during IBD or a sync after a node
    // going offline.
    if (!fFinalize || finalize_undo) {
        FlushUndoFile(m_last_blockfile, finalize_undo);
    }
}

uint64_t BlockManager::CalculateCurrentUsage() {
    LOCK(cs_LastBlockFile);

    uint64_t retval = 0;
    for (const CBlockFileInfo &file : m_blockfile_info) {
        retval += file.nSize + file.nUndoSize;
    }

    return retval;
}

void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
    for (const int i : setFilesToPrune) {
        FlatFilePos pos(i, 0);
        fs::remove(BlockFileSeq().FileName(pos));
        fs::remove(UndoFileSeq().FileName(pos));
        LogPrint(BCLog::BLOCKSTORE, "Prune: %s deleted blk/rev (%05u)\n",
                 __func__, i);
    }
}

static FlatFileSeq BlockFileSeq() {
    return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk",
                       gArgs.GetBoolArg("-fastprune", false)
                           ? 0x4000 /* 16 KiB */
                           : BLOCKFILE_CHUNK_SIZE);
}

static FlatFileSeq UndoFileSeq() {
    return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
}

FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
    return BlockFileSeq().Open(pos, fReadOnly);
}

/** Open an undo file (rev?????.dat) */
static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
    return UndoFileSeq().Open(pos, fReadOnly);
}

fs::path GetBlockPosFilename(const FlatFilePos &pos) {
    return BlockFileSeq().FileName(pos);
}
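
BlockFileSeq and UndoFileSeq name their files from a prefix and a zero-padded index, matching the blk?????.dat / rev?????.dat patterns mentioned throughout this file. A sketch of that naming scheme (illustrative; FlatFileName is a made-up stand-in, the real lookup is FlatFileSeq::FileName):

// Editor's sketch, illustrative only: prefix + five-digit zero-padded
// file number + ".dat".
#include <cassert>
#include <cstdio>
#include <string>

std::string FlatFileName(const char *prefix, int n) {
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%s%05d.dat", prefix, n);
    return buf;
}

int main() {
    assert(FlatFileName("blk", 3) == "blk00003.dat");
    assert(FlatFileName("rev", 123) == "rev00123.dat");
}
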
bool BlockManager::FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
                                unsigned int nHeight, CChain &active_chain,
                                uint64_t nTime, bool fKnown) {
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
    if (m_blockfile_info.size() <= nFile) {
        m_blockfile_info.resize(nFile + 1);
    }

    bool finalize_undo = false;
    if (!fKnown) {
        while (m_blockfile_info[nFile].nSize + nAddSize >=
               (gArgs.GetBoolArg("-fastprune", false)
                    ? 0x10000 /* 64 KiB */
                    : MAX_BLOCKFILE_SIZE)) {
            // When the undo file is keeping up with the block file, we want
            // to flush it explicitly. When it is lagging behind (more blocks
            // arrive than are being connected), we let the undo block write
            // case handle it.
            finalize_undo = (m_blockfile_info[nFile].nHeightLast ==
                             (unsigned int)active_chain.Tip()->nHeight);
            nFile++;
            if (m_blockfile_info.size() <= nFile) {
                m_blockfile_info.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = m_blockfile_info[nFile].nSize;
    }

    if ((int)nFile != m_last_blockfile) {
        if (!fKnown) {
            LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n",
                     m_last_blockfile,
                     m_blockfile_info[m_last_blockfile].ToString());
        }
        FlushBlockFile(!fKnown, finalize_undo);
        m_last_blockfile = nFile;
    }

    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
    if (fKnown) {
        m_blockfile_info[nFile].nSize =
            std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
    } else {
        m_blockfile_info[nFile].nSize += nAddSize;
    }

    if (!fKnown) {
        bool out_of_space;
        size_t bytes_allocated =
            BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
        if (out_of_space) {
            return AbortNode("Disk space is too low!",
                             _("Disk space is too low!"));
        }
        if (bytes_allocated != 0 && fPruneMode) {
            m_check_for_pruning = true;
        }
    }

    m_dirty_fileinfo.insert(nFile);
    return true;
}

bool BlockManager::FindUndoPos(BlockValidationState &state, int nFile,
                               FlatFilePos &pos, unsigned int nAddSize) {
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    pos.nPos = m_blockfile_info[nFile].nUndoSize;
    m_blockfile_info[nFile].nUndoSize += nAddSize;
    m_dirty_fileinfo.insert(nFile);

    bool out_of_space;
    size_t bytes_allocated =
        UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
    if (out_of_space) {
        return AbortNode(state, "Disk space is too low!",
                         _("Disk space is too low!"));
    }
    if (bytes_allocated != 0 && fPruneMode) {
        m_check_for_pruning = true;
    }

    return true;
}

static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos,
                             const CMessageHeader::MessageMagic &messageStart) {
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        return error("WriteBlockToDisk: OpenBlockFile failed");
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
    fileout << messageStart << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("WriteBlockToDisk: ftell failed");
    }

    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
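
FindBlockPos advances to a new file when the current one cannot take the block without crossing the size cap. A toy sketch of the roll-over rule (illustrative; the Place helper and sizes are made up, and the real code also tracks undo finalization and disk allocation):

// Editor's sketch, illustrative only: append to the current file unless
// that would push it past the cap, otherwise advance the file number.
#include <cassert>
#include <cstdint>
#include <vector>

struct Placement {
    int file;
    uint64_t offset;
};

Placement Place(std::vector<uint64_t> &file_sizes, int last_file,
                uint64_t add_size, uint64_t max_file_size) {
    int file = last_file;
    while (file_sizes[file] + add_size >= max_file_size) {
        ++file;
        if (file_sizes.size() <= size_t(file)) {
            file_sizes.resize(file + 1, 0);
        }
    }
    Placement p{file, file_sizes[file]};
    file_sizes[file] += add_size;
    return p;
}

int main() {
    std::vector<uint64_t> sizes{90}; // current file is nearly full
    Placement p = Place(sizes, 0, 20, /*max_file_size=*/100);
    assert(p.file == 1 && p.offset == 0); // rolled over to a fresh file
}
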
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo &blockundo,
                                         BlockValidationState &state,
                                         CBlockIndex *pindex,
                                         const CChainParams &chainparams) {
    AssertLockHeld(::cs_main);
    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull()) {
        FlatFilePos _pos;
        if (!FindUndoPos(state, pindex->nFile, _pos,
                         ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
            return error("ConnectBlock(): FindUndoPos failed");
        }
        if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
                             chainparams.DiskMagic())) {
            return AbortNode(state, "Failed to write undo data");
        }
        // rev files are written in block height order, whereas blk files
        // are written as blocks come in (often out of order); we want to
        // flush the rev (undo) file once we've written the last block,
        // which is indicated by the last height in the block file info as
        // below. Note that this does not catch the case where the undo
        // writes are keeping up with the block writes (usually when a
        // synced up node is getting newly mined blocks) -- this case is
        // caught in the FindBlockPos function.
        if (_pos.nFile < m_last_blockfile &&
            static_cast<unsigned int>(pindex->nHeight) ==
                m_blockfile_info[_pos.nFile].nHeightLast) {
            FlushUndoFile(_pos.nFile, true);
        }

        // update nUndoPos in block index
        pindex->nUndoPos = _pos.nPos;
        pindex->nStatus = pindex->nStatus.withUndo();
        m_dirty_blockindex.insert(pindex);
    }

    return true;
}

bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
                       const Consensus::Params &params) {
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s",
                     pos.ToString());
    }

    // Read block
    try {
        filein >> block;
    } catch (const std::exception &e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__,
                     e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, params)) {
        return error("ReadBlockFromDisk: Errors in block header at %s",
                     pos.ToString());
    }

    return true;
}

bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
                       const Consensus::Params &params) {
    const FlatFilePos block_pos{
        WITH_LOCK(cs_main, return pindex->GetBlockPos())};

    if (!ReadBlockFromDisk(block, block_pos, params)) {
        return false;
    }

    if (block.GetHash() != pindex->GetBlockHash()) {
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() "
                     "doesn't match index for %s at %s",
                     pindex->ToString(), block_pos.ToString());
    }

    return true;
}

/**
 * Store block on disk. If dbp is non-nullptr, the file is known to already
 * reside on disk.
 */
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock &block, int nHeight,
                                          CChain &active_chain,
                                          const CChainParams &chainparams,
                                          const FlatFilePos *dbp) {
    unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
    FlatFilePos blockPos;
    if (dbp != nullptr) {
        blockPos = *dbp;
    }
    if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, active_chain,
                      block.GetBlockTime(), dbp != nullptr)) {
        error("%s: FindBlockPos failed", __func__);
        return FlatFilePos();
    }
    if (dbp == nullptr) {
        if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
            AbortNode("Failed to write block");
            return FlatFilePos();
        }
    }
    return blockPos;
}

struct CImportingNow {
    CImportingNow() {
        assert(fImporting == false);
        fImporting = true;
    }

    ~CImportingNow() {
        assert(fImporting == true);
        fImporting = false;
    }
};
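
CImportingNow is a scope guard: the constructor sets the fImporting flag, the destructor clears it on every exit path, including ThreadImport's early returns on shutdown. A minimal analogue (illustrative; g_importing stands in for node::fImporting):

// Editor's sketch, illustrative only: an RAII guard keeps a global flag
// honest no matter how the enclosing scope exits.
#include <atomic>
#include <cassert>

std::atomic_bool g_importing{false};

struct ImportingNow {
    ImportingNow() {
        assert(!g_importing);
        g_importing = true;
    }
    ~ImportingNow() {
        assert(g_importing);
        g_importing = false;
    }
};

int main() {
    {
        ImportingNow guard;
        assert(g_importing);
    } // destructor clears the flag even if the scope exits early
    assert(!g_importing);
}
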
void ThreadImport(const Config &config, ChainstateManager &chainman,
                  std::vector<fs::path> vImportFiles, const ArgsManager &args) {
    ScheduleBatchPriority();

    {
        const CChainParams &chainParams = config.GetChainParams();

        CImportingNow imp;

        // -reindex
        if (fReindex) {
            int nFile = 0;
            while (true) {
                FlatFilePos pos(nFile, 0);
                if (!fs::exists(GetBlockPosFilename(pos))) {
                    // No block files left to reindex
                    break;
                }
                FILE *file = OpenBlockFile(pos, true);
                if (!file) {
                    // This error is logged in OpenBlockFile
                    break;
                }
                LogPrintf("Reindexing block file blk%05u.dat...\n",
                          (unsigned int)nFile);
                chainman.ActiveChainstate().LoadExternalBlockFile(config, file,
                                                                  &pos);
                if (ShutdownRequested()) {
                    LogPrintf("Shutdown requested. Exit %s\n", __func__);
                    return;
                }
                nFile++;
            }
            WITH_LOCK(
                ::cs_main,
                chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
            fReindex = false;
            LogPrintf("Reindexing finished\n");
            // To avoid ending up in a situation without genesis block,
            // re-try initializing (no-op if reindexing worked):
            chainman.ActiveChainstate().LoadGenesisBlock();
        }

        // -loadblock=
        for (const fs::path &path : vImportFiles) {
            FILE *file = fsbridge::fopen(path, "rb");
            if (file) {
                LogPrintf("Importing blocks file %s...\n",
                          fs::PathToString(path));
                chainman.ActiveChainstate().LoadExternalBlockFile(config,
                                                                  file);
                if (ShutdownRequested()) {
                    LogPrintf("Shutdown requested. Exit %s\n", __func__);
                    return;
                }
            } else {
                LogPrintf("Warning: Could not open blocks file %s\n",
                          fs::PathToString(path));
            }
        }

        // Reconsider blocks we know are valid. They may have been marked
        // invalid by, for instance, running an outdated version of the node
        // software.
        const MapCheckpoints &checkpoints =
            chainParams.Checkpoints().mapCheckpoints;
        for (const MapCheckpoints::value_type &i : checkpoints) {
            const BlockHash &hash = i.second;

            LOCK(cs_main);
            CBlockIndex *pblockindex =
                chainman.m_blockman.LookupBlockIndex(hash);
            if (pblockindex && !pblockindex->nStatus.isValid()) {
                LogPrintf("Reconsidering checkpointed block %s ...\n",
                          hash.GetHex());
                chainman.ActiveChainstate().ResetBlockFailureFlags(
                    pblockindex);
            }
        }

        // Scan for better chains in the block chain database that are not
        // yet connected in the active best chain.
        // We can't hold cs_main during ActivateBestChain even though we're
        // accessing the chainman unique_ptrs since ABC requires us not to be
        // holding cs_main, so retrieve the relevant pointers before the ABC
        // call.
        for (CChainState *chainstate :
             WITH_LOCK(::cs_main, return chainman.GetAll())) {
            BlockValidationState state;
            if (!chainstate->ActivateBestChain(config, state, nullptr)) {
                LogPrintf("Failed to connect best block (%s)\n",
                          state.ToString());
                StartShutdown();
                return;
            }
        }

        if (args.GetBoolArg("-stopafterblockimport",
                            DEFAULT_STOPAFTERBLOCKIMPORT)) {
            LogPrintf("Stopping after block import\n");
            StartShutdown();
            return;
        }
    } // End scope of CImportingNow

    chainman.ActiveChainstate().LoadMempool(config, args);
}
} // namespace node
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 281cbae59..a92673ac0 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -1,228 +1,232 @@
// Copyright (c) 2011-2021 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_NODE_BLOCKSTORAGE_H
#define BITCOIN_NODE_BLOCKSTORAGE_H

#include
#include
#include
#include
#include // For CMessageHeader::MessageStartChars
#include
#include

extern RecursiveMutex cs_main;

class ArgsManager;
class BlockValidationState;
class CBlock;
class CBlockFileInfo;
class CBlockHeader;
class CBlockUndo;
class CChain;
class CChainParams;
class CChainState;
class ChainstateManager;
struct CCheckpointData;
class Config;
struct FlatFilePos;
namespace Consensus {
struct Params;
}

namespace node {
static constexpr bool DEFAULT_STOPAFTERBLOCKIMPORT{false};

/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
static constexpr unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** The maximum size of a blk?????.dat file (since 0.8) */
static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB

extern std::atomic_bool fImporting;
extern std::atomic_bool fReindex;
/** Pruning-related variables and constants */
/** True if we're running in -prune mode. */
extern bool fPruneMode;
/** Number of MiB of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;

// Because validation code takes pointers to the map's CBlockIndex objects,
// if we ever switch to another associative container, we need to either use
// a container that has stable addressing (true of all std associative
// containers), or make the key a `std::unique_ptr`
using BlockMap = std::unordered_map<BlockHash, CBlockIndex, BlockHasher>;

/**
 * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
 * to determine where the most-work tip is.
 *
 * This data is used mostly in `CChainState` - information about, e.g.,
 * candidate tips is not maintained here.
 */
class BlockManager {
    friend CChainState;
    friend ChainstateManager;

private:
    /**
     * Load the blocktree off disk and into memory. Populate certain
     * metadata per index entry (nStatus, nChainWork, nTimeMax, etc.) as
     * well as peripheral collections like m_dirty_blockindex.
     */
    bool LoadBlockIndex(const Consensus::Params &consensus_params)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
    void FlushUndoFile(int block_file, bool finalize = false);
    bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
                      unsigned int nHeight, CChain &active_chain,
                      uint64_t nTime, bool fKnown);
    bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos,
                     unsigned int nAddSize);

    /**
     * Calculate the block/rev files to delete based on height specified
     * by user with RPC command pruneblockchain
     */
    void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                int nManualPruneHeight, int chain_tip_height);
    /**
     * Prune block and undo files (blk???.dat and undo???.dat) so that the
     * disk space used is less than a user-defined target. The user sets the
     * target (in MB) on the command line or in config file. This will be run
     * on startup and whenever new space is allocated in a block or undo
     * file, staying below the target. Changing back to unpruned requires a
     * reindex (which in this case means the blockchain must be
     * re-downloaded.)
     *
     * Pruning functions are called from FlushStateToDisk when the
     * m_check_for_pruning flag has been set. Block and undo files are
     * deleted in lock-step (when blk00003.dat is deleted, so is
     * rev00003.dat.) Pruning cannot take place until the longest chain is at
     * least a certain length (CChainParams::nPruneAfterHeight). Pruning will
     * never delete a block within a defined distance (currently 288) from
     * the active chain's tip. The block index is updated by unsetting
     * HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted
     * files. A db flag records the fact that at least some block files have
     * been pruned.
     *
     * @param[out] setFilesToPrune The set of file indices that can be
     *                             unlinked will be returned
     */
    void FindFilesToPrune(std::set<int> &setFilesToPrune,
                          uint64_t nPruneAfterHeight, int chain_tip_height,
                          int prune_height, bool is_ibd);

    RecursiveMutex cs_LastBlockFile;
    std::vector<CBlockFileInfo> m_blockfile_info;
    int m_last_blockfile = 0;

    /**
     * Global flag to indicate we should check to see if there are
     * block/undo files that should be deleted. Set on startup
     * or if we allocate more file space when we're in prune mode
     */
    bool m_check_for_pruning = false;

    /** Dirty block index entries. */
    std::set<CBlockIndex *> m_dirty_blockindex;

    /** Dirty block file entries. */
    std::set<int> m_dirty_fileinfo;

public:
    BlockMap m_block_index GUARDED_BY(cs_main);

    std::vector<CBlockIndex *> GetAllBlockIndices()
        EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

    /**
     * All pairs A->B, where A (or one of its ancestors) misses transactions,
     * but B has transactions. Pruned nodes may have entries where B is
     * missing data.
     */
    std::multimap<CBlockIndex *, CBlockIndex *> m_blocks_unlinked;

    std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);

    bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
    bool LoadBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

    /** Clear all data members. */
    void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    CBlockIndex *AddToBlockIndex(const CBlockHeader &block,
                                 CBlockIndex *&best_header)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Create a new block index entry for a given block hash */
    CBlockIndex *InsertBlockIndex(const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    //! Mark one block file as pruned (modify associated database entries)
    void PruneOneBlockFile(const int fileNumber)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    CBlockIndex *LookupBlockIndex(const BlockHash &hash)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    const CBlockIndex *LookupBlockIndex(const BlockHash &hash) const
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Get block file info entry for one block file */
    CBlockFileInfo *GetBlockFileInfo(size_t n);

    bool WriteUndoDataForBlock(const CBlockUndo &blockundo,
                               BlockValidationState &state,
                               CBlockIndex *pindex,
                               const CChainParams &chainparams)
        EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

    FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight,
                                CChain &active_chain,
                                const CChainParams &chainparams,
                                const FlatFilePos *dbp);

    /**
     * Calculate the amount of disk space the block & undo files currently
     * use
     */
    uint64_t CalculateCurrentUsage();

    //! Returns last CBlockIndex* that is a checkpoint
    const CBlockIndex *GetLastCheckpoint(const CCheckpointData &data)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** True if any block files have ever been pruned. */
    bool m_have_pruned = false;

    //! Check whether the block associated with this index entry is pruned
    //! or not.
    bool IsBlockPruned(const CBlockIndex *pblockindex)
        EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

    ~BlockManager() { Unload(); }
};

+//! Find the first block that is not pruned
+const CBlockIndex *GetFirstStoredBlock(const CBlockIndex *start_block)
+    EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
void CleanupBlockRevFiles();

/** Open a block file (blk?????.dat) */
FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);

/** Translation to a filesystem path. */
fs::path GetBlockPosFilename(const FlatFilePos &pos);
/**
 * Actually unlink the specified files
 */
void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune);

/** Functions for disk access for blocks */
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
                       const Consensus::Params &consensusParams);
bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
                       const Consensus::Params &consensusParams);

bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex);

void ThreadImport(const Config &config, ChainstateManager &chainman,
                  std::vector<fs::path> vImportFiles, const ArgsManager &args);
} // namespace node

#endif // BITCOIN_NODE_BLOCKSTORAGE_H
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 79b973588..1480a46e1 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1,3381 +1,3377 @@
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include