diff --git a/src/coins.h b/src/coins.h --- a/src/coins.h +++ b/src/coins.h @@ -12,11 +12,14 @@ #include "memusage.h" #include "serialize.h" #include "uint256.h" +#include "utxocommit.h" #include #include #include +class CUtxoCommit; + /** * A UTXO entry. * @@ -163,7 +166,8 @@ //! Do a bulk modification (multiple Coin changes + BestBlock change). //! The passed mapCoins can be modified. - virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock); + virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta); //! Get a cursor to iterate over the whole state virtual CCoinsViewCursor *Cursor() const; @@ -173,6 +177,9 @@ //! Estimate database size (0 if not implemented) virtual size_t EstimateSize() const { return 0; } + + //! Returns the UTXO-commitment or nullptr if it is not yet up-to-date + virtual CUtxoCommit *GetCommitment() const; }; /** CCoinsView backed by another CCoinsView */ @@ -187,9 +194,11 @@ uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; void SetBackend(CCoinsView &viewIn); - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) override; CCoinsViewCursor *Cursor() const override; size_t EstimateSize() const override; + CUtxoCommit *GetCommitment() const override; }; /** @@ -207,15 +216,20 @@ /* Cached dynamic memory usage for the inner Coin objects. 
*/ mutable size_t cachedCoinsUsage; + CUtxoCommit *cacheUtxoCommitDelta; + public: CCoinsViewCache(CCoinsView *baseIn); + virtual ~CCoinsViewCache(); + // Standard CCoinsView methods bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; void SetBestBlock(const uint256 &hashBlock); - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) override; /** * Check if we have the given utxo already loaded in this cache. @@ -253,6 +267,17 @@ */ bool Flush(); + /** + * Returns the UTXO commitment from the current full set + * This is the combination of the backed commitment and the commitment delta + */ + CUtxoCommit *GetCommitment() const; + + /** + * Start maintaining the commitment for this View + */ + void CalculateCommitment(); + /** * Removes the UTXO with the given outpoint from the cache, if it is not * modified. 
diff --git a/src/coins.cpp b/src/coins.cpp --- a/src/coins.cpp +++ b/src/coins.cpp @@ -22,13 +22,18 @@ std::vector CCoinsView::GetHeadBlocks() const { return std::vector(); } -bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { +bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) { return false; } CCoinsViewCursor *CCoinsView::Cursor() const { return nullptr; } +CUtxoCommit *CCoinsView::GetCommitment() const { + return nullptr; +} + CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) {} bool CCoinsViewBacked::GetCoin(const COutPoint &outpoint, Coin &coin) const { return base->GetCoin(outpoint, coin); @@ -45,9 +50,9 @@ void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; } -bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, - const uint256 &hashBlock) { - return base->BatchWrite(mapCoins, hashBlock); +bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) { + return base->BatchWrite(mapCoins, hashBlock, commitDelta); } CCoinsViewCursor *CCoinsViewBacked::Cursor() const { return base->Cursor(); @@ -56,6 +61,10 @@ return base->EstimateSize(); } +CUtxoCommit *CCoinsViewBacked::GetCommitment() const { + return base->GetCommitment(); + } + SaltedOutpointHasher::SaltedOutpointHasher() : k0(GetRand(std::numeric_limits::max())), k1(GetRand(std::numeric_limits::max())) {} @@ -63,6 +72,29 @@ CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), cachedCoinsUsage(0) {} +CCoinsViewCache::~CCoinsViewCache() { + if (cacheUtxoCommitDelta != nullptr) { + delete cacheUtxoCommitDelta; + } +} + +void CCoinsViewCache::CalculateCommitment() { + // Currently this is called *before* the cache is filled, + // and the commitment is maintained on AddCoin/SpendCoin + + // It is probably faster to do it *after* the cache is filled, + // as this can be parallelized; this does require us to change the + 
// in-memory representation of Coin to include the scriptpubkeys for spent + // outputs + + // we can't start maintaining the commitment in the middle of + // a cache batch + assert(cacheCoins.empty()); + + assert(cacheUtxoCommitDelta == nullptr); + cacheUtxoCommitDelta = new CUtxoCommit(); +} + size_t CCoinsViewCache::DynamicMemoryUsage() const { return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage; } @@ -126,6 +158,10 @@ it->second.flags |= CCoinsCacheEntry::DIRTY | (fresh ? CCoinsCacheEntry::FRESH : 0); cachedCoinsUsage += it->second.coin.DynamicMemoryUsage(); + + if (cacheUtxoCommitDelta != nullptr) { + cacheUtxoCommitDelta->Add(outpoint, coin); + } } void AddCoins(CCoinsViewCache &cache, const CTransaction &tx, int nHeight, @@ -147,6 +183,11 @@ if (it == cacheCoins.end()) { return false; } + + if (cacheUtxoCommitDelta != nullptr) { + cacheUtxoCommitDelta->Remove(outpoint, it->second.coin); + } + cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); if (moveout) { *moveout = std::move(it->second.coin); @@ -192,7 +233,19 @@ } bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, - const uint256 &hashBlockIn) { + const uint256 &hashBlockIn, + CUtxoCommit *commitDelta) { + + // Merge commitment + if (commitDelta != nullptr) { + if (cacheUtxoCommitDelta == nullptr) { + // Can't start in the middle of a batch + assert(cacheCoins.empty()); + cacheUtxoCommitDelta = new CUtxoCommit(); + } + cacheUtxoCommitDelta->Add(*commitDelta); + } + for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { // Ignore non-dirty entries (optimization). 
if (it->second.flags & CCoinsCacheEntry::DIRTY) { @@ -255,12 +308,25 @@ } bool CCoinsViewCache::Flush() { - bool fOk = base->BatchWrite(cacheCoins, hashBlock); + bool fOk = base->BatchWrite(cacheCoins, hashBlock, cacheUtxoCommitDelta); cacheCoins.clear(); cachedCoinsUsage = 0; return fOk; } +CUtxoCommit *CCoinsViewCache::GetCommitment() const { + CUtxoCommit *parent = base->GetCommitment(); + if (parent == nullptr || cacheUtxoCommitDelta == nullptr) { + return nullptr; + } else { + // merge this commitment delta with parent commitment + CUtxoCommit *result = new CUtxoCommit(); + result->Add(*parent); + result->Add(*this->cacheUtxoCommitDelta); + return result; + } +} + void CCoinsViewCache::Uncache(const COutPoint &outpoint) { CCoinsMap::iterator it = cacheCoins.find(outpoint); if (it != cacheCoins.end() && it->second.flags == 0) { diff --git a/src/init.cpp b/src/init.cpp --- a/src/init.cpp +++ b/src/init.cpp @@ -467,9 +467,8 @@ _("Discover own IP addresses (default: 1 when " "listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt( - "-dns", - _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + - strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP)); + "-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + + " " + strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP)); strUsage += HelpMessageOpt( "-dnsseed", _("Query for peer addresses via DNS lookup, if low on " "addresses (default: 1 unless -connect/-noconnect)")); @@ -2110,9 +2109,8 @@ _("Do you want to rebuild the block database now?"), strLoadError + ".\nPlease restart with -reindex or " "-reindex-chainstate to recover.", - "", - CClientUIInterface::MSG_ERROR | - CClientUIInterface::BTN_ABORT); + "", CClientUIInterface::MSG_ERROR | + CClientUIInterface::BTN_ABORT); if (fRet) { fReindex = true; fRequestShutdown = false; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -857,6 +857,7 @@ uint64_t 
nTransactionOutputs; uint64_t nBogoSize; uint256 hashSerialized; + uint256 hashCommitment; uint64_t nDiskSize; Amount nTotalAmount; @@ -920,6 +921,9 @@ } stats.hashSerialized = ss.GetHash(); stats.nDiskSize = view->EstimateSize(); + CUtxoCommit *commitment = view->GetCommitment(); + stats.hashCommitment = + (commitment == nullptr ? uint256() : commitment->GetHash()); return true; } @@ -1022,6 +1026,7 @@ ret.push_back(Pair("txouts", int64_t(stats.nTransactionOutputs))); ret.push_back(Pair("bogosize", int64_t(stats.nBogoSize))); ret.push_back(Pair("hash_serialized", stats.hashSerialized.GetHex())); + ret.push_back(Pair("hash_commitment", stats.hashCommitment.GetHex())); ret.push_back(Pair("disk_size", stats.nDiskSize)); ret.push_back( Pair("total_amount", ValueFromAmount(stats.nTotalAmount))); diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -54,7 +54,8 @@ uint256 GetBestBlock() const override { return hashBestBlock_; } - bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override { + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) override { for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Same optimization used in CCoinsViewDB is to only write dirty @@ -610,7 +611,9 @@ void WriteCoinViewEntry(CCoinsView &view, const Amount value, char flags) { CCoinsMap map; InsertCoinMapEntry(map, value, flags); - view.BatchWrite(map, {}); + CUtxoCommit commit; + commit.Add(map.begin()->first, map.begin()->second.coin); + view.BatchWrite(map, {}, &commit); } class SingleEntryCacheTest { diff --git a/src/txdb.h b/src/txdb.h --- a/src/txdb.h +++ b/src/txdb.h @@ -73,9 +73,12 @@ bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; - bool BatchWrite(CCoinsMap &mapCoins, const uint256 
&hashBlock) override; + bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) override; CCoinsViewCursor *Cursor() const override; + CUtxoCommit *GetCommitment() const override; + //! Attempt to update from an older database format. //! Returns whether an error occurred. bool Upgrade(); diff --git a/src/txdb.cpp b/src/txdb.cpp --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -31,6 +31,8 @@ static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; +static const char DB_COMMITMENT = 'U'; + namespace { struct CoinEntry { @@ -78,7 +80,18 @@ return vhashHeadBlocks; } -bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { +CUtxoCommit *CCoinsViewDB::GetCommitment() const { + CUtxoCommit *result = new CUtxoCommit(); + if (!db.Read(DB_COMMITMENT, *result)) { + delete result; + return nullptr; + } else { + return result; + } +} + +bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, + CUtxoCommit *commitDelta) { CDBBatch batch(db); size_t count = 0; size_t changed = 0; @@ -104,6 +117,55 @@ batch.Erase(DB_BEST_BLOCK); batch.Write(DB_HEAD_BLOCKS, std::vector{hashBlock, old_tip}); + if (commitDelta != nullptr) { + // TODO. Procedure: + // If we have a commitment in the DB, merge commitDelta + // If we don't have a commitment in the DB, + // create one from the current snapshot, then merge commitDelta + + // This provides + // A: Fast IBD as commitDelta will be nullptr until assumevalid is + // reached + // B: An upgrade from a pre-utxo-commitment version will happen smoothly + + // This could be done async. We start creating the commitment from a + // snapshot + // in a different thread; meanwhile we maintain a commitmentDelta from + // this point onwards + // and merge and store them when done. + // The problem is, that we may have to verify commitments while this + // procedure is busy, + // as we're beyond the assumevalid block. 
This is tricky, especially + // since it needs to be carried + // over shutdown/startup. It may be possible yet non-trivial to enqueue + // these pending + // validations in the DB somehow. + // Alternatively, we could close our eyes for not checking the + // commitments just behind assumevalid... + + // It may be easiest to start synchronous, blocking a few minutes: + CUtxoCommit *dbCommitment = GetCommitment(); + if (dbCommitment == nullptr) { + LogPrint(BCLog::COINDB, "Start maintaining UTXO commitment\n"); + + CUtxoCommit snapshotCommitment; + std::unique_ptr cursor(this->Cursor()); + snapshotCommitment.AddCoinView( + cursor.get()); // blocks a few minutes + snapshotCommitment.Add(*commitDelta); + batch.Write(DB_COMMITMENT, snapshotCommitment); + } else { + LogPrint(BCLog::COINDB, "Merging UTXO commitment\n"); + dbCommitment->Add(*commitDelta); + batch.Write(DB_COMMITMENT, *dbCommitment); + } + } else { + // Once we have a commitment, we must maintain it + LogPrint(BCLog::COINDB, + "Skipping commitment while blocks are assumed valid\n"); + batch.Erase(DB_COMMITMENT); + } + for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { CoinEntry entry(&it->first); diff --git a/src/utxocommit.h b/src/utxocommit.h --- a/src/utxocommit.h +++ b/src/utxocommit.h @@ -5,12 +5,12 @@ #ifndef BITCOIN_UTXOCOMMIT_H #define BITCOIN_UTXOCOMMIT_H -#include "coins.h" #include "hash.h" #include "secp256k1/include/secp256k1_multiset.h" #include "streams.h" #include +class COutPoint; class Coin; class CCoinsViewCursor; diff --git a/src/utxocommit.cpp b/src/utxocommit.cpp --- a/src/utxocommit.cpp +++ b/src/utxocommit.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "utxocommit.h" - +#include "coins.h" #include "util.h" namespace { @@ -67,7 +67,7 @@ bool CUtxoCommit::AddCoinView(CCoinsViewCursor *pcursor) { - LogPrintf("Adding existing UTXO set to the UTXO commitment"); + LogPrintf("Adding existing UTXO set to the UTXO commitment\n"); // TODO: Parallelize int n = 0; diff --git a/src/validation.cpp b/src/validation.cpp --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1885,6 +1885,56 @@ return flags; } +/** + * Verifies whether the given block is assumed valid. + * This will cause the script not to be checked and the + * commitment not to be checked + */ +static bool IsAssumedValid(const Config &config, CBlockIndex *pindex) { + + AssertLockHeld(cs_main); + + if (!hashAssumeValid.IsNull()) { + + const Consensus::Params &consensusParams = + config.GetChainParams().GetConsensus(); + + // We've been configured with the hash of a block which has been + // externally verified to have a valid history. A suitable default value + // is included with the software and updated from time to time. Because + // validity relative to a piece of software is an objective fact these + // defaults can be easily reviewed. This setting doesn't force the + // selection of any particular chain but makes validating some faster by + // effectively caching the result of part of the verification. + BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); + if (it != mapBlockIndex.end()) { + if (it->second->GetAncestor(pindex->nHeight) == pindex && + pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && + pindexBestHeader->nChainWork >= + UintToArith256(consensusParams.nMinimumChainWork)) { + // This block is a member of the assumed verified chain and an + // ancestor of the best header. The equivalent time check + // discourages hashpower from extorting the network via DOS + // attack into accepting an invalid block through telling users + // they must manually set assumevalid. 
Requiring a software + // change or burying the invalid block, regardless of the + // setting, makes it hard to hide the implication of the demand. + // This also avoids having release candidates that are hardly + // doing any signature verification at all in testing without + // having to artificially set the default assumed verified block + // further back. The test against nMinimumChainWork prevents the + // skipping when denied access to any chain at least as good as + // the expected chain. + return !(GetBlockProofEquivalentTime( + *pindexBestHeader, *pindex, *pindexBestHeader, + consensusParams) <= 60 * 60 * 24 * 7 * 2); + } + } + } + + return false; +} + static int64_t nTimeCheck = 0; static int64_t nTimeForks = 0; static int64_t nTimeVerify = 0; @@ -1929,41 +1979,8 @@ return true; } - bool fScriptChecks = true; - if (!hashAssumeValid.IsNull()) { - // We've been configured with the hash of a block which has been - // externally verified to have a valid history. A suitable default value - // is included with the software and updated from time to time. Because - // validity relative to a piece of software is an objective fact these - // defaults can be easily reviewed. This setting doesn't force the - // selection of any particular chain but makes validating some faster by - // effectively caching the result of part of the verification. - BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); - if (it != mapBlockIndex.end()) { - if (it->second->GetAncestor(pindex->nHeight) == pindex && - pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && - pindexBestHeader->nChainWork >= - UintToArith256(consensusParams.nMinimumChainWork)) { - // This block is a member of the assumed verified chain and an - // ancestor of the best header. The equivalent time check - // discourages hashpower from extorting the network via DOS - // attack into accepting an invalid block through telling users - // they must manually set assumevalid. 
Requiring a software - // change or burying the invalid block, regardless of the - // setting, makes it hard to hide the implication of the demand. - // This also avoids having release candidates that are hardly - // doing any signature verification at all in testing without - // having to artificially set the default assumed verified block - // further back. The test against nMinimumChainWork prevents the - // skipping when denied access to any chain at least as good as - // the expected chain. - fScriptChecks = - (GetBlockProofEquivalentTime( - *pindexBestHeader, *pindex, *pindexBestHeader, - consensusParams) <= 60 * 60 * 24 * 7 * 2); - } - } - } + bool fScriptChecks = !IsAssumedValid(config, pindex); + LogPrintf("Scriptchecks=%d\n", (int)fScriptChecks); int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; @@ -2359,8 +2376,8 @@ nLastSetChain = nNow; } } catch (const std::runtime_error &e) { - return AbortNode( - state, std::string("System error while flushing: ") + e.what()); + return AbortNode(state, std::string("System error while flushing: ") + + e.what()); } return true; } @@ -2489,6 +2506,7 @@ int64_t nStart = GetTimeMicros(); { CCoinsViewCache view(pcoinsTip); + view.CalculateCommitment(); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { return error("DisconnectTip(): DisconnectBlock %s failed", @@ -2638,6 +2656,19 @@ const CBlock &blockConnecting = *pthisBlock; + // If we should start or stop maintaining the UTXO commitment + // we must flush (except at genesis) + bool assumeValid = IsAssumedValid(config, pindexNew); + bool skippingCommitment = (pcoinsTip->GetCommitment() == nullptr); + if (skippingCommitment != assumeValid) { + + const Consensus::Params &consensusParams = + config.GetChainParams().GetConsensus(); + if (pthisBlock->GetHash() != consensusParams.hashGenesisBlock) { + pcoinsTip->Flush(); + } + } + // Apply the block atomically to the chain state. 
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; @@ -2646,6 +2677,10 @@ (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); { CCoinsViewCache view(pcoinsTip); + if (!assumeValid) { + view.CalculateCommitment(); + } + bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { @@ -2666,6 +2701,7 @@ nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); + // Write the chain state to disk, if necessary. if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { @@ -4403,11 +4439,10 @@ boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), - std::max(1, - std::min(99, - 100 - (int)(((double)(chainActive.Height() - - pindex->nHeight)) / - (double)nCheckDepth * 50)))); + std::max( + 1, std::min(99, 100 - (int)(((double)(chainActive.Height() - + pindex->nHeight)) / + (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) {