diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index a3d173697..c631ca644 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -1,493 +1,493 @@
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include

/**
 * The index database stores three items for each block: the disk location of
 * the encoded filter, its dSHA256 hash, and the header. Those belonging to
 * blocks on the active chain are indexed by height, and those belonging to
 * blocks that have been reorganized out of the active chain are indexed by
 * block hash. This ensures that filter data for any block that becomes part of
 * the active chain can always be retrieved, alleviating timing concerns.
 *
 * The filters themselves are stored in flat files and referenced by the LevelDB
 * entries. This minimizes the amount of data written to LevelDB and keeps the
 * database values constant size. The disk location of the next block filter to
 * be written (represented as a FlatFilePos) is stored under the DB_FILTER_POS
 * key.
 *
 * Keys for the height index have the type [DB_BLOCK_HEIGHT, uint32 (BE)]. The
 * height is represented as big-endian so that sequential reads of filters by
 * height are fast. Keys for the hash index have the type [DB_BLOCK_HASH,
 * uint256].
 */
constexpr char DB_BLOCK_HASH = 's';
constexpr char DB_BLOCK_HEIGHT = 't';
constexpr char DB_FILTER_POS = 'P';

// 16 MiB
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000;
/** The pre-allocation chunk size for fltr?????.dat files */
// 1 MiB
constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000;

namespace {

struct DBVal {
    uint256 hash;
    uint256 header;
    FlatFilePos pos;

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        READWRITE(hash);
        READWRITE(header);
        READWRITE(pos);
    }
};

struct DBHeightKey {
    int height;

    DBHeightKey() : height(0) {}
-    DBHeightKey(int height_in) : height(height_in) {}
+    explicit DBHeightKey(int height_in) : height(height_in) {}

    template <typename Stream> void Serialize(Stream &s) const {
        ser_writedata8(s, DB_BLOCK_HEIGHT);
        ser_writedata32be(s, height);
    }

    template <typename Stream> void Unserialize(Stream &s) {
        char prefix = ser_readdata8(s);
        if (prefix != DB_BLOCK_HEIGHT) {
            throw std::ios_base::failure(
                "Invalid format for block filter index DB height key");
        }
        height = ser_readdata32be(s);
    }
};

struct DBHashKey {
    BlockHash hash;

-    DBHashKey(const BlockHash &hash_in) : hash(hash_in) {}
+    explicit DBHashKey(const BlockHash &hash_in) : hash(hash_in) {}

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        char prefix = DB_BLOCK_HASH;
        READWRITE(prefix);
        if (prefix != DB_BLOCK_HASH) {
            throw std::ios_base::failure(
                "Invalid format for block filter index DB hash key");
        }

        READWRITE(hash);
    }
};

}; // namespace

static std::map<BlockFilterType, BlockFilterIndex> g_filter_indexes;

BlockFilterIndex::BlockFilterIndex(BlockFilterType filter_type,
                                   size_t n_cache_size, bool f_memory,
                                   bool f_wipe)
    : m_filter_type(filter_type) {
    const std::string &filter_name = BlockFilterTypeName(filter_type);
    if (filter_name.empty()) {
        throw std::invalid_argument("unknown filter_type");
    }

    fs::path path = GetDataDir() / "indexes" / "blockfilter" / filter_name;
    fs::create_directories(path);

    m_name = filter_name + " block filter index";
    m_db = std::make_unique<BaseIndex::DB>(path / "db", n_cache_size, f_memory,
                                           f_wipe);
    m_filter_fileseq = std::make_unique<FlatFileSeq>(std::move(path), "fltr",
                                                     FLTR_FILE_CHUNK_SIZE);
}

bool BlockFilterIndex::Init() {
    if (!m_db->Read(DB_FILTER_POS, m_next_filter_pos)) {
        // Check that the cause of the read failure is that the key does not
        // exist. Any other errors indicate database corruption or a disk
        // failure, and starting the index would cause further corruption.
        if (m_db->Exists(DB_FILTER_POS)) {
            return error(
                "%s: Cannot read current %s state; index may be corrupted",
                __func__, GetName());
        }

        // If the DB_FILTER_POS is not set, then initialize to the first
        // location.
        m_next_filter_pos.nFile = 0;
        m_next_filter_pos.nPos = 0;
    }
    return BaseIndex::Init();
}

bool BlockFilterIndex::CommitInternal(CDBBatch &batch) {
    const FlatFilePos &pos = m_next_filter_pos;

    // Flush current filter file to disk.
    CAutoFile file(m_filter_fileseq->Open(pos), SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        return error("%s: Failed to open filter file %d", __func__, pos.nFile);
    }
    if (!FileCommit(file.Get())) {
        return error("%s: Failed to commit filter file %d", __func__,
                     pos.nFile);
    }

    batch.Write(DB_FILTER_POS, pos);
    return BaseIndex::CommitInternal(batch);
}

bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos &pos,
                                          BlockFilter &filter) const {
    CAutoFile filein(m_filter_fileseq->Open(pos, true), SER_DISK,
                     CLIENT_VERSION);
    if (filein.IsNull()) {
        return false;
    }

    BlockHash block_hash;
    std::vector<uint8_t> encoded_filter;
    try {
        filein >> block_hash >> encoded_filter;
        filter = BlockFilter(GetFilterType(), block_hash,
                             std::move(encoded_filter));
    } catch (const std::exception &e) {
        return error("%s: Failed to deserialize block filter from disk: %s",
                     __func__, e.what());
    }

    return true;
}

size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos &pos,
                                           const BlockFilter &filter) {
    assert(filter.GetFilterType() == GetFilterType());

    size_t data_size =
        GetSerializeSize(filter.GetBlockHash(), CLIENT_VERSION) +
        GetSerializeSize(filter.GetEncodedFilter(), CLIENT_VERSION);

    // If writing the filter would overflow the file, flush and move to the
    // next one.
    if (pos.nPos + data_size > MAX_FLTR_FILE_SIZE) {
        CAutoFile last_file(m_filter_fileseq->Open(pos), SER_DISK,
                            CLIENT_VERSION);
        if (last_file.IsNull()) {
            LogPrintf("%s: Failed to open filter file %d\n", __func__,
                      pos.nFile);
            return 0;
        }
        if (!TruncateFile(last_file.Get(), pos.nPos)) {
            LogPrintf("%s: Failed to truncate filter file %d\n", __func__,
                      pos.nFile);
            return 0;
        }
        if (!FileCommit(last_file.Get())) {
            LogPrintf("%s: Failed to commit filter file %d\n", __func__,
                      pos.nFile);
            return 0;
        }

        pos.nFile++;
        pos.nPos = 0;
    }

    // Pre-allocate sufficient space for filter data.
    bool out_of_space;
    m_filter_fileseq->Allocate(pos, data_size, out_of_space);
    if (out_of_space) {
        LogPrintf("%s: out of disk space\n", __func__);
        return 0;
    }

    CAutoFile fileout(m_filter_fileseq->Open(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
        return 0;
    }

    fileout << filter.GetBlockHash() << filter.GetEncodedFilter();
    return data_size;
}

bool BlockFilterIndex::WriteBlock(const CBlock &block,
                                  const CBlockIndex *pindex) {
    CBlockUndo block_undo;
    uint256 prev_header;

    if (pindex->nHeight > 0) {
        if (!UndoReadFromDisk(block_undo, pindex)) {
            return false;
        }

        std::pair<BlockHash, DBVal> read_out;
        if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) {
            return false;
        }

        BlockHash expected_block_hash = pindex->pprev->GetBlockHash();
        if (read_out.first != expected_block_hash) {
            return error("%s: previous block header belongs to unexpected "
                         "block %s; expected %s",
                         __func__, read_out.first.ToString(),
                         expected_block_hash.ToString());
        }

        prev_header = read_out.second.header;
    }

    BlockFilter filter(m_filter_type, block, block_undo);

    size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter);
    if (bytes_written == 0) {
        return false;
    }

    std::pair<BlockHash, DBVal> value;
    value.first = pindex->GetBlockHash();
    value.second.hash = filter.GetHash();
    value.second.header = filter.ComputeHeader(prev_header);
    value.second.pos = m_next_filter_pos;

    if (!m_db->Write(DBHeightKey(pindex->nHeight), value)) {
        return false;
    }

    m_next_filter_pos.nPos += bytes_written;
    return true;
}

static bool CopyHeightIndexToHashIndex(CDBIterator &db_it, CDBBatch &batch,
                                       const std::string &index_name,
                                       int start_height, int stop_height) {
    DBHeightKey key(start_height);
    db_it.Seek(key);

    for (int height = start_height; height <= stop_height; ++height) {
        if (!db_it.GetKey(key) || key.height != height) {
            return error("%s: unexpected key in %s: expected (%c, %d)",
                         __func__, index_name, DB_BLOCK_HEIGHT, height);
        }

        std::pair<BlockHash, DBVal> value;
        if (!db_it.GetValue(value)) {
            return error("%s: unable to read value in %s at key (%c, %d)",
                         __func__, index_name, DB_BLOCK_HEIGHT, height);
        }

        batch.Write(DBHashKey(value.first), std::move(value.second));

        db_it.Next();
    }
    return true;
}

bool BlockFilterIndex::Rewind(const CBlockIndex *current_tip,
                              const CBlockIndex *new_tip) {
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    CDBBatch batch(*m_db);
    std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());

    // During a reorg, we need to copy all filters for blocks that are getting
    // disconnected from the height index to the hash index so we can still find
    // them when the height index entries are overwritten.
    if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip->nHeight,
                                    current_tip->nHeight)) {
        return false;
    }

    // The latest filter position gets written in Commit by the call to the
    // BaseIndex::Rewind. But since this creates new references to the filter,
    // the position should get updated here atomically as well in case Commit
    // fails.
    batch.Write(DB_FILTER_POS, m_next_filter_pos);
    if (!m_db->WriteBatch(batch)) {
        return false;
    }

    return BaseIndex::Rewind(current_tip, new_tip);
}

static bool LookupOne(const CDBWrapper &db, const CBlockIndex *block_index,
                      DBVal &result) {
    // First check if the result is stored under the height index and the value
    // there matches the block hash. This should be the case if the block is on
    // the active chain.
    std::pair<BlockHash, DBVal> read_out;
    if (!db.Read(DBHeightKey(block_index->nHeight), read_out)) {
        return false;
    }
    if (read_out.first == block_index->GetBlockHash()) {
        result = std::move(read_out.second);
        return true;
    }

    // If value at the height index corresponds to a different block, the
    // result will be stored in the hash index.
    return db.Read(DBHashKey(block_index->GetBlockHash()), result);
}

static bool LookupRange(CDBWrapper &db, const std::string &index_name,
                        int start_height, const CBlockIndex *stop_index,
                        std::vector<DBVal> &results) {
    if (start_height < 0) {
        return error("%s: start height (%d) is negative", __func__,
                     start_height);
    }
    if (start_height > stop_index->nHeight) {
        return error("%s: start height (%d) is greater than stop height (%d)",
                     __func__, start_height, stop_index->nHeight);
    }

    size_t results_size =
        static_cast<size_t>(stop_index->nHeight - start_height + 1);
    std::vector<std::pair<BlockHash, DBVal>> values(results_size);

    DBHeightKey key(start_height);
    std::unique_ptr<CDBIterator> db_it(db.NewIterator());
    db_it->Seek(DBHeightKey(start_height));
    for (int height = start_height; height <= stop_index->nHeight; ++height) {
        if (!db_it->Valid() || !db_it->GetKey(key) || key.height != height) {
            return false;
        }

        size_t i = static_cast<size_t>(height - start_height);
        if (!db_it->GetValue(values[i])) {
            return error("%s: unable to read value in %s at key (%c, %d)",
                         __func__, index_name, DB_BLOCK_HEIGHT, height);
        }

        db_it->Next();
    }

    results.resize(results_size);

    // Iterate backwards through block indexes collecting results in order to
    // access the block hash of each entry in case we need to look it up in the
    // hash index.
    for (const CBlockIndex *block_index = stop_index;
         block_index && block_index->nHeight >= start_height;
         block_index = block_index->pprev) {
        BlockHash block_hash = block_index->GetBlockHash();

        size_t i = static_cast<size_t>(block_index->nHeight - start_height);
        if (block_hash == values[i].first) {
            results[i] = std::move(values[i].second);
            continue;
        }

        if (!db.Read(DBHashKey(block_hash), results[i])) {
            return error("%s: unable to read value in %s at key (%c, %s)",
                         __func__, index_name, DB_BLOCK_HASH,
                         block_hash.ToString());
        }
    }

    return true;
}

bool BlockFilterIndex::LookupFilter(const CBlockIndex *block_index,
                                    BlockFilter &filter_out) const {
    DBVal entry;
    if (!LookupOne(*m_db, block_index, entry)) {
        return false;
    }

    return ReadFilterFromDisk(entry.pos, filter_out);
}

bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex *block_index,
                                          uint256 &header_out) const {
    DBVal entry;
    if (!LookupOne(*m_db, block_index, entry)) {
        return false;
    }

    header_out = entry.header;
    return true;
}

bool BlockFilterIndex::LookupFilterRange(
    int start_height, const CBlockIndex *stop_index,
    std::vector<BlockFilter> &filters_out) const {
    std::vector<DBVal> entries;
    if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) {
        return false;
    }

    filters_out.resize(entries.size());
    auto filter_pos_it = filters_out.begin();
    for (const auto &entry : entries) {
        if (!ReadFilterFromDisk(entry.pos, *filter_pos_it)) {
            return false;
        }
        ++filter_pos_it;
    }

    return true;
}

bool BlockFilterIndex::LookupFilterHashRange(
    int start_height, const CBlockIndex *stop_index,
    std::vector<uint256> &hashes_out) const {
    std::vector<DBVal> entries;
    if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) {
        return false;
    }

    hashes_out.clear();
    hashes_out.reserve(entries.size());
    for (const auto &entry : entries) {
        hashes_out.push_back(entry.hash);
    }
    return true;
}

BlockFilterIndex *GetBlockFilterIndex(BlockFilterType filter_type) {
    auto it = g_filter_indexes.find(filter_type);
    return it != g_filter_indexes.end() ? &it->second : nullptr;
}

void ForEachBlockFilterIndex(std::function<void(BlockFilterIndex &)> fn) {
    for (auto &entry : g_filter_indexes) {
        fn(entry.second);
    }
}

bool InitBlockFilterIndex(BlockFilterType filter_type, size_t n_cache_size,
                          bool f_memory, bool f_wipe) {
    auto result = g_filter_indexes.emplace(
        std::piecewise_construct, std::forward_as_tuple(filter_type),
        std::forward_as_tuple(filter_type, n_cache_size, f_memory, f_wipe));
    return result.second;
}

bool DestroyBlockFilterIndex(BlockFilterType filter_type) {
    return g_filter_indexes.erase(filter_type);
}

void DestroyAllBlockFilterIndexes() {
    g_filter_indexes.clear();
}
diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp
index 304c1371e..c0f6707c0 100644
--- a/src/interfaces/chain.cpp
+++ b/src/interfaces/chain.cpp
@@ -1,411 +1,411 @@
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace interfaces {
namespace {

class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection> {
    Optional<int> getHeight() override {
        LockAssertion lock(::cs_main);
        int height = ::ChainActive().Height();
        if (height >= 0) {
            return height;
        }
        return nullopt;
    }
    Optional<int> getBlockHeight(const BlockHash &hash) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = LookupBlockIndex(hash);
        if (block && ::ChainActive().Contains(block)) {
            return block->nHeight;
        }
        return nullopt;
    }
    int getBlockDepth(const BlockHash &hash) override {
        const Optional<int> tip_height = getHeight();
        const Optional<int> height = getBlockHeight(hash);
        return tip_height && height ? *tip_height - *height + 1 : 0;
    }
    BlockHash getBlockHash(int height) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = ::ChainActive()[height];
        assert(block != nullptr);
        return block->GetBlockHash();
    }
    int64_t getBlockTime(int height) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = ::ChainActive()[height];
        assert(block != nullptr);
        return block->GetBlockTime();
    }
    int64_t getBlockMedianTimePast(int height) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = ::ChainActive()[height];
        assert(block != nullptr);
        return block->GetMedianTimePast();
    }
    bool haveBlockOnDisk(int height) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = ::ChainActive()[height];
        return block && (block->nStatus.hasData() != 0) && block->nTx > 0;
    }
    Optional<int> findFirstBlockWithTimeAndHeight(int64_t time, int height,
                                                  BlockHash *hash) override {
        LockAssertion lock(::cs_main);
        CBlockIndex *block = ::ChainActive().FindEarliestAtLeast(time, height);
        if (block) {
            if (hash) {
                *hash = block->GetBlockHash();
            }
            return block->nHeight;
        }
        return nullopt;
    }
    Optional<int> findPruned(int start_height,
                             Optional<int> stop_height) override {
        LockAssertion lock(::cs_main);
        if (::fPruneMode) {
            CBlockIndex *block = stop_height ? ::ChainActive()[*stop_height]
                                             : ::ChainActive().Tip();
            while (block && block->nHeight >= start_height) {
                if (block->nStatus.hasData() == 0) {
                    return block->nHeight;
                }
                block = block->pprev;
            }
        }
        return nullopt;
    }
    Optional<int> findFork(const BlockHash &hash,
                           Optional<int> *height) override {
        LockAssertion lock(::cs_main);
        const CBlockIndex *block = LookupBlockIndex(hash);
        const CBlockIndex *fork =
            block ? ::ChainActive().FindFork(block) : nullptr;
        if (height) {
            if (block) {
                *height = block->nHeight;
            } else {
                height->reset();
            }
        }
        if (fork) {
            return fork->nHeight;
        }
        return nullopt;
    }
    CBlockLocator getTipLocator() override {
        LockAssertion lock(::cs_main);
        return ::ChainActive().GetLocator();
    }
    Optional<int> findLocatorFork(const CBlockLocator &locator) override {
        LockAssertion lock(::cs_main);
        if (CBlockIndex *fork =
                FindForkInGlobalIndex(::ChainActive(), locator)) {
            return fork->nHeight;
        }
        return nullopt;
    }
    bool contextualCheckTransactionForCurrentBlock(
        const Consensus::Params &params, const CTransaction &tx,
        CValidationState &state) override {
        LockAssertion lock(::cs_main);
        return ContextualCheckTransactionForCurrentBlock(params, tx, state);
    }

    using UniqueLock::UniqueLock;
}; // namespace interfaces

class NotificationsHandlerImpl : public Handler, CValidationInterface {
public:
    explicit NotificationsHandlerImpl(Chain &chain,
                                      Chain::Notifications &notifications)
        : m_chain(chain), m_notifications(&notifications) {
        RegisterValidationInterface(this);
    }
    ~NotificationsHandlerImpl() override { disconnect(); }
    void disconnect() override {
        if (m_notifications) {
            m_notifications = nullptr;
            UnregisterValidationInterface(this);
        }
    }
    void TransactionAddedToMempool(const CTransactionRef &tx) override {
        m_notifications->TransactionAddedToMempool(tx);
    }
    void TransactionRemovedFromMempool(const CTransactionRef &tx) override {
        m_notifications->TransactionRemovedFromMempool(tx);
    }
    void BlockConnected(
        const std::shared_ptr<const CBlock> &block, const CBlockIndex *index,
        const std::vector<CTransactionRef> &tx_conflicted) override {
        m_notifications->BlockConnected(*block, tx_conflicted);
    }
    void
    BlockDisconnected(const std::shared_ptr<const CBlock> &block) override {
        m_notifications->BlockDisconnected(*block);
    }
    void UpdatedBlockTip(const CBlockIndex *index,
                         const CBlockIndex *fork_index, bool is_ibd) override {
        m_notifications->UpdatedBlockTip();
    }
    void ChainStateFlushed(const CBlockLocator &locator) override {
        m_notifications->ChainStateFlushed(locator);
    }
    Chain &m_chain;
    Chain::Notifications *m_notifications;
};

class RpcHandlerImpl : public Handler {
public:
-    RpcHandlerImpl(const CRPCCommand &command)
+    explicit RpcHandlerImpl(const CRPCCommand &command)
        : m_command(command), m_wrapped_command(&command) {
        m_command.actor = [this](Config &config, const JSONRPCRequest &request,
                                 UniValue &result, bool last_handler) {
            if (!m_wrapped_command) {
                return false;
            }
            try {
                return m_wrapped_command->actor(config, request, result,
                                                last_handler);
            } catch (const UniValue &e) {
                // If this is not the last handler and a wallet not found
                // exception was thrown, return false so the next handler
                // can try to handle the request. Otherwise, reraise the
                // exception.
                if (!last_handler) {
                    const UniValue &code = e["code"];
                    if (code.isNum() &&
                        code.get_int() == RPC_WALLET_NOT_FOUND) {
                        return false;
                    }
                }
                throw;
            }
        };
        ::tableRPC.appendCommand(m_command.name, &m_command);
    }

    void disconnect() override final {
        if (m_wrapped_command) {
            m_wrapped_command = nullptr;
            ::tableRPC.removeCommand(m_command.name, &m_command);
        }
    }

    ~RpcHandlerImpl() override { disconnect(); }

    CRPCCommand m_command;
    const CRPCCommand *m_wrapped_command;
};

class ChainImpl : public Chain {
public:
    explicit ChainImpl(NodeContext &node) : m_node(node) {}
    std::unique_ptr<Chain::Lock> lock(bool try_lock) override {
        auto lock = std::make_unique<LockImpl>(
            ::cs_main, "cs_main", __FILE__, __LINE__, try_lock);
        if (try_lock && lock && !*lock) {
            return {};
        }
        // Temporary to avoid CWG 1579
        std::unique_ptr<Chain::Lock> result = std::move(lock);
        return result;
    }
    bool findBlock(const BlockHash &hash, CBlock *block, int64_t *time,
                   int64_t *time_max) override {
        CBlockIndex *index;
        {
            LOCK(cs_main);
            index = LookupBlockIndex(hash);
            if (!index) {
                return false;
            }
            if (time) {
                *time = index->GetBlockTime();
            }
            if (time_max) {
                *time_max = index->GetBlockTimeMax();
            }
        }
        if (block &&
            !ReadBlockFromDisk(*block, index, Params().GetConsensus())) {
            block->SetNull();
        }
        return true;
    }
    void findCoins(std::map<COutPoint, Coin> &coins) override {
        return FindCoins(coins);
    }
    double guessVerificationProgress(const BlockHash &block_hash) override {
        LOCK(cs_main);
        return GuessVerificationProgress(Params().TxData(),
                                         LookupBlockIndex(block_hash));
    }
    bool hasDescendantsInMempool(const TxId &txid) override {
        LOCK(::g_mempool.cs);
        auto it = ::g_mempool.GetIter(txid);
        return it && (*it)->GetCountWithDescendants() > 1;
    }
    bool broadcastTransaction(const Config &config, const CTransactionRef &tx,
                              std::string &err_string,
                              const Amount &max_tx_fee, bool relay) override {
        const TransactionError err = BroadcastTransaction(
            m_node, config, tx, err_string, max_tx_fee, relay,
            /*wait_callback*/ false);
        // Chain clients only care about failures to accept the tx to the
        // mempool. Disregard non-mempool related failures. Note: this will
        // need to be updated if BroadcastTransactions() is updated to
        // return other non-mempool failures that Chain clients do not need
        // to know about.
        return err == TransactionError::OK;
    }
    void getTransactionAncestry(const TxId &txid, size_t &ancestors,
                                size_t &descendants) override {
        ::g_mempool.GetTransactionAncestry(txid, ancestors, descendants);
    }
    bool checkChainLimits(const CTransactionRef &tx) override {
        LockPoints lp;
        CTxMemPoolEntry entry(tx, Amount(), 0, 0, false, 0, lp);
        CTxMemPool::setEntries ancestors;
        auto limit_ancestor_count =
            gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        auto limit_ancestor_size =
            gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
            1000;
        auto limit_descendant_count =
            gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        auto limit_descendant_size =
            gArgs.GetArg("-limitdescendantsize",
                         DEFAULT_DESCENDANT_SIZE_LIMIT) *
            1000;
        std::string unused_error_string;
        LOCK(::g_mempool.cs);
        return ::g_mempool.CalculateMemPoolAncestors(
            entry, ancestors, limit_ancestor_count, limit_ancestor_size,
            limit_descendant_count, limit_descendant_size,
            unused_error_string);
    }
    CFeeRate estimateFee() const override { return ::g_mempool.estimateFee(); }
    CFeeRate relayMinFee() override { return ::minRelayTxFee; }
    CFeeRate relayDustFee() override { return ::dustRelayFee; }
    bool getPruneMode() override { return ::fPruneMode; }
    bool p2pEnabled() override { return m_node.connman != nullptr; }
    bool isReadyToBroadcast() override {
        return !::fImporting && !::fReindex && !isInitialBlockDownload();
    }
    bool isInitialBlockDownload() override {
        return ::ChainstateActive().IsInitialBlockDownload();
    }
    bool shutdownRequested() override { return ShutdownRequested(); }
    int64_t getAdjustedTime() override { return GetAdjustedTime(); }
    void initMessage(const std::string &message) override {
        ::uiInterface.InitMessage(message);
    }
    void initWarning(const std::string &message) override {
        InitWarning(message);
    }
    void initError(const std::string &message) override {
        InitError(message);
    }
    void loadWallet(std::unique_ptr<Wallet> wallet) override {
        ::uiInterface.LoadWallet(wallet);
    }
    void showProgress(const std::string &title, int progress,
                      bool resume_possible) override {
        ::uiInterface.ShowProgress(title, progress, resume_possible);
    }
    std::unique_ptr<Handler>
    handleNotifications(Notifications &notifications) override {
        return std::make_unique<NotificationsHandlerImpl>(*this,
                                                          notifications);
    }
    void waitForNotificationsIfNewBlocksConnected(
        const BlockHash &old_tip) override {
        if (!old_tip.IsNull()) {
            LOCK(::cs_main);
            if (old_tip == ::ChainActive().Tip()->GetBlockHash()) {
                return;
            }
            CBlockIndex *block = LookupBlockIndex(old_tip);
            if (block && block->GetAncestor(::ChainActive().Height()) ==
                             ::ChainActive().Tip()) {
                return;
            }
        }
        SyncWithValidationInterfaceQueue();
    }
    std::unique_ptr<Handler> handleRpc(const CRPCCommand &command) override {
        return std::make_unique<RpcHandlerImpl>(command);
    }
    bool rpcEnableDeprecated(const std::string &method) override {
        return IsDeprecatedRPCEnabled(gArgs, method);
    }
    void rpcRunLater(const std::string &name, std::function<void()> fn,
                     int64_t seconds) override {
        RPCRunLater(name, std::move(fn), seconds);
    }
    int rpcSerializationFlags() override { return RPCSerializationFlags(); }
    void requestMempoolTransactions(Notifications &notifications) override {
        LOCK2(::cs_main, ::g_mempool.cs);
        for (const CTxMemPoolEntry &entry : ::g_mempool.mapTx) {
            notifications.TransactionAddedToMempool(entry.GetSharedTx());
        }
    }

    NodeContext &m_node;
};
} // namespace

std::unique_ptr<Chain> MakeChain(NodeContext &node) {
    return std::make_unique<ChainImpl>(node);
}
} // namespace interfaces
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 89b2e126e..d78058408 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -1,246 +1,247 @@
// Copyright (c) 2017-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_RPC_UTIL_H
#define BITCOIN_RPC_UTIL_H

#include
#include
#include
#include