diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 88fa714a9..6c8f5c8df 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -1,82 +1,85 @@ # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. bin_PROGRAMS += bench/bench_bitcoin BENCH_SRCDIR = bench BENCH_BINARY = bench/bench_bitcoin$(EXEEXT) RAW_BENCH_FILES = \ bench/data/block413567.raw GENERATED_BENCH_FILES = $(RAW_BENCH_FILES:.raw=.raw.h) bench_bench_bitcoin_SOURCES = \ $(RAW_BENCH_FILES) \ bench/bench_bitcoin.cpp \ bench/bench.cpp \ bench/bench.h \ bench/cashaddr.cpp \ bench/checkblock.cpp \ bench/checkqueue.cpp \ bench/examples.cpp \ bench/rollingbloom.cpp \ bench/crypto_aes.cpp \ bench/crypto_hash.cpp \ bench/ccoins_caching.cpp \ bench/gcs_filter.cpp \ bench/merkle_root.cpp \ bench/mempool_eviction.cpp \ + bench/rpc_mempool.cpp \ bench/base58.cpp \ bench/lockedpool.cpp \ bench/prevector.cpp nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_BENCH_FILES) bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/ bench_bench_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) bench_bench_bitcoin_LDADD = \ $(LIBBITCOIN_SERVER) \ $(LIBBITCOIN_WALLET) \ $(LIBBITCOIN_COMMON) \ $(LIBBITCOIN_UTIL) \ $(LIBBITCOIN_CONSENSUS) \ $(LIBBITCOIN_CRYPTO) \ $(LIBLEVELDB) \ $(LIBLEVELDB_SSE42) \ $(LIBMEMENV) \ $(LIBSECP256K1) \ - $(LIBUNIVALUE) + $(LIBUNIVALUE) \ + $(EVENT_PTHREADS_LIBS) \ + $(EVENT_LIBS) if ENABLE_ZMQ bench_bench_bitcoin_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) endif if ENABLE_WALLET bench_bench_bitcoin_SOURCES += bench/coin_selection.cpp endif bench_bench_bitcoin_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) bench_bench_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno $(GENERATED_BENCH_FILES) CLEANFILES += $(CLEAN_BITCOIN_BENCH) bench/checkblock.cpp: bench/data/block413567.raw.h bitcoin_bench: $(BENCH_BINARY) bench: $(BENCH_BINARY) FORCE $(BENCH_BINARY) bitcoin_bench_clean : FORCE rm -f $(CLEAN_BITCOIN_BENCH) $(bench_bench_bitcoin_OBJECTS) $(BENCH_BINARY) %.raw.h: %.raw @$(MKDIR_P) $(@D) @{ \ echo "static unsigned const char $(*F)[] = {" && \ $(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' && \ echo "};"; \ } > "$@.new" && mv -f "$@.new" "$@" @echo "Generated $@" diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt index 4cac1af96..0a24b863d 100644 --- a/src/bench/CMakeLists.txt +++ b/src/bench/CMakeLists.txt @@ -1,70 +1,71 @@ # Copyright (c) 2018 The Bitcoin developers project(bitcoin-bench) set(BENCH_DATA_RAW_FILES data/block413567.raw ) # Process raw files. 
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/data") foreach(_raw_file ${BENCH_DATA_RAW_FILES}) string(APPEND _generated_header_output "${CMAKE_CURRENT_BINARY_DIR}/${_raw_file}" ".h" ) list(APPEND BENCH_DATA_GENERATED_HEADERS ${_generated_header_output}) get_filename_component(_test_name ${_raw_file} NAME_WE) add_custom_command( OUTPUT "${_generated_header_output}" COMMAND "${PYTHON_EXECUTABLE}" "data/convert-raw-to-header.py" "${_test_name}" "${_raw_file}" > "${_generated_header_output}" COMMENT "Transforming raw file ${_raw_file} into header" MAIN_DEPEDENCY "${_raw_file}" DEPENDS "data/convert-raw-to-header.py" VERBATIM WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" ) endforeach() add_executable(bitcoin-bench EXCLUDE_FROM_ALL base58.cpp bench.cpp bench_bitcoin.cpp cashaddr.cpp ccoins_caching.cpp checkblock.cpp checkqueue.cpp crypto_aes.cpp crypto_hash.cpp examples.cpp gcs_filter.cpp lockedpool.cpp mempool_eviction.cpp merkle_root.cpp prevector.cpp rollingbloom.cpp + rpc_mempool.cpp # Add the generated headers to trigger the conversion command ${BENCH_DATA_GENERATED_HEADERS} ) target_link_libraries(bitcoin-bench common bitcoinconsensus server) if(BUILD_BITCOIN_WALLET) target_sources(bitcoin-bench PRIVATE coin_selection.cpp) endif() add_custom_target(bench-bitcoin COMMAND ./bitcoin-bench DEPENDS bitcoin-bench ) diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp new file mode 100644 index 000000000..4c6cb384b --- /dev/null +++ b/src/bench/rpc_mempool.cpp @@ -0,0 +1,45 @@ +// Copyright (c) 2011-2019 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include + +#include + +#include +#include + +static void AddTx(const CTransactionRef &tx, const Amount &fee, + CTxMemPool &pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { + LockPoints lp; + pool.addUnchecked(tx->GetId(), + CTxMemPoolEntry(tx, fee, /* time */ 0, /*priority*/ 10.0, + /* height */ 1, tx->GetValueOut(), + /* spendsCoinbase */ false, + /* sigOpCost */ 4, lp)); +} + +static void RpcMempool(benchmark::State &state) { + CTxMemPool pool; + LOCK2(cs_main, pool.cs); + + for (int i = 0; i < 1000; ++i) { + CMutableTransaction tx = CMutableTransaction(); + tx.vin.resize(1); + tx.vin[0].scriptSig = CScript() << OP_1; + tx.vout.resize(1); + tx.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL; + tx.vout[0].nValue = i * CENT; + const CTransactionRef tx_r{MakeTransactionRef(tx)}; + AddTx(tx_r, /* fee */ i * CENT, pool); + } + + while (state.KeepRunning()) { + (void)MempoolToJSON(pool, /*verbose*/ true); + } +} + +BENCHMARK(RpcMempool, 40); diff --git a/src/rest.cpp b/src/rest.cpp index 2661f2799..68652a6ac 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -1,684 +1,684 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Allow a max of 15 outpoints to be queried at once. 
static const size_t MAX_GETUTXOS_OUTPOINTS = 15; enum class RetFormat { UNDEF, BINARY, HEX, JSON, }; static const struct { enum RetFormat rf; const char *name; } rf_names[] = { {RetFormat::UNDEF, ""}, {RetFormat::BINARY, "bin"}, {RetFormat::HEX, "hex"}, {RetFormat::JSON, "json"}, }; struct CCoin { uint32_t nHeight; CTxOut out; CCoin() : nHeight(0) {} explicit CCoin(Coin in) : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { uint32_t nTxVerDummy = 0; READWRITE(nTxVerDummy); READWRITE(nHeight); READWRITE(out); } }; static bool RESTERR(HTTPRequest *req, enum HTTPStatusCode status, std::string message) { req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(status, message + "\r\n"); return false; } static enum RetFormat ParseDataFormat(std::string ¶m, const std::string &strReq) { const std::string::size_type pos = strReq.rfind('.'); if (pos == std::string::npos) { param = strReq; return rf_names[0].rf; } param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (suff == rf_names[i].name) { return rf_names[i].rf; } } /* If no suffix is found, return original string. */ param = strReq; return rf_names[0].rf; } static std::string AvailableDataFormatsString() { std::string formats; for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (strlen(rf_names[i].name) > 0) { formats.append("."); formats.append(rf_names[i].name); formats.append(", "); } } if (formats.length() > 0) { return formats.substr(0, formats.length() - 2); } return formats; } static bool ParseHashStr(const std::string &strReq, uint256 &v) { if (!IsHex(strReq) || (strReq.size() != 64)) { return false; } v.SetHex(strReq); return true; } static bool CheckWarmup(HTTPRequest *req) { std::string statusmessage; if (RPCIsInWarmup(&statusmessage)) { return RESTERR(req, HTTP_SERVICE_UNAVAILABLE, "Service temporarily unavailable: " + statusmessage); } return true; } static bool rest_headers(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector path; boost::split(path, param, boost::is_any_of("/")); if (path.size() != 2) { return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. 
Use " "/rest/headers//.."); } long count = strtol(path[0].c_str(), nullptr, 10); if (count < 1 || count > 2000) { return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]); } std::string hashStr = path[1]; uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const CBlockIndex *tip = nullptr; std::vector headers; headers.reserve(count); { LOCK(cs_main); tip = chainActive.Tip(); const CBlockIndex *pindex = LookupBlockIndex(hash); while (pindex != nullptr && chainActive.Contains(pindex)) { headers.push_back(pindex); if (headers.size() == size_t(count)) { break; } pindex = chainActive.Next(pindex); } } CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { ssHeader << pindex->GetBlockHeader(); } switch (rf) { case RetFormat::BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); for (const CBlockIndex *pindex : headers) { jsonHeaders.push_back(blockheaderToJSON(tip, pindex)); } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: .bin, .hex)"); } } } static bool rest_block(const Config &config, HTTPRequest *req, const std::string &strURIPart, bool showTxDetails) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } CBlock block; CBlockIndex *pblockindex = nullptr; CBlockIndex *tip = nullptr; { LOCK(cs_main); tip = chainActive.Tip(); pblockindex = LookupBlockIndex(hash); if (!pblockindex) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config.GetChainParams().GetConsensus())) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } } CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; switch (rf) { case RetFormat::BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objBlock = blockToJSON(block, tip, pblockindex, showTxDetails); std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } } static bool rest_block_extended(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, 
true); } static bool rest_block_notxdetails(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, false); } static bool rest_chaininfo(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); UniValue chainInfoObject = getblockchaininfo(config, jsonRequest); std::string strJSON = chainInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } } static bool rest_mempool_info(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { - UniValue mempoolInfoObject = mempoolInfoToJSON(); + UniValue mempoolInfoObject = MempoolInfoToJSON(::g_mempool); std::string strJSON = mempoolInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } } static bool rest_mempool_contents(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { - UniValue mempoolObject = mempoolToJSON(true); + UniValue mempoolObject = MempoolToJSON(::g_mempool, true); std::string strJSON = mempoolObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } } static bool rest_tx(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const TxId txid(hash); if (g_txindex) { g_txindex->BlockUntilSyncedToCurrentChain(); } CTransactionRef tx; uint256 hashBlock = uint256(); if (!GetTransaction(config.GetChainParams().GetConsensus(), txid, tx, hashBlock, true)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssTx << tx; switch (rf) { case RetFormat::BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } } static bool rest_getutxos(Config &config, HTTPRequest 
*req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector uriParts; if (param.length() > 1) { std::string strUriParams = param.substr(1); boost::split(uriParts, strUriParams, boost::is_any_of("/")); } // throw exception in case of an empty request std::string strRequestMutable = req->ReadBody(); if (strRequestMutable.length() == 0 && uriParts.size() == 0) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } bool fInputParsed = false; bool fCheckMemPool = false; std::vector vOutPoints; // parse/deserialize input // input-format = output-format, rest/getutxos/bin requires binary input, // gives binary output, ... if (uriParts.size() > 0) { // inputs is sent over URI scheme // (/rest/getutxos/checkmempool/txid1-n/txid2-n/...) if (uriParts[0] == "checkmempool") { fCheckMemPool = true; } for (size_t i = (fCheckMemPool) ? 1 : 0; i < uriParts.size(); i++) { int32_t nOutput; std::string strTxid = uriParts[i].substr(0, uriParts[i].find('-')); std::string strOutput = uriParts[i].substr(uriParts[i].find('-') + 1); if (!ParseInt32(strOutput, &nOutput) || !IsHex(strTxid)) { return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } TxId txid; txid.SetHex(strTxid); vOutPoints.push_back(COutPoint(txid, uint32_t(nOutput))); } if (vOutPoints.size() > 0) { fInputParsed = true; } else { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } } switch (rf) { case RetFormat::HEX: { // convert hex to bin, continue then with bin part std::vector strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } // FALLTHROUGH case RetFormat::BINARY: { try { // deserialize only if user sent a request if (strRequestMutable.size() > 0) { // don't allow sending input over URI and HTTP RAW DATA if (fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and " "raw post data is not allowed"); } CDataStream oss(SER_NETWORK, PROTOCOL_VERSION); oss << strRequestMutable; oss >> fCheckMemPool; oss >> vOutPoints; } } catch (const std::ios_base::failure &e) { // abort in case of unreadable binary data return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } break; } case RetFormat::JSON: { if (!fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } break; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // limit max outpoints if (vOutPoints.size() > MAX_GETUTXOS_OUTPOINTS) { return RESTERR( req, HTTP_BAD_REQUEST, strprintf("Error: max outpoints exceeded (max: %d, tried: %d)", MAX_GETUTXOS_OUTPOINTS, vOutPoints.size())); } // check spentness and form a bitmap (as well as a JSON capable // human-readable string representation) std::vector bitmap; std::vector outs; std::string bitmapStringRepresentation; std::vector hits; bitmap.resize((vOutPoints.size() + 7) / 8); { auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView &view, const CTxMemPool &mempool) { for (const COutPoint &vOutPoint : vOutPoints) { Coin coin; bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin); hits.push_back(hit); if (hit) { outs.emplace_back(std::move(coin)); } } }; if (fCheckMemPool) { // use db+mempool as cache backend in case user likes to query // mempool LOCK2(cs_main, g_mempool.cs); CCoinsViewCache &viewChain = *pcoinsTip; CCoinsViewMemPool viewMempool(&viewChain, g_mempool); process_utxos(viewMempool, 
g_mempool); } else { // no need to lock mempool! LOCK(cs_main); process_utxos(*pcoinsTip, CTxMemPool()); } for (size_t i = 0; i < hits.size(); ++i) { const bool hit = hits[i]; // form a binary string representation (human-readable for json // output) bitmapStringRepresentation.append(hit ? "1" : "0"); bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { case RetFormat::BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string ssGetUTXOResponseString = ssGetUTXOResponse.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, ssGetUTXOResponseString); return true; } case RetFormat::HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials // use more or less the same output as mentioned in Bip64 objGetUTXOResponse.pushKV("chainHeight", chainActive.Height()); objGetUTXOResponse.pushKV( "chaintipHash", chainActive.Tip()->GetBlockHash().GetHex()); objGetUTXOResponse.pushKV("bitmap", bitmapStringRepresentation); UniValue utxos(UniValue::VARR); for (const CCoin &coin : outs) { UniValue utxo(UniValue::VOBJ); utxo.pushKV("height", int32_t(coin.nHeight)); utxo.pushKV("value", ValueFromAmount(coin.out.nValue)); // include the script in a json output UniValue o(UniValue::VOBJ); ScriptPubKeyToUniv(coin.out.scriptPubKey, o, true); utxo.pushKV("scriptPubKey", o); utxos.push_back(utxo); } objGetUTXOResponse.pushKV("utxos", utxos); // return json string std::string strJSON = objGetUTXOResponse.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } } static const struct { const char *prefix; bool (*handler)(Config &config, HTTPRequest *req, const std::string &strReq); } uri_prefixes[] = { {"/rest/tx/", rest_tx}, {"/rest/block/notxdetails/", rest_block_notxdetails}, {"/rest/block/", rest_block_extended}, {"/rest/chaininfo", rest_chaininfo}, {"/rest/mempool/info", rest_mempool_info}, {"/rest/mempool/contents", rest_mempool_contents}, {"/rest/headers/", rest_headers}, {"/rest/getutxos", rest_getutxos}, }; bool StartREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { RegisterHTTPHandler(uri_prefixes[i].prefix, false, uri_prefixes[i].handler); } return true; } void InterruptREST() {} void StopREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { UnregisterHTTPHandler(uri_prefixes[i].prefix, false); } } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 5ab58df49..841624c16 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1,2255 +1,2255 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // boost::thread::interrupt #include #include #include struct CUpdatedBlock { uint256 hash; int height; }; static Mutex cs_blockchange; static std::condition_variable cond_blockchange; static CUpdatedBlock latestblock; /** * Calculate the difficulty for a given block index. */ double GetDifficulty(const CBlockIndex *blockindex) { assert(blockindex); int nShift = (blockindex->nBits >> 24) & 0xff; double dDiff = double(0x0000ffff) / double(blockindex->nBits & 0x00ffffff); while (nShift < 29) { dDiff *= 256.0; nShift++; } while (nShift > 29) { dDiff /= 256.0; nShift--; } return dDiff; } static int ComputeNextBlockAndDepth(const CBlockIndex *tip, const CBlockIndex *blockindex, const CBlockIndex *&next) { next = tip->GetAncestor(blockindex->nHeight + 1); if (next && next->pprev == blockindex) { return tip->nHeight - blockindex->nHeight + 1; } next = nullptr; return blockindex == tip ? 1 : -1; } UniValue blockheaderToJSON(const CBlockIndex *tip, const CBlockIndex *blockindex) { UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); const CBlockIndex *pnext; int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); result.pushKV("confirmations", confirmations); result.pushKV("height", blockindex->nHeight); result.pushKV("version", blockindex->nVersion); result.pushKV("versionHex", strprintf("%08x", blockindex->nVersion)); result.pushKV("merkleroot", blockindex->hashMerkleRoot.GetHex()); result.pushKV("time", int64_t(blockindex->nTime)); result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast())); result.pushKV("nonce", uint64_t(blockindex->nNonce)); result.pushKV("bits", strprintf("%08x", blockindex->nBits)); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex->nChainWork.GetHex()); if (blockindex->pprev) { result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); } if (pnext) { result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); } return result; } UniValue blockToJSON(const CBlock &block, const CBlockIndex *tip, const CBlockIndex *blockindex, bool txDetails) { UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); const CBlockIndex *pnext; int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); result.pushKV("confirmations", confirmations); result.pushKV( "size", (int)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION)); result.pushKV("height", blockindex->nHeight); result.pushKV("version", block.nVersion); result.pushKV("versionHex", strprintf("%08x", block.nVersion)); result.pushKV("merkleroot", block.hashMerkleRoot.GetHex()); UniValue txs(UniValue::VARR); for (const auto &tx : block.vtx) { if (txDetails) { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, uint256(), objTx, true, RPCSerializationFlags()); txs.push_back(objTx); } else { txs.push_back(tx->GetId().GetHex()); } } result.pushKV("tx", txs); result.pushKV("time", block.GetBlockTime()); result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast())); result.pushKV("nonce", uint64_t(block.nNonce)); result.pushKV("bits", strprintf("%08x", block.nBits)); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex->nChainWork.GetHex()); if (blockindex->pprev) { 
result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); } if (pnext) { result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); } return result; } static UniValue getblockcount(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockcount\n" "\nReturns the number of blocks in the longest blockchain.\n" "\nResult:\n" "n (numeric) The current block count\n" "\nExamples:\n" + HelpExampleCli("getblockcount", "") + HelpExampleRpc("getblockcount", "")); } LOCK(cs_main); return chainActive.Height(); } static UniValue getbestblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getbestblockhash\n" "\nReturns the hash of the best (tip) block in the " "longest blockchain.\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n" "\nExamples:\n" + HelpExampleCli("getbestblockhash", "") + HelpExampleRpc("getbestblockhash", "")); } LOCK(cs_main); return chainActive.Tip()->GetBlockHash().GetHex(); } UniValue getfinalizedblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getfinalizedblockhash\n" "\nReturns the hash of the currently finalized block\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n"); } LOCK(cs_main); const CBlockIndex *blockIndexFinalized = GetFinalizedBlock(); if (blockIndexFinalized) { return blockIndexFinalized->GetBlockHash().GetHex(); } return UniValue(UniValue::VSTR); } void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex) { if (pindex) { std::lock_guard lock(cs_blockchange); latestblock.hash = pindex->GetBlockHash(); latestblock.height = pindex->nHeight; } cond_blockchange.notify_all(); } static UniValue waitfornewblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "waitfornewblock (timeout)\n" "\nWaits for a specific new block and returns " "useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. timeout (int, optional, default=0) Time in " "milliseconds to wait for a response. 0 indicates " "no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitfornewblock", "1000") + HelpExampleRpc("waitfornewblock", "1000")); } int timeout = 0; if (!request.params[0].isNull()) { timeout = request.params[0].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); block = latestblock; if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue waitforblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblock (timeout)\n" "\nWaits for a specific new block and returns useful info about " "it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. 
\"blockhash\" (required, string) Block hash to wait for.\n" "2. timeout (int, optional, default=0) Time in milliseconds " "to wait for a response. 0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000") + HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000")); } int timeout = 0; uint256 hash = uint256S(request.params[0].get_str()); if (!request.params[1].isNull()) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue waitforblockheight(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblockheight (timeout)\n" "\nWaits for (at least) block height and returns the height and " "hash\n" "of the current tip.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. height (required, int) Block height to wait for (int)\n" "2. timeout (int, optional, default=0) Time in milliseconds to " "wait for a response. 0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblockheight", "\"100\", 1000") + HelpExampleRpc("waitforblockheight", "\"100\", 1000")); } int timeout = 0; int height = request.params[0].get_int(); if (!request.params[1].isNull()) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue syncwithvalidationinterfacequeue(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 0) { throw std::runtime_error( "syncwithvalidationinterfacequeue\n" "\nWaits for the validation interface queue to catch up on " "everything that was there when we entered this function.\n" "\nExamples:\n" + HelpExampleCli("syncwithvalidationinterfacequeue", "") + HelpExampleRpc("syncwithvalidationinterfacequeue", "")); } SyncWithValidationInterfaceQueue(); return NullUniValue; } static UniValue getdifficulty(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("getdifficulty\n" "\nReturns the proof-of-work difficulty as a " "multiple of the minimum difficulty.\n" "\nResult:\n" "n.nnn (numeric) the proof-of-work " "difficulty as a multiple of the minimum " "difficulty.\n" "\nExamples:\n" + HelpExampleCli("getdifficulty", "") + 
HelpExampleRpc("getdifficulty", "")); } LOCK(cs_main); return GetDifficulty(chainActive.Tip()); } static std::string EntryDescriptionString() { return " \"size\" : n, (numeric) transaction size.\n" " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "(DEPRECATED)" + "\n" " \"modifiedfee\" : n, (numeric) transaction fee with fee " "deltas used for mining priority (DEPRECATED)\n" " \"time\" : n, (numeric) local time transaction " "entered pool in seconds since 1 Jan 1970 GMT\n" " \"height\" : n, (numeric) block height when " "transaction entered pool\n" " \"startingpriority\" : n, (numeric) DEPRECATED. Priority when " "transaction entered pool\n" " \"currentpriority\" : n, (numeric) DEPRECATED. Transaction " "priority now\n" " \"descendantcount\" : n, (numeric) number of in-mempool " "descendant transactions (including this one)\n" " \"descendantsize\" : n, (numeric) virtual transaction size " "of in-mempool descendants (including this one)\n" " \"descendantfees\" : n, (numeric) modified fees (see above) " "of in-mempool descendants (including this one) (DEPRECATED)\n" " \"ancestorcount\" : n, (numeric) number of in-mempool " "ancestor transactions (including this one)\n" " \"ancestorsize\" : n, (numeric) virtual transaction size " "of in-mempool ancestors (including this one)\n" " \"ancestorfees\" : n, (numeric) modified fees (see above) " "of in-mempool ancestors (including this one) (DEPRECATED)\n" " \"fees\" : {\n" " \"base\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" " \"modified\" : n, (numeric) transaction fee with fee " "deltas used for mining priority in " + CURRENCY_UNIT + "\n" " \"ancestor\" : n, (numeric) modified fees (see above) " "of in-mempool ancestors (including this one) in " + CURRENCY_UNIT + "\n" " \"descendant\" : n, (numeric) modified fees (see above) " "of in-mempool descendants (including this one) in " + CURRENCY_UNIT + "\n" " }\n" " \"depends\" : [ (array) unconfirmed transactions " "used as inputs for this transaction\n" " \"transactionid\", (string) parent transaction id\n" " ... ]\n" " \"spentby\" : [ (array) unconfirmed transactions " "spending outputs from this transaction\n" " \"transactionid\", (string) child transaction id\n" " ... 
]\n"; } -static void entryToJSON(UniValue &info, const CTxMemPoolEntry &e) - EXCLUSIVE_LOCKS_REQUIRED(g_mempool.cs) { - AssertLockHeld(g_mempool.cs); +static void entryToJSON(const CTxMemPool &pool, UniValue &info, + const CTxMemPoolEntry &e) + EXCLUSIVE_LOCKS_REQUIRED(pool.cs) { + AssertLockHeld(pool.cs); UniValue fees(UniValue::VOBJ); fees.pushKV("base", ValueFromAmount(e.GetFee())); fees.pushKV("modified", ValueFromAmount(e.GetModifiedFee())); fees.pushKV("ancestor", ValueFromAmount(e.GetModFeesWithAncestors())); fees.pushKV("descendant", ValueFromAmount(e.GetModFeesWithDescendants())); info.pushKV("fees", fees); info.pushKV("size", (int)e.GetTxSize()); info.pushKV("fee", ValueFromAmount(e.GetFee())); info.pushKV("modifiedfee", ValueFromAmount(e.GetModifiedFee())); info.pushKV("time", e.GetTime()); info.pushKV("height", (int)e.GetHeight()); info.pushKV("startingpriority", e.GetPriority(e.GetHeight())); info.pushKV("currentpriority", e.GetPriority(chainActive.Height())); info.pushKV("descendantcount", e.GetCountWithDescendants()); info.pushKV("descendantsize", e.GetSizeWithDescendants()); info.pushKV("descendantfees", e.GetModFeesWithDescendants() / SATOSHI); info.pushKV("ancestorcount", e.GetCountWithAncestors()); info.pushKV("ancestorsize", e.GetSizeWithAncestors()); info.pushKV("ancestorfees", e.GetModFeesWithAncestors() / SATOSHI); const CTransaction &tx = e.GetTx(); std::set setDepends; for (const CTxIn &txin : tx.vin) { - if (g_mempool.exists(txin.prevout.GetTxId())) { + if (pool.exists(txin.prevout.GetTxId())) { setDepends.insert(txin.prevout.GetTxId().ToString()); } } UniValue depends(UniValue::VARR); for (const std::string &dep : setDepends) { depends.push_back(dep); } info.pushKV("depends", depends); UniValue spent(UniValue::VARR); - const CTxMemPool::txiter &it = g_mempool.mapTx.find(tx.GetId()); - const CTxMemPool::setEntries &setChildren = - g_mempool.GetMemPoolChildren(it); + const CTxMemPool::txiter &it = pool.mapTx.find(tx.GetId()); + const CTxMemPool::setEntries &setChildren = pool.GetMemPoolChildren(it); for (CTxMemPool::txiter childiter : setChildren) { spent.push_back(childiter->GetTx().GetId().ToString()); } info.pushKV("spentby", spent); } -UniValue mempoolToJSON(bool fVerbose) { - if (fVerbose) { - LOCK(g_mempool.cs); +UniValue MempoolToJSON(const CTxMemPool &pool, bool verbose) { + if (verbose) { + LOCK(pool.cs); UniValue o(UniValue::VOBJ); - for (const CTxMemPoolEntry &e : g_mempool.mapTx) { + for (const CTxMemPoolEntry &e : pool.mapTx) { const uint256 &txid = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); - entryToJSON(info, e); + entryToJSON(pool, info, e); o.pushKV(txid.ToString(), info); } return o; } else { std::vector vtxids; - g_mempool.queryHashes(vtxids); + pool.queryHashes(vtxids); UniValue a(UniValue::VARR); for (const uint256 &txid : vtxids) { a.push_back(txid.ToString()); } return a; } } static UniValue getrawmempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getrawmempool ( verbose )\n" "\nReturns all transaction ids in memory pool as a json array of " "string transaction ids.\n" "\nHint: use getmempoolentry to fetch a specific transaction from " "the mempool.\n" "\nArguments:\n" "1. 
verbose (boolean, optional, default=false) True for a json " "object, false for array of transaction ids\n" "\nResult: (for verbose = false):\n" "[ (json array of string)\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" "]\n" "\nResult: (for verbose = true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getrawmempool", "true") + HelpExampleRpc("getrawmempool", "true")); } bool fVerbose = false; if (!request.params[0].isNull()) { fVerbose = request.params[0].get_bool(); } - return mempoolToJSON(fVerbose); + return MempoolToJSON(::g_mempool, fVerbose); } static UniValue getmempoolancestors(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempoolancestors txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool ancestors.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool ancestor transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolancestors", "\"mytxid\"") + HelpExampleRpc("getmempoolancestors", "\"mytxid\"")); } bool fVerbose = false; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setAncestors; uint64_t noLimit = std::numeric_limits::max(); std::string dummy; g_mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit, noLimit, noLimit, dummy, false); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter ancestorIt : setAncestors) { o.push_back(ancestorIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter ancestorIt : setAncestors) { const CTxMemPoolEntry &e = *ancestorIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); - entryToJSON(info, e); + entryToJSON(::g_mempool, info, e); o.pushKV(_hash.ToString(), info); } return o; } } static UniValue getmempooldescendants(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempooldescendants txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool descendants.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool descendant transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempooldescendants", "\"mytxid\"") + HelpExampleRpc("getmempooldescendants", "\"mytxid\"")); } bool fVerbose = false; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setDescendants; g_mempool.CalculateDescendants(it, setDescendants); // CTxMemPool::CalculateDescendants will include the given tx setDescendants.erase(it); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter descendantIt : setDescendants) { o.push_back(descendantIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter descendantIt : setDescendants) { const CTxMemPoolEntry &e = *descendantIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); - entryToJSON(info, e); + entryToJSON(::g_mempool, info, e); o.pushKV(_hash.ToString(), info); } return o; } } static UniValue getmempoolentry(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getmempoolentry txid\n" "\nReturns mempool data for given transaction\n" "\nArguments:\n" "1. \"txid\" (string, required) " "The transaction id (must be in mempool)\n" "\nResult:\n" "{ (json object)\n" + EntryDescriptionString() + "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"")); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } const CTxMemPoolEntry &e = *it; UniValue info(UniValue::VOBJ); - entryToJSON(info, e); + entryToJSON(::g_mempool, info, e); return info; } static UniValue getblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getblockhash height\n" "\nReturns hash of block in best-block-chain at height provided.\n" "\nArguments:\n" "1. 
height (numeric, required) The height index\n" "\nResult:\n" "\"hash\" (string) The block hash\n" "\nExamples:\n" + HelpExampleCli("getblockhash", "1000") + HelpExampleRpc("getblockhash", "1000")); } LOCK(cs_main); int nHeight = request.params[0].get_int(); if (nHeight < 0 || nHeight > chainActive.Height()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range"); } CBlockIndex *pblockindex = chainActive[nHeight]; return pblockindex->GetBlockHash().GetHex(); } static UniValue getblockheader(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblockheader \"hash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for blockheader 'hash'.\n" "If verbose is true, returns an Object with information about " "blockheader .\n" "\nArguments:\n" "1. \"hash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true for a " "json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"0000...1f3\" (string) Expected number of " "hashes required to produce the current chain (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\", (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"") + HelpExampleRpc("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } const CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); ssBlock << pblockindex->GetBlockHeader(); std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockheaderToJSON(chainActive.Tip(), pblockindex); } static CBlock GetBlockChecked(const Config &config, const CBlockIndex *pblockindex) { CBlock block; if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config.GetChainParams().GetConsensus())) { // Block not found on disk. 
This could be because we have the block // header in our index but don't have the block (for example if a // non-whitelisted node sends us an unrequested long chain of valid // blocks, we add the headers to our index, but don't accept the block). throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); } return block; } static UniValue getblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblock \"blockhash\" ( verbosity )\n" "\nIf verbosity is 0 or false, returns a string that is " "serialized, hex-encoded data for block 'hash'.\n" "If verbosity is 1 or true, returns an Object with information " "about block .\n" "If verbosity is 2, returns an Object with information about block " " and information about each transaction.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) The block hash\n" "2. verbosity (numeric, optional, default=1) 0 for " "hex-encoded data, 1 for a json object, and 2 for json object with " "transaction data\n" "\nResult (for verbosity = 0):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nResult (for verbosity = 1):\n" "{\n" " \"hash\" : \"hash\", (string) The block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"size\" : n, (numeric) The block size\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"tx\" : [ (array of string) The transaction ids\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" " ],\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"xxxx\", (string) Expected number of hashes " "required to produce the chain up to this block (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\" (string) The hash of the " "next block\n" "}\n" "\nResult (for verbosity = 2):\n" "{\n" " ..., Same output as verbosity = 1\n" " \"tx\" : [ (array of Objects) The transactions in " "the format of the getrawtransaction RPC; different from verbosity " "= 1 \"tx\" result\n" " ...\n" " ],\n" " ... Same output as verbosity = 1\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"") + HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); int verbosity = 1; if (!request.params[1].isNull()) { if (request.params[1].isNum()) { verbosity = request.params[1].get_int(); } else { verbosity = request.params[1].get_bool() ? 
1 : 0; } } const CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } const CBlock block = GetBlockChecked(config, pblockindex); if (verbosity <= 0) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockToJSON(block, chainActive.Tip(), pblockindex, verbosity >= 2); } struct CCoinsStats { int nHeight; uint256 hashBlock; uint64_t nTransactions; uint64_t nTransactionOutputs; uint64_t nBogoSize; uint256 hashSerialized; uint64_t nDiskSize; Amount nTotalAmount; CCoinsStats() : nHeight(0), nTransactions(0), nTransactionOutputs(0), nBogoSize(0), nDiskSize(0), nTotalAmount() {} }; static void ApplyStats(CCoinsStats &stats, CHashWriter &ss, const uint256 &hash, const std::map &outputs) { assert(!outputs.empty()); ss << hash; ss << VARINT(outputs.begin()->second.GetHeight() * 2 + outputs.begin()->second.IsCoinBase()); stats.nTransactions++; for (const auto &output : outputs) { ss << VARINT(output.first + 1); ss << output.second.GetTxOut().scriptPubKey; ss << VARINT(output.second.GetTxOut().nValue / SATOSHI, VarIntMode::NONNEGATIVE_SIGNED); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.GetTxOut().nValue; stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + 2 /* scriptPubKey len */ + output.second.GetTxOut().scriptPubKey.size() /* scriptPubKey */; } ss << VARINT(0u); } //! Calculate statistics about the unspent transaction output set static bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats) { std::unique_ptr pcursor(view->Cursor()); assert(pcursor); CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); stats.nHeight = LookupBlockIndex(stats.hashBlock)->nHeight; } ss << stats.hashBlock; uint256 prevkey; std::map outputs; while (pcursor->Valid()) { boost::this_thread::interruption_point(); COutPoint key; Coin coin; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { if (!outputs.empty() && key.GetTxId() != prevkey) { ApplyStats(stats, ss, prevkey, outputs); outputs.clear(); } prevkey = key.GetTxId(); outputs[key.GetN()] = std::move(coin); } else { return error("%s: unable to read value", __func__); } pcursor->Next(); } if (!outputs.empty()) { ApplyStats(stats, ss, prevkey, outputs); } stats.hashSerialized = ss.GetHash(); stats.nDiskSize = view->EstimateSize(); return true; } static UniValue pruneblockchain(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "pruneblockchain\n" "\nArguments:\n" "1. \"height\" (numeric, required) The block height to prune " "up to. 
May be set to a discrete height, or a unix timestamp\n" " to prune blocks whose block time is at least 2 " "hours older than the provided timestamp.\n" "\nResult:\n" "n (numeric) Height of the last block pruned.\n" "\nExamples:\n" + HelpExampleCli("pruneblockchain", "1000") + HelpExampleRpc("pruneblockchain", "1000")); } if (!fPruneMode) { throw JSONRPCError( RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode."); } LOCK(cs_main); int heightParam = request.params[0].get_int(); if (heightParam < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height."); } // Height value more than a billion is too high to be a block height, and // too low to be a block time (corresponds to timestamp from Sep 2001). if (heightParam > 1000000000) { // Add a 2 hour buffer to include blocks which might have had old // timestamps CBlockIndex *pindex = chainActive.FindEarliestAtLeast(heightParam - TIMESTAMP_WINDOW); if (!pindex) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Could not find block with at least the specified timestamp."); } heightParam = pindex->nHeight; } unsigned int height = (unsigned int)heightParam; unsigned int chainHeight = (unsigned int)chainActive.Height(); if (chainHeight < config.GetChainParams().PruneAfterHeight()) { throw JSONRPCError(RPC_MISC_ERROR, "Blockchain is too short for pruning."); } else if (height > chainHeight) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height."); } else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) { LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. " "Retaining the minimum number of blocks.\n"); height = chainHeight - MIN_BLOCKS_TO_KEEP; } PruneBlockFilesManual(height); return uint64_t(height); } static UniValue gettxoutsetinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "gettxoutsetinfo\n" "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n" "\nResult:\n" "{\n" " \"height\":n, (numeric) The current block height (index)\n" " \"bestblock\": \"hex\", (string) the best block hash hex\n" " \"transactions\": n, (numeric) The number of transactions\n" " \"txouts\": n, (numeric) The number of output " "transactions\n" " \"bogosize\": n, (numeric) A database-independent " "metric for UTXO set size\n" " \"hash_serialized\": \"hash\", (string) The serialized hash\n" " \"disk_size\": n, (numeric) The estimated size of the " "chainstate on disk\n" " \"total_amount\": x.xxx (numeric) The total amount\n" "}\n" "\nExamples:\n" + HelpExampleCli("gettxoutsetinfo", "") + HelpExampleRpc("gettxoutsetinfo", "")); } UniValue ret(UniValue::VOBJ); CCoinsStats stats; FlushStateToDisk(); if (GetUTXOStats(pcoinsdbview.get(), stats)) { ret.pushKV("height", int64_t(stats.nHeight)); ret.pushKV("bestblock", stats.hashBlock.GetHex()); ret.pushKV("transactions", int64_t(stats.nTransactions)); ret.pushKV("txouts", int64_t(stats.nTransactionOutputs)); ret.pushKV("bogosize", int64_t(stats.nBogoSize)); ret.pushKV("hash_serialized", stats.hashSerialized.GetHex()); ret.pushKV("disk_size", stats.nDiskSize); ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount)); } else { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } return ret; } UniValue gettxout(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "gettxout \"txid\" n ( 
include_mempool )\n" "\nReturns details about an unspent transaction output.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" "2. \"n\" (numeric, required) vout number\n" "3. \"include_mempool\" (boolean, optional) Whether to include " "the mempool. Default: true." " Note that an unspent output that is spent in the mempool " "won't appear.\n" "\nResult:\n" "{\n" " \"bestblock\" : \"hash\", (string) the block hash\n" " \"confirmations\" : n, (numeric) The number of " "confirmations\n" " \"value\" : x.xxx, (numeric) The transaction value " "in " + CURRENCY_UNIT + "\n" " \"scriptPubKey\" : { (json object)\n" " \"asm\" : \"code\", (string) \n" " \"hex\" : \"hex\", (string) \n" " \"reqSigs\" : n, (numeric) Number of required " "signatures\n" " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n" " \"addresses\" : [ (array of string) array of " "bitcoin addresses\n" " \"address\" (string) bitcoin address\n" " ,...\n" " ]\n" " },\n" " \"coinbase\" : true|false (boolean) Coinbase or not\n" "}\n" "\nExamples:\n" "\nGet unspent transactions\n" + HelpExampleCli("listunspent", "") + "\nView the details\n" + HelpExampleCli("gettxout", "\"txid\" 1") + "\nAs a json rpc call\n" + HelpExampleRpc("gettxout", "\"txid\", 1")); } LOCK(cs_main); UniValue ret(UniValue::VOBJ); std::string strTxId = request.params[0].get_str(); TxId txid(uint256S(strTxId)); int n = request.params[1].get_int(); COutPoint out(txid, n); bool fMempool = true; if (!request.params[2].isNull()) { fMempool = request.params[2].get_bool(); } Coin coin; if (fMempool) { LOCK(g_mempool.cs); CCoinsViewMemPool view(pcoinsTip.get(), g_mempool); if (!view.GetCoin(out, coin) || g_mempool.isSpent(out)) { return NullUniValue; } } else { if (!pcoinsTip->GetCoin(out, coin)) { return NullUniValue; } } const CBlockIndex *pindex = LookupBlockIndex(pcoinsTip->GetBestBlock()); ret.pushKV("bestblock", pindex->GetBlockHash().GetHex()); if (coin.GetHeight() == MEMPOOL_HEIGHT) { ret.pushKV("confirmations", 0); } else { ret.pushKV("confirmations", int64_t(pindex->nHeight - coin.GetHeight() + 1)); } ret.pushKV("value", ValueFromAmount(coin.GetTxOut().nValue)); UniValue o(UniValue::VOBJ); ScriptPubKeyToUniv(coin.GetTxOut().scriptPubKey, o, true); ret.pushKV("scriptPubKey", o); ret.pushKV("coinbase", coin.IsCoinBase()); return ret; } static UniValue verifychain(const Config &config, const JSONRPCRequest &request) { int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL); int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS); if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "verifychain ( checklevel nblocks )\n" "\nVerifies blockchain database.\n" "\nArguments:\n" "1. checklevel (numeric, optional, 0-4, default=" + strprintf("%d", nCheckLevel) + ") How thorough the block verification is.\n" "2. 
nblocks (numeric, optional, default=" + strprintf("%d", nCheckDepth) + ", 0=all) The number of blocks to check.\n" "\nResult:\n" "true|false (boolean) Verified or not\n" "\nExamples:\n" + HelpExampleCli("verifychain", "") + HelpExampleRpc("verifychain", "")); } LOCK(cs_main); if (!request.params[0].isNull()) { nCheckLevel = request.params[0].get_int(); } if (!request.params[1].isNull()) { nCheckDepth = request.params[1].get_int(); } return CVerifyDB().VerifyDB(config, pcoinsTip.get(), nCheckLevel, nCheckDepth); } UniValue getblockchaininfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockchaininfo\n" "Returns an object containing various state info regarding " "blockchain processing.\n" "\nResult:\n" "{\n" " \"chain\": \"xxxx\", (string) current network name " "as defined in BIP70 (main, test, regtest)\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"headers\": xxxxxx, (numeric) the current number of " "headers we have validated\n" " \"bestblockhash\": \"...\", (string) the hash of the " "currently best block\n" " \"difficulty\": xxxxxx, (numeric) the current " "difficulty\n" " \"mediantime\": xxxxxx, (numeric) median time for the " "current best block\n" " \"verificationprogress\": xxxx, (numeric) estimate of " "verification progress [0..1]\n" " \"initialblockdownload\": xxxx, (bool) (debug information) " "estimate of whether this node is in Initial Block Download mode.\n" " \"chainwork\": \"xxxx\" (string) total amount of work " "in active chain, in hexadecimal\n" " \"size_on_disk\": xxxxxx, (numeric) the estimated size of " "the block and undo files on disk\n" " \"pruned\": xx, (boolean) if the blocks are " "subject to pruning\n" " \"pruneheight\": xxxxxx, (numeric) lowest-height " "complete block stored (only present if pruning is enabled)\n" " \"automatic_pruning\": xx, (boolean) whether automatic " "pruning is enabled (only present if pruning is enabled)\n" " \"prune_target_size\": xxxxxx, (numeric) the target size " "used by pruning (only present if automatic pruning is enabled)\n" " \"warnings\" : \"...\", (string) any network and " "blockchain warnings.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockchaininfo", "") + HelpExampleRpc("getblockchaininfo", "")); } LOCK(cs_main); const CBlockIndex *tip = chainActive.Tip(); UniValue obj(UniValue::VOBJ); obj.pushKV("chain", config.GetChainParams().NetworkIDString()); obj.pushKV("blocks", int(chainActive.Height())); obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1); obj.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); obj.pushKV("difficulty", double(GetDifficulty(tip))); obj.pushKV("mediantime", int64_t(tip->GetMedianTimePast())); obj.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip)); obj.pushKV("initialblockdownload", IsInitialBlockDownload()); obj.pushKV("chainwork", tip->nChainWork.GetHex()); obj.pushKV("size_on_disk", CalculateCurrentUsage()); obj.pushKV("pruned", fPruneMode); if (fPruneMode) { const CBlockIndex *block = tip; assert(block); while (block->pprev && (block->pprev->nStatus.hasData())) { block = block->pprev; } obj.pushKV("pruneheight", block->nHeight); // if 0, execution bypasses the whole if block. 
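// Note (clarifying comment, not part of the original patch): -prune=1 only
// allows manual pruning via the pruneblockchain RPC; any larger value enables
// automatic pruning to the disk-space target reported as prune_target_size below.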
bool automatic_pruning = (gArgs.GetArg("-prune", 0) != 1); obj.pushKV("automatic_pruning", automatic_pruning); if (automatic_pruning) { obj.pushKV("prune_target_size", nPruneTarget); } } obj.pushKV("warnings", GetWarnings("statusbar")); return obj; } /** Comparison function for sorting the getchaintips heads. */ struct CompareBlocksByHeight { bool operator()(const CBlockIndex *a, const CBlockIndex *b) const { // Make sure that unequal blocks with the same height do not compare // equal. Use the pointers themselves to make a distinction. if (a->nHeight != b->nHeight) { return (a->nHeight > b->nHeight); } return a < b; } }; static UniValue getchaintips(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getchaintips\n" "Return information about all known tips in the block tree," " including the main chain as well as orphaned branches.\n" "\nResult:\n" "[\n" " {\n" " \"height\": xxxx, (numeric) height of the chain tip\n" " \"hash\": \"xxxx\", (string) block hash of the tip\n" " \"branchlen\": 0 (numeric) zero for main chain\n" " \"status\": \"active\" (string) \"active\" for the main " "chain\n" " },\n" " {\n" " \"height\": xxxx,\n" " \"hash\": \"xxxx\",\n" " \"branchlen\": 1 (numeric) length of branch " "connecting the tip to the main chain\n" " \"status\": \"xxxx\" (string) status of the chain " "(active, valid-fork, valid-headers, headers-only, invalid)\n" " }\n" "]\n" "Possible values for status:\n" "1. \"invalid\" This branch contains at least one " "invalid block\n" "2. \"parked\" This branch contains at least one " "parked block\n" "3. \"headers-only\" Not all blocks for this branch are " "available, but the headers are valid\n" "4. \"valid-headers\" All blocks are available for this " "branch, but they were never fully validated\n" "5. \"valid-fork\" This branch is not part of the " "active chain, but is fully validated\n" "6. \"active\" This is the tip of the active main " "chain, which is certainly valid\n" "\nExamples:\n" + HelpExampleCli("getchaintips", "") + HelpExampleRpc("getchaintips", "")); } LOCK(cs_main); /** * Idea: the set of chain tips is chainActive.tip, plus orphan blocks which * do not have another orphan building off of them. * Algorithm: * - Make one pass through mapBlockIndex, picking out the orphan blocks, * and also storing a set of the orphan block's pprev pointers. * - Iterate through the orphan blocks. If the block isn't pointed to by * another orphan, it is a chain tip. * - add chainActive.Tip() */ std::set setTips; std::set setOrphans; std::set setPrevs; for (const std::pair &item : mapBlockIndex) { if (!chainActive.Contains(item.second)) { setOrphans.insert(item.second); setPrevs.insert(item.second->pprev); } } for (std::set::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) { if (setPrevs.erase(*it) == 0) { setTips.insert(*it); } } // Always report the currently active tip. setTips.insert(chainActive.Tip()); /* Construct the output array. */ UniValue res(UniValue::VARR); for (const CBlockIndex *block : setTips) { UniValue obj(UniValue::VOBJ); obj.pushKV("height", block->nHeight); obj.pushKV("hash", block->phashBlock->GetHex()); const int branchLen = block->nHeight - chainActive.FindFork(block)->nHeight; obj.pushKV("branchlen", branchLen); std::string status; if (chainActive.Contains(block)) { // This block is part of the currently active chain. status = "active"; } else if (block->nStatus.isInvalid()) { // This block or one of its ancestors is invalid. 
status = "invalid"; } else if (block->nStatus.isOnParkedChain()) { // This block or one of its ancestors is parked. status = "parked"; } else if (block->nChainTx == 0) { // This block cannot be connected because full block data for it or // one of its parents is missing. status = "headers-only"; } else if (block->IsValid(BlockValidity::SCRIPTS)) { // This block is fully validated, but no longer part of the active // chain. It was probably the active block once, but was // reorganized. status = "valid-fork"; } else if (block->IsValid(BlockValidity::TREE)) { // The headers for this block are valid, but it has not been // validated. It was probably never part of the most-work chain. status = "valid-headers"; } else { // No clue. status = "unknown"; } obj.pushKV("status", status); res.push_back(obj); } return res; } -UniValue mempoolInfoToJSON() { +UniValue MempoolInfoToJSON(const CTxMemPool &pool) { UniValue ret(UniValue::VOBJ); - ret.pushKV("size", (int64_t)g_mempool.size()); - ret.pushKV("bytes", (int64_t)g_mempool.GetTotalTxSize()); - ret.pushKV("usage", (int64_t)g_mempool.DynamicMemoryUsage()); + ret.pushKV("size", (int64_t)pool.size()); + ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize()); + ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage()); size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.pushKV("maxmempool", (int64_t)maxmempool); - ret.pushKV("mempoolminfee", - ValueFromAmount( - std::max(g_mempool.GetMinFee(maxmempool), ::minRelayTxFee) - .GetFeePerK())); + ret.pushKV( + "mempoolminfee", + ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee) + .GetFeePerK())); ret.pushKV("minrelaytxfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())); return ret; } static UniValue getmempoolinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmempoolinfo\n" "\nReturns details on the active state of the TX memory pool.\n" "\nResult:\n" "{\n" " \"size\": xxxxx, (numeric) Current tx count\n" " \"bytes\": xxxxx, (numeric) Transaction size.\n" " \"usage\": xxxxx, (numeric) Total memory usage for " "the mempool\n" " \"maxmempool\": xxxxx, (numeric) Maximum memory usage " "for the mempool\n" " \"mempoolminfee\": xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and " "minimum mempool fee\n" " \"minrelaytxfee\": xxxxx (numeric) Current minimum relay " "fee for transactions\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolinfo", "") + HelpExampleRpc("getmempoolinfo", "")); } - return mempoolInfoToJSON(); + return MempoolInfoToJSON(::g_mempool); } static UniValue preciousblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "preciousblock \"blockhash\"\n" "\nTreats a block as if it were received before others with the " "same work.\n" "\nA later preciousblock call can override the effect of an " "earlier one.\n" "\nThe effects of preciousblock are not retained across restarts.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the hash of the block to " "mark as precious\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("preciousblock", "\"blockhash\"") + HelpExampleRpc("preciousblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CBlockIndex *pblockindex; { LOCK(cs_main); pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } } CValidationState state; PreciousBlock(config, state, pblockindex); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue finalizeblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "finalizeblock \"blockhash\"\n" "\nTreats a block as final. It cannot be reorged. Any chain\n" "that does not contain this block is invalid. Used on a less\n" "work chain, it can effectively PUTS YOU OUT OF CONSENSUS.\n" "USE WITH CAUTION!\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("finalizeblock", "\"blockhash\"") + HelpExampleRpc("finalizeblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } FinalizeBlockAndInvalidate(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } static UniValue invalidateblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "invalidateblock \"blockhash\"\n" "\nPermanently marks a block as invalid, as if it " "violated a consensus rule.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of " "the block to mark as invalid\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("invalidateblock", "\"blockhash\"") + HelpExampleRpc("invalidateblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } InvalidateBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } UniValue parkblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error("parkblock \"blockhash\"\n" "\nMarks a block as parked.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the " "hash of the block to park\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("parkblock", "\"blockhash\"") + HelpExampleRpc("parkblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; ParkBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } static UniValue reconsiderblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "reconsiderblock \"blockhash\"\n" "\nRemoves invalidity status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of invalidateblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "reconsider\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("reconsiderblock", "\"blockhash\"") + HelpExampleRpc("reconsiderblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } ResetBlockFailureFlags(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } UniValue unparkblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "unparkblock \"blockhash\"\n" "\nRemoves parked status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of parkblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "unpark\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("unparkblock", "\"blockhash\"") + HelpExampleRpc("unparkblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; UnparkBlockAndChildren(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } static UniValue getchaintxstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getchaintxstats ( nblocks blockhash )\n" "\nCompute statistics about the total number and rate of " "transactions in the chain.\n" "\nArguments:\n" "1. nblocks (numeric, optional) Size of the window in number " "of blocks (default: one month).\n" "2. 
\"blockhash\" (string, optional) The hash of the block that " "ends the window.\n" "\nResult:\n" "{\n" " \"time\": xxxxx, (numeric) The " "timestamp for the final block in the window in UNIX format.\n" " \"txcount\": xxxxx, (numeric) The total " "number of transactions in the chain up to that point.\n" " \"window_final_block_hash\": \"...\", (string) The hash of " "the final block in the window.\n" " \"window_block_count\": xxxxx, (numeric) Size of " "the window in number of blocks.\n" " \"window_tx_count\": xxxxx, (numeric) The number " "of transactions in the window. Only returned if " "\"window_block_count\" is > 0.\n" " \"window_interval\": xxxxx, (numeric) The elapsed " "time in the window in seconds. Only returned if " "\"window_block_count\" is > 0.\n" " \"txrate\": x.xx, (numeric) The average " "rate of transactions per second in the window. Only returned if " "\"window_interval\" is > 0.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getchaintxstats", "") + HelpExampleRpc("getchaintxstats", "2016")); } const CBlockIndex *pindex; // By default: 1 month int blockcount = 30 * 24 * 60 * 60 / config.GetChainParams().GetConsensus().nPowTargetSpacing; if (request.params[1].isNull()) { LOCK(cs_main); pindex = chainActive.Tip(); } else { uint256 hash = uint256S(request.params[1].get_str()); LOCK(cs_main); pindex = LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block is not in main chain"); } } assert(pindex != nullptr); if (request.params[0].isNull()) { blockcount = std::max(0, std::min(blockcount, pindex->nHeight - 1)); } else { blockcount = request.params[0].get_int(); if (blockcount < 0 || (blockcount > 0 && blockcount >= pindex->nHeight)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid block count: " "should be between 0 and " "the block's height - 1"); } } const CBlockIndex *pindexPast = pindex->GetAncestor(pindex->nHeight - blockcount); int nTimeDiff = pindex->GetMedianTimePast() - pindexPast->GetMedianTimePast(); int nTxDiff = pindex->nChainTx - pindexPast->nChainTx; UniValue ret(UniValue::VOBJ); ret.pushKV("time", int64_t(pindex->nTime)); ret.pushKV("txcount", int64_t(pindex->nChainTx)); ret.pushKV("window_final_block_hash", pindex->GetBlockHash().GetHex()); ret.pushKV("window_block_count", blockcount); if (blockcount > 0) { ret.pushKV("window_tx_count", nTxDiff); ret.pushKV("window_interval", nTimeDiff); if (nTimeDiff > 0) { ret.pushKV("txrate", double(nTxDiff) / nTimeDiff); } } return ret; } template static T CalculateTruncatedMedian(std::vector &scores) { size_t size = scores.size(); if (size == 0) { return T(); } std::sort(scores.begin(), scores.end()); if (size % 2 == 0) { return (scores[size / 2 - 1] + scores[size / 2]) / 2; } else { return scores[size / 2]; } } template static inline bool SetHasKeys(const std::set &set) { return false; } template static inline bool SetHasKeys(const std::set &set, const Tk &key, const Args &... args) { return (set.count(key) != 0) || SetHasKeys(set, args...); } // outpoint (needed for the utxo index) + nHeight + fCoinBase static constexpr size_t PER_UTXO_OVERHEAD = sizeof(COutPoint) + sizeof(uint32_t) + sizeof(bool); static UniValue getblockstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) { throw std::runtime_error( "getblockstats hash_or_height ( stats )\n" "\nCompute per block statistics for a given window. 
All amounts " "are in " + CURRENCY_UNIT + ".\n" "It won't work for some heights with pruning.\n" "It won't work without -txindex for utxo_size_inc, *fee or " "*feerate stats.\n" "\nArguments:\n" "1. \"hash_or_height\" (string or numeric, required) The block " "hash or height of the target block\n" "2. \"stats\" (array, optional) Values to plot, by " "default all values (see result below)\n" " [\n" " \"height\", (string, optional) Selected statistic\n" " \"time\", (string, optional) Selected statistic\n" " ,...\n" " ]\n" "\nResult:\n" "{ (json object)\n" " \"avgfee\": x.xxx, (numeric) Average fee in the block\n" " \"avgfeerate\": x.xxx, (numeric) Average feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"avgtxsize\": xxxxx, (numeric) Average transaction size\n" " \"blockhash\": xxxxx, (string) The block hash (to check " "for potential reorgs)\n" " \"height\": xxxxx, (numeric) The height of the block\n" " \"ins\": xxxxx, (numeric) The number of inputs " "(excluding coinbase)\n" " \"maxfee\": xxxxx, (numeric) Maximum fee in the block\n" " \"maxfeerate\": xxxxx, (numeric) Maximum feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"maxtxsize\": xxxxx, (numeric) Maximum transaction size\n" " \"medianfee\": x.xxx, (numeric) Truncated median fee in " "the block\n" " \"medianfeerate\": x.xxx, (numeric) Truncated median feerate " "(in " + CURRENCY_UNIT + " per byte)\n" " \"mediantime\": xxxxx, (numeric) The block median time " "past\n" " \"mediantxsize\": xxxxx, (numeric) Truncated median " "transaction size\n" " \"minfee\": x.xxx, (numeric) Minimum fee in the block\n" " \"minfeerate\": xx.xx, (numeric) Minimum feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"mintxsize\": xxxxx, (numeric) Minimum transaction size\n" " \"outs\": xxxxx, (numeric) The number of outputs\n" " \"subsidy\": x.xxx, (numeric) The block subsidy\n" " \"time\": xxxxx, (numeric) The block time\n" " \"total_out\": x.xxx, (numeric) Total amount in all " "outputs (excluding coinbase and thus reward [ie subsidy + " "totalfee])\n" " \"total_size\": xxxxx, (numeric) Total size of all " "non-coinbase transactions\n" " \"totalfee\": x.xxx, (numeric) The fee total\n" " \"txs\": xxxxx, (numeric) The number of " "transactions (excluding coinbase)\n" " \"utxo_increase\": xxxxx, (numeric) The increase/decrease in " "the number of unspent outputs\n" " \"utxo_size_inc\": xxxxx, (numeric) The increase/decrease in " "size for the utxo index (not discounting op_return and similar)\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'") + HelpExampleRpc("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")); } LOCK(cs_main); CBlockIndex *pindex; if (request.params[0].isNum()) { const int height = request.params[0].get_int(); const int current_tip = chainActive.Height(); if (height < 0) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Target block height %d is negative", height)); } if (height > current_tip) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Target block height %d after current tip %d", height, current_tip)); } pindex = chainActive[height]; } else { const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); pindex = LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Block is not in chain %s", Params().NetworkIDString())); } } assert(pindex != nullptr); std::set stats; if 
(!request.params[1].isNull()) { const UniValue stats_univalue = request.params[1].get_array(); for (unsigned int i = 0; i < stats_univalue.size(); i++) { const std::string stat = stats_univalue[i].get_str(); stats.insert(stat); } } const CBlock block = GetBlockChecked(config, pindex); // Calculate everything if nothing selected (default) const bool do_all = stats.size() == 0; const bool do_mediantxsize = do_all || stats.count("mediantxsize") != 0; const bool do_medianfee = do_all || stats.count("medianfee") != 0; const bool do_medianfeerate = do_all || stats.count("medianfeerate") != 0; const bool loop_inputs = do_all || do_medianfee || do_medianfeerate || SetHasKeys(stats, "utxo_size_inc", "totalfee", "avgfee", "avgfeerate", "minfee", "maxfee", "minfeerate", "maxfeerate"); const bool loop_outputs = do_all || loop_inputs || stats.count("total_out"); const bool do_calculate_size = do_mediantxsize || loop_inputs || SetHasKeys(stats, "total_size", "avgtxsize", "mintxsize", "maxtxsize"); const int64_t blockMaxSize = config.GetMaxBlockSize(); Amount maxfee = Amount::zero(); Amount maxfeerate = Amount::zero(); Amount minfee = MAX_MONEY; Amount minfeerate = MAX_MONEY; Amount total_out = Amount::zero(); Amount totalfee = Amount::zero(); int64_t inputs = 0; int64_t maxtxsize = 0; int64_t mintxsize = blockMaxSize; int64_t outputs = 0; int64_t total_size = 0; int64_t utxo_size_inc = 0; std::vector fee_array; std::vector feerate_array; std::vector txsize_array; const Consensus::Params ¶ms = config.GetChainParams().GetConsensus(); for (const auto &tx : block.vtx) { outputs += tx->vout.size(); Amount tx_total_out = Amount::zero(); if (loop_outputs) { for (const CTxOut &out : tx->vout) { tx_total_out += out.nValue; utxo_size_inc += GetSerializeSize(out, SER_NETWORK, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD; } } if (tx->IsCoinBase()) { continue; } // Don't count coinbase's fake input inputs += tx->vin.size(); // Don't count coinbase reward total_out += tx_total_out; int64_t tx_size = 0; if (do_calculate_size) { tx_size = tx->GetTotalSize(); if (do_mediantxsize) { txsize_array.push_back(tx_size); } maxtxsize = std::max(maxtxsize, tx_size); mintxsize = std::min(mintxsize, tx_size); total_size += tx_size; } if (loop_inputs) { if (!g_txindex) { throw JSONRPCError(RPC_INVALID_PARAMETER, "One or more of the selected stats requires " "-txindex enabled"); } Amount tx_total_in = Amount::zero(); for (const CTxIn &in : tx->vin) { CTransactionRef tx_in; uint256 hashBlock; if (!GetTransaction(params, in.prevout.GetTxId(), tx_in, hashBlock, false)) { throw JSONRPCError(RPC_INTERNAL_ERROR, std::string("Unexpected internal error " "(tx index seems corrupt)")); } CTxOut prevoutput = tx_in->vout[in.prevout.GetN()]; tx_total_in += prevoutput.nValue; utxo_size_inc -= GetSerializeSize(prevoutput, SER_NETWORK, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD; } Amount txfee = tx_total_in - tx_total_out; assert(MoneyRange(txfee)); if (do_medianfee) { fee_array.push_back(txfee); } maxfee = std::max(maxfee, txfee); minfee = std::min(minfee, txfee); totalfee += txfee; Amount feerate = txfee / tx_size; if (do_medianfeerate) { feerate_array.push_back(feerate); } maxfeerate = std::max(maxfeerate, feerate); minfeerate = std::min(minfeerate, feerate); } } UniValue ret_all(UniValue::VOBJ); ret_all.pushKV("avgfee", ValueFromAmount((block.vtx.size() > 1) ? totalfee / int((block.vtx.size() - 1)) : Amount::zero())); ret_all.pushKV("avgfeerate", ValueFromAmount((total_size > 0) ? 
totalfee / total_size : Amount::zero())); ret_all.pushKV("avgtxsize", (block.vtx.size() > 1) ? total_size / (block.vtx.size() - 1) : 0); ret_all.pushKV("blockhash", pindex->GetBlockHash().GetHex()); ret_all.pushKV("height", (int64_t)pindex->nHeight); ret_all.pushKV("ins", inputs); ret_all.pushKV("maxfee", ValueFromAmount(maxfee)); ret_all.pushKV("maxfeerate", ValueFromAmount(maxfeerate)); ret_all.pushKV("maxtxsize", maxtxsize); ret_all.pushKV("medianfee", ValueFromAmount(CalculateTruncatedMedian(fee_array))); ret_all.pushKV("medianfeerate", ValueFromAmount(CalculateTruncatedMedian(feerate_array))); ret_all.pushKV("mediantime", pindex->GetMedianTimePast()); ret_all.pushKV("mediantxsize", CalculateTruncatedMedian(txsize_array)); ret_all.pushKV( "minfee", ValueFromAmount((minfee == MAX_MONEY) ? Amount::zero() : minfee)); ret_all.pushKV("minfeerate", ValueFromAmount((minfeerate == MAX_MONEY) ? Amount::zero() : minfeerate)); ret_all.pushKV("mintxsize", mintxsize == blockMaxSize ? 0 : mintxsize); ret_all.pushKV("outs", outputs); ret_all.pushKV("subsidy", ValueFromAmount(GetBlockSubsidy( pindex->nHeight, Params().GetConsensus()))); ret_all.pushKV("time", pindex->GetBlockTime()); ret_all.pushKV("total_out", ValueFromAmount(total_out)); ret_all.pushKV("total_size", total_size); ret_all.pushKV("totalfee", ValueFromAmount(totalfee)); ret_all.pushKV("txs", (int64_t)block.vtx.size()); ret_all.pushKV("utxo_increase", outputs - inputs); ret_all.pushKV("utxo_size_inc", utxo_size_inc); if (do_all) { return ret_all; } UniValue ret(UniValue::VOBJ); for (const std::string &stat : stats) { const UniValue &value = ret_all[stat]; if (value.isNull()) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Invalid selected statistic %s", stat)); } ret.pushKV(stat, value); } return ret; } static UniValue savemempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("savemempool\n" "\nDumps the mempool to disk. 
It will fail " "until the previous dump is fully loaded.\n" "\nExamples:\n" + HelpExampleCli("savemempool", "") + HelpExampleRpc("savemempool", "")); } if (!g_is_mempool_loaded) { throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet"); } if (!DumpMempool()) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk"); } return NullUniValue; } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // ------------------- ------------------------ ---------------------- ---------- { "blockchain", "getbestblockhash", getbestblockhash, {} }, { "blockchain", "getblock", getblock, {"blockhash","verbosity|verbose"} }, { "blockchain", "getblockchaininfo", getblockchaininfo, {} }, { "blockchain", "getblockcount", getblockcount, {} }, { "blockchain", "getblockhash", getblockhash, {"height"} }, { "blockchain", "getblockheader", getblockheader, {"blockhash","verbose"} }, { "blockchain", "getblockstats", getblockstats, {"hash_or_height","stats"} }, { "blockchain", "getchaintips", getchaintips, {} }, { "blockchain", "getchaintxstats", getchaintxstats, {"nblocks", "blockhash"} }, { "blockchain", "getdifficulty", getdifficulty, {} }, { "blockchain", "getmempoolancestors", getmempoolancestors, {"txid","verbose"} }, { "blockchain", "getmempooldescendants", getmempooldescendants, {"txid","verbose"} }, { "blockchain", "getmempoolentry", getmempoolentry, {"txid"} }, { "blockchain", "getmempoolinfo", getmempoolinfo, {} }, { "blockchain", "getrawmempool", getrawmempool, {"verbose"} }, { "blockchain", "gettxout", gettxout, {"txid","n","include_mempool"} }, { "blockchain", "gettxoutsetinfo", gettxoutsetinfo, {} }, { "blockchain", "pruneblockchain", pruneblockchain, {"height"} }, { "blockchain", "savemempool", savemempool, {} }, { "blockchain", "verifychain", verifychain, {"checklevel","nblocks"} }, { "blockchain", "preciousblock", preciousblock, {"blockhash"} }, /* Not shown in help */ { "hidden", "getfinalizedblockhash", getfinalizedblockhash, {} }, { "hidden", "finalizeblock", finalizeblock, {"blockhash"} }, { "hidden", "invalidateblock", invalidateblock, {"blockhash"} }, { "hidden", "parkblock", parkblock, {"blockhash"} }, { "hidden", "reconsiderblock", reconsiderblock, {"blockhash"} }, { "hidden", "syncwithvalidationinterfacequeue", syncwithvalidationinterfacequeue, {} }, { "hidden", "unparkblock", unparkblock, {"blockhash"} }, { "hidden", "waitfornewblock", waitfornewblock, {"timeout"} }, { "hidden", "waitforblock", waitforblock, {"blockhash","timeout"} }, { "hidden", "waitforblockheight", waitforblockheight, {"height","timeout"} }, }; // clang-format on void RegisterBlockchainRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index 2c4fd0c9a..ae547aaed 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -1,42 +1,43 @@ // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_RPC_BLOCKCHAIN_H #define BITCOIN_RPC_BLOCKCHAIN_H #include class CBlock; class CBlockIndex; class Config; +class CTxMemPool; class JSONRPCRequest; UniValue getblockchaininfo(const Config &config, const JSONRPCRequest &request); /** * Get the required difficulty of the next block w/r/t the given block index. 
* * @return A floating point number that is a multiple of the main net minimum * difficulty (4295032833 hashes). */ double GetDifficulty(const CBlockIndex *blockindex); /** Callback for when block tip changed. */ void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex); /** Block description to JSON */ UniValue blockToJSON(const CBlock &block, const CBlockIndex *tip, const CBlockIndex *blockindex, bool txDetails = false); /** Mempool information to JSON */ -UniValue mempoolInfoToJSON(); +UniValue MempoolInfoToJSON(const CTxMemPool &pool); /** Mempool to JSON */ -UniValue mempoolToJSON(bool fVerbose = false); +UniValue MempoolToJSON(const CTxMemPool &pool, bool verbose = false); /** Block header to JSON */ UniValue blockheaderToJSON(const CBlockIndex *tip, const CBlockIndex *blockindex); #endif // BITCOIN_RPC_BLOCKCHAIN_H diff --git a/src/txmempool.cpp b/src/txmempool.cpp index cd7d22ace..c656896a2 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1,1387 +1,1387 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include // for GetConsensus. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool _spendsCoinbase, int64_t _sigOpsCount, LockPoints lp) : tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), inChainInputValue(_inChainInputValue), spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount), lockPoints(lp) { nTxSize = tx->GetTotalSize(); nModSize = tx->CalculateModifiedSize(GetTxSize()); nUsageSize = RecursiveDynamicUsage(tx); nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); nModFeesWithDescendants = nFee; Amount nValueIn = tx->GetValueOut() + nFee; assert(inChainInputValue <= nValueIn); feeDelta = Amount::zero(); nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); nModFeesWithAncestors = nFee; nSigOpCountWithAncestors = sigOpCount; } double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const { double deltaPriority = double((currentHeight - entryHeight) * (inChainInputValue / SATOSHI)) / nModSize; double dResult = entryPriority + deltaPriority; // This should only happen if it was called with a height below entry height if (dResult < 0) { dResult = 0; } return dResult; } void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) { lockPoints = lp; } // Update the given tx for any in-mempool descendants. // Assumes that setMemPoolChildren is correct for the given tx and all // descendants. 
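// Additional note (editorial): cachedDescendants is shared across the calls made
// from UpdateTransactionsFromBlock, so a descendant set already computed for one
// disconnected transaction can be reused when its ancestors are processed.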
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude) { setEntries stageEntries, setAllDescendants; stageEntries = GetMemPoolChildren(updateIt); while (!stageEntries.empty()) { const txiter cit = *stageEntries.begin(); setAllDescendants.insert(cit); stageEntries.erase(cit); const setEntries &setChildren = GetMemPoolChildren(cit); for (txiter childEntry : setChildren) { cacheMap::iterator cacheIt = cachedDescendants.find(childEntry); if (cacheIt != cachedDescendants.end()) { // We've already calculated this one, just add the entries for // this set but don't traverse again. for (txiter cacheEntry : cacheIt->second) { setAllDescendants.insert(cacheEntry); } } else if (!setAllDescendants.count(childEntry)) { // Schedule for later processing stageEntries.insert(childEntry); } } } // setAllDescendants now contains all in-mempool descendants of updateIt. // Update and add to cached descendant map int64_t modifySize = 0; int64_t modifyCount = 0; Amount modifyFee = Amount::zero(); for (txiter cit : setAllDescendants) { if (!setExclude.count(cit->GetTx().GetId())) { modifySize += cit->GetTxSize(); modifyFee += cit->GetModifiedFee(); modifyCount++; cachedDescendants[updateIt].insert(cit); // Update ancestor state for each descendant mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount())); } } mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount)); } // txidsToUpdate is the set of transaction hashes from a disconnected block // which has been re-added to the mempool. For each entry, look for descendants // that are outside txidsToUpdate, and add fee/size information for such // descendants to the parent. For each such descendant, also update the ancestor // state to include the parent. void CTxMemPool::UpdateTransactionsFromBlock( const std::vector &txidsToUpdate) { LOCK(cs); // For each entry in txidsToUpdate, store the set of in-mempool, but not // in-txidsToUpdate transactions, so that we don't have to recalculate // descendants when we come across a previously seen entry. cacheMap mapMemPoolDescendantsToUpdate; // Use a set for lookups into txidsToUpdate (these entries are already // accounted for in the state of their ancestors) std::set setAlreadyIncluded(txidsToUpdate.begin(), txidsToUpdate.end()); // Iterate in reverse, so that whenever we are looking at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const TxId &txid : reverse_iterate(txidsToUpdate)) { // we cache the in-mempool children to avoid duplicate updates setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(txid); if (it == mapTx.end()) { continue; } auto iter = mapNextTx.lower_bound(COutPoint(txid, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx. for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid; ++iter) { const TxId &childTxId = iter->second->GetId(); txiter childIter = mapTx.find(childTxId); assert(childIter != mapTx.end()); // We can skip updating entries we've encountered before or that are // in the block (which are already accounted for). 
if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childTxId)) { UpdateChild(it, childIter, true); UpdateParent(childIter, it, true); } } UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } bool CTxMemPool::CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const { setEntries parentHashes; const CTransaction &tx = entry.GetTx(); if (fSearchForParents) { // Get parents of this transaction that are in the mempool // GetMemPoolParents() is only valid for entries in the mempool, so we // iterate mapTx to find parents. for (const CTxIn &in : tx.vin) { txiter piter = mapTx.find(in.prevout.GetTxId()); if (piter == mapTx.end()) { continue; } parentHashes.insert(piter); if (parentHashes.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); return false; } } } else { // If we're not searching for parents, we require this to be an entry in // the mempool already. txiter it = mapTx.iterator_to(entry); parentHashes = GetMemPoolParents(it); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!parentHashes.empty()) { txiter stageit = *parentHashes.begin(); setAncestors.insert(stageit); parentHashes.erase(stageit); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf( "exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantSize); return false; } if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantCount); return false; } if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const setEntries &setMemPoolParents = GetMemPoolParents(stageit); for (txiter phash : setMemPoolParents) { // If this is a new ancestor, add it. if (setAncestors.count(phash) == 0) { parentHashes.insert(phash); } if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { setEntries parentIters = GetMemPoolParents(it); // add or remove this tx as a child of each parent for (txiter piter : parentIters) { UpdateChild(piter, it, add); } const int64_t updateCount = (add ? 
1 : -1); const int64_t updateSize = updateCount * it->GetTxSize(); const Amount updateFee = updateCount * it->GetModifiedFee(); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, updateCount)); } } void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) { int64_t updateCount = setAncestors.size(); int64_t updateSize = 0; int64_t updateSigOpsCount = 0; Amount updateFee = Amount::zero(); for (txiter ancestorIt : setAncestors) { updateSize += ancestorIt->GetTxSize(); updateFee += ancestorIt->GetModifiedFee(); updateSigOpsCount += ancestorIt->GetSigOpCount(); } mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, updateSigOpsCount)); } void CTxMemPool::UpdateChildrenForRemoval(txiter it) { const setEntries &setMemPoolChildren = GetMemPoolChildren(it); for (txiter updateIt : setMemPoolChildren) { UpdateParent(updateIt, it, false); } } void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) { // For each entry, walk back all ancestors and decrement size associated // with this transaction. const uint64_t nNoLimit = std::numeric_limits::max(); if (updateDescendants) { // updateDescendants should be true whenever we're not recursively // removing a tx and all its descendants, eg when a transaction is // confirmed in a block. Here we only update statistics and not data in // mapLinks (which we need to preserve until we're finished with all // operations that need to traverse the mempool). for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self int64_t modifySize = -int64_t(removeIt->GetTxSize()); Amount modifyFee = -1 * removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCount(); for (txiter dit : setDescendants) { mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. In this case, the set of // ancestors reachable via mapLinks will be the same as the set of // ancestors whose packages include this transaction, because when we // add a new transaction to the mempool in addUnchecked(), we assume it // has no children, and in the case of a reorg where that assumption is // false, the in-mempool children aren't linked to the in-block tx's // until UpdateTransactionsFromBlock() is called. So if we're being // called during a reorg, ie before UpdateTransactionsFromBlock() has // been called, then mapLinks[] will differ from the set of mempool // parents we'd calculate by searching, and it's important that we use // the mapLinks[] notion of ancestor transactions as the set of things // to update for removal. CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. 
UpdateAncestorsOf(false, removeIt, setAncestors); } // After updating all the ancestor sizes, we can now sever the link between // each transaction being removed and any mempool children (ie, update // setMemPoolParents for each direct child of a transaction being removed). for (txiter removeIt : entriesToRemove) { UpdateChildrenForRemoval(removeIt); } } void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount) { nSizeWithDescendants += modifySize; assert(int64_t(nSizeWithDescendants) > 0); nModFeesWithDescendants += modifyFee; nCountWithDescendants += modifyCount; assert(int64_t(nCountWithDescendants) > 0); } void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); nModFeesWithAncestors += modifyFee; nCountWithAncestors += modifyCount; assert(int64_t(nCountWithAncestors) > 0); nSigOpCountWithAncestors += modifySigOps; assert(int(nSigOpCountWithAncestors) >= 0); } CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) { // lock free clear _clear(); // Sanity checks off by default for performance, because otherwise accepting // transactions becomes O(N^2) where N is the number of transactions in the // pool nCheckFrequency = 0; } CTxMemPool::~CTxMemPool() {} bool CTxMemPool::isSpent(const COutPoint &outpoint) const { LOCK(cs); return mapNextTx.count(outpoint); } unsigned int CTxMemPool::GetTransactionsUpdated() const { LOCK(cs); return nTransactionsUpdated; } void CTxMemPool::AddTransactionsUpdated(unsigned int n) { LOCK(cs); nTransactionsUpdated += n; } void CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors) { NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks. indexed_transaction_set::iterator newit = mapTx.insert(entry).first; mapLinks.insert(make_pair(newit, TxLinks())); // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting into // mapTx. std::map::const_iterator pos = mapDeltas.find(hash); if (pos != mapDeltas.end()) { const TXModifier &deltas = pos->second; if (deltas.second != Amount::zero()) { mapTx.modify(newit, update_fee_delta(deltas.second)); } } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction &tx = newit->GetTx(); std::set setParentTransactions; for (const CTxIn &in : tx.vin) { mapNextTx.insert(std::make_pair(&in.prevout, &tx)); setParentTransactions.insert(in.prevout.GetTxId()); } // Don't bother worrying about child transactions of this one. Normal case // of a new transaction arriving is that there can't be any children, // because such children would be orphans. An exception to that is if a // transaction enters that used to be in a block. In that case, our // disconnect block logic will call UpdateTransactionsFromBlock to clean up // the mess we're leaving here. 
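// Note (editorial): setAncestors is supplied by the caller (typically computed
// with CalculateMemPoolAncestors during mempool acceptance), so the ancestor and
// descendant bookkeeping below does not need to re-walk the pool.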
// Update ancestors with information about this tx for (const uint256 &phash : setParentTransactions) { txiter pit = mapTx.find(phash); if (pit != mapTx.end()) { UpdateParent(newit, pit, true); } } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); vTxHashes.emplace_back(tx.GetHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { NotifyEntryRemoved(it->GetSharedTx(), reason); for (const CTxIn &txin : it->GetTx().vin) { mapNextTx.erase(txin.prevout); } if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) { vTxHashes.shrink_to_fit(); } } else { vTxHashes.clear(); } totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children); mapLinks.erase(it); mapTx.erase(it); nTransactionsUpdated++; } // Calculates descendants of entry that are not already in setDescendants, and // adds to setDescendants. Assumes entryit is already a tx in the mempool and // setMemPoolChildren is correct for tx and all descendants. Also assumes that // if an entry is in setDescendants already, then all in-mempool descendants of // it are already in setDescendants as well, so that we can save time by not // iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants) const { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have // either already been walked, or will be walked in this iteration). while (!stage.empty()) { txiter it = *stage.begin(); setDescendants.insert(it); stage.erase(it); const setEntries &setChildren = GetMemPoolChildren(it); for (txiter childiter : setChildren) { if (!setDescendants.count(childiter)) { stage.insert(childiter); } } } } void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool. LOCK(cs); setEntries txToRemove; txiter origit = mapTx.find(origTx.GetId()); if (origit != mapTx.end()) { txToRemove.insert(origit); } else { // When recursively removing but origTx isn't in the mempool be sure to // remove any children that are in the pool. This can happen during // chain re-orgs if origTx isn't re-accepted into the mempool for any // reason. for (size_t i = 0; i < origTx.vout.size(); i++) { auto it = mapNextTx.find(COutPoint(origTx.GetId(), i)); if (it == mapNextTx.end()) { continue; } txiter nextit = mapTx.find(it->second->GetId()); assert(nextit != mapTx.end()); txToRemove.insert(nextit); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, reason); } void CTxMemPool::removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) { // Remove transactions spending a coinbase which are now immature and // no-longer-final transactions. 
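// Note (editorial): nMemPoolHeight is the height at which these transactions
// would next be mined; it is compared against each spent coinbase's height in
// the maturity check below.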
LOCK(cs); setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction &tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); bool validLP = TestLockPointValidity(&lp); CValidationState state; if (!ContextualCheckTransactionForCurrentBlock( config.GetChainParams().GetConsensus(), tx, state, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be // invalid. So it's critical that we remove the tx and not depend on // the LockPoints. txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn &txin : tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { continue; } const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) { assert(!coin.IsSpent()); } if (coin.IsSpent() || (coin.IsCoinBase() && int64_t(nMemPoolHeight) - coin.GetHeight() < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively AssertLockHeld(cs); for (const CTxIn &txin : tx.vin) { auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetId()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner * fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector &vtx, unsigned int nBlockHeight) { LOCK(cs); DisconnectedBlockTransactions disconnectpool; disconnectpool.addForBlock(vtx); std::vector entries; for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { uint256 txid = tx->GetId(); indexed_transaction_set::iterator i = mapTx.find(txid); if (i != mapTx.end()) { entries.push_back(&*i); } } for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { txiter it = mapTx.find(tx->GetId()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetId()); } disconnectpool.clear(); lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapLinks.clear(); mapTx.clear(); mapNextTx.clear(); vTxHashes.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } static void CheckInputsAndUpdateCoins(const CTransaction &tx, CCoinsViewCache &mempoolDuplicate, const int64_t spendheight) { CValidationState state; Amount txfee = Amount::zero(); bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate, spendheight, txfee); assert(fCheckResult); UpdateCoins(mempoolDuplicate, tx, std::numeric_limits::max()); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { LOCK(cs); if (nCheckFrequency == 0) { return; } if (GetRand(std::numeric_limits::max()) >= nCheckFrequency) { return; } LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast(pcoins)); const int64_t spendheight = GetSpendHeight(mempoolDuplicate); std::list waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction &tx = it->GetTx(); txlinksMap::const_iterator linksiter = mapLinks.find(it); assert(linksiter != mapLinks.end()); const TxLinks &links = linksiter->second; innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children); bool fDependsWait = false; setEntries setParentCheck; for (const CTxIn &txin : tx.vin) { // Check that every mempool transaction's inputs refer to available // coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { const CTransaction &tx2 = it2->GetTx(); assert(tx2.vout.size() > txin.prevout.GetN() && !tx2.vout[txin.prevout.GetN()].IsNull()); fDependsWait = true; setParentCheck.insert(it2); } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } assert(setParentCheck == GetMemPoolParents(it)); // Verify ancestor state is correct. 
setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); Amount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCount(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCount(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCountWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPool::setEntries setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0)); uint64_t child_sizes = 0; for (; iter != mapNextTx.end() && iter->first->GetTxId() == it->GetTx().GetId(); ++iter) { txiter childit = mapTx.find(iter->second->GetId()); // mapNextTx points to in-mempool transactions assert(childit != mapTx.end()); if (setChildrenCheck.insert(childit).second) { child_sizes += childit->GetTxSize(); } } assert(setChildrenCheck == GetMemPoolChildren(it)); // Also check to make sure size is greater than sum with immediate // children. Just a sanity check, not definitive that this calc is // correct... assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize()); if (fDependsWait) { waitingOnDependants.push_back(&(*it)); } else { CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry *entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { uint256 txid = it->second->GetId(); indexed_transaction_set::const_iterator it2 = mapTx.find(txid); const CTransaction &tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb) { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hasha); if (i == mapTx.end()) { return false; } indexed_transaction_set::const_iterator j = mapTx.find(hashb); if (j == mapTx.end()) { return true; } uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a, const CTxMemPool::indexed_transaction_set::const_iterator &b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // namespace std::vector CTxMemPool::GetSortedDepthAndScore() const { std::vector iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } 
std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } -void CTxMemPool::queryHashes(std::vector &vtxid) { +void CTxMemPool::queryHashes(std::vector &vtxid) const { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetId()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize()), it->GetModifiedFee() - it->GetFee()}; } std::vector CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return nullptr; } return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return TxMempoolInfo(); } return GetInfo(i); } CFeeRate CTxMemPool::estimateFee() const { LOCK(cs); uint64_t maxMempoolSize = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; // minerPolicy uses recent blocks to figure out a reasonable fee. This // may disagree with the rollingMinimumFeerate under certain scenarios // where the mempool increases rapidly, or blocks are being mined which // do not contain propagated transactions. return std::max(::minRelayTxFee, GetMinFee(maxMempoolSize)); } void CTxMemPool::PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, const Amount nFeeDelta) { { LOCK(cs); TXModifier &deltas = mapDeltas[hash]; deltas.first += dPriorityDelta; deltas.second += nFeeDelta; txiter it = mapTx.find(hash); if (it != mapTx.end()) { mapTx.modify(it, update_fee_delta(deltas.second)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, update_ancestor_state(0, nFeeDelta, 0, 0)); } ++nTransactionsUpdated; } } LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", hash.ToString(), dPriorityDelta, FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const { LOCK(cs); std::map::const_iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) { return; } const TXModifier &deltas = pos->second; dPriorityDelta += deltas.first; nFeeDelta += deltas.second; } void CTxMemPool::ClearPrioritisation(const uint256 hash) { LOCK(cs); mapDeltas.erase(hash); } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (const CTxIn &in : tx.vin) { if (exists(in.prevout.GetTxId())) { return false; } } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an 
    // entry in the mempool exists, always return that one, as it's
    // guaranteed to never conflict with the underlying cache, and it cannot
    // have pruned entries (as it contains full transactions). First checking
    // the underlying cache risks returning a pruned entry instead.
    CTransactionRef ptx = mempool.get(outpoint.GetTxId());
    if (ptx) {
        if (outpoint.GetN() < ptx->vout.size()) {
            coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false);
            return true;
        }
        return false;
    }
    return base->GetCoin(outpoint, coin);
}

size_t CTxMemPool::DynamicMemoryUsage() const {
    LOCK(cs);
    // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no
    // exact formula for boost::multi_index_contained is implemented.
    return memusage::MallocUsage(sizeof(CTxMemPoolEntry) +
                                 12 * sizeof(void *)) *
               mapTx.size() +
           memusage::DynamicUsage(mapNextTx) +
           memusage::DynamicUsage(mapDeltas) +
           memusage::DynamicUsage(mapLinks) +
           memusage::DynamicUsage(vTxHashes) + cachedInnerUsage;
}

void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants,
                              MemPoolRemovalReason reason) {
    AssertLockHeld(cs);
    UpdateForRemoveFromMempool(stage, updateDescendants);
    for (txiter it : stage) {
        removeUnchecked(it, reason);
    }
}

int CTxMemPool::Expire(int64_t time) {
    LOCK(cs);
    indexed_transaction_set::index<entry_time>::type::iterator it =
        mapTx.get<entry_time>().begin();
    setEntries toremove;
    while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) {
        toremove.insert(mapTx.project<0>(it));
        it++;
    }

    setEntries stage;
    for (txiter removeit : toremove) {
        CalculateDescendants(removeit, stage);
    }
    RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY);
    return stage.size();
}

void CTxMemPool::LimitSize(size_t limit, unsigned long age) {
    int expired = Expire(GetTime() - age);
    if (expired != 0) {
        LogPrint(BCLog::MEMPOOL,
                 "Expired %i transactions from the memory pool\n", expired);
    }

    std::vector<COutPoint> vNoSpendsRemaining;
    TrimToSize(limit, &vNoSpendsRemaining);
    for (const COutPoint &removed : vNoSpendsRemaining) {
        pcoinsTip->Uncache(removed);
    }
}

void CTxMemPool::addUnchecked(const uint256 &hash,
                              const CTxMemPoolEntry &entry) {
    setEntries setAncestors;
    uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    std::string dummy;
    CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit,
                              nNoLimit, dummy);
    return addUnchecked(hash, entry, setAncestors);
}

void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) {
    setEntries s;
    if (add && mapLinks[entry].children.insert(child).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].children.erase(child)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}

void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) {
    setEntries s;
    if (add && mapLinks[entry].parents.insert(parent).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].parents.erase(parent)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}

const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolParents(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.parents;
}

const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolChildren(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.children;
}

CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const {
    LOCK(cs);
    if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) {
return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) { halflife /= 4; } else if (DynamicMemoryUsage() < sizelimit / 2) { halflife /= 2; } rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; } return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) { AssertLockHeld(cs); if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) { rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI; blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining) { LOCK(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(Amount::zero()); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); // We set the new mempool min fee to the feerate of the removed set, // plus the "minimum reasonable fee rate" (ie some value under which we // consider txn to have 0 fee). This way, we don't allow txn to enter // mempool with feerate equal to txn which were removed with no block in // between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += MEMPOOL_FULL_FEE_INCREMENT; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) { txn.push_back(iter->GetTx()); } } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction &tx : txn) { for (const CTxIn &txin : tx.vin) { if (exists(txin.prevout.GetTxId())) { continue; } pvNoSpendsRemaining->push_back(txin.prevout); } } } } if (maxFeeRateRemoved > CFeeRate(Amount::zero())) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const { // find parent with highest descendant count std::vector candidates; setEntries counted; candidates.push_back(entry); uint64_t maximum = 0; while (candidates.size()) { txiter candidate = candidates.back(); candidates.pop_back(); if (!counted.insert(candidate).second) { continue; } const setEntries &parents = GetMemPoolParents(candidate); if (parents.size() == 0) { maximum = std::max(maximum, candidate->GetCountWithDescendants()); } else { for (txiter i : parents) { candidates.push_back(i); } } } return maximum; } void CTxMemPool::GetTransactionAncestry(const uint256 &txid, size_t &ancestors, size_t &descendants) const { LOCK(cs); auto it = mapTx.find(txid); ancestors = descendants = 0; if (it != mapTx.end()) { ancestors = it->GetCountWithAncestors(); descendants = CalculateDescendantMaximum(it); } } SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits::max())), k1(GetRand(std::numeric_limits::max())) {} /** Maximum bytes for transactions to store for processing during reorg */ static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE; void DisconnectedBlockTransactions::addForBlock( const std::vector &vtx) { for (const auto &tx : reverse_iterate(vtx)) { // If we already added it, just skip. 
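GetMinFee() above decays the rolling minimum fee rate exponentially with a 12-hour halflife, and shortens the halflife when the pool is well under its size limit so the floor relaxes faster. A standalone sketch of just that decay, assuming a plain double fee rate in satoshis per kB rather than the real CFeeRate/Amount types:

// Illustration of the decay in CTxMemPool::GetMinFee(); names and types are
// simplified, and the real code also rate-limits updates to every 10 seconds.
#include <cmath>
#include <cstdint>
#include <cstdio>

static const int64_t ROLLING_FEE_HALFLIFE = 60 * 60 * 12; // 12 hours

// Decay the rolling minimum fee rate (sat/kB) by the time elapsed since the
// last update. The halflife is shortened when the pool is far below its size
// limit, so the floor drops faster when there is plenty of room.
double DecayedMinFeeRate(double rate, int64_t elapsedSeconds, size_t usage,
                         size_t sizelimit) {
    double halflife = ROLLING_FEE_HALFLIFE;
    if (usage < sizelimit / 4) {
        halflife /= 4;
    } else if (usage < sizelimit / 2) {
        halflife /= 2;
    }
    return rate / std::pow(2.0, double(elapsedSeconds) / halflife);
}

int main() {
    // After one (shortened) halflife the floor should be roughly halved.
    double r = DecayedMinFeeRate(1000.0, ROLLING_FEE_HALFLIFE / 4,
                                 /* usage */ 1, /* sizelimit */ 100);
    std::printf("decayed rate: %.1f sat/kB\n", r); // ~500.0
    return 0;
}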
auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { continue; } // Insert the transaction into the pool. addTransaction(tx); // Fill in the set of parents. std::unordered_set parents; for (const CTxIn &in : tx->vin) { parents.insert(in.prevout.GetTxId()); } // In order to make sure we keep things in topological order, we check // if we already know of the parent of the current transaction. If so, // we remove them from the set and then add them back. while (parents.size() > 0) { std::unordered_set worklist( std::move(parents)); parents.clear(); for (const TxId &txid : worklist) { // If we do not have that txid in the set, nothing needs to be // done. auto pit = queuedTx.find(txid); if (pit == queuedTx.end()) { continue; } // We have parent in our set, we reinsert them at the right // position. const CTransactionRef ptx = *pit; queuedTx.erase(pit); queuedTx.insert(ptx); // And we make sure ancestors are covered. for (const CTxIn &in : ptx->vin) { parents.insert(in.prevout.GetTxId()); } } } } // Keep the size under control. while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. auto it = queuedTx.get().begin(); g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); removeEntry(it); } } void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) { // addForBlock's algorithm sorts a vector of transactions back into // topological order. We use it in a separate object to create a valid // ordering of all mempool transactions, which we then splice in front of // the current queuedTx. This results in a valid sequence of transactions to // be reprocessed in updateMempoolForReorg. // We create vtx in order of the entry_time index to facilitate for // addForBlocks (which iterates in reverse order), as vtx probably end in // the correct ordering for queuedTx. std::vector vtx; { LOCK(pool.cs); vtx.reserve(pool.mapTx.size()); for (const CTxMemPoolEntry &e : pool.mapTx.get()) { vtx.push_back(e.GetSharedTx()); } pool.clear(); } // Use addForBlocks to sort the transactions and then splice them in front // of queuedTx DisconnectedBlockTransactions orderedTxnPool; orderedTxnPool.addForBlock(vtx); cachedInnerUsage += orderedTxnPool.cachedInnerUsage; queuedTx.get().splice( queuedTx.get().begin(), orderedTxnPool.queuedTx.get()); // We limit memory usage because we can't know if more blocks will be // disconnected while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry which, by definition, has no children removeEntry(queuedTx.get().begin()); } } void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector txidsUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. for (const CTransactionRef &tx : reverse_iterate(queuedTx.get())) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || tx->IsCoinBase() || !AcceptToMemoryPool(config, g_mempool, stateDummy, tx, false, nullptr, true)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). 
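The worklist loop in addForBlock() above keeps queuedTx in an order whose reverse is parents-before-children: any already-queued parent is erased and re-appended behind its child, and the same is then done for that parent's own parents. A simplified standalone model of the same idea, assuming integer txids and an explicit parent map in place of the real CTransactionRef/multi_index machinery:

#include <algorithm>
#include <cstdio>
#include <list>
#include <map>
#include <set>
#include <vector>

using TxId = int;

// Append tx, then pull every already-queued ancestor behind it so that
// iterating the queue in reverse yields parents before children.
void AddWithReorder(std::list<TxId> &queue, TxId tx,
                    const std::map<TxId, std::vector<TxId>> &parentsOf) {
    queue.push_back(tx);
    std::set<TxId> work;
    auto pit = parentsOf.find(tx);
    if (pit != parentsOf.end()) {
        work.insert(pit->second.begin(), pit->second.end());
    }
    while (!work.empty()) {
        std::set<TxId> current;
        current.swap(work);
        for (TxId parent : current) {
            auto it = std::find(queue.begin(), queue.end(), parent);
            if (it == queue.end()) {
                continue;
            }
            // Re-insert the parent at the back, and queue up its own parents.
            queue.erase(it);
            queue.push_back(parent);
            auto gp = parentsOf.find(parent);
            if (gp != parentsOf.end()) {
                work.insert(gp->second.begin(), gp->second.end());
            }
        }
    }
}

int main() {
    // 1 is spent by 2; after adding both, reverse iteration gives 1 then 2.
    std::map<TxId, std::vector<TxId>> parentsOf{{2, {1}}};
    std::list<TxId> queue;
    AddWithReorder(queue, 1, parentsOf);
    AddWithReorder(queue, 2, parentsOf);
    for (auto it = queue.rbegin(); it != queue.rend(); ++it) {
        std::printf("%d ", *it); // prints "1 2"
    }
    std::printf("\n");
    return 0;
}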
g_mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG); } else if (g_mempool.exists(tx->GetId())) { txidsUpdate.push_back(tx->GetId()); } } queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. g_mempool.UpdateTransactionsFromBlock(txidsUpdate); // We also need to remove any now-immature transactions g_mempool.removeForReorg(config, pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions g_mempool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } diff --git a/src/txmempool.h b/src/txmempool.h index e514f0c1d..ba33b91f5 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,988 +1,988 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class CBlockIndex; class Config; extern CCriticalSection cs_main; inline double AllowFreeThreshold() { return (144 * COIN) / (250 * SATOSHI); } inline bool AllowFree(double dPriority) { // Large (in bytes) low-priority (new, small-coin) transactions need a fee. return dPriority > AllowFreeThreshold(); } /** * Fake height value used in Coins to signify they are only in the memory * pool(since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; //!< ... and modified size for priority size_t nModSize; //!< ... 
and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Priority when entering the mempool double entryPriority; //!< Chain height when entering the mempool unsigned int entryHeight; //!< Sum of all txin values that are already in blockchain Amount inChainInputValue; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; //!< ... and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update from entry * priority. Only inputs that were originally in-chain will age. */ double GetPriority(unsigned int currentHeight) const; const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state. void UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state void UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. 
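Elements of a boost::multi_index container are immutable when reached through its iterators, which is why the update_* helpers that follow are function objects handed to mapTx.modify(). A toy demonstration of that pattern with a stand-in element type; ToyEntry and add_descendant_fee are illustrative names, not part of the codebase:

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <cassert>

struct ToyEntry {
    int id;
    long feesWithDescendants;
};

struct add_descendant_fee {
    explicit add_descendant_fee(long delta) : delta(delta) {}
    void operator()(ToyEntry &e) { e.feesWithDescendants += delta; }

private:
    long delta;
};

typedef boost::multi_index_container<
    ToyEntry,
    boost::multi_index::indexed_by<boost::multi_index::ordered_unique<
        boost::multi_index::member<ToyEntry, int, &ToyEntry::id>>>>
    ToySet;

int main() {
    ToySet s;
    s.insert(ToyEntry{1, 100});
    // Mutation goes through modify() with a functor, exactly like
    // mapTx.modify(it, update_descendant_state(...)).
    auto it = s.find(1);
    s.modify(it, add_descendant_fee(25));
    assert(s.find(1)->feesWithDescendants == 125);
    return 0;
}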
struct update_descendant_state { update_descendant_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { e.UpdateDescendantState(modifySize, modifyFee, modifyCount); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { update_ancestor_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount, int64_t _modifySigOpsCost) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { e.UpdateAncestorState(modifySize, modifyFee, modifyCount, modifySigOpsCost); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { explicit update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { explicit update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; -// extracts a transaction hash from CTxMempoolEntry or CTransactionRef +// extracts a transaction hash from CTxMemPoolEntry or CTransactionRef struct mempoolentry_txid { typedef uint256 result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). */ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Return the fee/size we're using for sorting this entry. void GetModFeeAndSize(const CTxMemPoolEntry &a, double &mod_fee, double &size) const { // Compare feerate with descendants to feerate of the transaction, and // return the fee/size for the max. double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); if (f2 > f1) { mod_fee = a.GetModFeesWithDescendants() / SATOSHI; size = a.GetSizeWithDescendants(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; /** \class CompareTxMemPoolEntryByScore * * Sort by feerate of entry (fee/size) in descending order * This is only used for transaction relay, so we use GetFee() * instead of GetModifiedFee() to avoid leaking prioritization * information via the sort order. 
*/ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; /** \class CompareTxMemPoolEntryByAncestorScore * * Sort an entry by min(score/size of entry's tx, score/size with all * ancestors). */ class CompareTxMemPoolEntryByAncestorFee { public: template bool operator()(const T &a, const T &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } // Return the fee/size we're using for sorting this entry. template void GetModFeeAndSize(const T &a, double &mod_fee, double &size) const { // Compare feerate with ancestors to feerate of the transaction, and // return the fee/size for the min. double f1 = a.GetSizeWithAncestors() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithAncestors() / SATOSHI); if (f1 > f2) { mod_fee = a.GetModFeesWithAncestors() / SATOSHI; size = a.GetSizeWithAncestors(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct ancestor_score {}; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //!< Manually removed or unknown reason UNKNOWN = 0, //!< Expired from mempool EXPIRY, //!< Removed in size limiting SIZELIMIT, //!< Removed for reorganization REORG, //!< Removed for block BLOCK, //!< Removed for conflict with in-block transaction CONFLICT, //!< Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const uint256 &txid) const { return SipHashUint256(k0, k1, txid); } }; typedef std::pair TXModifier; /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. 
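The comparators above avoid dividing fee by size: a/b > c/d is rewritten as a*d > c*b. The real code performs the products on doubles, which also sidesteps integer overflow for large values; a tiny sketch of the rewrite with integer inputs:

#include <cassert>
#include <cstdint>

// Returns true if fee1/size1 is a strictly better (higher) feerate than
// fee2/size2, without performing either division.
bool BetterFeeRate(int64_t fee1, int64_t size1, int64_t fee2, int64_t size2) {
    return fee1 * size2 > fee2 * size1;
}

int main() {
    // 1000 sat / 250 B (4 sat/B) beats 1500 sat / 500 B (3 sat/B).
    assert(BetterFeeRate(1000, 250, 1500, 500));
    assert(!BetterFeeRate(1500, 500, 1000, 250));
    return 0;
}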
* * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - descendant feerate [we use max(feerate of tx, feerate of tx with all * descendants)] * - time in mempool * - ancestor feerate [we use min(feerate of tx, feerate of tx with all * unconfirmed ancestors)] * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. * CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency GUARDED_BY(cs); //!< Used by getblocktemplate to trigger CreateNewBlock() invocation unsigned int nTransactionsUpdated; //!< sum of all mempool tx's virtual sizes. 
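mapTx, described above, is a single container exposing several orderings at once: lookup by txid plus ordered iteration by the various feerate scores and by entry time. A reduced sketch with just two of those views, a hashed id index and an ordered fee index; ToyTx and the index tags are illustrative, not the real types:

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <cassert>
#include <string>

struct ToyTx {
    std::string id;
    long feePerKB;
};

struct by_id {};
struct by_fee {};

typedef boost::multi_index_container<
    ToyTx,
    boost::multi_index::indexed_by<
        // O(1) lookup by txid, like mapTx's hashed_unique index.
        boost::multi_index::hashed_unique<
            boost::multi_index::tag<by_id>,
            boost::multi_index::member<ToyTx, std::string, &ToyTx::id>>,
        // Ordered iteration by feerate, like the score indices.
        boost::multi_index::ordered_non_unique<
            boost::multi_index::tag<by_fee>,
            boost::multi_index::member<ToyTx, long, &ToyTx::feePerKB>>>>
    ToyPool;

int main() {
    ToyPool pool;
    pool.insert(ToyTx{"aa", 1000});
    pool.insert(ToyTx{"bb", 5000});
    pool.insert(ToyTx{"cc", 2000});

    // Same elements, two views: by id for queries, by fee for eviction/mining.
    assert(pool.get<by_id>().count("bb") == 1);
    assert(pool.get<by_fee>().begin()->feePerKB == 1000);  // cheapest first
    assert(pool.get<by_fee>().rbegin()->feePerKB == 5000); // best feerate last
    return 0;
}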
uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate) EXCLUSIVE_LOCKS_REQUIRED(cs); public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByEntryTime>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; /** * This mutex needs to be locked when accessing `mapTx` or other members * that are guarded by it. * * @par Consistency guarantees * * By design, it is guaranteed that: * * 1. Locking both `cs_main` and `mempool.cs` will give a view of mempool * that is consistent with current chain tip (`chainActive` and * `pcoinsTip`) and is fully populated. Fully populated means that if the * current active chain is missing transactions that were present in a * previously active chain, all the missing transactions will have been * re-added to the mempool and should be present if they meet size and * consistency constraints. * * 2. Locking `mempool.cs` without `cs_main` will give a view of a mempool * consistent with some chain that was active since `cs_main` was last * locked, and that is fully populated as described above. It is ok for * code that only needs to query or remove transactions from the mempool * to lock just `mempool.cs` without `cs_main`. * * To provide these guarantees, it is necessary to lock both `cs_main` and * `mempool.cs` whenever adding transactions to the mempool and whenever * changing the chain tip. It's necessary to keep both mutexes locked until * the mempool is consistent with the new chain tip and fully populated. * * @par Consistency bug * * The second guarantee above is not currently enforced, but * https://github.com/bitcoin/bitcoin/pull/14193 will fix it. No known code * in bitcoin currently depends on second guarantee, but it is important to * fix for third party code that needs be able to frequently poll the * mempool without locking `cs_main` and without encountering missing * transactions during reorgs. 
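The locking contract described above is enforced at compile time with Clang's thread-safety analysis: members carry GUARDED_BY(cs) and methods carry EXCLUSIVE_LOCKS_REQUIRED(cs), as seen throughout this header. A self-contained sketch of how such annotations are wired up, with the macros spelled out inline since the real definitions live elsewhere in the tree; compile with clang and -Wthread-safety to see the diagnostics:

#include <mutex>

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(x) __attribute__((requires_capability(x)))
#define ACQUIRE(x) __attribute__((acquire_capability(x)))
#define RELEASE(x) __attribute__((release_capability(x)))
#else
#define CAPABILITY(x)
#define GUARDED_BY(x)
#define REQUIRES(x)
#define ACQUIRE(x)
#define RELEASE(x)
#endif

// A mutex wrapper the analysis can reason about.
class CAPABILITY("mutex") Mutex {
public:
    void lock() ACQUIRE() { m.lock(); }
    void unlock() RELEASE() { m.unlock(); }

private:
    std::mutex m;
};

class ToyPool {
public:
    mutable Mutex cs;

    // Caller must already hold cs, mirroring EXCLUSIVE_LOCKS_REQUIRED(cs).
    int SizeLocked() const REQUIRES(cs) { return nTx; }

    // Takes the lock itself before touching the guarded member.
    int Size() const {
        cs.lock();
        int n = nTx;
        cs.unlock();
        return n;
    }

private:
    int nTx GUARDED_BY(cs) = 0;
};

int main() {
    ToyPool pool;
    return pool.Size(); // reading nTx without cs held would be flagged
}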
     */
    mutable RecursiveMutex cs;
    indexed_transaction_set mapTx GUARDED_BY(cs);

    typedef indexed_transaction_set::nth_index<0>::type::iterator txiter;

    //!< All tx hashes/entries in mapTx, in random order
    std::vector<std::pair<uint256, txiter>> vTxHashes;

    struct CompareIteratorByHash {
        bool operator()(const txiter &a, const txiter &b) const {
            return a->GetTx().GetId() < b->GetTx().GetId();
        }
    };
    typedef std::set<txiter, CompareIteratorByHash> setEntries;

    const setEntries &GetMemPoolParents(txiter entry) const
        EXCLUSIVE_LOCKS_REQUIRED(cs);
    const setEntries &GetMemPoolChildren(txiter entry) const
        EXCLUSIVE_LOCKS_REQUIRED(cs);
    uint64_t CalculateDescendantMaximum(txiter entry) const
        EXCLUSIVE_LOCKS_REQUIRED(cs);

private:
    typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap;

    struct TxLinks {
        setEntries parents;
        setEntries children;
    };

    typedef std::map<txiter, TxLinks, CompareIteratorByHash> txlinksMap;
    txlinksMap mapLinks;

    void UpdateParent(txiter entry, txiter parent, bool add);
    void UpdateChild(txiter entry, txiter child, bool add);

    std::vector<indexed_transaction_set::const_iterator>
    GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs);

public:
    indirectmap<COutPoint, const CTransaction *> mapNextTx GUARDED_BY(cs);
    std::map<uint256, TXModifier> mapDeltas;

    /**
     * Create a new CTxMemPool.
     */
    CTxMemPool();
    ~CTxMemPool();

    /**
     * If sanity-checking is turned on, check makes sure the pool is consistent
     * (does not contain two transactions that spend the same inputs, all
     * inputs are in the mapNextTx array). If sanity-checking is turned off,
     * check does nothing.
     */
    void check(const CCoinsViewCache *pcoins) const;
    void setSanityCheck(double dFrequency = 1.0) {
        LOCK(cs);
        nCheckFrequency = static_cast<uint32_t>(dFrequency * 4294967295.0);
    }

    // addUnchecked must update state for all ancestors of a given transaction,
    // to track size/count of descendant transactions. First version of
    // addUnchecked can be used to have it call CalculateMemPoolAncestors(),
    // and then invoke the second version.
    // Note that addUnchecked is ONLY called from ATMP outside of tests
    // and any other callers may break wallet's in-mempool tracking (due to
    // lack of CValidationInterface::TransactionAddedToMempool callbacks).
    void addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry)
        EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
    void addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry,
                      setEntries &setAncestors)
        EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);

    void removeRecursive(
        const CTransaction &tx,
        MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN);
    void removeForReorg(const Config &config, const CCoinsViewCache *pcoins,
                        unsigned int nMemPoolHeight, int flags)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs);
    void removeForBlock(const std::vector<CTransactionRef> &vtx,
                        unsigned int nBlockHeight);

    void clear();
    // lock free
    void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs);
    bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb);
-    void queryHashes(std::vector<uint256> &vtxid);
+    void queryHashes(std::vector<uint256> &vtxid) const;
    bool isSpent(const COutPoint &outpoint) const;
    unsigned int GetTransactionsUpdated() const;
    void AddTransactionsUpdated(unsigned int n);

    /**
     * Check that none of this transaction's inputs are in the mempool, and
     * thus the tx is not dependent on other mempool transactions to be
     * included in a block.
*/ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ void PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, const Amount nFeeDelta); void ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const; void ClearPrioritisation(const uint256 hash); public: /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector &txidsToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. */ void CalculateDescendants(txiter it, setEntries &setDescendants) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. 
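CalculateDescendants(), documented above, is a straightforward worklist walk over the child links, and the "already included" check is what makes it safe to call repeatedly into the same set. A standalone sketch over a plain child map, assuming integer ids in place of mempool iterators:

#include <cassert>
#include <map>
#include <set>
#include <vector>

using TxId = int;

// Add `root` and every transitive descendant of `root` to `out`. Entries
// already present in `out` are assumed to have had their descendants added,
// so they are not revisited.
void CalcDescendants(TxId root,
                     const std::map<TxId, std::vector<TxId>> &childrenOf,
                     std::set<TxId> &out) {
    std::vector<TxId> work{root};
    while (!work.empty()) {
        TxId cur = work.back();
        work.pop_back();
        if (!out.insert(cur).second) {
            continue; // already handled
        }
        auto it = childrenOf.find(cur);
        if (it == childrenOf.end()) {
            continue;
        }
        for (TxId child : it->second) {
            work.push_back(child);
        }
    }
}

int main() {
    // 1 -> 2 -> 3, and 1 -> 4
    std::map<TxId, std::vector<TxId>> childrenOf{{1, {2, 4}}, {2, {3}}};
    std::set<TxId> out;
    CalcDescendants(1, childrenOf, out);
    assert(out == (std::set<TxId>{1, 2, 3, 4}));
    return 0;
}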
*/ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Calculate the ancestor and descendant count for the given transaction. * The counts include the transaction itself. */ void GetTransactionAncestry(const uint256 &txid, size_t &ancestors, size_t &descendants) const; unsigned long size() const { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() const { LOCK(cs); return totalTxSize; } bool exists(uint256 hash) const { LOCK(cs); return mapTx.count(hash) != 0; } CTransactionRef get(const uint256 &hash) const; TxMempoolInfo info(const uint256 &hash) const; std::vector infoAll() const; CFeeRate estimateFee() const; size_t DynamicMemoryUsage() const; boost::signals2::signal NotifyEntryAdded; boost::signals2::signal NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. */ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Update ancestors of hash to add/remove it as a descendant transaction. */ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * For each transaction being removed, update ancestors and any direct * children. If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); }; /** * CCoinsView that brings transactions from a mempool into view. * It does not check for spendings by memory pool transactions. * Instead, it provides access to all Coins which are either unspent in the * base CCoinsView, or are outputs from any mempool transaction! * This allows transaction replacement to work as expected, as you want to * have all inputs "available" to check signatures, and any cycles in the * dependency graph are checked directly in AcceptToMemoryPool. 
* It also allows you to sign a double-spend directly in * signrawtransactionwithkey and signrawtransactionwithwallet, as long as the * conflicting transaction is not yet confirmed. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; }; // We want to sort transactions by coin age priority typedef std::pair TxCoinAgePriority; struct TxCoinAgePriorityCompare { bool operator()(const TxCoinAgePriority &a, const TxCoinAgePriority &b) { if (a.first == b.first) { // Reverse order to make sort less than return CompareTxMemPoolEntryByScore()(*(b.second), *(a.second)); } return a.first < b.first; } }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. * * It also enables efficient reprocessing of current mempool entries, useful * when (de)activating forks that result in in-mempool transactions becoming * invalid */ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. // This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_contained is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Import mempool entries in topological order into queuedTx and clear the // mempool. 
    // Caller should call updateMempoolForReorg to reprocess these
    // transactions
    void importMempool(CTxMemPool &pool);

    // Add entries for a block while reconstructing the topological ordering so
    // they can be added back to the mempool simply.
    void addForBlock(const std::vector<CTransactionRef> &vtx);

    // Remove entries based on txid_index, and update memory usage.
    void removeForBlock(const std::vector<CTransactionRef> &vtx) {
        // Short-circuit in the common case of a block being added to the tip
        if (queuedTx.empty()) {
            return;
        }

        for (auto const &tx : vtx) {
            auto it = queuedTx.find(tx->GetId());
            if (it != queuedTx.end()) {
                cachedInnerUsage -= RecursiveDynamicUsage(*it);
                queuedTx.erase(it);
            }
        }
    }

    // Remove an entry by insertion_order index, and update memory usage.
    void removeEntry(indexed_disconnected_transactions::index<
                     insertion_order>::type::iterator entry) {
        cachedInnerUsage -= RecursiveDynamicUsage(*entry);
        queuedTx.get<insertion_order>().erase(entry);
    }

    bool isEmpty() const { return queuedTx.empty(); }

    void clear() {
        cachedInnerUsage = 0;
        queuedTx.clear();
    }

    /**
     * Make mempool consistent after a reorg, by re-adding or recursively
     * erasing disconnected block transactions from the mempool, and also
     * removing any other transactions from the mempool that are no longer
     * valid given the new tip/height.
     *
     * Note: we assume that disconnectpool only contains transactions that are
     * NOT confirmed in the current chain nor already in the mempool (otherwise,
     * in-mempool descendants of such transactions would be removed).
     *
     * Passing fAddToMempool=false will skip trying to add the transactions
     * back, and instead just erase from the mempool as needed.
     */
    void updateMempoolForReorg(const Config &config, bool fAddToMempool);
};

#endif // BITCOIN_TXMEMPOOL_H
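Both addForBlock() and importMempool() cap DisconnectedBlockTransactions at MAX_DISCONNECTED_TX_POOL_SIZE by evicting from the front of the insertion order until the usage fits (the real loops also remove the evicted transaction's descendants from the mempool). A reduced sketch of that bookkeeping, assuming a plain deque of (id, size) pairs in place of the multi_index container and RecursiveDynamicUsage():

#include <cassert>
#include <cstddef>
#include <deque>
#include <string>
#include <utility>

class BoundedTxQueue {
public:
    explicit BoundedTxQueue(size_t maxUsage) : maxUsage(maxUsage) {}

    // Track usage as items are added, then evict the oldest entries until the
    // total fits, mirroring the "while (DynamicMemoryUsage() > MAX_...)" loops.
    void Add(const std::string &txid, size_t usage) {
        queued.emplace_back(txid, usage);
        cachedUsage += usage;
        while (cachedUsage > maxUsage && !queued.empty()) {
            cachedUsage -= queued.front().second;
            queued.pop_front();
        }
    }

    size_t Usage() const { return cachedUsage; }
    size_t Count() const { return queued.size(); }

private:
    std::deque<std::pair<std::string, size_t>> queued;
    size_t cachedUsage = 0;
    size_t maxUsage;
};

int main() {
    BoundedTxQueue q(/* maxUsage */ 500);
    q.Add("a", 200);
    q.Add("b", 200);
    q.Add("c", 200); // pushes usage to 600, so "a" is evicted
    assert(q.Count() == 2 && q.Usage() == 400);
    return 0;
}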