diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 11d6accd8..223bad2df 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1,816 +1,816 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * Return average network hashes per second based on the last 'lookup' blocks, * or from the last difficulty change if 'lookup' is nonpositive. If 'height' is * nonnegative, compute the estimate at the time when a given block was found. */ static UniValue GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = chainActive.Tip(); if (height >= 0 && height < chainActive.Height()) { pb = chainActive[height]; } if (pb == nullptr || !pb->nHeight) { return 0; } // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) { lookup = pb->nHeight % Params().GetConsensus().DifficultyAdjustmentInterval() + 1; } // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) { lookup = pb->nHeight; } CBlockIndex *pb0 = pb; int64_t minTime = pb0->GetBlockTime(); int64_t maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64_t time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a // divide by zero exception. if (minTime == maxTime) { return 0; } arith_uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64_t timeDiff = maxTime - minTime; return workDiff.getdouble() / timeDiff; } static UniValue getnetworkhashps(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getnetworkhashps ( nblocks height )\n" "\nReturns the estimated network hashes per second based on the " "last n blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last " "difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a " "certain block was found.\n" "\nArguments:\n" "1. nblocks (numeric, optional, default=120) The number of " "blocks, or -1 for blocks since last difficulty change.\n" "2. height (numeric, optional, default=-1) To estimate at the " "time of the given height.\n" "\nResult:\n" "x (numeric) Hashes per second estimated\n" "\nExamples:\n" + HelpExampleCli("getnetworkhashps", "") + HelpExampleRpc("getnetworkhashps", "")); } LOCK(cs_main); return GetNetworkHashPS( !request.params[0].isNull() ? request.params[0].get_int() : 120, !request.params[1].isNull() ? request.params[1].get_int() : -1); } UniValue generateBlocks(const Config &config, std::shared_ptr coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript) { static const int nInnerLoopCount = 0x100000; int nHeightStart = 0; int nHeightEnd = 0; int nHeight = 0; { // Don't keep cs_main locked. 
LOCK(cs_main); nHeightStart = chainActive.Height(); nHeight = nHeightStart; nHeightEnd = nHeightStart + nGenerate; } unsigned int nExtraNonce = 0; UniValue blockHashes(UniValue::VARR); while (nHeight < nHeightEnd) { std::unique_ptr pblocktemplate( BlockAssembler(config, g_mempool) .CreateNewBlock(coinbaseScript->reserveScript)); if (!pblocktemplate.get()) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); IncrementExtraNonce(config, pblock, chainActive.Tip(), nExtraNonce); } while (nMaxTries > 0 && pblock->nNonce < nInnerLoopCount && !CheckProofOfWork(pblock->GetHash(), pblock->nBits, config)) { ++pblock->nNonce; --nMaxTries; } if (nMaxTries == 0) { break; } if (pblock->nNonce == nInnerLoopCount) { continue; } std::shared_ptr shared_pblock = std::make_shared(*pblock); if (!ProcessNewBlock(config, shared_pblock, true, nullptr)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); } ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); // Mark script as important because it was used at least for one // coinbase output if the script came from the wallet. if (keepScript) { coinbaseScript->KeepScript(); } } return blockHashes; } static UniValue generatetoaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "generatetoaddress nblocks address (maxtries)\n" "\nMine blocks immediately to a specified address (before the RPC " "call returns)\n" "\nArguments:\n" "1. nblocks (numeric, required) How many blocks are generated " "immediately.\n" "2. address (string, required) The address to send the newly " "generated bitcoin to.\n" "3. maxtries (numeric, optional) How many iterations to try " "(default = 1000000).\n" "\nResult:\n" "[ blockhashes ] (array) hashes of blocks generated\n" "\nExamples:\n" "\nGenerate 11 blocks to myaddress\n" + HelpExampleCli("generatetoaddress", "11 \"myaddress\"")); } int nGenerate = request.params[0].get_int(); uint64_t nMaxTries = 1000000; if (!request.params[2].isNull()) { nMaxTries = request.params[2].get_int(); } CTxDestination destination = DecodeDestination(request.params[1].get_str(), config.GetChainParams()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address"); } std::shared_ptr coinbaseScript = std::make_shared(); coinbaseScript->reserveScript = GetScriptForDestination(destination); return generateBlocks(config, coinbaseScript, nGenerate, nMaxTries, false); } static UniValue getmininginfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmininginfo\n" "\nReturns a json object containing mining-related information." "\nResult:\n" "{\n" " \"blocks\": nnn, (numeric) The current block\n" " \"currentblocksize\": nnn, (numeric) The last block size\n" " \"currentblocktx\": nnn, (numeric) The last block " "transaction\n" " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"networkhashps\": nnn, (numeric) The network hashes per " "second\n" " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" " \"warnings\": \"...\" (string) any network and " "blockchain warnings\n" " \"errors\": \"...\" (string) DEPRECATED. Same as " "warnings. 
Only shown when bitcoind is started with " "-deprecatedrpc=getmininginfo\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmininginfo", "") + HelpExampleRpc("getmininginfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.pushKV("blocks", int(chainActive.Height())); obj.pushKV("currentblocksize", uint64_t(nLastBlockSize)); obj.pushKV("currentblocktx", uint64_t(nLastBlockTx)); obj.pushKV("difficulty", double(GetDifficulty(chainActive.Tip()))); obj.pushKV("blockprioritypercentage", uint8_t(gArgs.GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE))); obj.pushKV("networkhashps", getnetworkhashps(config, request)); obj.pushKV("pooledtx", uint64_t(g_mempool.size())); obj.pushKV("chain", config.GetChainParams().NetworkIDString()); if (IsDeprecatedRPCEnabled(gArgs, "getmininginfo")) { obj.pushKV("errors", GetWarnings("statusbar")); } else { obj.pushKV("warnings", GetWarnings("statusbar")); } return obj; } // NOTE: Unlike wallet RPC (which use BCH values), mining RPCs follow GBT (BIP // 22) in using satoshi amounts static UniValue prioritisetransaction(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "prioritisetransaction \n" "Accepts the transaction into mined blocks at a higher (or lower) " "priority\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id.\n" "2. priority_delta (numeric, required) The priority to add or " "subtract.\n" " The transaction selection algorithm considers " "the tx as it would have a higher priority.\n" " (priority of a transaction is calculated: " "coinage * value_in_satoshis / txsize) \n" "3. fee_delta (numeric, required) The fee value (in satoshis) " "to add (or subtract, if negative).\n" " The fee is not actually paid, only the " "algorithm for selecting transactions into a block\n" " considers the transaction as it would have paid " "a higher (or lower) fee.\n" "\nResult:\n" "true (boolean) Returns true\n" "\nExamples:\n" + HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000") + HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")); } LOCK(cs_main); uint256 hash = ParseHashStr(request.params[0].get_str(), "txid"); Amount nAmount = request.params[2].get_int64() * SATOSHI; - g_mempool.PrioritiseTransaction(hash, request.params[0].get_str(), - request.params[1].get_real(), nAmount); + g_mempool.PrioritiseTransaction(hash, request.params[1].get_real(), + nAmount); return true; } // NOTE: Assumes a conclusive result; if result is inconclusive, it must be // handled by caller static UniValue BIP22ValidationResult(const Config &config, const CValidationState &state) { if (state.IsValid()) { return NullUniValue; } std::string strRejectReason = state.GetRejectReason(); if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, strRejectReason); } if (state.IsInvalid()) { if (strRejectReason.empty()) { return "rejected"; } return strRejectReason; } // Should be impossible. 
return "valid?"; } static UniValue getblocktemplate(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to " "explicitly select between the default 'template' request or a " "'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/" "bip-0009.mediawiki#getblocktemplate_changes\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object " "in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be " "set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', " "'serverlist', 'workid'\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred " "block version\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of " "current highest block\n" " \"transactions\" : [ (array) contents of " "non-coinbase transactions that should be included in the next " "block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction " "data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id " "encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded " "in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers " "\n" " n (numeric) transactions " "before this one (by 1-based index in 'transactions' list) that " "must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in " "value between transaction inputs and outputs (in Satoshis); for " "coinbase transactions, this is a negative Number of the total " "collected block fees (ie, not including the block subsidy); if " "key is not present, fee is unknown and clients MUST NOT assume " "there isn't one\n" " \"sigops\" : n, (numeric) total SigOps " "cost, as counted for purposes of block limits; if key is not " "present, sigop cost is unknown and clients MUST NOT assume it is " "zero\n" " \"required\" : true|false (boolean) if provided and " "true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that " "should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to " "be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable " "input to coinbase transaction, including the generation award and " "transaction fees (in Satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information " "for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum " "timestamp appropriate for next block time in seconds since epoch " "(Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of " "ways the block template may be changed \n" " \"value\" (string) A way the block " "template may be changed, e.g. 
'time', 'transactions', " "'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid " "nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops " "in blocks\n" " \"sizelimit\" : n, (numeric) limit of block " "size\n" " \"curtime\" : ttt, (numeric) current timestamp " "in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed " "target of next block\n" " \"height\" : n (numeric) The height of the " "next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "")); } LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set setClientRules; if (!request.params[0].isNull()) { const UniValue &oparam = request.params[0].get_obj(); const UniValue &modeval = find_value(oparam, "mode"); if (modeval.isStr()) { strMode = modeval.get_str(); } else if (modeval.isNull()) { /* Do nothing */ } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue &dataval = find_value(oparam, "data"); if (!dataval.isStr()) { throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); } CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } uint256 hash = block.GetHash(); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } return "duplicate-inconclusive"; } CBlockIndex *const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) { return "inconclusive-not-best-prevblk"; } CValidationState state; BlockValidationOptions validationOptions = BlockValidationOptions(false, true); TestBlockValidity(config, state, block, pindexPrev, validationOptions); return BIP22ValidationResult(config, state); } } if (strMode != "template") { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (!g_connman) { throw JSONRPCError( RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); } if (IsInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); } static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has // passed and there are more transactions uint256 hashWatchedChain; std::chrono::steady_clock::time_point checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, // but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = std::chrono::steady_clock::now() + std::chrono::minutes(1); WAIT_LOCK(g_best_block_mutex, lock); while (g_best_block == hashWatchedChain && IsRPCRunning()) { if (g_best_block_cv.wait_until(lock, 
checktxtime) == std::cv_status::timeout) { // Timeout: Check transactions for update if (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) { break; } checktxtime += std::chrono::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); } // TODO: Maybe recheck connections/IBD and (if something wrong) send an // expires-immediately template to stop miners? } // Update block static CBlockIndex *pindexPrev; static int64_t nStart; static std::unique_ptr pblocktemplate; if (pindexPrev != chainActive.Tip() || (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any // failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = g_mempool.GetTransactionsUpdated(); CBlockIndex *pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(config, g_mempool).CreateNewBlock(scriptDummy); if (!pblocktemplate) { throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); } // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } // pointer for convenience CBlock *pblock = &pblocktemplate->block; // Update nTime UpdateTime(pblock, config, pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); int index_in_template = 0; for (const auto &it : pblock->vtx) { const CTransaction &tx = *it; uint256 txId = tx.GetId(); if (tx.IsCoinBase()) { index_in_template++; continue; } UniValue entry(UniValue::VOBJ); entry.pushKV("data", EncodeHexTx(tx)); entry.pushKV("txid", txId.GetHex()); entry.pushKV("hash", tx.GetHash().GetHex()); entry.pushKV("fee", pblocktemplate->entries[index_in_template].fees / SATOSHI); int64_t nTxSigOps = pblocktemplate->entries[index_in_template].sigOpCount; entry.pushKV("sigops", nTxSigOps); transactions.push_back(entry); index_in_template++; } UniValue aux(UniValue::VOBJ); aux.pushKV("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end())); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.pushKV("capabilities", aCaps); result.pushKV("version", pblock->nVersion); result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex()); result.pushKV("transactions", transactions); result.pushKV("coinbaseaux", aux); result.pushKV("coinbasevalue", int64_t(pblock->vtx[0]->vout[0].nValue / SATOSHI)); result.pushKV("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast)); result.pushKV("target", hashTarget.GetHex()); result.pushKV("mintime", int64_t(pindexPrev->GetMedianTimePast()) + 1); result.pushKV("mutable", aMutable); result.pushKV("noncerange", "00000000ffffffff"); // FIXME: Allow for mining block greater than 1M. 
result.pushKV("sigoplimit", GetMaxBlockSigOpsCount(DEFAULT_MAX_BLOCK_SIZE)); result.pushKV("sizelimit", DEFAULT_MAX_BLOCK_SIZE); result.pushKV("curtime", pblock->GetBlockTime()); result.pushKV("bits", strprintf("%08x", pblock->nBits)); result.pushKV("height", int64_t(pindexPrev->nHeight) + 1); return result; } class submitblock_StateCatcher : public CValidationInterface { public: uint256 hash; bool found; CValidationState state; explicit submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {} protected: void BlockChecked(const CBlock &block, const CValidationState &stateIn) override { if (block.GetHash() != hash) { return; } found = true; state = stateIn; } }; static UniValue submitblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "submitblock \"hexdata\" ( \"jsonparametersobject\" )\n" "\nAttempts to submit new block to network.\n" "The 'jsonparametersobject' parameter is currently ignored.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n" "\nArguments\n" "1. \"hexdata\" (string, required) the hex-encoded block " "data to submit\n" "2. \"parameters\" (string, optional) object of optional " "parameters\n" " {\n" " \"workid\" : \"id\" (string, optional) if the server " "provided a workid, it MUST be included with submissions\n" " }\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("submitblock", "\"mydata\"") + HelpExampleRpc("submitblock", "\"mydata\"")); } std::shared_ptr blockptr = std::make_shared(); CBlock &block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase"); } uint256 hash = block.GetHash(); bool fBlockPresent = false; { LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } // Otherwise, we might only have the header - process the block // before returning fBlockPresent = true; } } submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); bool fAccepted = ProcessNewBlock(config, blockptr, true, nullptr); UnregisterValidationInterface(&sc); if (fBlockPresent) { if (fAccepted && !sc.found) { return "duplicate-inconclusive"; } return "duplicate"; } if (!sc.found) { return "inconclusive"; } return BIP22ValidationResult(config, sc.state); } static UniValue estimatefee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "estimatefee\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction\n" "\nResult:\n" "n (numeric) estimated fee-per-kilobyte\n" "\nExample:\n" + HelpExampleCli("estimatefee", "")); } if ((request.params.size() == 1) && !IsDeprecatedRPCEnabled(gArgs, "estimatefee")) { // FIXME: Remove this message in 0.20 throw JSONRPCError( RPC_METHOD_DEPRECATED, "estimatefee with the nblocks argument is no longer supported\n" "Please call estimatefee with no arguments instead.\n" "\nExample:\n" + HelpExampleCli("estimatefee", "")); } return ValueFromAmount(g_mempool.estimateFee().GetFeePerK()); } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // 
---------- ------------------------ ---------------------- ---------- {"mining", "getnetworkhashps", getnetworkhashps, {"nblocks", "height"}}, {"mining", "getmininginfo", getmininginfo, {}}, {"mining", "prioritisetransaction", prioritisetransaction, {"txid", "priority_delta", "fee_delta"}}, {"mining", "getblocktemplate", getblocktemplate, {"template_request"}}, {"mining", "submitblock", submitblock, {"hexdata", "parameters"}}, {"generating", "generatetoaddress", generatetoaddress, {"nblocks", "address", "maxtries"}}, {"util", "estimatefee", estimatefee, {"nblocks"}}, }; // clang-format on void RegisterMiningRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); } diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 376982c02..b4a8c298b 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1,1396 +1,1395 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include // for GetConsensus. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool _spendsCoinbase, int64_t _sigOpsCount, LockPoints lp) : tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), inChainInputValue(_inChainInputValue), spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount), lockPoints(lp) { nTxSize = tx->GetTotalSize(); nTxBillableSize = tx->GetBillableSize(); nModSize = tx->CalculateModifiedSize(GetTxSize()); nUsageSize = RecursiveDynamicUsage(tx); nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); nBillableSizeWithDescendants = GetTxBillableSize(); nModFeesWithDescendants = nFee; Amount nValueIn = tx->GetValueOut() + nFee; assert(inChainInputValue <= nValueIn); feeDelta = Amount::zero(); nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); nBillableSizeWithAncestors = GetTxBillableSize(); nModFeesWithAncestors = nFee; nSigOpCountWithAncestors = sigOpCount; } double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const { double deltaPriority = double((currentHeight - entryHeight) * (inChainInputValue / SATOSHI)) / nModSize; double dResult = entryPriority + deltaPriority; // This should only happen if it was called with a height below entry height if (dResult < 0) { dResult = 0; } return dResult; } void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) { lockPoints = lp; } // Update the given tx for any in-mempool descendants. // Assumes that setMemPoolChildren is correct for the given tx and all // descendants. 
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude) { setEntries stageEntries, setAllDescendants; stageEntries = GetMemPoolChildren(updateIt); while (!stageEntries.empty()) { const txiter cit = *stageEntries.begin(); setAllDescendants.insert(cit); stageEntries.erase(cit); const setEntries &setChildren = GetMemPoolChildren(cit); for (const txiter childEntry : setChildren) { cacheMap::iterator cacheIt = cachedDescendants.find(childEntry); if (cacheIt != cachedDescendants.end()) { // We've already calculated this one, just add the entries for // this set but don't traverse again. for (const txiter cacheEntry : cacheIt->second) { setAllDescendants.insert(cacheEntry); } } else if (!setAllDescendants.count(childEntry)) { // Schedule for later processing stageEntries.insert(childEntry); } } } // setAllDescendants now contains all in-mempool descendants of updateIt. // Update and add to cached descendant map int64_t modifySize = 0; int64_t modifyBillableSize = 0; int64_t modifyCount = 0; Amount modifyFee = Amount::zero(); for (txiter cit : setAllDescendants) { if (!setExclude.count(cit->GetTx().GetId())) { modifySize += cit->GetTxSize(); modifyBillableSize += cit->GetTxBillableSize(); modifyFee += cit->GetModifiedFee(); modifyCount++; cachedDescendants[updateIt].insert(cit); // Update ancestor state for each descendant mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), updateIt->GetTxBillableSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount())); } } mapTx.modify(updateIt, update_descendant_state(modifySize, modifyBillableSize, modifyFee, modifyCount)); } // txidsToUpdate is the set of transaction hashes from a disconnected block // which has been re-added to the mempool. For each entry, look for descendants // that are outside txidsToUpdate, and add fee/size information for such // descendants to the parent. For each such descendant, also update the ancestor // state to include the parent. void CTxMemPool::UpdateTransactionsFromBlock( const std::vector &txidsToUpdate) { LOCK(cs); // For each entry in txidsToUpdate, store the set of in-mempool, but not // in-txidsToUpdate transactions, so that we don't have to recalculate // descendants when we come across a previously seen entry. cacheMap mapMemPoolDescendantsToUpdate; // Use a set for lookups into txidsToUpdate (these entries are already // accounted for in the state of their ancestors) std::set setAlreadyIncluded(txidsToUpdate.begin(), txidsToUpdate.end()); // Iterate in reverse, so that whenever we are looking at at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const TxId &txid : reverse_iterate(txidsToUpdate)) { // we cache the in-mempool children to avoid duplicate updates setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(txid); if (it == mapTx.end()) { continue; } auto iter = mapNextTx.lower_bound(COutPoint(txid, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx. 
for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid; ++iter) { const TxId &childTxId = iter->second->GetId(); txiter childIter = mapTx.find(childTxId); assert(childIter != mapTx.end()); // We can skip updating entries we've encountered before or that are // in the block (which are already accounted for). if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childTxId)) { UpdateChild(it, childIter, true); UpdateParent(childIter, it, true); } } UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } bool CTxMemPool::CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const { LOCK(cs); setEntries parentHashes; const CTransaction &tx = entry.GetTx(); if (fSearchForParents) { // Get parents of this transaction that are in the mempool // GetMemPoolParents() is only valid for entries in the mempool, so we // iterate mapTx to find parents. for (const CTxIn &in : tx.vin) { txiter piter = mapTx.find(in.prevout.GetTxId()); if (piter == mapTx.end()) { continue; } parentHashes.insert(piter); if (parentHashes.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); return false; } } } else { // If we're not searching for parents, we require this to be an entry in // the mempool already. txiter it = mapTx.iterator_to(entry); parentHashes = GetMemPoolParents(it); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!parentHashes.empty()) { txiter stageit = *parentHashes.begin(); setAncestors.insert(stageit); parentHashes.erase(stageit); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf( "exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantSize); return false; } if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantCount); return false; } if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const setEntries &setMemPoolParents = GetMemPoolParents(stageit); for (const txiter &phash : setMemPoolParents) { // If this is a new ancestor, add it. if (setAncestors.count(phash) == 0) { parentHashes.insert(phash); } if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { setEntries parentIters = GetMemPoolParents(it); // add or remove this tx as a child of each parent for (txiter piter : parentIters) { UpdateChild(piter, it, add); } const int64_t updateCount = (add ? 
1 : -1); const int64_t updateSize = updateCount * it->GetTxSize(); const int64_t updateBillableSize = updateCount * it->GetTxBillableSize(); const Amount updateFee = updateCount * it->GetModifiedFee(); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateBillableSize, updateFee, updateCount)); } } void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) { int64_t updateCount = setAncestors.size(); int64_t updateSize = 0; int64_t updateBillableSize = 0; int64_t updateSigOpsCount = 0; Amount updateFee = Amount::zero(); for (txiter ancestorIt : setAncestors) { updateSize += ancestorIt->GetTxSize(); updateBillableSize += ancestorIt->GetTxBillableSize(); updateFee += ancestorIt->GetModifiedFee(); updateSigOpsCount += ancestorIt->GetSigOpCount(); } mapTx.modify(it, update_ancestor_state(updateSize, updateBillableSize, updateFee, updateCount, updateSigOpsCount)); } void CTxMemPool::UpdateChildrenForRemoval(txiter it) { const setEntries &setMemPoolChildren = GetMemPoolChildren(it); for (txiter updateIt : setMemPoolChildren) { UpdateParent(updateIt, it, false); } } void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) { // For each entry, walk back all ancestors and decrement size associated // with this transaction. const uint64_t nNoLimit = std::numeric_limits::max(); if (updateDescendants) { // updateDescendants should be true whenever we're not recursively // removing a tx and all its descendants, eg when a transaction is // confirmed in a block. Here we only update statistics and not data in // mapLinks (which we need to preserve until we're finished with all // operations that need to traverse the mempool). for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self int64_t modifySize = -int64_t(removeIt->GetTxSize()); int64_t modifyBillableSize = -int64_t(removeIt->GetTxBillableSize()); Amount modifyFee = -1 * removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCount(); for (txiter dit : setDescendants) { mapTx.modify( dit, update_ancestor_state(modifySize, modifyBillableSize, modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. In this case, the set of // ancestors reachable via mapLinks will be the same as the set of // ancestors whose packages include this transaction, because when we // add a new transaction to the mempool in addUnchecked(), we assume it // has no children, and in the case of a reorg where that assumption is // false, the in-mempool children aren't linked to the in-block tx's // until UpdateTransactionsFromBlock() is called. So if we're being // called during a reorg, ie before UpdateTransactionsFromBlock() has // been called, then mapLinks[] will differ from the set of mempool // parents we'd calculate by searching, and it's important that we use // the mapLinks[] notion of ancestor transactions as the set of things // to update for removal. 
CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. UpdateAncestorsOf(false, removeIt, setAncestors); } // After updating all the ancestor sizes, we can now sever the link between // each transaction being removed and any mempool children (ie, update // setMemPoolParents for each direct child of a transaction being removed). for (txiter removeIt : entriesToRemove) { UpdateChildrenForRemoval(removeIt); } } void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount) { nSizeWithDescendants += modifySize; assert(int64_t(nSizeWithDescendants) > 0); nBillableSizeWithDescendants += modifyBillableSize; assert(int64_t(nBillableSizeWithDescendants) >= 0); nModFeesWithDescendants += modifyFee; nCountWithDescendants += modifyCount; assert(int64_t(nCountWithDescendants) > 0); } void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount, int modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); nBillableSizeWithAncestors += modifyBillableSize; assert(int64_t(nBillableSizeWithAncestors) >= 0); nModFeesWithAncestors += modifyFee; nCountWithAncestors += modifyCount; assert(int64_t(nCountWithAncestors) > 0); nSigOpCountWithAncestors += modifySigOps; assert(int(nSigOpCountWithAncestors) >= 0); } CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) { // lock free clear _clear(); // Sanity checks off by default for performance, because otherwise accepting // transactions becomes O(N^2) where N is the number of transactions in the // pool nCheckFrequency = 0; } CTxMemPool::~CTxMemPool() {} bool CTxMemPool::isSpent(const COutPoint &outpoint) { LOCK(cs); return mapNextTx.count(outpoint); } unsigned int CTxMemPool::GetTransactionsUpdated() const { LOCK(cs); return nTransactionsUpdated; } void CTxMemPool::AddTransactionsUpdated(unsigned int n) { LOCK(cs); nTransactionsUpdated += n; } bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors) { NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks. LOCK(cs); indexed_transaction_set::iterator newit = mapTx.insert(entry).first; mapLinks.insert(make_pair(newit, TxLinks())); // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting into // mapTx. std::map::const_iterator pos = mapDeltas.find(hash); if (pos != mapDeltas.end()) { const TXModifier &deltas = pos->second; if (deltas.second != Amount::zero()) { mapTx.modify(newit, update_fee_delta(deltas.second)); } } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction &tx = newit->GetTx(); std::set setParentTransactions; for (const CTxIn &in : tx.vin) { mapNextTx.insert(std::make_pair(&in.prevout, &tx)); setParentTransactions.insert(in.prevout.GetTxId()); } // Don't bother worrying about child transactions of this one. Normal case // of a new transaction arriving is that there can't be any children, // because such children would be orphans. 
An exception to that is if a // transaction enters that used to be in a block. In that case, our // disconnect block logic will call UpdateTransactionsFromBlock to clean up // the mess we're leaving here. // Update ancestors with information about this tx for (const uint256 &phash : setParentTransactions) { txiter pit = mapTx.find(phash); if (pit != mapTx.end()) { UpdateParent(newit, pit, true); } } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); vTxHashes.emplace_back(tx.GetHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; return true; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { NotifyEntryRemoved(it->GetSharedTx(), reason); for (const CTxIn &txin : it->GetTx().vin) { mapNextTx.erase(txin.prevout); } if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) { vTxHashes.shrink_to_fit(); } } else { vTxHashes.clear(); } totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children); mapLinks.erase(it); mapTx.erase(it); nTransactionsUpdated++; } // Calculates descendants of entry that are not already in setDescendants, and // adds to setDescendants. Assumes entryit is already a tx in the mempool and // setMemPoolChildren is correct for tx and all descendants. Also assumes that // if an entry is in setDescendants already, then all in-mempool descendants of // it are already in setDescendants as well, so that we can save time by not // iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants) const { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have // either already been walked, or will be walked in this iteration). while (!stage.empty()) { txiter it = *stage.begin(); setDescendants.insert(it); stage.erase(it); const setEntries &setChildren = GetMemPoolChildren(it); for (const txiter &childiter : setChildren) { if (!setDescendants.count(childiter)) { stage.insert(childiter); } } } } void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool. LOCK(cs); setEntries txToRemove; txiter origit = mapTx.find(origTx.GetId()); if (origit != mapTx.end()) { txToRemove.insert(origit); } else { // When recursively removing but origTx isn't in the mempool be sure to // remove any children that are in the pool. This can happen during // chain re-orgs if origTx isn't re-accepted into the mempool for any // reason. 
for (size_t i = 0; i < origTx.vout.size(); i++) { auto it = mapNextTx.find(COutPoint(origTx.GetId(), i)); if (it == mapNextTx.end()) { continue; } txiter nextit = mapTx.find(it->second->GetId()); assert(nextit != mapTx.end()); txToRemove.insert(nextit); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, reason); } void CTxMemPool::removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) { // Remove transactions spending a coinbase which are now immature and // no-longer-final transactions. LOCK(cs); setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction &tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); bool validLP = TestLockPointValidity(&lp); CValidationState state; if (!ContextualCheckTransactionForCurrentBlock(config, tx, state, flags) || !CheckSequenceLocks(tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be // invalid. So it's critical that we remove the tx and not depend on // the LockPoints. txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn &txin : tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { continue; } const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) { assert(!coin.IsSpent()); } if (coin.IsSpent() || (coin.IsCoinBase() && int64_t(nMemPoolHeight) - coin.GetHeight() < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively LOCK(cs); for (const CTxIn &txin : tx.vin) { auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetId()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner * fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector &vtx, unsigned int nBlockHeight) { LOCK(cs); DisconnectedBlockTransactions disconnectpool; disconnectpool.addForBlock(vtx); std::vector entries; for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { uint256 txid = tx->GetId(); indexed_transaction_set::iterator i = mapTx.find(txid); if (i != mapTx.end()) { entries.push_back(&*i); } } for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { txiter it = mapTx.find(tx->GetId()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetId()); } disconnectpool.clear(); lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapLinks.clear(); mapTx.clear(); mapNextTx.clear(); vTxHashes.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { if (nCheckFrequency == 0) { return; } if (GetRand(std::numeric_limits::max()) >= nCheckFrequency) { return; } LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast(pcoins)); const int64_t nSpendHeight = GetSpendHeight(mempoolDuplicate); LOCK(cs); std::list waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction &tx = it->GetTx(); txlinksMap::const_iterator linksiter = mapLinks.find(it); assert(linksiter != mapLinks.end()); const TxLinks &links = linksiter->second; innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children); bool fDependsWait = false; setEntries setParentCheck; int64_t parentSizes = 0; int64_t parentSigOpCount = 0; for (const CTxIn &txin : tx.vin) { // Check that every mempool transaction's inputs refer to available // coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { const CTransaction &tx2 = it2->GetTx(); assert(tx2.vout.size() > txin.prevout.GetN() && !tx2.vout[txin.prevout.GetN()].IsNull()); fDependsWait = true; if (setParentCheck.insert(it2).second) { parentSizes += it2->GetTxSize(); parentSigOpCount += it2->GetSigOpCount(); } } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } assert(setParentCheck == GetMemPoolParents(it)); // Verify ancestor state is correct. 
setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); Amount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCount(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCount(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCountWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPool::setEntries setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0)); int64_t childSizes = 0; for (; iter != mapNextTx.end() && iter->first->GetTxId() == it->GetTx().GetId(); ++iter) { txiter childit = mapTx.find(iter->second->GetId()); // mapNextTx points to in-mempool transactions assert(childit != mapTx.end()); if (setChildrenCheck.insert(childit).second) { childSizes += childit->GetTxSize(); } } assert(setChildrenCheck == GetMemPoolChildren(it)); // Also check to make sure size is greater than sum with immediate // children. Just a sanity check, not definitive that this calc is // correct... assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize()); if (fDependsWait) { waitingOnDependants.push_back(&(*it)); } else { CValidationState state; bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs( tx, state, mempoolDuplicate, nSpendHeight); assert(fCheckResult); UpdateCoins(mempoolDuplicate, tx, 1000000); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry *entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); CValidationState state; if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { bool fCheckResult = entry->GetTx().IsCoinBase() || Consensus::CheckTxInputs(entry->GetTx(), state, mempoolDuplicate, nSpendHeight); assert(fCheckResult); UpdateCoins(mempoolDuplicate, entry->GetTx(), 1000000); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { uint256 txid = it->second->GetId(); indexed_transaction_set::const_iterator it2 = mapTx.find(txid); const CTransaction &tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb) { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hasha); if (i == mapTx.end()) { return false; } indexed_transaction_set::const_iterator j = mapTx.find(hashb); if (j == mapTx.end()) { return true; } uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a, const CTxMemPool::indexed_transaction_set::const_iterator &b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return 
CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // namespace std::vector CTxMemPool::GetSortedDepthAndScore() const { std::vector iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } void CTxMemPool::queryHashes(std::vector &vtxid) { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetId()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxBillableSize()), it->GetModifiedFee() - it->GetFee()}; } std::vector CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return nullptr; } return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return TxMempoolInfo(); } return GetInfo(i); } CFeeRate CTxMemPool::estimateFee() const { LOCK(cs); uint64_t maxMempoolSize = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; // minerPolicy uses recent blocks to figure out a reasonable fee. This // may disagree with the rollingMinimumFeerate under certain scenarios // where the mempool increases rapidly, or blocks are being mined which // do not contain propagated transactions. 
return std::max(GetConfig().GetMinFeePerKB(), GetMinFee(maxMempoolSize)); } -void CTxMemPool::PrioritiseTransaction(const uint256 hash, - const std::string strHash, +void CTxMemPool::PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, const Amount nFeeDelta) { { LOCK(cs); TXModifier &deltas = mapDeltas[hash]; deltas.first += dPriorityDelta; deltas.second += nFeeDelta; txiter it = mapTx.find(hash); if (it != mapTx.end()) { mapTx.modify(it, update_fee_delta(deltas.second)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(0, 0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, update_ancestor_state(0, 0, nFeeDelta, 0, 0)); } } } - LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash, - dPriorityDelta, FormatMoney(nFeeDelta)); + LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", + hash.ToString(), dPriorityDelta, FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const { LOCK(cs); std::map::const_iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) { return; } const TXModifier &deltas = pos->second; dPriorityDelta += deltas.first; nFeeDelta += deltas.second; } void CTxMemPool::ClearPrioritisation(const uint256 hash) { LOCK(cs); mapDeltas.erase(hash); } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (const CTxIn &in : tx.vin) { if (exists(in.prevout.GetTxId())) { return false; } } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an entry in the mempool exists, always return that one, as it's // guaranteed to never conflict with the underlying cache, and it cannot // have pruned entries (as it contains full) transactions. First checking // the underlying cache risks returning a pruned entry instead. CTransactionRef ptx = mempool.get(outpoint.GetTxId()); if (ptx) { if (outpoint.GetN() < ptx->vout.size()) { coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false); return true; } return false; } return base->GetCoin(outpoint, coin) && !coin.IsSpent(); } bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const { return mempool.exists(outpoint) || base->HaveCoin(outpoint); } size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no // exact formula for boost::multi_index_contained is implemented. 
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void *)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); for (const txiter &it : stage) { removeUnchecked(it, reason); } } int CTxMemPool::Expire(int64_t time) { LOCK(cs); indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); setEntries toremove; while (it != mapTx.get().end() && it->GetTime() < time) { toremove.insert(mapTx.project<0>(it)); it++; } setEntries stage; for (txiter removeit : toremove) { CalculateDescendants(removeit, stage); } RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY); return stage.size(); } void CTxMemPool::LimitSize(size_t limit, unsigned long age) { int expired = Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector vNoSpendsRemaining; TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint &removed : vNoSpendsRemaining) { pcoinsTip->Uncache(removed); } } bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry) { LOCK(cs); setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); return addUnchecked(hash, entry, setAncestors); } void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) { setEntries s; if (add && mapLinks[entry].children.insert(child).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].children.erase(child)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) { setEntries s; if (add && mapLinks[entry].parents.insert(parent).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].parents.erase(parent)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolParents(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.parents; } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolChildren(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.children; } CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { LOCK(cs); if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) { return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) { halflife /= 4; } else if (DynamicMemoryUsage() < sizelimit / 2) { halflife /= 2; } rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; } return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) { AssertLockHeld(cs); if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) { rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI; 
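// A standalone sketch of the decay performed by GetMinFee() above: the rolling
// minimum fee rate halves every ROLLING_FEE_HALFLIFE seconds since the last
// update, and decays two or four times as fast while the mempool sits below
// half or a quarter of its size limit. Units here are assumptions (feerate as
// a double in satoshis per kB, times in seconds).
#include <cmath>
#include <cstddef>
#include <cstdint>

double DecayRollingMinimum(double rollingMinimumFeeRate, int64_t now,
                           int64_t lastUpdate, std::size_t usage,
                           std::size_t sizelimit,
                           double halflife /* e.g. 60 * 60 * 12 */) {
    if (usage < sizelimit / 4) {
        halflife /= 4; // nearly empty pool: let the floor fall quickly
    } else if (usage < sizelimit / 2) {
        halflife /= 2;
    }
    return rollingMinimumFeeRate / std::pow(2.0, (now - lastUpdate) / halflife);
}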
blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining) { LOCK(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(Amount::zero()); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); // We set the new mempool min fee to the feerate of the removed set, // plus the "minimum reasonable fee rate" (ie some value under which we // consider txn to have 0 fee). This way, we don't allow txn to enter // mempool with feerate equal to txn which were removed with no block in // between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += MEMPOOL_FULL_FEE_INCREMENT; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) { txn.push_back(iter->GetTx()); } } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction &tx : txn) { for (const CTxIn &txin : tx.vin) { if (exists(txin.prevout.GetTxId())) { continue; } if (!mapNextTx.count(txin.prevout)) { pvNoSpendsRemaining->push_back(txin.prevout); } } } } } if (maxFeeRateRemoved > CFeeRate(Amount::zero())) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } bool CTxMemPool::TransactionWithinChainLimit(const uint256 &txid, size_t chainLimit) const { LOCK(cs); auto it = mapTx.find(txid); return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit && it->GetCountWithDescendants() < chainLimit); } SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits::max())), k1(GetRand(std::numeric_limits::max())) {} /** Maximum bytes for transactions to store for processing during reorg */ static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE; void DisconnectedBlockTransactions::addForBlock( const std::vector &vtx) { for (const auto &tx : reverse_iterate(vtx)) { // If we already added it, just skip. auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { continue; } // Insert the transaction into the pool. addTransaction(tx); // Fill in the set of parents. std::unordered_set parents; for (const CTxIn &in : tx->vin) { parents.insert(in.prevout.GetTxId()); } // In order to make sure we keep things in topological order, we check // if we already know of the parent of the current transaction. If so, // we remove them from the set and then add them back. while (parents.size() > 0) { std::unordered_set worklist( std::move(parents)); parents.clear(); for (const TxId &txid : worklist) { // If we do not have that txid in the set, nothing needs to be // done. auto pit = queuedTx.find(txid); if (pit == queuedTx.end()) { continue; } // We have parent in our set, we reinsert them at the right // position. const CTransactionRef ptx = *pit; queuedTx.erase(pit); queuedTx.insert(ptx); // And we make sure ancestors are covered. for (const CTxIn &in : ptx->vin) { parents.insert(in.prevout.GetTxId()); } } } } // Keep the size under control. while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. 
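// A standalone sketch of the fee bump described in TrimToSize() above: the
// evicted package's feerate (modified fees with descendants over size with
// descendants) plus MEMPOOL_FULL_FEE_INCREMENT becomes the new rolling floor,
// so a later transaction cannot re-enter at the same feerate that was just
// evicted. Simplified units are an assumption: fees in satoshis, sizes in
// bytes, feerates in satoshis per kB.
#include <algorithm>
#include <cstdint>

int64_t PackageFeeRatePerKB(int64_t modFeesWithDescendants, int64_t sizeWithDescendants) {
    // In the spirit of CFeeRate(fee, size): scale to a per-1000-byte rate.
    return modFeesWithDescendants * 1000 / sizeWithDescendants;
}

int64_t BumpRollingMinimum(int64_t currentRollingMinPerKB, int64_t evictedRatePerKB,
                           int64_t incrementPerKB) {
    // trackPackageRemoved() only ever raises the floor, never lowers it.
    return std::max(currentRollingMinPerKB, evictedRatePerKB + incrementPerKB);
}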
auto it = queuedTx.get().begin(); g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); removeEntry(it); } } void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) { // addForBlock's algorithm sorts a vector of transactions back into // topological order. We use it in a separate object to create a valid // ordering of all mempool transactions, which we then splice in front of // the current queuedTx. This results in a valid sequence of transactions to // be reprocessed in updateMempoolForReorg. // We create vtx in order of the entry_time index to facilitate for // addForBlocks (which iterates in reverse order), as vtx probably end in // the correct ordering for queuedTx. std::vector vtx; { LOCK(pool.cs); vtx.reserve(pool.mapTx.size()); for (const CTxMemPoolEntry &e : pool.mapTx.get()) { vtx.push_back(e.GetSharedTx()); } pool.clear(); } // Use addForBlocks to sort the transactions and then splice them in front // of queuedTx DisconnectedBlockTransactions orderedTxnPool; orderedTxnPool.addForBlock(vtx); cachedInnerUsage += orderedTxnPool.cachedInnerUsage; queuedTx.get().splice( queuedTx.get().begin(), orderedTxnPool.queuedTx.get()); // We limit memory usage because we can't know if more blocks will be // disconnected while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry which, by definition, has no children removeEntry(queuedTx.get().begin()); } } void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector txidsUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. for (const CTransactionRef &tx : reverse_iterate(queuedTx.get())) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || tx->IsCoinBase() || !AcceptToMemoryPool(config, g_mempool, stateDummy, tx, false, nullptr, true)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). g_mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG); } else if (g_mempool.exists(tx->GetId())) { txidsUpdate.push_back(tx->GetId()); } } queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. 
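// A standalone sketch of the ordering used by importMempool() and
// updateMempoolForReorg() above: an already topologically ordered batch is
// spliced in front of the queue, and the queue is then walked in reverse
// insertion order so parents are resubmitted before the children that spend
// them. std::list stands in for the sequenced insertion_order index, and the
// element names are hypothetical.
#include <iostream>
#include <list>
#include <string>

int main() {
    // Insertion order puts newer (child) entries nearer the front.
    std::list<std::string> queuedTxSketch{"blockTxChild", "blockTxParent"};
    std::list<std::string> fromMempoolSketch{"mempoolChild", "mempoolParent"};

    // Counterpart of queuedTx.get<insertion_order>().splice(begin, other):
    // the mempool batch lands in front of the disconnected block entries.
    queuedTxSketch.splice(queuedTxSketch.begin(), fromMempoolSketch);

    // Reverse walk: previously confirmed parents come out first, then their
    // children, then the old mempool contents.
    for (auto it = queuedTxSketch.rbegin(); it != queuedTxSketch.rend(); ++it) {
        std::cout << *it << '\n';
    }
    return 0;
}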
g_mempool.UpdateTransactionsFromBlock(txidsUpdate); // We also need to remove any now-immature transactions g_mempool.removeForReorg(config, pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions g_mempool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } diff --git a/src/txmempool.h b/src/txmempool.h index fae2e149e..a115311a2 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,945 +1,945 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class CAutoFile; class CBlockIndex; class Config; inline double AllowFreeThreshold() { return (144 * COIN) / (250 * SATOSHI); } inline bool AllowFree(double dPriority) { // Large (in bytes) low-priority (new, small-coin) transactions need a fee. return dPriority > AllowFreeThreshold(); } /** * Fake height value used in Coins to signify they are only in the memory * pool(since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. * * If updating the descendant state is skipped, we can mark the entry as * "dirty", and set nSizeWithDescendants/nModFeesWithDescendants to equal * nTxSize/nFee+feeDelta. (This can potentially happen during a reorg, where we * limit the amount of work we're willing to do to avoid consuming too much * CPU.) */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; //!< ... and billable size for billing size_t nTxBillableSize; //!< ... and modified size for priority size_t nModSize; //!< ... 
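// A numeric sketch of AllowFreeThreshold() above. With 1 COIN equal to
// 100,000,000 satoshis (an assumption spelled out here rather than pulled from
// the amount headers), the threshold corresponds to one coin of input value
// aged 144 blocks (roughly a day) on a 250-byte transaction; priority is
// roughly sum(input value in satoshis * input age in blocks) / tx size.
#include <cassert>

int main() {
    const double COIN_SATS = 100000000.0;
    // (144 * COIN) / (250 * SATOSHI) reduces to this ratio.
    const double allowFreeThreshold = 144 * COIN_SATS / 250; // 57,600,000
    assert(allowFreeThreshold == 57600000.0);

    // A 250-byte tx spending exactly 1 coin aged exactly 144 blocks sits at
    // the threshold, and AllowFree() requires strictly greater priority.
    const double dPriority = (1.0 * COIN_SATS) * 144 / 250;
    assert(!(dPriority > allowFreeThreshold));
    return 0;
}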
and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Priority when entering the mempool double entryPriority; //!< Chain height when entering the mempool unsigned int entryHeight; //!< Sum of all txin values that are already in blockchain Amount inChainInputValue; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. if nCountWithDescendants is 0, treat this entry as // dirty, and nSizeWithDescendants and nModFeesWithDescendants will not be // correct. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; uint64_t nBillableSizeWithDescendants; //!< ... and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; uint64_t nBillableSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update from entry * priority. Only inputs that were originally in-chain will age. */ double GetPriority(unsigned int currentHeight) const; const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } size_t GetTxBillableSize() const { return nTxBillableSize; } int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state, if this entry is not dirty. void UpdateDescendantState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state void UpdateAncestorState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount, int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. 
void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } uint64_t GetBillableSizeWithDescendants() const { return nBillableSizeWithDescendants; } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } uint64_t GetBillableSizeWithAncestors() const { return nBillableSizeWithAncestors; } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. struct update_descendant_state { update_descendant_state(int64_t _modifySize, int64_t _modifyBillableSize, Amount _modifyFee, int64_t _modifyCount) : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { e.UpdateDescendantState(modifySize, modifyBillableSize, modifyFee, modifyCount); } private: int64_t modifySize; int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { update_ancestor_state(int64_t _modifySize, int64_t _modifyBillableSize, Amount _modifyFee, int64_t _modifyCount, int64_t _modifySigOpsCost) : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), modifyFee(_modifyFee), modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { e.UpdateAncestorState(modifySize, modifyBillableSize, modifyFee, modifyCount, modifySigOpsCost); } private: int64_t modifySize; int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { explicit update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { explicit update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; // extracts a transaction hash from CTxMempoolEntry or CTransactionRef struct mempoolentry_txid { typedef uint256 result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). */ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { bool fUseADescendants = UseDescendantScore(a); bool fUseBDescendants = UseDescendantScore(b); double aModFee = (fUseADescendants ? a.GetModFeesWithDescendants() : a.GetModifiedFee()) / SATOSHI; double aSize = fUseADescendants ? a.GetSizeWithDescendants() : a.GetTxSize(); double bModFee = (fUseBDescendants ? b.GetModFeesWithDescendants() : b.GetModifiedFee()) / SATOSHI; double bSize = fUseBDescendants ? b.GetSizeWithDescendants() : b.GetTxSize(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). 
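// A standalone sketch of the cross-multiplication trick named in the comment
// above: comparing fee/size ratios as products avoids the division (and any
// divide-by-zero concern) while preserving the ordering for positive sizes.
#include <cassert>

// Returns true when feeA/sizeA > feeB/sizeB, assuming both sizes are positive.
bool FeeRateGreater(double feeA, double sizeA, double feeB, double sizeB) {
    // (a/b > c/d)  <=>  (a*d > c*b)  when b and d are positive.
    return feeA * sizeB > feeB * sizeA;
}

int main() {
    // 500 sat / 200 bytes (2.5 sat/B) beats 1000 sat / 500 bytes (2.0 sat/B).
    assert(FeeRateGreater(500, 200, 1000, 500));
    assert(!FeeRateGreater(1000, 500, 500, 200));
    return 0;
}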
double f1 = aModFee * bSize; double f2 = aSize * bModFee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Calculate which score to use for an entry (avoiding division). bool UseDescendantScore(const CTxMemPoolEntry &a) const { double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); return f2 > f1; } }; /** \class CompareTxMemPoolEntryByScore * * Sort by score of entry ((fee+delta)/size) in descending order */ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetModifiedFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; class CompareTxMemPoolEntryByAncestorFee { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double aFees = a.GetModFeesWithAncestors() / SATOSHI; double aSize = a.GetSizeWithAncestors(); double bFees = b.GetModFeesWithAncestors() / SATOSHI; double bSize = b.GetSizeWithAncestors(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = aFees * bSize; double f2 = aSize * bFees; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct mining_score {}; struct ancestor_score {}; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //! Manually removed or unknown reason UNKNOWN = 0, //! Expired from mempool EXPIRY, //! Removed in size limiting SIZELIMIT, //! Removed for reorganization REORG, //! Removed for block BLOCK, //! Removed for conflict with in-block transaction CONFLICT, //! Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const uint256 &txid) const { return SipHashUint256(k0, k1, txid); } }; typedef std::pair TXModifier; /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. 
* * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - feerate [we use max(feerate of tx, feerate of tx with all descendants)] * - time in mempool * - mining score (feerate modified by any fee deltas from * PrioritiseTransaction) * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. * CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. * * Adding transactions from a disconnected block can be very time consuming, * because we don't have a way to limit the number of in-mempool descendants. 
To * bound CPU processing, we limit the amount of work we're willing to do to * properly update the descendant information for a tx being added from a * disconnected block. If we would exceed the limit, then we instead mark the * entry as "dirty", and set the feerate for sorting purposes to be equal the * feerate of the transaction without any descendants. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency; unsigned int nTransactionsUpdated; //!< sum of all mempool tx's virtual sizes. uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate); public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByEntryTime>, // sorted by score (for mining prioritization) boost::multi_index::ordered_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByScore>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; mutable CCriticalSection cs; indexed_transaction_set mapTx; typedef indexed_transaction_set::nth_index<0>::type::iterator txiter; //!< All tx hashes/entries in mapTx, in random order std::vector> vTxHashes; struct CompareIteratorByHash { bool operator()(const txiter &a, const txiter &b) const { return a->GetTx().GetId() < b->GetTx().GetId(); } }; typedef std::set setEntries; const setEntries &GetMemPoolParents(txiter entry) const; const setEntries &GetMemPoolChildren(txiter entry) const; private: typedef std::map cacheMap; struct TxLinks { setEntries parents; setEntries children; }; typedef std::map txlinksMap; txlinksMap mapLinks; void UpdateParent(txiter entry, txiter parent, bool add); void UpdateChild(txiter entry, txiter child, bool add); std::vector GetSortedDepthAndScore() const; public: indirectmap mapNextTx; std::map mapDeltas; /** Create a new CTxMemPool. */ CTxMemPool(); ~CTxMemPool(); /** * If sanity-checking is turned on, check makes sure the pool is consistent * (does not contain two transactions that spend the same inputs, all inputs * are in the mapNextTx array). If sanity-checking is turned off, check does * nothing. */ void check(const CCoinsViewCache *pcoins) const; void setSanityCheck(double dFrequency = 1.0) { nCheckFrequency = dFrequency * 4294967295.0; } // addUnchecked must updated state for all ancestors of a given transaction, // to track size/count of descendant transactions. First version of // addUnchecked can be used to have it call CalculateMemPoolAncestors(), and // then invoke the second version. 
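// A minimal standalone boost::multi_index_container with tagged indices, in
// the same shape as the indexed_transaction_set declared above: one hashed
// index for lookup by id (like mapTx.find()) and one ordered index for
// fee-sorted iteration (like mapTx.get<descendant_score>()). The struct, tag
// names, and values are hypothetical.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/tag.hpp>
#include <cstdint>
#include <iostream>
#include <string>

namespace bmi = boost::multi_index;

struct EntrySketch {
    std::string id;
    int64_t feePerKB;
};

// Index tags, mirroring descendant_score / entry_time / ancestor_score above.
struct by_id {};
struct by_fee {};

using EntrySetSketch = bmi::multi_index_container<
    EntrySketch,
    bmi::indexed_by<
        bmi::hashed_unique<bmi::tag<by_id>,
                           bmi::member<EntrySketch, std::string, &EntrySketch::id>>,
        bmi::ordered_non_unique<bmi::tag<by_fee>,
                                bmi::member<EntrySketch, int64_t, &EntrySketch::feePerKB>>>>;

int main() {
    EntrySetSketch entries;
    entries.insert(EntrySketch{"txA", 1000});
    entries.insert(EntrySketch{"txB", 250});
    entries.insert(EntrySketch{"txC", 4000});

    // Lookup by id through the hashed index.
    auto it = entries.get<by_id>().find("txB");
    if (it != entries.get<by_id>().end()) {
        std::cout << "found " << it->id << " at " << it->feePerKB << " sat/kB\n";
    }

    // Walk the fee-sorted view, the counterpart of a tagged mapTx index.
    for (const auto &e : entries.get<by_fee>()) {
        std::cout << e.id << ": " << e.feePerKB << "\n";
    }
    return 0;
}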
// Note that addUnchecked is ONLY called from ATMP outside of tests // and any other callers may break wallet's in-mempool tracking (due to // lack of CValidationInterface::TransactionAddedToMempool callbacks). bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry); bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors); void removeRecursive( const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags); void removeConflicts(const CTransaction &tx); void removeForBlock(const std::vector &vtx, unsigned int nBlockHeight); void clear(); // lock free void _clear(); bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb); void queryHashes(std::vector &vtxid); bool isSpent(const COutPoint &outpoint); unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** * Check that none of this transactions inputs are in the mempool, and thus * the tx is not dependent on other mempool transactions to be included in a * block. */ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ - void PrioritiseTransaction(const uint256 hash, const std::string strHash, - double dPriorityDelta, const Amount nFeeDelta); + void PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, + const Amount nFeeDelta); void ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const; void ClearPrioritisation(const uint256 hash); public: /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector &txidsToUpdate); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. 
Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true) const; /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. */ void CalculateDescendants(txiter it, setEntries &setDescendants) const; /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. */ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Returns false if the transaction is in the mempool and not within the * chain limit specified. */ bool TransactionWithinChainLimit(const uint256 &txid, size_t chainLimit) const; unsigned long size() { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() const { LOCK(cs); return totalTxSize; } bool exists(uint256 hash) const { LOCK(cs); return mapTx.count(hash) != 0; } bool exists(const COutPoint &outpoint) const { LOCK(cs); auto it = mapTx.find(outpoint.GetTxId()); return it != mapTx.end() && outpoint.GetN() < it->GetTx().vout.size(); } CTransactionRef get(const uint256 &hash) const; TxMempoolInfo info(const uint256 &hash) const; std::vector infoAll() const; CFeeRate estimateFee() const; size_t DynamicMemoryUsage() const; boost::signals2::signal NotifyEntryAdded; boost::signals2::signal NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. */ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude); /** * Update ancestors of hash to add/remove it as a descendant transaction. */ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors); /** * For each transaction being removed, update ancestors and any direct * children. 
If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); }; /** * CCoinsView that brings transactions from a memorypool into view. * It does not check for spendings by memory pool transactions. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; }; // We want to sort transactions by coin age priority typedef std::pair TxCoinAgePriority; struct TxCoinAgePriorityCompare { bool operator()(const TxCoinAgePriority &a, const TxCoinAgePriority &b) { if (a.first == b.first) { // Reverse order to make sort less than return CompareTxMemPoolEntryByScore()(*(b.second), *(a.second)); } return a.first < b.first; } }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. * * It also enables efficient reprocessing of current mempool entries, useful * when (de)activating forks that result in in-mempool transactions becoming * invalid */ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. 
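// A standalone sketch of the memory accounting DisconnectedBlockTransactions
// relies on above: each queued transaction contributes an estimated container
// overhead plus its own recursive usage (cachedInnerUsage), and the oldest
// entries are evicted while the total exceeds the cap
// (MAX_DISCONNECTED_TX_POOL_SIZE in the real code). std::deque, the field
// names, and the per-entry overhead constant are simplifications.
#include <cstddef>
#include <deque>
#include <string>
#include <utility>

struct QueuedTxSketch {
    std::string id;
    std::size_t recursiveUsage; // stand-in for RecursiveDynamicUsage(tx)
};

struct DisconnectedPoolSketch {
    std::deque<QueuedTxSketch> queued; // oldest entry at the front
    std::size_t cachedInnerUsage = 0;

    // Rough per-entry container overhead, in the spirit of
    // MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)).
    static constexpr std::size_t kPerEntryOverhead = sizeof(void *) * 8;

    std::size_t DynamicMemoryUsage() const {
        return kPerEntryOverhead * queued.size() + cachedInnerUsage;
    }

    void Add(QueuedTxSketch tx) {
        cachedInnerUsage += tx.recursiveUsage;
        queued.push_back(std::move(tx));
    }

    // Keep the pool under maxBytes by evicting the oldest entries, mirroring
    // the size-limit loops in addForBlock() and importMempool().
    void Trim(std::size_t maxBytes) {
        while (!queued.empty() && DynamicMemoryUsage() > maxBytes) {
            cachedInnerUsage -= queued.front().recursiveUsage;
            queued.pop_front();
        }
    }
};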
// This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_contained is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Import mempool entries in topological order into queuedTx and clear the // mempool. Caller should call updateMempoolForReorg to reprocess these // transactions void importMempool(CTxMemPool &pool); // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector &vtx); // Remove entries based on txid_index, and update memory usage. void removeForBlock(const std::vector &vtx) { // Short-circuit in the common case of a block being added to the tip if (queuedTx.empty()) { return; } for (auto const &tx : vtx) { auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { cachedInnerUsage -= RecursiveDynamicUsage(*it); queuedTx.erase(it); } } } // Remove an entry by insertion_order index, and update memory usage. void removeEntry(indexed_disconnected_transactions::index< insertion_order>::type::iterator entry) { cachedInnerUsage -= RecursiveDynamicUsage(*entry); queuedTx.get().erase(entry); } bool isEmpty() const { return queuedTx.empty(); } void clear() { cachedInnerUsage = 0; queuedTx.clear(); } /** * Make mempool consistent after a reorg, by re-adding or recursively * erasing disconnected block transactions from the mempool, and also * removing any other transactions from the mempool that are no longer valid * given the new tip/height. * * Note: we assume that disconnectpool only contains transactions that are * NOT confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). * * Passing fAddToMempool=false will skip trying to add the transactions * back, and instead just erase from the mempool as needed. */ void updateMempoolForReorg(const Config &config, bool fAddToMempool); }; #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/validation.cpp b/src/validation.cpp index e74dd3635..ca77d33ae 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,5721 +1,5719 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include