diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 35d00c8c1b..312a4eb8d6 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -1,248 +1,248 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include class CRPCConvertParam { public: std::string methodName; //!< method whose params want conversion int paramIdx; //!< 0-based idx of param to convert std::string paramName; //!< parameter name }; /** * Specify a (method, idx, name) here if the argument is a non-string RPC * argument and needs to be converted from JSON. * * @note Parameter indexes start from 0. */ static const CRPCConvertParam vRPCConvertParams[] = { {"setmocktime", 0, "timestamp"}, {"generate", 0, "nblocks"}, {"generate", 1, "maxtries"}, {"generatetoaddress", 0, "nblocks"}, {"generatetoaddress", 2, "maxtries"}, {"getnetworkhashps", 0, "nblocks"}, {"getnetworkhashps", 1, "height"}, {"sendtoaddress", 1, "amount"}, {"sendtoaddress", 4, "subtractfeefromamount"}, {"settxfee", 0, "amount"}, {"sethdseed", 0, "newkeypool"}, {"getreceivedbyaddress", 1, "minconf"}, {"getreceivedbyaccount", 1, "minconf"}, {"getreceivedbylabel", 1, "minconf"}, {"listreceivedbyaddress", 0, "minconf"}, {"listreceivedbyaddress", 1, "include_empty"}, {"listreceivedbyaddress", 2, "include_watchonly"}, {"listreceivedbyaddress", 3, "address_filter"}, {"listreceivedbyaccount", 0, "minconf"}, {"listreceivedbyaccount", 1, "include_empty"}, {"listreceivedbyaccount", 2, "include_watchonly"}, {"listreceivedbylabel", 0, "minconf"}, {"listreceivedbylabel", 1, "include_empty"}, {"listreceivedbylabel", 2, "include_watchonly"}, {"getbalance", 1, "minconf"}, {"getbalance", 2, "include_watchonly"}, {"getblockhash", 0, "height"}, {"waitforblockheight", 0, "height"}, {"waitforblockheight", 1, "timeout"}, {"waitforblock", 1, "timeout"}, {"waitfornewblock", 0, "timeout"}, {"move", 2, "amount"}, {"move", 3, "minconf"}, {"sendfrom", 2, "amount"}, {"sendfrom", 3, "minconf"}, {"listtransactions", 1, "count"}, {"listtransactions", 2, "skip"}, {"listtransactions", 3, "include_watchonly"}, {"listaccounts", 0, "minconf"}, {"listaccounts", 1, "include_watchonly"}, {"walletpassphrase", 1, "timeout"}, {"getblocktemplate", 0, "template_request"}, {"listsinceblock", 1, "target_confirmations"}, {"listsinceblock", 2, "include_watchonly"}, {"listsinceblock", 3, "include_removed"}, {"sendmany", 1, "amounts"}, {"sendmany", 2, "minconf"}, {"sendmany", 4, "subtractfeefrom"}, {"scantxoutset", 1, "scanobjects"}, {"addmultisigaddress", 0, "nrequired"}, {"addmultisigaddress", 1, "keys"}, {"createmultisig", 0, "nrequired"}, {"createmultisig", 1, "keys"}, {"listunspent", 0, "minconf"}, {"listunspent", 1, "maxconf"}, {"listunspent", 2, "addresses"}, {"listunspent", 3, "include_unsafe"}, {"listunspent", 4, "query_options"}, {"getblock", 1, "verbosity"}, {"getblock", 1, "verbose"}, {"getblockheader", 1, "verbose"}, {"getchaintxstats", 0, "nblocks"}, {"gettransaction", 1, "include_watchonly"}, {"getrawtransaction", 1, "verbose"}, {"createrawtransaction", 0, "inputs"}, {"createrawtransaction", 1, "outputs"}, {"createrawtransaction", 2, "locktime"}, {"signrawtransactionwithkey", 1, "privkeys"}, {"signrawtransactionwithkey", 2, "prevtxs"}, {"signrawtransactionwithwallet", 1, "prevtxs"}, {"sendrawtransaction", 1, "allowhighfees"}, {"testmempoolaccept", 0, "rawtxs"}, 
{"testmempoolaccept", 1, "allowhighfees"}, {"combinerawtransaction", 0, "txs"}, {"fundrawtransaction", 1, "options"}, {"walletcreatefundedpsbt", 0, "inputs"}, {"walletcreatefundedpsbt", 1, "outputs"}, {"walletcreatefundedpsbt", 2, "locktime"}, {"walletcreatefundedpsbt", 3, "options"}, {"walletcreatefundedpsbt", 4, "bip32derivs"}, {"walletprocesspsbt", 1, "sign"}, {"walletprocesspsbt", 3, "bip32derivs"}, {"createpsbt", 0, "inputs"}, {"createpsbt", 1, "outputs"}, {"createpsbt", 2, "locktime"}, {"combinepsbt", 0, "txs"}, {"finalizepsbt", 1, "extract"}, {"converttopsbt", 1, "permitsigdata"}, {"gettxout", 1, "n"}, {"gettxout", 2, "include_mempool"}, {"gettxoutproof", 0, "txids"}, {"lockunspent", 0, "unlock"}, {"lockunspent", 1, "transactions"}, {"importprivkey", 2, "rescan"}, {"importaddress", 2, "rescan"}, {"importaddress", 3, "p2sh"}, {"importpubkey", 2, "rescan"}, {"importmulti", 0, "requests"}, {"importmulti", 1, "options"}, {"verifychain", 0, "checklevel"}, {"verifychain", 1, "nblocks"}, {"getblockstats", 0, "hash_or_height"}, {"getblockstats", 1, "stats"}, {"pruneblockchain", 0, "height"}, {"keypoolrefill", 0, "newsize"}, {"getrawmempool", 0, "verbose"}, {"estimatefee", 0, "nblocks"}, - {"prioritisetransaction", 1, "priority_delta"}, + {"prioritisetransaction", 1, "dummy"}, {"prioritisetransaction", 2, "fee_delta"}, {"setban", 2, "bantime"}, {"setban", 3, "absolute"}, {"setnetworkactive", 0, "state"}, {"getmempoolancestors", 1, "verbose"}, {"getmempooldescendants", 1, "verbose"}, {"disconnectnode", 1, "nodeid"}, {"logging", 0, "include"}, {"logging", 1, "exclude"}, // Echo with conversion (For testing only) {"echojson", 0, "arg0"}, {"echojson", 1, "arg1"}, {"echojson", 2, "arg2"}, {"echojson", 3, "arg3"}, {"echojson", 4, "arg4"}, {"echojson", 5, "arg5"}, {"echojson", 6, "arg6"}, {"echojson", 7, "arg7"}, {"echojson", 8, "arg8"}, {"echojson", 9, "arg9"}, {"rescanblockchain", 0, "start_height"}, {"rescanblockchain", 1, "stop_height"}, {"createwallet", 1, "disable_private_keys"}, }; class CRPCConvertTable { private: std::set> members; std::set> membersByName; public: CRPCConvertTable(); bool convert(const std::string &method, int idx) { return (members.count(std::make_pair(method, idx)) > 0); } bool convert(const std::string &method, const std::string &name) { return (membersByName.count(std::make_pair(method, name)) > 0); } }; CRPCConvertTable::CRPCConvertTable() { const unsigned int n_elem = (sizeof(vRPCConvertParams) / sizeof(vRPCConvertParams[0])); for (unsigned int i = 0; i < n_elem; i++) { members.insert(std::make_pair(vRPCConvertParams[i].methodName, vRPCConvertParams[i].paramIdx)); membersByName.insert(std::make_pair(vRPCConvertParams[i].methodName, vRPCConvertParams[i].paramName)); } } static CRPCConvertTable rpcCvtTable; /** * Non-RFC4627 JSON parser, accepts internal values (such as numbers, true, * false, null) as well as objects and arrays. 
*/ UniValue ParseNonRFCJSONValue(const std::string &strVal) { UniValue jVal; if (!jVal.read(std::string("[") + strVal + std::string("]")) || !jVal.isArray() || jVal.size() != 1) throw std::runtime_error(std::string("Error parsing JSON:") + strVal); return jVal[0]; } UniValue RPCConvertValues(const std::string &strMethod, const std::vector &strParams) { UniValue params(UniValue::VARR); for (unsigned int idx = 0; idx < strParams.size(); idx++) { const std::string &strVal = strParams[idx]; if (!rpcCvtTable.convert(strMethod, idx)) { // insert string value directly params.push_back(strVal); } else { // parse string as JSON, insert bool/number/object/etc. value params.push_back(ParseNonRFCJSONValue(strVal)); } } return params; } UniValue RPCConvertNamedValues(const std::string &strMethod, const std::vector &strParams) { UniValue params(UniValue::VOBJ); for (const std::string &s : strParams) { size_t pos = s.find('='); if (pos == std::string::npos) { throw(std::runtime_error("No '=' in named argument '" + s + "', this needs to be present for every " "argument (even if it is empty)")); } std::string name = s.substr(0, pos); std::string value = s.substr(pos + 1); if (!rpcCvtTable.convert(strMethod, name)) { // insert string value directly params.pushKV(name, value); } else { // parse string as JSON, insert bool/number/object/etc. value params.pushKV(name, ParseNonRFCJSONValue(value)); } } return params; } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 836c55d9f0..1914863236 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1,832 +1,826 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * Return average network hashes per second based on the last 'lookup' blocks, * or from the last difficulty change if 'lookup' is nonpositive. If 'height' is * nonnegative, compute the estimate at the time when a given block was found. */ static UniValue GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = chainActive.Tip(); if (height >= 0 && height < chainActive.Height()) { pb = chainActive[height]; } if (pb == nullptr || !pb->nHeight) { return 0; } // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) { lookup = pb->nHeight % Params().GetConsensus().DifficultyAdjustmentInterval() + 1; } // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) { lookup = pb->nHeight; } CBlockIndex *pb0 = pb; int64_t minTime = pb0->GetBlockTime(); int64_t maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64_t time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a // divide by zero exception. 
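The vRPCConvertParams change above keeps index 1 of prioritisetransaction registered under its new name "dummy": bitcoin-cli passes every argument as a string, and RPCConvertValues()/RPCConvertNamedValues() only JSON-parse an argument listed for that (method, index) or (method, name). A minimal sketch of that behaviour, assuming the declarations live in rpc/client.h and using hypothetical argument values:

    #include <rpc/client.h> // assumed header declaring RPCConvertValues / RPCConvertNamedValues
    #include <univalue.h>
    #include <string>
    #include <vector>

    void ConvertSketch() {
        // Index 0 ("txid") is not in the table, so it stays a string; indexes 1
        // and 2 ("dummy", "fee_delta") are JSON-parsed into numbers.
        UniValue positional = RPCConvertValues(
            "prioritisetransaction", {"mytxid", "0", "10000"});

        // The same conversion applies by name for -named calls.
        UniValue named = RPCConvertNamedValues(
            "prioritisetransaction", {"txid=mytxid", "dummy=0", "fee_delta=10000"});
    }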
if (minTime == maxTime) { return 0; } arith_uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64_t timeDiff = maxTime - minTime; return workDiff.getdouble() / timeDiff; } static UniValue getnetworkhashps(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getnetworkhashps ( nblocks height )\n" "\nReturns the estimated network hashes per second based on the " "last n blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last " "difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a " "certain block was found.\n" "\nArguments:\n" "1. nblocks (numeric, optional, default=120) The number of " "blocks, or -1 for blocks since last difficulty change.\n" "2. height (numeric, optional, default=-1) To estimate at the " "time of the given height.\n" "\nResult:\n" "x (numeric) Hashes per second estimated\n" "\nExamples:\n" + HelpExampleCli("getnetworkhashps", "") + HelpExampleRpc("getnetworkhashps", "")); } LOCK(cs_main); return GetNetworkHashPS( !request.params[0].isNull() ? request.params[0].get_int() : 120, !request.params[1].isNull() ? request.params[1].get_int() : -1); } UniValue generateBlocks(const Config &config, std::shared_ptr coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript) { static const int nInnerLoopCount = 0x100000; int nHeightEnd = 0; int nHeight = 0; { // Don't keep cs_main locked. LOCK(cs_main); nHeight = chainActive.Height(); nHeightEnd = nHeight + nGenerate; } unsigned int nExtraNonce = 0; UniValue blockHashes(UniValue::VARR); while (nHeight < nHeightEnd && !ShutdownRequested()) { std::unique_ptr pblocktemplate( BlockAssembler(config, g_mempool) .CreateNewBlock(coinbaseScript->reserveScript)); if (!pblocktemplate.get()) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); IncrementExtraNonce(pblock, chainActive.Tip(), config.GetMaxBlockSize(), nExtraNonce); } while (nMaxTries > 0 && pblock->nNonce < nInnerLoopCount && !CheckProofOfWork(pblock->GetHash(), pblock->nBits, config.GetChainParams().GetConsensus())) { ++pblock->nNonce; --nMaxTries; } if (nMaxTries == 0) { break; } if (pblock->nNonce == nInnerLoopCount) { continue; } std::shared_ptr shared_pblock = std::make_shared(*pblock); if (!ProcessNewBlock(config, shared_pblock, true, nullptr)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); } ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); // Mark script as important because it was used at least for one // coinbase output if the script came from the wallet. if (keepScript) { coinbaseScript->KeepScript(); } } return blockHashes; } static UniValue generatetoaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "generatetoaddress nblocks address (maxtries)\n" "\nMine blocks immediately to a specified address (before the RPC " "call returns)\n" "\nArguments:\n" "1. nblocks (numeric, required) How many blocks are generated " "immediately.\n" "2. address (string, required) The address to send the newly " "generated bitcoin to.\n" "3. 
maxtries (numeric, optional) How many iterations to try " "(default = 1000000).\n" "\nResult:\n" "[ blockhashes ] (array) hashes of blocks generated\n" "\nExamples:\n" "\nGenerate 11 blocks to myaddress\n" + HelpExampleCli("generatetoaddress", "11 \"myaddress\"")); } int nGenerate = request.params[0].get_int(); uint64_t nMaxTries = 1000000; if (!request.params[2].isNull()) { nMaxTries = request.params[2].get_int(); } CTxDestination destination = DecodeDestination(request.params[1].get_str(), config.GetChainParams()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address"); } std::shared_ptr<CReserveScript> coinbaseScript = std::make_shared<CReserveScript>(); coinbaseScript->reserveScript = GetScriptForDestination(destination); return generateBlocks(config, coinbaseScript, nGenerate, nMaxTries, false); } static UniValue getmininginfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmininginfo\n" "\nReturns a json object containing mining-related information." "\nResult:\n" "{\n" " \"blocks\": nnn, (numeric) The current block\n" " \"currentblocksize\": nnn, (numeric) The last block size\n" " \"currentblocktx\": nnn, (numeric) The last block " "transaction\n" " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"networkhashps\": nnn, (numeric) The network hashes per " "second\n" " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" " \"warnings\": \"...\" (string) any network and " "blockchain warnings\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmininginfo", "") + HelpExampleRpc("getmininginfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.pushKV("blocks", int(chainActive.Height())); obj.pushKV("currentblocksize", uint64_t(nLastBlockSize)); obj.pushKV("currentblocktx", uint64_t(nLastBlockTx)); obj.pushKV("difficulty", double(GetDifficulty(chainActive.Tip()))); obj.pushKV("networkhashps", getnetworkhashps(config, request)); obj.pushKV("pooledtx", uint64_t(g_mempool.size())); obj.pushKV("chain", config.GetChainParams().NetworkIDString()); obj.pushKV("warnings", GetWarnings("statusbar")); return obj; } // NOTE: Unlike wallet RPC (which use BCH values), mining RPCs follow GBT (BIP // 22) in using satoshi amounts static UniValue prioritisetransaction(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( - "prioritisetransaction <txid> <priority delta> <fee delta>\n" + "prioritisetransaction <txid> <dummy value> <fee delta>\n" "Accepts the transaction into mined blocks at a higher (or lower) " "priority\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id.\n" - "2. priority_delta (numeric, required) The priority to add or " - "subtract.\n" - " The transaction selection algorithm considers " - "the tx as it would have a higher priority.\n" - " (priority of a transaction is calculated: " - "coinage * value_in_satoshis / txsize) \n" + "2. dummy (required) unused.\n" "3. 
fee_delta (numeric, required) The fee value (in satoshis) " "to add (or subtract, if negative).\n" " The fee is not actually paid, only the " "algorithm for selecting transactions into a block\n" " considers the transaction as it would have paid " "a higher (or lower) fee.\n" "\nResult:\n" "true (boolean) Returns true\n" "\nExamples:\n" + HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000") + HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")); } LOCK(cs_main); TxId txid(ParseHashStr(request.params[0].get_str(), "txid")); Amount nAmount = request.params[2].get_int64() * SATOSHI; - g_mempool.PrioritiseTransaction(txid, request.params[1].get_real(), - nAmount); + g_mempool.PrioritiseTransaction(txid, nAmount); return true; } // NOTE: Assumes a conclusive result; if result is inconclusive, it must be // handled by caller static UniValue BIP22ValidationResult(const Config &config, const CValidationState &state) { if (state.IsValid()) { return NullUniValue; } if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state)); } if (state.IsInvalid()) { std::string strRejectReason = state.GetRejectReason(); if (strRejectReason.empty()) { return "rejected"; } return strRejectReason; } // Should be impossible. return "valid?"; } static UniValue getblocktemplate(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to " "explicitly select between the default 'template' request or a " "'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/" "bip-0009.mediawiki#getblocktemplate_changes\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. 
template_request (json object, optional) A json object " "in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be " "set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', " "'serverlist', 'workid'\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred " "block version\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of " "current highest block\n" " \"transactions\" : [ (array) contents of " "non-coinbase transactions that should be included in the next " "block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction " "data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id " "encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded " "in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers " "\n" " n (numeric) transactions " "before this one (by 1-based index in 'transactions' list) that " "must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in " "value between transaction inputs and outputs (in satoshis); for " "coinbase transactions, this is a negative Number of the total " "collected block fees (ie, not including the block subsidy); if " "key is not present, fee is unknown and clients MUST NOT assume " "there isn't one\n" " \"sigops\" : n, (numeric) total SigOps " "cost, as counted for purposes of block limits; if key is not " "present, sigop cost is unknown and clients MUST NOT assume it is " "zero\n" " \"required\" : true|false (boolean) if provided and " "true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that " "should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to " "be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable " "input to coinbase transaction, including the generation award and " "transaction fees (in satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information " "for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum " "timestamp appropriate for next block time in seconds since epoch " "(Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of " "ways the block template may be changed \n" " \"value\" (string) A way the block " "template may be changed, e.g. 
'time', 'transactions', " "'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid " "nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops " "in blocks\n" " \"sizelimit\" : n, (numeric) limit of block " "size\n" " \"curtime\" : ttt, (numeric) current timestamp " "in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed " "target of next block\n" " \"height\" : n (numeric) The height of the " "next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "")); } LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set setClientRules; if (!request.params[0].isNull()) { const UniValue &oparam = request.params[0].get_obj(); const UniValue &modeval = find_value(oparam, "mode"); if (modeval.isStr()) { strMode = modeval.get_str(); } else if (modeval.isNull()) { /* Do nothing */ } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue &dataval = find_value(oparam, "data"); if (!dataval.isStr()) { throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); } CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } const BlockHash hash = block.GetHash(); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } return "duplicate-inconclusive"; } CBlockIndex *const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) { return "inconclusive-not-best-prevblk"; } CValidationState state; TestBlockValidity(state, config.GetChainParams(), block, pindexPrev, BlockValidationOptions(config) .withCheckPoW(false) .withCheckMerkleRoot(true)); return BIP22ValidationResult(config, state); } } if (strMode != "template") { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (!g_connman) { throw JSONRPCError( RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); } if (IsInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); } static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has // passed and there are more transactions uint256 hashWatchedChain; std::chrono::steady_clock::time_point checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, // but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = std::chrono::steady_clock::now() + std::chrono::minutes(1); WAIT_LOCK(g_best_block_mutex, lock); while (g_best_block == hashWatchedChain && IsRPCRunning()) { if (g_best_block_cv.wait_until(lock, 
checktxtime) == std::cv_status::timeout) { // Timeout: Check transactions for update if (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) { break; } checktxtime += std::chrono::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); } // TODO: Maybe recheck connections/IBD and (if something wrong) send an // expires-immediately template to stop miners? } // Update block static CBlockIndex *pindexPrev; static int64_t nStart; static std::unique_ptr pblocktemplate; if (pindexPrev != chainActive.Tip() || (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any // failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = g_mempool.GetTransactionsUpdated(); CBlockIndex *pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(config, g_mempool).CreateNewBlock(scriptDummy); if (!pblocktemplate) { throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); } // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } assert(pindexPrev); // pointer for convenience CBlock *pblock = &pblocktemplate->block; // Update nTime UpdateTime(pblock, config.GetChainParams().GetConsensus(), pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); int index_in_template = 0; for (const auto &it : pblock->vtx) { const CTransaction &tx = *it; uint256 txId = tx.GetId(); if (tx.IsCoinBase()) { index_in_template++; continue; } UniValue entry(UniValue::VOBJ); entry.pushKV("data", EncodeHexTx(tx)); entry.pushKV("txid", txId.GetHex()); entry.pushKV("hash", tx.GetHash().GetHex()); entry.pushKV("fee", pblocktemplate->entries[index_in_template].txFee / SATOSHI); int64_t nTxSigOps = pblocktemplate->entries[index_in_template].txSigOps; entry.pushKV("sigops", nTxSigOps); transactions.push_back(entry); index_in_template++; } UniValue aux(UniValue::VOBJ); aux.pushKV("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end())); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.pushKV("capabilities", aCaps); result.pushKV("version", pblock->nVersion); result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex()); result.pushKV("transactions", transactions); result.pushKV("coinbaseaux", aux); result.pushKV("coinbasevalue", int64_t(pblock->vtx[0]->vout[0].nValue / SATOSHI)); result.pushKV("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast)); result.pushKV("target", hashTarget.GetHex()); result.pushKV("mintime", int64_t(pindexPrev->GetMedianTimePast()) + 1); result.pushKV("mutable", aMutable); result.pushKV("noncerange", "00000000ffffffff"); // FIXME: Allow for mining block greater than 1M. 
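The "longpollid" assembled just above (the tip hash in hex followed by nTransactionsUpdatedLast) is the same string the long-poll branch earlier splits with substr(0, 64) and atoi64(). A small sketch of that layout, using a hypothetical helper rather than anything from the patch:

    #include <cstdint>
    #include <string>

    // longpollid = <64 hex chars of tip block hash><decimal tx-update counter>
    struct LongPollId {
        std::string tipHashHex; // 64 hex characters
        int64_t txUpdateCount;
    };

    static LongPollId SplitLongPollId(const std::string &lpstr) {
        LongPollId out;
        out.tipHashHex = lpstr.substr(0, 64);
        out.txUpdateCount = std::stoll(lpstr.substr(64)); // stand-in for atoi64()
        return out;
    }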
result.pushKV("sigoplimit", GetMaxBlockSigOpsCount(DEFAULT_MAX_BLOCK_SIZE)); result.pushKV("sizelimit", DEFAULT_MAX_BLOCK_SIZE); result.pushKV("curtime", pblock->GetBlockTime()); result.pushKV("bits", strprintf("%08x", pblock->nBits)); result.pushKV("height", int64_t(pindexPrev->nHeight) + 1); return result; } class submitblock_StateCatcher : public CValidationInterface { public: uint256 hash; bool found; CValidationState state; explicit submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {} protected: void BlockChecked(const CBlock &block, const CValidationState &stateIn) override { if (block.GetHash() != hash) { return; } found = true; state = stateIn; } }; static UniValue submitblock(const Config &config, const JSONRPCRequest &request) { // We allow 2 arguments for compliance with BIP22. Argument 2 is ignored. if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "submitblock \"hexdata\" ( \"dummy\" )\n" "\nAttempts to submit new block to network.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n" "\nArguments\n" "1. \"hexdata\" (string, required) the hex-encoded block " "data to submit\n" "2. \"dummy\" (optional) dummy value, for compatibility " "with BIP22. This value is ignored.\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("submitblock", "\"mydata\"") + HelpExampleRpc("submitblock", "\"mydata\"")); } std::shared_ptr blockptr = std::make_shared(); CBlock &block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase"); } const BlockHash hash = block.GetHash(); { LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } } } bool new_block; submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); bool accepted = ProcessNewBlock(config, blockptr, /* fForceProcessing */ true, /* fNewBlock */ &new_block); UnregisterValidationInterface(&sc); if (!new_block && accepted) { return "duplicate"; } if (!sc.found) { return "inconclusive"; } return BIP22ValidationResult(config, sc.state); } static UniValue submitheader(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error("submitheader \"hexdata\"\n" "\nDecode the given hexdata as a header and " "submit it as a candidate chain tip if valid." "\nThrows when the header is invalid.\n" "\nArguments\n" "1. 
\"hexdata\" (string, required) the " "hex-encoded block header data\n" "\nResult:\n" "None" "\nExamples:\n" + HelpExampleCli("submitheader", "\"aabbcc\"") + HelpExampleRpc("submitheader", "\"aabbcc\"")); } CBlockHeader h; if (!DecodeHexBlockHeader(h, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block header decode failed"); } { LOCK(cs_main); if (!LookupBlockIndex(h.hashPrevBlock)) { throw JSONRPCError(RPC_VERIFY_ERROR, "Must submit previous header (" + h.hashPrevBlock.GetHex() + ") first"); } } CValidationState state; ProcessNewBlockHeaders(config, {h}, state, /* ppindex */ nullptr, /* first_invalid */ nullptr); if (state.IsValid()) { return NullUniValue; } if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state)); } throw JSONRPCError(RPC_VERIFY_ERROR, state.GetRejectReason()); } static UniValue estimatefee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 0) { throw std::runtime_error( "estimatefee\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction\n" "\nResult:\n" "n (numeric) estimated fee-per-kilobyte\n" "\nExample:\n" + HelpExampleCli("estimatefee", "")); } return ValueFromAmount(g_mempool.estimateFee().GetFeePerK()); } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // ---------- ------------------------ ---------------------- ---------- {"mining", "getnetworkhashps", getnetworkhashps, {"nblocks", "height"}}, {"mining", "getmininginfo", getmininginfo, {}}, - {"mining", "prioritisetransaction", prioritisetransaction, {"txid", "priority_delta", "fee_delta"}}, + {"mining", "prioritisetransaction", prioritisetransaction, {"txid", "dummy", "fee_delta"}}, {"mining", "getblocktemplate", getblocktemplate, {"template_request"}}, {"mining", "submitblock", submitblock, {"hexdata", "dummy"}}, {"mining", "submitheader", submitheader, {"hexdata"}}, {"generating", "generatetoaddress", generatetoaddress, {"nblocks", "address", "maxtries"}}, {"util", "estimatefee", estimatefee, {"nblocks"}}, }; // clang-format on void RegisterMiningRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); } diff --git a/src/txmempool.cpp b/src/txmempool.cpp index e8e5da32e9..04e9c22632 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1,1399 +1,1394 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include // for GetConsensus. 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, unsigned int _entryHeight, bool _spendsCoinbase, int64_t _sigOpsCount, LockPoints lp) : tx(_tx), nFee(_nFee), nTime(_nTime), entryHeight(_entryHeight), spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount), lockPoints(lp) { nTxSize = tx->GetTotalSize(); nUsageSize = RecursiveDynamicUsage(tx); nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); nModFeesWithDescendants = nFee; feeDelta = Amount::zero(); nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); nModFeesWithAncestors = nFee; nSigOpCountWithAncestors = sigOpCount; } void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) { lockPoints = lp; } // Update the given tx for any in-mempool descendants. // Assumes that setMemPoolChildren is correct for the given tx and all // descendants. void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude) { setEntries stageEntries, setAllDescendants; stageEntries = GetMemPoolChildren(updateIt); while (!stageEntries.empty()) { const txiter cit = *stageEntries.begin(); setAllDescendants.insert(cit); stageEntries.erase(cit); const setEntries &setChildren = GetMemPoolChildren(cit); for (txiter childEntry : setChildren) { cacheMap::iterator cacheIt = cachedDescendants.find(childEntry); if (cacheIt != cachedDescendants.end()) { // We've already calculated this one, just add the entries for // this set but don't traverse again. for (txiter cacheEntry : cacheIt->second) { setAllDescendants.insert(cacheEntry); } } else if (!setAllDescendants.count(childEntry)) { // Schedule for later processing stageEntries.insert(childEntry); } } } // setAllDescendants now contains all in-mempool descendants of updateIt. // Update and add to cached descendant map int64_t modifySize = 0; int64_t modifyCount = 0; Amount modifyFee = Amount::zero(); for (txiter cit : setAllDescendants) { if (!setExclude.count(cit->GetTx().GetId())) { modifySize += cit->GetTxSize(); modifyFee += cit->GetModifiedFee(); modifyCount++; cachedDescendants[updateIt].insert(cit); // Update ancestor state for each descendant mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount())); } } mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount)); } // txidsToUpdate is the set of transaction hashes from a disconnected block // which has been re-added to the mempool. For each entry, look for descendants // that are outside txidsToUpdate, and add fee/size information for such // descendants to the parent. For each such descendant, also update the ancestor // state to include the parent. void CTxMemPool::UpdateTransactionsFromBlock( const std::vector &txidsToUpdate) { LOCK(cs); // For each entry in txidsToUpdate, store the set of in-mempool, but not // in-txidsToUpdate transactions, so that we don't have to recalculate // descendants when we come across a previously seen entry. 
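CTxMemPoolEntry::UpdateFeeDelta() above replaces the entry's delta and shifts the cached ancestor/descendant modified-fee totals by the difference; accumulation across repeated prioritise calls happens in mapDeltas (see PrioritiseTransaction later in this diff). A worked example with illustrative numbers, not taken from the patch:

    // Entry pays a real fee of 1000 sat and is prioritised twice by +500 sat:
    //   mapDeltas[txid]: 0 -> 500 -> 1000 sat            (accumulates per txid)
    //   UpdateFeeDelta(500 * SATOSHI):  feeDelta 0 -> 500,    modified fee 1500 sat
    //   UpdateFeeDelta(1000 * SATOSHI): feeDelta 500 -> 1000, modified fee 2000 sat
    //   nModFeesWithAncestors / nModFeesWithDescendants move by
    //   (newFeeDelta - feeDelta) each time, i.e. +500 sat per call.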
cacheMap mapMemPoolDescendantsToUpdate; // Use a set for lookups into txidsToUpdate (these entries are already // accounted for in the state of their ancestors) std::set setAlreadyIncluded(txidsToUpdate.begin(), txidsToUpdate.end()); // Iterate in reverse, so that whenever we are looking at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const TxId &txid : reverse_iterate(txidsToUpdate)) { // we cache the in-mempool children to avoid duplicate updates setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(txid); if (it == mapTx.end()) { continue; } auto iter = mapNextTx.lower_bound(COutPoint(txid, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx. for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid; ++iter) { const TxId &childTxId = iter->second->GetId(); txiter childIter = mapTx.find(childTxId); assert(childIter != mapTx.end()); // We can skip updating entries we've encountered before or that are // in the block (which are already accounted for). if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childTxId)) { UpdateChild(it, childIter, true); UpdateParent(childIter, it, true); } } UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } bool CTxMemPool::CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const { setEntries parentHashes; const CTransaction &tx = entry.GetTx(); if (fSearchForParents) { // Get parents of this transaction that are in the mempool // GetMemPoolParents() is only valid for entries in the mempool, so we // iterate mapTx to find parents. for (const CTxIn &in : tx.vin) { boost::optional piter = GetIter(in.prevout.GetTxId()); if (!piter) { continue; } parentHashes.insert(*piter); if (parentHashes.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); return false; } } } else { // If we're not searching for parents, we require this to be an entry in // the mempool already. txiter it = mapTx.iterator_to(entry); parentHashes = GetMemPoolParents(it); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!parentHashes.empty()) { txiter stageit = *parentHashes.begin(); setAncestors.insert(stageit); parentHashes.erase(stageit); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf( "exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantSize); return false; } if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantCount); return false; } if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const setEntries &setMemPoolParents = GetMemPoolParents(stageit); for (txiter phash : setMemPoolParents) { // If this is a new ancestor, add it. 
if (setAncestors.count(phash) == 0) { parentHashes.insert(phash); } if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { setEntries parentIters = GetMemPoolParents(it); // add or remove this tx as a child of each parent for (txiter piter : parentIters) { UpdateChild(piter, it, add); } const int64_t updateCount = (add ? 1 : -1); const int64_t updateSize = updateCount * it->GetTxSize(); const Amount updateFee = updateCount * it->GetModifiedFee(); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, updateCount)); } } void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) { int64_t updateCount = setAncestors.size(); int64_t updateSize = 0; int64_t updateSigOpsCount = 0; Amount updateFee = Amount::zero(); for (txiter ancestorIt : setAncestors) { updateSize += ancestorIt->GetTxSize(); updateFee += ancestorIt->GetModifiedFee(); updateSigOpsCount += ancestorIt->GetSigOpCount(); } mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, updateSigOpsCount)); } void CTxMemPool::UpdateChildrenForRemoval(txiter it) { const setEntries &setMemPoolChildren = GetMemPoolChildren(it); for (txiter updateIt : setMemPoolChildren) { UpdateParent(updateIt, it, false); } } void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) { // For each entry, walk back all ancestors and decrement size associated // with this transaction. const uint64_t nNoLimit = std::numeric_limits::max(); if (updateDescendants) { // updateDescendants should be true whenever we're not recursively // removing a tx and all its descendants, eg when a transaction is // confirmed in a block. Here we only update statistics and not data in // mapLinks (which we need to preserve until we're finished with all // operations that need to traverse the mempool). for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self int64_t modifySize = -int64_t(removeIt->GetTxSize()); Amount modifyFee = -1 * removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCount(); for (txiter dit : setDescendants) { mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. In this case, the set of // ancestors reachable via mapLinks will be the same as the set of // ancestors whose packages include this transaction, because when we // add a new transaction to the mempool in addUnchecked(), we assume it // has no children, and in the case of a reorg where that assumption is // false, the in-mempool children aren't linked to the in-block tx's // until UpdateTransactionsFromBlock() is called. 
So if we're being // called during a reorg, ie before UpdateTransactionsFromBlock() has // been called, then mapLinks[] will differ from the set of mempool // parents we'd calculate by searching, and it's important that we use // the mapLinks[] notion of ancestor transactions as the set of things // to update for removal. CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. UpdateAncestorsOf(false, removeIt, setAncestors); } // After updating all the ancestor sizes, we can now sever the link between // each transaction being removed and any mempool children (ie, update // setMemPoolParents for each direct child of a transaction being removed). for (txiter removeIt : entriesToRemove) { UpdateChildrenForRemoval(removeIt); } } void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount) { nSizeWithDescendants += modifySize; assert(int64_t(nSizeWithDescendants) > 0); nModFeesWithDescendants += modifyFee; nCountWithDescendants += modifyCount; assert(int64_t(nCountWithDescendants) > 0); } void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); nModFeesWithAncestors += modifyFee; nCountWithAncestors += modifyCount; assert(int64_t(nCountWithAncestors) > 0); nSigOpCountWithAncestors += modifySigOps; assert(int(nSigOpCountWithAncestors) >= 0); } CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) { // lock free clear _clear(); // Sanity checks off by default for performance, because otherwise accepting // transactions becomes O(N^2) where N is the number of transactions in the // pool nCheckFrequency = 0; } CTxMemPool::~CTxMemPool() {} bool CTxMemPool::isSpent(const COutPoint &outpoint) const { LOCK(cs); return mapNextTx.count(outpoint); } unsigned int CTxMemPool::GetTransactionsUpdated() const { LOCK(cs); return nTransactionsUpdated; } void CTxMemPool::AddTransactionsUpdated(unsigned int n) { LOCK(cs); nTransactionsUpdated += n; } void CTxMemPool::addUnchecked(const TxId &txid, const CTxMemPoolEntry &entry, setEntries &setAncestors) { NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks. indexed_transaction_set::iterator newit = mapTx.insert(entry).first; mapLinks.insert(make_pair(newit, TxLinks())); // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting into // mapTx. - double priorityDelta = 0; Amount feeDelta = Amount::zero(); - ApplyDeltas(entry.GetTx().GetId(), priorityDelta, feeDelta); + ApplyDelta(entry.GetTx().GetId(), feeDelta); if (feeDelta != Amount::zero()) { mapTx.modify(newit, update_fee_delta(feeDelta)); } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction &tx = newit->GetTx(); std::set setParentTransactions; for (const CTxIn &in : tx.vin) { mapNextTx.insert(std::make_pair(&in.prevout, &tx)); setParentTransactions.insert(in.prevout.GetTxId()); } // Don't bother worrying about child transactions of this one. 
Normal case // of a new transaction arriving is that there can't be any children, // because such children would be orphans. An exception to that is if a // transaction enters that used to be in a block. In that case, our // disconnect block logic will call UpdateTransactionsFromBlock to clean up // the mess we're leaving here. // Update ancestors with information about this tx for (const auto &pit : GetIterSet(setParentTransactions)) { UpdateParent(newit, pit, true); } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); vTxHashes.emplace_back(tx.GetHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { NotifyEntryRemoved(it->GetSharedTx(), reason); for (const CTxIn &txin : it->GetTx().vin) { mapNextTx.erase(txin.prevout); } if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) { vTxHashes.shrink_to_fit(); } } else { vTxHashes.clear(); } totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children); mapLinks.erase(it); mapTx.erase(it); nTransactionsUpdated++; } // Calculates descendants of entry that are not already in setDescendants, and // adds to setDescendants. Assumes entryit is already a tx in the mempool and // setMemPoolChildren is correct for tx and all descendants. Also assumes that // if an entry is in setDescendants already, then all in-mempool descendants of // it are already in setDescendants as well, so that we can save time by not // iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants) const { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have // either already been walked, or will be walked in this iteration). while (!stage.empty()) { txiter it = *stage.begin(); setDescendants.insert(it); stage.erase(it); const setEntries &setChildren = GetMemPoolChildren(it); for (txiter childiter : setChildren) { if (!setDescendants.count(childiter)) { stage.insert(childiter); } } } } void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool. LOCK(cs); setEntries txToRemove; txiter origit = mapTx.find(origTx.GetId()); if (origit != mapTx.end()) { txToRemove.insert(origit); } else { // When recursively removing but origTx isn't in the mempool be sure to // remove any children that are in the pool. This can happen during // chain re-orgs if origTx isn't re-accepted into the mempool for any // reason. 
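removeUnchecked() above keeps vTxHashes dense with a swap-with-back removal and shrinks the vector once it falls below half of its capacity. A generic sketch of that idiom, with Item standing in as a hypothetical substitute for the real (hash, txiter) pair:

    #include <cstddef>
    #include <vector>

    struct Item {
        int payload;
        size_t idx; // back-pointer into the vector, like vTxHashesIdx
    };

    // Assumes v is non-empty and pos is a valid index.
    static void SwapRemove(std::vector<Item *> &v, size_t pos) {
        if (pos != v.size() - 1) {
            v[pos] = v.back(); // O(1): move the last element into the hole
            v[pos]->idx = pos; // fix the moved element's back-pointer
        }
        v.pop_back();
        if (v.size() * 2 < v.capacity()) {
            v.shrink_to_fit(); // same shrink heuristic as removeUnchecked()
        }
    }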
for (size_t i = 0; i < origTx.vout.size(); i++) { auto it = mapNextTx.find(COutPoint(origTx.GetId(), i)); if (it == mapNextTx.end()) { continue; } txiter nextit = mapTx.find(it->second->GetId()); assert(nextit != mapTx.end()); txToRemove.insert(nextit); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, reason); } void CTxMemPool::removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) { // Remove transactions spending a coinbase which are now immature and // no-longer-final transactions. LOCK(cs); setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction &tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); bool validLP = TestLockPointValidity(&lp); CValidationState state; if (!ContextualCheckTransactionForCurrentBlock( config.GetChainParams().GetConsensus(), tx, state, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be // invalid. So it's critical that we remove the tx and not depend on // the LockPoints. txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn &txin : tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { continue; } const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) { assert(!coin.IsSpent()); } if (coin.IsSpent() || (coin.IsCoinBase() && int64_t(nMemPoolHeight) - coin.GetHeight() < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively AssertLockHeld(cs); for (const CTxIn &txin : tx.vin) { auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetId()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner * fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector &vtx, unsigned int nBlockHeight) { LOCK(cs); DisconnectedBlockTransactions disconnectpool; disconnectpool.addForBlock(vtx); std::vector entries; for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { const TxId &txid = tx->GetId(); indexed_transaction_set::iterator i = mapTx.find(txid); if (i != mapTx.end()) { entries.push_back(&*i); } } for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get())) { txiter it = mapTx.find(tx->GetId()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetId()); } disconnectpool.clear(); lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapLinks.clear(); mapTx.clear(); mapNextTx.clear(); vTxHashes.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } static void CheckInputsAndUpdateCoins(const CTransaction &tx, CCoinsViewCache &mempoolDuplicate, const int64_t spendheight) { CValidationState state; Amount txfee = Amount::zero(); bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate, spendheight, txfee); assert(fCheckResult); UpdateCoins(mempoolDuplicate, tx, std::numeric_limits::max()); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { LOCK(cs); if (nCheckFrequency == 0) { return; } if (GetRand(std::numeric_limits::max()) >= nCheckFrequency) { return; } LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast(pcoins)); const int64_t spendheight = GetSpendHeight(mempoolDuplicate); std::list waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction &tx = it->GetTx(); txlinksMap::const_iterator linksiter = mapLinks.find(it); assert(linksiter != mapLinks.end()); const TxLinks &links = linksiter->second; innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children); bool fDependsWait = false; setEntries setParentCheck; for (const CTxIn &txin : tx.vin) { // Check that every mempool transaction's inputs refer to available // coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { const CTransaction &tx2 = it2->GetTx(); assert(tx2.vout.size() > txin.prevout.GetN() && !tx2.vout[txin.prevout.GetN()].IsNull()); fDependsWait = true; setParentCheck.insert(it2); } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } assert(setParentCheck == GetMemPoolParents(it)); // Verify ancestor state is correct. 
setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); Amount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCount(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCount(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCountWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPool::setEntries setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0)); uint64_t child_sizes = 0; for (; iter != mapNextTx.end() && iter->first->GetTxId() == it->GetTx().GetId(); ++iter) { txiter childit = mapTx.find(iter->second->GetId()); // mapNextTx points to in-mempool transactions assert(childit != mapTx.end()); if (setChildrenCheck.insert(childit).second) { child_sizes += childit->GetTxSize(); } } assert(setChildrenCheck == GetMemPoolChildren(it)); // Also check to make sure size is greater than sum with immediate // children. Just a sanity check, not definitive that this calc is // correct... assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize()); if (fDependsWait) { waitingOnDependants.push_back(&(*it)); } else { CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry *entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { const TxId &txid = it->second->GetId(); indexed_transaction_set::const_iterator it2 = mapTx.find(txid); const CTransaction &tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const TxId &txida, const TxId &txidb) { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txida); if (i == mapTx.end()) { return false; } indexed_transaction_set::const_iterator j = mapTx.find(txidb); if (j == mapTx.end()) { return true; } uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a, const CTxMemPool::indexed_transaction_set::const_iterator &b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // namespace std::vector CTxMemPool::GetSortedDepthAndScore() const { std::vector iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } 
std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } void CTxMemPool::queryHashes(std::vector &vtxid) const { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetId()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize()), it->GetModifiedFee() - it->GetFee()}; } std::vector CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const TxId &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return nullptr; } return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const TxId &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return TxMempoolInfo(); } return GetInfo(i); } CFeeRate CTxMemPool::estimateFee() const { LOCK(cs); uint64_t maxMempoolSize = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; // minerPolicy uses recent blocks to figure out a reasonable fee. This // may disagree with the rollingMinimumFeerate under certain scenarios // where the mempool increases rapidly, or blocks are being mined which // do not contain propagated transactions. return std::max(::minRelayTxFee, GetMinFee(maxMempoolSize)); } -void CTxMemPool::PrioritiseTransaction(const TxId &txid, double dPriorityDelta, +void CTxMemPool::PrioritiseTransaction(const TxId &txid, const Amount nFeeDelta) { { LOCK(cs); - TXModifier &deltas = mapDeltas[txid]; - deltas.first += dPriorityDelta; - deltas.second += nFeeDelta; + Amount &delta = mapDeltas[txid]; + delta += nFeeDelta; txiter it = mapTx.find(txid); if (it != mapTx.end()) { - mapTx.modify(it, update_fee_delta(deltas.second)); + mapTx.modify(it, update_fee_delta(delta)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, update_ancestor_state(0, nFeeDelta, 0, 0)); } ++nTransactionsUpdated; } } - LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", - txid.ToString(), dPriorityDelta, FormatMoney(nFeeDelta)); + LogPrintf("PrioritiseTransaction: %s fee += %s\n", txid.ToString(), + FormatMoney(nFeeDelta)); } -void CTxMemPool::ApplyDeltas(const TxId &txid, double &dPriorityDelta, - Amount &nFeeDelta) const { +void CTxMemPool::ApplyDelta(const TxId &txid, Amount &nFeeDelta) const { LOCK(cs); - std::map::const_iterator pos = mapDeltas.find(txid); + std::map::const_iterator pos = mapDeltas.find(txid); if (pos == mapDeltas.end()) { return; } - const TXModifier &deltas = pos->second; - dPriorityDelta += deltas.first; - nFeeDelta += deltas.second; + nFeeDelta += pos->second; } void CTxMemPool::ClearPrioritisation(const TxId &txid) { LOCK(cs); mapDeltas.erase(txid); } const CTransaction 
*CTxMemPool::GetConflictTx(const COutPoint &prevout) const { const auto it = mapNextTx.find(prevout); return it == mapNextTx.end() ? nullptr : it->second; } boost::optional CTxMemPool::GetIter(const TxId &txid) const { auto it = mapTx.find(txid); if (it != mapTx.end()) { return it; } return boost::optional{}; } CTxMemPool::setEntries CTxMemPool::GetIterSet(const std::set &txids) const { CTxMemPool::setEntries ret; for (const auto &txid : txids) { const auto mi = GetIter(txid); if (mi) { ret.insert(*mi); } } return ret; } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (const CTxIn &in : tx.vin) { if (exists(in.prevout.GetTxId())) { return false; } } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an entry in the mempool exists, always return that one, as it's // guaranteed to never conflict with the underlying cache, and it cannot // have pruned entries (as it contains full) transactions. First checking // the underlying cache risks returning a pruned entry instead. CTransactionRef ptx = mempool.get(outpoint.GetTxId()); if (ptx) { if (outpoint.GetN() < ptx->vout.size()) { coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false); return true; } return false; } return base->GetCoin(outpoint, coin); } size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no // exact formula for boost::multi_index_contained is implemented. return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 12 * sizeof(void *)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); for (txiter it : stage) { removeUnchecked(it, reason); } } int CTxMemPool::Expire(int64_t time) { LOCK(cs); indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); setEntries toremove; while (it != mapTx.get().end() && it->GetTime() < time) { toremove.insert(mapTx.project<0>(it)); it++; } setEntries stage; for (txiter removeit : toremove) { CalculateDescendants(removeit, stage); } RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY); return stage.size(); } void CTxMemPool::LimitSize(size_t limit, unsigned long age) { int expired = Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector vNoSpendsRemaining; TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint &removed : vNoSpendsRemaining) { pcoinsTip->Uncache(removed); } } void CTxMemPool::addUnchecked(const TxId &txid, const CTxMemPoolEntry &entry) { setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); return addUnchecked(txid, entry, setAncestors); } void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) { setEntries s; if (add && mapLinks[entry].children.insert(child).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].children.erase(child)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } 
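// ---------------------------------------------------------------------------
// Toy sketch, not from this patch: Expire() above walks the time-sorted index
// up to the cutoff and then widens the removal set to every in-mempool
// descendant before calling RemoveStaged(), so a parent is never dropped
// while a child stays behind. ToyMempool and its members are invented for
// illustration and do not use the real multi_index.
#include <cstdint>
#include <map>
#include <set>
#include <string>

struct ToyMempool {
    std::map<std::string, int64_t> entryTime;              // txid -> entry time
    std::map<std::string, std::set<std::string>> children; // direct in-pool children

    // Add txid and every in-pool descendant of it to `out`.
    void CollectDescendants(const std::string &txid,
                            std::set<std::string> &out) const {
        if (!out.insert(txid).second) return; // already staged
        auto it = children.find(txid);
        if (it == children.end()) return;
        for (const std::string &child : it->second) {
            CollectDescendants(child, out);
        }
    }

    // Remove everything that entered before `cutoff`, plus descendants.
    // Returns the number of removed transactions. (The real Expire() scans a
    // time-ordered index so it can stop at the first non-expired entry
    // instead of scanning the whole map as this toy does.)
    size_t Expire(int64_t cutoff) {
        std::set<std::string> stage;
        for (const auto &[txid, time] : entryTime) {
            if (time < cutoff) CollectDescendants(txid, stage);
        }
        for (const std::string &txid : stage) {
            entryTime.erase(txid);
            children.erase(txid);
        }
        // Unlink removed entries from any surviving parent's child set.
        for (auto &kv : children) {
            for (const std::string &gone : stage) kv.second.erase(gone);
        }
        return stage.size();
    }
};
// ---------------------------------------------------------------------------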
void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) { setEntries s; if (add && mapLinks[entry].parents.insert(parent).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].parents.erase(parent)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolParents(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.parents; } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolChildren(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.children; } CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { LOCK(cs); if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) { return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) { halflife /= 4; } else if (DynamicMemoryUsage() < sizelimit / 2) { halflife /= 2; } rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; } return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) { AssertLockHeld(cs); if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) { rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI; blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining) { LOCK(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(Amount::zero()); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index::type::iterator it = mapTx.get().begin(); // We set the new mempool min fee to the feerate of the removed set, // plus the "minimum reasonable fee rate" (ie some value under which we // consider txn to have 0 fee). This way, we don't allow txn to enter // mempool with feerate equal to txn which were removed with no block in // between. 
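// ---------------------------------------------------------------------------
// Illustration with an invented helper, not code from this patch: GetMinFee()
// above decays the rolling minimum feerate by a factor of two per
// ROLLING_FEE_HALFLIFE, and decays faster (halflife / 2 or halflife / 4)
// while the pool sits well under its size limit. A hedged sketch of just that
// decay arithmetic, detached from CFeeRate and the mempool itself.
#include <cmath>
#include <cstdint>

// Halve the rolling minimum every `halflife` seconds; shrink the halflife
// when current usage is far below the size limit so the floor relaxes faster.
double DecayRollingMinimum(double rollingMinimumSatPerK,
                           int64_t secondsSinceUpdate, size_t dynamicUsage,
                           size_t sizelimit,
                           double halflife /* e.g. 60.0 * 60 * 12 */) {
    if (dynamicUsage < sizelimit / 4) {
        halflife /= 4; // pool nearly empty: relax four times as fast
    } else if (dynamicUsage < sizelimit / 2) {
        halflife /= 2;
    }
    return rollingMinimumSatPerK /
           std::pow(2.0, static_cast<double>(secondsSinceUpdate) / halflife);
}
// Example: with the pool more than half full, a 1000 sat/kB floor decays to
// roughly 500 sat/kB after one full halflife. The real GetMinFee() also
// refreshes the decay at most once every 10 seconds.
// ---------------------------------------------------------------------------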
CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += MEMPOOL_FULL_FEE_INCREMENT; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) { txn.push_back(iter->GetTx()); } } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction &tx : txn) { for (const CTxIn &txin : tx.vin) { if (exists(txin.prevout.GetTxId())) { continue; } pvNoSpendsRemaining->push_back(txin.prevout); } } } } if (maxFeeRateRemoved > CFeeRate(Amount::zero())) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const { // find parent with highest descendant count std::vector candidates; setEntries counted; candidates.push_back(entry); uint64_t maximum = 0; while (candidates.size()) { txiter candidate = candidates.back(); candidates.pop_back(); if (!counted.insert(candidate).second) { continue; } const setEntries &parents = GetMemPoolParents(candidate); if (parents.size() == 0) { maximum = std::max(maximum, candidate->GetCountWithDescendants()); } else { for (txiter i : parents) { candidates.push_back(i); } } } return maximum; } void CTxMemPool::GetTransactionAncestry(const TxId &txid, size_t &ancestors, size_t &descendants) const { LOCK(cs); auto it = mapTx.find(txid); ancestors = descendants = 0; if (it != mapTx.end()) { ancestors = it->GetCountWithAncestors(); descendants = CalculateDescendantMaximum(it); } } bool CTxMemPool::IsLoaded() const { LOCK(cs); return m_is_loaded; } void CTxMemPool::SetIsLoaded(bool loaded) { LOCK(cs); m_is_loaded = loaded; } SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits::max())), k1(GetRand(std::numeric_limits::max())) {} /** Maximum bytes for transactions to store for processing during reorg */ static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE; void DisconnectedBlockTransactions::addForBlock( const std::vector &vtx) { for (const auto &tx : reverse_iterate(vtx)) { // If we already added it, just skip. auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { continue; } // Insert the transaction into the pool. addTransaction(tx); // Fill in the set of parents. std::unordered_set parents; for (const CTxIn &in : tx->vin) { parents.insert(in.prevout.GetTxId()); } // In order to make sure we keep things in topological order, we check // if we already know of the parent of the current transaction. If so, // we remove them from the set and then add them back. while (parents.size() > 0) { std::unordered_set worklist( std::move(parents)); parents.clear(); for (const TxId &txid : worklist) { // If we do not have that txid in the set, nothing needs to be // done. auto pit = queuedTx.find(txid); if (pit == queuedTx.end()) { continue; } // We have parent in our set, we reinsert them at the right // position. const CTransactionRef ptx = *pit; queuedTx.erase(pit); queuedTx.insert(ptx); // And we make sure ancestors are covered. for (const CTxIn &in : ptx->vin) { parents.insert(in.prevout.GetTxId()); } } } } // Keep the size under control. while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. 
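// ---------------------------------------------------------------------------
// Standalone sketch of the eviction loop in TrimToSize() above; ToyPackage
// and EvictUntil are invented names, not part of this patch. TrimToSize()
// repeatedly evicts the package with the worst descendant feerate until
// memory usage fits, and remembers the highest feerate it had to evict so
// trackPackageRemoved() can raise the rolling minimum (the real code
// additionally adds MEMPOOL_FULL_FEE_INCREMENT before tracking).
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct ToyPackage {
    std::string id;
    int64_t feeWithDescendants;  // satoshis
    int64_t sizeWithDescendants; // bytes, assumed non-zero
    size_t memoryUsage;          // pool memory freed if this package is evicted
};

// Evict worst-feerate packages until `usage` fits under `limit`. Assumes
// `usage` is the sum of the packages' memoryUsage. Returns the highest
// package feerate (sat per 1000 bytes) that had to be evicted.
int64_t EvictUntil(std::vector<ToyPackage> &pool, size_t &usage, size_t limit) {
    int64_t maxRemovedPerK = 0;
    while (usage > limit && !pool.empty()) {
        auto worst = std::min_element(
            pool.begin(), pool.end(),
            [](const ToyPackage &a, const ToyPackage &b) {
                // Compare a.fee/a.size < b.fee/b.size without division.
                return a.feeWithDescendants * b.sizeWithDescendants <
                       b.feeWithDescendants * a.sizeWithDescendants;
            });
        const int64_t removedPerK =
            worst->feeWithDescendants * 1000 / worst->sizeWithDescendants;
        maxRemovedPerK = std::max(maxRemovedPerK, removedPerK);
        usage -= std::min(usage, worst->memoryUsage);
        pool.erase(worst);
    }
    return maxRemovedPerK;
}
// ---------------------------------------------------------------------------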
auto it = queuedTx.get().begin(); g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); removeEntry(it); } } void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) { // addForBlock's algorithm sorts a vector of transactions back into // topological order. We use it in a separate object to create a valid // ordering of all mempool transactions, which we then splice in front of // the current queuedTx. This results in a valid sequence of transactions to // be reprocessed in updateMempoolForReorg. // We create vtx in order of the entry_time index to facilitate for // addForBlocks (which iterates in reverse order), as vtx probably end in // the correct ordering for queuedTx. std::vector vtx; { LOCK(pool.cs); vtx.reserve(pool.mapTx.size()); for (const CTxMemPoolEntry &e : pool.mapTx.get()) { vtx.push_back(e.GetSharedTx()); } pool.clear(); } // Use addForBlocks to sort the transactions and then splice them in front // of queuedTx DisconnectedBlockTransactions orderedTxnPool; orderedTxnPool.addForBlock(vtx); cachedInnerUsage += orderedTxnPool.cachedInnerUsage; queuedTx.get().splice( queuedTx.get().begin(), orderedTxnPool.queuedTx.get()); // We limit memory usage because we can't know if more blocks will be // disconnected while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry which, by definition, has no children removeEntry(queuedTx.get().begin()); } } void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector txidsUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. for (const CTransactionRef &tx : reverse_iterate(queuedTx.get())) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || tx->IsCoinBase() || !AcceptToMemoryPool(config, g_mempool, stateDummy, tx, nullptr /* pfMissingInputs */, true /* bypass_limits */, Amount::zero() /* nAbsurdFee */)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). g_mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG); } else if (g_mempool.exists(tx->GetId())) { txidsUpdate.push_back(tx->GetId()); } } queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. 
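// ---------------------------------------------------------------------------
// Simplified sketch, not the real AcceptToMemoryPool interface:
// updateMempoolForReorg() (continued just below) walks the disconnected
// transactions from the earliest previously-confirmed one to the latest,
// tries to re-accept each, and lets anything that fails take its dependants
// with it. ToyDisconnected, Resubmit and AcceptFn are placeholder names.
#include <functional>
#include <map>
#include <set>
#include <string>
#include <vector>

struct ToyDisconnected {
    std::string txid;
    bool isCoinbase;
};

// Try to put disconnected transactions (given oldest-first, parents before
// children) back into a toy pool. Anything that fails, and anything that
// spends an output of something that failed, stays out.
std::set<std::string>
Resubmit(const std::vector<ToyDisconnected> &oldestFirst,
         const std::map<std::string, std::vector<std::string>> &parentsOf,
         const std::function<bool(const std::string &)> &AcceptFn) {
    std::set<std::string> disconnected;
    for (const ToyDisconnected &tx : oldestFirst) disconnected.insert(tx.txid);

    std::set<std::string> accepted;
    for (const ToyDisconnected &tx : oldestFirst) {
        bool orphaned = false;
        auto it = parentsOf.find(tx.txid);
        if (it != parentsOf.end()) {
            for (const std::string &parent : it->second) {
                // A parent that was disconnected but not re-accepted makes
                // this transaction an orphan; drop it as well.
                if (disconnected.count(parent) && !accepted.count(parent)) {
                    orphaned = true;
                }
            }
        }
        if (tx.isCoinbase || orphaned || !AcceptFn(tx.txid)) {
            continue; // coinbase, orphaned, or rejected by policy/consensus
        }
        accepted.insert(tx.txid);
    }
    return accepted;
}
// The real loop instead calls removeRecursive() on failures so descendants
// already sitting in the mempool are cleaned out too.
// ---------------------------------------------------------------------------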
g_mempool.UpdateTransactionsFromBlock(txidsUpdate); // We also need to remove any now-immature transactions g_mempool.removeForReorg(config, pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions g_mempool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } diff --git a/src/txmempool.h b/src/txmempool.h index cd60b50069..2015b2b870 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,975 +1,971 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class CBlockIndex; class Config; extern CCriticalSection cs_main; /** * Fake height value used in Coins to signify they are only in the memory * pool(since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; //!< ... and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Chain height when entering the mempool unsigned int entryHeight; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; //!< ... 
and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, unsigned int _entryHeight, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state. void UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state void UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. 
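// ---------------------------------------------------------------------------
// The update_* helpers that follow are functors handed to mapTx.modify():
// boost::multi_index exposes elements as const through its iterators, so
// cached state such as the fee delta has to be changed via modify(), which
// also re-sorts any index keyed on the changed field. Below is a minimal,
// self-contained illustration of that pattern; Entry, bump_fee_delta and
// EntrySet are invented for this sketch and are not part of the patch.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <cassert>
#include <string>

struct Entry {
    std::string id;
    long long feeDelta;
};

struct bump_fee_delta {
    explicit bump_fee_delta(long long d) : delta(d) {}
    void operator()(Entry &e) const { e.feeDelta += delta; }

private:
    long long delta;
};

namespace bmi = boost::multi_index;
typedef bmi::multi_index_container<
    Entry,
    bmi::indexed_by<
        // unique lookup by id, plus an index sorted by the fee delta
        bmi::ordered_unique<bmi::member<Entry, std::string, &Entry::id>>,
        bmi::ordered_non_unique<bmi::member<Entry, long long, &Entry::feeDelta>>>>
    EntrySet;

int main() {
    EntrySet entries;
    entries.insert({"tx1", 0});
    auto it = entries.find(std::string("tx1"));
    // modify() applies the functor in place and re-indexes the container.
    entries.modify(it, bump_fee_delta(1000));
    assert(entries.find(std::string("tx1"))->feeDelta == 1000);
}
// mapTx.modify(it, update_fee_delta(delta)) in PrioritiseTransaction() above
// follows this same shape, just with the real CTxMemPoolEntry.
// ---------------------------------------------------------------------------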
struct update_descendant_state { update_descendant_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { e.UpdateDescendantState(modifySize, modifyFee, modifyCount); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { update_ancestor_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount, int64_t _modifySigOpsCost) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { e.UpdateAncestorState(modifySize, modifyFee, modifyCount, modifySigOpsCost); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { explicit update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { explicit update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; // extracts a transaction id from CTxMemPoolEntry or CTransactionRef struct mempoolentry_txid { typedef TxId result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). */ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Return the fee/size we're using for sorting this entry. void GetModFeeAndSize(const CTxMemPoolEntry &a, double &mod_fee, double &size) const { // Compare feerate with descendants to feerate of the transaction, and // return the fee/size for the max. double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); if (f2 > f1) { mod_fee = a.GetModFeesWithDescendants() / SATOSHI; size = a.GetSizeWithDescendants(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; /** \class CompareTxMemPoolEntryByScore * * Sort by feerate of entry (fee/size) in descending order * This is only used for transaction relay, so we use GetFee() * instead of GetModifiedFee() to avoid leaking prioritization * information via the sort order. 
*/ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; /** \class CompareTxMemPoolEntryByAncestorScore * * Sort an entry by min(score/size of entry's tx, score/size with all * ancestors). */ class CompareTxMemPoolEntryByAncestorFee { public: template bool operator()(const T &a, const T &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } // Return the fee/size we're using for sorting this entry. template void GetModFeeAndSize(const T &a, double &mod_fee, double &size) const { // Compare feerate with ancestors to feerate of the transaction, and // return the fee/size for the min. double f1 = a.GetSizeWithAncestors() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithAncestors() / SATOSHI); if (f1 > f2) { mod_fee = a.GetModFeesWithAncestors() / SATOSHI; size = a.GetSizeWithAncestors(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct ancestor_score {}; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //!< Manually removed or unknown reason UNKNOWN = 0, //!< Expired from mempool EXPIRY, //!< Removed in size limiting SIZELIMIT, //!< Removed for reorganization REORG, //!< Removed for block BLOCK, //!< Removed for conflict with in-block transaction CONFLICT, //!< Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const TxId &txid) const { return SipHashUint256(k0, k1, txid); } }; -typedef std::pair TXModifier; - /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. 
* * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - descendant feerate [we use max(feerate of tx, feerate of tx with all * descendants)] * - time in mempool * - ancestor feerate [we use min(feerate of tx, feerate of tx with all * unconfirmed ancestors)] * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. * CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency GUARDED_BY(cs); //!< Used by getblocktemplate to trigger CreateNewBlock() invocation unsigned int nTransactionsUpdated; //!< sum of all mempool tx's virtual sizes. 
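// ---------------------------------------------------------------------------
// Toy sketch of the bookkeeping described in the class comment above, not
// code from this patch: every addition must fold the new entry's size and fee
// into all in-mempool ancestors' cached descendant totals (and a removal must
// undo it) for the feerate indices to stay correct. ToyState and
// AddChildToAncestors are invented names, independent of the real
// mapLinks/multi_index machinery.
#include <cstdint>
#include <map>
#include <set>
#include <string>

struct ToyState {
    int64_t sizeWithDescendants = 0;
    int64_t feesWithDescendants = 0;
};

// parentsOf holds the direct in-pool parents of each entry. Walk every
// ancestor of `child` once and add the child's own size and fee to its
// cached descendant totals.
void AddChildToAncestors(
    const std::string &child, int64_t childSize, int64_t childFee,
    const std::map<std::string, std::set<std::string>> &parentsOf,
    std::map<std::string, ToyState> &state) {
    std::set<std::string> visited;
    std::set<std::string> frontier;
    auto seed = parentsOf.find(child);
    if (seed != parentsOf.end()) frontier = seed->second;

    while (!frontier.empty()) {
        const std::string cur = *frontier.begin();
        frontier.erase(frontier.begin());
        if (!visited.insert(cur).second) continue; // each ancestor only once
        state[cur].sizeWithDescendants += childSize;
        state[cur].feesWithDescendants += childFee;
        auto up = parentsOf.find(cur);
        if (up != parentsOf.end()) {
            frontier.insert(up->second.begin(), up->second.end());
        }
    }
}
// The real code computes setAncestors via CalculateMemPoolAncestors() and
// then modifies each ancestor with update_descendant_state, much as
// PrioritiseTransaction() above does when a fee delta changes.
// ---------------------------------------------------------------------------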
uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate) EXCLUSIVE_LOCKS_REQUIRED(cs); bool m_is_loaded GUARDED_BY(cs){false}; public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByEntryTime>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; /** * This mutex needs to be locked when accessing `mapTx` or other members * that are guarded by it. * * @par Consistency guarantees * * By design, it is guaranteed that: * * 1. Locking both `cs_main` and `mempool.cs` will give a view of mempool * that is consistent with current chain tip (`chainActive` and * `pcoinsTip`) and is fully populated. Fully populated means that if the * current active chain is missing transactions that were present in a * previously active chain, all the missing transactions will have been * re-added to the mempool and should be present if they meet size and * consistency constraints. * * 2. Locking `mempool.cs` without `cs_main` will give a view of a mempool * consistent with some chain that was active since `cs_main` was last * locked, and that is fully populated as described above. It is ok for * code that only needs to query or remove transactions from the mempool * to lock just `mempool.cs` without `cs_main`. * * To provide these guarantees, it is necessary to lock both `cs_main` and * `mempool.cs` whenever adding transactions to the mempool and whenever * changing the chain tip. It's necessary to keep both mutexes locked until * the mempool is consistent with the new chain tip and fully populated. * * @par Consistency bug * * The second guarantee above is not currently enforced, but * https://github.com/bitcoin/bitcoin/pull/14193 will fix it. No known code * in bitcoin currently depends on second guarantee, but it is important to * fix for third party code that needs be able to frequently poll the * mempool without locking `cs_main` and without encountering missing * transactions during reorgs. 
*/ mutable RecursiveMutex cs; indexed_transaction_set mapTx GUARDED_BY(cs); typedef indexed_transaction_set::nth_index<0>::type::iterator txiter; //!< All tx hashes/entries in mapTx, in random order std::vector> vTxHashes; struct CompareIteratorById { bool operator()(const txiter &a, const txiter &b) const { return a->GetTx().GetId() < b->GetTx().GetId(); } }; typedef std::set setEntries; const setEntries &GetMemPoolParents(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); const setEntries &GetMemPoolChildren(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); private: typedef std::map cacheMap; struct TxLinks { setEntries parents; setEntries children; }; typedef std::map txlinksMap; txlinksMap mapLinks; void UpdateParent(txiter entry, txiter parent, bool add); void UpdateChild(txiter entry, txiter child, bool add); std::vector GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs); public: indirectmap mapNextTx GUARDED_BY(cs); - std::map mapDeltas; + std::map mapDeltas; /** * Create a new CTxMemPool. */ CTxMemPool(); ~CTxMemPool(); /** * If sanity-checking is turned on, check makes sure the pool is consistent * (does not contain two transactions that spend the same inputs, all inputs * are in the mapNextTx array). If sanity-checking is turned off, check does * nothing. */ void check(const CCoinsViewCache *pcoins) const; void setSanityCheck(double dFrequency = 1.0) { LOCK(cs); nCheckFrequency = static_cast(dFrequency * 4294967295.0); } // addUnchecked must updated state for all ancestors of a given transaction, // to track size/count of descendant transactions. First version of // addUnchecked can be used to have it call CalculateMemPoolAncestors(), and // then invoke the second version. // Note that addUnchecked is ONLY called from ATMP outside of tests // and any other callers may break wallet's in-mempool tracking (due to // lack of CValidationInterface::TransactionAddedToMempool callbacks). void addUnchecked(const TxId &txid, const CTxMemPoolEntry &entry) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void addUnchecked(const TxId &txid, const CTxMemPoolEntry &entry, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeRecursive( const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs_main); void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeForBlock(const std::vector &vtx, unsigned int nBlockHeight); void clear(); // lock free void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs); bool CompareDepthAndScore(const TxId &txida, const TxId &txidb); void queryHashes(std::vector &vtxid) const; bool isSpent(const COutPoint &outpoint) const; unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** * Check that none of this transactions inputs are in the mempool, and thus * the tx is not dependent on other mempool transactions to be included in a * block. 
*/ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ - void PrioritiseTransaction(const TxId &txid, double dPriorityDelta, - const Amount nFeeDelta); - void ApplyDeltas(const TxId &txid, double &dPriorityDelta, - Amount &nFeeDelta) const; + void PrioritiseTransaction(const TxId &txid, const Amount nFeeDelta); + void ApplyDelta(const TxId &txid, Amount &nFeeDelta) const; void ClearPrioritisation(const TxId &txid); /** Get the transaction in the pool that spends the same prevout */ const CTransaction *GetConflictTx(const COutPoint &prevout) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** Returns an iterator to the given txid, if found */ boost::optional GetIter(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Translate a set of txids into a set of pool iterators to avoid repeated * lookups. */ setEntries GetIterSet(const std::set &txids) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector &txidsToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. */ void CalculateDescendants(txiter it, setEntries &setDescendants) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. 
When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. */ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Calculate the ancestor and descendant count for the given transaction. * The counts include the transaction itself. */ void GetTransactionAncestry(const TxId &txid, size_t &ancestors, size_t &descendants) const; /** @returns true if the mempool is fully loaded */ bool IsLoaded() const; /** Sets the current loaded state */ void SetIsLoaded(bool loaded); unsigned long size() const { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() const { LOCK(cs); return totalTxSize; } bool exists(const TxId &txid) const { LOCK(cs); return mapTx.count(txid) != 0; } CTransactionRef get(const TxId &txid) const; TxMempoolInfo info(const TxId &txid) const; std::vector infoAll() const; CFeeRate estimateFee() const; size_t DynamicMemoryUsage() const; boost::signals2::signal NotifyEntryAdded; boost::signals2::signal NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. */ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Update ancestors of hash to add/remove it as a descendant transaction. */ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * For each transaction being removed, update ancestors and any direct * children. If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. 
We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); }; /** * CCoinsView that brings transactions from a mempool into view. * It does not check for spendings by memory pool transactions. * Instead, it provides access to all Coins which are either unspent in the * base CCoinsView, or are outputs from any mempool transaction! * This allows transaction replacement to work as expected, as you want to * have all inputs "available" to check signatures, and any cycles in the * dependency graph are checked directly in AcceptToMemoryPool. * It also allows you to sign a double-spend directly in * signrawtransactionwithkey and signrawtransactionwithwallet, * as long as the conflicting transaction is not yet confirmed. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. * * It also enables efficient reprocessing of current mempool entries, useful * when (de)activating forks that result in in-mempool transactions becoming * invalid */ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. // This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). 
~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_contained is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Import mempool entries in topological order into queuedTx and clear the // mempool. Caller should call updateMempoolForReorg to reprocess these // transactions void importMempool(CTxMemPool &pool); // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector &vtx); // Remove entries based on txid_index, and update memory usage. void removeForBlock(const std::vector &vtx) { // Short-circuit in the common case of a block being added to the tip if (queuedTx.empty()) { return; } for (auto const &tx : vtx) { auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { cachedInnerUsage -= RecursiveDynamicUsage(*it); queuedTx.erase(it); } } } // Remove an entry by insertion_order index, and update memory usage. void removeEntry(indexed_disconnected_transactions::index< insertion_order>::type::iterator entry) { cachedInnerUsage -= RecursiveDynamicUsage(*entry); queuedTx.get().erase(entry); } bool isEmpty() const { return queuedTx.empty(); } void clear() { cachedInnerUsage = 0; queuedTx.clear(); } /** * Make mempool consistent after a reorg, by re-adding or recursively * erasing disconnected block transactions from the mempool, and also * removing any other transactions from the mempool that are no longer valid * given the new tip/height. * * Note: we assume that disconnectpool only contains transactions that are * NOT confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). * * Passing fAddToMempool=false will skip trying to add the transactions * back, and instead just erase from the mempool as needed. */ void updateMempoolForReorg(const Config &config, bool fAddToMempool); }; #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/validation.cpp b/src/validation.cpp index 22f1f0758f..3e53dd1c59 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,5660 +1,5657 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2018 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
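// ---------------------------------------------------------------------------
// Summary illustration of the txmempool.h interface change above, using toy
// types in place of CTxMemPool, TxId and Amount (not part of this patch):
// prioritisation is now a pure fee-delta adjustment, the double
// dPriorityDelta parameter and the TXModifier pair are gone, and
// ApplyDeltas() becomes ApplyDelta().
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

struct ToyPrioritiser {
    std::map<std::string, int64_t> mapDeltas; // accumulated fee deltas (satoshis)

    // After this patch, PrioritiseTransaction only takes a fee delta.
    void PrioritiseTransaction(const std::string &txid, int64_t feeDelta) {
        mapDeltas[txid] += feeDelta;
    }

    // ApplyDelta (formerly ApplyDeltas) adds any accumulated delta to the fee
    // the caller is evaluating, e.g. when scoring a transaction for mining.
    void ApplyDelta(const std::string &txid, int64_t &fee) const {
        auto it = mapDeltas.find(txid);
        if (it != mapDeltas.end()) fee += it->second;
    }
};

int main() {
    ToyPrioritiser pool;
    pool.PrioritiseTransaction("tx1", 10000);
    pool.PrioritiseTransaction("tx1", -2500); // deltas accumulate
    int64_t fee = 1000;
    pool.ApplyDelta("tx1", fee);
    assert(fee == 8500); // 1000 + 10000 - 2500
}
// ---------------------------------------------------------------------------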