diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 1d5542ada7..2638446626 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -1,462 +1,463 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "chainparamsbase.h" #include "clientversion.h" #include "rpc/client.h" #include "rpc/protocol.h" #include "support/events.h" #include "util.h" #include "utilstrencodings.h" #include #include #include #include #include static const char DEFAULT_RPCCONNECT[] = "127.0.0.1"; static const int DEFAULT_HTTP_CLIENT_TIMEOUT = 900; static const bool DEFAULT_NAMED = false; static const int CONTINUE_EXECUTION = -1; std::string HelpMessageCli() { std::string strUsage; strUsage += HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("This help message")); strUsage += HelpMessageOpt( "-conf=", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); strUsage += HelpMessageOpt("-datadir=", _("Specify data directory")); AppendParamsHelpMessages(strUsage); strUsage += HelpMessageOpt( "-named", strprintf(_("Pass named instead of positional arguments (default: %s)"), DEFAULT_NAMED)); strUsage += HelpMessageOpt( "-rpcconnect=", strprintf(_("Send commands to node running on (default: %s)"), DEFAULT_RPCCONNECT)); strUsage += HelpMessageOpt( "-rpcport=", strprintf( _("Connect to JSON-RPC on (default: %u or testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort())); strUsage += HelpMessageOpt("-rpcwait", _("Wait for RPC server to start")); strUsage += HelpMessageOpt("-rpcuser=", _("Username for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcpassword=", _("Password for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcclienttimeout=", strprintf(_("Timeout in seconds during HTTP requests, " "or 0 for no timeout. (default: %d)"), DEFAULT_HTTP_CLIENT_TIMEOUT)); strUsage += HelpMessageOpt( "-stdin", _("Read extra arguments from standard input, one per line " "until EOF/Ctrl-D (recommended for sensitive information " "such as passphrases)")); strUsage += HelpMessageOpt( "-usewallet=", _("Send RPC for non-default wallet on RPC server (argument is wallet " "filename in bitcoind directory, required if bitcoind/-Qt runs with " "multiple wallets)")); return strUsage; } ////////////////////////////////////////////////////////////////////////////// // // Start // // // Exception thrown on connection error. This error is used to determine when // to wait if -rpcwait is given. // class CConnectionFailed : public std::runtime_error { public: explicit inline CConnectionFailed(const std::string &msg) : std::runtime_error(msg) {} }; // // This function returns either one of EXIT_ codes when it's expected to stop // the process or CONTINUE_EXECUTION when it's expected to continue further. 
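The CConnectionFailed exception declared above exists only so the caller can tell connection failures apart from RPC-level errors and keep retrying when -rpcwait is given; the loop that does this appears later in CommandLineRPC. A minimal, self-contained sketch of that pattern, with a hypothetical FakeCallRPC() standing in for CallRPC() and std::this_thread::sleep_for standing in for MilliSleep:

#include <chrono>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <thread>

// Same shape as the exception in the hunk above: thrown on connection
// failure so the caller can decide whether to retry.
class CConnectionFailed : public std::runtime_error {
public:
    explicit CConnectionFailed(const std::string &msg)
        : std::runtime_error(msg) {}
};

// Hypothetical stand-in for CallRPC(): fails twice, then "connects".
static std::string FakeCallRPC() {
    static int attempts = 0;
    if (++attempts < 3) throw CConnectionFailed("server not ready");
    return "{\"result\":null,\"error\":null,\"id\":1}";
}

// The -rpcwait pattern: retry once per second while the server is
// unreachable; without -rpcwait the first failure propagates.
static std::string CallWithOptionalWait(bool fWait) {
    do {
        try {
            // Connection succeeded, no need to retry.
            return FakeCallRPC();
        } catch (const CConnectionFailed &e) {
            if (!fWait) throw;
            std::fprintf(stderr, "retrying: %s\n", e.what());
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    } while (fWait);
    return {};
}

int main() {
    std::printf("%s\n", CallWithOptionalWait(true).c_str());
    return 0;
}
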
// static int AppInitRPC(int argc, char *argv[]) { // // Parameters // - ParseParameters(argc, argv); - if (argc < 2 || IsArgSet("-?") || IsArgSet("-h") || IsArgSet("-help") || - IsArgSet("-version")) { + gArgs.ParseParameters(argc, argv); + if (argc < 2 || gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || + gArgs.IsArgSet("-help") || gArgs.IsArgSet("-version")) { std::string strUsage = strprintf(_("%s RPC client version"), _(PACKAGE_NAME)) + " " + FormatFullVersion() + "\n"; - if (!IsArgSet("-version")) { + if (!gArgs.IsArgSet("-version")) { strUsage += "\n" + _("Usage:") + "\n" + " bitcoin-cli [options] [params] " + strprintf(_("Send command to %s"), _(PACKAGE_NAME)) + "\n" + " bitcoin-cli [options] -named [name=value] ... " + strprintf(_("Send command to %s (with named arguments)"), _(PACKAGE_NAME)) + "\n" + " bitcoin-cli [options] help " + _("List commands") + "\n" + " bitcoin-cli [options] help " + _("Get help for a command") + "\n"; strUsage += "\n" + HelpMessageCli(); } fprintf(stdout, "%s", strUsage.c_str()); if (argc < 2) { fprintf(stderr, "Error: too few parameters\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } if (!fs::is_directory(GetDataDir(false))) { fprintf(stderr, "Error: Specified data directory \"%s\" does not exist.\n", - GetArg("-datadir", "").c_str()); + gArgs.GetArg("-datadir", "").c_str()); return EXIT_FAILURE; } try { - ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)); + gArgs.ReadConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)); } catch (const std::exception &e) { fprintf(stderr, "Error reading configuration file: %s\n", e.what()); return EXIT_FAILURE; } // Check for -testnet or -regtest parameter (BaseParams() calls are only // valid after this clause) try { SelectBaseParams(ChainNameFromCommandLine()); } catch (const std::exception &e) { fprintf(stderr, "Error: %s\n", e.what()); return EXIT_FAILURE; } - if (GetBoolArg("-rpcssl", false)) { + if (gArgs.GetBoolArg("-rpcssl", false)) { fprintf(stderr, "Error: SSL mode for RPC (-rpcssl) is no longer supported.\n"); return EXIT_FAILURE; } return CONTINUE_EXECUTION; } /** Reply structure for request_done to fill in */ struct HTTPReply { HTTPReply() : status(0), error(-1) {} int status; int error; std::string body; }; const char *http_errorstring(int code) { switch (code) { #if LIBEVENT_VERSION_NUMBER >= 0x02010300 case EVREQ_HTTP_TIMEOUT: return "timeout reached"; case EVREQ_HTTP_EOF: return "EOF reached"; case EVREQ_HTTP_INVALID_HEADER: return "error while reading header, or invalid header"; case EVREQ_HTTP_BUFFER_ERROR: return "error encountered while reading or writing"; case EVREQ_HTTP_REQUEST_CANCEL: return "request was canceled"; case EVREQ_HTTP_DATA_TOO_LONG: return "response body is larger than allowed"; #endif default: return "unknown"; } } static void http_request_done(struct evhttp_request *req, void *ctx) { HTTPReply *reply = static_cast(ctx); if (req == nullptr) { /** * If req is nullptr, it means an error occurred while connecting: the * error code will have been passed to http_error_cb. 
*/ reply->status = 0; return; } reply->status = evhttp_request_get_response_code(req); struct evbuffer *buf = evhttp_request_get_input_buffer(req); if (buf) { size_t size = evbuffer_get_length(buf); const char *data = (const char *)evbuffer_pullup(buf, size); if (data) reply->body = std::string(data, size); evbuffer_drain(buf, size); } } #if LIBEVENT_VERSION_NUMBER >= 0x02010300 static void http_error_cb(enum evhttp_request_error err, void *ctx) { HTTPReply *reply = static_cast(ctx); reply->error = err; } #endif UniValue CallRPC(const std::string &strMethod, const UniValue ¶ms) { - std::string host = GetArg("-rpcconnect", DEFAULT_RPCCONNECT); - int port = GetArg("-rpcport", BaseParams().RPCPort()); + std::string host = gArgs.GetArg("-rpcconnect", DEFAULT_RPCCONNECT); + int port = gArgs.GetArg("-rpcport", BaseParams().RPCPort()); // Obtain event base raii_event_base base = obtain_event_base(); // Synchronously look up hostname raii_evhttp_connection evcon = obtain_evhttp_connection_base(base.get(), host, port); evhttp_connection_set_timeout( - evcon.get(), GetArg("-rpcclienttimeout", DEFAULT_HTTP_CLIENT_TIMEOUT)); + evcon.get(), + gArgs.GetArg("-rpcclienttimeout", DEFAULT_HTTP_CLIENT_TIMEOUT)); HTTPReply response; raii_evhttp_request req = obtain_evhttp_request(http_request_done, (void *)&response); if (req == nullptr) throw std::runtime_error("create http request failed"); #if LIBEVENT_VERSION_NUMBER >= 0x02010300 evhttp_request_set_error_cb(req.get(), http_error_cb); #endif // Get credentials std::string strRPCUserColonPass; - if (GetArg("-rpcpassword", "") == "") { + if (gArgs.GetArg("-rpcpassword", "") == "") { // Try fall back to cookie-based authentication if no password is // provided if (!GetAuthCookie(&strRPCUserColonPass)) { throw std::runtime_error(strprintf( _("Could not locate RPC credentials. 
No authentication cookie " "could be found, and no rpcpassword is set in the " "configuration file (%s)"), - GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)) + GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)) .string() .c_str())); } } else { - strRPCUserColonPass = - GetArg("-rpcuser", "") + ":" + GetArg("-rpcpassword", ""); + strRPCUserColonPass = gArgs.GetArg("-rpcuser", "") + ":" + + gArgs.GetArg("-rpcpassword", ""); } struct evkeyvalq *output_headers = evhttp_request_get_output_headers(req.get()); assert(output_headers); evhttp_add_header(output_headers, "Host", host.c_str()); evhttp_add_header(output_headers, "Connection", "close"); evhttp_add_header( output_headers, "Authorization", (std::string("Basic ") + EncodeBase64(strRPCUserColonPass)).c_str()); // Attach request data std::string strRequest = JSONRPCRequestObj(strMethod, params, 1).write() + "\n"; struct evbuffer *output_buffer = evhttp_request_get_output_buffer(req.get()); assert(output_buffer); evbuffer_add(output_buffer, strRequest.data(), strRequest.size()); // check if we should use a special wallet endpoint std::string endpoint = "/"; - std::string walletName = GetArg("-usewallet", ""); + std::string walletName = gArgs.GetArg("-usewallet", ""); if (!walletName.empty()) { char *encodedURI = evhttp_uriencode(walletName.c_str(), walletName.size(), false); if (encodedURI) { endpoint = "/wallet/" + std::string(encodedURI); free(encodedURI); } else { throw CConnectionFailed("uri-encode failed"); } } int r = evhttp_make_request(evcon.get(), req.get(), EVHTTP_REQ_POST, endpoint.c_str()); // ownership moved to evcon in above call req.release(); if (r != 0) { throw CConnectionFailed("send http request failed"); } event_base_dispatch(base.get()); if (response.status == 0) { throw CConnectionFailed(strprintf( "couldn't connect to server: %s (code %d)\n(make sure server is " "running and you are connecting to the correct RPC port)", http_errorstring(response.error), response.error)); } else if (response.status == HTTP_UNAUTHORIZED) { throw std::runtime_error( "incorrect rpcuser or rpcpassword (authorization failed)"); } else if (response.status >= 400 && response.status != HTTP_BAD_REQUEST && response.status != HTTP_NOT_FOUND && response.status != HTTP_INTERNAL_SERVER_ERROR) { throw std::runtime_error( strprintf("server returned HTTP error %d", response.status)); } else if (response.body.empty()) { throw std::runtime_error("no response from server"); } // Parse reply UniValue valReply(UniValue::VSTR); if (!valReply.read(response.body)) { throw std::runtime_error("couldn't parse reply from server"); } const UniValue &reply = valReply.get_obj(); if (reply.empty()) { throw std::runtime_error( "expected reply to have result, error and id properties"); } return reply; } int CommandLineRPC(int argc, char *argv[]) { std::string strPrint; int nRet = 0; try { // Skip switches while (argc > 1 && IsSwitchChar(argv[1][0])) { argc--; argv++; } std::vector args = std::vector(&argv[1], &argv[argc]); - if (GetBoolArg("-stdin", false)) { + if (gArgs.GetBoolArg("-stdin", false)) { // Read one arg per line from stdin and append std::string line; while (std::getline(std::cin, line)) { args.push_back(line); } } if (args.size() < 1) { throw std::runtime_error( "too few parameters (need at least command)"); } std::string strMethod = args[0]; // Remove trailing method name from arguments vector args.erase(args.begin()); UniValue params; - if (GetBoolArg("-named", DEFAULT_NAMED)) { + if (gArgs.GetBoolArg("-named", DEFAULT_NAMED)) { params = 
RPCConvertNamedValues(strMethod, args); } else { params = RPCConvertValues(strMethod, args); } // Execute and handle connection failures with -rpcwait - const bool fWait = GetBoolArg("-rpcwait", false); + const bool fWait = gArgs.GetBoolArg("-rpcwait", false); do { try { const UniValue reply = CallRPC(strMethod, params); // Parse reply const UniValue &result = find_value(reply, "result"); const UniValue &error = find_value(reply, "error"); if (!error.isNull()) { // Error int code = error["code"].get_int(); if (fWait && code == RPC_IN_WARMUP) throw CConnectionFailed("server in warmup"); strPrint = "error: " + error.write(); nRet = abs(code); if (error.isObject()) { UniValue errCode = find_value(error, "code"); UniValue errMsg = find_value(error, "message"); strPrint = errCode.isNull() ? "" : "error code: " + errCode.getValStr() + "\n"; if (errMsg.isStr()) { strPrint += "error message:\n" + errMsg.get_str(); } if (errCode.isNum() && errCode.get_int() == RPC_WALLET_NOT_SPECIFIED) { strPrint += "\nTry adding " "\"-rpcwallet=\" option to " "bitcoin-cli command line."; } } } else { // Result if (result.isNull()) { strPrint = ""; } else if (result.isStr()) { strPrint = result.get_str(); } else { strPrint = result.write(2); } } // Connection succeeded, no need to retry. break; } catch (const CConnectionFailed &) { if (fWait) { MilliSleep(1000); } else { throw; } } } while (fWait); } catch (const boost::thread_interrupted &) { throw; } catch (const std::exception &e) { strPrint = std::string("error: ") + e.what(); nRet = EXIT_FAILURE; } catch (...) { PrintExceptionContinue(nullptr, "CommandLineRPC()"); throw; } if (strPrint != "") { fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str()); } return nRet; } int main(int argc, char *argv[]) { SetupEnvironment(); if (!SetupNetworking()) { fprintf(stderr, "Error: Initializing networking failed\n"); return EXIT_FAILURE; } try { int ret = AppInitRPC(argc, argv); if (ret != CONTINUE_EXECUTION) { return ret; } } catch (const std::exception &e) { PrintExceptionContinue(&e, "AppInitRPC()"); return EXIT_FAILURE; } catch (...) { PrintExceptionContinue(nullptr, "AppInitRPC()"); return EXIT_FAILURE; } int ret = EXIT_FAILURE; try { ret = CommandLineRPC(argc, argv); } catch (const std::exception &e) { PrintExceptionContinue(&e, "CommandLineRPC()"); } catch (...) { PrintExceptionContinue(nullptr, "CommandLineRPC()"); } return ret; } diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index a3de887eee..fa4739c850 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -1,900 +1,901 @@ // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "base58.h" #include "chainparams.h" #include "clientversion.h" #include "coins.h" #include "consensus/consensus.h" #include "core_io.h" #include "dstencode.h" #include "keystore.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "script/script.h" #include "script/sign.h" #include "univalue.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include #include static bool fCreateBlank; static std::map registers; static const int CONTINUE_EXECUTION = -1; // // This function returns either one of EXIT_ codes when it's expected to stop // the process or CONTINUE_EXECUTION when it's expected to continue further. 
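The bitcoin-cli.cpp hunks above, and the bitcoin-tx.cpp, bitcoind.cpp and later hunks below, all make the same mechanical change: the free helpers ParseParameters(), IsArgSet(), GetArg() and GetBoolArg() become methods on the global gArgs object. A simplified sketch of the kind of argument registry those call sites now go through (not the real ArgsManager: it omits config-file merging, multi-value arguments, negation handling and locking):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

// Simplified stand-in for the argument registry used throughout this diff
// (gArgs.ParseParameters, gArgs.IsArgSet, gArgs.GetArg, gArgs.GetBoolArg).
class ArgsManagerSketch {
    std::map<std::string, std::string> mapArgs;

public:
    void ParseParameters(int argc, const char *const argv[]) {
        for (int i = 1; i < argc; ++i) {
            std::string arg = argv[i];
            if (arg.empty() || arg[0] != '-') break; // stop at first non-option
            std::string key = arg, val;
            size_t eq = arg.find('=');
            if (eq != std::string::npos) {
                key = arg.substr(0, eq);
                val = arg.substr(eq + 1);
            }
            mapArgs[key] = val; // later occurrences overwrite earlier ones
        }
    }

    bool IsArgSet(const std::string &key) const {
        return mapArgs.count(key) != 0;
    }

    std::string GetArg(const std::string &key, const std::string &def) const {
        auto it = mapArgs.find(key);
        return it == mapArgs.end() ? def : it->second;
    }

    int64_t GetArg(const std::string &key, int64_t def) const {
        auto it = mapArgs.find(key);
        return it == mapArgs.end() ? def : std::atoll(it->second.c_str());
    }

    bool GetBoolArg(const std::string &key, bool def) const {
        auto it = mapArgs.find(key);
        if (it == mapArgs.end()) return def;
        // "-foo" with no value counts as true, otherwise parse the number.
        return it->second.empty() || std::atoi(it->second.c_str()) != 0;
    }
};

// A single global instance, mirroring how the converted call sites use gArgs.
static ArgsManagerSketch gArgs;

int main(int argc, char *argv[]) {
    gArgs.ParseParameters(argc, argv);
    std::printf("conf=%s rpcwait=%d port=%lld\n",
                gArgs.GetArg("-conf", "bitcoin.conf").c_str(),
                gArgs.GetBoolArg("-rpcwait", false) ? 1 : 0,
                (long long)gArgs.GetArg("-rpcport", (int64_t)8332));
    return EXIT_SUCCESS;
}

The real object additionally backs gArgs.ReadConfigFile() and gArgs.SoftSetBoolArg(), both of which appear in the bitcoind.cpp hunk further down.
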
// static int AppInitRawTx(int argc, char *argv[]) { // // Parameters // - ParseParameters(argc, argv); + gArgs.ParseParameters(argc, argv); // Check for -testnet or -regtest parameter (Params() calls are only valid // after this clause) try { SelectParams(ChainNameFromCommandLine()); } catch (const std::exception &e) { fprintf(stderr, "Error: %s\n", e.what()); return EXIT_FAILURE; } - fCreateBlank = GetBoolArg("-create", false); + fCreateBlank = gArgs.GetBoolArg("-create", false); - if (argc < 2 || IsArgSet("-?") || IsArgSet("-h") || IsArgSet("-help")) { + if (argc < 2 || gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || + gArgs.IsArgSet("-help")) { // First part of help message is specific to this utility std::string strUsage = strprintf(_("%s bitcoin-tx utility version"), _(PACKAGE_NAME)) + " " + FormatFullVersion() + "\n\n" + _("Usage:") + "\n" + " bitcoin-tx [options] [commands] " + _("Update hex-encoded bitcoin transaction") + "\n" + " bitcoin-tx [options] -create [commands] " + _("Create hex-encoded bitcoin transaction") + "\n" + "\n"; fprintf(stdout, "%s", strUsage.c_str()); strUsage = HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("This help message")); strUsage += HelpMessageOpt("-create", _("Create new, empty TX.")); strUsage += HelpMessageOpt("-json", _("Select JSON output")); strUsage += HelpMessageOpt("-txid", _("Output only the hex-encoded transaction " "id of the resultant transaction.")); AppendParamsHelpMessages(strUsage); fprintf(stdout, "%s", strUsage.c_str()); strUsage = HelpMessageGroup(_("Commands:")); strUsage += HelpMessageOpt("delin=N", _("Delete input N from TX")); strUsage += HelpMessageOpt("delout=N", _("Delete output N from TX")); strUsage += HelpMessageOpt("in=TXID:VOUT(:SEQUENCE_NUMBER)", _("Add input to TX")); strUsage += HelpMessageOpt("locktime=N", _("Set TX lock time to N")); strUsage += HelpMessageOpt("nversion=N", _("Set TX version to N")); strUsage += HelpMessageOpt("outaddr=VALUE:ADDRESS", _("Add address-based output to TX")); strUsage += HelpMessageOpt("outpubkey=VALUE:PUBKEY[:FLAGS]", _("Add pay-to-pubkey output to TX") + ". " + _("Optionally add the \"S\" flag to wrap the " "output in a pay-to-script-hash.")); strUsage += HelpMessageOpt("outdata=[VALUE:]DATA", _("Add data-based output to TX")); strUsage += HelpMessageOpt("outscript=VALUE:SCRIPT[:FLAGS]", _("Add raw script output to TX") + ". " + _("Optionally add the \"S\" flag to wrap the " "output in a pay-to-script-hash.")); strUsage += HelpMessageOpt( "outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", _("Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = " "PUBKEYS") + ". " + _("Optionally add the \"S\" flag to wrap the output in " "a pay-to-script-hash.")); strUsage += HelpMessageOpt( "sign=SIGHASH-FLAGS", _("Add zero or more signatures to transaction") + ". " + _("This command requires JSON registers:") + _("prevtxs=JSON object") + ", " + _("privatekeys=JSON object") + ". 
" + _("See signrawtransaction docs for format of sighash " "flags, JSON objects.")); fprintf(stdout, "%s", strUsage.c_str()); strUsage = HelpMessageGroup(_("Register Commands:")); strUsage += HelpMessageOpt("load=NAME:FILENAME", _("Load JSON file FILENAME into register NAME")); strUsage += HelpMessageOpt("set=NAME:JSON-STRING", _("Set register NAME to given JSON-STRING")); fprintf(stdout, "%s", strUsage.c_str()); if (argc < 2) { fprintf(stderr, "Error: too few parameters\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } return CONTINUE_EXECUTION; } static void RegisterSetJson(const std::string &key, const std::string &rawJson) { UniValue val; if (!val.read(rawJson)) { std::string strErr = "Cannot parse JSON for key " + key; throw std::runtime_error(strErr); } registers[key] = val; } static void RegisterSet(const std::string &strInput) { // separate NAME:VALUE in string size_t pos = strInput.find(':'); if ((pos == std::string::npos) || (pos == 0) || (pos == (strInput.size() - 1))) { throw std::runtime_error("Register input requires NAME:VALUE"); } std::string key = strInput.substr(0, pos); std::string valStr = strInput.substr(pos + 1, std::string::npos); RegisterSetJson(key, valStr); } static void RegisterLoad(const std::string &strInput) { // separate NAME:FILENAME in string size_t pos = strInput.find(':'); if ((pos == std::string::npos) || (pos == 0) || (pos == (strInput.size() - 1))) { throw std::runtime_error("Register load requires NAME:FILENAME"); } std::string key = strInput.substr(0, pos); std::string filename = strInput.substr(pos + 1, std::string::npos); FILE *f = fopen(filename.c_str(), "r"); if (!f) { std::string strErr = "Cannot open file " + filename; throw std::runtime_error(strErr); } // load file chunks into one big buffer std::string valStr; while ((!feof(f)) && (!ferror(f))) { char buf[4096]; int bread = fread(buf, 1, sizeof(buf), f); if (bread <= 0) { break; } valStr.insert(valStr.size(), buf, bread); } int error = ferror(f); fclose(f); if (error) { std::string strErr = "Error reading file " + filename; throw std::runtime_error(strErr); } // evaluate as JSON buffer register RegisterSetJson(key, valStr); } static Amount ExtractAndValidateValue(const std::string &strValue) { Amount value; if (!ParseMoney(strValue, value)) { throw std::runtime_error("invalid TX output value"); } return value; } static void MutateTxVersion(CMutableTransaction &tx, const std::string &cmdVal) { int64_t newVersion = atoi64(cmdVal); if (newVersion < 1 || newVersion > CTransaction::MAX_STANDARD_VERSION) { throw std::runtime_error("Invalid TX version requested"); } tx.nVersion = int(newVersion); } static void MutateTxLocktime(CMutableTransaction &tx, const std::string &cmdVal) { int64_t newLocktime = atoi64(cmdVal); if (newLocktime < 0LL || newLocktime > 0xffffffffLL) { throw std::runtime_error("Invalid TX locktime requested"); } tx.nLockTime = (unsigned int)newLocktime; } static void MutateTxAddInput(CMutableTransaction &tx, const std::string &strInput) { std::vector vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); // separate TXID:VOUT in string if (vStrInputParts.size() < 2) { throw std::runtime_error("TX input missing separator"); } // extract and validate TXID std::string strTxid = vStrInputParts[0]; if ((strTxid.size() != 64) || !IsHex(strTxid)) { throw std::runtime_error("invalid TX input txid"); } uint256 txid(uint256S(strTxid)); static const unsigned int minTxOutSz = 9; static const unsigned int maxVout = MAX_TX_SIZE / minTxOutSz; // extract and validate 
vout std::string strVout = vStrInputParts[1]; int vout = atoi(strVout); if ((vout < 0) || (vout > (int)maxVout)) { throw std::runtime_error("invalid TX input vout"); } // extract the optional sequence number uint32_t nSequenceIn = std::numeric_limits::max(); if (vStrInputParts.size() > 2) { nSequenceIn = std::stoul(vStrInputParts[2]); } // append to transaction input list CTxIn txin(txid, vout, CScript(), nSequenceIn); tx.vin.push_back(txin); } static void MutateTxAddOutAddr(CMutableTransaction &tx, const std::string &strInput) { // Separate into VALUE:ADDRESS std::vector vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); if (vStrInputParts.size() != 2) { throw std::runtime_error("TX output missing or too many separators"); } // Extract and validate VALUE Amount value = ExtractAndValidateValue(vStrInputParts[0]); // extract and validate ADDRESS std::string strAddr = vStrInputParts[1]; CTxDestination destination = DecodeDestination(strAddr); if (!IsValidDestination(destination)) { throw std::runtime_error("invalid TX output address"); } CScript scriptPubKey = GetScriptForDestination(destination); // construct TxOut, append to transaction output list CTxOut txout(value, scriptPubKey); tx.vout.push_back(txout); } static void MutateTxAddOutPubKey(CMutableTransaction &tx, const std::string &strInput) { // Separate into VALUE:PUBKEY[:FLAGS] std::vector vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); if (vStrInputParts.size() < 2 || vStrInputParts.size() > 3) { throw std::runtime_error("TX output missing or too many separators"); } // Extract and validate VALUE Amount value = ExtractAndValidateValue(vStrInputParts[0]); // Extract and validate PUBKEY CPubKey pubkey(ParseHex(vStrInputParts[1])); if (!pubkey.IsFullyValid()) { throw std::runtime_error("invalid TX output pubkey"); } CScript scriptPubKey = GetScriptForRawPubKey(pubkey); // Extract and validate FLAGS bool bScriptHash = false; if (vStrInputParts.size() == 3) { std::string flags = vStrInputParts[2]; bScriptHash = (flags.find("S") != std::string::npos); } if (bScriptHash) { // Get the ID for the script, and then construct a P2SH destination for // it. scriptPubKey = GetScriptForDestination(CScriptID(scriptPubKey)); } // construct TxOut, append to transaction output list CTxOut txout(value, scriptPubKey); tx.vout.push_back(txout); } static void MutateTxAddOutMultiSig(CMutableTransaction &tx, const std::string &strInput) { // Separate into VALUE:REQUIRED:NUMKEYS:PUBKEY1:PUBKEY2:....[:FLAGS] std::vector vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); // Check that there are enough parameters if (vStrInputParts.size() < 3) { throw std::runtime_error("Not enough multisig parameters"); } // Extract and validate VALUE Amount value = ExtractAndValidateValue(vStrInputParts[0]); // Extract REQUIRED uint32_t required = stoul(vStrInputParts[1]); // Extract NUMKEYS uint32_t numkeys = stoul(vStrInputParts[2]); // Validate there are the correct number of pubkeys if (vStrInputParts.size() < numkeys + 3) { throw std::runtime_error("incorrect number of multisig pubkeys"); } if (required < 1 || required > 20 || numkeys < 1 || numkeys > 20 || numkeys < required) { throw std::runtime_error("multisig parameter mismatch. 
Required " + std::to_string(required) + " of " + std::to_string(numkeys) + "signatures."); } // extract and validate PUBKEYs std::vector pubkeys; for (int pos = 1; pos <= int(numkeys); pos++) { CPubKey pubkey(ParseHex(vStrInputParts[pos + 2])); if (!pubkey.IsFullyValid()) { throw std::runtime_error("invalid TX output pubkey"); } pubkeys.push_back(pubkey); } // Extract FLAGS bool bScriptHash = false; if (vStrInputParts.size() == numkeys + 4) { std::string flags = vStrInputParts.back(); bScriptHash = (flags.find("S") != std::string::npos); } else if (vStrInputParts.size() > numkeys + 4) { // Validate that there were no more parameters passed throw std::runtime_error("Too many parameters"); } CScript scriptPubKey = GetScriptForMultisig(required, pubkeys); if (bScriptHash) { // Get the ID for the script, and then construct a P2SH destination for // it. scriptPubKey = GetScriptForDestination(CScriptID(scriptPubKey)); } // construct TxOut, append to transaction output list CTxOut txout(value, scriptPubKey); tx.vout.push_back(txout); } static void MutateTxAddOutData(CMutableTransaction &tx, const std::string &strInput) { Amount value(0); // separate [VALUE:]DATA in string size_t pos = strInput.find(':'); if (pos == 0) { throw std::runtime_error("TX output value not specified"); } if (pos != std::string::npos) { // Extract and validate VALUE value = ExtractAndValidateValue(strInput.substr(0, pos)); } // extract and validate DATA std::string strData = strInput.substr(pos + 1, std::string::npos); if (!IsHex(strData)) { throw std::runtime_error("invalid TX output data"); } std::vector data = ParseHex(strData); CTxOut txout(value, CScript() << OP_RETURN << data); tx.vout.push_back(txout); } static void MutateTxAddOutScript(CMutableTransaction &tx, const std::string &strInput) { // separate VALUE:SCRIPT[:FLAGS] std::vector vStrInputParts; boost::split(vStrInputParts, strInput, boost::is_any_of(":")); if (vStrInputParts.size() < 2) throw std::runtime_error("TX output missing separator"); // Extract and validate VALUE Amount value = ExtractAndValidateValue(vStrInputParts[0]); // extract and validate script std::string strScript = vStrInputParts[1]; CScript scriptPubKey = ParseScript(strScript); // Extract FLAGS bool bScriptHash = false; if (vStrInputParts.size() == 3) { std::string flags = vStrInputParts.back(); bScriptHash = (flags.find("S") != std::string::npos); } if (bScriptHash) { scriptPubKey = GetScriptForDestination(CScriptID(scriptPubKey)); } // construct TxOut, append to transaction output list CTxOut txout(value, scriptPubKey); tx.vout.push_back(txout); } static void MutateTxDelInput(CMutableTransaction &tx, const std::string &strInIdx) { // parse requested deletion index int inIdx = atoi(strInIdx); if (inIdx < 0 || inIdx >= (int)tx.vin.size()) { std::string strErr = "Invalid TX input index '" + strInIdx + "'"; throw std::runtime_error(strErr.c_str()); } // delete input from transaction tx.vin.erase(tx.vin.begin() + inIdx); } static void MutateTxDelOutput(CMutableTransaction &tx, const std::string &strOutIdx) { // parse requested deletion index int outIdx = atoi(strOutIdx); if (outIdx < 0 || outIdx >= (int)tx.vout.size()) { std::string strErr = "Invalid TX output index '" + strOutIdx + "'"; throw std::runtime_error(strErr.c_str()); } // delete output from transaction tx.vout.erase(tx.vout.begin() + outIdx); } static const unsigned int N_SIGHASH_OPTS = 12; static const struct { const char *flagStr; int flags; } sigHashOptions[N_SIGHASH_OPTS] = { {"ALL", SIGHASH_ALL}, {"NONE", SIGHASH_NONE}, 
{"SINGLE", SIGHASH_SINGLE}, {"ALL|ANYONECANPAY", SIGHASH_ALL | SIGHASH_ANYONECANPAY}, {"NONE|ANYONECANPAY", SIGHASH_NONE | SIGHASH_ANYONECANPAY}, {"SINGLE|ANYONECANPAY", SIGHASH_SINGLE | SIGHASH_ANYONECANPAY}, {"ALL|FORKID", SIGHASH_ALL | SIGHASH_FORKID}, {"NONE|FORKID", SIGHASH_NONE | SIGHASH_FORKID}, {"SINGLE|FORKID", SIGHASH_SINGLE | SIGHASH_FORKID}, {"ALL|FORKID|ANYONECANPAY", SIGHASH_ALL | SIGHASH_FORKID | SIGHASH_ANYONECANPAY}, {"NONE|FORKID|ANYONECANPAY", SIGHASH_NONE | SIGHASH_FORKID | SIGHASH_ANYONECANPAY}, {"SINGLE|FORKID|ANYONECANPAY", SIGHASH_SINGLE | SIGHASH_FORKID | SIGHASH_ANYONECANPAY}, }; static bool findSigHashFlags(SigHashType &sigHashType, const std::string &flagStr) { sigHashType = SigHashType(); for (unsigned int i = 0; i < N_SIGHASH_OPTS; i++) { if (flagStr == sigHashOptions[i].flagStr) { sigHashType = SigHashType(sigHashOptions[i].flags); return true; } } return false; } uint256 ParseHashUO(std::map &o, std::string strKey) { if (!o.count(strKey)) { return uint256(); } return ParseHashUV(o[strKey], strKey); } std::vector ParseHexUO(std::map &o, std::string strKey) { if (!o.count(strKey)) { std::vector emptyVec; return emptyVec; } return ParseHexUV(o[strKey], strKey); } static Amount AmountFromValue(const UniValue &value) { if (!value.isNum() && !value.isStr()) { throw std::runtime_error("Amount is not a number or string"); } int64_t n; if (!ParseFixedPoint(value.getValStr(), 8, &n)) { throw std::runtime_error("Invalid amount"); } Amount amount = Amount(n); if (!MoneyRange(amount)) { throw std::runtime_error("Amount out of range"); } return amount; } static void MutateTxSign(CMutableTransaction &tx, const std::string &flagStr) { SigHashType sigHashType = SigHashType().withForkId(true); if ((flagStr.size() > 0) && !findSigHashFlags(sigHashType, flagStr)) { throw std::runtime_error("unknown sighash flag/sign option"); } std::vector txVariants; txVariants.push_back(tx); // mergedTx will end up with all the signatures; it starts as a clone of the // raw tx: CMutableTransaction mergedTx(txVariants[0]); bool fComplete = true; CCoinsView viewDummy; CCoinsViewCache view(&viewDummy); if (!registers.count("privatekeys")) { throw std::runtime_error("privatekeys register variable must be set."); } CBasicKeyStore tempKeystore; UniValue keysObj = registers["privatekeys"]; for (unsigned int kidx = 0; kidx < keysObj.size(); kidx++) { if (!keysObj[kidx].isStr()) { throw std::runtime_error("privatekey not a std::string"); } CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(keysObj[kidx].getValStr()); if (!fGood) { throw std::runtime_error("privatekey not valid"); } CKey key = vchSecret.GetKey(); tempKeystore.AddKey(key); } // Add previous txouts given in the RPC call: if (!registers.count("prevtxs")) { throw std::runtime_error("prevtxs register variable must be set."); } UniValue prevtxsObj = registers["prevtxs"]; for (unsigned int previdx = 0; previdx < prevtxsObj.size(); previdx++) { UniValue prevOut = prevtxsObj[previdx]; if (!prevOut.isObject()) { throw std::runtime_error("expected prevtxs internal object"); } std::map types = { {"txid", UniValue::VSTR}, {"vout", UniValue::VNUM}, {"scriptPubKey", UniValue::VSTR}}; if (!prevOut.checkObject(types)) { throw std::runtime_error("prevtxs internal object typecheck fail"); } uint256 txid = ParseHashUV(prevOut["txid"], "txid"); int nOut = atoi(prevOut["vout"].getValStr()); if (nOut < 0) { throw std::runtime_error("vout must be positive"); } COutPoint out(txid, nOut); std::vector pkData( ParseHexUV(prevOut["scriptPubKey"], 
"scriptPubKey")); CScript scriptPubKey(pkData.begin(), pkData.end()); { const Coin &coin = view.AccessCoin(out); if (!coin.IsSpent() && coin.GetTxOut().scriptPubKey != scriptPubKey) { std::string err("Previous output scriptPubKey mismatch:\n"); err = err + ScriptToAsmStr(coin.GetTxOut().scriptPubKey) + "\nvs:\n" + ScriptToAsmStr(scriptPubKey); throw std::runtime_error(err); } CTxOut txout; txout.scriptPubKey = scriptPubKey; txout.nValue = Amount(0); if (prevOut.exists("amount")) { txout.nValue = AmountFromValue(prevOut["amount"]); } view.AddCoin(out, Coin(txout, 1, false), true); } // If redeemScript given and private keys given, add redeemScript to the // tempKeystore so it can be signed: if (scriptPubKey.IsPayToScriptHash() && prevOut.exists("redeemScript")) { UniValue v = prevOut["redeemScript"]; std::vector rsData(ParseHexUV(v, "redeemScript")); CScript redeemScript(rsData.begin(), rsData.end()); tempKeystore.AddCScript(redeemScript); } } const CKeyStore &keystore = tempKeystore; // Sign what we can: for (size_t i = 0; i < mergedTx.vin.size(); i++) { CTxIn &txin = mergedTx.vin[i]; const Coin &coin = view.AccessCoin(txin.prevout); if (coin.IsSpent()) { fComplete = false; continue; } const CScript &prevPubKey = coin.GetTxOut().scriptPubKey; const Amount amount = coin.GetTxOut().nValue; SignatureData sigdata; // Only sign SIGHASH_SINGLE if there's a corresponding output: if ((sigHashType.getBaseSigHashType() != BaseSigHashType::SINGLE) || (i < mergedTx.vout.size())) { ProduceSignature(MutableTransactionSignatureCreator( &keystore, &mergedTx, i, amount, sigHashType), prevPubKey, sigdata); } // ... and merge in other signatures: for (const CTransaction &txv : txVariants) { sigdata = CombineSignatures( prevPubKey, MutableTransactionSignatureChecker(&mergedTx, i, amount), sigdata, DataFromTransaction(txv, i)); } UpdateTransaction(mergedTx, i, sigdata); if (!VerifyScript( txin.scriptSig, prevPubKey, STANDARD_SCRIPT_VERIFY_FLAGS, MutableTransactionSignatureChecker(&mergedTx, i, amount))) { fComplete = false; } } if (fComplete) { // do nothing... 
for now // perhaps store this for later optional JSON output } tx = mergedTx; } class Secp256k1Init { ECCVerifyHandle globalVerifyHandle; public: Secp256k1Init() { ECC_Start(); } ~Secp256k1Init() { ECC_Stop(); } }; static void MutateTx(CMutableTransaction &tx, const std::string &command, const std::string &commandVal) { std::unique_ptr ecc; if (command == "nversion") { MutateTxVersion(tx, commandVal); } else if (command == "locktime") { MutateTxLocktime(tx, commandVal); } else if (command == "delin") { MutateTxDelInput(tx, commandVal); } else if (command == "in") { MutateTxAddInput(tx, commandVal); } else if (command == "delout") { MutateTxDelOutput(tx, commandVal); } else if (command == "outaddr") { MutateTxAddOutAddr(tx, commandVal); } else if (command == "outpubkey") { MutateTxAddOutPubKey(tx, commandVal); } else if (command == "outmultisig") { MutateTxAddOutMultiSig(tx, commandVal); } else if (command == "outscript") { MutateTxAddOutScript(tx, commandVal); } else if (command == "outdata") { MutateTxAddOutData(tx, commandVal); } else if (command == "sign") { if (!ecc) { ecc.reset(new Secp256k1Init()); } MutateTxSign(tx, commandVal); } else if (command == "load") { RegisterLoad(commandVal); } else if (command == "set") { RegisterSet(commandVal); } else { throw std::runtime_error("unknown command"); } } static void OutputTxJSON(const CTransaction &tx) { UniValue entry(UniValue::VOBJ); TxToUniv(tx, uint256(), entry); std::string jsonOutput = entry.write(4); fprintf(stdout, "%s\n", jsonOutput.c_str()); } static void OutputTxHash(const CTransaction &tx) { // the hex-encoded transaction id. std::string strHexHash = tx.GetId().GetHex(); fprintf(stdout, "%s\n", strHexHash.c_str()); } static void OutputTxHex(const CTransaction &tx) { std::string strHex = EncodeHexTx(tx); fprintf(stdout, "%s\n", strHex.c_str()); } static void OutputTx(const CTransaction &tx) { - if (GetBoolArg("-json", false)) { + if (gArgs.GetBoolArg("-json", false)) { OutputTxJSON(tx); - } else if (GetBoolArg("-txid", false)) { + } else if (gArgs.GetBoolArg("-txid", false)) { OutputTxHash(tx); } else { OutputTxHex(tx); } } static std::string readStdin() { char buf[4096]; std::string ret; while (!feof(stdin)) { size_t bread = fread(buf, 1, sizeof(buf), stdin); ret.append(buf, bread); if (bread < sizeof(buf)) { break; } } if (ferror(stdin)) { throw std::runtime_error("error reading stdin"); } boost::algorithm::trim_right(ret); return ret; } static int CommandLineRawTx(int argc, char *argv[]) { std::string strPrint; int nRet = 0; try { // Skip switches; Permit common stdin convention "-" while (argc > 1 && IsSwitchChar(argv[1][0]) && (argv[1][1] != 0)) { argc--; argv++; } CMutableTransaction tx; int startArg; if (!fCreateBlank) { // require at least one param if (argc < 2) { throw std::runtime_error("too few parameters"); } // param: hex-encoded bitcoin transaction std::string strHexTx(argv[1]); // "-" implies standard input if (strHexTx == "-") { strHexTx = readStdin(); } if (!DecodeHexTx(tx, strHexTx)) { throw std::runtime_error("invalid transaction encoding"); } startArg = 2; } else { startArg = 1; } for (int i = startArg; i < argc; i++) { std::string arg = argv[i]; std::string key, value; size_t eqpos = arg.find('='); if (eqpos == std::string::npos) { key = arg; } else { key = arg.substr(0, eqpos); value = arg.substr(eqpos + 1); } MutateTx(tx, key, value); } OutputTx(tx); } catch (const boost::thread_interrupted &) { throw; } catch (const std::exception &e) { strPrint = std::string("error: ") + e.what(); nRet = EXIT_FAILURE; } 
catch (...) { PrintExceptionContinue(nullptr, "CommandLineRawTx()"); throw; } if (strPrint != "") { fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str()); } return nRet; } int main(int argc, char *argv[]) { SetupEnvironment(); try { int ret = AppInitRawTx(argc, argv); if (ret != CONTINUE_EXECUTION) return ret; } catch (const std::exception &e) { PrintExceptionContinue(&e, "AppInitRawTx()"); return EXIT_FAILURE; } catch (...) { PrintExceptionContinue(nullptr, "AppInitRawTx()"); return EXIT_FAILURE; } int ret = EXIT_FAILURE; try { ret = CommandLineRawTx(argc, argv); } catch (const std::exception &e) { PrintExceptionContinue(&e, "CommandLineRawTx()"); } catch (...) { PrintExceptionContinue(nullptr, "CommandLineRawTx()"); } return ret; } diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index e35faf44b6..f7672bdd5c 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -1,208 +1,208 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "chainparams.h" #include "clientversion.h" #include "compat.h" #include "config.h" #include "fs.h" #include "httprpc.h" #include "httpserver.h" #include "init.h" #include "noui.h" #include "rpc/server.h" #include "scheduler.h" #include "util.h" #include "utilstrencodings.h" #include #include #include /* Introduction text for doxygen: */ /*! \mainpage Developer documentation * * \section intro_sec Introduction * * This is the developer documentation of the reference client for an * experimental new digital currency called Bitcoin (https://www.bitcoin.org/), * which enables instant payments to anyone, anywhere in the world. Bitcoin uses * peer-to-peer technology to operate with no central authority: managing * transactions and issuing money are carried out collectively by the network. * * The software is a community-driven open source project, released under the * MIT license. * * \section Navigation * Use the buttons Namespaces, Classes or * Files at the top of the page to start navigating the code. */ void WaitForShutdown(boost::thread_group *threadGroup) { bool fShutdown = ShutdownRequested(); // Tell the main threads to shutdown. while (!fShutdown) { MilliSleep(200); fShutdown = ShutdownRequested(); } if (threadGroup) { Interrupt(*threadGroup); threadGroup->join_all(); } } ////////////////////////////////////////////////////////////////////////////// // // Start // bool AppInit(int argc, char *argv[]) { boost::thread_group threadGroup; CScheduler scheduler; // FIXME: Ideally, we'd like to build the config here, but that's currently // not possible as the whole application has too many global state. However, // this is a first step. 
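Stepping back to the bitcoin-tx.cpp hunk that ends just above: CommandLineRawTx() walks the remaining arguments, splits each one on the first '=' into a command and a value, and hands the pair to MutateTx(). A small sketch of that parse-and-dispatch loop, with a hypothetical TxSketch and only two commands standing in for CMutableTransaction and the full MutateTx* set:

#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <string>

// Hypothetical placeholder for CMutableTransaction.
struct TxSketch {
    int nVersion = 1;
    unsigned int nLockTime = 0;
};

// Stand-in for MutateTx(): route "command=value" pairs to a mutator.
static void ApplyCommand(TxSketch &tx, const std::string &key,
                         const std::string &value) {
    if (key == "nversion") {
        tx.nVersion = std::stoi(value);
    } else if (key == "locktime") {
        tx.nLockTime = static_cast<unsigned int>(std::stoul(value));
    } else {
        throw std::runtime_error("unknown command");
    }
}

int main(int argc, char *argv[]) {
    TxSketch tx;
    try {
        for (int i = 1; i < argc; ++i) {
            std::string arg = argv[i];
            std::string key = arg, value;
            size_t eqpos = arg.find('=');
            if (eqpos != std::string::npos) {
                key = arg.substr(0, eqpos);
                value = arg.substr(eqpos + 1);
            }
            ApplyCommand(tx, key, value);
        }
    } catch (const std::exception &e) {
        std::fprintf(stderr, "error: %s\n", e.what());
        return EXIT_FAILURE;
    }
    std::printf("nversion=%d locktime=%u\n", tx.nVersion, tx.nLockTime);
    return EXIT_SUCCESS;
}
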
auto &config = const_cast(GetConfig()); bool fRet = false; // // Parameters // // If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's // main() - ParseParameters(argc, argv); + gArgs.ParseParameters(argc, argv); // Process help and version before taking care about datadir - if (IsArgSet("-?") || IsArgSet("-h") || IsArgSet("-help") || - IsArgSet("-version")) { + if (gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || + gArgs.IsArgSet("-help") || gArgs.IsArgSet("-version")) { std::string strUsage = strprintf(_("%s Daemon"), _(PACKAGE_NAME)) + " " + _("version") + " " + FormatFullVersion() + "\n"; - if (IsArgSet("-version")) { + if (gArgs.IsArgSet("-version")) { strUsage += FormatParagraph(LicenseInfo()); } else { strUsage += "\n" + _("Usage:") + "\n" + " bitcoind [options] " + strprintf(_("Start %s Daemon"), _(PACKAGE_NAME)) + "\n"; strUsage += "\n" + HelpMessage(HMM_BITCOIND); } fprintf(stdout, "%s", strUsage.c_str()); return true; } try { if (!fs::is_directory(GetDataDir(false))) { fprintf(stderr, "Error: Specified data directory \"%s\" does not exist.\n", - GetArg("-datadir", "").c_str()); + gArgs.GetArg("-datadir", "").c_str()); return false; } try { - ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)); + gArgs.ReadConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)); } catch (const std::exception &e) { fprintf(stderr, "Error reading configuration file: %s\n", e.what()); return false; } // Check for -testnet or -regtest parameter (Params() calls are only // valid after this clause) try { SelectParams(ChainNameFromCommandLine()); } catch (const std::exception &e) { fprintf(stderr, "Error: %s\n", e.what()); return false; } // Command-line RPC bool fCommandLine = false; for (int i = 1; i < argc; i++) if (!IsSwitchChar(argv[i][0]) && !boost::algorithm::istarts_with(argv[i], "bitcoin:")) fCommandLine = true; if (fCommandLine) { fprintf(stderr, "Error: There is no RPC client functionality in " "bitcoind anymore. Use the bitcoin-cli utility " "instead.\n"); exit(EXIT_FAILURE); } // -server defaults to true for bitcoind but not for the GUI so do this // here - SoftSetBoolArg("-server", true); + gArgs.SoftSetBoolArg("-server", true); // Set this early so that parameter interactions go to console InitLogging(); InitParameterInteraction(); if (!AppInitBasicSetup()) { // InitError will have been called with detailed error, which ends // up on console exit(1); } if (!AppInitParameterInteraction(config)) { // InitError will have been called with detailed error, which ends // up on console exit(1); } if (!AppInitSanityChecks()) { // InitError will have been called with detailed error, which ends // up on console exit(1); } - if (GetBoolArg("-daemon", false)) { + if (gArgs.GetBoolArg("-daemon", false)) { #if HAVE_DECL_DAEMON fprintf(stdout, "Bitcoin server starting\n"); // Daemonize if (daemon(1, 0)) { // don't chdir (1), do close FDs (0) fprintf(stderr, "Error: daemon() failed: %s\n", strerror(errno)); return false; } #else fprintf( stderr, "Error: -daemon is not supported on this operating system\n"); return false; #endif // HAVE_DECL_DAEMON } fRet = AppInitMain(config, threadGroup, scheduler); } catch (const std::exception &e) { PrintExceptionContinue(&e, "AppInit()"); } catch (...) 
{ PrintExceptionContinue(nullptr, "AppInit()"); } if (!fRet) { Interrupt(threadGroup); // threadGroup.join_all(); was left out intentionally here, because we // didn't re-test all of the startup-failure cases to make sure they // don't result in a hang due to some // thread-blocking-waiting-for-another-thread-during-startup case. } else { WaitForShutdown(&threadGroup); } Shutdown(); return fRet; } int main(int argc, char *argv[]) { SetupEnvironment(); // Connect bitcoind signal handlers noui_connect(); return (AppInit(argc, argv) ? EXIT_SUCCESS : EXIT_FAILURE); } diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp index bdb429f749..69714aa108 100644 --- a/src/chainparamsbase.cpp +++ b/src/chainparamsbase.cpp @@ -1,99 +1,99 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "chainparamsbase.h" #include "tinyformat.h" #include "util.h" #include const std::string CBaseChainParams::MAIN = "main"; const std::string CBaseChainParams::TESTNET = "test"; const std::string CBaseChainParams::REGTEST = "regtest"; void AppendParamsHelpMessages(std::string &strUsage, bool debugHelp) { strUsage += HelpMessageGroup(_("Chain selection options:")); strUsage += HelpMessageOpt("-testnet", _("Use the test chain")); if (debugHelp) { strUsage += HelpMessageOpt( "-regtest", "Enter regression test mode, which uses a special " "chain in which blocks can be solved instantly. " "This is intended for regression testing tools and app " "development."); } } /** * Main network */ class CBaseMainParams : public CBaseChainParams { public: CBaseMainParams() { nRPCPort = 8332; } }; static CBaseMainParams mainParams; /** * Testnet (v3) */ class CBaseTestNetParams : public CBaseChainParams { public: CBaseTestNetParams() { nRPCPort = 18332; strDataDir = "testnet3"; } }; static CBaseTestNetParams testNetParams; /* * Regression test */ class CBaseRegTestParams : public CBaseChainParams { public: CBaseRegTestParams() { nRPCPort = 18332; strDataDir = "regtest"; } }; static CBaseRegTestParams regTestParams; static CBaseChainParams *pCurrentBaseParams = 0; const CBaseChainParams &BaseParams() { assert(pCurrentBaseParams); return *pCurrentBaseParams; } CBaseChainParams &BaseParams(const std::string &chain) { if (chain == CBaseChainParams::MAIN) return mainParams; else if (chain == CBaseChainParams::TESTNET) return testNetParams; else if (chain == CBaseChainParams::REGTEST) return regTestParams; else throw std::runtime_error( strprintf("%s: Unknown chain %s.", __func__, chain)); } void SelectBaseParams(const std::string &chain) { pCurrentBaseParams = &BaseParams(chain); } std::string ChainNameFromCommandLine() { - bool fRegTest = GetBoolArg("-regtest", false); - bool fTestNet = GetBoolArg("-testnet", false); + bool fRegTest = gArgs.GetBoolArg("-regtest", false); + bool fTestNet = gArgs.GetBoolArg("-testnet", false); if (fTestNet && fRegTest) throw std::runtime_error( "Invalid combination of -regtest and -testnet."); if (fRegTest) return CBaseChainParams::REGTEST; if (fTestNet) return CBaseChainParams::TESTNET; return CBaseChainParams::MAIN; } bool AreBaseParamsConfigured() { return pCurrentBaseParams != nullptr; } diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index e98963d737..557dbe254f 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -1,227 +1,227 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed 
under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "dbwrapper.h" #include "random.h" #include "util.h" #include #include #include #include #include #include #include class CBitcoinLevelDBLogger : public leveldb::Logger { public: // This code is adapted from posix_logger.h, which is why it is using // vsprintf. // Please do not do this in normal code void Logv(const char *format, va_list ap) override { if (!LogAcceptCategory(BCLog::LEVELDB)) { return; } char buffer[500]; for (int iter = 0; iter < 2; iter++) { char *base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char *p = base; char *limit = base + bufsize; // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); // Do not use vsnprintf elsewhere in bitcoin source code, see // above. p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); base[std::min(bufsize - 1, (int)(p - base))] = '\0'; LogPrintf("leveldb: %s", base); if (base != buffer) { delete[] base; } break; } } }; static leveldb::Options GetOptions(size_t nCacheSize) { leveldb::Options options; options.block_cache = leveldb::NewLRUCache(nCacheSize / 2); // up to two write buffers may be held in memory simultaneously options.write_buffer_size = nCacheSize / 4; options.filter_policy = leveldb::NewBloomFilterPolicy(10); options.compression = leveldb::kNoCompression; options.max_open_files = 64; options.info_log = new CBitcoinLevelDBLogger(); if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) { // LevelDB versions before 1.16 consider short writes to be corruption. // Only trigger error on corruption in later versions. options.paranoid_checks = true; } return options; } CDBWrapper::CDBWrapper(const fs::path &path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate) { penv = nullptr; readoptions.verify_checksums = true; iteroptions.verify_checksums = true; iteroptions.fill_cache = false; syncoptions.sync = true; options = GetOptions(nCacheSize); options.create_if_missing = true; if (fMemory) { penv = leveldb::NewMemEnv(leveldb::Env::Default()); options.env = penv; } else { if (fWipe) { LogPrintf("Wiping LevelDB in %s\n", path.string()); leveldb::Status result = leveldb::DestroyDB(path.string(), options); dbwrapper_private::HandleError(result); } TryCreateDirectories(path); LogPrintf("Opening LevelDB in %s\n", path.string()); } leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb); dbwrapper_private::HandleError(status); LogPrintf("Opened LevelDB successfully\n"); - if (GetBoolArg("-forcecompactdb", false)) { + if (gArgs.GetBoolArg("-forcecompactdb", false)) { LogPrintf("Starting database compaction of %s\n", path.string()); pdb->CompactRange(nullptr, nullptr); LogPrintf("Finished database compaction of %s\n", path.string()); } // The base-case obfuscation key, which is a noop. obfuscate_key = std::vector(OBFUSCATE_KEY_NUM_BYTES, '\000'); bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key); if (!key_exists && obfuscate && IsEmpty()) { // Initialize non-degenerate obfuscation if it won't upset existing, // non-obfuscated data. 
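The constructor above reads any existing obfuscation key and, in the block that continues below, creates and stores a fresh one when the database is new. The XOR that actually applies the key happens on the read/write path outside this .cpp file; a standalone sketch of the rolling XOR such an 8-byte key implies, with hypothetical names:

#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the rolling XOR an 8-byte obfuscation key implies. The real
// helper lives on the serialization path, not here; names are placeholders.
static void XorWithKey(std::vector<uint8_t> &data,
                       const std::vector<uint8_t> &key) {
    if (key.empty()) return;
    for (size_t i = 0, j = 0; i < data.size(); ++i) {
        data[i] ^= key[j++];
        // The key repeats over the data ("rolling" XOR).
        if (j == key.size()) j = 0;
    }
}

int main() {
    const std::vector<uint8_t> key = {0xde, 0xad, 0xbe, 0xef,
                                      0x01, 0x02, 0x03, 0x04};
    std::vector<uint8_t> value = {'h', 'e', 'l', 'l', 'o'};
    const std::vector<uint8_t> original = value;

    XorWithKey(value, key); // obfuscate before writing to LevelDB
    XorWithKey(value, key); // de-obfuscate after reading it back
    assert(value == original);
    return 0;
}
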
std::vector new_key = CreateObfuscateKey(); // Write `new_key` so we don't obfuscate the key with itself Write(OBFUSCATE_KEY_KEY, new_key); obfuscate_key = new_key; LogPrintf("Wrote new obfuscate key for %s: %s\n", path.string(), HexStr(obfuscate_key)); } LogPrintf("Using obfuscation key for %s: %s\n", path.string(), HexStr(obfuscate_key)); } CDBWrapper::~CDBWrapper() { delete pdb; pdb = nullptr; delete options.filter_policy; options.filter_policy = nullptr; delete options.info_log; options.info_log = nullptr; delete options.block_cache; options.block_cache = nullptr; delete penv; options.env = nullptr; } bool CDBWrapper::WriteBatch(CDBBatch &batch, bool fSync) { leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch); dbwrapper_private::HandleError(status); return true; } // Prefixed with null character to avoid collisions with other keys // // We must use a string constructor which specifies length so that we copy past // the null-terminator. const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14); const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8; /** * Returns a string (consisting of 8 random bytes) suitable for use as an * obfuscating XOR key. */ std::vector CDBWrapper::CreateObfuscateKey() const { uint8_t buff[OBFUSCATE_KEY_NUM_BYTES]; GetRandBytes(buff, OBFUSCATE_KEY_NUM_BYTES); return std::vector(&buff[0], &buff[OBFUSCATE_KEY_NUM_BYTES]); } bool CDBWrapper::IsEmpty() { std::unique_ptr it(NewIterator()); it->SeekToFirst(); return !(it->Valid()); } CDBIterator::~CDBIterator() { delete piter; } bool CDBIterator::Valid() { return piter->Valid(); } void CDBIterator::SeekToFirst() { piter->SeekToFirst(); } void CDBIterator::Next() { piter->Next(); } namespace dbwrapper_private { void HandleError(const leveldb::Status &status) { if (status.ok()) { return; } LogPrintf("%s\n", status.ToString()); if (status.IsCorruption()) { throw dbwrapper_error("Database corrupted"); } if (status.IsIOError()) { throw dbwrapper_error("Database I/O error"); } if (status.IsNotFound()) { throw dbwrapper_error("Database entry missing"); } throw dbwrapper_error("Unknown database error"); } const std::vector &GetObfuscateKey(const CDBWrapper &w) { return w.obfuscate_key; } }; // namespace dbwrapper_private diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 206d37837c..4ad1059179 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -1,271 +1,271 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "httprpc.h" #include "base58.h" #include "chainparams.h" #include "config.h" #include "crypto/hmac_sha256.h" #include "httpserver.h" #include "random.h" #include "rpc/protocol.h" #include "rpc/server.h" #include "sync.h" #include "ui_interface.h" #include "util.h" #include "utilstrencodings.h" #include // boost::trim #include /** WWW-Authenticate to present with 401 Unauthorized response */ static const char *WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\""; /** Simple one-shot callback timer to be used by the RPC mechanism to e.g. * re-lock the wallet. 
*/ class HTTPRPCTimer : public RPCTimerBase { public: HTTPRPCTimer(struct event_base *eventBase, std::function &func, int64_t millis) : ev(eventBase, false, func) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = (millis % 1000) * 1000; ev.trigger(&tv); } private: HTTPEvent ev; }; class HTTPRPCTimerInterface : public RPCTimerInterface { public: HTTPRPCTimerInterface(struct event_base *_base) : base(_base) {} const char *Name() override { return "HTTP"; } RPCTimerBase *NewTimer(std::function &func, int64_t millis) override { return new HTTPRPCTimer(base, func, millis); } private: struct event_base *base; }; /* Pre-base64-encoded authentication token */ static std::string strRPCUserColonPass; /* Stored RPC timer interface (for unregistration) */ static HTTPRPCTimerInterface *httpRPCTimerInterface = 0; static void JSONErrorReply(HTTPRequest *req, const UniValue &objError, const UniValue &id) { // Send error reply from json-rpc error object. int nStatus = HTTP_INTERNAL_SERVER_ERROR; int code = find_value(objError, "code").get_int(); if (code == RPC_INVALID_REQUEST) nStatus = HTTP_BAD_REQUEST; else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND; std::string strReply = JSONRPCReply(NullUniValue, objError, id); req->WriteHeader("Content-Type", "application/json"); req->WriteReply(nStatus, strReply); } // This function checks username and password against -rpcauth entries from // config file. static bool multiUserAuthorized(std::string strUserPass) { if (strUserPass.find(":") == std::string::npos) { return false; } std::string strUser = strUserPass.substr(0, strUserPass.find(":")); std::string strPass = strUserPass.substr(strUserPass.find(":") + 1); if (gArgs.IsArgSet("-rpcauth")) { // Search for multi-user login/pass "rpcauth" from config for (const std::string &strRPCAuth : gArgs.GetArgs("-rpcauth")) { std::vector vFields; boost::split(vFields, strRPCAuth, boost::is_any_of(":$")); if (vFields.size() != 3) { // Incorrect formatting in config file continue; } std::string strName = vFields[0]; if (!TimingResistantEqual(strName, strUser)) { continue; } std::string strSalt = vFields[1]; std::string strHash = vFields[2]; static const unsigned int KEY_SIZE = 32; uint8_t out[KEY_SIZE]; CHMAC_SHA256(reinterpret_cast(strSalt.c_str()), strSalt.size()) .Write(reinterpret_cast(strPass.c_str()), strPass.size()) .Finalize(out); std::vector hexvec(out, out + KEY_SIZE); std::string strHashFromPass = HexStr(hexvec); if (TimingResistantEqual(strHashFromPass, strHash)) { return true; } } } return false; } static bool RPCAuthorized(const std::string &strAuth, std::string &strAuthUsernameOut) { // Belt-and-suspenders measure if InitRPCAuthentication was not called. 
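multiUserAuthorized() above checks a submitted user:password pair against -rpcauth entries of the form NAME:SALT$HASH, where HASH is the hex HMAC-SHA256 of the password keyed with the salt, and every comparison goes through TimingResistantEqual(). A self-contained sketch of that check, using OpenSSL's HMAC() purely as a stand-in for the codebase's CHMAC_SHA256 and hypothetical helper names (build with -lcrypto):

#include <openssl/evp.h>
#include <openssl/hmac.h>

#include <cstdio>
#include <string>

// HMAC-SHA256(key = salt, message = password), hex-encoded.
static std::string HmacSha256Hex(const std::string &salt,
                                 const std::string &pass) {
    unsigned char out[EVP_MAX_MD_SIZE];
    unsigned int outlen = 0;
    HMAC(EVP_sha256(), salt.data(), static_cast<int>(salt.size()),
         reinterpret_cast<const unsigned char *>(pass.data()), pass.size(),
         out, &outlen);
    static const char *hex = "0123456789abcdef";
    std::string ret;
    for (unsigned int i = 0; i < outlen; ++i) {
        ret += hex[out[i] >> 4];
        ret += hex[out[i] & 0xf];
    }
    return ret;
}

// Compare every byte regardless of mismatches, like TimingResistantEqual.
static bool ConstantTimeEqual(const std::string &a, const std::string &b) {
    if (a.size() != b.size()) return false;
    unsigned char diff = 0;
    for (size_t i = 0; i < a.size(); ++i) diff |= a[i] ^ b[i];
    return diff == 0;
}

// rpcauth entry format: NAME:SALT$HASH.
static bool CheckRpcAuth(const std::string &entry, const std::string &user,
                         const std::string &pass) {
    size_t colon = entry.find(':');
    if (colon == std::string::npos) return false;
    size_t dollar = entry.find('$', colon);
    if (dollar == std::string::npos) return false;
    std::string name = entry.substr(0, colon);
    std::string salt = entry.substr(colon + 1, dollar - colon - 1);
    std::string hash = entry.substr(dollar + 1);
    if (!ConstantTimeEqual(name, user)) return false;
    return ConstantTimeEqual(HmacSha256Hex(salt, pass), hash);
}

int main() {
    // Hypothetical entry for user "alice", password "pw"; in practice
    // share/rpcuser generates these lines for the config file.
    std::string entry = "alice:1c4a3e$" + HmacSha256Hex("1c4a3e", "pw");
    std::printf("%s\n", CheckRpcAuth(entry, "alice", "pw") ? "ok" : "rejected");
    return 0;
}
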
if (strRPCUserColonPass.empty()) { return false; } if (strAuth.substr(0, 6) != "Basic ") { return false; } std::string strUserPass64 = strAuth.substr(6); boost::trim(strUserPass64); std::string strUserPass = DecodeBase64(strUserPass64); if (strUserPass.find(":") != std::string::npos) { strAuthUsernameOut = strUserPass.substr(0, strUserPass.find(":")); } // Check if authorized under single-user field if (TimingResistantEqual(strUserPass, strRPCUserColonPass)) { return true; } return multiUserAuthorized(strUserPass); } static bool HTTPReq_JSONRPC(Config &config, HTTPRequest *req, const std::string &) { // JSONRPC handles only POST if (req->GetRequestMethod() != HTTPRequest::POST) { req->WriteReply(HTTP_BAD_METHOD, "JSONRPC server handles only POST requests"); return false; } // Check authorization std::pair authHeader = req->GetHeader("authorization"); if (!authHeader.first) { req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA); req->WriteReply(HTTP_UNAUTHORIZED); return false; } JSONRPCRequest jreq; if (!RPCAuthorized(authHeader.second, jreq.authUser)) { LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", req->GetPeer().ToString()); /* Deter brute-forcing. * If this results in a DoS the user really shouldn't have their RPC * port exposed. */ MilliSleep(250); req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA); req->WriteReply(HTTP_UNAUTHORIZED); return false; } try { // Parse request UniValue valRequest; if (!valRequest.read(req->ReadBody())) throw JSONRPCError(RPC_PARSE_ERROR, "Parse error"); // Set the URI jreq.URI = req->GetURI(); std::string strReply; // singleton request if (valRequest.isObject()) { jreq.parse(valRequest); UniValue result = tableRPC.execute(config, jreq); // Send reply strReply = JSONRPCReply(result, NullUniValue, jreq.id); // array of requests } else if (valRequest.isArray()) { strReply = JSONRPCExecBatch(config, valRequest.get_array()); } else { throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error"); } req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strReply); } catch (const UniValue &objError) { JSONErrorReply(req, objError, jreq.id); return false; } catch (const std::exception &e) { JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); return false; } return true; } static bool InitRPCAuthentication() { - if (GetArg("-rpcpassword", "") == "") { + if (gArgs.GetArg("-rpcpassword", "") == "") { LogPrintf("No rpcpassword set - using random cookie authentication\n"); if (!GenerateAuthCookie(&strRPCUserColonPass)) { // Same message as AbortNode. uiInterface.ThreadSafeMessageBox( _("Error: A fatal internal error occurred, see debug.log for " "details"), "", CClientUIInterface::MSG_ERROR); return false; } } else { LogPrintf("Config options rpcuser and rpcpassword will soon be " "deprecated. Locally-run instances may remove rpcuser to use " "cookie-based auth, or may be replaced with rpcauth. 
Please " "see share/rpcuser for rpcauth auth generation.\n"); - strRPCUserColonPass = - GetArg("-rpcuser", "") + ":" + GetArg("-rpcpassword", ""); + strRPCUserColonPass = gArgs.GetArg("-rpcuser", "") + ":" + + gArgs.GetArg("-rpcpassword", ""); } return true; } bool StartHTTPRPC() { LogPrint(BCLog::RPC, "Starting HTTP RPC server\n"); if (!InitRPCAuthentication()) return false; RegisterHTTPHandler("/", true, HTTPReq_JSONRPC); #ifdef ENABLE_WALLET // ifdef can be removed once we switch to better endpoint support and API // versioning RegisterHTTPHandler("/wallet/", false, HTTPReq_JSONRPC); #endif assert(EventBase()); httpRPCTimerInterface = new HTTPRPCTimerInterface(EventBase()); RPCSetTimerInterface(httpRPCTimerInterface); return true; } void InterruptHTTPRPC() { LogPrint(BCLog::RPC, "Interrupting HTTP RPC server\n"); } void StopHTTPRPC() { LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n"); UnregisterHTTPHandler("/", true); if (httpRPCTimerInterface) { RPCUnsetTimerInterface(httpRPCTimerInterface); delete httpRPCTimerInterface; httpRPCTimerInterface = 0; } } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index c58e7e2a84..b0a9fc95c4 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -1,659 +1,659 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "httpserver.h" #include "chainparamsbase.h" #include "compat.h" #include "netbase.h" #include "rpc/protocol.h" // For HTTP status codes #include "sync.h" #include "ui_interface.h" #include "util.h" #include #include #include #include #include #include #include #include #include #ifdef EVENT__HAVE_NETINET_IN_H #include #ifdef _XOPEN_SOURCE_EXTENDED #include #endif #endif #include #include #include #include /** Maximum size of http request (request line + headers) */ static const size_t MAX_HEADERS_SIZE = 8192; /** HTTP request work item */ class HTTPWorkItem final : public HTTPClosure { public: HTTPWorkItem(Config &_config, std::unique_ptr _req, const std::string &_path, const HTTPRequestHandler &_func) : req(std::move(_req)), path(_path), func(_func), config(&_config) {} void operator()() override { func(*config, req.get(), path); } std::unique_ptr req; private: std::string path; HTTPRequestHandler func; Config *config; }; /** Simple work queue for distributing work over multiple threads. * Work items are simply callable objects. 
*/ template class WorkQueue { private: /** Mutex protects entire object */ std::mutex cs; std::condition_variable cond; std::deque> queue; bool running; size_t maxDepth; int numThreads; /** RAII object to keep track of number of running worker threads */ class ThreadCounter { public: WorkQueue &wq; ThreadCounter(WorkQueue &w) : wq(w) { std::lock_guard lock(wq.cs); wq.numThreads += 1; } ~ThreadCounter() { std::lock_guard lock(wq.cs); wq.numThreads -= 1; wq.cond.notify_all(); } }; public: WorkQueue(size_t _maxDepth) : running(true), maxDepth(_maxDepth), numThreads(0) {} /** Precondition: worker threads have all stopped * (call WaitExit) */ ~WorkQueue() {} /** Enqueue a work item */ bool Enqueue(WorkItem *item) { std::unique_lock lock(cs); if (queue.size() >= maxDepth) { return false; } queue.emplace_back(std::unique_ptr(item)); cond.notify_one(); return true; } /** Thread function */ void Run() { ThreadCounter count(*this); while (true) { std::unique_ptr i; { std::unique_lock lock(cs); while (running && queue.empty()) cond.wait(lock); if (!running) break; i = std::move(queue.front()); queue.pop_front(); } (*i)(); } } /** Interrupt and exit loops */ void Interrupt() { std::unique_lock lock(cs); running = false; cond.notify_all(); } /** Wait for worker threads to exit */ void WaitExit() { std::unique_lock lock(cs); while (numThreads > 0) cond.wait(lock); } /** Return current depth of queue */ size_t Depth() { std::unique_lock lock(cs); return queue.size(); } }; struct HTTPPathHandler { HTTPPathHandler() {} HTTPPathHandler(std::string _prefix, bool _exactMatch, HTTPRequestHandler _handler) : prefix(_prefix), exactMatch(_exactMatch), handler(_handler) {} std::string prefix; bool exactMatch; HTTPRequestHandler handler; }; /** HTTP module state */ //! libevent event loop static struct event_base *eventBase = 0; //! HTTP server struct evhttp *eventHTTP = 0; //! List of subnets to allow RPC connections from static std::vector rpc_allow_subnets; //! Work queue for handling longer requests off the event loop thread static WorkQueue *workQueue = 0; //! Handlers for (sub)paths std::vector pathHandlers; //! Bound listening sockets std::vector boundSockets; /** Check if a network address is allowed to access the HTTP server */ static bool ClientAllowed(const CNetAddr &netaddr) { if (!netaddr.IsValid()) return false; for (const CSubNet &subnet : rpc_allow_subnets) if (subnet.Match(netaddr)) return true; return false; } /** Initialize ACL list for HTTP server */ static bool InitHTTPAllowList() { rpc_allow_subnets.clear(); CNetAddr localv4; CNetAddr localv6; LookupHost("127.0.0.1", localv4, false); LookupHost("::1", localv6, false); // always allow IPv4 local subnet. rpc_allow_subnets.push_back(CSubNet(localv4, 8)); // always allow IPv6 localhost. rpc_allow_subnets.push_back(CSubNet(localv6)); if (gArgs.IsArgSet("-rpcallowip")) { for (const std::string &strAllow : gArgs.GetArgs("-rpcallowip")) { CSubNet subnet; LookupSubNet(strAllow.c_str(), subnet); if (!subnet.IsValid()) { uiInterface.ThreadSafeMessageBox( strprintf("Invalid -rpcallowip subnet specification: %s. " "Valid are a single IP (e.g. 1.2.3.4), a " "network/netmask (e.g. 1.2.3.4/255.255.255.0) or " "a network/CIDR (e.g. 
1.2.3.4/24).", strAllow), "", CClientUIInterface::MSG_ERROR); return false; } rpc_allow_subnets.push_back(subnet); } } std::string strAllowed; for (const CSubNet &subnet : rpc_allow_subnets) strAllowed += subnet.ToString() + " "; LogPrint(BCLog::HTTP, "Allowing HTTP connections from: %s\n", strAllowed); return true; } /** HTTP request method as string - use for logging only */ static std::string RequestMethodString(HTTPRequest::RequestMethod m) { switch (m) { case HTTPRequest::GET: return "GET"; break; case HTTPRequest::POST: return "POST"; break; case HTTPRequest::HEAD: return "HEAD"; break; case HTTPRequest::PUT: return "PUT"; break; default: return "unknown"; } } /** HTTP request callback */ static void http_request_cb(struct evhttp_request *req, void *arg) { Config &config = *reinterpret_cast(arg); std::unique_ptr hreq(new HTTPRequest(req)); LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n", RequestMethodString(hreq->GetRequestMethod()), hreq->GetURI(), hreq->GetPeer().ToString()); // Early address-based allow check if (!ClientAllowed(hreq->GetPeer())) { hreq->WriteReply(HTTP_FORBIDDEN); return; } // Early reject unknown HTTP methods if (hreq->GetRequestMethod() == HTTPRequest::UNKNOWN) { hreq->WriteReply(HTTP_BADMETHOD); return; } // Find registered handler for prefix std::string strURI = hreq->GetURI(); std::string path; std::vector::const_iterator i = pathHandlers.begin(); std::vector::const_iterator iend = pathHandlers.end(); for (; i != iend; ++i) { bool match = false; if (i->exactMatch) { match = (strURI == i->prefix); } else { match = (strURI.substr(0, i->prefix.size()) == i->prefix); } if (match) { path = strURI.substr(i->prefix.size()); break; } } // Dispatch to worker thread. if (i != iend) { std::unique_ptr item( new HTTPWorkItem(config, std::move(hreq), path, i->handler)); assert(workQueue); if (workQueue->Enqueue(item.get())) { /* if true, queue took ownership */ item.release(); } else { LogPrintf("WARNING: request rejected because http work queue depth " "exceeded, it can be increased with the -rpcworkqueue= " "setting\n"); item->req->WriteReply(HTTP_INTERNAL, "Work queue depth exceeded"); } } else { hreq->WriteReply(HTTP_NOTFOUND); } } /** Callback to reject HTTP requests after shutdown. */ static void http_reject_request_cb(struct evhttp_request *req, void *) { LogPrint(BCLog::HTTP, "Rejecting request while shutting down\n"); evhttp_send_error(req, HTTP_SERVUNAVAIL, nullptr); } /** Event dispatcher thread */ static bool ThreadHTTP(struct event_base *base, struct evhttp *http) { RenameThread("bitcoin-http"); LogPrint(BCLog::HTTP, "Entering http event loop\n"); event_base_dispatch(base); // Event loop will be interrupted by InterruptHTTPServer() LogPrint(BCLog::HTTP, "Exited http event loop\n"); return event_base_got_break(base) == 0; } /** Bind HTTP server to specified addresses */ static bool HTTPBindAddresses(struct evhttp *http) { - int defaultPort = GetArg("-rpcport", BaseParams().RPCPort()); + int defaultPort = gArgs.GetArg("-rpcport", BaseParams().RPCPort()); std::vector> endpoints; // Determine what addresses to bind to - if (!IsArgSet("-rpcallowip")) { + if (!gArgs.IsArgSet("-rpcallowip")) { // Default to loopback if not allowing external IPs. 
endpoints.push_back(std::make_pair("::1", defaultPort)); endpoints.push_back(std::make_pair("127.0.0.1", defaultPort)); - if (IsArgSet("-rpcbind")) { + if (gArgs.IsArgSet("-rpcbind")) { LogPrintf("WARNING: option -rpcbind was ignored because " "-rpcallowip was not specified, refusing to allow " "everyone to connect\n"); } } else if (gArgs.IsArgSet("-rpcbind")) { // Specific bind address. for (const std::string &strRPCBind : gArgs.GetArgs("-rpcbind")) { int port = defaultPort; std::string host; SplitHostPort(strRPCBind, port, host); endpoints.push_back(std::make_pair(host, port)); } } else { // No specific bind address specified, bind to any. endpoints.push_back(std::make_pair("::", defaultPort)); endpoints.push_back(std::make_pair("0.0.0.0", defaultPort)); } // Bind addresses for (std::vector>::iterator i = endpoints.begin(); i != endpoints.end(); ++i) { LogPrint(BCLog::HTTP, "Binding RPC on address %s port %i\n", i->first, i->second); evhttp_bound_socket *bind_handle = evhttp_bind_socket_with_handle( http, i->first.empty() ? nullptr : i->first.c_str(), i->second); if (bind_handle) { boundSockets.push_back(bind_handle); } else { LogPrintf("Binding RPC on address %s port %i failed.\n", i->first, i->second); } } return !boundSockets.empty(); } /** Simple wrapper to set thread name and run work queue */ static void HTTPWorkQueueRun(WorkQueue *queue) { RenameThread("bitcoin-httpworker"); queue->Run(); } /** libevent event log callback */ static void libevent_log_cb(int severity, const char *msg) { #ifndef EVENT_LOG_WARN // EVENT_LOG_WARN was added in 2.0.19; but before then _EVENT_LOG_WARN existed. #define EVENT_LOG_WARN _EVENT_LOG_WARN #endif // Log warn messages and higher without debug category. if (severity >= EVENT_LOG_WARN) { LogPrintf("libevent: %s\n", msg); } else { LogPrint(BCLog::LIBEVENT, "libevent: %s\n", msg); } } bool InitHTTPServer(Config &config) { struct evhttp *http = 0; struct event_base *base = 0; if (!InitHTTPAllowList()) return false; - if (GetBoolArg("-rpcssl", false)) { + if (gArgs.GetBoolArg("-rpcssl", false)) { uiInterface.ThreadSafeMessageBox( "SSL mode for RPC (-rpcssl) is no longer supported.", "", CClientUIInterface::MSG_ERROR); return false; } // Redirect libevent's logging to our own log event_set_log_callback(&libevent_log_cb); #if LIBEVENT_VERSION_NUMBER >= 0x02010100 // If -debug=libevent, set full libevent debugging. // Otherwise, disable all libevent debugging. if (LogAcceptCategory(BCLog::LIBEVENT)) { event_enable_debug_logging(EVENT_DBG_ALL); } else { event_enable_debug_logging(EVENT_DBG_NONE); } #endif #ifdef WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif // XXX RAII base = event_base_new(); if (!base) { LogPrintf("Couldn't create an event_base: exiting\n"); return false; } /* Create a new evhttp object to handle requests. */ // XXX RAII http = evhttp_new(base); if (!http) { LogPrintf("couldn't create evhttp. 
Exiting.\n"); event_base_free(base); return false; } evhttp_set_timeout( - http, GetArg("-rpcservertimeout", DEFAULT_HTTP_SERVER_TIMEOUT)); + http, gArgs.GetArg("-rpcservertimeout", DEFAULT_HTTP_SERVER_TIMEOUT)); evhttp_set_max_headers_size(http, MAX_HEADERS_SIZE); evhttp_set_max_body_size(http, MAX_SIZE); evhttp_set_gencb(http, http_request_cb, &config); if (!HTTPBindAddresses(http)) { LogPrintf("Unable to bind any endpoint for RPC server\n"); evhttp_free(http); event_base_free(base); return false; } LogPrint(BCLog::HTTP, "Initialized HTTP server\n"); - int workQueueDepth = - std::max((long)GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); + int workQueueDepth = std::max( + (long)gArgs.GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth); workQueue = new WorkQueue(workQueueDepth); eventBase = base; eventHTTP = http; return true; } std::thread threadHTTP; std::future threadResult; bool StartHTTPServer() { LogPrint(BCLog::HTTP, "Starting HTTP server\n"); int rpcThreads = - std::max((long)GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); + std::max((long)gArgs.GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); LogPrintf("HTTP: starting %d worker threads\n", rpcThreads); std::packaged_task task(ThreadHTTP); threadResult = task.get_future(); threadHTTP = std::thread(std::move(task), eventBase, eventHTTP); for (int i = 0; i < rpcThreads; i++) { std::thread rpc_worker(HTTPWorkQueueRun, workQueue); rpc_worker.detach(); } return true; } void InterruptHTTPServer() { LogPrint(BCLog::HTTP, "Interrupting HTTP server\n"); if (eventHTTP) { // Unlisten sockets for (evhttp_bound_socket *socket : boundSockets) { evhttp_del_accept_socket(eventHTTP, socket); } // Reject requests on current connections evhttp_set_gencb(eventHTTP, http_reject_request_cb, nullptr); } if (workQueue) workQueue->Interrupt(); } void StopHTTPServer() { LogPrint(BCLog::HTTP, "Stopping HTTP server\n"); if (workQueue) { LogPrint(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n"); workQueue->WaitExit(); delete workQueue; } if (eventBase) { LogPrint(BCLog::HTTP, "Waiting for HTTP event thread to exit\n"); // Give event loop a few seconds to exit (to send back last RPC // responses), then break it. Before this was solved with // event_base_loopexit, but that didn't work as expected in at least // libevent 2.0.21 and always introduced a delay. In libevent master // that appears to be solved, so in the future that solution could be // used again (if desirable). 
// (see discussion in https://github.com/bitcoin/bitcoin/pull/6990) if (threadResult.valid() && threadResult.wait_for(std::chrono::milliseconds(2000)) == std::future_status::timeout) { LogPrintf("HTTP event loop did not exit within allotted time, " "sending loopbreak\n"); event_base_loopbreak(eventBase); } threadHTTP.join(); } if (eventHTTP) { evhttp_free(eventHTTP); eventHTTP = 0; } if (eventBase) { event_base_free(eventBase); eventBase = 0; } LogPrint(BCLog::HTTP, "Stopped HTTP server\n"); } struct event_base *EventBase() { return eventBase; } static void httpevent_callback_fn(evutil_socket_t, short, void *data) { // Static handler: simply call inner handler HTTPEvent *self = ((HTTPEvent *)data); self->handler(); if (self->deleteWhenTriggered) delete self; } HTTPEvent::HTTPEvent(struct event_base *base, bool _deleteWhenTriggered, const std::function &_handler) : deleteWhenTriggered(_deleteWhenTriggered), handler(_handler) { ev = event_new(base, -1, 0, httpevent_callback_fn, this); assert(ev); } HTTPEvent::~HTTPEvent() { event_free(ev); } void HTTPEvent::trigger(struct timeval *tv) { if (tv == nullptr) { // Immediately trigger event in main thread. event_active(ev, 0, 0); } else { // Trigger after timeval passed. evtimer_add(ev, tv); } } HTTPRequest::HTTPRequest(struct evhttp_request *_req) : req(_req), replySent(false) {} HTTPRequest::~HTTPRequest() { if (!replySent) { // Keep track of whether reply was sent to avoid request leaks LogPrintf("%s: Unhandled request\n", __func__); WriteReply(HTTP_INTERNAL, "Unhandled request"); } // evhttpd cleans up the request, as long as a reply was sent. } std::pair HTTPRequest::GetHeader(const std::string &hdr) { const struct evkeyvalq *headers = evhttp_request_get_input_headers(req); assert(headers); const char *val = evhttp_find_header(headers, hdr.c_str()); if (val) return std::make_pair(true, val); else return std::make_pair(false, ""); } std::string HTTPRequest::ReadBody() { struct evbuffer *buf = evhttp_request_get_input_buffer(req); if (!buf) return ""; size_t size = evbuffer_get_length(buf); /** Trivial implementation: if this is ever a performance bottleneck, * internal copying can be avoided in multi-segment buffers by using * evbuffer_peek and an awkward loop. Though in that case, it'd be even * better to not copy into an intermediate string but use a stream * abstraction to consume the evbuffer on the fly in the parsing algorithm. */ const char *data = (const char *)evbuffer_pullup(buf, size); // returns nullptr in case of empty buffer. if (!data) { return ""; } std::string rv(data, size); evbuffer_drain(buf, size); return rv; } void HTTPRequest::WriteHeader(const std::string &hdr, const std::string &value) { struct evkeyvalq *headers = evhttp_request_get_output_headers(req); assert(headers); evhttp_add_header(headers, hdr.c_str(), value.c_str()); } /** Closure sent to main thread to request a reply to be sent to a HTTP request. * Replies must be sent in the main loop in the main http thread, this cannot be * done from worker threads. */ void HTTPRequest::WriteReply(int nStatus, const std::string &strReply) { assert(!replySent && req); // Send event to main http thread to send reply message struct evbuffer *evb = evhttp_request_get_output_buffer(req); assert(evb); evbuffer_add(evb, strReply.data(), strReply.size()); HTTPEvent *ev = new HTTPEvent(eventBase, true, std::bind(evhttp_send_reply, req, nStatus, (const char *)nullptr, (struct evbuffer *)nullptr)); ev->trigger(0); replySent = true; // transferred back to main thread. 
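// Illustrative sketch only (not part of this change): a minimal handler
// built on the HTTPRequest/RegisterHTTPHandler interfaces defined in this
// file. The "/sketch" prefix and handler name are hypothetical; HTTP_OK and
// HTTP_BAD_METHOD come from rpc/protocol.h, which this file includes.
static bool HTTPReq_Sketch(Config &, HTTPRequest *req, const std::string &) {
    if (req->GetRequestMethod() != HTTPRequest::GET) {
        req->WriteReply(HTTP_BAD_METHOD, "only GET is handled here");
        return false;
    }
    req->WriteHeader("Content-Type", "text/plain");
    req->WriteReply(HTTP_OK, "ok\n");
    return true;
}
// Registration as an exact-match endpoint would look like:
//     RegisterHTTPHandler("/sketch", true, HTTPReq_Sketch);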
req = 0; } CService HTTPRequest::GetPeer() { evhttp_connection *con = evhttp_request_get_connection(req); CService peer; if (con) { // evhttp retains ownership over returned address string const char *address = ""; uint16_t port = 0; evhttp_connection_get_peer(con, (char **)&address, &port); peer = LookupNumeric(address, port); } return peer; } std::string HTTPRequest::GetURI() { return evhttp_request_get_uri(req); } HTTPRequest::RequestMethod HTTPRequest::GetRequestMethod() { switch (evhttp_request_get_command(req)) { case EVHTTP_REQ_GET: return GET; break; case EVHTTP_REQ_POST: return POST; break; case EVHTTP_REQ_HEAD: return HEAD; break; case EVHTTP_REQ_PUT: return PUT; break; default: return UNKNOWN; break; } } void RegisterHTTPHandler(const std::string &prefix, bool exactMatch, const HTTPRequestHandler &handler) { LogPrint(BCLog::HTTP, "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch); pathHandlers.push_back(HTTPPathHandler(prefix, exactMatch, handler)); } void UnregisterHTTPHandler(const std::string &prefix, bool exactMatch) { std::vector::iterator i = pathHandlers.begin(); std::vector::iterator iend = pathHandlers.end(); for (; i != iend; ++i) if (i->prefix == prefix && i->exactMatch == exactMatch) break; if (i != iend) { LogPrint(BCLog::HTTP, "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch); pathHandlers.erase(i); } } diff --git a/src/init.cpp b/src/init.cpp index 8b8afaab5e..640cfcb20f 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1,2249 +1,2258 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "init.h" #include "addrman.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "compat/sanity.h" #include "config.h" #include "consensus/validation.h" #include "fs.h" #include "httprpc.h" #include "httpserver.h" #include "key.h" #include "miner.h" #include "net.h" #include "net_processing.h" #include "netbase.h" #include "policy/policy.h" #include "rpc/register.h" #include "rpc/server.h" #include "scheduler.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "torcontrol.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "validation.h" #include "validationinterface.h" #ifdef ENABLE_WALLET #include "wallet/rpcdump.h" #include "wallet/wallet.h" #endif #include "warnings.h" #include #include #include #ifndef WIN32 #include #endif #include #include #include #include #include #include #include #if ENABLE_ZMQ #include "zmq/zmqnotificationinterface.h" #endif bool fFeeEstimatesInitialized = false; static const bool DEFAULT_PROXYRANDOMIZE = true; static const bool DEFAULT_REST_ENABLE = false; static const bool DEFAULT_DISABLE_SAFEMODE = false; static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr g_connman; std::unique_ptr peerLogic; #if ENABLE_ZMQ static CZMQNotificationInterface *pzmqNotificationInterface = nullptr; #endif #ifdef WIN32 // Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing // block files don't count towards the fd_set size limit anyway. 
#define MIN_CORE_FILEDESCRIPTORS 0 #else #define MIN_CORE_FILEDESCRIPTORS 150 #endif /** Used to pass flags to the Bind() function */ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), BF_WHITELIST = (1U << 2), }; static const char *FEE_ESTIMATES_FILENAME = "fee_estimates.dat"; ////////////////////////////////////////////////////////////////////////////// // // Shutdown // // // Thread management and startup/shutdown: // // The network-processing threads are all part of a thread group created by // AppInit() or the Qt main() function. // // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets // fRequestShutdown, which triggers the DetectShutdownThread(), which interrupts // the main thread group. DetectShutdownThread() then exits, which causes // AppInit() to continue (it .joins the shutdown thread). Shutdown() is then // called to clean up database connections, and stop other threads that should // only be stopped after the main network-processing threads have exited. // // Note that if running -daemon the parent process returns from AppInit2 before // adding any threads to the threadGroup, so .join_all() returns immediately and // the parent exits from main(). // // Shutdown for Qt is very similar, only it uses a QTimer to detect // fRequestShutdown getting set, and then does the normal Qt shutdown thing. // std::atomic fRequestShutdown(false); std::atomic fDumpMempoolLater(false); void StartShutdown() { fRequestShutdown = true; } bool ShutdownRequested() { return fRequestShutdown; } /** * This is a minimally invasive approach to shutdown on LevelDB read errors from * the chainstate, while keeping user interface out of the common library, which * is shared between bitcoind, and bitcoin-qt and non-server tools. */ class CCoinsViewErrorCatcher final : public CCoinsViewBacked { public: CCoinsViewErrorCatcher(CCoinsView *view) : CCoinsViewBacked(view) {} bool GetCoin(const COutPoint &outpoint, Coin &coin) const override { try { return CCoinsViewBacked::GetCoin(outpoint, coin); } catch (const std::runtime_error &e) { uiInterface.ThreadSafeMessageBox( _("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR); LogPrintf("Error reading from database: %s\n", e.what()); // Starting the shutdown sequence and returning false to the caller // would be interpreted as 'entry not found' (as opposed to unable // to read data), and could lead to invalid interpretation. Just // exit immediately, as we can't continue anyway, and all writes // should be atomic. abort(); } } // Writes do not need similar protection, as failure to write is handled by // the caller. }; static CCoinsViewDB *pcoinsdbview = nullptr; static CCoinsViewErrorCatcher *pcoinscatcher = nullptr; static std::unique_ptr globalVerifyHandle; void Interrupt(boost::thread_group &threadGroup) { InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); InterruptREST(); InterruptTorControl(); if (g_connman) g_connman->Interrupt(); threadGroup.interrupt_all(); } void Shutdown() { LogPrintf("%s: In progress...\n", __func__); static CCriticalSection cs_Shutdown; TRY_LOCK(cs_Shutdown, lockShutdown); if (!lockShutdown) return; /// Note: Shutdown() must be able to handle cases in which AppInit2() failed /// part of the way, for example if the data directory was found to be /// locked. Be sure that anything that writes files or flushes caches only /// does this if the respective module was initialized. 
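// Illustrative sketch only (not part of this change): the shutdown-detection
// loop described in the thread-management comment above boils down to
// polling ShutdownRequested() and then interrupting the thread group,
// roughly as bitcoind's WaitForShutdown() does. This is a sketch relying on
// MilliSleep() and the Interrupt() defined in this file.
static void WaitForShutdownSketch(boost::thread_group *threadGroup) {
    while (!ShutdownRequested()) {
        MilliSleep(200); // poll a few times per second
    }
    if (threadGroup) {
        Interrupt(*threadGroup);
        threadGroup->join_all();
    }
}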
RenameThread("bitcoin-shutoff"); mempool.AddTransactionsUpdated(1); StopHTTPRPC(); StopREST(); StopRPC(); StopHTTPServer(); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->Flush(false); } #endif MapPort(false); UnregisterValidationInterface(peerLogic.get()); peerLogic.reset(); g_connman.reset(); StopTorControl(); UnregisterNodeSignals(GetNodeSignals()); if (fDumpMempoolLater && - GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { + gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { DumpMempool(); } if (fFeeEstimatesInitialized) { fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION); if (!est_fileout.IsNull()) mempool.WriteFeeEstimates(est_fileout); else LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string()); fFeeEstimatesInitialized = false; } { LOCK(cs_main); if (pcoinsTip != nullptr) { FlushStateToDisk(); } delete pcoinsTip; pcoinsTip = nullptr; delete pcoinscatcher; pcoinscatcher = nullptr; delete pcoinsdbview; pcoinsdbview = nullptr; delete pblocktree; pblocktree = nullptr; } #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->Flush(true); } #endif #if ENABLE_ZMQ if (pzmqNotificationInterface) { UnregisterValidationInterface(pzmqNotificationInterface); delete pzmqNotificationInterface; pzmqNotificationInterface = nullptr; } #endif #ifndef WIN32 try { fs::remove(GetPidFile()); } catch (const fs::filesystem_error &e) { LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what()); } #endif UnregisterAllValidationInterfaces(); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { delete pwallet; } vpwallets.clear(); #endif globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); } /** * Signal handlers are very limited in what they are allowed to do, so: */ void HandleSIGTERM(int) { fRequestShutdown = true; } void HandleSIGHUP(int) { fReopenDebugLog = true; } static bool Bind(CConnman &connman, const CService &addr, unsigned int flags) { if (!(flags & BF_EXPLICIT) && IsLimited(addr)) return false; std::string strError; if (!connman.BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) { if (flags & BF_REPORT_ERROR) return InitError(strError); return false; } return true; } void OnRPCStarted() { uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange); } void OnRPCStopped() { uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange); RPCNotifyBlockChange(false, nullptr); cvBlockChange.notify_all(); LogPrint(BCLog::RPC, "RPC stopped.\n"); } void OnRPCPreCommand(const CRPCCommand &cmd) { // Observe safe mode. std::string strWarning = GetWarnings("rpc"); if (strWarning != "" && - !GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE) && + !gArgs.GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE) && !cmd.okSafeMode) throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, std::string("Safe mode: ") + strWarning); } std::string HelpMessage(HelpMessageMode mode) { - const bool showDebug = GetBoolArg("-help-debug", false); + const bool showDebug = gArgs.GetBoolArg("-help-debug", false); // When adding new options to the categories, please keep and ensure // alphabetical ordering. Do not translate _(...) -help-debug options, Many // technical terms, and only a very small audience, so is unnecessary stress // to translators. 
std::string strUsage = HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("Print this help message and exit")); strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt( "-alertnotify=", _("Execute command when a relevant alert is received or we see a " "really long fork (%s in cmd is replaced by message)")); strUsage += HelpMessageOpt("-blocknotify=", _("Execute command when the best block changes " "(%s in cmd is replaced by block hash)")); if (showDebug) strUsage += HelpMessageOpt( "-blocksonly", strprintf( _("Whether to operate in a blocks only mode (default: %d)"), DEFAULT_BLOCKSONLY)); strUsage += HelpMessageOpt( "-assumevalid=", strprintf(_("If this block is in the chain assume that it and its " "ancestors are valid and potentially skip their script " "verification (0 to verify all, default: %s, testnet: %s)"), Params(CBaseChainParams::MAIN) .GetConsensus() .defaultAssumeValid.GetHex(), Params(CBaseChainParams::TESTNET) .GetConsensus() .defaultAssumeValid.GetHex())); strUsage += HelpMessageOpt( "-conf=", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); if (mode == HMM_BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt( "-daemon", _("Run in the background as a daemon and accept commands")); #endif } strUsage += HelpMessageOpt("-datadir=", _("Specify data directory")); if (showDebug) { strUsage += HelpMessageOpt( "-dbbatchsize", strprintf( "Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize)); } strUsage += HelpMessageOpt( "-dbcache=", strprintf( _("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache)); if (showDebug) strUsage += HelpMessageOpt( "-feefilter", strprintf("Tell other nodes to filter invs to us by " "our mempool min fee (default: %d)", DEFAULT_FEEFILTER)); strUsage += HelpMessageOpt( "-loadblock=", _("Imports blocks from external blk000??.dat file on startup")); strUsage += HelpMessageOpt( "-maxorphantx=", strprintf(_("Keep at most unconnectable " "transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS)); strUsage += HelpMessageOpt("-maxmempool=", strprintf(_("Keep the transaction memory pool " "below megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE)); strUsage += HelpMessageOpt("-mempoolexpiry=", strprintf(_("Do not keep transactions in the mempool " "longer than hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY)); strUsage += HelpMessageOpt("-persistmempool", strprintf(_("Whether to save the mempool on shutdown " "and load on restart (default: %u)"), DEFAULT_PERSIST_MEMPOOL)); strUsage += HelpMessageOpt( "-blockreconstructionextratxn=", strprintf(_("Extra transactions to keep in memory for compact block " "reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); strUsage += HelpMessageOpt( "-par=", strprintf(_("Set the number of script verification threads (%u to %d, " "0 = auto, <0 = leave that many cores free, default: %d)"), -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS)); #ifndef WIN32 strUsage += HelpMessageOpt( "-pid=", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME)); #endif strUsage += HelpMessageOpt( "-prune=", strprintf( _("Reduce storage requirements by enabling pruning (deleting) of " "old blocks. This allows the pruneblockchain RPC to be called to " "delete specific blocks, and enables automatic pruning of old " "blocks if a target size in MiB is provided. 
This mode is " "incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the " "entire blockchain. " "(default: 0 = disable pruning blocks, 1 = allow manual pruning " "via RPC, >%u = automatically prune block files to stay under " "the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); strUsage += HelpMessageOpt( "-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks")); strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from " "the blk*.dat files on disk")); #ifndef WIN32 strUsage += HelpMessageOpt( "-sysperms", _("Create new files with system default permissions, instead of umask " "077 (only effective with disabled wallet functionality)")); #endif strUsage += HelpMessageOpt( "-txindex", strprintf(_("Maintain a full transaction index, used by " "the getrawtransaction rpc call (default: %d)"), DEFAULT_TXINDEX)); strUsage += HelpMessageOpt( "-usecashaddr", _("Use Cash Address for destination encoding instead " "of base58 (activate by default on Jan, 14)")); strUsage += HelpMessageGroup(_("Connection options:")); strUsage += HelpMessageOpt( "-addnode=", _("Add a node to connect to and attempt to keep the connection open")); strUsage += HelpMessageOpt( "-banscore=", strprintf( _("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD)); strUsage += HelpMessageOpt( "-bantime=", strprintf(_("Number of seconds to keep misbehaving " "peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME)); strUsage += HelpMessageOpt("-bind=", _("Bind to given address and always listen on " "it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt("-connect=", _("Connect only to the specified node(s); -noconnect or " "-connect=0 alone to disable automatic connections")); strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when " "listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt( "-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP)); strUsage += HelpMessageOpt( "-dnsseed", _("Query for peer addresses via DNS lookup, if low on " "addresses (default: 1 unless -connect/-noconnect)")); strUsage += HelpMessageOpt("-externalip=", _("Specify your own public address")); strUsage += HelpMessageOpt( "-forcednsseed", strprintf( _("Always query for peer addresses via DNS lookup (default: %d)"), DEFAULT_FORCEDNSSEED)); strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: " "1 if no -proxy or -connect/-noconnect)")); strUsage += HelpMessageOpt( "-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION)); strUsage += HelpMessageOpt( "-maxconnections=", strprintf(_("Maintain at most connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS)); strUsage += HelpMessageOpt("-maxreceivebuffer=", strprintf(_("Maximum per-connection receive buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER)); strUsage += HelpMessageOpt( "-maxsendbuffer=", strprintf(_("Maximum per-connection send buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER)); strUsage += HelpMessageOpt( "-maxtimeadjustment", strprintf(_("Maximum allowed median peer time offset adjustment. Local " "perspective of time may be influenced by peers forward or " "backward by this amount. 
(default: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT)); strUsage += HelpMessageOpt("-onion=", strprintf(_("Use separate SOCKS5 proxy to reach peers " "via Tor hidden services (default: %s)"), "-proxy")); strUsage += HelpMessageOpt( "-onlynet=", _("Only connect to nodes in network (ipv4, ipv6 or onion)")); strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %d)"), DEFAULT_PERMIT_BAREMULTISIG)); strUsage += HelpMessageOpt( "-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom " "filters (default: %d)"), DEFAULT_PEERBLOOMFILTERS)); strUsage += HelpMessageOpt( "-port=", strprintf( _("Listen for connections on (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort())); strUsage += HelpMessageOpt("-proxy=", _("Connect through SOCKS5 proxy")); strUsage += HelpMessageOpt( "-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This " "enables Tor stream isolation (default: %d)"), DEFAULT_PROXYRANDOMIZE)); strUsage += HelpMessageOpt( "-seednode=", _("Connect to a node to retrieve peer addresses, and disconnect")); strUsage += HelpMessageOpt( "-timeout=", strprintf(_("Specify connection timeout in " "milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT)); strUsage += HelpMessageOpt("-torcontrol=:", strprintf(_("Tor control port to use if onion " "listening enabled (default: %s)"), DEFAULT_TOR_CONTROL)); strUsage += HelpMessageOpt("-torpassword=", _("Tor control port password (default: empty)")); #ifdef USE_UPNP #if USE_UPNP strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port " "(default: 1 when listening and no -proxy)")); #else strUsage += HelpMessageOpt( "-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0)); #endif #endif strUsage += HelpMessageOpt("-whitebind=", _("Bind to given address and whitelist peers connecting " "to it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt( "-whitelist=", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) " "or CIDR notated network (e.g. 1.2.3.0/24). Can be specified " "multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their " "transactions are always relayed, even if they are already " "in the mempool, useful e.g. for a gateway")); strUsage += HelpMessageOpt( "-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted " "peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageOpt( "-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even " "if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY)); strUsage += HelpMessageOpt( "-maxuploadtarget=", strprintf(_("Tries to keep outbound traffic under the given target (in " "MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET)); #ifdef ENABLE_WALLET strUsage += CWallet::GetWalletHelpString(showDebug); #endif #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); strUsage += HelpMessageOpt("-zmqpubhashblock=
", _("Enable publish hash block in
")); strUsage += HelpMessageOpt("-zmqpubhashtx=
", _("Enable publish hash transaction in
")); strUsage += HelpMessageOpt("-zmqpubrawblock=
", _("Enable publish raw block in
")); strUsage += HelpMessageOpt("-zmqpubrawtx=
", _("Enable publish raw transaction in
")); #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); strUsage += HelpMessageOpt("-uacomment=", _("Append comment to the user agent string")); if (showDebug) { strUsage += HelpMessageOpt( "-checkblocks=", strprintf( _("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=", strprintf(_("How thorough the block verification of " "-checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); strUsage += HelpMessageOpt( "-checkblockindex", strprintf( "Do a full consistency check for mapBlockIndex, " "setBlockIndexCandidates, chainActive and mapBlocksUnlinked " "occasionally. Also sets -checkmempool (default: %d)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkmempool=", strprintf( "Run checks every transactions (default: %d)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkpoints", strprintf("Disable expensive verification for " "known chain history (default: %d)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt( "-disablesafemode", strprintf("Disable safemode, override a real " "safe mode event (default: %d)", DEFAULT_DISABLE_SAFEMODE)); strUsage += HelpMessageOpt( "-testsafemode", strprintf("Force safe mode (default: %d)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=", "Randomly drop 1 of every network messages"); strUsage += HelpMessageOpt("-fuzzmessagestest=", "Randomly fuzz 1 of every network messages"); strUsage += HelpMessageOpt( "-stopafterblockimport", strprintf( "Stop running after importing blocks from disk (default: %d)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt( "-stopatheight", strprintf("Stop running after reaching the given " "height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT)); strUsage += HelpMessageOpt( "-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool " "ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT)); strUsage += HelpMessageOpt("-limitancestorsize=", strprintf("Do not accept transactions whose size " "with all in-mempool ancestors exceeds " " kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have " " or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantsize=", strprintf("Do not accept transactions if any ancestor would have " "more than kilobytes of in-mempool descendants " "(default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT)); strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified " "BIP9 deployment (regtest-only)"); } strUsage += HelpMessageOpt( "-debug=", strprintf(_("Output debugging information (default: %u, supplying " " is optional)"), 0) + ". " + _("If is not supplied or if = 1, " "output all debugging information.") + _(" can be:") + " " + ListLogCategories() + "."); strUsage += HelpMessageOpt( "-debugexclude=", strprintf(_("Exclude debugging information for a category. 
Can be used " "in conjunction with -debug=1 to output debug logs for all " "categories except one or more specified categories."))); if (showDebug) { strUsage += HelpMessageOpt( "-nodebug", "Turn off debugging messages, same as -debug=0"); } strUsage += HelpMessageOpt( "-help-debug", _("Show all debugging options (usage: --help -help-debug)")); strUsage += HelpMessageOpt( "-logips", strprintf(_("Include IP addresses in debug output (default: %d)"), DEFAULT_LOGIPS)); strUsage += HelpMessageOpt( "-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %d)"), DEFAULT_LOGTIMESTAMPS)); if (showDebug) { strUsage += HelpMessageOpt( "-logtimemicros", strprintf( "Add microsecond precision to debug timestamps (default: %d)", DEFAULT_LOGTIMEMICROS)); strUsage += HelpMessageOpt( "-mocktime=", "Replace actual time with seconds since epoch (default: 0)"); strUsage += HelpMessageOpt( "-limitfreerelay=", strprintf("Continuously rate-limit free transactions to *1000 " "bytes per minute (default: %u)", DEFAULT_LIMITFREERELAY)); strUsage += HelpMessageOpt("-relaypriority", strprintf("Require high priority for relaying free " "or low-fee transactions (default: %d)", DEFAULT_RELAYPRIORITY)); strUsage += HelpMessageOpt( "-maxsigcachesize=", strprintf("Limit size of signature cache to MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxscriptcachesize=", strprintf("Limit size of script cache to MiB (default: %u)", DEFAULT_MAX_SCRIPT_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxtipage=", strprintf("Maximum tip age in seconds to consider node in initial " "block download (default: %u)", DEFAULT_MAX_TIP_AGE)); } strUsage += HelpMessageOpt( "-minrelaytxfee=", strprintf( _("Fees (in %s/kB) smaller than this are considered zero fee for " "relaying, mining and transaction creation (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE))); strUsage += HelpMessageOpt( "-maxtxfee=", strprintf(_("Maximum total fees (in %s) to use in a single wallet " "transaction or raw transaction; setting this too low may " "abort large transactions (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE))); strUsage += HelpMessageOpt( "-printtoconsole", _("Send trace/debug info to console instead of debug.log file")); if (showDebug) { strUsage += HelpMessageOpt( "-printpriority", strprintf("Log transaction priority and fee per " "kB when mining blocks (default: %d)", DEFAULT_PRINTPRIORITY)); } strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup " "(default: 1 when no -debug)")); AppendParamsHelpMessages(strUsage, showDebug); strUsage += HelpMessageGroup(_("Node relay options:")); if (showDebug) { strUsage += HelpMessageOpt( "-acceptnonstdtxn", strprintf( "Relay and mine \"non-standard\" transactions (%sdefault: %d)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard())); strUsage += HelpMessageOpt("-excessiveblocksize=", strprintf(_("Do not accept blocks larger than this " "limit, in bytes (default: %d)"), LEGACY_MAX_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-incrementalrelayfee=", strprintf( "Fee rate (in %s/kB) used to define cost of relay, used for " "mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE))); strUsage += HelpMessageOpt( "-dustrelayfee=", strprintf("Fee rate (in %s/kB) used to defined dust, the value of " "an output such that it will cost about 1/3 of its value " "in fees at this fee rate to spend it. 
(default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE))); } strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions " "for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP)); strUsage += HelpMessageOpt( "-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %d)"), DEFAULT_ACCEPT_DATACARRIER)); strUsage += HelpMessageOpt( "-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we " "relay and mine (default: %u)"), MAX_OP_RETURN_RELAY)); strUsage += HelpMessageGroup(_("Block creation options:")); strUsage += HelpMessageOpt( "-blockmaxsize=", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_MAX_GENERATED_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-blockprioritypercentage=", strprintf(_("Set maximum percentage of a block reserved to " "high-priority/low-fee transactions (default: %d)"), DEFAULT_BLOCK_PRIORITY_PERCENTAGE)); strUsage += HelpMessageOpt( "-blockmintxfee=", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be " "included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE))); if (showDebug) strUsage += HelpMessageOpt("-blockversion=", "Override block version to test forking scenarios"); strUsage += HelpMessageGroup(_("RPC server options:")); strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands")); strUsage += HelpMessageOpt( "-rest", strprintf(_("Accept public REST requests (default: %d)"), DEFAULT_REST_ENABLE)); strUsage += HelpMessageOpt( "-rpcbind=", _("Bind to given address to listen for JSON-RPC connections. Use " "[host]:port notation for IPv6. This option can be specified " "multiple times (default: bind to all interfaces)")); strUsage += HelpMessageOpt("-rpccookiefile=", _("Location of the auth cookie (default: data dir)")); strUsage += HelpMessageOpt("-rpcuser=", _("Username for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcpassword=", _("Password for JSON-RPC connections")); strUsage += HelpMessageOpt( "-rpcauth=", _("Username and hashed password for JSON-RPC connections. The field " " comes in the format: :$. A canonical " "python script is included in share/rpcuser. The client then " "connects normally using the " "rpcuser=/rpcpassword= pair of arguments. This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcport=", strprintf(_("Listen for JSON-RPC connections on (default: %u or " "testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort())); strUsage += HelpMessageOpt( "-rpcallowip=", _("Allow JSON-RPC connections from specified source. Valid for " "are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. " "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). 
This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcthreads=", strprintf( _("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS)); if (showDebug) { strUsage += HelpMessageOpt( "-rpcworkqueue=", strprintf("Set the depth of the work queue to " "service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE)); strUsage += HelpMessageOpt( "-rpcservertimeout=", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT)); } return strUsage; } std::string LicenseInfo() { const std::string URL_SOURCE_CODE = ""; const std::string URL_WEBSITE = ""; return CopyrightHolders( strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" + "\n" + strprintf(_("Please contribute if you find %s useful. " "Visit %s for further information about the software."), PACKAGE_NAME, URL_WEBSITE) + "\n" + strprintf(_("The source code is available from %s."), URL_SOURCE_CODE) + "\n" + "\n" + _("This is experimental software.") + "\n" + strprintf(_("Distributed under the MIT software license, see the " "accompanying file %s or %s"), "COPYING", "") + "\n" + "\n" + strprintf(_("This product includes software developed by the " "OpenSSL Project for use in the OpenSSL Toolkit %s and " "cryptographic software written by Eric Young and UPnP " "software written by Thomas Bernard."), "") + "\n"; } static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex) { if (initialSync || !pBlockIndex) return; - std::string strCmd = GetArg("-blocknotify", ""); + std::string strCmd = gArgs.GetArg("-blocknotify", ""); boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex()); boost::thread t(runCommand, strCmd); // thread runs free } static bool fHaveGenesis = false; static boost::mutex cs_GenesisWait; static CConditionVariable condvar_GenesisWait; static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) { if (pBlockIndex != nullptr) { { boost::unique_lock lock_GenesisWait(cs_GenesisWait); fHaveGenesis = true; } condvar_GenesisWait.notify_all(); } } struct CImportingNow { CImportingNow() { assert(fImporting == false); fImporting = true; } ~CImportingNow() { assert(fImporting == true); fImporting = false; } }; // If we're using -prune with -reindex, then delete block files that will be // ignored by the reindex. Since reindexing works by starting at block file 0 // and looping until a blockfile is missing, do the same here to delete any // later block files after a gap. Also delete all rev files since they'll be // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync // with what's actually on disk by the time we start downloading, so that // pruning works correctly. void CleanupBlockRevFiles() { std::map mapBlockFiles; // Glob all blk?????.dat and rev?????.dat files from the blocks directory. // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. 
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for " "-reindex with -prune\n"); fs::path blocksdir = GetDataDir() / "blocks"; for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { if (is_regular_file(*it) && it->path().filename().string().length() == 12 && it->path().filename().string().substr(8, 4) == ".dat") { if (it->path().filename().string().substr(0, 3) == "blk") mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path(); else if (it->path().filename().string().substr(0, 3) == "rev") remove(it->path()); } } // Remove all block files that aren't part of a contiguous set starting at // zero by walking the ordered map (keys are block file indices) by keeping // a separate counter. Once we hit a gap (or if 0 doesn't exist) start // removing block files. int nContigCounter = 0; for (const std::pair &item : mapBlockFiles) { if (atoi(item.first) == nContigCounter) { nContigCounter++; continue; } remove(item.second); } } void ThreadImport(const Config &config, std::vector vImportFiles) { RenameThread("bitcoin-loadblk"); { CImportingNow imp; // -reindex if (fReindex) { int nFile = 0; while (true) { CDiskBlockPos pos(nFile, 0); if (!fs::exists(GetBlockPosFilename(pos, "blk"))) break; // No block files left to reindex FILE *file = OpenBlockFile(pos, true); if (!file) break; // This error is logged in OpenBlockFile LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); LoadExternalBlockFile(config, file, &pos); nFile++; } pblocktree->WriteReindexing(false); fReindex = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try // initializing (no-op if reindexing worked): InitBlockIndex(config); } // hardcoded $DATADIR/bootstrap.dat fs::path pathBootstrap = GetDataDir() / "bootstrap.dat"; if (fs::exists(pathBootstrap)) { FILE *file = fsbridge::fopen(pathBootstrap, "rb"); if (file) { fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old"; LogPrintf("Importing bootstrap.dat...\n"); LoadExternalBlockFile(config, file); RenameOver(pathBootstrap, pathBootstrapOld); } else { LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string()); } } // -loadblock= for (const fs::path &path : vImportFiles) { FILE *file = fsbridge::fopen(path, "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", path.string()); LoadExternalBlockFile(config, file); } else { LogPrintf("Warning: Could not open blocks file %s\n", path.string()); } } // scan for better chains in the block chain database, that are not yet // connected in the active best chain CValidationState state; if (!ActivateBestChain(config, state)) { LogPrintf("Failed to connect best block"); StartShutdown(); } - if (GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { + if (gArgs.GetBoolArg("-stopafterblockimport", + DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); } } // End scope of CImportingNow - if (GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { + if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { LoadMempool(config); fDumpMempoolLater = !fRequestShutdown; } } /** Sanity checks * Ensure that Bitcoin is running in a usable environment with all * necessary library support. */ bool InitSanityCheck(void) { if (!ECC_InitSanityCheck()) { InitError( "Elliptic curve cryptography sanity check failure. 
Aborting."); return false; } if (!glibc_sanity_test() || !glibcxx_sanity_test()) { return false; } if (!Random_SanityCheck()) { InitError("OS cryptographic RNG sanity check failure. Aborting."); return false; } return true; } static bool AppInitServers(Config &config, boost::thread_group &threadGroup) { RPCServer::OnStarted(&OnRPCStarted); RPCServer::OnStopped(&OnRPCStopped); RPCServer::OnPreCommand(&OnRPCPreCommand); if (!InitHTTPServer(config)) return false; if (!StartRPC()) return false; if (!StartHTTPRPC()) return false; - if (GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) return false; + if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) + return false; if (!StartHTTPServer()) return false; return true; } // Parameter interaction based on rules void InitParameterInteraction() { // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified. - if (IsArgSet("-bind")) { - if (SoftSetBoolArg("-listen", true)) + if (gArgs.IsArgSet("-bind")) { + if (gArgs.SoftSetBoolArg("-listen", true)) LogPrintf( "%s: parameter interaction: -bind set -> setting -listen=1\n", __func__); } - if (IsArgSet("-whitebind")) { - if (SoftSetBoolArg("-listen", true)) + if (gArgs.IsArgSet("-whitebind")) { + if (gArgs.SoftSetBoolArg("-listen", true)) LogPrintf("%s: parameter interaction: -whitebind set -> setting " "-listen=1\n", __func__); } if (gArgs.IsArgSet("-connect")) { // when only connecting to trusted nodes, do not seed via DNS, or listen // by default. - if (SoftSetBoolArg("-dnsseed", false)) + if (gArgs.SoftSetBoolArg("-dnsseed", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-dnsseed=0\n", __func__); - if (SoftSetBoolArg("-listen", false)) + if (gArgs.SoftSetBoolArg("-listen", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-listen=0\n", __func__); } - if (IsArgSet("-proxy")) { + if (gArgs.IsArgSet("-proxy")) { // to protect privacy, do not listen by default if a default proxy // server is specified. - if (SoftSetBoolArg("-listen", false)) + if (gArgs.SoftSetBoolArg("-listen", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__); // to protect privacy, do not use UPNP when a proxy is set. The user may // still specify -listen=1 to listen locally, so don't rely on this // happening through -listen below. 
- if (SoftSetBoolArg("-upnp", false)) + if (gArgs.SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__); // to protect privacy, do not discover addresses by default - if (SoftSetBoolArg("-discover", false)) + if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -proxy set -> setting " "-discover=0\n", __func__); } - if (!GetBoolArg("-listen", DEFAULT_LISTEN)) { + if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening // (pointless) - if (SoftSetBoolArg("-upnp", false)) + if (gArgs.SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__); - if (SoftSetBoolArg("-discover", false)) + if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__); - if (SoftSetBoolArg("-listenonion", false)) + if (gArgs.SoftSetBoolArg("-listenonion", false)) LogPrintf("%s: parameter interaction: -listen=0 -> setting " "-listenonion=0\n", __func__); } - if (IsArgSet("-externalip")) { + if (gArgs.IsArgSet("-externalip")) { // if an explicit public IP is specified, do not try to find others - if (SoftSetBoolArg("-discover", false)) + if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -externalip set -> setting " "-discover=0\n", __func__); } // disable whitelistrelay in blocksonly mode - if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { - if (SoftSetBoolArg("-whitelistrelay", false)) + if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { + if (gArgs.SoftSetBoolArg("-whitelistrelay", false)) LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting " "-whitelistrelay=0\n", __func__); } // Forcing relay from whitelisted hosts implies we will accept relays from // them in the first place. - if (GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { - if (SoftSetBoolArg("-whitelistrelay", true)) + if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { + if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> " "setting -whitelistrelay=1\n", __func__); } } static std::string ResolveErrMsg(const char *const optname, const std::string &strBind) { return strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind); } void InitLogging() { - fPrintToConsole = GetBoolArg("-printtoconsole", false); - fLogTimestamps = GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS); - fLogTimeMicros = GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS); - fLogIPs = GetBoolArg("-logips", DEFAULT_LOGIPS); + fPrintToConsole = gArgs.GetBoolArg("-printtoconsole", false); + fLogTimestamps = gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS); + fLogTimeMicros = gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS); + fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS); LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); LogPrintf("%s version %s\n", CLIENT_NAME, FormatFullVersion()); } namespace { // Variables internal to initialization process only ServiceFlags nRelevantServices = NODE_NETWORK; int nMaxConnections; int nUserMaxConnections; int nFD; ServiceFlags nLocalServices = NODE_NETWORK; } // namespace [[noreturn]] static void new_handler_terminate() { // Rather than throwing std::bad-alloc if allocation fails, terminate // immediately to (try to) avoid chain corruption. 
Since LogPrintf may // itself allocate memory, set the handler directly to terminate first. std::set_new_handler(std::terminate); LogPrintf("Error: Out of memory. Terminating.\n"); // The log was successful, terminate now. std::terminate(); }; bool AppInitBasicSetup() { // Step 1: setup #ifdef _MSC_VER // Turn off Microsoft heap dump noise _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr, OPEN_EXISTING, 0, 0)); #endif #if _MSC_VER >= 1400 // Disable confusing "helpful" text message on abort, Ctrl-C _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #ifdef WIN32 // Enable Data Execution Prevention (DEP) // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008 // A failure is non-critical and needs no further attention! #ifndef PROCESS_DEP_ENABLE // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= // 0x0601 (Windows 7), which is not correct. Can be removed, when GCCs winbase.h // is fixed! #define PROCESS_DEP_ENABLE 0x00000001 #endif typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD); PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress( GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy"); if (setProcDEPPol != nullptr) setProcDEPPol(PROCESS_DEP_ENABLE); #endif if (!SetupNetworking()) return InitError("Initializing networking failed"); #ifndef WIN32 - if (!GetBoolArg("-sysperms", false)) { + if (!gArgs.GetBoolArg("-sysperms", false)) { umask(077); } // Clean shutdown on SIGTERM struct sigaction sa; sa.sa_handler = HandleSIGTERM; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sigaction(SIGTERM, &sa, nullptr); sigaction(SIGINT, &sa, nullptr); // Reopen debug.log on SIGHUP struct sigaction sa_hup; sa_hup.sa_handler = HandleSIGHUP; sigemptyset(&sa_hup.sa_mask); sa_hup.sa_flags = 0; sigaction(SIGHUP, &sa_hup, nullptr); // Ignore SIGPIPE, otherwise it will bring the daemon down if the client // closes unexpectedly signal(SIGPIPE, SIG_IGN); #endif std::set_new_handler(new_handler_terminate); return true; } bool AppInitParameterInteraction(Config &config) { const CChainParams &chainparams = Params(); // Step 2: parameter interactions // also see: InitParameterInteraction() // if using block pruning, then disallow txindex - if (GetArg("-prune", 0)) { - if (GetBoolArg("-txindex", DEFAULT_TXINDEX)) + if (gArgs.GetArg("-prune", 0)) { + if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); } // if space reserved for high priority transactions is misconfigured // stop program execution and warn the user with a proper error message - const int64_t blkprio = - GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE); + const int64_t blkprio = gArgs.GetArg("-blockprioritypercentage", + DEFAULT_BLOCK_PRIORITY_PERCENTAGE); if (!config.SetBlockPriorityPercentage(blkprio)) { return InitError(_("Block priority percentage has to belong to the " "[0..100] interval.")); } // Make sure enough file descriptors are available int nBind = std::max( (gArgs.IsArgSet("-bind") ? gArgs.GetArgs("-bind").size() : 0) + (gArgs.IsArgSet("-whitebind") ? 
gArgs.GetArgs("-whitebind").size() : 0), size_t(1)); nUserMaxConnections = - GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); + gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); nMaxConnections = std::max(nUserMaxConnections, 0); // Trim requested connection counts, to fit into system limitations nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS)), 0); nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); if (nMaxConnections < nUserMaxConnections) { InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, " "because of system limitations."), nUserMaxConnections, nMaxConnections)); } // Step 3: parameter-to-internal-flags if (gArgs.IsArgSet("-debug")) { // Special-case: if -debug=0/-nodebug is set, turn off debugging // messages const std::vector &categories = gArgs.GetArgs("-debug"); if (find(categories.begin(), categories.end(), std::string("0")) == categories.end()) { for (const auto &cat : categories) { uint32_t flag = 0; if (!GetLogCategory(&flag, &cat)) { InitWarning( strprintf(_("Unsupported logging category %s=%s."), "-debug", cat)); } logCategories |= flag; } } } // Now remove the logging categories which were explicitly excluded if (gArgs.IsArgSet("-debugexclude")) { for (const std::string &cat : gArgs.GetArgs("-debugexclude")) { uint32_t flag; if (!GetLogCategory(&flag, &cat)) { InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat)); } logCategories &= ~flag; } } // Check for -debugnet - if (GetBoolArg("-debugnet", false)) + if (gArgs.GetBoolArg("-debugnet", false)) InitWarning( _("Unsupported argument -debugnet ignored, use -debug=net.")); // Check for -socks - as this is a privacy risk to continue, exit here - if (IsArgSet("-socks")) + if (gArgs.IsArgSet("-socks")) return InitError( _("Unsupported argument -socks found. Setting SOCKS version isn't " "possible anymore, only SOCKS5 proxies are supported.")); // Check for -tor - as this is a privacy risk to continue, exit here - if (GetBoolArg("-tor", false)) + if (gArgs.GetBoolArg("-tor", false)) return InitError(_("Unsupported argument -tor found, use -onion.")); - if (GetBoolArg("-benchmark", false)) + if (gArgs.GetBoolArg("-benchmark", false)) InitWarning( _("Unsupported argument -benchmark ignored, use -debug=bench.")); - if (GetBoolArg("-whitelistalwaysrelay", false)) + if (gArgs.GetBoolArg("-whitelistalwaysrelay", false)) InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use " "-whitelistrelay and/or -whitelistforcerelay.")); - if (IsArgSet("-blockminsize")) + if (gArgs.IsArgSet("-blockminsize")) InitWarning("Unsupported argument -blockminsize ignored."); // Checkmempool and checkblockindex default to true in regtest mode int ratio = std::min( - std::max(GetArg("-checkmempool", - chainparams.DefaultConsistencyChecks() ? 1 : 0), - 0), + std::max( + gArgs.GetArg("-checkmempool", + chainparams.DefaultConsistencyChecks() ? 
1 : 0), + 0), 1000000); if (ratio != 0) { mempool.setSanityCheck(1.0 / ratio); } - fCheckBlockIndex = - GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks()); + fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex", + chainparams.DefaultConsistencyChecks()); fCheckpointsEnabled = - GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED); + gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED); hashAssumeValid = uint256S( - GetArg("-assumevalid", - chainparams.GetConsensus().defaultAssumeValid.GetHex())); + gArgs.GetArg("-assumevalid", + chainparams.GetConsensus().defaultAssumeValid.GetHex())); if (!hashAssumeValid.IsNull()) LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex()); else LogPrintf("Validating signatures for all blocks.\n"); // mempool limits int64_t nMempoolSizeMax = - GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t nMempoolSizeMin = - GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * - 40; + gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * + 1000 * 40; if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0))); // Incremental relay fee sets the minimimum feerate increase necessary for // BIP 125 replacement in the mempool and the amount the mempool min fee // increases above the feerate of txs evicted due to mempool limiting. - if (IsArgSet("-incrementalrelayfee")) { + if (gArgs.IsArgSet("-incrementalrelayfee")) { Amount n(0); - if (!ParseMoney(GetArg("-incrementalrelayfee", ""), n)) - return InitError(AmountErrMsg("incrementalrelayfee", - GetArg("-incrementalrelayfee", ""))); + if (!ParseMoney(gArgs.GetArg("-incrementalrelayfee", ""), n)) + return InitError( + AmountErrMsg("incrementalrelayfee", + gArgs.GetArg("-incrementalrelayfee", ""))); incrementalRelayFee = CFeeRate(n); } // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency - nScriptCheckThreads = GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); + nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); if (nScriptCheckThreads <= 0) nScriptCheckThreads += GetNumCores(); if (nScriptCheckThreads <= 1) nScriptCheckThreads = 0; else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; // Configure excessive block size. const uint64_t nProposedExcessiveBlockSize = - GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE); + gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE); if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) { return InitError( _("Excessive block size must be > 1,000,000 bytes (1MB)")); } // Check blockmaxsize does not exceed maximum accepted block size. 
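A short worked example of the -maxmempool floor computed above; the inputs are illustrative, not the project defaults. With a descendant-size limit of 101 kB the mempool must hold at least 101 * 1000 * 40 = 4,040,000 bytes, so a hypothetical -maxmempool=4 is rejected with a request for at least 5 MB:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical inputs, chosen to trip the check.
    int64_t maxmempool_mb = 4;            // stand-in for -maxmempool
    int64_t limitdescendantsize_kb = 101; // stand-in for -limitdescendantsize
    int64_t nMempoolSizeMax = maxmempool_mb * 1000000;
    int64_t nMempoolSizeMin = limitdescendantsize_kb * 1000 * 40;
    if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) {
        // Mirrors the InitError above: prints "-maxmempool must be at least 5 MB"
        std::printf("-maxmempool must be at least %d MB\n",
                    (int)std::ceil(nMempoolSizeMin / 1000000.0));
    }
}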
const uint64_t nProposedMaxGeneratedBlockSize = - GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); + gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) { auto msg = _("Max generated block size (blockmaxsize) cannot exceed " "the excessive block size (excessiveblocksize)"); return InitError(msg); } // block pruning; get the amount of disk space (in MiB) to allot for block & // undo files - int64_t nPruneArg = GetArg("-prune", 0); + int64_t nPruneArg = gArgs.GetArg("-prune", 0); if (nPruneArg < 0) { return InitError( _("Prune cannot be configured with a negative value.")); } nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024; if (nPruneArg == 1) { // manual pruning: -prune=1 LogPrintf("Block pruning enabled. Use RPC call " "pruneblockchain(height) to manually prune block and undo " "files.\n"); nPruneTarget = std::numeric_limits::max(); fPruneMode = true; } else if (nPruneTarget) { if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) { return InitError( strprintf(_("Prune configured below the minimum of %d MiB. " "Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); } LogPrintf("Prune configured to target %uMiB on disk for block and undo " "files.\n", nPruneTarget / 1024 / 1024); fPruneMode = true; } RegisterAllRPCCommands(tableRPC); #ifdef ENABLE_WALLET RegisterWalletRPCCommands(tableRPC); RegisterDumpRPCCommands(tableRPC); #endif - nConnectTimeout = GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); + nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); if (nConnectTimeout <= 0) nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; // Fee-per-kilobyte amount considered the same as "free". If you are mining, // be careful setting this: if you set it to zero then a transaction spammer // can cheaply fill blocks using 1-satoshi-fee transactions. It should be // set above the real cost to you of processing a transaction. - if (IsArgSet("-minrelaytxfee")) { + if (gArgs.IsArgSet("-minrelaytxfee")) { Amount n(0); - auto parsed = ParseMoney(GetArg("-minrelaytxfee", ""), n); + auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n); if (!parsed || Amount(0) == n) - return InitError( - AmountErrMsg("minrelaytxfee", GetArg("-minrelaytxfee", ""))); + return InitError(AmountErrMsg("minrelaytxfee", + gArgs.GetArg("-minrelaytxfee", ""))); // High fee check is done afterward in CWallet::ParameterInteraction() ::minRelayTxFee = CFeeRate(n); } else if (incrementalRelayFee > ::minRelayTxFee) { // Allow only setting incrementalRelayFee to control both ::minRelayTxFee = incrementalRelayFee; LogPrintf( "Increasing minrelaytxfee to %s to match incrementalrelayfee\n", ::minRelayTxFee.ToString()); } // Sanity check argument for min fee for including tx in block // TODO: Harmonize which arguments need sanity checking and where that // happens. - if (IsArgSet("-blockmintxfee")) { + if (gArgs.IsArgSet("-blockmintxfee")) { Amount n(0); - if (!ParseMoney(GetArg("-blockmintxfee", ""), n)) - return InitError( - AmountErrMsg("blockmintxfee", GetArg("-blockmintxfee", ""))); + if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) + return InitError(AmountErrMsg("blockmintxfee", + gArgs.GetArg("-blockmintxfee", ""))); } // Feerate used to define dust. Shouldn't be changed lightly as old // implementations may inadvertently create non-standard transactions. 
- if (IsArgSet("-dustrelayfee")) { + if (gArgs.IsArgSet("-dustrelayfee")) { Amount n(0); - auto parsed = ParseMoney(GetArg("-dustrelayfee", ""), n); + auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n); if (!parsed || Amount(0) == n) - return InitError( - AmountErrMsg("dustrelayfee", GetArg("-dustrelayfee", ""))); + return InitError(AmountErrMsg("dustrelayfee", + gArgs.GetArg("-dustrelayfee", ""))); dustRelayFee = CFeeRate(n); } fRequireStandard = - !GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); + !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); if (chainparams.RequireStandard() && !fRequireStandard) return InitError( strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); - nBytesPerSigOp = GetArg("-bytespersigop", nBytesPerSigOp); + nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp); #ifdef ENABLE_WALLET if (!CWallet::ParameterInteraction()) return false; #endif fIsBareMultisigStd = - GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); - fAcceptDatacarrier = GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); - nMaxDatacarrierBytes = GetArg("-datacarriersize", nMaxDatacarrierBytes); + gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); + fAcceptDatacarrier = + gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); + nMaxDatacarrierBytes = + gArgs.GetArg("-datacarriersize", nMaxDatacarrierBytes); // Option to startup with mocktime set (used for regression testing): - SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op + SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op - if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) + if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM); // Signal Bitcoin Cash support. // TODO: remove some time after the hardfork when no longer needed // to differentiate the network nodes. 
nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH); - nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); + nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); if (gArgs.IsArgSet("-bip9params")) { // Allow overriding BIP9 parameters for testing if (!chainparams.MineBlocksOnDemand()) { return InitError( "BIP9 parameters may only be overridden on regtest."); } for (const std::string &strDeployment : gArgs.GetArgs("-bip9params")) { std::vector vDeploymentParams; boost::split(vDeploymentParams, strDeployment, boost::is_any_of(":")); if (vDeploymentParams.size() != 3) { return InitError("BIP9 parameters malformed, expecting " "deployment:start:end"); } int64_t nStartTime, nTimeout; if (!ParseInt64(vDeploymentParams[1], &nStartTime)) { return InitError( strprintf("Invalid nStartTime (%s)", vDeploymentParams[1])); } if (!ParseInt64(vDeploymentParams[2], &nTimeout)) { return InitError( strprintf("Invalid nTimeout (%s)", vDeploymentParams[2])); } bool found = false; for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { if (vDeploymentParams[0].compare( VersionBitsDeploymentInfo[j].name) == 0) { UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout); found = true; LogPrintf("Setting BIP9 activation parameters for %s to " "start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout); break; } } if (!found) { return InitError( strprintf("Invalid deployment (%s)", vDeploymentParams[0])); } } } return true; } static bool LockDataDirectory(bool probeOnly) { std::string strDataDir = GetDataDir().string(); // Make sure only a single Bitcoin process is using the data directory. fs::path pathLockFile = GetDataDir() / ".lock"; // empty lock file; created if it doesn't exist. FILE *file = fsbridge::fopen(pathLockFile, "a"); if (file) fclose(file); try { static boost::interprocess::file_lock lock( pathLockFile.string().c_str()); if (!lock.try_lock()) { return InitError( strprintf(_("Cannot obtain a lock on data directory %s. %s is " "probably already running."), strDataDir, _(PACKAGE_NAME))); } if (probeOnly) { lock.unlock(); } } catch (const boost::interprocess::interprocess_exception &e) { return InitError(strprintf(_("Cannot obtain a lock on data directory " "%s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what())); } return true; } bool AppInitSanityChecks() { // Step 4: sanity checks // Initialize elliptic curve code std::string sha256_algo = SHA256AutoDetect(); LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo); RandomInit(); ECC_Start(); globalVerifyHandle.reset(new ECCVerifyHandle()); // Sanity check if (!InitSanityCheck()) { return InitError(strprintf( _("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); } // Probe the data directory lock to give an early error message, if possible return LockDataDirectory(true); } bool AppInitMain(Config &config, boost::thread_group &threadGroup, CScheduler &scheduler) { const CChainParams &chainparams = Params(); // Step 4a: application initialization // After daemonization get the data directory lock again and hold on to it // until exit. This creates a slight window for a race condition to happen, // however this condition is harmless: it will at most make us exit without // printing a message to console. 
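LockDataDirectory() above takes an advisory file lock on <datadir>/.lock, first only probing it during the sanity checks and then, as the comment before AppInitMain explains, re-acquiring and holding it for the life of the process. A minimal sketch of that probe-then-hold pattern; it assumes the ".lock" file already exists, which the real code guarantees by touching it with fopen()/fclose() first, and the path here is illustrative:

#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <cstdio>

int main() {
    try {
        // Illustrative path; init.cpp uses GetDataDir() / ".lock".
        boost::interprocess::file_lock lock(".lock");
        if (!lock.try_lock()) {
            // Another process already holds the data directory.
            std::fprintf(stderr, "already running\n");
            return 1;
        }
        // probeOnly == true unlocks again; AppInitMain instead keeps it held.
        lock.unlock();
    } catch (const boost::interprocess::interprocess_exception &e) {
        std::fprintf(stderr, "lock error: %s\n", e.what());
        return 1;
    }
    return 0;
}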
if (!LockDataDirectory(false)) { // Detailed error printed inside LockDataDirectory return false; } #ifndef WIN32 CreatePidFile(GetPidFile(), getpid()); #endif - if (GetBoolArg("-shrinkdebugfile", logCategories != BCLog::NONE)) { + if (gArgs.GetBoolArg("-shrinkdebugfile", logCategories != BCLog::NONE)) { // Do this first since it both loads a bunch of debug.log into memory, // and because this needs to happen before any other debug.log printing. ShrinkDebugFile(); } if (fPrintToDebugLog) { OpenDebugLog(); } if (!fLogTimestamps) { LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime())); } LogPrintf("Default data directory %s\n", GetDefaultDataDir().string()); LogPrintf("Using data directory %s\n", GetDataDir().string()); - LogPrintf("Using config file %s\n", - GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); + LogPrintf( + "Using config file %s\n", + GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); LogPrintf("Using at most %i automatic connections (%i file descriptors " "available)\n", nMaxConnections, nFD); InitSignatureCache(); InitScriptExecutionCache(); LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads); if (nScriptCheckThreads) { for (int i = 0; i < nScriptCheckThreads - 1; i++) { threadGroup.create_thread(&ThreadScriptCheck); } } // Start the lightweight task scheduler thread CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler); threadGroup.create_thread(boost::bind(&TraceThread, "scheduler", serviceLoop)); /* Start the RPC server already. It will be started in "warmup" mode * and not really process calls already (but it will signify connections * that the server is there and will be ready later). Warmup mode will * be disabled when initialisation is finished. */ - if (GetBoolArg("-server", false)) { + if (gArgs.GetBoolArg("-server", false)) { uiInterface.InitMessage.connect(SetRPCWarmupStatus); if (!AppInitServers(config, threadGroup)) { return InitError( _("Unable to start HTTP server. See debug log for details.")); } } int64_t nStart; // Step 5: verify wallet database integrity #ifdef ENABLE_WALLET if (!CWallet::Verify()) { return false; } #endif // Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state // is not yet setup and may end up being set up twice if we // need to reindex later. 
assert(!g_connman); g_connman = std::unique_ptr( new CConnman(config, GetRand(std::numeric_limits::max()), GetRand(std::numeric_limits::max()))); CConnman &connman = *g_connman; peerLogic.reset(new PeerLogicValidation(&connman)); RegisterValidationInterface(peerLogic.get()); RegisterNodeSignals(GetNodeSignals()); if (gArgs.IsArgSet("-onlynet")) { std::set nets; for (const std::string &snet : gArgs.GetArgs("-onlynet")) { enum Network net = ParseNetwork(snet); if (net == NET_UNROUTABLE) return InitError(strprintf( _("Unknown network specified in -onlynet: '%s'"), snet)); nets.insert(net); } for (int n = 0; n < NET_MAX; n++) { enum Network net = (enum Network)n; if (!nets.count(net)) SetLimited(net); } } if (gArgs.IsArgSet("-whitelist")) { for (const std::string &net : gArgs.GetArgs("-whitelist")) { CSubNet subnet; LookupSubNet(net.c_str(), subnet); if (!subnet.IsValid()) return InitError(strprintf( _("Invalid netmask specified in -whitelist: '%s'"), net)); connman.AddWhitelistedRange(subnet); } } - bool proxyRandomize = GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE); + bool proxyRandomize = + gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE); // -proxy sets a proxy for all outgoing network traffic // -noproxy (or -proxy=0) as well as the empty string can be used to not set // a proxy, this is the default - std::string proxyArg = GetArg("-proxy", ""); + std::string proxyArg = gArgs.GetArg("-proxy", ""); SetLimited(NET_TOR); if (proxyArg != "" && proxyArg != "0") { CService resolved(LookupNumeric(proxyArg.c_str(), 9050)); proxyType addrProxy = proxyType(resolved, proxyRandomize); if (!addrProxy.IsValid()) { return InitError( strprintf(_("Invalid -proxy address: '%s'"), proxyArg)); } SetProxy(NET_IPV4, addrProxy); SetProxy(NET_IPV6, addrProxy); SetProxy(NET_TOR, addrProxy); SetNameProxy(addrProxy); SetLimited(NET_TOR, false); // by default, -proxy sets onion as // reachable, unless -noonion later } // -onion can be used to set only a proxy for .onion, or override normal // proxy for .onion addresses. // -noonion (or -onion=0) disables connecting to .onion entirely. 
An empty // string is used to not override the onion proxy (in which case it defaults // to -proxy set above, or none) - std::string onionArg = GetArg("-onion", ""); + std::string onionArg = gArgs.GetArg("-onion", ""); if (onionArg != "") { if (onionArg == "0") { // Handle -noonion/-onion=0 SetLimited(NET_TOR); // set onions as unreachable } else { CService resolved(LookupNumeric(onionArg.c_str(), 9050)); proxyType addrOnion = proxyType(resolved, proxyRandomize); if (!addrOnion.IsValid()) { return InitError( strprintf(_("Invalid -onion address: '%s'"), onionArg)); } SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } } // see Step 2: parameter interactions for more information about these - fListen = GetBoolArg("-listen", DEFAULT_LISTEN); - fDiscover = GetBoolArg("-discover", true); - fNameLookup = GetBoolArg("-dns", DEFAULT_NAME_LOOKUP); - fRelayTxes = !GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY); + fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN); + fDiscover = gArgs.GetBoolArg("-discover", true); + fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP); + fRelayTxes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY); if (fListen) { bool fBound = false; if (gArgs.IsArgSet("-bind")) { for (const std::string &strBind : gArgs.GetArgs("-bind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) { return InitError(ResolveErrMsg("bind", strBind)); } fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR)); } } if (gArgs.IsArgSet("-whitebind")) { for (const std::string &strBind : gArgs.GetArgs("-whitebind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, 0, false)) { return InitError(ResolveErrMsg("whitebind", strBind)); } if (addrBind.GetPort() == 0) { return InitError(strprintf( _("Need to specify a port with -whitebind: '%s'"), strBind)); } fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST)); } } if (!gArgs.IsArgSet("-bind") && !gArgs.IsArgSet("-whitebind")) { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; fBound |= Bind(connman, CService(in6addr_any, GetListenPort()), BF_NONE); fBound |= Bind(connman, CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE); } if (!fBound) { return InitError(_("Failed to listen on any port. 
Use -listen=0 if " "you want this.")); } } if (gArgs.IsArgSet("-externalip")) { for (const std::string &strAddr : gArgs.GetArgs("-externalip")) { CService addrLocal; if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid()) { AddLocal(addrLocal, LOCAL_MANUAL); } else { return InitError(ResolveErrMsg("externalip", strAddr)); } } } if (gArgs.IsArgSet("-seednode")) { for (const std::string &strDest : gArgs.GetArgs("-seednode")) { connman.AddOneShot(strDest); } } #if ENABLE_ZMQ pzmqNotificationInterface = CZMQNotificationInterface::Create(); if (pzmqNotificationInterface) { RegisterValidationInterface(pzmqNotificationInterface); } #endif // unlimited unless -maxuploadtarget is set uint64_t nMaxOutboundLimit = 0; uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME; - if (IsArgSet("-maxuploadtarget")) { + if (gArgs.IsArgSet("-maxuploadtarget")) { nMaxOutboundLimit = - GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * 1024; + gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * + 1024; } // Step 7: load block chain - fReindex = GetBoolArg("-reindex", false); - bool fReindexChainState = GetBoolArg("-reindex-chainstate", false); + fReindex = gArgs.GetBoolArg("-reindex", false); + bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false); // cache size calculations - int64_t nTotalCache = (GetArg("-dbcache", nDefaultDbCache) << 20); + int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20); // total cache cannot be less than nMinDbCache nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be greater than nMaxDbcache nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); int64_t nBlockTreeDBCache = nTotalCache / 8; nBlockTreeDBCache = std::min(nBlockTreeDBCache, - (GetBoolArg("-txindex", DEFAULT_TXINDEX) + (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? 
nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20); nTotalCache -= nBlockTreeDBCache; // use 25%-50% of the remainder for disk cache int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // cap total coins db cache nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); nTotalCache -= nCoinDBCache; // the rest goes to in-memory cache nCoinCacheUsage = nTotalCache; int64_t nMempoolSizeMax = - GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; LogPrintf("Cache configuration:\n"); LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of " "unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024)); bool fLoaded = false; while (!fLoaded && !fRequestShutdown) { bool fReset = fReindex; std::string strLoadError; uiInterface.InitMessage(_("Loading block index...")); nStart = GetTimeMillis(); do { try { UnloadBlockIndex(); delete pcoinsTip; delete pcoinsdbview; delete pcoinscatcher; delete pblocktree; pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex); pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex || fReindexChainState); pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview); if (fReindex) { pblocktree->WriteReindexing(true); // If we're reindexing in prune mode, wipe away unusable // block files and all undo data files if (fPruneMode) { CleanupBlockRevFiles(); } } else if (!pcoinsdbview->Upgrade()) { strLoadError = _("Error upgrading chainstate database"); break; } if (fRequestShutdown) break; if (!LoadBlockIndex(chainparams)) { strLoadError = _("Error loading block database"); break; } // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way // around). if (!mapBlockIndex.empty() && mapBlockIndex.count( chainparams.GetConsensus().hashGenesisBlock) == 0) { return InitError(_("Incorrect or no genesis block found. " "Wrong datadir for network?")); } // Initialize the block index (no-op if non-empty database was // already loaded) if (!InitBlockIndex(config)) { strLoadError = _("Error initializing block database"); break; } // Check for changed -txindex state - if (fTxIndex != GetBoolArg("-txindex", DEFAULT_TXINDEX)) { + if (fTxIndex != gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { strLoadError = _("You need to rebuild the database using " "-reindex-chainstate to change -txindex"); break; } // Check for changed -prune state. What we are concerned about // is a user who has pruned blocks in the past, but is now // trying to run unpruned. if (fHavePruned && !fPruneMode) { strLoadError = _("You need to rebuild the database using -reindex to " "go back to unpruned mode. This will redownload the " "entire blockchain"); break; } if (!ReplayBlocks(config, pcoinsdbview)) { strLoadError = _("Unable to replay blocks. You will need to rebuild " "the database using -reindex-chainstate."); break; } pcoinsTip = new CCoinsViewCache(pcoinscatcher); LoadChainTip(chainparams); if (!fReindex && chainActive.Tip() != nullptr) { uiInterface.InitMessage(_("Rewinding blocks...")); if (!RewindBlockIndex(config)) { strLoadError = _("Unable to rewind the database to a " "pre-fork state. 
You will need to " "redownload the blockchain"); break; } } uiInterface.InitMessage(_("Verifying blocks...")); if (fHavePruned && - GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > + gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) { LogPrintf("Prune: pruned datadir may not have more than %d " "blocks; only checking available blocks", MIN_BLOCKS_TO_KEEP); } { LOCK(cs_main); CBlockIndex *tip = chainActive.Tip(); RPCNotifyBlockChange(true, tip); if (tip && tip->nTime > GetAdjustedTime() + 2 * 60 * 60) { strLoadError = _("The block database contains a block which " "appears to be from the future. " "This may be due to your computer's date and " "time being set incorrectly. " "Only rebuild the block database if you are sure " "that your computer's date and time are correct"); break; } } if (!CVerifyDB().VerifyDB( config, pcoinsdbview, - GetArg("-checklevel", DEFAULT_CHECKLEVEL), - GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { + gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL), + gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { strLoadError = _("Corrupted block database detected"); break; } } catch (const std::exception &e) { LogPrintf("%s\n", e.what()); strLoadError = _("Error opening block database"); break; } fLoaded = true; } while (false); if (!fLoaded && !fRequestShutdown) { // first suggest a reindex if (!fReset) { bool fRet = uiInterface.ThreadSafeQuestion( strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"), strLoadError + ".\nPlease restart with -reindex or " "-reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { fReindex = true; fRequestShutdown = false; } else { LogPrintf("Aborted block database rebuild. Exiting.\n"); return false; } } else { return InitError(strLoadError); } } } // As LoadBlockIndex can take several minutes, it's possible the user // requested to kill the GUI during the last operation. If so, exit. // As the program has not fully started yet, Shutdown() is possibly // overkill. if (fRequestShutdown) { LogPrintf("Shutdown requested. Exiting.\n"); return false; } LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart); fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION); // Allowed to fail as this file IS missing on first startup. if (!est_filein.IsNull()) mempool.ReadFeeEstimates(est_filein); fFeeEstimatesInitialized = true; // Encoded addresses using cashaddr instead of base58 // Activates by default on Jan, 14 config.SetCashAddrEncoding( - GetBoolArg("-usecashaddr", GetAdjustedTime() > 1515900000)); + gArgs.GetBoolArg("-usecashaddr", GetAdjustedTime() > 1515900000)); // Step 8: load wallet #ifdef ENABLE_WALLET if (!CWallet::InitLoadWallet()) return false; #else LogPrintf("No wallet support compiled in!\n"); #endif // Step 9: data directory maintenance // if pruning, unset the service bit and perform the initial blockstore // prune after any wallet rescanning has taken place. if (fPruneMode) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { uiInterface.InitMessage(_("Pruning blockstore...")); PruneAndFlush(); } } // Step 10: import blocks if (!CheckDiskSpace()) { return false; } // Either install a handler to notify us when genesis activates, or set // fHaveGenesis directly. // No locking, as this happens before any background thread is started. 
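The handler described just above is one half of a standard condition-variable handshake: once the genesis block activates, the notification sets a flag and signals, while the init code blocks until the flag is set. A self-contained toy of the same pattern using the standard library (the names are illustrative, not the ones used in init.cpp):

#include <condition_variable>
#include <mutex>
#include <thread>

static std::mutex m;
static std::condition_variable cv;
static bool have_genesis = false;

int main() {
    // Stands in for the import/notification side that reports the new tip.
    std::thread import([] {
        { std::lock_guard<std::mutex> g(m); have_genesis = true; }
        cv.notify_all();
    });
    // Stands in for AppInitMain waiting before starting the node.
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [] { return have_genesis; });
    import.join();
}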
if (chainActive.Tip() == nullptr) { uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait); } else { fHaveGenesis = true; } - if (IsArgSet("-blocknotify")) { + if (gArgs.IsArgSet("-blocknotify")) { uiInterface.NotifyBlockTip.connect(BlockNotifyCallback); } std::vector vImportFiles; if (gArgs.IsArgSet("-loadblock")) { for (const std::string &strFile : gArgs.GetArgs("-loadblock")) { vImportFiles.push_back(strFile); } } threadGroup.create_thread( boost::bind(&ThreadImport, std::ref(config), vImportFiles)); // Wait for genesis block to be processed { boost::unique_lock lock(cs_GenesisWait); while (!fHaveGenesis) { condvar_GenesisWait.wait(lock); } uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait); } // Step 11: start node //// debug print LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size()); LogPrintf("nBestHeight = %d\n", chainActive.Height()); - if (GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { + if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { StartTorControl(threadGroup, scheduler); } Discover(threadGroup); // Map ports with UPnP - MapPort(GetBoolArg("-upnp", DEFAULT_UPNP)); + MapPort(gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)); std::string strNodeError; CConnman::Options connOptions; connOptions.nLocalServices = nLocalServices; connOptions.nRelevantServices = nRelevantServices; connOptions.nMaxConnections = nMaxConnections; connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections); connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS; connOptions.nMaxFeeler = 1; connOptions.nBestHeight = chainActive.Height(); connOptions.uiInterface = &uiInterface; connOptions.nSendBufferMaxSize = - 1000 * GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER); + 1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER); connOptions.nReceiveFloodSize = - 1000 * GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER); + 1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER); connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe; connOptions.nMaxOutboundLimit = nMaxOutboundLimit; if (!connman.Start(scheduler, strNodeError, connOptions)) { return InitError(strNodeError); } // Step 12: finished SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->postInitProcess(scheduler); } #endif return !fRequestShutdown; } diff --git a/src/miner.cpp b/src/miner.cpp index 27e7854f3f..a576dac15b 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -1,657 +1,658 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "miner.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "coins.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/validation.h" #include "hash.h" #include "net.h" #include "policy/policy.h" #include "pow.h" #include "primitives/transaction.h" #include "script/standard.h" #include "timedata.h" #include "txmempool.h" #include "util.h" #include "utilmoneystr.h" #include "validation.h" #include "validationinterface.h" #include #include #include #include #include static const int MAX_COINBASE_SCRIPTSIG_SIZE = 100; ////////////////////////////////////////////////////////////////////////////// // // BitcoinMiner // // // Unconfirmed transactions in the memory pool often depend on other // transactions in the memory pool. When we select transactions from the // pool, we select by highest priority or fee rate, so we might consider // transactions that depend on transactions that aren't yet in the block. uint64_t nLastBlockTx = 0; uint64_t nLastBlockSize = 0; class ScoreCompare { public: ScoreCompare() {} bool operator()(const CTxMemPool::txiter a, const CTxMemPool::txiter b) { // Convert to less than. return CompareTxMemPoolEntryByScore()(*b, *a); } }; int64_t UpdateTime(CBlockHeader *pblock, const Config &config, const CBlockIndex *pindexPrev) { int64_t nOldTime = pblock->nTime; int64_t nNewTime = std::max(pindexPrev->GetMedianTimePast() + 1, GetAdjustedTime()); if (nOldTime < nNewTime) { pblock->nTime = nNewTime; } const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Updating time can change work required on testnet: if (consensusParams.fPowAllowMinDifficultyBlocks) { pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, config); } return nNewTime - nOldTime; } static uint64_t ComputeMaxGeneratedBlockSize(const Config &config, const CBlockIndex *pindexPrev) { // Block resource limits // If -blockmaxsize is not given, limit to DEFAULT_MAX_GENERATED_BLOCK_SIZE // If only one is given, only restrict the specified resource. // If both are given, restrict both. uint64_t nMaxGeneratedBlockSize = DEFAULT_MAX_GENERATED_BLOCK_SIZE; - if (IsArgSet("-blockmaxsize")) { + if (gArgs.IsArgSet("-blockmaxsize")) { nMaxGeneratedBlockSize = - GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); + gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); } // Limit size to between 1K and MaxBlockSize-1K for sanity: nMaxGeneratedBlockSize = std::max(uint64_t(1000), std::min(config.GetMaxBlockSize() - 1000, nMaxGeneratedBlockSize)); return nMaxGeneratedBlockSize; } BlockAssembler::BlockAssembler(const Config &_config) : config(&_config) { - if (IsArgSet("-blockmintxfee")) { + if (gArgs.IsArgSet("-blockmintxfee")) { Amount n(0); - ParseMoney(GetArg("-blockmintxfee", ""), n); + ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n); blockMinFeeRate = CFeeRate(n); } else { blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE); } LOCK(cs_main); nMaxGeneratedBlockSize = ComputeMaxGeneratedBlockSize(*config, chainActive.Tip()); } void BlockAssembler::resetBlock() { inBlock.clear(); // Reserve space for coinbase tx. nBlockSize = 1000; nBlockSigOps = 100; // These counters do not include coinbase tx. 
nBlockTx = 0; nFees = Amount(0); lastFewTxs = 0; blockFinished = false; } static const std::vector getExcessiveBlockSizeSig(const Config &config) { std::string cbmsg = "/EB" + getSubVersionEB(config.GetMaxBlockSize()) + "/"; const char *cbcstr = cbmsg.c_str(); std::vector vec(cbcstr, cbcstr + cbmsg.size()); return vec; } std::unique_ptr BlockAssembler::CreateNewBlock(const CScript &scriptPubKeyIn) { int64_t nTimeStart = GetTimeMicros(); resetBlock(); pblocktemplate.reset(new CBlockTemplate()); if (!pblocktemplate.get()) { return nullptr; } // Pointer for convenience. pblock = &pblocktemplate->block; // Add dummy coinbase tx as first transaction. pblock->vtx.emplace_back(); // updated at end pblocktemplate->vTxFees.push_back(Amount(-1)); // updated at end pblocktemplate->vTxSigOpsCount.push_back(-1); LOCK2(cs_main, mempool.cs); CBlockIndex *pindexPrev = chainActive.Tip(); nHeight = pindexPrev->nHeight + 1; const CChainParams &chainparams = config->GetChainParams(); pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus()); // -regtest only: allow overriding block.nVersion with // -blockversion=N to test forking scenarios if (chainparams.MineBlocksOnDemand()) { - pblock->nVersion = GetArg("-blockversion", pblock->nVersion); + pblock->nVersion = gArgs.GetArg("-blockversion", pblock->nVersion); } pblock->nTime = GetAdjustedTime(); nMaxGeneratedBlockSize = ComputeMaxGeneratedBlockSize(*config, pindexPrev); nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST) ? pindexPrev->GetMedianTimePast() : pblock->GetBlockTime(); addPriorityTxs(); int nPackagesSelected = 0; int nDescendantsUpdated = 0; addPackageTxs(nPackagesSelected, nDescendantsUpdated); int64_t nTime1 = GetTimeMicros(); nLastBlockTx = nBlockTx; nLastBlockSize = nBlockSize; // Create coinbase transaction. CMutableTransaction coinbaseTx; coinbaseTx.vin.resize(1); coinbaseTx.vin[0].prevout.SetNull(); coinbaseTx.vout.resize(1); coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn; coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus()); coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0; pblock->vtx[0] = MakeTransactionRef(coinbaseTx); pblocktemplate->vTxFees[0] = -1 * nFees; uint64_t nSerializeSize = GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION); LogPrintf("CreateNewBlock(): total size: %u txs: %u fees: %ld sigops %d\n", nSerializeSize, nBlockTx, nFees, nBlockSigOps); // Fill in header. 
pblock->hashPrevBlock = pindexPrev->GetBlockHash(); UpdateTime(pblock, *config, pindexPrev); pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, *config); pblock->nNonce = 0; pblocktemplate->vTxSigOpsCount[0] = GetSigOpCountWithoutP2SH(*pblock->vtx[0]); CValidationState state; if (!TestBlockValidity(*config, state, *pblock, pindexPrev, false, false)) { throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state))); } int64_t nTime2 = GetTimeMicros(); LogPrint( BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d " "updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart)); return std::move(pblocktemplate); } bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter) { for (CTxMemPool::txiter parent : mempool.GetMemPoolParents(iter)) { if (!inBlock.count(parent)) { return true; } } return false; } void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries &testSet) { for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end();) { // Only test txs not already in the block. if (inBlock.count(*iit)) { testSet.erase(iit++); } else { iit++; } } } bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOps) { auto blockSizeWithPackage = nBlockSize + packageSize; if (blockSizeWithPackage >= nMaxGeneratedBlockSize) { return false; } if (nBlockSigOps + packageSigOps >= GetMaxBlockSigOpsCount(blockSizeWithPackage)) { return false; } return true; } // Perform transaction-level checks before adding to block: // - transaction finality (locktime) // - serialized size (in case -blockmaxsize is in use) bool BlockAssembler::TestPackageTransactions( const CTxMemPool::setEntries &package) { uint64_t nPotentialBlockSize = nBlockSize; for (const CTxMemPool::txiter it : package) { CValidationState state; if (!ContextualCheckTransaction(*config, it->GetTx(), state, nHeight, nLockTimeCutoff)) { return false; } uint64_t nTxSize = ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION); if (nPotentialBlockSize + nTxSize >= nMaxGeneratedBlockSize) { return false; } nPotentialBlockSize += nTxSize; } return true; } bool BlockAssembler::TestForBlock(CTxMemPool::txiter it) { auto blockSizeWithTx = nBlockSize + ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION); if (blockSizeWithTx >= nMaxGeneratedBlockSize) { if (nBlockSize > nMaxGeneratedBlockSize - 100 || lastFewTxs > 50) { blockFinished = true; return false; } if (nBlockSize > nMaxGeneratedBlockSize - 1000) { lastFewTxs++; } return false; } auto maxBlockSigOps = GetMaxBlockSigOpsCount(blockSizeWithTx); if (nBlockSigOps + it->GetSigOpCount() >= maxBlockSigOps) { // If the block has room for no more sig ops then flag that the block is // finished. // TODO: We should consider adding another transaction that isn't very // dense in sigops instead of bailing out so easily. if (nBlockSigOps > maxBlockSigOps - 2) { blockFinished = true; return false; } // Otherwise attempt to find another tx with fewer sigops to put in the // block. return false; } // Must check that lock times are still valid. This can be removed once MTP // is always enforced as long as reorgs keep the mempool consistent. 
CValidationState state; if (!ContextualCheckTransaction(*config, it->GetTx(), state, nHeight, nLockTimeCutoff)) { return false; } return true; } void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) { pblock->vtx.emplace_back(iter->GetSharedTx()); pblocktemplate->vTxFees.push_back(iter->GetFee()); pblocktemplate->vTxSigOpsCount.push_back(iter->GetSigOpCount()); nBlockSize += iter->GetTxSize(); ++nBlockTx; nBlockSigOps += iter->GetSigOpCount(); nFees += iter->GetFee(); inBlock.insert(iter); - bool fPrintPriority = GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY); + bool fPrintPriority = + gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY); if (fPrintPriority) { double dPriority = iter->GetPriority(nHeight); Amount dummy; mempool.ApplyDeltas(iter->GetTx().GetId(), dPriority, dummy); LogPrintf( "priority %.1f fee %s txid %s\n", dPriority, CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(), iter->GetTx().GetId().ToString()); } } int BlockAssembler::UpdatePackagesForAdded( const CTxMemPool::setEntries &alreadyAdded, indexed_modified_transaction_set &mapModifiedTx) { int nDescendantsUpdated = 0; for (const CTxMemPool::txiter it : alreadyAdded) { CTxMemPool::setEntries descendants; mempool.CalculateDescendants(it, descendants); // Insert all descendants (not yet in block) into the modified set. for (CTxMemPool::txiter desc : descendants) { if (alreadyAdded.count(desc)) { continue; } ++nDescendantsUpdated; modtxiter mit = mapModifiedTx.find(desc); if (mit == mapModifiedTx.end()) { CTxMemPoolModifiedEntry modEntry(desc); modEntry.nSizeWithAncestors -= it->GetTxSize(); modEntry.nModFeesWithAncestors -= it->GetModifiedFee(); modEntry.nSigOpCountWithAncestors -= it->GetSigOpCount(); mapModifiedTx.insert(modEntry); } else { mapModifiedTx.modify(mit, update_for_parent_inclusion(it)); } } } return nDescendantsUpdated; } // Skip entries in mapTx that are already in a block or are present in // mapModifiedTx (which implies that the mapTx ancestor state is stale due to // ancestor inclusion in the block). Also skip transactions that we've already // failed to add. This can happen if we consider a transaction in mapModifiedTx // and it fails: we can then potentially consider it again while walking mapTx. // It's currently guaranteed to fail again, but as a belt-and-suspenders check // we put it in failedTx and avoid re-evaluation, since the re-evaluation would // be using cached size/sigops/fee values that are not actually correct. bool BlockAssembler::SkipMapTxEntry( CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx) { assert(it != mempool.mapTx.end()); return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it); } void BlockAssembler::SortForBlock( const CTxMemPool::setEntries &package, CTxMemPool::txiter entry, std::vector &sortedEntries) { // Sort package by ancestor count. If a transaction A depends on transaction // B, then A's ancestor count must be greater than B's. So this is // sufficient to validly order the transactions for block inclusion. sortedEntries.clear(); sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end()); std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount()); } // This transaction selection algorithm orders the mempool based on feerate of a // transaction including all unconfirmed ancestors. 
Since we don't remove // transactions from the mempool as we select them for block inclusion, we need // an alternate method of updating the feerate of a transaction with its // not-yet-selected ancestors as we go. This is accomplished by walking the // in-mempool descendants of selected transactions and storing a temporary // modified state in mapModifiedTxs. Each time through the loop, we compare the // best transaction in mapModifiedTxs with the next transaction in the mempool // to decide what transaction package to work on next. void BlockAssembler::addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated) { // mapModifiedTx will store sorted packages after they are modified because // some of their txs are already in the block. indexed_modified_transaction_set mapModifiedTx; // Keep track of entries that failed inclusion, to avoid duplicate work. CTxMemPool::setEntries failedTx; // Start by adding all descendants of previously added txs to mapModifiedTx // and modifying them for their already included ancestors. UpdatePackagesForAdded(inBlock, mapModifiedTx); CTxMemPool::indexed_transaction_set::index::type::iterator mi = mempool.mapTx.get().begin(); CTxMemPool::txiter iter; // Limit the number of attempts to add transactions to the block when it is // close to full; this is just a simple heuristic to finish quickly if the // mempool has a lot of entries. const int64_t MAX_CONSECUTIVE_FAILURES = 1000; int64_t nConsecutiveFailed = 0; while (mi != mempool.mapTx.get().end() || !mapModifiedTx.empty()) { // First try to find a new transaction in mapTx to evaluate. if (mi != mempool.mapTx.get().end() && SkipMapTxEntry(mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) { ++mi; continue; } // Now that mi is not stale, determine which transaction to evaluate: // the next entry from mapTx, or the best from mapModifiedTx? bool fUsingModified = false; modtxscoreiter modit = mapModifiedTx.get().begin(); if (mi == mempool.mapTx.get().end()) { // We're out of entries in mapTx; use the entry from mapModifiedTx iter = modit->iter; fUsingModified = true; } else { // Try to compare the mapTx entry to the mapModifiedTx entry. iter = mempool.mapTx.project<0>(mi); if (modit != mapModifiedTx.get().end() && CompareModifiedEntry()(*modit, CTxMemPoolModifiedEntry(iter))) { // The best entry in mapModifiedTx has higher score than the one // from mapTx. Switch which transaction (package) to consider iter = modit->iter; fUsingModified = true; } else { // Either no entry in mapModifiedTx, or it's worse than mapTx. // Increment mi for the next loop iteration. ++mi; } } // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't // contain anything that is inBlock. 
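The selection loop above scores each candidate together with its not-yet-included ancestors, so a high-fee child can pull a low-fee parent into the block; mapModifiedTx exists to keep those ancestor totals current as ancestors get included. A toy illustration of that ancestor-package feerate (values are made up for the example):

#include <cstdint>
#include <cstdio>

struct PkgTx {
    int64_t fee;  // satoshis
    int64_t size; // bytes
};

static double PackageFeerate(const PkgTx *txs, int n) {
    int64_t fee = 0, size = 0;
    for (int i = 0; i < n; ++i) { fee += txs[i].fee; size += txs[i].size; }
    return size ? double(fee) / size : 0.0;
}

int main() {
    PkgTx parent{100, 250};  // 0.4 sat/B on its own
    PkgTx child{2000, 250};  // 8.0 sat/B on its own
    PkgTx pkg[] = {parent, child};
    // The pair is evaluated at 2100 / 500 = 4.2 sat/B, so the cheap parent
    // rides in with its child instead of being skipped.
    std::printf("package feerate: %.2f sat/B\n", PackageFeerate(pkg, 2));
}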
assert(!inBlock.count(iter)); uint64_t packageSize = iter->GetSizeWithAncestors(); Amount packageFees = iter->GetModFeesWithAncestors(); int64_t packageSigOps = iter->GetSigOpCountWithAncestors(); if (fUsingModified) { packageSize = modit->nSizeWithAncestors; packageFees = modit->nModFeesWithAncestors; packageSigOps = modit->nSigOpCountWithAncestors; } if (packageFees < blockMinFeeRate.GetFee(packageSize)) { // Everything else we might consider has a lower fee rate return; } if (!TestPackage(packageSize, packageSigOps)) { if (fUsingModified) { // Since we always look at the best entry in mapModifiedTx, we // must erase failed entries so that we can consider the next // best entry on the next loop iteration mapModifiedTx.get().erase(modit); failedTx.insert(iter); } ++nConsecutiveFailed; if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockSize > nMaxGeneratedBlockSize - 1000) { // Give up if we're close to full and haven't succeeded in a // while. break; } continue; } CTxMemPool::setEntries ancestors; uint64_t nNoLimit = std::numeric_limits::max(); std::string dummy; mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); onlyUnconfirmed(ancestors); ancestors.insert(iter); // Test if all tx's are Final. if (!TestPackageTransactions(ancestors)) { if (fUsingModified) { mapModifiedTx.get().erase(modit); failedTx.insert(iter); } continue; } // This transaction will make it in; reset the failed counter. nConsecutiveFailed = 0; // Package can be added. Sort the entries in a valid order. std::vector sortedEntries; SortForBlock(ancestors, iter, sortedEntries); for (size_t i = 0; i < sortedEntries.size(); ++i) { AddToBlock(sortedEntries[i]); // Erase from the modified set, if present mapModifiedTx.erase(sortedEntries[i]); } ++nPackagesSelected; // Update transactions that depend on each of these nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx); } } void BlockAssembler::addPriorityTxs() { // How much of the block should be dedicated to high-priority transactions, // included regardless of the fees they pay. if (config->GetBlockPriorityPercentage() == 0) { return; } uint64_t nBlockPrioritySize = nMaxGeneratedBlockSize * config->GetBlockPriorityPercentage() / 100; // This vector will be sorted into a priority queue: std::vector vecPriority; TxCoinAgePriorityCompare pricomparer; std::map waitPriMap; typedef std::map::iterator waitPriIter; double actualPriority = -1; vecPriority.reserve(mempool.mapTx.size()); for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin(); mi != mempool.mapTx.end(); ++mi) { double dPriority = mi->GetPriority(nHeight); Amount dummy; mempool.ApplyDeltas(mi->GetTx().GetId(), dPriority, dummy); vecPriority.push_back(TxCoinAgePriority(dPriority, mi)); } std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer); CTxMemPool::txiter iter; // Add a tx from priority queue to fill the part of block reserved to // priority transactions. while (!vecPriority.empty() && !blockFinished) { iter = vecPriority.front().second; actualPriority = vecPriority.front().first; std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer); vecPriority.pop_back(); // If tx already in block, skip. if (inBlock.count(iter)) { // Shouldn't happen for priority txs. assert(false); continue; } // If tx is dependent on other mempool txs which haven't yet been // included then put it in the waitSet. 
if (isStillDependent(iter)) { waitPriMap.insert(std::make_pair(iter, actualPriority)); continue; } // If this tx fits in the block add it, otherwise keep looping. if (TestForBlock(iter)) { AddToBlock(iter); // If now that this txs is added we've surpassed our desired // priority size or have dropped below the AllowFreeThreshold, then // we're done adding priority txs. if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) { break; } // This tx was successfully added, so add transactions that depend // on this one to the priority queue to try again. for (CTxMemPool::txiter child : mempool.GetMemPoolChildren(iter)) { waitPriIter wpiter = waitPriMap.find(child); if (wpiter != waitPriMap.end()) { vecPriority.push_back( TxCoinAgePriority(wpiter->second, child)); std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer); waitPriMap.erase(wpiter); } } } } } void IncrementExtraNonce(const Config &config, CBlock *pblock, const CBlockIndex *pindexPrev, unsigned int &nExtraNonce) { // Update nExtraNonce static uint256 hashPrevBlock; if (hashPrevBlock != pblock->hashPrevBlock) { nExtraNonce = 0; hashPrevBlock = pblock->hashPrevBlock; } ++nExtraNonce; // Height first in coinbase required for block.version=2 unsigned int nHeight = pindexPrev->nHeight + 1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce) << getExcessiveBlockSizeSig(config)) + COINBASE_FLAGS; assert(txCoinbase.vin[0].scriptSig.size() <= MAX_COINBASE_SCRIPTSIG_SIZE); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); } diff --git a/src/net.cpp b/src/net.cpp index 2f8f2dbd28..27ae3a961b 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1,3053 +1,3053 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "net.h" #include "addrman.h" #include "chainparams.h" #include "clientversion.h" #include "config.h" #include "consensus/consensus.h" #include "crypto/common.h" #include "crypto/sha256.h" #include "hash.h" #include "netbase.h" #include "primitives/transaction.h" #include "scheduler.h" #include "ui_interface.h" #include "utilstrencodings.h" #ifdef WIN32 #include #else #include #endif #ifdef USE_UPNP #include #include #include #include #endif #include // Dump addresses to peers.dat and banlist.dat every 15 minutes (900s) #define DUMP_ADDRESSES_INTERVAL 900 // We add a random period time (0 to 1 seconds) to feeler connections to prevent // synchronization. #define FEELER_SLEEP_WINDOW 1 #if !defined(HAVE_MSG_NOSIGNAL) && !defined(MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif // Fix for ancient MinGW versions, that don't have defined these in ws2tcpip.h. // Todo: Can be removed when our pull-tester is upgraded to a modern MinGW // version. 
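// IncrementExtraNonce above restarts its counter whenever the template's
// previous-block hash changes, so every (prev-hash, extranonce) pair the miner
// grinds on stays unique. A tiny sketch of that reset-on-new-tip pattern;
// NextExtraNonce and the string "hashes" are illustrative only.
#include <cstdint>
#include <iostream>
#include <string>

static uint32_t NextExtraNonce(const std::string &prevHash) {
    static std::string lastPrevHash;
    static uint32_t extraNonce = 0;
    if (prevHash != lastPrevHash) { // new tip -> start counting again
        lastPrevHash = prevHash;
        extraNonce = 0;
    }
    return ++extraNonce;
}

int main() {
    std::cout << NextExtraNonce("block-1") << " "   // 1
              << NextExtraNonce("block-1") << " "   // 2
              << NextExtraNonce("block-2") << "\n"; // 1 (reset on new tip)
    return 0;
}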
#ifdef WIN32 #ifndef PROTECTION_LEVEL_UNRESTRICTED #define PROTECTION_LEVEL_UNRESTRICTED 10 #endif #ifndef IPV6_PROTECTION_LEVEL #define IPV6_PROTECTION_LEVEL 23 #endif #endif static const std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("localhostnonce")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // // Global state variables // bool fDiscover = true; bool fListen = true; bool fRelayTxes = true; CCriticalSection cs_mapLocalHost; std::map mapLocalHost; static bool vfLimited[NET_MAX] = {}; limitedmap mapAlreadyAskedFor(MAX_INV_SZ); // Signals for message handling static CNodeSignals g_signals; CNodeSignals &GetNodeSignals() { return g_signals; } void CConnman::AddOneShot(const std::string &strDest) { LOCK(cs_vOneShots); vOneShots.push_back(strDest); } unsigned short GetListenPort() { - return (unsigned short)(GetArg("-port", Params().GetDefaultPort())); + return (unsigned short)(gArgs.GetArg("-port", Params().GetDefaultPort())); } // find 'best' local address for a particular peer bool GetLocal(CService &addr, const CNetAddr *paddrPeer) { if (!fListen) return false; int nBestScore = -1; int nBestReachability = -1; { LOCK(cs_mapLocalHost); for (std::map::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); it++) { int nScore = (*it).second.nScore; int nReachability = (*it).first.GetReachabilityFrom(paddrPeer); if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { addr = CService((*it).first, (*it).second.nPort); nBestReachability = nReachability; nBestScore = nScore; } } } return nBestScore >= 0; } //! Convert the pnSeeds6 array into usable address objects. static std::vector convertSeed6(const std::vector &vSeedsIn) { // It'll only connect to one or two seed nodes because once it connects, // it'll get a pile of addresses with newer timestamps. Seed nodes are given // a random 'last seen time' of between one and two weeks ago. const int64_t nOneWeek = 7 * 24 * 60 * 60; std::vector vSeedsOut; vSeedsOut.reserve(vSeedsIn.size()); for (std::vector::const_iterator i(vSeedsIn.begin()); i != vSeedsIn.end(); ++i) { struct in6_addr ip; memcpy(&ip, i->addr, sizeof(ip)); CAddress addr(CService(ip, i->port), NODE_NETWORK); addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek; vSeedsOut.push_back(addr); } return vSeedsOut; } // Get best local address for a particular peer as a CAddress. Otherwise, return // the unroutable 0.0.0.0 but filled in with the normal parameters, since the IP // may be changed to a useful one by discovery. CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices) { CAddress ret(CService(CNetAddr(), GetListenPort()), NODE_NONE); CService addr; if (GetLocal(addr, paddrPeer)) { ret = CAddress(addr, nLocalServices); } ret.nTime = GetAdjustedTime(); return ret; } int GetnScore(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == LOCAL_NONE) { return 0; } return mapLocalHost[addr].nScore; } // Is our peer's addrLocal potentially useful as an external IP source? bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && !IsLimited(addrLocal.GetNetwork()); } // Pushes our own address to a peer. 
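// GetLocal above keeps the candidate with the best (reachability, score) pair;
// its "greater reachability, or equal reachability and greater score" test is
// a lexicographic comparison. A stand-alone sketch using std::tie; the
// addresses and numbers are made up for illustration.
#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct Candidate {
    int reachability;
    int score;
};

int main() {
    std::map<std::string, Candidate> locals{{"192.0.2.1", {2, 1}},
                                            {"198.51.100.7", {3, 0}},
                                            {"203.0.113.5", {3, 4}}};
    std::string best;
    int bestReachability = -1;
    int bestScore = -1;
    for (const auto &entry : locals) {
        const Candidate &c = entry.second;
        if (std::tie(c.reachability, c.score) >
            std::tie(bestReachability, bestScore)) {
            bestReachability = c.reachability;
            bestScore = c.score;
            best = entry.first;
        }
    }
    std::cout << "advertise " << best << "\n"; // 203.0.113.5
    return 0;
}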
void AdvertiseLocal(CNode *pnode) { if (fListen && pnode->fSuccessfullyConnected) { CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); // If discovery is enabled, sometimes give our peer the address it tells // us that it sees us as in case it has a better idea of our address // than we do. if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || GetRand((GetnScore(addrLocal) > LOCAL_MANUAL) ? 8 : 2) == 0)) { addrLocal.SetIP(pnode->GetAddrLocal()); } if (addrLocal.IsRoutable()) { LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); FastRandomContext insecure_rand; pnode->PushAddress(addrLocal, insecure_rand); } } } // Learn a new local address. bool AddLocal(const CService &addr, int nScore) { if (!addr.IsRoutable()) { return false; } if (!fDiscover && nScore < LOCAL_MANUAL) { return false; } if (IsLimited(addr)) { return false; } LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore); { LOCK(cs_mapLocalHost); bool fAlready = mapLocalHost.count(addr) > 0; LocalServiceInfo &info = mapLocalHost[addr]; if (!fAlready || nScore >= info.nScore) { info.nScore = nScore + (fAlready ? 1 : 0); info.nPort = addr.GetPort(); } } return true; } bool AddLocal(const CNetAddr &addr, int nScore) { return AddLocal(CService(addr, GetListenPort()), nScore); } bool RemoveLocal(const CService &addr) { LOCK(cs_mapLocalHost); LogPrintf("RemoveLocal(%s)\n", addr.ToString()); mapLocalHost.erase(addr); return true; } /** Make a particular network entirely off-limits (no automatic connects to it) */ void SetLimited(enum Network net, bool fLimited) { if (net == NET_UNROUTABLE) { return; } LOCK(cs_mapLocalHost); vfLimited[net] = fLimited; } bool IsLimited(enum Network net) { LOCK(cs_mapLocalHost); return vfLimited[net]; } bool IsLimited(const CNetAddr &addr) { return IsLimited(addr.GetNetwork()); } /** vote for a local address */ bool SeenLocal(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return false; } mapLocalHost[addr].nScore++; return true; } /** check whether a given address is potentially local */ bool IsLocal(const CService &addr) { LOCK(cs_mapLocalHost); return mapLocalHost.count(addr) > 0; } /** check whether a given network is one we can probably connect to */ bool IsReachable(enum Network net) { LOCK(cs_mapLocalHost); return !vfLimited[net]; } /** check whether a given address is in a network we can probably connect to */ bool IsReachable(const CNetAddr &addr) { enum Network net = addr.GetNetwork(); return IsReachable(net); } CNode *CConnman::FindNode(const CNetAddr &ip) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if ((CNetAddr)pnode->addr == ip) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CSubNet &subNet) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match((CNetAddr)pnode->addr)) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const std::string &addrName) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetAddrName() == addrName) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CService &addr) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if ((CService)pnode->addr == addr) { return pnode; } } return nullptr; } bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce) return false; } return true; } CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool 
fCountFailure) { if (pszDest == nullptr) { if (IsLocal(addrConnect)) { return nullptr; } // Look for an existing connection CNode *pnode = FindNode((CService)addrConnect); if (pnode) { LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } /// debug print LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n", pszDest ? pszDest : addrConnect.ToString(), pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0); // Connect SOCKET hSocket; bool proxyConnectionFailed = false; if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, Params().GetDefaultPort(), nConnectTimeout, &proxyConnectionFailed) : ConnectSocket(addrConnect, hSocket, nConnectTimeout, &proxyConnectionFailed)) { if (!IsSelectableSocket(hSocket)) { LogPrintf("Cannot create connection: non-selectable socket created " "(fd >= FD_SETSIZE ?)\n"); CloseSocket(hSocket); return nullptr; } if (pszDest && addrConnect.IsValid()) { // It is possible that we already have a connection to the IP/port // pszDest resolved to. In that case, drop the connection that was // just created, and return the existing CNode instead. Also store // the name we used to connect in that CNode, so that future // FindNode() calls to that name catch this early. LOCK(cs_vNodes); CNode *pnode = FindNode((CService)addrConnect); if (pnode) { pnode->MaybeSetAddrName(std::string(pszDest)); CloseSocket(hSocket); LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } addrman.Attempt(addrConnect, fCountFailure); // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, pszDest ? pszDest : "", false); pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices); pnode->AddRef(); return pnode; } else if (!proxyConnectionFailed) { // If connecting to the node failed, and failure is not caused by a // problem connecting to the proxy, mark this as an attempt. addrman.Attempt(addrConnect, fCountFailure); } return nullptr; } void CConnman::DumpBanlist() { // Clean unused entries (if bantime has expired) SweepBanned(); if (!BannedSetIsDirty()) { return; } int64_t nStart = GetTimeMillis(); CBanDB bandb; banmap_t banmap; GetBanned(banmap); if (bandb.Write(banmap)) { SetBannedSetDirty(false); } LogPrint(BCLog::NET, "Flushed %d banned node ips/subnets to banlist.dat %dms\n", banmap.size(), GetTimeMillis() - nStart); } void CNode::CloseSocketDisconnect() { fDisconnect = true; LOCK(cs_hSocket); if (hSocket != INVALID_SOCKET) { LogPrint(BCLog::NET, "disconnecting peer=%d\n", id); CloseSocket(hSocket); } } void CConnman::ClearBanned() { { LOCK(cs_setBanned); setBanned.clear(); setBannedIsDirty = true; } // Store banlist to disk. 
DumpBanlist(); if (clientInterface) { clientInterface->BannedListChanged(); } } bool CConnman::IsBanned(CNetAddr ip) { LOCK(cs_setBanned); bool fResult = false; for (banmap_t::iterator it = setBanned.begin(); it != setBanned.end(); it++) { CSubNet subNet = (*it).first; CBanEntry banEntry = (*it).second; if (subNet.Match(ip) && GetTime() < banEntry.nBanUntil) { fResult = true; } } return fResult; } bool CConnman::IsBanned(CSubNet subnet) { LOCK(cs_setBanned); bool fResult = false; banmap_t::iterator i = setBanned.find(subnet); if (i != setBanned.end()) { CBanEntry banEntry = (*i).second; if (GetTime() < banEntry.nBanUntil) { fResult = true; } } return fResult; } void CConnman::Ban(const CNetAddr &addr, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch) { CSubNet subNet(addr); Ban(subNet, banReason, bantimeoffset, sinceUnixEpoch); } void CConnman::Ban(const CSubNet &subNet, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch) { CBanEntry banEntry(GetTime()); banEntry.banReason = banReason; if (bantimeoffset <= 0) { - bantimeoffset = GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME); + bantimeoffset = gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME); sinceUnixEpoch = false; } banEntry.nBanUntil = (sinceUnixEpoch ? 0 : GetTime()) + bantimeoffset; { LOCK(cs_setBanned); if (setBanned[subNet].nBanUntil < banEntry.nBanUntil) { setBanned[subNet] = banEntry; setBannedIsDirty = true; } else { return; } } if (clientInterface) { clientInterface->BannedListChanged(); } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match((CNetAddr)pnode->addr)) { pnode->fDisconnect = true; } } } if (banReason == BanReasonManuallyAdded) { // Store banlist to disk immediately if user requested ban. DumpBanlist(); } } bool CConnman::Unban(const CNetAddr &addr) { CSubNet subNet(addr); return Unban(subNet); } bool CConnman::Unban(const CSubNet &subNet) { { LOCK(cs_setBanned); if (!setBanned.erase(subNet)) { return false; } setBannedIsDirty = true; } if (clientInterface) { clientInterface->BannedListChanged(); } // Store banlist to disk immediately. DumpBanlist(); return true; } void CConnman::GetBanned(banmap_t &banMap) { LOCK(cs_setBanned); // Sweep the banlist so expired bans are not returned SweepBanned(); // Create a thread safe copy. banMap = setBanned; } void CConnman::SetBanned(const banmap_t &banMap) { LOCK(cs_setBanned); setBanned = banMap; setBannedIsDirty = true; } void CConnman::SweepBanned() { int64_t now = GetTime(); LOCK(cs_setBanned); banmap_t::iterator it = setBanned.begin(); while (it != setBanned.end()) { CSubNet subNet = (*it).first; CBanEntry banEntry = (*it).second; if (now > banEntry.nBanUntil) { setBanned.erase(it++); setBannedIsDirty = true; LogPrint(BCLog::NET, "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString()); } else { ++it; } } } bool CConnman::BannedSetIsDirty() { LOCK(cs_setBanned); return setBannedIsDirty; } void CConnman::SetBannedSetDirty(bool dirty) { // Reuse setBanned lock for the isDirty flag. 
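// SweepBanned above walks the ban map and removes entries whose nBanUntil has
// passed, using the "erase(it++)" idiom so erasing the current element never
// invalidates the loop iterator. The same pattern on a bare std::map; the
// subnets and timestamps are illustrative.
#include <ctime>
#include <iostream>
#include <map>
#include <string>

int main() {
    std::time_t now = 1000;
    std::map<std::string, std::time_t> banUntil{
        {"192.0.2.0/24", 900},     // expired
        {"198.51.100.1/32", 2000}, // still banned
    };
    for (std::map<std::string, std::time_t>::iterator it = banUntil.begin();
         it != banUntil.end();) {
        if (now > it->second) {
            banUntil.erase(it++); // advance before the erased node is gone
        } else {
            ++it;
        }
    }
    std::cout << banUntil.size() << " ban entry left\n"; // 1 ban entry left
    return 0;
}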
LOCK(cs_setBanned); setBannedIsDirty = dirty; } bool CConnman::IsWhitelistedRange(const CNetAddr &addr) { LOCK(cs_vWhitelistedRange); for (const CSubNet &subnet : vWhitelistedRange) { if (subnet.Match(addr)) { return true; } } return false; } void CConnman::AddWhitelistedRange(const CSubNet &subnet) { LOCK(cs_vWhitelistedRange); vWhitelistedRange.push_back(subnet); } std::string CNode::GetAddrName() const { LOCK(cs_addrName); return addrName; } void CNode::MaybeSetAddrName(const std::string &addrNameIn) { LOCK(cs_addrName); if (addrName.empty()) { addrName = addrNameIn; } } CService CNode::GetAddrLocal() const { LOCK(cs_addrLocal); return addrLocal; } void CNode::SetAddrLocal(const CService &addrLocalIn) { LOCK(cs_addrLocal); if (addrLocal.IsValid()) { error("Addr local already set for node: %i. Refusing to change from %s " "to %s", id, addrLocal.ToString(), addrLocalIn.ToString()); } else { addrLocal = addrLocalIn; } } #undef X #define X(name) stats.name = name void CNode::copyStats(CNodeStats &stats) { stats.nodeid = this->GetId(); X(nServices); X(addr); { LOCK(cs_filter); X(fRelayTxes); } X(nLastSend); X(nLastRecv); X(nTimeConnected); X(nTimeOffset); stats.addrName = GetAddrName(); X(nVersion); { LOCK(cs_SubVer); X(cleanSubVer); } X(fInbound); X(fAddnode); X(nStartingHeight); { LOCK(cs_vSend); X(mapSendBytesPerMsgCmd); X(nSendBytes); } { LOCK(cs_vRecv); X(mapRecvBytesPerMsgCmd); X(nRecvBytes); } X(fWhitelisted); // It is common for nodes with good ping times to suddenly become lagged, // due to a new block arriving or other large transfer. Merely reporting // pingtime might fool the caller into thinking the node was still // responsive, since pingtime does not update until the ping is complete, // which might take a while. So, if a ping is taking an unusually long time // in flight, the caller can immediately detect that this is happening. int64_t nPingUsecWait = 0; if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) { nPingUsecWait = GetTimeMicros() - nPingUsecStart; } // Raw ping time is in microseconds, but show it to user as whole seconds // (Bitcoin users should be well used to small numbers with many decimal // places by now :) stats.dPingTime = ((double(nPingUsecTime)) / 1e6); stats.dMinPing = ((double(nMinPingUsecTime)) / 1e6); stats.dPingWait = ((double(nPingUsecWait)) / 1e6); // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; } #undef X bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool &complete) { complete = false; int64_t nTimeMicros = GetTimeMicros(); LOCK(cs_vRecv); nLastRecv = nTimeMicros / 1000000; nRecvBytes += nBytes; while (nBytes > 0) { // Get current incomplete message, or create a new one. if (vRecvMsg.empty() || vRecvMsg.back().complete()) { vRecvMsg.push_back(CNetMessage(Params().NetMagic(), SER_NETWORK, INIT_PROTO_VERSION)); } CNetMessage &msg = vRecvMsg.back(); // Absorb network data. int handled; if (!msg.in_data) { handled = msg.readHeader(pch, nBytes); } else { handled = msg.readData(pch, nBytes); } if (handled < 0) { return false; } if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId()); return false; } pch += handled; nBytes -= handled; if (msg.complete()) { // Store received bytes per message command to prevent a memory DOS, // only allow valid commands. 
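// The accounting right below looks the command up in a pre-populated map and
// falls back to the catch-all "*other*" bucket, so a peer sending made-up
// command strings cannot grow the map without bound. A stand-alone sketch of
// that find-with-fallback pattern; the commands and sizes are illustrative.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, uint64_t> recvBytesPerCmd{
        {"ping", 0}, {"inv", 0}, {"*other*", 0}};

    auto account = [&recvBytesPerCmd](const std::string &cmd, uint64_t bytes) {
        auto it = recvBytesPerCmd.find(cmd);
        if (it == recvBytesPerCmd.end()) {
            it = recvBytesPerCmd.find("*other*"); // unknown command -> catch-all
        }
        it->second += bytes;
    };

    account("ping", 32);
    account("made-up-cmd", 1000);
    std::cout << recvBytesPerCmd["*other*"] << "\n"; // 1000
    return 0;
}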
mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.hdr.pchCommand); if (i == mapRecvBytesPerMsgCmd.end()) { i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); } assert(i != mapRecvBytesPerMsgCmd.end()); i->second += msg.hdr.nMessageSize + CMessageHeader::HEADER_SIZE; msg.nTime = nTimeMicros; complete = true; } } return true; } void CNode::SetSendVersion(int nVersionIn) { // Send version may only be changed in the version message, and only one // version message is allowed per session. We can therefore treat this value // as const and even atomic as long as it's only used once a version message // has been successfully processed. Any attempt to set this twice is an // error. if (nSendVersion != 0) { error("Send version already set for node: %i. Refusing to change from " "%i to %i", id, nSendVersion, nVersionIn); } else { nSendVersion = nVersionIn; } } int CNode::GetSendVersion() const { // The send version should always be explicitly set to INIT_PROTO_VERSION // rather than using this value until SetSendVersion has been called. if (nSendVersion == 0) { error("Requesting unset send version for node: %i. Using %i", id, INIT_PROTO_VERSION); return INIT_PROTO_VERSION; } return nSendVersion; } int CNetMessage::readHeader(const char *pch, unsigned int nBytes) { // copy data to temporary parsing buffer unsigned int nRemaining = 24 - nHdrPos; unsigned int nCopy = std::min(nRemaining, nBytes); memcpy(&hdrbuf[nHdrPos], pch, nCopy); nHdrPos += nCopy; // if header incomplete, exit if (nHdrPos < 24) { return nCopy; } // deserialize to CMessageHeader try { hdrbuf >> hdr; } catch (const std::exception &) { return -1; } // reject messages larger than MAX_SIZE if (hdr.nMessageSize > MAX_SIZE) { return -1; } // switch state to reading message data in_data = true; return nCopy; } int CNetMessage::readData(const char *pch, unsigned int nBytes) { unsigned int nRemaining = hdr.nMessageSize - nDataPos; unsigned int nCopy = std::min(nRemaining, nBytes); if (vRecv.size() < nDataPos + nCopy) { // Allocate up to 256 KiB ahead, but never more than the total message // size. 
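// The resize just below grows the receive buffer in capped 256 KiB steps so a
// peer announcing a huge nMessageSize does not force one giant allocation up
// front, while the buffer is never sized past the full message. A stand-alone
// sketch of that growth policy; the chunk sizes and message size are made up.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const uint32_t messageSize = 1000000; // announced payload size
    std::vector<uint8_t> buf;
    uint32_t dataPos = 0;
    for (int i = 0; i < 3; ++i) { // three 100 KiB chunks arrive
        const uint32_t chunk = 100 * 1024;
        if (buf.size() < dataPos + chunk) {
            buf.resize(std::min<uint32_t>(messageSize,
                                          dataPos + chunk + 256 * 1024));
        }
        dataPos += chunk; // (copying the bytes is elided here)
        std::cout << "received " << dataPos << " bytes, buffer size "
                  << buf.size() << "\n";
    }
    return 0;
}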
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } hasher.Write((const uint8_t *)pch, nCopy); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } const uint256 &CNetMessage::GetMessageHash() const { assert(complete()); if (data_hash.IsNull()) { hasher.Finalize(data_hash.begin()); } return data_hash; } // requires LOCK(cs_vSend) size_t CConnman::SocketSendData(CNode *pnode) const { AssertLockHeld(pnode->cs_vSend); size_t nSentSize = 0; size_t nMsgCount = 0; for (const auto &data : pnode->vSendMsg) { assert(data.size() > pnode->nSendOffset); int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { break; } nBytes = send(pnode->hSocket, reinterpret_cast(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); } if (nBytes == 0) { // couldn't send anything at all break; } if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { LogPrintf("socket send error %s\n", NetworkErrorString(nErr)); pnode->CloseSocketDisconnect(); } break; } assert(nBytes > 0); pnode->nLastSend = GetSystemTimeInSeconds(); pnode->nSendBytes += nBytes; pnode->nSendOffset += nBytes; nSentSize += nBytes; if (pnode->nSendOffset != data.size()) { // could not send full message; stop sending more break; } pnode->nSendOffset = 0; pnode->nSendSize -= data.size(); pnode->fPauseSend = pnode->nSendSize > nSendBufferMaxSize; nMsgCount++; } pnode->vSendMsg.erase(pnode->vSendMsg.begin(), pnode->vSendMsg.begin() + nMsgCount); if (pnode->vSendMsg.empty()) { assert(pnode->nSendOffset == 0); assert(pnode->nSendSize == 0); } return nSentSize; } struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; int64_t nMinPingUsecTime; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; bool fRelayTxes; bool fBloomFilter; CAddress addr; uint64_t nKeyedNetGroup; }; static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nMinPingUsecTime > b.nMinPingUsecTime; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nTimeConnected > b.nTimeConnected; } static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nKeyedNetGroup < b.nKeyedNetGroup; } static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have many // peers which have not yet relayed a block. if (a.nLastBlockTime != b.nLastBlockTime) { return a.nLastBlockTime < b.nLastBlockTime; } if (a.fRelevantServices != b.fRelevantServices) { return b.fRelevantServices; } return a.nTimeConnected > b.nTimeConnected; } static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have more // than a few peers that have not yet relayed txn. if (a.nLastTXTime != b.nLastTXTime) { return a.nLastTXTime < b.nLastTXTime; } if (a.fRelayTxes != b.fRelayTxes) { return b.fRelayTxes; } if (a.fBloomFilter != b.fBloomFilter) { return a.fBloomFilter; } return a.nTimeConnected > b.nTimeConnected; } /** * Try to find a connection to evict when the node is full. Extreme care must be * taken to avoid opening the node to attacker triggered network partitioning. 
* The strategy used here is to protect a small number of peers for each of * several distinct characteristics which are difficult to forge. In order to * partition a node the attacker must be simultaneously better at all of them * than honest peers. */ bool CConnman::AttemptToEvictConnection() { std::vector vEvictionCandidates; { LOCK(cs_vNodes); for (CNode *node : vNodes) { if (node->fWhitelisted || !node->fInbound || node->fDisconnect) { continue; } NodeEvictionCandidate candidate = { node->id, node->nTimeConnected, node->nMinPingUsecTime, node->nLastBlockTime, node->nLastTXTime, (node->nServices & nRelevantServices) == nRelevantServices, node->fRelayTxes, node->pfilter != nullptr, node->addr, node->nKeyedNetGroup}; vEvictionCandidates.push_back(candidate); } } if (vEvictionCandidates.empty()) { return false; } // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. An attacker // cannot predict which netgroups will be protected. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNetGroupKeyed); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect the 8 nodes with the lowest minimum ping time. An attacker cannot // manipulate this metric without physically moving nodes closer to the // target. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeMinPingTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(8, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect 4 nodes that most recently sent us transactions. An attacker // cannot manipulate this metric without performing useful work. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeTXTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect 4 nodes that most recently sent us blocks. An attacker cannot // manipulate this metric without performing useful work. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeBlockTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect the half of the remaining nodes which have been connected the // longest. This replicates the non-eviction implicit behavior, and // precludes attacks that start later. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeTimeConnected); vEvictionCandidates.erase( vEvictionCandidates.end() - static_cast(vEvictionCandidates.size() / 2), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Identify the network group with the most connections and youngest member. 
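// Each protection round in AttemptToEvictConnection is "sort by one metric,
// then erase the N best candidates from the eviction set". A stand-alone
// sketch of one such round, protecting the two longest-connected peers;
// Candidate is an illustrative stand-in for NodeEvictionCandidate.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Candidate {
    int id;
    int64_t timeConnected; // smaller timestamp = connected for longer
};

int main() {
    std::vector<Candidate> candidates{{1, 100}, {2, 400}, {3, 250}, {4, 50}};
    // Reverse-compare so the longest-connected peers end up at the back...
    std::sort(candidates.begin(), candidates.end(),
              [](const Candidate &a, const Candidate &b) {
                  return a.timeConnected > b.timeConnected;
              });
    // ...then erase them from the candidate list, i.e. protect them.
    const size_t protect = std::min<size_t>(2, candidates.size());
    candidates.erase(candidates.end() - protect, candidates.end());
    for (const Candidate &c : candidates) {
        std::cout << "still evictable: peer " << c.id << "\n"; // peers 2 and 3
    }
    return 0;
}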
// (vEvictionCandidates is already sorted by reverse connect time) uint64_t naMostConnections; unsigned int nMostConnections = 0; int64_t nMostConnectionsTime = 0; std::map> mapNetGroupNodes; for (const NodeEvictionCandidate &node : vEvictionCandidates) { mapNetGroupNodes[node.nKeyedNetGroup].push_back(node); int64_t grouptime = mapNetGroupNodes[node.nKeyedNetGroup][0].nTimeConnected; size_t groupsize = mapNetGroupNodes[node.nKeyedNetGroup].size(); if (groupsize > nMostConnections || (groupsize == nMostConnections && grouptime > nMostConnectionsTime)) { nMostConnections = groupsize; nMostConnectionsTime = grouptime; naMostConnections = node.nKeyedNetGroup; } } // Reduce to the network group with the most connections vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]); // Disconnect from the network group with the most connections NodeId evicted = vEvictionCandidates.front().id; LOCK(cs_vNodes); for (std::vector::const_iterator it(vNodes.begin()); it != vNodes.end(); ++it) { if ((*it)->GetId() == evicted) { (*it)->fDisconnect = true; return true; } } return false; } void CConnman::AcceptConnection(const ListenSocket &hListenSocket) { struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len); CAddress addr; int nInbound = 0; int nMaxInbound = nMaxConnections - (nMaxOutbound + nMaxFeeler); if (hSocket != INVALID_SOCKET) { if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) { LogPrintf("Warning: Unknown socket family\n"); } } bool whitelisted = hListenSocket.whitelisted || IsWhitelistedRange(addr); { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->fInbound) { nInbound++; } } } if (hSocket == INVALID_SOCKET) { int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK) { LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr)); } return; } if (!fNetworkActive) { LogPrintf("connection from %s dropped: not accepting new connections\n", addr.ToString()); CloseSocket(hSocket); return; } if (!IsSelectableSocket(hSocket)) { LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString()); CloseSocket(hSocket); return; } // According to the internet TCP_NODELAY is not carried into accepted // sockets on all platforms. Set it again here just to be sure. 
int set = 1; #ifdef WIN32 setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (const char *)&set, sizeof(int)); #else setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (void *)&set, sizeof(int)); #endif if (IsBanned(addr) && !whitelisted) { LogPrintf("connection from %s dropped (banned)\n", addr.ToString()); CloseSocket(hSocket); return; } if (nInbound >= nMaxInbound) { if (!AttemptToEvictConnection()) { // No connection to evict, disconnect the new connection LogPrint(BCLog::NET, "failed to find an eviction candidate - " "connection dropped (full)\n"); CloseSocket(hSocket); return; } } NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, "", true); pnode->AddRef(); pnode->fWhitelisted = whitelisted; GetNodeSignals().InitializeNode(*config, pnode, *this); LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString()); { LOCK(cs_vNodes); vNodes.push_back(pnode); } } void CConnman::ThreadSocketHandler() { unsigned int nPrevNodeCount = 0; while (!interruptNet) { // // Disconnect nodes // { LOCK(cs_vNodes); // Disconnect unused nodes std::vector vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { // remove from vNodes vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end()); // release outbound grant (if any) pnode->grantOutbound.Release(); // close socket and cleanup pnode->CloseSocketDisconnect(); // hold in disconnected pool until all refs are released pnode->Release(); vNodesDisconnected.push_back(pnode); } } } { // Delete disconnected nodes std::list vNodesDisconnectedCopy = vNodesDisconnected; for (CNode *pnode : vNodesDisconnectedCopy) { // wait until threads are done using it if (pnode->GetRefCount() <= 0) { bool fDelete = false; { TRY_LOCK(pnode->cs_inventory, lockInv); if (lockInv) { TRY_LOCK(pnode->cs_vSend, lockSend); if (lockSend) { fDelete = true; } } } if (fDelete) { vNodesDisconnected.remove(pnode); DeleteNode(pnode); } } } } size_t vNodesSize; { LOCK(cs_vNodes); vNodesSize = vNodes.size(); } if (vNodesSize != nPrevNodeCount) { nPrevNodeCount = vNodesSize; if (clientInterface) { clientInterface->NotifyNumConnectionsChanged(nPrevNodeCount); } } // // Find which sockets have data to receive // struct timeval timeout; timeout.tv_sec = 0; // Frequency to poll pnode->vSend timeout.tv_usec = 50000; fd_set fdsetRecv; fd_set fdsetSend; fd_set fdsetError; FD_ZERO(&fdsetRecv); FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); SOCKET hSocketMax = 0; bool have_fds = false; for (const ListenSocket &hListenSocket : vhListenSocket) { FD_SET(hListenSocket.socket, &fdsetRecv); hSocketMax = std::max(hSocketMax, hListenSocket.socket); have_fds = true; } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { // Implement the following logic: // * If there is data to send, select() for sending data. As // this only happens when optimistic write failed, we choose to // first drain the write buffer in this case before receiving // more. This avoids needlessly queueing received data, if the // remote peer is not themselves receiving data. This means // properly utilizing TCP flow control signalling. // * Otherwise, if there is space left in the receive buffer, // select() for receiving data. // * Hand off all complete messages to the processor, to be // handled without blocking here. 
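// ThreadSocketHandler's polling round is the classic select() pattern: build
// the fd_sets, track the highest descriptor, and wait with a short timeout. A
// minimal POSIX-only sketch of the same shape, watching only stdin for
// readability; the 50 ms timeout mirrors the polling frequency used here.
#include <cstdio>
#include <sys/select.h>
#include <unistd.h>

int main() {
    fd_set fdsetRecv;
    FD_ZERO(&fdsetRecv);
    FD_SET(STDIN_FILENO, &fdsetRecv);
    int hSocketMax = STDIN_FILENO;

    struct timeval timeout;
    timeout.tv_sec = 0;
    timeout.tv_usec = 50000;

    int nSelect =
        select(hSocketMax + 1, &fdsetRecv, nullptr, nullptr, &timeout);
    if (nSelect > 0 && FD_ISSET(STDIN_FILENO, &fdsetRecv)) {
        std::printf("stdin is readable\n");
    } else if (nSelect == 0) {
        std::printf("timed out, nothing to read\n");
    } else {
        std::perror("select");
    }
    return 0;
}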
bool select_recv = !pnode->fPauseRecv; bool select_send; { LOCK(pnode->cs_vSend); select_send = !pnode->vSendMsg.empty(); } LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } FD_SET(pnode->hSocket, &fdsetError); hSocketMax = std::max(hSocketMax, pnode->hSocket); have_fds = true; if (select_send) { FD_SET(pnode->hSocket, &fdsetSend); continue; } if (select_recv) { FD_SET(pnode->hSocket, &fdsetRecv); } } } int nSelect = select(have_fds ? hSocketMax + 1 : 0, &fdsetRecv, &fdsetSend, &fdsetError, &timeout); if (interruptNet) { return; } if (nSelect == SOCKET_ERROR) { if (have_fds) { int nErr = WSAGetLastError(); LogPrintf("socket select error %s\n", NetworkErrorString(nErr)); for (unsigned int i = 0; i <= hSocketMax; i++) { FD_SET(i, &fdsetRecv); } } FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); if (!interruptNet.sleep_for( std::chrono::milliseconds(timeout.tv_usec / 1000))) { return; } } // // Accept new connections // for (const ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET && FD_ISSET(hListenSocket.socket, &fdsetRecv)) { AcceptConnection(hListenSocket); } } // // Service each socket // std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } for (CNode *pnode : vNodesCopy) { if (interruptNet) { return; } // // Receive // bool recvSet = false; bool sendSet = false; bool errorSet = false; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } recvSet = FD_ISSET(pnode->hSocket, &fdsetRecv); sendSet = FD_ISSET(pnode->hSocket, &fdsetSend); errorSet = FD_ISSET(pnode->hSocket, &fdsetError); } if (recvSet || errorSet) { // typical socket buffer is 8K-64K char pchBuf[0x10000]; int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT); } if (nBytes > 0) { bool notify = false; if (!pnode->ReceiveMsgBytes(pchBuf, nBytes, notify)) { pnode->CloseSocketDisconnect(); } RecordBytesRecv(nBytes); if (notify) { size_t nSizeAdded = 0; auto it(pnode->vRecvMsg.begin()); for (; it != pnode->vRecvMsg.end(); ++it) { if (!it->complete()) { break; } nSizeAdded += it->vRecv.size() + CMessageHeader::HEADER_SIZE; } { LOCK(pnode->cs_vProcessMsg); pnode->vProcessMsg.splice( pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it); pnode->nProcessQueueSize += nSizeAdded; pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize; } WakeMessageHandler(); } } else if (nBytes == 0) { // socket closed gracefully if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "socket closed\n"); } pnode->CloseSocketDisconnect(); } else if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { if (!pnode->fDisconnect) { LogPrintf("socket recv error %s\n", NetworkErrorString(nErr)); } pnode->CloseSocketDisconnect(); } } } // // Send // if (sendSet) { LOCK(pnode->cs_vSend); size_t nBytes = SocketSendData(pnode); if (nBytes) { RecordBytesSent(nBytes); } } // // Inactivity checking // int64_t nTime = GetSystemTimeInSeconds(); if (nTime - pnode->nTimeConnected > 60) { if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first 60 " "seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id); pnode->fDisconnect = true; } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) { LogPrintf("socket sending timeout: 
%is\n", nTime - pnode->nLastSend); pnode->fDisconnect = true; } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90 * 60)) { LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); pnode->fDisconnect = true; } else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) { LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart)); pnode->fDisconnect = true; } else if (!pnode->fSuccessfullyConnected) { LogPrintf("version handshake timeout from %d\n", pnode->id); pnode->fDisconnect = true; } } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } } } void CConnman::WakeMessageHandler() { { std::lock_guard lock(mutexMsgProc); fMsgProcWake = true; } condMsgProc.notify_one(); } #ifdef USE_UPNP void ThreadMapPort() { std::string port = strprintf("%u", GetListenPort()); const char *multicastif = 0; const char *minissdpdpath = 0; struct UPNPDev *devlist = 0; char lanaddr[64]; #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0); #elif MINIUPNPC_API_VERSION < 14 /* miniupnpc 1.6 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error); #else /* miniupnpc 1.9.20150730 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error); #endif struct UPNPUrls urls; struct IGDdatas data; int r; r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr)); if (r == 1) { if (fDiscover) { char externalIPAddress[40]; r = UPNP_GetExternalIPAddress( urls.controlURL, data.first.servicetype, externalIPAddress); if (r != UPNPCOMMAND_SUCCESS) { LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r); } else { if (externalIPAddress[0]) { CNetAddr resolved; if (LookupHost(externalIPAddress, resolved, false)) { LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString().c_str()); AddLocal(resolved, LOCAL_UPNP); } } else { LogPrintf("UPnP: GetExternalIPAddress failed.\n"); } } } std::string strDesc = "Bitcoin " + FormatFullVersion(); try { while (true) { #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0); #else /* miniupnpc 1.6 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0"); #endif if (r != UPNPCOMMAND_SUCCESS) { LogPrintf( "AddPortMapping(%s, %s, %s) failed with code %d (%s)\n", port, port, lanaddr, r, strupnperror(r)); } else { LogPrintf("UPnP Port Mapping successful.\n"); } // Refresh every 20 minutes MilliSleep(20 * 60 * 1000); } } catch (const boost::thread_interrupted &) { r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0); LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r); freeUPNPDevlist(devlist); devlist = 0; FreeUPNPUrls(&urls); throw; } } else { LogPrintf("No valid UPnP IGDs found\n"); freeUPNPDevlist(devlist); devlist = 0; if (r != 0) { FreeUPNPUrls(&urls); } } } void MapPort(bool fUseUPnP) { static boost::thread *upnp_thread = nullptr; if (fUseUPnP) { if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); delete upnp_thread; } upnp_thread = new boost::thread( boost::bind(&TraceThread, "upnp", &ThreadMapPort)); } else if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); delete upnp_thread; upnp_thread = nullptr; } } #else void MapPort(bool) { // 
Intentionally left blank. } #endif static std::string GetDNSHost(const CDNSSeedData &data, ServiceFlags *requiredServiceBits) { // use default host for non-filter-capable seeds or if we use the default // service bits (NODE_NETWORK) if (!data.supportsServiceBitsFiltering || *requiredServiceBits == NODE_NETWORK) { *requiredServiceBits = NODE_NETWORK; return data.host; } // See chainparams.cpp, most dnsseeds only support one or two possible // servicebits hostnames return strprintf("x%x.%s", *requiredServiceBits, data.host); } void CConnman::ThreadDNSAddressSeed() { // goal: only query DNS seeds if address need is acute. // Avoiding DNS seeds when we don't need them improves user privacy by // creating fewer identifying DNS requests, reduces trust by giving seeds // less influence on the network topology, and reduces traffic to the seeds. if ((addrman.size() > 0) && - (!GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED))) { + (!gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED))) { if (!interruptNet.sleep_for(std::chrono::seconds(11))) { return; } LOCK(cs_vNodes); int nRelevant = 0; for (auto pnode : vNodes) { nRelevant += pnode->fSuccessfullyConnected && ((pnode->nServices & nRelevantServices) == nRelevantServices); } if (nRelevant >= 2) { LogPrintf("P2P peers available. Skipped DNS seeding.\n"); return; } } const std::vector &vSeeds = Params().DNSSeeds(); int found = 0; LogPrintf("Loading addresses from DNS seeds (could take a while)\n"); for (const CDNSSeedData &seed : vSeeds) { if (HaveNameProxy()) { AddOneShot(seed.host); } else { std::vector vIPs; std::vector vAdd; ServiceFlags requiredServiceBits = nRelevantServices; if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true)) { for (const CNetAddr &ip : vIPs) { int nOneDay = 24 * 3600; CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()), requiredServiceBits); // Use a random age between 3 and 7 days old. addr.nTime = GetTime() - 3 * nOneDay - GetRand(4 * nOneDay); vAdd.push_back(addr); found++; } } // TODO: The seed name resolve may fail, yielding an IP of [::], // which results in addrman assigning the same source to results // from different seeds. This should switch to a hard-coded stable // dummy IP for each seed name, so that the resolve is not required // at all. 
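// GetDNSHost above asks filter-capable seeds for peers with specific service
// bits by prefixing the hostname with the bits in hex ("x<bits>.<host>"). A
// tiny sketch of that formatting; the seed name below is hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t requiredServiceBits = 0x1; // e.g. NODE_NETWORK
    const char *host = "seed.example.org";    // hypothetical seed hostname
    char out[256];
    std::snprintf(out, sizeof(out), "x%llx.%s",
                  static_cast<unsigned long long>(requiredServiceBits), host);
    std::printf("%s\n", out); // x1.seed.example.org
    return 0;
}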
if (!vIPs.empty()) { CService seedSource; Lookup(seed.name.c_str(), seedSource, 0, true); addrman.Add(vAdd, seedSource); } } } LogPrintf("%d addresses found from DNS seeds\n", found); } void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); CAddrDB adb; adb.Write(addrman); LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } void CConnman::DumpData() { DumpAddresses(); DumpBanlist(); } void CConnman::ProcessOneShot() { std::string strDest; { LOCK(cs_vOneShots); if (vOneShots.empty()) { return; } strDest = vOneShots.front(); vOneShots.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { if (!OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true)) { AddOneShot(strDest); } } } void CConnman::ThreadOpenConnections() { // Connect to specific addresses if (gArgs.IsArgSet("-connect") && gArgs.GetArgs("-connect").size() > 0) { for (int64_t nLoop = 0;; nLoop++) { ProcessOneShot(); for (const std::string &strAddr : gArgs.GetArgs("-connect")) { CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, nullptr, strAddr.c_str()); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for( std::chrono::milliseconds(500))) { return; } } } if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Initiate network connections int64_t nStart = GetTime(); // Minimum time before next feeler connection (in microseconds). int64_t nNextFeeler = PoissonNextSend(nStart * 1000 * 1000, FEELER_INTERVAL); while (!interruptNet) { ProcessOneShot(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } CSemaphoreGrant grant(*semOutbound); if (interruptNet) { return; } // Add seed nodes if DNS seeds are all down (an infrastructure attack?). if (addrman.size() == 0 && (GetTime() - nStart > 60)) { static bool done = false; if (!done) { LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be " "available.\n"); CNetAddr local; LookupHost("127.0.0.1", local, false); addrman.Add(convertSeed6(Params().FixedSeeds()), local); done = true; } } // // Choose an address to connect to based on most recently seen // CAddress addrConnect; // Only connect out to one peer per network group (/16 for IPv4). Do // this here so we don't have to critsect vNodes inside mapAddresses // critsect. int nOutbound = 0; std::set> setConnected; { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (!pnode->fInbound && !pnode->fAddnode) { // Netgroups for inbound and addnode peers are not excluded // because our goal here is to not use multiple of our // limited outbound slots on a single netgroup but inbound // and addnode peers do not use our outbound slots. Inbound // peers also have the added issue that they're attacker // controlled and could be used to prevent us from // connecting to particular hosts if we used them here. setConnected.insert(pnode->addr.GetGroup()); nOutbound++; } } } // Feeler Connections // // Design goals: // * Increase the number of connectable addresses in the tried table. // // Method: // * Choose a random address from new and attempt to connect to it if // we can connect successfully it is added to tried. // * Start attempting feeler connections only after node finishes // making outbound connections. // * Only make a feeler connection once every few minutes. // bool fFeeler = false; if (nOutbound >= nMaxOutbound) { // The current time right now (in microseconds). 
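// nNextFeeler is advanced with PoissonNextSend, i.e. the gap until the next
// feeler attempt is (conceptually) drawn from an exponential distribution, so
// attempts form a Poisson process and are hard to predict. A sketch of that
// scheduling idea with <random>; this is not the actual PoissonNextSend
// implementation, and the 120 s mean merely stands in for FEELER_INTERVAL.
#include <cstdint>
#include <iostream>
#include <random>

int main() {
    const double averageIntervalSeconds = 120.0;
    std::mt19937_64 rng(42);
    std::exponential_distribution<double> gap(1.0 / averageIntervalSeconds);

    int64_t nowMicros = 0;
    int64_t nextFeelerMicros =
        nowMicros + static_cast<int64_t>(gap(rng) * 1e6);
    for (int minute = 1; minute <= 10; ++minute) {
        nowMicros += 60 * 1000000LL; // one minute passes
        if (nowMicros > nextFeelerMicros) {
            std::cout << "feeler connection at minute " << minute << "\n";
            nextFeelerMicros =
                nowMicros + static_cast<int64_t>(gap(rng) * 1e6);
        }
    }
    return 0;
}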
int64_t nTime = GetTimeMicros(); if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); fFeeler = true; } else { continue; } } int64_t nANow = GetAdjustedTime(); int nTries = 0; while (!interruptNet) { CAddrInfo addr = addrman.Select(fFeeler); // if we selected an invalid address, restart if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr)) { break; } // If we didn't find an appropriate destination after trying 100 // addresses fetched from addrman, stop this loop, and let the outer // loop run again (which sleeps, adds seed nodes, recalculates // already-connected network ranges, ...) before trying new addrman // addresses. nTries++; if (nTries > 100) { break; } if (IsLimited(addr)) { continue; } // only connect to full nodes if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) { continue; } // only consider very recently tried nodes after 30 failed attempts if (nANow - addr.nLastTry < 600 && nTries < 30) { continue; } // only consider nodes missing relevant services after 40 failed // attempts and only if less than half the outbound are up. if ((addr.nServices & nRelevantServices) != nRelevantServices && (nTries < 40 || nOutbound >= (nMaxOutbound >> 1))) { continue; } // do not allow non-default ports, unless after 50 invalid addresses // selected already. if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50) { continue; } addrConnect = addr; break; } if (addrConnect.IsValid()) { if (fFeeler) { // Add small amount of random noise before connection to avoid // synchronization. int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000); if (!interruptNet.sleep_for( std::chrono::milliseconds(randsleep))) { return; } LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler); } } } std::vector CConnman::GetAddedNodeInfo() { std::vector ret; std::list lAddresses(0); { LOCK(cs_vAddedNodes); ret.reserve(vAddedNodes.size()); for (const std::string &strAddNode : vAddedNodes) { lAddresses.push_back(strAddNode); } } // Build a map of all already connected addresses (by IP:port and by name) // to inbound/outbound and resolved CService std::map mapConnected; std::map> mapConnectedByName; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->addr.IsValid()) { mapConnected[pnode->addr] = pnode->fInbound; } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast(pnode->addr)); } } } for (const std::string &strAddNode : lAddresses) { CService service( LookupNumeric(strAddNode.c_str(), Params().GetDefaultPort())); if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); if (it != mapConnected.end()) { ret.push_back( AddedNodeInfo{strAddNode, service, true, it->second}); } else { ret.push_back( AddedNodeInfo{strAddNode, CService(), false, false}); } } else { // strAddNode is a name auto it = mapConnectedByName.find(strAddNode); if (it != mapConnectedByName.end()) { ret.push_back(AddedNodeInfo{strAddNode, it->second.second, true, it->second.first}); } else { ret.push_back( AddedNodeInfo{strAddNode, CService(), false, false}); } } } return ret; } void CConnman::ThreadOpenAddedConnections() { { LOCK(cs_vAddedNodes); if (gArgs.IsArgSet("-addnode")) { vAddedNodes = gArgs.GetArgs("-addnode"); } } while (true) { CSemaphoreGrant grant(*semAddnode); 
std::vector vInfo = GetAddedNodeInfo(); bool tried = false; for (const AddedNodeInfo &info : vInfo) { if (!info.fConnected) { if (!grant.TryAcquire()) { // If we've used up our semaphore and need a new one, lets // not wait here since while we are waiting the // addednodeinfo state might change. break; } // If strAddedNode is an IP/port, decode it immediately, so // OpenNetworkConnection can detect existing connections to that // IP/port. tried = true; CService service(LookupNumeric(info.strAddedNode.c_str(), Params().GetDefaultPort())); OpenNetworkConnection(CAddress(service, NODE_NONE), false, &grant, info.strAddedNode.c_str(), false, false, true); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Retry every 60 seconds if a connection was attempted, otherwise two // seconds. if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) { return; } } } // If successful, this moves the passed grant to the constructed node. bool CConnman::OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool fAddnode) { // // Initiate outbound network connection // if (interruptNet) { return false; } if (!fNetworkActive) { return false; } if (!pszDest) { if (IsLocal(addrConnect) || FindNode((CNetAddr)addrConnect) || IsBanned(addrConnect) || FindNode(addrConnect.ToStringIPPort())) { return false; } } else if (FindNode(std::string(pszDest))) { return false; } CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure); if (!pnode) { return false; } if (grantOutbound) { grantOutbound->MoveTo(pnode->grantOutbound); } if (fOneShot) { pnode->fOneShot = true; } if (fFeeler) { pnode->fFeeler = true; } if (fAddnode) { pnode->fAddnode = true; } GetNodeSignals().InitializeNode(*config, pnode, *this); { LOCK(cs_vNodes); vNodes.push_back(pnode); } return true; } void CConnman::ThreadMessageHandler() { while (!flagInterruptMsgProc) { std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } bool fMoreWork = false; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { continue; } // Receive messages bool fMoreNodeWork = GetNodeSignals().ProcessMessages( *config, pnode, *this, flagInterruptMsgProc); fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend); if (flagInterruptMsgProc) { return; } // Send messages { LOCK(pnode->cs_sendProcessing); GetNodeSignals().SendMessages(*config, pnode, *this, flagInterruptMsgProc); } if (flagInterruptMsgProc) { return; } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } std::unique_lock lock(mutexMsgProc); if (!fMoreWork) { condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), [this] { return fMsgProcWake; }); } fMsgProcWake = false; } } bool CConnman::BindListenPort(const CService &addrBind, std::string &strError, bool fWhitelisted) { strError = ""; int nOne = 1; // Create socket for listening for incoming connections struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if (!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) { strError = strprintf("Error: Bind address family for %s not supported", addrBind.ToString()); LogPrintf("%s\n", strError); return false; } SOCKET hListenSocket = socket(((struct sockaddr *)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP); if (hListenSocket == INVALID_SOCKET) { strError = strprintf("Error: Couldn't open socket for incoming " "connections (socket returned 
error %s)", NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); return false; } if (!IsSelectableSocket(hListenSocket)) { strError = "Error: Couldn't create a listenable socket for incoming " "connections"; LogPrintf("%s\n", strError); return false; } #ifndef WIN32 #ifdef SO_NOSIGPIPE // Different way of disabling SIGPIPE on BSD setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void *)&nOne, sizeof(int)); #endif // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&nOne, sizeof(int)); // Disable Nagle's algorithm setsockopt(hListenSocket, IPPROTO_TCP, TCP_NODELAY, (void *)&nOne, sizeof(int)); #else setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (const char *)&nOne, sizeof(int)); setsockopt(hListenSocket, IPPROTO_TCP, TCP_NODELAY, (const char *)&nOne, sizeof(int)); #endif // Set to non-blocking, incoming connections will also inherit this if (!SetSocketNonBlocking(hListenSocket, true)) { strError = strprintf("BindListenPort: Setting listening socket to " "non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); return false; } // Some systems don't have IPV6_V6ONLY but are always v6only; others do have // the option and enable it by default or not. Try to enable it, if // possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY #ifdef WIN32 setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&nOne, sizeof(int)); #else setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void *)&nOne, sizeof(int)); #endif #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char *)&nProtLevel, sizeof(int)); #endif } if (::bind(hListenSocket, (struct sockaddr *)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) { strError = strprintf(_("Unable to bind to %s on this computer. 
%s " "is probably already running."), addrBind.ToString(), _(PACKAGE_NAME)); } else { strError = strprintf(_("Unable to bind to %s on this computer " "(bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr)); } LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections " "failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } vhListenSocket.push_back(ListenSocket(hListenSocket, fWhitelisted)); if (addrBind.IsRoutable() && fDiscover && !fWhitelisted) { AddLocal(addrBind, LOCAL_BIND); } return true; } void Discover(boost::thread_group &threadGroup) { if (!fDiscover) { return; } #ifdef WIN32 // Get local host IP char pszHostName[256] = ""; if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) { std::vector vaddr; if (LookupHost(pszHostName, vaddr, 0, true)) { for (const CNetAddr &addr : vaddr) { if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString()); } } } } #else // Get local host ip struct ifaddrs *myaddrs; if (getifaddrs(&myaddrs) == 0) { for (struct ifaddrs *ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next) { if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 || strcmp(ifa->ifa_name, "lo") == 0 || strcmp(ifa->ifa_name, "lo0") == 0) { continue; } if (ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in *s4 = (struct sockaddr_in *)(ifa->ifa_addr); CNetAddr addr(s4->sin_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } else if (ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)(ifa->ifa_addr); CNetAddr addr(s6->sin6_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } } freeifaddrs(myaddrs); } #endif } void CConnman::SetNetworkActive(bool active) { LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); if (!active) { fNetworkActive = false; LOCK(cs_vNodes); // Close sockets to all nodes for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } } else { fNetworkActive = true; } uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In) : config(&configIn), nSeed0(nSeed0In), nSeed1(nSeed1In) { fNetworkActive = true; setBannedIsDirty = false; fAddressesInitialized = false; nLastNodeId = 0; nSendBufferMaxSize = 0; nReceiveFloodSize = 0; semOutbound = nullptr; semAddnode = nullptr; nMaxConnections = 0; nMaxOutbound = 0; nMaxAddnode = 0; nBestHeight = 0; clientInterface = nullptr; flagInterruptMsgProc = false; } NodeId CConnman::GetNewNodeId() { return nLastNodeId.fetch_add(1, std::memory_order_relaxed); } bool CConnman::Start(CScheduler &scheduler, std::string &strNodeError, Options connOptions) { nTotalBytesRecv = 0; nTotalBytesSent = 0; nMaxOutboundTotalBytesSentInCycle = 0; nMaxOutboundCycleStartTime = 0; nRelevantServices = connOptions.nRelevantServices; nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; nMaxOutbound = std::min((connOptions.nMaxOutbound), nMaxConnections); nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; 
nReceiveFloodSize = connOptions.nReceiveFloodSize; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; SetBestHeight(connOptions.nBestHeight); clientInterface = connOptions.uiInterface; if (clientInterface) { clientInterface->InitMessage(_("Loading addresses...")); } // Load addresses from peers.dat int64_t nStart = GetTimeMillis(); { CAddrDB adb; if (adb.Read(addrman)) { LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } else { // Addrman can be in an inconsistent state after failure, reset it addrman.Clear(); LogPrintf("Invalid or missing peers.dat; recreating\n"); DumpAddresses(); } } if (clientInterface) { clientInterface->InitMessage(_("Loading banlist...")); } // Load addresses from banlist.dat nStart = GetTimeMillis(); CBanDB bandb; banmap_t banmap; if (bandb.Read(banmap)) { // thread save setter SetBanned(banmap); // no need to write down, just read data SetBannedSetDirty(false); // sweep out unused entries SweepBanned(); LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n", banmap.size(), GetTimeMillis() - nStart); } else { LogPrintf("Invalid or missing banlist.dat; recreating\n"); // force write SetBannedSetDirty(true); DumpBanlist(); } uiInterface.InitMessage(_("Starting network threads...")); fAddressesInitialized = true; if (semOutbound == nullptr) { // initialize semaphore semOutbound = new CSemaphore( std::min((nMaxOutbound + nMaxFeeler), nMaxConnections)); } if (semAddnode == nullptr) { // initialize semaphore semAddnode = new CSemaphore(nMaxAddnode); } // // Start threads // InterruptSocks5(false); interruptNet.reset(); flagInterruptMsgProc = false; { std::unique_lock lock(mutexMsgProc); fMsgProcWake = false; } // Send and receive from sockets, accept connections threadSocketHandler = std::thread( &TraceThread>, "net", std::function(std::bind(&CConnman::ThreadSocketHandler, this))); - if (!GetBoolArg("-dnsseed", true)) { + if (!gArgs.GetBoolArg("-dnsseed", true)) { LogPrintf("DNS seeding disabled\n"); } else { threadDNSAddressSeed = std::thread(&TraceThread>, "dnsseed", std::function( std::bind(&CConnman::ThreadDNSAddressSeed, this))); } // Initiate outbound connections from -addnode threadOpenAddedConnections = std::thread(&TraceThread>, "addcon", std::function(std::bind( &CConnman::ThreadOpenAddedConnections, this))); // Initiate outbound connections unless connect=0 if (!gArgs.IsArgSet("-connect") || gArgs.GetArgs("-connect").size() != 1 || gArgs.GetArgs("-connect")[0] != "0") { threadOpenConnections = std::thread(&TraceThread>, "opencon", std::function( std::bind(&CConnman::ThreadOpenConnections, this))); } // Process messages threadMessageHandler = std::thread(&TraceThread>, "msghand", std::function( std::bind(&CConnman::ThreadMessageHandler, this))); // Dump network addresses scheduler.scheduleEvery(std::bind(&CConnman::DumpData, this), DUMP_ADDRESSES_INTERVAL * 1000); return true; } class CNetCleanup { public: CNetCleanup() {} ~CNetCleanup() { #ifdef WIN32 // Shutdown Windows Sockets WSACleanup(); #endif } } instance_of_cnetcleanup; void CConnman::Interrupt() { { std::lock_guard lock(mutexMsgProc); flagInterruptMsgProc = true; } condMsgProc.notify_all(); interruptNet(); InterruptSocks5(true); if (semOutbound) { for (int i = 0; i < (nMaxOutbound + nMaxFeeler); i++) { semOutbound->post(); } } if (semAddnode) { for (int i = 0; i < nMaxAddnode; i++) { semAddnode->post(); } } } void CConnman::Stop() { if (threadMessageHandler.joinable()) 
{ threadMessageHandler.join(); } if (threadOpenConnections.joinable()) { threadOpenConnections.join(); } if (threadOpenAddedConnections.joinable()) { threadOpenAddedConnections.join(); } if (threadDNSAddressSeed.joinable()) { threadDNSAddressSeed.join(); } if (threadSocketHandler.joinable()) { threadSocketHandler.join(); } if (fAddressesInitialized) { DumpData(); fAddressesInitialized = false; } // Close sockets for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } for (ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET) { if (!CloseSocket(hListenSocket.socket)) { LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError())); } } } // clean up some globals (to help leak detection) for (CNode *pnode : vNodes) { DeleteNode(pnode); } for (CNode *pnode : vNodesDisconnected) { DeleteNode(pnode); } vNodes.clear(); vNodesDisconnected.clear(); vhListenSocket.clear(); delete semOutbound; semOutbound = nullptr; delete semAddnode; semAddnode = nullptr; } void CConnman::DeleteNode(CNode *pnode) { assert(pnode); bool fUpdateConnectionTime = false; GetNodeSignals().FinalizeNode(pnode->GetId(), fUpdateConnectionTime); if (fUpdateConnectionTime) { addrman.Connected(pnode->addr); } delete pnode; } CConnman::~CConnman() { Interrupt(); Stop(); } size_t CConnman::GetAddressCount() const { return addrman.size(); } void CConnman::SetServices(const CService &addr, ServiceFlags nServices) { addrman.SetServices(addr, nServices); } void CConnman::MarkAddressGood(const CAddress &addr) { addrman.Good(addr); } void CConnman::AddNewAddress(const CAddress &addr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(addr, addrFrom, nTimePenalty); } void CConnman::AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(vAddr, addrFrom, nTimePenalty); } std::vector CConnman::GetAddresses() { return addrman.GetAddr(); } bool CConnman::AddNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::const_iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { return false; } } vAddedNodes.push_back(strNode); return true; } bool CConnman::RemoveAddedNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { vAddedNodes.erase(it); return true; } } return false; } size_t CConnman::GetNodeCount(NumConnections flags) { LOCK(cs_vNodes); // Shortcut if we want total if (flags == CConnman::CONNECTIONS_ALL) { return vNodes.size(); } int nNum = 0; for (std::vector::const_iterator it = vNodes.begin(); it != vNodes.end(); ++it) { if (flags & ((*it)->fInbound ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } return nNum; } void CConnman::GetNodeStats(std::vector &vstats) { vstats.clear(); LOCK(cs_vNodes); vstats.reserve(vNodes.size()); for (CNode *pnode : vNodes) { vstats.emplace_back(); pnode->copyStats(vstats.back()); } } bool CConnman::DisconnectNode(const std::string &strNode) { LOCK(cs_vNodes); if (CNode *pnode = FindNode(strNode)) { pnode->fDisconnect = true; return true; } return false; } bool CConnman::DisconnectNode(NodeId id) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (id == pnode->id) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::RecordBytesRecv(uint64_t bytes) { LOCK(cs_totalBytesRecv); nTotalBytesRecv += bytes; } void CConnman::RecordBytesSent(uint64_t bytes) { LOCK(cs_totalBytesSent); nTotalBytesSent += bytes; uint64_t now = GetTime(); if (nMaxOutboundCycleStartTime + nMaxOutboundTimeframe < now) { // timeframe expired, reset cycle nMaxOutboundCycleStartTime = now; nMaxOutboundTotalBytesSentInCycle = 0; } // TODO, exclude whitebind peers nMaxOutboundTotalBytesSentInCycle += bytes; } void CConnman::SetMaxOutboundTarget(uint64_t limit) { LOCK(cs_totalBytesSent); nMaxOutboundLimit = limit; } uint64_t CConnman::GetMaxOutboundTarget() { LOCK(cs_totalBytesSent); return nMaxOutboundLimit; } uint64_t CConnman::GetMaxOutboundTimeframe() { LOCK(cs_totalBytesSent); return nMaxOutboundTimeframe; } uint64_t CConnman::GetMaxOutboundTimeLeftInCycle() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } if (nMaxOutboundCycleStartTime == 0) { return nMaxOutboundTimeframe; } uint64_t cycleEndTime = nMaxOutboundCycleStartTime + nMaxOutboundTimeframe; uint64_t now = GetTime(); return (cycleEndTime < now) ? 0 : cycleEndTime - GetTime(); } void CConnman::SetMaxOutboundTimeframe(uint64_t timeframe) { LOCK(cs_totalBytesSent); if (nMaxOutboundTimeframe != timeframe) { // reset measure-cycle in case of changing the timeframe. nMaxOutboundCycleStartTime = GetTime(); } nMaxOutboundTimeframe = timeframe; } bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return false; } if (historicalBlockServingLimit) { // keep a large enough buffer to at least relay each block once. uint64_t timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); uint64_t buffer = timeLeftInCycle / 600 * ONE_MEGABYTE; if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) { return true; } } else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) { return true; } return false; } uint64_t CConnman::GetOutboundTargetBytesLeft() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 
0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle; } uint64_t CConnman::GetTotalBytesRecv() { LOCK(cs_totalBytesRecv); return nTotalBytesRecv; } uint64_t CConnman::GetTotalBytesSent() { LOCK(cs_totalBytesSent); return nTotalBytesSent; } ServiceFlags CConnman::GetLocalServices() const { return nLocalServices; } void CConnman::SetBestHeight(int height) { nBestHeight.store(height, std::memory_order_release); } int CConnman::GetBestHeight() const { return nBestHeight.load(std::memory_order_acquire); } unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } unsigned int CConnman::GetSendBufferSize() const { return nSendBufferMaxSize; } CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string &addrNameIn, bool fInboundIn) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), fInbound(fInboundIn), id(idIn), nKeyedNetGroup(nKeyedNetGroupIn), addrKnown(5000, 0.001), filterInventoryKnown(50000, 0.000001), nLocalHostNonce(nLocalHostNonceIn), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn), nSendVersion(0) { nServices = NODE_NONE; nServicesExpected = NODE_NONE; hSocket = hSocketIn; nRecvVersion = INIT_PROTO_VERSION; nLastSend = 0; nLastRecv = 0; nSendBytes = 0; nRecvBytes = 0; nTimeOffset = 0; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; nVersion = 0; strSubVer = ""; fWhitelisted = false; fOneShot = false; fAddnode = false; // set by version message fClient = false; fFeeler = false; fSuccessfullyConnected = false; fDisconnect = false; nRefCount = 0; nSendSize = 0; nSendOffset = 0; hashContinue = uint256(); nStartingHeight = -1; filterInventoryKnown.reset(); fSendMempool = false; fGetAddr = false; nNextLocalAddrSend = 0; nNextAddrSend = 0; nNextInvSend = 0; fRelayTxes = false; fSentAddr = false; pfilter = new CBloomFilter(); timeLastMempoolReq = 0; nLastBlockTime = 0; nLastTXTime = 0; nPingNonceSent = 0; nPingUsecStart = 0; nPingUsecTime = 0; fPingQueued = false; nMinPingUsecTime = std::numeric_limits::max(); minFeeFilter = Amount(0); lastSentFeeFilter = Amount(0); nextSendTimeFeeFilter = 0; fPauseRecv = false; fPauseSend = false; nProcessQueueSize = 0; for (const std::string &msg : getAllNetMessageTypes()) { mapRecvBytesPerMsgCmd[msg] = 0; } mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0; if (fLogIPs) { LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id); } else { LogPrint(BCLog::NET, "Added connection peer=%d\n", id); } } CNode::~CNode() { CloseSocket(hSocket); if (pfilter) { delete pfilter; } } void CNode::AskFor(const CInv &inv) { if (mapAskFor.size() > MAPASKFOR_MAX_SZ || setAskFor.size() > SETASKFOR_MAX_SZ) { return; } // a peer may not have multiple non-responded queue positions for a single // inv item. if (!setAskFor.insert(inv.hash).second) { return; } // We're using mapAskFor as a priority queue, the key is the earliest time // the request can be sent. 
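// --- Illustrative sketch (not part of the diff; names below are ours) ---
// CNode::AskFor(), whose body continues below, treats mapAskFor as a
// priority queue keyed by the earliest time a request may be sent: retries
// of the same inv are spaced two minutes apart, and a static counter keeps
// the keys strictly increasing so entries never collide. A condensed,
// self-contained sketch of that scheduling idea (std::string stands in for
// the inv hash):
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <map>
#include <string>

static std::multimap<int64_t, std::string> askQueue; // send-time -> item
static std::map<std::string, int64_t> alreadyAsked;  // item -> scheduled time

void ScheduleAsk(const std::string &item) {
    using namespace std::chrono;
    int64_t now = duration_cast<microseconds>(
                      steady_clock::now().time_since_epoch())
                      .count() -
                  1000000;
    static int64_t lastTime = 0;
    now = std::max(now, ++lastTime); // never reuse a time index
    lastTime = now;

    auto it = alreadyAsked.find(item);
    int64_t prev = (it != alreadyAsked.end()) ? it->second : 0;
    // Each retry is scheduled at least two minutes after the previous one.
    int64_t when = std::max<int64_t>(prev + 2 * 60 * 1000000LL, now);
    alreadyAsked[item] = when;
    askQueue.emplace(when, item);
}
// --- end sketch ----------------------------------------------------------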
int64_t nRequestTime; limitedmap::const_iterator it = mapAlreadyAskedFor.find(inv.hash); if (it != mapAlreadyAskedFor.end()) { nRequestTime = it->second; } else { nRequestTime = 0; } LogPrint(BCLog::NET, "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime / 1000000), id); // Make sure not to reuse time indexes to keep things in the same order int64_t nNow = GetTimeMicros() - 1000000; static int64_t nLastTime; ++nLastTime; nNow = std::max(nNow, nLastTime); nLastTime = nNow; // Each retry is 2 minutes after the last nRequestTime = std::max(nRequestTime + 2 * 60 * 1000000, nNow); if (it != mapAlreadyAskedFor.end()) { mapAlreadyAskedFor.update(it, nRequestTime); } else { mapAlreadyAskedFor.insert(std::make_pair(inv.hash, nRequestTime)); } mapAskFor.insert(std::make_pair(nRequestTime, inv)); } bool CConnman::NodeFullyConnected(const CNode *pnode) { return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect; } void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) { size_t nMessageSize = msg.data.size(); size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE; LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id); std::vector serializedHeader; serializedHeader.reserve(CMessageHeader::HEADER_SIZE); uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize); CMessageHeader hdr(Params().NetMagic(), msg.command.c_str(), nMessageSize); memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr}; size_t nBytesSent = 0; { LOCK(pnode->cs_vSend); bool optimisticSend(pnode->vSendMsg.empty()); // log total amount of bytes per command pnode->mapSendBytesPerMsgCmd[msg.command] += nTotalSize; pnode->nSendSize += nTotalSize; if (pnode->nSendSize > nSendBufferMaxSize) { pnode->fPauseSend = true; } pnode->vSendMsg.push_back(std::move(serializedHeader)); if (nMessageSize) { pnode->vSendMsg.push_back(std::move(msg.data)); } // If write queue empty, attempt "optimistic write" if (optimisticSend == true) { nBytesSent = SocketSendData(pnode); } } if (nBytesSent) { RecordBytesSent(nBytesSent); } } bool CConnman::ForNode(NodeId id, std::function func) { CNode *found = nullptr; LOCK(cs_vNodes); for (auto &&pnode : vNodes) { if (pnode->id == id) { found = pnode; break; } } return found != nullptr && NodeFullyConnected(found) && func(found); } int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds) { return nNow + int64_t(log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */) * average_interval_seconds * -1000000.0 + 0.5); } CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const { return CSipHasher(nSeed0, nSeed1).Write(id); } uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const { std::vector vchNetGroup(ad.GetGroup()); return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP) .Write(&vchNetGroup[0], vchNetGroup.size()) .Finalize(); } /** * This function convert MaxBlockSize from byte to * MB with a decimal precision one digit rounded down * E.g. 
* 1660000 -> 1.6 * 2010000 -> 2.0 * 1000000 -> 1.0 * 230000 -> 0.2 * 50000 -> 0.0 * * NB behavior for EB<1MB not standardized yet still * the function applies the same algo used for * EB greater or equal to 1MB */ std::string getSubVersionEB(uint64_t MaxBlockSize) { // Prepare EB string we are going to add to SubVer: // 1) translate from byte to MB and convert to string // 2) limit the EB string to the first decimal digit (floored) std::stringstream ebMBs; ebMBs << (MaxBlockSize / (ONE_MEGABYTE / 10)); std::string eb = ebMBs.str(); eb.insert(eb.size() - 1, ".", 1); if (eb.substr(0, 1) == ".") { eb = "0" + eb; } return eb; } std::string userAgent(const Config &config) { // format excessive blocksize value std::string eb = getSubVersionEB(config.GetMaxBlockSize()); std::vector uacomments; uacomments.push_back("EB" + eb); // sanitize comments per BIP-0014, format user agent and check total size if (gArgs.IsArgSet("-uacomment")) { for (const std::string &cmt : gArgs.GetArgs("-uacomment")) { if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT)) { LogPrintf( "User Agent comment (%s) contains unsafe characters. " "We are going to use a sanitize version of the comment.\n", cmt); } uacomments.push_back(cmt); } } std::string subversion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments); if (subversion.size() > MAX_SUBVERSION_LENGTH) { LogPrintf("Total length of network version string (%i) exceeds maximum " "length (%i). Reduce the number or size of uacomments. " "String has been resized to the max length allowed.\n", subversion.size(), MAX_SUBVERSION_LENGTH); subversion.resize(MAX_SUBVERSION_LENGTH - 2); subversion.append(")/"); LogPrintf("Current network string has been set to: %s\n", subversion); } return subversion; } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 6ecf193856..388ed3a29c 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1,3835 +1,3838 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "net_processing.h" #include "addrman.h" #include "arith_uint256.h" #include "blockencodings.h" #include "chainparams.h" #include "config.h" #include "consensus/validation.h" #include "hash.h" #include "init.h" #include "merkleblock.h" #include "net.h" #include "netbase.h" #include "netmessagemaker.h" #include "policy/fees.h" #include "policy/policy.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "random.h" #include "tinyformat.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include "validation.h" #include "validationinterface.h" #include #include #if defined(NDEBUG) #error "Bitcoin cannot be compiled without assertions." #endif // Used only to inform the wallet of when we last received a block. std::atomic nTimeBestReceived(0); struct IteratorComparator { template bool operator()(const I &a, const I &b) { return &(*a) < &(*b); } }; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. 
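// --- Illustrative sketch (not part of the diff; names below are ours) ---
// getSubVersionEB() in net.cpp above divides the excessive-block-size by
// 100,000 (a tenth of a megabyte, assuming ONE_MEGABYTE == 1,000,000 as in
// the project's constants), then re-inserts the decimal point before the
// last digit, flooring to one decimal. A self-contained check of that
// arithmetic against the values listed in its comment:
#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>

static const uint64_t ONE_MEGABYTE = 1000000;

std::string SubVersionEB(uint64_t maxBlockSize) {
    std::stringstream ss;
    ss << (maxBlockSize / (ONE_MEGABYTE / 10));
    std::string eb = ss.str();
    eb.insert(eb.size() - 1, ".", 1); // "16" -> "1.6", "0" -> ".0"
    if (eb.substr(0, 1) == ".") {
        eb = "0" + eb;                // ".0" -> "0.0"
    }
    return eb;
}

int main() {
    assert(SubVersionEB(1660000) == "1.6");
    assert(SubVersionEB(2010000) == "2.0");
    assert(SubVersionEB(1000000) == "1.0");
    assert(SubVersionEB(230000) == "0.2");
    assert(SubVersionEB(50000) == "0.0");
    return 0;
}
// --- end sketch ----------------------------------------------------------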
CTransactionRef tx; NodeId fromPeer; int64_t nTimeExpire; }; std::map mapOrphanTransactions GUARDED_BY(cs_main); std::map::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main); void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); static size_t vExtraTxnForCompactIt = 0; static std::vector> vExtraTxnForCompact GUARDED_BY(cs_main); // SHA256("main address relay")[0:8] static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // Internal stuff namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted = 0; /** * Sources of received blocks, saved to be able to send them reject messages or * ban them when processing happens afterwards. Protected by cs_main. * Set mapBlockSource[hash].second to false if the node should not be punished * if the block is invalid. */ std::map> mapBlockSource; /** * Filter for transactions that were recently rejected by AcceptToMemoryPool. * These are not rerequested until the chain tip changes, at which point the * entire filter is reset. Protected by cs_main. * * Without this filter we'd be re-requesting txs from each of our peers, * increasing bandwidth consumption considerably. For instance, with 100 peers, * half of which relay a tx we don't accept, that might be a 50x bandwidth * increase. A flooding attacker attempting to roll-over the filter using * minimum-sized, 60byte, transactions might manage to send 1000/sec if we have * fast peers, so we pick 120,000 to give our peers a two minute window to send * invs to us. * * Decreasing the false positive rate is fairly cheap, so we pick one in a * million to make it highly unlikely for users to have issues with this filter. * * Memory used: 1.3 MB */ std::unique_ptr recentRejects; uint256 hashRecentRejectsChainTip; /** Blocks that are in flight, and that are in the queue to be downloaded. * Protected by cs_main. */ struct QueuedBlock { uint256 hash; //!< Optional. const CBlockIndex *pindex; //!< Whether this block has validated headers at the time of request. bool fValidatedHeaders; //!< Optional, used for CMPCTBLOCK downloads std::unique_ptr partialBlock; }; std::map::iterator>> mapBlocksInFlight; /** Stack of nodes which we have set to announce using compact blocks */ std::list lNodesAnnouncingHeaderAndIDs; /** Number of preferable block download peers. */ int nPreferredDownload = 0; /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads = 0; /** Relay map, protected by cs_main. */ typedef std::map MapRelay; MapRelay mapRelay; /** Expiration-time ordered list of (expire time, relay map entry) pairs, * protected by cs_main). */ std::deque> vRelayExpiration; } // namespace ////////////////////////////////////////////////////////////////////////////// // // Registration of network node signals. // namespace { struct CBlockReject { uint8_t chRejectCode; std::string strRejectReason; uint256 hashBlock; }; /** * Maintain validation-specific state about nodes, protected by cs_main, instead * by CNode's own locks. This simplifies asynchronous operation, where * processing of incoming data is done after the ProcessMessage call returns, * and we're no longer holding the node's locks. */ struct CNodeState { //! The peer's address const CService address; //! Whether we have a fully established connection. bool fCurrentlyConnected; //! Accumulated misbehaviour score for this peer. int nMisbehavior; //! Whether this peer should be disconnected and banned (unless //! whitelisted). bool fShouldBan; //! 
String name of this peer (debugging/logging purposes). const std::string name; //! List of asynchronously-determined block rejections to notify this peer //! about. std::vector rejects; //! The best known block we know this peer has announced. const CBlockIndex *pindexBestKnownBlock; //! The hash of the last unknown block this peer has announced. uint256 hashLastUnknownBlock; //! The last full block we both have. const CBlockIndex *pindexLastCommonBlock; //! The best header we have sent our peer. const CBlockIndex *pindexBestHeaderSent; //! Length of current-streak of unconnecting headers announcements int nUnconnectingHeaders; //! Whether we've started headers synchronization with this peer. bool fSyncStarted; //! Since when we're stalling block download progress (in microseconds), or //! 0. int64_t nStallingSince; std::list vBlocksInFlight; //! When the first entry in vBlocksInFlight started downloading. Don't care //! when vBlocksInFlight is empty. int64_t nDownloadingSince; int nBlocksInFlight; int nBlocksInFlightValidHeaders; //! Whether we consider this a preferred download peer. bool fPreferredDownload; //! Whether this peer wants invs or headers (when possible) for block //! announcements. bool fPreferHeaders; //! Whether this peer wants invs or cmpctblocks (when possible) for block //! announcements. bool fPreferHeaderAndIDs; /** * Whether this peer will send us cmpctblocks if we request them. * This is not used to gate request logic, as we really only care about * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the * version of compact blocks we send. */ bool fProvidesHeaderAndIDs; /** * If we've announced NODE_WITNESS to this peer: whether the peer sends * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends * non-witnesses in cmpctblocks/blocktxns. */ bool fSupportsDesiredCmpctVersion; CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) { fCurrentlyConnected = false; nMisbehavior = 0; fShouldBan = false; pindexBestKnownBlock = nullptr; hashLastUnknownBlock.SetNull(); pindexLastCommonBlock = nullptr; pindexBestHeaderSent = nullptr; nUnconnectingHeaders = 0; fSyncStarted = false; nStallingSince = 0; nDownloadingSince = 0; nBlocksInFlight = 0; nBlocksInFlightValidHeaders = 0; fPreferredDownload = false; fPreferHeaders = false; fPreferHeaderAndIDs = false; fProvidesHeaderAndIDs = false; fSupportsDesiredCmpctVersion = false; } }; /** Map maintaining per-node state. Requires cs_main. */ std::map mapNodeState; // Requires cs_main. CNodeState *State(NodeId pnode) { std::map::iterator it = mapNodeState.find(pnode); if (it == mapNodeState.end()) { return nullptr; } return &it->second; } void UpdatePreferredDownload(CNode *node, CNodeState *state) { nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient; nPreferredDownload += state->fPreferredDownload; } void PushNodeVersion(const Config &config, CNode *pnode, CConnman &connman, int64_t nTime) { ServiceFlags nLocalNodeServices = pnode->GetLocalServices(); uint64_t nonce = pnode->GetLocalNonce(); int nNodeStartingHeight = pnode->GetMyStartingHeight(); NodeId nodeid = pnode->GetId(); CAddress addr = pnode->addr; CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? 
addr : CAddress(CService(), addr.nServices)); CAddress addrMe = CAddress(CService(), nLocalNodeServices); connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe, nonce, userAgent(config), nNodeStartingHeight, ::fRelayTxes)); if (fLogIPs) { LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, " "us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid); } else { LogPrint( BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid); } } void InitializeNode(const Config &config, CNode *pnode, CConnman &connman) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); mapNodeState.emplace_hint( mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName))); } if (!pnode->fInbound) { PushNodeVersion(config, pnode, connman, GetTime()); } } void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) { fUpdateConnectionTime = false; LOCK(cs_main); CNodeState *state = State(nodeid); if (state->fSyncStarted) { nSyncStarted--; } if (state->nMisbehavior == 0 && state->fCurrentlyConnected) { fUpdateConnectionTime = true; } for (const QueuedBlock &entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.hash); } // Get rid of stale mapBlockSource entries for this peer as they may leak // if we don't clean them up (I saw on the order of ~100 stale entries on // a full resynch in my testing -- these entries stay forever). // Performance note: most of the time mapBlockSource has 0 or 1 entries. // During synch of blockchain it may end up with as many as 1000 entries, // which still only takes ~1ms to iterate through on even old hardware. // So this memleak cleanup is not expensive and worth doing since even // small leaks are bad. :) for (auto it = mapBlockSource.begin(); it != mapBlockSource.end(); /*NA*/) { if (it->second.first == nodeid) { mapBlockSource.erase(it++); } else { ++it; } } EraseOrphansFor(nodeid); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); mapNodeState.erase(nodeid); if (mapNodeState.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); } } // Requires cs_main. // Returns a bool indicating whether we requested this block. // Also used if a block was /not/ received and timed out or started with another // peer. bool MarkBlockAsReceived(const uint256 &hash) { std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { // Last validated block on the queue was received. 
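// --- Illustrative sketch (not part of the diff; names below are ours) ---
// MarkBlockAsReceived() here and MarkBlockAsInFlight() just below index
// every in-flight block twice: in the requesting peer's list and in a
// global map from block hash to (peer id, iterator into that list), so a
// block can be unregistered without scanning any list. A toy model of that
// double bookkeeping:
#include <cstdint>
#include <list>
#include <map>
#include <string>
#include <utility>

using ToyNodeId = int64_t;
struct ToyQueuedBlock { std::string hash; };

static std::map<ToyNodeId, std::list<ToyQueuedBlock>> inFlightPerPeer;
static std::map<std::string,
                std::pair<ToyNodeId, std::list<ToyQueuedBlock>::iterator>>
    inFlightByHash;

void ToyMarkInFlight(ToyNodeId peer, const std::string &hash) {
    auto &lst = inFlightPerPeer[peer];
    auto it = lst.insert(lst.end(), ToyQueuedBlock{hash});
    inFlightByHash[hash] = {peer, it}; // remember where to erase later
}

bool ToyMarkReceived(const std::string &hash) {
    auto it = inFlightByHash.find(hash);
    if (it == inFlightByHash.end()) {
        return false; // we never requested this block
    }
    inFlightPerPeer[it->second.first].erase(it->second.second);
    inFlightByHash.erase(it);
    return true;
}
// --- end sketch ----------------------------------------------------------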
nPeersWithValidatedDownloads--; } if (state->vBlocksInFlight.begin() == itInFlight->second.second) { // First block on the queue was received, update the start download // time for the next one state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); } state->vBlocksInFlight.erase(itInFlight->second.second); state->nBlocksInFlight--; state->nStallingSince = 0; mapBlocksInFlight.erase(itInFlight); return true; } return false; } // Requires cs_main. // returns false, still setting pit, if the block was already in flight from the // same peer pit will only be valid as long as the same cs_main lock is being // held. static bool MarkBlockAsInFlight(const Config &config, NodeId nodeid, const uint256 &hash, const Consensus::Params &consensusParams, const CBlockIndex *pindex = nullptr, std::list::iterator **pit = nullptr) { CNodeState *state = State(nodeid); assert(state != nullptr); // Short-circuit most stuff in case its from the same node. std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { *pit = &itInFlight->second.second; return false; } // Make sure it's not listed somewhere already. MarkBlockAsReceived(hash); std::list::iterator it = state->vBlocksInFlight.insert( state->vBlocksInFlight.end(), {hash, pindex, pindex != nullptr, std::unique_ptr( pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. state->nDownloadingSince = GetTimeMicros(); } if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { nPeersWithValidatedDownloads++; } itInFlight = mapBlocksInFlight .insert(std::make_pair(hash, std::make_pair(nodeid, it))) .first; if (pit) { *pit = &itInFlight->second.second; } return true; } /** Check whether the last unknown block a peer advertised is not yet known. */ void ProcessBlockAvailability(NodeId nodeid) { CNodeState *state = State(nodeid); assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = itOld->second; } state->hashLastUnknownBlock.SetNull(); } } } /** Update tracking information about which blocks a peer is assumed to have. */ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { CNodeState *state = State(nodeid); assert(state != nullptr); ProcessBlockAvailability(nodeid); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { // An actually better block was announced. if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = it->second; } } else { // An unknown block was announced; just assume that the latest one is // the best one. 
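// --- Illustrative sketch (not part of the diff; names below are ours) ---
// ProcessBlockAvailability()/UpdateBlockAvailability() above keep, per
// peer, the best block we believe the peer has (by cumulative work) plus a
// single "last unknown" announced hash that is re-checked once its header
// shows up in our index. A toy version with a std::map standing in for
// mapBlockIndex (hash -> chain work):
#include <cstdint>
#include <map>
#include <string>

struct ToyPeerState {
    std::string bestKnownHash;   // best block we know the peer has
    uint64_t bestKnownWork = 0;
    std::string lastUnknownHash; // announced, but not yet in our index
};

static std::map<std::string, uint64_t> toyBlockIndex; // hash -> chain work

void ToyProcessAvailability(ToyPeerState &st) {
    if (st.lastUnknownHash.empty()) {
        return;
    }
    auto it = toyBlockIndex.find(st.lastUnknownHash);
    if (it != toyBlockIndex.end() && it->second > 0) {
        if (it->second >= st.bestKnownWork) {
            st.bestKnownHash = it->first;
            st.bestKnownWork = it->second;
        }
        st.lastUnknownHash.clear();
    }
}

void ToyUpdateAvailability(ToyPeerState &st, const std::string &hash) {
    ToyProcessAvailability(st);
    auto it = toyBlockIndex.find(hash);
    if (it != toyBlockIndex.end() && it->second > 0) {
        // Known header: adopt it if it is at least as good as the current best.
        if (it->second >= st.bestKnownWork) {
            st.bestKnownHash = hash;
            st.bestKnownWork = it->second;
        }
    } else {
        // Unknown header: remember only the latest announcement.
        st.lastUnknownHash = hash;
    }
}
// --- end sketch ----------------------------------------------------------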
state->hashLastUnknownBlock = hash; } } void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman &connman) { AssertLockHeld(cs_main); CNodeState *nodestate = State(nodeid); if (!nodestate) { LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid); return; } if (!nodestate->fProvidesHeaderAndIDs) { return; } for (std::list::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { if (*it == nodeid) { lNodesAnnouncingHeaderAndIDs.erase(it); lNodesAnnouncingHeaderAndIDs.push_back(nodeid); return; } } connman.ForNode(nodeid, [&connman](CNode *pfrom) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode *pnodeStop) { connman.PushMessage( pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } fAnnounceUsingCMPCTBLOCK = true; connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); return true; }); } // Requires cs_main bool CanDirectFetch(const Consensus::Params &consensusParams) { return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; } // Requires cs_main bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) { if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) { return true; } if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) { return true; } return false; } /** * Update pindexLastCommonBlock and add not-in-flight missing successors to * vBlocks, until it has at most count entries. */ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector &vBlocks, NodeId &nodeStaller, const Consensus::Params &consensusParams) { if (count == 0) { return; } vBlocks.reserve(vBlocks.size() + count); CNodeState *state = State(nodeid); assert(state != nullptr); // Make sure pindexBestKnownBlock is up to date, we'll need it. ProcessBlockAvailability(nodeid); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { // This peer has nothing interesting. return; } if (state->pindexLastCommonBlock == nullptr) { // Bootstrap quickly by guessing a parent of our best tip is the forking // point. Guessing wrong in either direction is not a problem. state->pindexLastCommonBlock = chainActive[std::min( state->pindexBestKnownBlock->nHeight, chainActive.Height())]; } // If the peer reorganized, our previous pindexLastCommonBlock may not be an // ancestor of its current tip anymore. Go back enough to fix that. state->pindexLastCommonBlock = LastCommonAncestor( state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) { return; } std::vector vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in // common with this peer. 
The +1 is so we can detect stalling, namely if we // would be able to download that next block if the window were 1 larger. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; int nMaxHeight = std::min(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { // Read up to 128 (or more, if more blocks than that are needed) // successors of pindexWalk (towards pindexBestKnownBlock) into // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as // expensive as iterating over ~100 CBlockIndex* entries anyway. int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max(count - vBlocks.size(), 128)); vToFetch.resize(nToFetch); pindexWalk = state->pindexBestKnownBlock->GetAncestor( pindexWalk->nHeight + nToFetch); vToFetch[nToFetch - 1] = pindexWalk; for (unsigned int i = nToFetch - 1; i > 0; i--) { vToFetch[i - 1] = vToFetch[i]->pprev; } // Iterate over those blocks in vToFetch (in forward direction), adding // the ones that are not yet downloaded and not in flight to vBlocks. In // the mean time, update pindexLastCommonBlock as long as all ancestors // are already downloaded, or if it's already part of our chain (and // therefore don't need it even if pruned). for (const CBlockIndex *pindex : vToFetch) { if (!pindex->IsValid(BLOCK_VALID_TREE)) { // We consider the chain that this peer is on invalid. return; } if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) { if (pindex->nChainTx) { state->pindexLastCommonBlock = pindex; } } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != nodeid) { // We aren't able to fetch anything, but we would be if // the download window was one larger. nodeStaller = waitingfor; } return; } vBlocks.push_back(pindex); if (vBlocks.size() == count) { return; } } else if (waitingfor == -1) { // This is the first already-in-flight block. waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; } } } } } // namespace bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { LOCK(cs_main); CNodeState *state = State(nodeid); if (state == nullptr) { return false; } stats.nMisbehavior = state->nMisbehavior; stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; stats.nCommonHeight = state->pindexLastCommonBlock ? 
state->pindexLastCommonBlock->nHeight : -1; for (const QueuedBlock &queue : state->vBlocksInFlight) { if (queue.pindex) { stats.vHeightInFlight.push_back(queue.pindex->nHeight); } } return true; } void RegisterNodeSignals(CNodeSignals &nodeSignals) { nodeSignals.ProcessMessages.connect(&ProcessMessages); nodeSignals.SendMessages.connect(&SendMessages); nodeSignals.InitializeNode.connect(&InitializeNode); nodeSignals.FinalizeNode.connect(&FinalizeNode); } void UnregisterNodeSignals(CNodeSignals &nodeSignals) { nodeSignals.ProcessMessages.disconnect(&ProcessMessages); nodeSignals.SendMessages.disconnect(&SendMessages); nodeSignals.InitializeNode.disconnect(&InitializeNode); nodeSignals.FinalizeNode.disconnect(&FinalizeNode); } ////////////////////////////////////////////////////////////////////////////// // // mapOrphanTransactions // void AddToCompactExtraTransactions(const CTransactionRef &tx) { - size_t max_extra_txn = GetArg("-blockreconstructionextratxn", - DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); + size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", + DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) { return; } if (!vExtraTxnForCompact.size()) { vExtraTxnForCompact.resize(max_extra_txn); } vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetId(), tx); vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { const uint256 &txid = tx->GetId(); if (mapOrphanTransactions.count(txid)) { return false; } // Ignore big transactions, to avoid a send-big-orphans memory exhaustion // attack. If a peer has a legitimate large transaction with a missing // parent then we assume it will rebroadcast it later, after the parent // transaction(s) have been mined or received. // 100 orphans, each of which is at most 99,999 bytes big is at most 10 // megabytes of orphans and somewhat more byprev index (in the worst case): unsigned int sz = GetTransactionSize(*tx); if (sz >= MAX_STANDARD_TX_SIZE) { LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, txid.ToString()); return false; } auto ret = mapOrphanTransactions.emplace( txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME}); assert(ret.second); for (const CTxIn &txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } AddToCompactExtraTransactions(tx); LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", txid.ToString(), mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); return true; } static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator it = mapOrphanTransactions.find(hash); if (it == mapOrphanTransactions.end()) { return 0; } for (const CTxIn &txin : it->second.tx->vin) { auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itPrev == mapOrphanTransactionsByPrev.end()) { continue; } itPrev->second.erase(it); if (itPrev->second.empty()) { mapOrphanTransactionsByPrev.erase(itPrev); } } mapOrphanTransactions.erase(it); return 1; } void EraseOrphansFor(NodeId peer) { int nErased = 0; std::map::iterator iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { // Increment to avoid iterator becoming invalid. 
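// --- Illustrative sketch (not part of the diff; names below are ours) ---
// EraseOrphansFor(), whose loop continues just below, relies on the classic
// advance-before-erase idiom: copy the iterator, increment the original,
// then erase through the copy so the loop's iterator is never invalidated.
// Minimal demonstration on a plain std::map:
#include <map>
#include <string>

void EraseAllFromPeer(std::map<std::string, int> &orphans, int peer) {
    for (auto it = orphans.begin(); it != orphans.end(); /* no ++ here */) {
        auto maybeErase = it++;        // advance first ...
        if (maybeErase->second == peer) {
            orphans.erase(maybeErase); // ... then erase the old position
        }
    }
}
// --- end sketch ----------------------------------------------------------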
std::map::iterator maybeErase = iter++; if (maybeErase->second.fromPeer == peer) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } } if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); } } unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { unsigned int nEvicted = 0; static int64_t nNextSweep; int64_t nNow = GetTime(); if (nNextSweep <= nNow) { // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; std::map::iterator iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { std::map::iterator maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } } // Sweep again 5 minutes after the next entry that expires in order to // batch the linear scan. nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); } } while (mapOrphanTransactions.size() > nMaxOrphans) { // Evict a random orphan: uint256 randomhash = GetRandHash(); std::map::iterator it = mapOrphanTransactions.lower_bound(randomhash); if (it == mapOrphanTransactions.end()) { it = mapOrphanTransactions.begin(); } EraseOrphanTx(it->first); ++nEvicted; } return nEvicted; } // Requires cs_main. void Misbehaving(NodeId pnode, int howmuch, const std::string &reason) { if (howmuch == 0) { return; } CNodeState *state = State(pnode); if (state == nullptr) { return; } state->nMisbehavior += howmuch; - int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); + int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) { LogPrintf( "%s: %s peer=%d (%d -> %d) reason: %s BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior - howmuch, state->nMisbehavior, reason.c_str()); state->fShouldBan = true; } else { LogPrintf("%s: %s peer=%d (%d -> %d) reason: %s\n", __func__, state->name, pnode, state->nMisbehavior - howmuch, state->nMisbehavior, reason.c_str()); } } // overloaded variant of above to operate on CNode*s static void Misbehaving(CNode *node, int howmuch, const std::string &reason) { Misbehaving(node->GetId(), howmuch, reason); } ////////////////////////////////////////////////////////////////////////////// // // blockchain -> download logic notification // PeerLogicValidation::PeerLogicValidation(CConnman *connmanIn) : connman(connmanIn) { // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); } void PeerLogicValidation::BlockConnected( const std::shared_ptr &pblock, const CBlockIndex *pindex, const std::vector &vtxConflicted) { LOCK(cs_main); std::vector vOrphanErase; for (const CTransactionRef &ptx : pblock->vtx) { const CTransaction &tx = *ptx; // Which orphan pool entries must we evict? 
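// --- Illustrative sketch (not part of the diff; names below are ours) ---
// LimitOrphanTxSize() above evicts a roughly uniform random orphan by
// drawing a random hash and taking lower_bound() on the map, wrapping to
// begin() when the draw lands past the last key. The same trick on a toy
// map keyed by uint64_t:
#include <cstdint>
#include <map>
#include <random>
#include <string>

void EvictRandom(std::map<uint64_t, std::string> &pool, std::mt19937_64 &rng) {
    if (pool.empty()) {
        return;
    }
    auto it = pool.lower_bound(rng()); // first element >= the random key
    if (it == pool.end()) {
        it = pool.begin();             // wrap around, as the original does
    }
    pool.erase(it);
}
// --- end sketch ----------------------------------------------------------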
for (size_t j = 0; j < tx.vin.size(); j++) { auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransaction &orphanTx = *(*mi)->second.tx; const uint256 &orphanHash = orphanTx.GetHash(); vOrphanErase.push_back(orphanHash); } } } // Erase orphan transactions include or precluded by this block if (vOrphanErase.size()) { int nErased = 0; for (uint256 &orphanId : vOrphanErase) { nErased += EraseOrphanTx(orphanId); } LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } } static CCriticalSection cs_most_recent_block; static std::shared_ptr most_recent_block; static std::shared_ptr most_recent_compact_block; static uint256 most_recent_block_hash; void PeerLogicValidation::NewPoWValidBlock( const CBlockIndex *pindex, const std::shared_ptr &pblock) { std::shared_ptr pcmpctblock = std::make_shared(*pblock); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); LOCK(cs_main); static int nHighestFastAnnounce = 0; if (pindex->nHeight <= nHighestFastAnnounce) { return; } nHighestFastAnnounce = pindex->nHeight; uint256 hashBlock(pblock->GetHash()); { LOCK(cs_most_recent_block); most_recent_block_hash = hashBlock; most_recent_block = pblock; most_recent_compact_block = pcmpctblock; } connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, &hashBlock](CNode *pnode) { // TODO: Avoid the repeated-serialization here if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) { return; } ProcessBlockAvailability(pnode->GetId()); CNodeState &state = *State(pnode->GetId()); // If the peer has, or we announced to them the previous block already, // but we don't think they have this one, go ahead and announce it. if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock", hashBlock.ToString(), pnode->id); connman->PushMessage( pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock)); state.pindexBestHeaderSent = pindex; } }); } void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { const int nNewHeight = pindexNew->nHeight; connman->SetBestHeight(nNewHeight); if (!fInitialDownload) { // Find the hashes of all blocks that weren't previously in the best // chain. std::vector vHashes; const CBlockIndex *pindexToAnnounce = pindexNew; while (pindexToAnnounce != pindexFork) { vHashes.push_back(pindexToAnnounce->GetBlockHash()); pindexToAnnounce = pindexToAnnounce->pprev; if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { // Limit announcements in case of a huge reorganization. Rely on // the peer's synchronization mechanism in that case. break; } } // Relay inventory, but don't relay old inventory during initial block // download. connman->ForEachNode([nNewHeight, &vHashes](CNode *pnode) { if (nNewHeight > (pnode->nStartingHeight != -1 ? 
pnode->nStartingHeight - 2000 : 0)) { for (const uint256 &hash : boost::adaptors::reverse(vHashes)) { pnode->PushBlockHash(hash); } } }); connman->WakeMessageHandler(); } nTimeBestReceived = GetTime(); } void PeerLogicValidation::BlockChecked(const CBlock &block, const CValidationState &state) { LOCK(cs_main); const uint256 hash(block.GetHash()); std::map>::iterator it = mapBlockSource.find(hash); int nDoS = 0; if (state.IsInvalid(nDoS)) { // Don't send reject message with code 0 or an internal reject code. if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { CBlockReject reject = { uint8_t(state.GetRejectCode()), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash}; State(it->second.first)->rejects.push_back(reject); if (nDoS > 0 && it->second.second) { Misbehaving(it->second.first, nDoS, state.GetRejectReason()); } } } // Check that: // 1. The block is valid // 2. We're not in initial block download // 3. This is currently the best block we're aware of. We haven't updated // the tip yet so we have no way to check this directly here. Instead we // just check that there are currently no other blocks in flight. else if (state.IsValid() && !IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman); } } if (it != mapBlockSource.end()) { mapBlockSource.erase(it); } } ////////////////////////////////////////////////////////////////////////////// // // Messages // static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { switch (inv.type) { case MSG_TX: { assert(recentRejects); if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions // might be now valid, e.g. due to a nLockTime'd tx becoming // valid, or a double-spend. Reset the rejects filter and give // those txs a second chance. hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash(); recentRejects->reset(); } // Use pcoinsTip->HaveCoinInCache as a quick approximation to // exclude requesting or processing some txs which have already been // included in a block. As this is best effort, we only check for // output 0 and 1. This works well enough in practice and we get // diminishing returns with 2 onward. return recentRejects->contains(inv.hash) || mempool.exists(inv.hash) || mapOrphanTransactions.count(inv.hash) || pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1)); } case MSG_BLOCK: return mapBlockIndex.count(inv.hash); } // Don't know what it is, just say we already got one return true; } static void RelayTransaction(const CTransaction &tx, CConnman &connman) { CInv inv(MSG_TX, tx.GetId()); connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); }); } static void RelayAddress(const CAddress &addr, bool fReachable, CConnman &connman) { // Limited relaying of addresses outside our network(s) unsigned int nRelayNodes = fReachable ? 2 : 1; // Relay to a limited number of other nodes. // Use deterministic randomness to send to the same nodes for 24 hours at a // time so the addrKnowns of the chosen nodes prevent repeats. 
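// --- Illustrative sketch (not part of the diff; names below are ours) ---
// RelayAddress(), which continues below, derives a per-address, per-day key
// and keeps the one or two peers with the highest keyed hash, so a given
// address is relayed to the same small peer set for a whole 24 hour window.
// A toy version using std::hash and an ad-hoc mix instead of SipHash; only
// the selection idea matches the original:
#include <cstdint>
#include <ctime>
#include <functional>
#include <utility>
#include <vector>

std::vector<uint64_t> PickRelayPeers(uint64_t addrHash,
                                     const std::vector<uint64_t> &peerIds,
                                     unsigned int nRelayNodes /* 1 or 2 */) {
    uint64_t dayBucket = (std::time(nullptr) + addrHash) / (24 * 60 * 60);
    std::vector<std::pair<uint64_t, uint64_t>> best(nRelayNodes, {0, 0});
    for (uint64_t peer : peerIds) {
        // Deterministic per-(address, day, peer) key.
        uint64_t key =
            std::hash<uint64_t>{}(addrHash ^ (dayBucket << 1) ^ (peer << 17));
        for (unsigned int i = 0; i < nRelayNodes; i++) {
            if (key > best[i].first) {
                // Shift weaker candidates down and insert here.
                for (unsigned int j = nRelayNodes - 1; j > i; j--) {
                    best[j] = best[j - 1];
                }
                best[i] = {key, peer};
                break;
            }
        }
    }
    std::vector<uint64_t> out;
    for (const auto &p : best) {
        if (p.first != 0) {
            out.push_back(p.second);
        }
    }
    return out;
}
// --- end sketch ----------------------------------------------------------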
uint64_t hashAddr = addr.GetHash(); const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hashAddr << 32) .Write((GetTime() + hashAddr) / (24 * 60 * 60)); FastRandomContext insecure_rand; std::array, 2> best{ {{0, nullptr}, {0, nullptr}}}; assert(nRelayNodes <= best.size()); auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) { if (pnode->nVersion >= CADDR_TIME_VERSION) { uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); best[i] = std::make_pair(hashKey, pnode); break; } } } }; auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] { for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { best[i].second->PushAddress(addr, insecure_rand); } }; connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } static void ProcessGetData(const Config &config, CNode *pfrom, const Consensus::Params &consensusParams, CConnman &connman, const std::atomic &interruptMsgProc) { std::deque::iterator it = pfrom->vRecvGetData.begin(); std::vector vNotFound; const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); LOCK(cs_main); while (it != pfrom->vRecvGetData.end()) { // Don't bother if send buffer is too full to respond anyway. if (pfrom->fPauseSend) { break; } const CInv &inv = *it; { if (interruptMsgProc) { return; } it++; if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { bool send = false; BlockMap::iterator mi = mapBlockIndex.find(inv.hash); if (mi != mapBlockIndex.end()) { if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) && mi->second->IsValid(BLOCK_VALID_TREE)) { // If we have the block and all of its parents, but have // not yet validated it, we might be in the middle of // connecting it (ie in the unlock of cs_main before // ActivateBestChain but after AcceptBlock). In this // case, we need to run ActivateBestChain prior to // checking the relay conditions below. std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } CValidationState dummy; ActivateBestChain(config, dummy, a_recent_block); } if (chainActive.Contains(mi->second)) { send = true; } else { static const int nOneMonth = 30 * 24 * 60 * 60; // To prevent fingerprinting attacks, only send blocks // outside of the active chain if they are valid, and no // more than a month older (both in time, and in best // equivalent proof of work) than the best header chain // we know about. send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) && (GetBlockProofEquivalentTime( *pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth); if (!send) { LogPrintf("%s: ignoring request from peer=%i for " "old block that isn't in the main " "chain\n", __func__, pfrom->GetId()); } } } // Disconnect node in case we have reached the outbound limit // for serving historical blocks never disconnect whitelisted // nodes. 
// assume > 1 week = historical static const int nOneWeek = 7 * 24 * 60 * 60; if (send && connman.OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "historical block serving limit " "reached, disconnect peer=%d\n", pfrom->GetId()); // disconnect node pfrom->fDisconnect = true; send = false; } // Pruned nodes may have deleted the block, so check whether // it's available before trying to send. if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) { // Send block from disk CBlock block; if (!ReadBlockFromDisk(block, (*mi).second, config)) { assert(!"cannot load block from disk"); } if (inv.type == MSG_BLOCK) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::BLOCK, block)); } else if (inv.type == MSG_FILTERED_BLOCK) { bool sendMerkleBlock = false; CMerkleBlock merkleBlock; { LOCK(pfrom->cs_filter); if (pfrom->pfilter) { sendMerkleBlock = true; merkleBlock = CMerkleBlock(block, *pfrom->pfilter); } } if (sendMerkleBlock) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); // CMerkleBlock just contains hashes, so also push // any transactions in the block the client did not // see. This avoids hurting performance by // pointlessly requiring a round-trip. Note that // there is currently no way for a node to request // any single transactions we didn't send here - // they must either disconnect and retry or request // the full block. Thus, the protocol spec specified // allows for us to provide duplicate txn here, // however we MUST always provide at least what the // remote peer needs. typedef std::pair PairType; for (PairType &pair : merkleBlock.vMatchedTxn) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::TX, *block.vtx[pair.first])); } } // else // no response } else if (inv.type == MSG_CMPCT_BLOCK) { // If a peer is asking for old blocks, we're almost // guaranteed they won't have a useful mempool to match // against a compact block, and we don't feel like // constructing the object for them, so instead we // respond with the full, non-compact block. int nSendFlags = 0; if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { CBlockHeaderAndShortTxIDs cmpctblock(block); connman.PushMessage( pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } else { connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, block)); } } // Trigger the peer node to send a getblocks request for the // next batch of inventory. if (inv.hash == pfrom->hashContinue) { // Bypass PushInventory, this must send even if // redundant, and we want it right after the last block // so they don't wait for other stuff first. std::vector vInv; vInv.push_back( CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::INV, vInv)); pfrom->hashContinue.SetNull(); } } } else if (inv.type == MSG_TX) { // Send stream from relay memory bool push = false; auto mi = mapRelay.find(inv.hash); int nSendFlags = 0; if (mi != mapRelay.end()) { connman.PushMessage( pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); push = true; } else if (pfrom->timeLastMempoolReq) { auto txinfo = mempool.info(inv.hash); // To protect privacy, do not answer getdata using the // mempool when that TX couldn't have been INVed in reply to // a MEMPOOL request. 
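// --- Illustrative sketch (not part of the diff; names below are ours) ---
// The mempool check that follows only serves a transaction from the mempool
// when it was already there at the time of the peer's last MEMPOOL request,
// so a getdata probe cannot learn about anything the peer could not have
// been told already. Reduced to its core:
#include <cstdint>

bool MayServeFromMempool(int64_t txEntryTime, int64_t lastMempoolReqTime) {
    // lastMempoolReqTime == 0 means the peer never sent us a MEMPOOL request.
    return lastMempoolReqTime != 0 && txEntryTime <= lastMempoolReqTime;
}
// --- end sketch ----------------------------------------------------------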
                    if (txinfo.tx &&
                        txinfo.nTime <= pfrom->timeLastMempoolReq) {
                        connman.PushMessage(
                            pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX,
                                                 *txinfo.tx));
                        push = true;
                    }
                }

                if (!push) {
                    vNotFound.push_back(inv);
                }
            }

            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
                inv.type == MSG_CMPCT_BLOCK) {
                break;
            }
        }
    }

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it
        // doesn't have to wait around forever. Currently only SPV clients
        // actually care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        connman.PushMessage(pfrom,
                            msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
    }
}

uint32_t GetFetchFlags(CNode *pfrom, const CBlockIndex *pprev,
                       const Consensus::Params &chainparams) {
    uint32_t nFetchFlags = 0;
    return nFetchFlags;
}

inline static void SendBlockTransactions(const CBlock &block,
                                         const BlockTransactionsRequest &req,
                                         CNode *pfrom, CConnman &connman) {
    BlockTransactions resp(req);
    for (size_t i = 0; i < req.indexes.size(); i++) {
        if (req.indexes[i] >= block.vtx.size()) {
            LOCK(cs_main);
            Misbehaving(pfrom, 100, "out-of-bound-tx-index");
            LogPrintf(
                "Peer %d sent us a getblocktxn with out-of-bounds tx indices",
                pfrom->id);
            return;
        }
        resp.txn[i] = block.vtx[req.indexes[i]];
    }

    LOCK(cs_main);
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    int nSendFlags = 0;
    connman.PushMessage(pfrom,
                        msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}

static bool ProcessMessage(const Config &config, CNode *pfrom,
                           const std::string &strCommand, CDataStream &vRecv,
                           int64_t nTimeReceived,
                           const CChainParams &chainparams, CConnman &connman,
                           const std::atomic<bool> &interruptMsgProc) {
    LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
             SanitizeString(strCommand), vRecv.size(), pfrom->id);
-    if (IsArgSet("-dropmessagestest") &&
-        GetRand(GetArg("-dropmessagestest", 0)) == 0) {
+    if (gArgs.IsArgSet("-dropmessagestest") &&
+        GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) {
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
        return true;
    }

    if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
        (strCommand == NetMsgType::FILTERLOAD ||
         strCommand == NetMsgType::FILTERADD)) {
        if (pfrom->nVersion >= NO_BLOOM_VERSION) {
            LOCK(cs_main);
            Misbehaving(pfrom, 100, "no-bloom-version");
            return false;
        } else {
            pfrom->fDisconnect = true;
            return false;
        }
    }

    if (strCommand == NetMsgType::REJECT) {
        if (LogAcceptCategory(BCLog::NET)) {
            try {
                std::string strMsg;
                uint8_t ccode;
                std::string strReason;
                vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >>
                    ccode >>
                    LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

                std::ostringstream ss;
                ss << strMsg << " code " << itostr(ccode) << ": " << strReason;

                if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
                    uint256 hash;
                    vRecv >> hash;
                    ss << ": hash " << hash.ToString();
                }
                LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
            } catch (const std::ios_base::failure &) {
                // Avoid feedback loops by preventing reject messages from
                // triggering a new reject message.
LogPrint(BCLog::NET, "Unparseable reject message received\n"); } } } else if (strCommand == NetMsgType::VERSION) { // Each connection can only send one version message if (pfrom->nVersion != 0) { connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message"))); LOCK(cs_main); Misbehaving(pfrom, 1, "multiple-version"); return false; } int64_t nTime; CAddress addrMe; CAddress addrFrom; uint64_t nNonce = 1; uint64_t nServiceInt; ServiceFlags nServices; int nVersion; int nSendVersion; std::string strSubVer; std::string cleanSubVer; int nStartingHeight = -1; bool fRelay = true; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nSendVersion = std::min(nVersion, PROTOCOL_VERSION); nServices = ServiceFlags(nServiceInt); if (!pfrom->fInbound) { connman.SetServices(pfrom->addr, nServices); } if (pfrom->nServicesExpected & ~nServices) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services " "(%08x offered, %08x expected); " "disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected); connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD, strprintf("Expected to offer services %08x", pfrom->nServicesExpected))); pfrom->fDisconnect = true; return false; } if (nVersion < MIN_PEER_PROTO_VERSION) { // disconnect from peers older than this proto version LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion); connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE, strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION))); pfrom->fDisconnect = true; return false; } if (!vRecv.empty()) { vRecv >> addrFrom >> nNonce; } if (!vRecv.empty()) { vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); cleanSubVer = SanitizeString(strSubVer); } if (!vRecv.empty()) { vRecv >> nStartingHeight; } if (!vRecv.empty()) { vRecv >> fRelay; } // Disconnect if we connected to ourself if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); pfrom->fDisconnect = true; return true; } if (pfrom->fInbound && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear if (pfrom->fInbound) { PushNodeVersion(config, pfrom, connman, GetAdjustedTime()); } connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); pfrom->nServices = nServices; pfrom->SetAddrLocal(addrMe); { LOCK(pfrom->cs_SubVer); pfrom->strSubVer = strSubVer; pfrom->cleanSubVer = cleanSubVer; } pfrom->nStartingHeight = nStartingHeight; pfrom->fClient = !(nServices & NODE_NETWORK); { LOCK(pfrom->cs_filter); // set to true after we get the first filter* message pfrom->fRelayTxes = fRelay; } // Change version pfrom->SetSendVersion(nSendVersion); pfrom->nVersion = nVersion; // Potentially mark this peer as a preferred download peer. 
{ LOCK(cs_main); UpdatePreferredDownload(pfrom, State(pfrom->GetId())); } if (!pfrom->fInbound) { // Advertise our address if (fListen && !IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices()); FastRandomContext insecure_rand; if (addr.IsRoutable()) { LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom->PushAddress(addr, insecure_rand); } else if (IsPeerAddrLocalGood(pfrom)) { addr.SetIP(addrMe); LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom->PushAddress(addr, insecure_rand); } } // Get recent addresses if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000) { connman.PushMessage( pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); pfrom->fGetAddr = true; } connman.MarkAddressGood(pfrom->addr); } std::string remoteAddr; if (fLogIPs) { remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); } LogPrintf("receive version message: [%s] %s: version %d, blocks=%d, " "us=%s, peer=%d%s\n", pfrom->addr.ToString().c_str(), cleanSubVer, pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, remoteAddr); int64_t nTimeOffset = nTime - GetTime(); pfrom->nTimeOffset = nTimeOffset; AddTimeData(pfrom->addr, nTimeOffset); // If the peer is old enough to have the old alert system, send it the // final alert. if (pfrom->nVersion <= 70012) { CDataStream finalAlert( ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffef" "fff7f01ffffff7f00000000ffffff7f00ffffff7f002f55524745" "4e543a20416c657274206b657920636f6d70726f6d697365642c2" "075706772616465207265717569726564004630440220653febd6" "410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3ab" "d5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fec" "aae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION); connman.PushMessage( pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert)); } // Feeler connections exist only to verify if address is online. if (pfrom->fFeeler) { assert(pfrom->fInbound == false); pfrom->fDisconnect = true; } return true; } else if (pfrom->nVersion == 0) { // Must have a version message before anything else LOCK(cs_main); Misbehaving(pfrom, 1, "missing-version"); return false; } // At this point, the outgoing message serialization version can't change. const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); if (strCommand == NetMsgType::VERACK) { pfrom->SetRecvVersion( std::min(pfrom->nVersion.load(), PROTOCOL_VERSION)); if (!pfrom->fInbound) { // Mark this node as currently connected, so we update its timestamp // later. LOCK(cs_main); State(pfrom->GetId())->fCurrentlyConnected = true; } if (pfrom->nVersion >= SENDHEADERS_VERSION) { // Tell our peer we prefer to receive headers rather than inv's // We send this to non-NODE NETWORK peers as well, because even // non-NODE NETWORK peers can announce blocks (such as pruning // nodes) connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); } if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 1 or 2 // cmpctblocks. However, we do not request new block announcements // using cmpctblock messages. We send this to non-NODE NETWORK peers // as well, because they may wish to request compact blocks from us. 
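// ----------------------------------------------------------------------------
// Standalone sketch of the sendcmpct negotiation happening around this point:
// after verack each side announces which compact-block version it speaks and
// whether it wants new blocks announced as cmpctblock; the receiver latches
// support the first time a version it understands arrives. The state struct
// below is hypothetical, a simplified stand-in for the CNodeState fields used
// by the real handler.
// ----------------------------------------------------------------------------
#include <cstdint>

struct CompactBlockState {
    bool providesHeaderAndIDs = false; // peer can serve us compact blocks
    bool preferHeaderAndIDs = false;   // peer wants announcements as cmpctblock
    uint64_t lockedInVersion = 0;

    // Called when a "sendcmpct" message arrives from the peer.
    void OnSendCmpct(bool announceUsingCmpctBlock, uint64_t version,
                     uint64_t supportedVersion) {
        if (version != supportedVersion) {
            // Unknown version: ignore it, as the handler above does.
            return;
        }
        if (!providesHeaderAndIDs) {
            providesHeaderAndIDs = true;
            lockedInVersion = version; // "lock in" the version we will use
        }
        preferHeaderAndIDs = announceUsingCmpctBlock;
    }
};
// ----------------------------------------------------------------------------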
bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); } pfrom->fSuccessfullyConnected = true; } else if (!pfrom->fSuccessfullyConnected) { // Must have a verack message before anything else LOCK(cs_main); Misbehaving(pfrom, 1, "missing-verack"); return false; } else if (strCommand == NetMsgType::ADDR) { std::vector vAddr; vRecv >> vAddr; // Don't want addr from older versions unless seeding if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000) { return true; } if (vAddr.size() > 1000) { LOCK(cs_main); Misbehaving(pfrom, 20, "oversized-addr"); return error("message addr size() = %u", vAddr.size()); } // Store the new addresses std::vector vAddrOk; int64_t nNow = GetAdjustedTime(); int64_t nSince = nNow - 10 * 60; for (CAddress &addr : vAddr) { if (interruptMsgProc) { return true; } if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) { continue; } if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) { addr.nTime = nNow - 5 * 24 * 60 * 60; } pfrom->AddAddressKnown(addr); bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes RelayAddress(addr, fReachable, connman); } // Do not store addresses outside our network if (fReachable) { vAddrOk.push_back(addr); } } connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60); if (vAddr.size() < 1000) { pfrom->fGetAddr = false; } if (pfrom->fOneShot) { pfrom->fDisconnect = true; } } else if (strCommand == NetMsgType::SENDHEADERS) { LOCK(cs_main); State(pfrom->GetId())->fPreferHeaders = true; } else if (strCommand == NetMsgType::SENDCMPCT) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 0; vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; if (nCMPCTBLOCKVersion == 1) { LOCK(cs_main); // fProvidesHeaderAndIDs is used to "lock in" version of compact // blocks we send. if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) { State(pfrom->GetId())->fProvidesHeaderAndIDs = true; } State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) { State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true; } } } else if (strCommand == NetMsgType::INV) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom, 20, "oversized-inv"); return error("message inv size() = %u", vInv.size()); } bool fBlocksOnly = !fRelayTxes; // Allow whitelisted peers to send data other than blocks in blocks only // mode if whitelistrelay is true if (pfrom->fWhitelisted && - GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) { + gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) { fBlocksOnly = false; } LOCK(cs_main); uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()); std::vector vToFetch; for (size_t nInv = 0; nInv < vInv.size(); nInv++) { CInv &inv = vInv[nInv]; if (interruptMsgProc) { return true; } bool fAlreadyHave = AlreadyHave(inv); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? 
"have" : "new", pfrom->id); if (inv.type == MSG_TX) { inv.type |= nFetchFlags; } if (inv.type == MSG_BLOCK) { UpdateBlockAvailability(pfrom->GetId(), inv.hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { // We used to request the full block here, but since // headers-announcements are now the primary method of // announcement on the network, and since, in the case that // a node fell back to inv we probably have a reorg which we // should get the headers for first, we now only provide a // getheaders response here. When we receive the headers, we // will then ask for the blocks we need. connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash)); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); } } else { pfrom->AddInventoryKnown(inv); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in " "violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id); } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) { pfrom->AskFor(inv); } } // Track requests for our stuff GetMainSignals().Inventory(inv.hash); } if (!vToFetch.empty()) { connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch)); } } else if (strCommand == NetMsgType::GETDATA) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom, 20, "too-many-inv"); return error("message getdata size() = %u", vInv.size()); } LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id); if (vInv.size() > 0) { LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); } pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); } else if (strCommand == NetMsgType::GETBLOCKS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; // We might have announced the currently-being-connected tip using a // compact block, which resulted in the peer sending a getblocks // request, which we would otherwise respond to without the new block. // To avoid this situation we simply verify that we are on our best // known chain now. This is super overkill, but we handle it better // for getheaders requests, and there are no known nodes which support // compact blocks but still use getblocks to request blocks. { std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } CValidationState dummy; ActivateBestChain(config, dummy, a_recent_block); } LOCK(cs_main); // Find the last block the caller has in the main chain const CBlockIndex *pindex = FindForkInGlobalIndex(chainActive, locator); // Send the rest of the chain if (pindex) { pindex = chainActive.Next(pindex); } int nLimit = 500; LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { if (pindex->GetBlockHash() == hashStop) { LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } // If pruning, don't inv blocks unless we have on disk and are // likely to still have for some reasonable time window (1 hour) // that block relay might require. 
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing; if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave)) { LogPrint( BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll // trigger the peer to getblocks the next batch of inventory. LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom->hashContinue = pindex->GetBlockHash(); break; } } } else if (strCommand == NetMsgType::GETBLOCKTXN) { BlockTransactionsRequest req; vRecv >> req; std::shared_ptr recent_block; { LOCK(cs_most_recent_block); if (most_recent_block_hash == req.blockhash) { recent_block = most_recent_block; } // Unlock cs_most_recent_block to avoid cs_main lock inversion } if (recent_block) { SendBlockTransactions(*recent_block, req, pfrom, connman); return true; } LOCK(cs_main); BlockMap::iterator it = mapBlockIndex.find(req.blockhash); if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) { LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id); return true; } if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { // If an older block is requested (should never happen in practice, // but can happen in tests) send a block response instead of a // blocktxn response. Sending a full block response instead of a // small blocktxn response is preferable in the case where a peer // might maliciously send lots of getblocktxn requests to trigger // expensive disk reads, because it will require the peer to // actually receive all the data read from disk over the network. LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH); CInv inv; inv.type = MSG_BLOCK; inv.hash = req.blockhash; pfrom->vRecvGetData.push_back(inv); ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); return true; } CBlock block; bool ret = ReadBlockFromDisk(block, it->second, config); assert(ret); SendBlockTransactions(block, req, pfrom, connman); } else if (strCommand == NetMsgType::GETHEADERS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; LOCK(cs_main); if (IsInitialBlockDownload() && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because " "node is in initial block download\n", pfrom->id); return true; } CNodeState *nodestate = State(pfrom->GetId()); const CBlockIndex *pindex = nullptr; if (locator.IsNull()) { // If locator is null, return the hashStop block BlockMap::iterator mi = mapBlockIndex.find(hashStop); if (mi == mapBlockIndex.end()) { return true; } pindex = (*mi).second; } else { // Find the last block the caller has in the main chain pindex = FindForkInGlobalIndex(chainActive, locator); if (pindex) { pindex = chainActive.Next(pindex); } } // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx // count at the end std::vector vHeaders; int nLimit = MAX_HEADERS_RESULTS; LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? 
"end" : hashStop.ToString(), pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) { break; } } // pindex can be nullptr either if we sent chainActive.Tip() OR // if our peer has chainActive.Tip() (and thus we are sending an empty // headers message). In both cases it's safe to update // pindexBestHeaderSent to be our tip. // // It is important that we simply reset the BestHeaderSent value here, // and not max(BestHeaderSent, newHeaderSent). We might have announced // the currently-being-connected tip using a compact block, which // resulted in the peer sending a headers request, which we respond to // without the new block. By resetting the BestHeaderSent, we ensure we // will re-announce the new block via headers (or compact blocks again) // in the SendMessages logic. nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip(); connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); } else if (strCommand == NetMsgType::TX) { // Stop processing the transaction early if // We are in blocks only mode and peer is either not whitelisted or // whitelistrelay is off if (!fRelayTxes && (!pfrom->fWhitelisted || - !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) { + !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->id); return true; } std::deque vWorkQueue; std::vector vEraseQueue; CTransactionRef ptx; vRecv >> ptx; const CTransaction &tx = *ptx; CInv inv(MSG_TX, tx.GetId()); pfrom->AddInventoryKnown(inv); LOCK(cs_main); bool fMissingInputs = false; CValidationState state; pfrom->setAskFor.erase(inv.hash); mapAlreadyAskedFor.erase(inv.hash); std::list lRemovedTxn; if (!AlreadyHave(inv) && AcceptToMemoryPool(config, mempool, state, ptx, true, &fMissingInputs, &lRemovedTxn)) { mempool.check(pcoinsTip); RelayTransaction(tx, connman); for (size_t i = 0; i < tx.vout.size(); i++) { vWorkQueue.emplace_back(inv.hash, i); } pfrom->nLastTXTime = GetTime(); LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s " "(poolsz %u txn, %u kB)\n", pfrom->id, tx.GetId().ToString(), mempool.size(), mempool.DynamicMemoryUsage() / 1000); // Recursively process any orphan transactions that depended on this // one std::set setMisbehaving; while (!vWorkQueue.empty()) { auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front()); vWorkQueue.pop_front(); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransactionRef &porphanTx = (*mi)->second.tx; const CTransaction &orphanTx = *porphanTx; const uint256 &orphanId = orphanTx.GetId(); NodeId fromPeer = (*mi)->second.fromPeer; bool fMissingInputs2 = false; // Use a dummy CValidationState so someone can't setup nodes // to counter-DoS based on orphan resolution (that is, // feeding people an invalid transaction based on LegitTxX // in order to get anyone relaying LegitTxX banned) CValidationState stateDummy; if (setMisbehaving.count(fromPeer)) { continue; } if (AcceptToMemoryPool(config, mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanId.ToString()); RelayTransaction(orphanTx, connman); for (size_t i = 0; i < orphanTx.vout.size(); i++) { vWorkQueue.emplace_back(orphanId, i); } vEraseQueue.push_back(orphanId); } else if 
(!fMissingInputs2) { int nDos = 0; if (stateDummy.IsInvalid(nDos) && nDos > 0) { // Punish peer that gave us an invalid orphan tx Misbehaving(fromPeer, nDos, "invalid-orphan-tx"); setMisbehaving.insert(fromPeer); LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanId.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee/priority LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanId.ToString()); vEraseQueue.push_back(orphanId); if (!stateDummy.CorruptionPossible()) { // Do not use rejection cache for witness // transactions or witness-stripped transactions, as // they can have been malleated. See // https://github.com/bitcoin/bitcoin/issues/8279 // for details. assert(recentRejects); recentRejects->insert(orphanId); } } mempool.check(pcoinsTip); } } for (uint256 hash : vEraseQueue) { EraseOrphanTx(hash); } } else if (fMissingInputs) { // It may be the case that the orphans parents have all been // rejected. bool fRejectedParents = false; for (const CTxIn &txin : tx.vin) { if (recentRejects->contains(txin.prevout.hash)) { fRejectedParents = true; break; } } if (!fRejectedParents) { uint32_t nFetchFlags = GetFetchFlags( pfrom, chainActive.Tip(), chainparams.GetConsensus()); for (const CTxIn &txin : tx.vin) { CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); pfrom->AddInventoryKnown(_inv); if (!AlreadyHave(_inv)) { pfrom->AskFor(_inv); } } AddOrphanTx(ptx, pfrom->GetId()); // DoS prevention: do not allow mapOrphanTransactions to grow // unbounded unsigned int nMaxOrphanTx = (unsigned int)std::max( int64_t(0), - GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); + gArgs.GetArg("-maxorphantx", + DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); if (nEvicted > 0) { LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted); } } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n", tx.GetId().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. recentRejects->insert(tx.GetId()); } } else { if (!state.CorruptionPossible()) { // Do not use rejection cache for witness transactions or // witness-stripped transactions, as they can have been // malleated. See https://github.com/bitcoin/bitcoin/issues/8279 // for details. assert(recentRejects); recentRejects->insert(tx.GetId()); if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } } if (pfrom->fWhitelisted && - GetBoolArg("-whitelistforcerelay", - DEFAULT_WHITELISTFORCERELAY)) { + gArgs.GetBoolArg("-whitelistforcerelay", + DEFAULT_WHITELISTFORCERELAY)) { // Always relay transactions received from whitelisted peers, // even if they were already in the mempool or rejected from it // due to policy, allowing the node to function as a gateway for // nodes hidden behind it. // // Never relay transactions that we would assign a non-zero DoS // score for, as we expect peers to do the same with us in that // case. 
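// ----------------------------------------------------------------------------
// Standalone sketch of the orphan-pool cap enforced above through -maxorphantx
// and LimitOrphanTxSize(): once the pool of parentless transactions exceeds
// the configured limit, uniformly random entries are evicted so a peer cannot
// grow the map without bound. The OrphanPool type below is hypothetical and
// stores a placeholder payload instead of real transactions.
// ----------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>
#include <map>
#include <random>
#include <string>

using TxId = uint64_t;

struct OrphanPool {
    std::map<TxId, std::string> entries; // txid -> serialized tx (stand-in)
    std::mt19937_64 rng{0x6f727068616e73ULL};

    // Evict random orphans until the pool is within the limit; returns the
    // number of evicted entries.
    unsigned int LimitSize(size_t maxOrphans) {
        unsigned int evicted = 0;
        while (entries.size() > maxOrphans) {
            // Pick a random key and erase the first entry at or after it,
            // wrapping to begin() if the probe lands past the last key.
            auto it = entries.lower_bound(rng());
            if (it == entries.end()) {
                it = entries.begin();
            }
            entries.erase(it);
            ++evicted;
        }
        return evicted;
    }
};
// ----------------------------------------------------------------------------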
int nDoS = 0; if (!state.IsInvalid(nDoS) || nDoS == 0) { LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetId().ToString(), pfrom->id); RelayTransaction(tx, connman); } else { LogPrintf("Not relaying invalid transaction %s from " "whitelisted peer=%d (%s)\n", tx.GetId().ToString(), pfrom->id, FormatStateMessage(state)); } } } for (const CTransactionRef &removedTx : lRemovedTxn) { AddToCompactExtraTransactions(removedTx); } int nDoS = 0; if (state.IsInvalid(nDoS)) { LogPrint( BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state)); // Never send AcceptToMemoryPool's internal codes over P2P. if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, uint8_t(state.GetRejectCode()), state.GetRejectReason().substr( 0, MAX_REJECT_MESSAGE_LENGTH), inv.hash)); } if (nDoS > 0) { Misbehaving(pfrom, nDoS, state.GetRejectReason()); } } } // Ignore blocks received while importing else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) { CBlockHeaderAndShortTxIDs cmpctblock; vRecv >> cmpctblock; { LOCK(cs_main); if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) { // Doesn't connect (or is genesis), instead of DoSing in // AcceptBlockHeader, request deeper headers if (!IsInitialBlockDownload()) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); } return true; } } const CBlockIndex *pindex = nullptr; CValidationState state; if (!ProcessNewBlockHeaders(config, {cmpctblock.header}, state, &pindex)) { int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) { LOCK(cs_main); Misbehaving(pfrom, nDoS, state.GetRejectReason()); } LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id); return true; } } // When we succeed in decoding a block's txids from a cmpctblock // message we typically jump to the BLOCKTXN handling code, with a // dummy (empty) BLOCKTXN message, to re-use the logic there in // completing processing of the putative block (without cs_main). bool fProcessBLOCKTXN = false; CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); // If we end up treating this as a plain headers message, call that as // well // without cs_main. bool fRevertToHeaderProcessing = false; CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION); // Keep a CBlock for "optimistic" compactblock reconstructions (see // below) std::shared_ptr pblock = std::make_shared(); bool fBlockReconstructed = false; { LOCK(cs_main); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); std::map::iterator>>:: iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); if (pindex->nStatus & BLOCK_HAVE_DATA) { // Nothing to do here return true; } if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better pindex->nTx != 0) { // We had this block at some point, but pruned it if (fAlreadyInFlight) { // We requested this block for some reason, but our mempool // will probably be useless so we just grab the block via // normal getdata. 
std::vector vInv(1); vInv[0] = CInv( MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return true; } // If we're not close to tip yet, give up and let parallel block // fetch work its magic. if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) { return true; } CNodeState *nodestate = State(pfrom->GetId()); // We want to be a bit conservative just to be extra careful about // DoS possibilities in compact block processing... if (pindex->nHeight <= chainActive.Height() + 2) { if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) { std::list::iterator *queuedBlockIt = nullptr; if (!MarkBlockAsInFlight(config, pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) { (*queuedBlockIt) ->partialBlock.reset( new PartiallyDownloadedBlock(config, &mempool)); } else { // The block was already in flight using compact // blocks from the same peer. LogPrint(BCLog::NET, "Peer sent us compact block " "we were already syncing!\n"); return true; } } PartiallyDownloadedBlock &partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist MarkBlockAsReceived(pindex->GetBlockHash()); Misbehaving(pfrom, 100, "invalid-cmpctblk"); LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id); return true; } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so // just request it. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return true; } BlockTransactionsRequest req; for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { if (!partialBlock.IsTxAvailable(i)) { req.indexes.push_back(i); } } if (req.indexes.empty()) { // Dirty hack to jump to BLOCKTXN code (TODO: move // message handling into their own functions) BlockTransactions txn; txn.blockhash = cmpctblock.header.GetHash(); blockTxnMsg << txn; fProcessBLOCKTXN = true; } else { req.blockhash = pindex->GetBlockHash(); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); } } else { // This block is either already in flight from a different // peer, or this peer has too many blocks outstanding to // download from. Optimistically try to reconstruct anyway // since we might be able to without any round trips. PartiallyDownloadedBlock tempBlock(config, &mempool); ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status != READ_STATUS_OK) { // TODO: don't ignore failures return true; } std::vector dummy; status = tempBlock.FillBlock(*pblock, dummy); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } } } else { if (fAlreadyInFlight) { // We requested this block, but its far into the future, so // our mempool will probably be useless - request the block // normally. 
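// ----------------------------------------------------------------------------
// Standalone sketch of the request assembled just above: after prefilling a
// compact block from the mempool and extra-transaction cache, collect the
// indexes of the transactions still missing. An empty list means the block can
// be reconstructed immediately; otherwise exactly those indexes go into a
// getblocktxn request. The helper is hypothetical, a stand-in for the
// PartiallyDownloadedBlock / BlockTransactionsRequest interplay above.
// ----------------------------------------------------------------------------
#include <cstddef>
#include <vector>

static std::vector<size_t>
MissingTxIndexes(const std::vector<bool> &txAvailable) {
    std::vector<size_t> missing;
    for (size_t i = 0; i < txAvailable.size(); i++) {
        if (!txAvailable[i]) {
            missing.push_back(i);
        }
    }
    return missing;
}
// ----------------------------------------------------------------------------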
std::vector vInv(1); vInv[0] = CInv( MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return true; } else { // If this was an announce-cmpctblock, we want the same // treatment as a header message. Dirty hack to process as // if it were just a headers message (TODO: move message // handling into their own functions) std::vector headers; headers.push_back(cmpctblock.header); vHeadersMsg << headers; fRevertToHeaderProcessing = true; } } } // cs_main if (fProcessBLOCKTXN) { return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc); } if (fRevertToHeaderProcessing) { return ProcessMessage(config, pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc); } if (fBlockReconstructed) { // If we got here, we were able to optimistically reconstruct a // block that is in flight from some other peer. { LOCK(cs_main); mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false)); } bool fNewBlock = false; ProcessNewBlock(config, pblock, true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } // hold cs_main for CBlockIndex::IsValid() LOCK(cs_main); if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { // Clear download state for this block, which is in process from // some other peer. We do this after calling. ProcessNewBlock so // that a malleated cmpctblock announcement can't be used to // interfere with block relay. MarkBlockAsReceived(pblock->GetHash()); } } } else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing { BlockTransactions resp; vRecv >> resp; std::shared_ptr pblock = std::make_shared(); bool fBlockRead = false; { LOCK(cs_main); std::map::iterator>>:: iterator it = mapBlocksInFlight.find(resp.blockhash); if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || it->second.first != pfrom->GetId()) { LogPrint(BCLog::NET, "Peer %d sent us block transactions for block " "we weren't expecting\n", pfrom->id); return true; } PartiallyDownloadedBlock &partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist. MarkBlockAsReceived(resp.blockhash); Misbehaving(pfrom, 100, "invalid-cmpctblk-txns"); LogPrintf("Peer %d sent us invalid compact block/non-matching " "block transactions\n", pfrom->id); return true; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector invs; invs.push_back( CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash)); connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. // Note that CheckBlock can only fail for one of a few reasons: // 1. bad-proof-of-work (impossible here, because we've already // accepted the header) // 2. merkleroot doesn't match the transactions given (already // caught in FillBlock with READ_STATUS_FAILED, so // impossible here) // 3. the block is otherwise invalid (eg invalid coinbase, // block is too big, too many legacy sigops, etc). // So if CheckBlock failed, #3 is the only possibility. 
// Under BIP 152, we don't DoS-ban unless proof of work is // invalid (we don't require all the stateless checks to have // been run). This is handled below, so just treat this as // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, reject messages go out, etc. // it is now an empty pointer MarkBlockAsReceived(resp.blockhash); fBlockRead = true; // mapBlockSource is only used for sending reject messages and // DoS scores, so the race between here and cs_main in // ProcessNewBlock is fine. BIP 152 permits peers to relay // compact blocks after validating the header only; we should // not punish peers if the block turns out to be invalid. mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false)); } } // Don't hold cs_main when we call into ProcessNewBlock if (fBlockRead) { bool fNewBlock = false; // Since we requested this block (it was in mapBlocksInFlight), // force it to be processed, even if it would not be a candidate for // new tip (missing previous block, chain not long enough, etc) ProcessNewBlock(config, pblock, true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } } } // Ignore headers received while importing else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) { std::vector headers; // Bypass the normal CBlock deserialization, as we don't want to risk // deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { LOCK(cs_main); Misbehaving(pfrom, 20, "too-many-headers"); return error("headers message size = %u", nCount); } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { vRecv >> headers[n]; // Ignore tx count; assume it is 0. ReadCompactSize(vRecv); } if (nCount == 0) { // Nothing interesting. Stop asking this peers for more headers. return true; } const CBlockIndex *pindexLast = nullptr; { LOCK(cs_main); CNodeState *nodestate = State(pfrom->GetId()); // If this looks like it could be a block announcement (nCount < // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers // that // don't connect: // - Send a getheaders message in response to try to connect the // chain. // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that // don't connect before giving DoS points // - Once a headers message is received that is valid and does // connect, // nUnconnectingHeaders gets reset back to 0. if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); LogPrint(BCLog::NET, "received header %s: missing prev block " "%s, sending getheaders (%d) to end " "(peer=%d, nUnconnectingHeaders=%d)\n", headers[0].GetHash().ToString(), headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight, pfrom->id, nodestate->nUnconnectingHeaders); // Set hashLastUnknownBlock for this peer, so that if we // eventually get the headers - even from a different peer - // we can use this peer to download. UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { // The peer is sending us many headers we can't connect. 
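// ----------------------------------------------------------------------------
// Standalone sketch of the nUnconnectingHeaders bookkeeping used above: every
// headers message that does not connect to our block index bumps a per-peer
// counter and triggers another getheaders; each time the counter hits a
// multiple of the limit the peer is penalized, and any headers message that
// finally connects resets the counter to zero. The tracker type and the limit
// value are illustrative.
// ----------------------------------------------------------------------------
struct UnconnectingHeadersTracker {
    static constexpr int kMaxUnconnectingHeaders = 10; // illustrative value
    int unconnecting = 0;

    // Returns true if this miss should cost the peer DoS points.
    bool OnUnconnectingHeaders() {
        ++unconnecting;
        return unconnecting % kMaxUnconnectingHeaders == 0;
    }

    void OnConnectingHeaders() { unconnecting = 0; }
};
// ----------------------------------------------------------------------------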
Misbehaving(pfrom, 20, "too-many-unconnected-headers"); } return true; } uint256 hashLastBlock; for (const CBlockHeader &header : headers) { if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { Misbehaving(pfrom, 20, "disconnected-header"); return error("non-continuous headers sequence"); } hashLastBlock = header.GetHash(); } } CValidationState state; if (!ProcessNewBlockHeaders(config, headers, state, &pindexLast)) { int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) { LOCK(cs_main); Misbehaving(pfrom, nDoS, state.GetRejectReason()); } return error("invalid header received"); } } { LOCK(cs_main); CNodeState *nodestate = State(pfrom->GetId()); if (nodestate->nUnconnectingHeaders > 0) { LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders); } nodestate->nUnconnectingHeaders = 0; assert(pindexLast); UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); if (nCount == MAX_HEADERS_RESULTS) { // Headers message had its maximum size; the peer may have more // headers. // TODO: optimize: if pindexLast is an ancestor of // chainActive.Tip or pindexBestHeader, continue from there // instead. LogPrint( BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256())); } bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); // If this set of headers is valid and ends in a block with at least // as much work as our tip, download as much as possible. if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { std::vector vToFetch; const CBlockIndex *pindexWalk = pindexLast; // Calculate all the blocks we'd need to switch to pindexLast, // up to a limit. while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { // We don't have this block, and it's not yet in flight. vToFetch.push_back(pindexWalk); } pindexWalk = pindexWalk->pprev; } // If pindexWalk still isn't on our main chain, we're looking at // a very large reorg at a time we think we're close to caught // up to the main chain -- this shouldn't really happen. Bail // out on the direct fetch and rely on parallel download // instead. if (!chainActive.Contains(pindexWalk)) { LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } else { std::vector vGetData; // Download as much as possible, from earliest to latest. 
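// ----------------------------------------------------------------------------
// Standalone sketch of the direct-fetch selection done here: walk parent links
// back from the last announced header, collecting blocks we neither have nor
// already have in flight (up to the per-peer transit limit), then reverse the
// list so the loop below can request them earliest-first and connect them in
// order. The tiny index struct is hypothetical.
// ----------------------------------------------------------------------------
#include <algorithm>
#include <cstddef>
#include <vector>

struct HeaderIndexEntry {
    const HeaderIndexEntry *parent = nullptr;
    bool onActiveChain = false;
    bool haveData = false;
    bool inFlight = false;
};

static std::vector<const HeaderIndexEntry *>
BlocksToDirectFetch(const HeaderIndexEntry *announcedTip, size_t maxInTransit) {
    std::vector<const HeaderIndexEntry *> toFetch;
    const HeaderIndexEntry *walk = announcedTip;
    while (walk && !walk->onActiveChain && toFetch.size() <= maxInTransit) {
        if (!walk->haveData && !walk->inFlight) {
            toFetch.push_back(walk);
        }
        walk = walk->parent;
    }
    // Collected newest-first while walking back; download earliest-first.
    std::reverse(toFetch.begin(), toFetch.end());
    return toFetch;
}
// ----------------------------------------------------------------------------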
for (const CBlockIndex *pindex : boost::adaptors::reverse(vToFetch)) { if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { // Can't download any more from this peer break; } uint32_t nFetchFlags = GetFetchFlags( pfrom, pindex->pprev, chainparams.GetConsensus()); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); MarkBlockAsInFlight(config, pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom->id); } if (vGetData.size() > 1) { LogPrint(BCLog::NET, "Downloading blocks toward %s " "(%d) via headers direct fetch\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } if (vGetData.size() > 0) { if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { // In any case, we want to download using a compact // block, not a regular one. vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } } } } else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing { std::shared_ptr pblock = std::make_shared(); vRecv >> *pblock; LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id); // Process all blocks from whitelisted peers, even if not requested, // unless we're still syncing with the network. Such an unrequested // block may still be processed, subject to the conditions in // AcceptBlock(). bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); const uint256 hash(pblock->GetHash()); { LOCK(cs_main); // Also always process if we requested the block explicitly, as we // may need it even though it is not a candidate for a new best tip. forceProcessing |= MarkBlockAsReceived(hash); // mapBlockSource is only used for sending reject messages and DoS // scores, so the race between here and cs_main in ProcessNewBlock // is fine. mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true)); } bool fNewBlock = false; ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } } else if (strCommand == NetMsgType::GETADDR) { // This asymmetric behavior for inbound and outbound connections was // introduced to prevent a fingerprinting attack: an attacker can send // specific fake addresses to users' AddrMan and later request them by // sending getaddr messages. Making nodes which are behind NAT and can // only make outgoing connections ignore the getaddr message mitigates // the attack. if (!pfrom->fInbound) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id); return true; } // Only send one GetAddr response per connection to reduce resource // waste and discourage addr stamping of INV announcements. if (pfrom->fSentAddr) { LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". 
peer=%d\n", pfrom->id); return true; } pfrom->fSentAddr = true; pfrom->vAddrToSend.clear(); std::vector vAddr = connman.GetAddresses(); FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { pfrom->PushAddress(addr, insecure_rand); } } else if (strCommand == NetMsgType::MEMPOOL) { if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "mempool request with bloom filters disabled, " "disconnect peer=%d\n", pfrom->GetId()); pfrom->fDisconnect = true; return true; } if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "mempool request with bandwidth limit " "reached, disconnect peer=%d\n", pfrom->GetId()); pfrom->fDisconnect = true; return true; } LOCK(pfrom->cs_inventory); pfrom->fSendMempool = true; } else if (strCommand == NetMsgType::PING) { if (pfrom->nVersion > BIP0031_VERSION) { uint64_t nonce = 0; vRecv >> nonce; // Echo the message back with the nonce. This allows for two useful // features: // // 1) A remote node can quickly check if the connection is // operational. // 2) Remote nodes can measure the latency of the network thread. If // this node is overloaded it won't respond to pings quickly and the // remote node can avoid sending us more work, like chain download // requests. // // The nonce stops the remote getting confused between different // pings: without it, if the remote node sends a ping once per // second and this node takes 5 seconds to respond to each, the 5th // ping the remote sends would appear to return very quickly. connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); } } else if (strCommand == NetMsgType::PONG) { int64_t pingUsecEnd = nTimeReceived; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; std::string sProblem; if (nAvail >= sizeof(nonce)) { vRecv >> nonce; // Only process pong message if there is an outstanding ping (old // ping without nonce should never pong) if (pfrom->nPingNonceSent != 0) { if (nonce == pfrom->nPingNonceSent) { // Matching pong received, this ping is no longer // outstanding bPingFinished = true; int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; if (pingUsecTime > 0) { // Successful ping time measurement, replace previous pfrom->nPingUsecTime = pingUsecTime; pfrom->nMinPingUsecTime = std::min( pfrom->nMinPingUsecTime.load(), pingUsecTime); } else { // This should never happen sProblem = "Timing mishap"; } } else { // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { // This is most likely a bug in another implementation // somewhere; cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } } } else { sProblem = "Unsolicited pong without ping"; } } else { // This is most likely a bug in another implementation somewhere; // cancel this ping bPingFinished = true; sProblem = "Short payload"; } if (!(sProblem.empty())) { LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom->id, sProblem, pfrom->nPingNonceSent, nonce, nAvail); } if (bPingFinished) { pfrom->nPingNonceSent = 0; } } else if (strCommand == NetMsgType::FILTERLOAD) { CBloomFilter filter; vRecv >> filter; if (!filter.IsWithinSizeConstraints()) { // There is no excuse for sending a too-large filter LOCK(cs_main); Misbehaving(pfrom, 100, "oversized-bloom-filter"); } else { LOCK(pfrom->cs_filter); delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(filter); pfrom->pfilter->UpdateEmptyFull(); pfrom->fRelayTxes = true; } } else if 
(strCommand == NetMsgType::FILTERADD) { std::vector vData; vRecv >> vData; // Nodes must NEVER send a data item > 520 bytes (the max size for a // script data object, and thus, the maximum size any matched object can // have) in a filteradd message. bool bad = false; if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { bad = true; } else { LOCK(pfrom->cs_filter); if (pfrom->pfilter) { pfrom->pfilter->insert(vData); } else { bad = true; } } if (bad) { LOCK(cs_main); // The structure of this code doesn't really allow for a good error // code. We'll go generic. Misbehaving(pfrom, 100, "invalid-filteradd"); } } else if (strCommand == NetMsgType::FILTERCLEAR) { LOCK(pfrom->cs_filter); if (pfrom->GetLocalServices() & NODE_BLOOM) { delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(); } pfrom->fRelayTxes = true; } else if (strCommand == NetMsgType::FEEFILTER) { Amount newFeeFilter(0); vRecv >> newFeeFilter; if (MoneyRange(newFeeFilter)) { { LOCK(pfrom->cs_feeFilter); pfrom->minFeeFilter = newFeeFilter; } LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id); } } else if (strCommand == NetMsgType::NOTFOUND) { // We do not care about the NOTFOUND message, but logging an Unknown // Command message would be undesirable as we transmit it ourselves. } else { // Ignore unknown commands for extensibility LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id); } return true; } static bool SendRejectsAndCheckIfBanned(CNode *pnode, CConnman &connman) { AssertLockHeld(cs_main); CNodeState &state = *State(pnode->GetId()); for (const CBlockReject &reject : state.rejects) { connman.PushMessage( pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK), reject.chRejectCode, reject.strRejectReason, reject.hashBlock)); } state.rejects.clear(); if (state.fShouldBan) { state.fShouldBan = false; if (pnode->fWhitelisted) { LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString()); } else if (pnode->fAddnode) { LogPrintf("Warning: not punishing addnoded peer %s!\n", pnode->addr.ToString()); } else { pnode->fDisconnect = true; if (pnode->addr.IsLocal()) { LogPrintf("Warning: not banning local peer %s!\n", pnode->addr.ToString()); } else { connman.Ban(pnode->addr, BanReasonNodeMisbehaving); } } return true; } return false; } bool ProcessMessages(const Config &config, CNode *pfrom, CConnman &connman, const std::atomic &interruptMsgProc) { const CChainParams &chainparams = Params(); // // Message format // (4) message start // (12) command // (4) size // (4) checksum // (x) data // bool fMoreWork = false; if (!pfrom->vRecvGetData.empty()) { ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); } if (pfrom->fDisconnect) { return false; } // this maintains the order of responses if (!pfrom->vRecvGetData.empty()) { return true; } // Don't bother if send buffer is too full to respond anyway if (pfrom->fPauseSend) { return false; } std::list msgs; { LOCK(pfrom->cs_vProcessMsg); if (pfrom->vProcessMsg.empty()) { return false; } // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE; pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman.GetReceiveFloodSize(); fMoreWork = !pfrom->vProcessMsg.empty(); } CNetMessage &msg(msgs.front()); msg.SetVersion(pfrom->GetRecvVersion()); // Scan for message start if 
(memcmp(std::begin(msg.hdr.pchMessageStart), std::begin(chainparams.NetMagic()), CMessageHeader::MESSAGE_START_SIZE) != 0) { LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id); pfrom->fDisconnect = true; return false; } // Read header CMessageHeader &hdr = msg.hdr; if (!hdr.IsValid(chainparams.NetMagic())) { LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); return fMoreWork; } std::string strCommand = hdr.GetCommand(); // Message size unsigned int nMessageSize = hdr.nMessageSize; // Checksum CDataStream &vRecv = msg.vRecv; const uint256 &hash = msg.GetMessageHash(); if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { LogPrintf( "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, SanitizeString(strCommand), nMessageSize, HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE), HexStr(hdr.pchChecksum, hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE)); return fMoreWork; } // Process message bool fRet = false; try { fRet = ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc); if (interruptMsgProc) { return false; } if (!pfrom->vRecvGetData.empty()) { fMoreWork = true; } } catch (const std::ios_base::failure &e) { connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message"))); if (strstr(e.what(), "end of data")) { // Allow exceptions from under-length message on vRecv LogPrintf( "%s(%s, %u bytes): Exception '%s' caught, normally caused by a " "message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else if (strstr(e.what(), "size too large")) { // Allow exceptions from over-long size LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else if (strstr(e.what(), "non-canonical ReadCompactSize()")) { // Allow exceptions from non-canonical encoding LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else { PrintExceptionContinue(&e, "ProcessMessages()"); } } catch (const std::exception &e) { PrintExceptionContinue(&e, "ProcessMessages()"); } catch (...) { PrintExceptionContinue(nullptr, "ProcessMessages()"); } if (!fRet) { LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id); } LOCK(cs_main); SendRejectsAndCheckIfBanned(pfrom, connman); return fMoreWork; } class CompareInvMempoolOrder { CTxMemPool *mp; public: CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; } bool operator()(std::set::iterator a, std::set::iterator b) { /* As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. */ return mp->CompareDepthAndScore(*b, *a); } }; bool SendMessages(const Config &config, CNode *pto, CConnman &connman, const std::atomic &interruptMsgProc) { const Consensus::Params &consensusParams = Params().GetConsensus(); // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) { return true; } // If we get here, the outgoing message serialization version is set and // can't change. 
const CNetMsgMaker msgMaker(pto->GetSendVersion()); // // Message: ping // bool pingSend = false; if (pto->fPingQueued) { // RPC ping request by user pingSend = true; } if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { // Ping automatically sent as a latency probe & keepalive. pingSend = true; } if (pingSend) { uint64_t nonce = 0; while (nonce == 0) { GetRandBytes((uint8_t *)&nonce, sizeof(nonce)); } pto->fPingQueued = false; pto->nPingUsecStart = GetTimeMicros(); if (pto->nVersion > BIP0031_VERSION) { pto->nPingNonceSent = nonce; connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); } else { // Peer is too old to support ping command with nonce, pong will // never arrive. pto->nPingNonceSent = 0; connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); } } // Acquire cs_main for IsInitialBlockDownload() and CNodeState() TRY_LOCK(cs_main, lockMain); if (!lockMain) { return true; } if (SendRejectsAndCheckIfBanned(pto, connman)) { return true; } CNodeState &state = *State(pto->GetId()); // Address refresh broadcast int64_t nNow = GetTimeMicros(); if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) { AdvertiseLocal(pto); pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // // Message: addr // if (pto->nNextAddrSend < nNow) { pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL); std::vector vAddr; vAddr.reserve(pto->vAddrToSend.size()); for (const CAddress &addr : pto->vAddrToSend) { if (!pto->addrKnown.contains(addr.GetKey())) { pto->addrKnown.insert(addr.GetKey()); vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); vAddr.clear(); } } } pto->vAddrToSend.clear(); if (!vAddr.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); } // we only send the big addr message once if (pto->vAddrToSend.capacity() > 40) { pto->vAddrToSend.shrink_to_fit(); } } // Start block sync if (pindexBestHeader == nullptr) { pindexBestHeader = chainActive.Tip(); } // Download if this is a nice peer, or we have no nice peers and this one // might do. bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close // to today. if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; nSyncStarted++; const CBlockIndex *pindexStart = pindexBestHeader; /** * If possible, start at the block preceding the currently best * known header. This ensures that we always get a non-empty list of * headers back as long as the peer is up-to-date. With a non-empty * response, we can initialise the peer's known best block. This * wouldn't be possible if we requested starting at pindexBestHeader * and got back an empty response. 
*/ if (pindexStart->pprev) { pindexStart = pindexStart->pprev; } LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256())); } } // Resend wallet transactions that haven't gotten in a block yet // Except during reindex, importing and IBD, when old wallet transactions // become unconfirmed and spams other nodes. if (!fReindex && !fImporting && !IsInitialBlockDownload()) { GetMainSignals().Broadcast(nTimeBestReceived, &connman); } // // Try sending block announcements via headers // { // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block // hashes we're relaying, and our peer wants headers announcements, then // find the first header not yet known to our peer but would connect, // and send. If no header would connect, or if we have too many blocks, // or if the peer doesn't want headers, just add all to the inv queue. LOCK(pto->cs_inventory); std::vector vHeaders; bool fRevertToInv = ((!state.fPreferHeaders && (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); // last header queued for delivery const CBlockIndex *pBestIndex = nullptr; // ensure pindexBestKnownBlock is up-to-date ProcessBlockAvailability(pto->id); if (!fRevertToInv) { bool fFoundStartingHeader = false; // Try to find first header that our peer doesn't have, and then // send all headers past that one. If we come across an headers that // aren't on chainActive, give up. for (const uint256 &hash : pto->vBlockHashesToAnnounce) { BlockMap::iterator mi = mapBlockIndex.find(hash); assert(mi != mapBlockIndex.end()); const CBlockIndex *pindex = mi->second; if (chainActive[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block fRevertToInv = true; break; } if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { // This means that the list of blocks to announce don't // connect to each other. This shouldn't really be possible // to hit during regular operation (because reorgs should // take us to a chain that has some block not on the prior // chain, which should be caught by the prior check), but // one way this could happen is by using invalidateblock / // reconsiderblock repeatedly on the tip, causing it to be // added multiple times to vBlockHashesToAnnounce. Robustly // deal with this rare situation by reverting to an inv. fRevertToInv = true; break; } pBestIndex = pindex; if (fFoundStartingHeader) { // add this to the headers message vHeaders.push_back(pindex->GetBlockHeader()); } else if (PeerHasHeader(&state, pindex)) { // Keep looking for the first new block. continue; } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { // Peer doesn't have this header but they do have the prior // one. // Start sending headers. fFoundStartingHeader = true; vHeaders.push_back(pindex->GetBlockHeader()); } else { // Peer doesn't have this header or the prior one -- // nothing will connect, so bail out. fRevertToInv = true; break; } } } if (!fRevertToInv && !vHeaders.empty()) { if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { // We only send up to 1 block as header-and-ids, as otherwise // probably means we're doing an initial-ish-sync or they're // slow. 
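// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch. The announcement loop above only
// builds a HEADERS message if it finds a contiguous run of blocks the peer
// can connect; otherwise it falls back to a plain inv of the tip. A
// simplified standalone analogue, with ints standing in for block indexes,
// `peerHas` standing in for PeerHasHeader, and the assumption that the input
// list is ordered oldest-to-newest with each entry the parent of the next
// (names ending in "Sketch" are hypothetical):
#include <cstddef>
#include <functional>
#include <vector>

struct AnnouncePlanSketch {
    bool revertToInv = false;
    std::vector<int> headersToSend;
};

static AnnouncePlanSketch PlanAnnouncementSketch(
    const std::vector<int> &toAnnounce,
    const std::function<bool(int)> &peerHas) {
    AnnouncePlanSketch plan;
    bool foundStart = false;
    for (size_t i = 0; i < toAnnounce.size(); ++i) {
        int block = toAnnounce[i];
        if (foundStart) {
            plan.headersToSend.push_back(block);
        } else if (peerHas(block)) {
            // Keep looking for the first block the peer is missing.
            continue;
        } else if (i == 0 || peerHas(toAnnounce[i - 1])) {
            // Peer lacks this block but has its parent: headers will connect.
            foundStart = true;
            plan.headersToSend.push_back(block);
        } else {
            // Neither this block nor its parent is known: nothing connects,
            // so fall back to inv-ing the tip.
            plan.revertToInv = true;
            plan.headersToSend.clear();
            break;
        }
    }
    return plan;
}
// ---------------------------------------------------------------------------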
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->id); int nSendFlags = 0; bool fGotBlockFromCache = false; { LOCK(cs_most_recent_block); if (most_recent_block_hash == pBestIndex->GetBlockHash()) { CBlockHeaderAndShortTxIDs cmpctblock( *most_recent_block); connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); fGotBlockFromCache = true; } } if (!fGotBlockFromCache) { CBlock block; bool ret = ReadBlockFromDisk(block, pBestIndex, config); assert(ret); CBlockHeaderAndShortTxIDs cmpctblock(block); connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } state.pindexBestHeaderSent = pBestIndex; } else if (state.fPreferHeaders) { if (vHeaders.size() > 1) { LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, vHeaders.size(), vHeaders.front().GetHash().ToString(), vHeaders.back().GetHash().ToString(), pto->id); } else { LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->id); } connman.PushMessage( pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); state.pindexBestHeaderSent = pBestIndex; } else { fRevertToInv = true; } } if (fRevertToInv) { // If falling back to using an inv, just try to inv the tip. The // last entry in vBlockHashesToAnnounce was our tip at some point in // the past. if (!pto->vBlockHashesToAnnounce.empty()) { const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); assert(mi != mapBlockIndex.end()); const CBlockIndex *pindex = mi->second; // Warn if we're announcing a block that is not on the main // chain. This should be very rare and could be optimized out. // Just log for now. if (chainActive[pindex->nHeight] != pindex) { LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString()); } // If the peer's chain has this block, don't inv it back. if (!PeerHasHeader(&state, pindex)) { pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, pto->id, hashToAnnounce.ToString()); } } } pto->vBlockHashesToAnnounce.clear(); } // // Message: inventory // std::vector vInv; { LOCK(pto->cs_inventory); vInv.reserve(std::max(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX)); // Add blocks for (const uint256 &hash : pto->vInventoryBlockToSend) { vInv.push_back(CInv(MSG_BLOCK, hash)); if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->vInventoryBlockToSend.clear(); // Check whether periodic sends should happen bool fSendTrickle = pto->fWhitelisted; if (pto->nNextInvSend < nNow) { fSendTrickle = true; // Use half the delay for outbound peers, as there is less privacy // concern for them. pto->nNextInvSend = PoissonNextSend( nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); } // Time to send but the peer has requested we not relay transactions. 
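// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch. The addr/inv/feefilter timers in
// this function are all rescheduled with PoissonNextSend. Assumption (not
// shown in this diff): the helper draws the gap to the next broadcast from an
// exponential distribution whose mean is the requested average interval,
// which makes the send times a Poisson process and hides our exact relay
// timing. A standalone analogue (names ending in "Sketch" are hypothetical):
#include <cstdint>
#include <random>

static int64_t NextSendSketch(int64_t nowMicros, int avgIntervalSeconds) {
    static std::mt19937_64 rng{std::random_device{}()};
    // Mean of exponential_distribution is 1/lambda, so lambda = 1/interval.
    std::exponential_distribution<double> gap(1.0 / avgIntervalSeconds);
    return nowMicros + static_cast<int64_t>(gap(rng) * 1000000.0);
}

// Usage mirroring the inv trickle above, where outbound peers get half the
// delay: NextSendSketch(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
// ---------------------------------------------------------------------------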
if (fSendTrickle) { LOCK(pto->cs_filter); if (!pto->fRelayTxes) { pto->setInventoryTxToSend.clear(); } } // Respond to BIP35 mempool requests if (fSendTrickle && pto->fSendMempool) { auto vtxinfo = mempool.infoAll(); pto->fSendMempool = false; Amount filterrate(0); { LOCK(pto->cs_feeFilter); filterrate = pto->minFeeFilter; } LOCK(pto->cs_filter); for (const auto &txinfo : vtxinfo) { const uint256 &txid = txinfo.tx->GetId(); CInv inv(MSG_TX, txid); pto->setInventoryTxToSend.erase(txid); if (filterrate != Amount(0)) { if (txinfo.feeRate.GetFeePerK() < filterrate) { continue; } } if (pto->pfilter) { if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) { continue; } } pto->filterInventoryKnown.insert(txid); vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->timeLastMempoolReq = GetTime(); } // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending std::vector::iterator> vInvTx; vInvTx.reserve(pto->setInventoryTxToSend.size()); for (std::set::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { vInvTx.push_back(it); } Amount filterrate(0); { LOCK(pto->cs_feeFilter); filterrate = pto->minFeeFilter; } // Topologically and fee-rate sort the inventory we send for privacy // and priority reasons. A heap is used so that not all items need // sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&mempool); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much // shorter delays. unsigned int nRelayedTransactions = 0; LOCK(pto->cs_filter); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); std::set::iterator it = vInvTx.back(); vInvTx.pop_back(); uint256 hash = *it; // Remove it from the to-be-sent set pto->setInventoryTxToSend.erase(it); // Check if not in the filter already if (pto->filterInventoryKnown.contains(hash)) { continue; } // Not in the mempool anymore? don't bother sending it. auto txinfo = mempool.info(hash); if (!txinfo.tx) { continue; } if (filterrate != Amount(0) && txinfo.feeRate.GetFeePerK() < filterrate) { continue; } if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) { continue; } // Send vInv.push_back(CInv(MSG_TX, hash)); nRelayedTransactions++; { // Expire old relay messages while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) { mapRelay.erase(vRelayExpiration.front().second); vRelayExpiration.pop_front(); } auto ret = mapRelay.insert( std::make_pair(hash, std::move(txinfo.tx))); if (ret.second) { vRelayExpiration.push_back(std::make_pair( nNow + 15 * 60 * 1000000, ret.first)); } } if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } pto->filterInventoryKnown.insert(hash); } } } if (!vInv.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); } // Detect whether we're stalling nNow = GetTimeMicros(); if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot move. 
// During normal steady state, the download window should be much larger // than the to-be-downloaded set of blocks, so disconnection should only // happen during initial block download. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); pto->fDisconnect = true; return true; } // In case there is a block that has been in flight from this peer for 2 + // 0.5 * N times the block interval (with N the number of peers from which // we're downloading validated blocks), disconnect due to timeout. We // compensate for other peers to prevent killing off peers due to our own // downstream link being saturated. We only count validated in-flight blocks // so peers can't advertise non-existing block hashes to unreasonably // increase our timeout. if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { LogPrintf("Timeout downloading block %s from peer=%d, " "disconnecting\n", queuedBlock.hash.ToString(), pto->id); pto->fDisconnect = true; return true; } } // // Message: getdata (blocks) // std::vector vGetData; if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams); vGetData.push_back( CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->id); } if (state.nBlocksInFlight == 0 && staller != -1) { if (State(staller)->nStallingSince == 0) { State(staller)->nStallingSince = nNow; LogPrint(BCLog::NET, "Stall started peer=%d\n", staller); } } } // // Message: getdata (non-blocks) // while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) { const CInv &inv = (*pto->mapAskFor.begin()).second; if (!AlreadyHave(inv)) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->id); vGetData.push_back(inv); if (vGetData.size() >= 1000) { connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } } else { // If we're not going to ask, don't expect a response. 
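// ---------------------------------------------------------------------------
// Editorial worked example, not part of this patch. The in-flight block
// timeout above is "2 + 0.5 * N block intervals", where N counts the *other*
// peers from which we are downloading validated blocks. Assumption: the
// constants BLOCK_DOWNLOAD_TIMEOUT_BASE and BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
// encode those 2 and 0.5 factors at the microsecond scale used by
// GetTimeMicros(). (Names ending in "Sketch" are hypothetical.)
#include <cstdint>

static int64_t DownloadTimeoutMicrosSketch(int64_t powTargetSpacingSec,
                                           int nOtherValidatedPeers) {
    const double base = 2.0, perPeer = 0.5; // stand-ins for the constants
    double intervals = base + perPeer * nOtherValidatedPeers;
    return static_cast<int64_t>(powTargetSpacingSec * intervals) * 1000000;
}

// Example: with a 600-second target spacing and 2 other peers downloading
// validated blocks, the timeout is (2 + 0.5 * 2) * 600s = 1800s (30 minutes).
// ---------------------------------------------------------------------------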
pto->setAskFor.erase(inv.hash); } pto->mapAskFor.erase(pto->mapAskFor.begin()); } if (!vGetData.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } // // Message: feefilter // // We don't want white listed peers to filter txs to us if we have // -whitelistforcerelay if (pto->nVersion >= FEEFILTER_VERSION && - GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && + gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && !(pto->fWhitelisted && - GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) { + gArgs.GetBoolArg("-whitelistforcerelay", + DEFAULT_WHITELISTFORCERELAY))) { Amount currentFilter = mempool - .GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * - 1000000) + .GetMinFee( + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * + 1000000) .GetFeePerK(); int64_t timeNow = GetTimeMicros(); if (timeNow > pto->nextSendTimeFeeFilter) { static CFeeRate default_feerate = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); static FeeFilterRounder filterRounder(default_feerate); Amount filterToSend = filterRounder.round(currentFilter); // If we don't allow free transactions, then we always have a fee // filter of at least minRelayTxFee - if (GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) { + if (gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) { filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); } if (filterToSend != pto->lastSentFeeFilter) { connman.PushMessage( pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend)); pto->lastSentFeeFilter = filterToSend; } pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more than // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the // broadcast to within MAX_FEEFILTER_CHANGE_DELAY. else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter && (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) { pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; } } return true; } class CNetProcessingCleanup { public: CNetProcessingCleanup() {} ~CNetProcessingCleanup() { // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); } } instance_of_cnetprocessingcleanup; diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index 3da17d6bab..7f86514c98 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -1,582 +1,582 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
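// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch. The feefilter block at the end of
// SendMessages above computes the mempool's current minimum fee for a pool
// capped at -maxmempool MB (hence the * 1000000 bytes conversion), quantizes
// it to a coarse bucket via FeeFilterRounder::round (defined later in this
// diff), and never lets it drop below the minimum relay fee when free relay
// is disabled. The same decision in plain satoshi-per-kB integers, with the
// rounder passed in as a stub (names ending in "Sketch" are hypothetical):
#include <algorithm>
#include <cstdint>

static int64_t FeeFilterToSendSketch(int64_t mempoolMinFeePerK,
                                     int64_t minRelayFeePerK,
                                     bool freeRelayAllowed,
                                     int64_t (*roundToBucket)(int64_t)) {
    int64_t filter = roundToBucket(mempoolMinFeePerK);
    if (!freeRelayAllowed) {
        // With -limitfreerelay <= 0 the advertised filter is floored at the
        // minimum relay fee.
        filter = std::max(filter, minRelayFeePerK);
    }
    // Only pushed to the peer when it differs from lastSentFeeFilter.
    return filter;
}
// ---------------------------------------------------------------------------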
#include "policy/fees.h" #include "policy/policy.h" #include "amount.h" #include "primitives/transaction.h" #include "random.h" #include "streams.h" #include "txmempool.h" #include "util.h" void TxConfirmStats::Initialize(std::vector &defaultBuckets, unsigned int maxConfirms, double _decay) { decay = _decay; for (size_t i = 0; i < defaultBuckets.size(); i++) { buckets.push_back(defaultBuckets[i]); bucketMap[defaultBuckets[i]] = i; } confAvg.resize(maxConfirms); curBlockConf.resize(maxConfirms); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { confAvg[i].resize(buckets.size()); curBlockConf[i].resize(buckets.size()); unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); curBlockTxCt.resize(buckets.size()); txCtAvg.resize(buckets.size()); curBlockVal.resize(buckets.size()); avg.resize(buckets.size()); } // Zero out the data for the current block void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight) { for (size_t j = 0; j < buckets.size(); j++) { oldUnconfTxs[j] += unconfTxs[nBlockHeight % unconfTxs.size()][j]; unconfTxs[nBlockHeight % unconfTxs.size()][j] = 0; for (size_t i = 0; i < curBlockConf.size(); i++) { curBlockConf[i][j] = 0; } curBlockTxCt[j] = 0; curBlockVal[j] = 0; } } void TxConfirmStats::Record(int blocksToConfirm, double val) { // blocksToConfirm is 1-based if (blocksToConfirm < 1) { return; } unsigned int bucketindex = bucketMap.lower_bound(val)->second; for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) { curBlockConf[i - 1][bucketindex]++; } curBlockTxCt[bucketindex]++; curBlockVal[bucketindex] += val; } void TxConfirmStats::UpdateMovingAverages() { for (unsigned int j = 0; j < buckets.size(); j++) { for (unsigned int i = 0; i < confAvg.size(); i++) { confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j]; } avg[j] = avg[j] * decay + curBlockVal[j]; txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j]; } } // returns -1 on error conditions double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, double successBreakPoint, bool requireGreater, unsigned int nBlockHeight) { // Counters for a bucket (or range of buckets) // Number of tx's confirmed within the confTarget double nConf = 0; // Total number of tx's that were ever confirmed double totalNum = 0; // Number of tx's still in mempool for confTarget or longer int extraNum = 0; int maxbucketindex = buckets.size() - 1; // requireGreater means we are looking for the lowest feerate such that all // higher values pass, so we start at maxbucketindex (highest feerate) and // look at successively smaller buckets until we reach failure. Otherwise, // we are looking for the highest feerate such that all lower values fail, // and we go in the opposite direction. unsigned int startbucket = requireGreater ? maxbucketindex : 0; int step = requireGreater ? -1 : 1; // We'll combine buckets until we have enough samples. // The near and far variables will define the range we've combined // The best variables are the last range we saw which still had a high // enough confirmation rate to count as success. // The cur variables are the current range we're counting. 
unsigned int curNearBucket = startbucket; unsigned int bestNearBucket = startbucket; unsigned int curFarBucket = startbucket; unsigned int bestFarBucket = startbucket; bool foundAnswer = false; unsigned int bins = unconfTxs.size(); // Start counting from highest(default) or lowest feerate transactions for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) { curFarBucket = bucket; nConf += confAvg[confTarget - 1][bucket]; totalNum += txCtAvg[bucket]; for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++) { extraNum += unconfTxs[(nBlockHeight - confct) % bins][bucket]; } extraNum += oldUnconfTxs[bucket]; // If we have enough transaction data points in this range of buckets, // we can test for success (Only count the confirmed data points, so // that each confirmation count will be looking at the same amount of // data and same bucket breaks) if (totalNum >= sufficientTxVal / (1 - decay)) { double curPct = nConf / (totalNum + extraNum); // Check to see if we are no longer getting confirmed at the success // rate if (requireGreater && curPct < successBreakPoint) { break; } if (!requireGreater && curPct > successBreakPoint) { break; } // Otherwise update the cumulative stats, and the bucket variables // and reset the counters foundAnswer = true; nConf = 0; totalNum = 0; extraNum = 0; bestNearBucket = curNearBucket; bestFarBucket = curFarBucket; curNearBucket = bucket + step; } } double median = -1; double txSum = 0; // Calculate the "average" feerate of the best bucket range that met success // conditions. Find the bucket with the median transaction and then report // the average feerate from that bucket. This is a compromise between // finding the median which we can't since we don't save all tx's and // reporting the average which is less accurate unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket; unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket; for (unsigned int j = minBucket; j <= maxBucket; j++) { txSum += txCtAvg[j]; } if (foundAnswer && txSum != 0) { txSum = txSum / 2; for (unsigned int j = minBucket; j <= maxBucket; j++) { if (txCtAvg[j] < txSum) { txSum -= txCtAvg[j]; } else { // we're in the right bucket median = avg[j] / txCtAvg[j]; break; } } } LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate " "%s: %12.5g from buckets %8g - %8g Cur " "Bucket stats %6.2f%% %8.1f/(%.1f+%d " "mempool)\n", confTarget, requireGreater ? ">" : "<", successBreakPoint, requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket], 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum); return median; } void TxConfirmStats::Write(CAutoFile &fileout) { fileout << decay; fileout << buckets; fileout << avg; fileout << txCtAvg; fileout << confAvg; } void TxConfirmStats::Read(CAutoFile &filein) { // Read data file into temporary variables and do some very basic sanity // checking std::vector fileBuckets; std::vector fileAvg; std::vector> fileConfAvg; std::vector fileTxCtAvg; double fileDecay; size_t maxConfirms; size_t numBuckets; filein >> fileDecay; if (fileDecay <= 0 || fileDecay >= 1) { throw std::runtime_error("Corrupt estimates file. Decay must be " "between 0 and 1 (non-inclusive)"); } filein >> fileBuckets; numBuckets = fileBuckets.size(); if (numBuckets <= 1 || numBuckets > 1000) { throw std::runtime_error("Corrupt estimates file. 
Must have between 2 " "and 1000 feerate buckets"); } filein >> fileAvg; if (fileAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in feerate average bucket count"); } filein >> fileTxCtAvg; if (fileTxCtAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in tx count bucket count"); } filein >> fileConfAvg; maxConfirms = fileConfAvg.size(); if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week throw std::runtime_error("Corrupt estimates file. Must maintain " "estimates for between 1 and 1008 (one week) " "confirms"); } for (unsigned int i = 0; i < maxConfirms; i++) { if (fileConfAvg[i].size() != numBuckets) { throw std::runtime_error("Corrupt estimates file. Mismatch in " "feerate conf average bucket count"); } } // Now that we've processed the entire feerate estimate data file and not // thrown any errors, we can copy it to our data structures decay = fileDecay; buckets = fileBuckets; avg = fileAvg; confAvg = fileConfAvg; txCtAvg = fileTxCtAvg; bucketMap.clear(); // Resize the current block variables which aren't stored in the data file // to match the number of confirms and buckets curBlockConf.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { curBlockConf[i].resize(buckets.size()); } curBlockTxCt.resize(buckets.size()); curBlockVal.resize(buckets.size()); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); for (size_t i = 0; i < buckets.size(); i++) { bucketMap[buckets[i]] = i; } LogPrint( BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n", numBuckets, maxConfirms); } unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) { unsigned int bucketindex = bucketMap.lower_bound(val)->second; unsigned int blockIndex = nBlockHeight % unconfTxs.size(); unconfTxs[blockIndex][bucketindex]++; return bucketindex; } void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex) { // nBestSeenHeight is not updated yet for the new block int blocksAgo = nBestSeenHeight - entryHeight; if (nBestSeenHeight == 0) { // the BlockPolicyEstimator hasn't seen any blocks yet blocksAgo = 0; } if (blocksAgo < 0) { // This can't happen because we call this with our best seen height, no // entries can have higher LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, blocks ago is negative for mempool tx\n"); return; } if (blocksAgo >= (int)unconfTxs.size()) { if (oldUnconfTxs[bucketindex] > 0) { oldUnconfTxs[bucketindex]--; } else { LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx " "removed from >25 " "blocks,bucketIndex=%u already\n", bucketindex); } } else { unsigned int blockIndex = entryHeight % unconfTxs.size(); if (unconfTxs[blockIndex][bucketindex] > 0) { unconfTxs[blockIndex][bucketindex]--; } else { LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from " "blockIndex=%u,bucketIndex=%u already\n", blockIndex, bucketindex); } } } // This function is called from CTxMemPool::removeUnchecked to ensure txs // removed from the mempool for any reason are no longer tracked. Txs that were // part of a block have already been removed in processBlockTx to ensure they // are never double tracked, but it is of no harm to try to remove them again. 
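// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch. The feerate buckets used by
// TxConfirmStats are spaced geometrically (each boundary is the previous one
// times FEE_SPACING), and NewTx/Record map a feerate to its bucket index with
// bucketMap.lower_bound(feerate)->second. Standalone analogue with made-up
// numbers (the spacing value and names ending in "Sketch" are hypothetical):
#include <cstdio>
#include <map>
#include <vector>

static void BucketSketch() {
    const double spacing = 1.1; // stand-in for FEE_SPACING
    std::vector<double> buckets;
    std::map<double, unsigned int> bucketMap;
    for (double b = 1000.0; b <= 1e7; b *= spacing) { // sat/kB boundaries
        bucketMap[b] = buckets.size();
        buckets.push_back(b);
    }
    // lower_bound returns the first boundary >= the feerate, i.e. the bucket
    // whose range contains it.
    double feerate = 25000.0;
    unsigned int idx = bucketMap.lower_bound(feerate)->second;
    std::printf("feerate %.0f falls in bucket %u (boundary %.0f)\n", feerate,
                idx, buckets[idx]);
}
// ---------------------------------------------------------------------------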
bool CBlockPolicyEstimator::removeTx(uint256 hash) { std::map::iterator pos = mapMemPoolTxs.find(hash); if (pos == mapMemPoolTxs.end()) { return false; } feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex); mapMemPoolTxs.erase(hash); return true; } CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate &_minRelayFee) : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0) { static_assert(MIN_FEERATE > Amount(0), "Min feerate must be nonzero"); CFeeRate minFeeRate(MIN_FEERATE); minTrackedFee = _minRelayFee < minFeeRate ? minFeeRate : _minRelayFee; std::vector vfeelist; for (double bucketBoundary = minTrackedFee.GetFeePerK().GetSatoshis(); bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); bucketBoundary *= FEE_SPACING) { vfeelist.push_back(bucketBoundary); } vfeelist.push_back(double(INF_FEERATE.GetSatoshis())); feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY); } void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry &entry, bool validFeeEstimate) { uint32_t txHeight = entry.GetHeight(); uint256 txid = entry.GetTx().GetId(); if (mapMemPoolTxs.count(txid)) { LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error mempool tx %s already being tracked\n", txid.ToString().c_str()); return; } if (txHeight != nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. We'll potentially double count transactions in // 1-block reorgs. Ignore txs if BlockPolicyEstimator is not in sync // with chainActive.Tip(). It will be synced next time a block is // processed. return; } // Only want to be updating estimates when our blockchain is synced, // otherwise we'll miscalculate how many blocks its taking to get included. if (!validFeeEstimate) { untrackedTxs++; return; } trackedTxs++; // Feerates are stored and reported as BCH-per-kb: CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); mapMemPoolTxs[txid].blockHeight = txHeight; mapMemPoolTxs[txid].bucketIndex = feeStats.NewTx(txHeight, double(feeRate.GetFeePerK().GetSatoshis())); } bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry *entry) { if (!removeTx(entry->GetTx().GetId())) { // This transaction wasn't being tracked for fee estimation return false; } // How many blocks did it take for miners to include this transaction? // blocksToConfirm is 1-based, so a transaction included in the earliest // possible block has confirmation count of 1 int blocksToConfirm = nBlockHeight - entry->GetHeight(); if (blocksToConfirm <= 0) { // This can't happen because we don't process transactions from a block // with a height lower than our greatest seen height LogPrint( BCLog::ESTIMATEFEE, "Blockpolicy error Transaction had negative blocksToConfirm\n"); return false; } // Feerates are stored and reported as BCH-per-kb: CFeeRate feeRate(entry->GetFee(), entry->GetTxSize()); feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK().GetSatoshis()); return true; } void CBlockPolicyEstimator::processBlock( unsigned int nBlockHeight, std::vector &entries) { if (nBlockHeight <= nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. And if an attacker can re-org the chain at will, // then you've got much bigger problems than "attacker can influence // transaction fees." return; } // Must update nBestSeenHeight in sync with ClearCurrent so that calls to // removeTx (via processBlockTx) correctly calculate age of unconfirmed txs // to remove from tracking. 
nBestSeenHeight = nBlockHeight; // Clear the current block state and update unconfirmed circular buffer feeStats.ClearCurrent(nBlockHeight); unsigned int countedTxs = 0; // Repopulate the current block states for (size_t i = 0; i < entries.size(); i++) { if (processBlockTx(nBlockHeight, entries[i])) { countedTxs++; } } // Update all exponential averages with the current block state feeStats.UpdateMovingAverages(); LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u " "of %u txs in block, since last block %u of " "%u tracked, new mempool map size %u\n", countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size()); trackedTxs = 0; untrackedTxs = 0; } CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) { // Return failure if trying to analyze a target we're not tracking // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { return CFeeRate(Amount(0)); } double median = feeStats.EstimateMedianVal( confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); if (median < 0) { return CFeeRate(Amount(0)); } return CFeeRate(Amount(int64_t(median))); } CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // Return failure if trying to analyze a target we're not tracking if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { return CFeeRate(Amount(0)); } // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget == 1) { confTarget = 2; } double median = -1; while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) { median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); } if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget - 1; } // If mempool is limiting txs , return at least the min feerate from the // mempool Amount minPoolFee = - pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * + pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); if (minPoolFee > Amount(0) && minPoolFee > Amount(int64_t(median))) { return CFeeRate(minPoolFee); } if (median < 0) { return CFeeRate(Amount(0)); } return CFeeRate(Amount(int64_t(median))); } double CBlockPolicyEstimator::estimatePriority(int confTarget) { return -1; } double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // If mempool is limiting txs, no priority txs are allowed Amount minPoolFee = - pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * + pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); if (minPoolFee > Amount(0)) { return double(INF_PRIORITY.GetSatoshis()); } return -1; } void CBlockPolicyEstimator::Write(CAutoFile &fileout) { fileout << nBestSeenHeight; feeStats.Write(fileout); } void CBlockPolicyEstimator::Read(CAutoFile &filein, int nFileVersion) { int nFileBestSeenHeight; filein >> nFileBestSeenHeight; feeStats.Read(filein); nBestSeenHeight = nFileBestSeenHeight; if (nFileVersion < 139900) { TxConfirmStats priStats; priStats.Read(filein); } } FeeFilterRounder::FeeFilterRounder(const CFeeRate &minIncrementalFee) { Amount minFeeLimit = std::max(Amount(1), minIncrementalFee.GetFeePerK() / 2); 
feeset.insert(Amount(0)); for (double bucketBoundary = minFeeLimit.GetSatoshis(); bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); bucketBoundary *= FEE_SPACING) { feeset.insert(Amount(int64_t(bucketBoundary))); } } Amount FeeFilterRounder::round(const Amount currentMinFee) { auto it = feeset.lower_bound(currentMinFee); if ((it != feeset.begin() && insecure_rand.rand32() % 3 != 0) || it == feeset.end()) { it--; } return *it; } diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 5cc33503ba..52b542bac5 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -1,806 +1,806 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "bitcoingui.h" #include "chainparams.h" #include "clientmodel.h" #include "config.h" #include "guiconstants.h" #include "guiutil.h" #include "intro.h" #include "networkstyle.h" #include "optionsmodel.h" #include "platformstyle.h" #include "splashscreen.h" #include "utilitydialog.h" #include "winshutdownmonitor.h" #ifdef ENABLE_WALLET #include "paymentserver.h" #include "walletmodel.h" #endif #include "init.h" #include "rpc/server.h" #include "scheduler.h" #include "ui_interface.h" #include "util.h" #include "warnings.h" #ifdef ENABLE_WALLET #include "wallet/wallet.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(QT_STATICPLUGIN) #include #if QT_VERSION < 0x050000 Q_IMPORT_PLUGIN(qcncodecs) Q_IMPORT_PLUGIN(qjpcodecs) Q_IMPORT_PLUGIN(qtwcodecs) Q_IMPORT_PLUGIN(qkrcodecs) Q_IMPORT_PLUGIN(qtaccessiblewidgets) #else #if QT_VERSION < 0x050400 Q_IMPORT_PLUGIN(AccessibleFactory) #endif #if defined(QT_QPA_PLATFORM_XCB) Q_IMPORT_PLUGIN(QXcbIntegrationPlugin); #elif defined(QT_QPA_PLATFORM_WINDOWS) Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin); #elif defined(QT_QPA_PLATFORM_COCOA) Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin); #endif #endif #endif #if QT_VERSION < 0x050000 #include #endif // Declare meta types used for QMetaObject::invokeMethod Q_DECLARE_METATYPE(bool *) Q_DECLARE_METATYPE(Amount) // Config is non-copyable so we can only register pointers to it Q_DECLARE_METATYPE(Config *) static void InitMessage(const std::string &message) { LogPrintf("init message: %s\n", message); } /** * Translate string to current locale using Qt. */ static std::string Translate(const char *psz) { return QCoreApplication::translate("bitcoin-abc", psz).toStdString(); } static QString GetLangTerritory() { QSettings settings; // Get desired locale (e.g. 
"de_DE") // 1) System default language QString lang_territory = QLocale::system().name(); // 2) Language from QSettings QString lang_territory_qsettings = settings.value("language", "").toString(); if (!lang_territory_qsettings.isEmpty()) lang_territory = lang_territory_qsettings; // 3) -lang command line argument - lang_territory = - QString::fromStdString(GetArg("-lang", lang_territory.toStdString())); + lang_territory = QString::fromStdString( + gArgs.GetArg("-lang", lang_territory.toStdString())); return lang_territory; } /** Set up translations */ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTranslator, QTranslator &translatorBase, QTranslator &translator) { // Remove old translators QApplication::removeTranslator(&qtTranslatorBase); QApplication::removeTranslator(&qtTranslator); QApplication::removeTranslator(&translatorBase); QApplication::removeTranslator(&translator); // Get desired locale (e.g. "de_DE") // 1) System default language QString lang_territory = GetLangTerritory(); // Convert to "de" only by truncating "_DE" QString lang = lang_territory; lang.truncate(lang_territory.lastIndexOf('_')); // Load language files for configured locale: // - First load the translator for the base language, without territory // - Then load the more specific locale translator // Load e.g. qt_de.qm if (qtTranslatorBase.load( "qt_" + lang, QLibraryInfo::location(QLibraryInfo::TranslationsPath))) QApplication::installTranslator(&qtTranslatorBase); // Load e.g. qt_de_DE.qm if (qtTranslator.load( "qt_" + lang_territory, QLibraryInfo::location(QLibraryInfo::TranslationsPath))) QApplication::installTranslator(&qtTranslator); // Load e.g. bitcoin_de.qm (shortcut "de" needs to be defined in // bitcoin.qrc) if (translatorBase.load(lang, ":/translations/")) QApplication::installTranslator(&translatorBase); // Load e.g. bitcoin_de_DE.qm (shortcut "de_DE" needs to be defined in // bitcoin.qrc) if (translator.load(lang_territory, ":/translations/")) QApplication::installTranslator(&translator); } /* qDebug() message handler --> debug.log */ #if QT_VERSION < 0x050000 void DebugMessageHandler(QtMsgType type, const char *msg) { if (type == QtDebugMsg) { LogPrint(BCLog::QT, "GUI: %s\n", msg); } else { LogPrintf("GUI: %s\n", msg); } } #else void DebugMessageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg) { Q_UNUSED(context); if (type == QtDebugMsg) { LogPrint(BCLog::QT, "GUI: %s\n", msg.toStdString()); } else { LogPrintf("GUI: %s\n", msg.toStdString()); } } #endif /** * Class encapsulating Bitcoin ABC startup and shutdown. * Allows running startup and shutdown in a different thread from the UI thread. 
*/ class BitcoinABC : public QObject { Q_OBJECT public: explicit BitcoinABC(); public Q_SLOTS: void initialize(Config *config); void shutdown(); Q_SIGNALS: void initializeResult(int retval); void shutdownResult(int retval); void runawayException(const QString &message); private: boost::thread_group threadGroup; CScheduler scheduler; /// Pass fatal exception message to UI thread void handleRunawayException(const std::exception *e); }; /** Main Bitcoin application object */ class BitcoinApplication : public QApplication { Q_OBJECT public: explicit BitcoinApplication(int &argc, char **argv); ~BitcoinApplication(); #ifdef ENABLE_WALLET /// Create payment server void createPaymentServer(); #endif /// parameter interaction/setup based on rules void parameterSetup(); /// Create options model void createOptionsModel(bool resetSettings); /// Create main window void createWindow(const Config *, const NetworkStyle *networkStyle); /// Create splash screen void createSplashScreen(const NetworkStyle *networkStyle); /// Request core initialization void requestInitialize(Config &config); /// Request core shutdown void requestShutdown(Config &config); /// Get process return value int getReturnValue() { return returnValue; } /// Get window identifier of QMainWindow (BitcoinGUI) WId getMainWinId() const; public Q_SLOTS: void initializeResult(int retval); void shutdownResult(int retval); /// Handle runaway exceptions. Shows a message box with the problem and /// quits the program. void handleRunawayException(const QString &message); Q_SIGNALS: void requestedInitialize(Config *config); void requestedShutdown(); void stopThread(); void splashFinished(QWidget *window); private: QThread *coreThread; OptionsModel *optionsModel; ClientModel *clientModel; BitcoinGUI *window; QTimer *pollShutdownTimer; #ifdef ENABLE_WALLET PaymentServer *paymentServer; WalletModel *walletModel; #endif int returnValue; const PlatformStyle *platformStyle; std::unique_ptr shutdownWindow; void startThread(); }; #include "bitcoin.moc" BitcoinABC::BitcoinABC() : QObject() {} void BitcoinABC::handleRunawayException(const std::exception *e) { PrintExceptionContinue(e, "Runaway exception"); Q_EMIT runawayException(QString::fromStdString(GetWarnings("gui"))); } void BitcoinABC::initialize(Config *cfg) { Config &config(*cfg); try { qDebug() << __func__ << ": Running AppInit2 in thread"; if (!AppInitBasicSetup()) { Q_EMIT initializeResult(false); return; } if (!AppInitParameterInteraction(config)) { Q_EMIT initializeResult(false); return; } if (!AppInitSanityChecks()) { Q_EMIT initializeResult(false); return; } int rv = AppInitMain(config, threadGroup, scheduler); Q_EMIT initializeResult(rv); } catch (const std::exception &e) { handleRunawayException(&e); } catch (...) { handleRunawayException(nullptr); } } void BitcoinABC::shutdown() { try { qDebug() << __func__ << ": Running Shutdown in thread"; Interrupt(threadGroup); threadGroup.join_all(); Shutdown(); qDebug() << __func__ << ": Shutdown finished"; Q_EMIT shutdownResult(1); } catch (const std::exception &e) { handleRunawayException(&e); } catch (...) { handleRunawayException(nullptr); } } BitcoinApplication::BitcoinApplication(int &argc, char **argv) : QApplication(argc, argv), coreThread(0), optionsModel(0), clientModel(0), window(0), pollShutdownTimer(0), #ifdef ENABLE_WALLET paymentServer(0), walletModel(0), #endif returnValue(0) { setQuitOnLastWindowClosed(false); // UI per-platform customization. 
// This must be done inside the BitcoinApplication constructor, or after it, // because PlatformStyle::instantiate requires a QApplication. std::string platformName; - platformName = GetArg("-uiplatform", BitcoinGUI::DEFAULT_UIPLATFORM); + platformName = gArgs.GetArg("-uiplatform", BitcoinGUI::DEFAULT_UIPLATFORM); platformStyle = PlatformStyle::instantiate(QString::fromStdString(platformName)); // Fall back to "other" if specified name not found. if (!platformStyle) platformStyle = PlatformStyle::instantiate("other"); assert(platformStyle); } BitcoinApplication::~BitcoinApplication() { if (coreThread) { qDebug() << __func__ << ": Stopping thread"; Q_EMIT stopThread(); coreThread->wait(); qDebug() << __func__ << ": Stopped thread"; } delete window; window = 0; #ifdef ENABLE_WALLET delete paymentServer; paymentServer = 0; #endif delete optionsModel; optionsModel = 0; delete platformStyle; platformStyle = 0; } #ifdef ENABLE_WALLET void BitcoinApplication::createPaymentServer() { paymentServer = new PaymentServer(this); } #endif void BitcoinApplication::createOptionsModel(bool resetSettings) { optionsModel = new OptionsModel(nullptr, resetSettings); } void BitcoinApplication::createWindow(const Config *config, const NetworkStyle *networkStyle) { window = new BitcoinGUI(config, platformStyle, networkStyle, 0); pollShutdownTimer = new QTimer(window); connect(pollShutdownTimer, SIGNAL(timeout()), window, SLOT(detectShutdown())); pollShutdownTimer->start(200); } void BitcoinApplication::createSplashScreen(const NetworkStyle *networkStyle) { SplashScreen *splash = new SplashScreen(0, networkStyle); // We don't hold a direct pointer to the splash screen after creation, but // the splash screen will take care of deleting itself when slotFinish // happens. splash->show(); connect(this, SIGNAL(splashFinished(QWidget *)), splash, SLOT(slotFinish(QWidget *))); connect(this, SIGNAL(requestedShutdown()), splash, SLOT(close())); } void BitcoinApplication::startThread() { if (coreThread) return; coreThread = new QThread(this); BitcoinABC *executor = new BitcoinABC(); executor->moveToThread(coreThread); /* communication to and from thread */ connect(executor, SIGNAL(initializeResult(int)), this, SLOT(initializeResult(int))); connect(executor, SIGNAL(shutdownResult(int)), this, SLOT(shutdownResult(int))); connect(executor, SIGNAL(runawayException(QString)), this, SLOT(handleRunawayException(QString))); // Note on how Qt works: it tries to directly invoke methods if the signal // is emitted on the same thread that the target object 'lives' on. // But if the target object 'lives' on another thread (executor here does) // the SLOT will be invoked asynchronously at a later time in the thread // of the target object. So.. we pass a pointer around. If you pass // a reference around (even if it's non-const) you'll get Qt generating // code to copy-construct the parameter in question (Q_DECLARE_METATYPE // and qRegisterMetaType generate this code). For the Config class, // which is noncopyable, we can't do this. So.. we have to pass // pointers to Config around. Make sure Config &/Config * isn't a // temporary (eg it lives somewhere aside from the stack) or this will // crash because initialize() gets executed in another thread at some // unspecified time (after) requestedInitialize() is emitted! 
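// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch. The note above is about queued
// (cross-thread) invocation: the slot runs later, on the executor's thread,
// with only the pointer we handed over, so the noncopyable Config must not be
// a temporary and must outlive that call. A plain-C++ analogue of the same
// lifetime rule, with std::thread standing in for Qt's queued connection
// (names ending in "Sketch" are hypothetical):
#include <cstdio>
#include <string>
#include <thread>

struct ConfigSketch { // stand-in for the noncopyable Config
    std::string network = "main";
    ConfigSketch() = default;
    ConfigSketch(const ConfigSketch &) = delete;
    ConfigSketch &operator=(const ConfigSketch &) = delete;
};

static void LifetimeSketch() {
    static ConfigSketch config; // lives for the whole program, like GetConfig()
    std::thread worker([cfg = &config] {
        // Runs later, on another thread: only safe because *cfg is not a
        // temporary that has already been destroyed.
        std::printf("initializing for network %s\n", cfg->network.c_str());
    });
    worker.join();
}
// ---------------------------------------------------------------------------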
connect(this, SIGNAL(requestedInitialize(Config *)), executor, SLOT(initialize(Config *))); connect(this, SIGNAL(requestedShutdown()), executor, SLOT(shutdown())); /* make sure executor object is deleted in its own thread */ connect(this, SIGNAL(stopThread()), executor, SLOT(deleteLater())); connect(this, SIGNAL(stopThread()), coreThread, SLOT(quit())); coreThread->start(); } void BitcoinApplication::parameterSetup() { InitLogging(); InitParameterInteraction(); } void BitcoinApplication::requestInitialize(Config &config) { qDebug() << __func__ << ": Requesting initialize"; startThread(); // IMPORTANT: config must NOT be a reference to a temporary because below // signal may be connected to a slot that will be executed as a queued // connection in another thread! Q_EMIT requestedInitialize(&config); } void BitcoinApplication::requestShutdown(Config &config) { // Show a simple window indicating shutdown status. Do this first as some of // the steps may take some time below, for example the RPC console may still // be executing a command. shutdownWindow.reset(ShutdownWindow::showShutdownWindow(window)); qDebug() << __func__ << ": Requesting shutdown"; startThread(); window->hide(); window->setClientModel(0); pollShutdownTimer->stop(); #ifdef ENABLE_WALLET window->removeAllWallets(); delete walletModel; walletModel = 0; #endif delete clientModel; clientModel = 0; StartShutdown(); // Request shutdown from core thread Q_EMIT requestedShutdown(); } void BitcoinApplication::initializeResult(int retval) { qDebug() << __func__ << ": Initialization result: " << retval; // Set exit result: 0 if successful, 1 if failure returnValue = retval ? 0 : 1; if (retval) { // Log this only after AppInit2 finishes, as then logging setup is // guaranteed complete. qWarning() << "Platform customization:" << platformStyle->getName(); #ifdef ENABLE_WALLET PaymentServer::LoadRootCAs(); paymentServer->setOptionsModel(optionsModel); #endif clientModel = new ClientModel(optionsModel); window->setClientModel(clientModel); #ifdef ENABLE_WALLET // TODO: Expose secondary wallets if (!vpwallets.empty()) { walletModel = new WalletModel(platformStyle, vpwallets[0], optionsModel); window->addWallet(BitcoinGUI::DEFAULT_WALLET, walletModel); window->setCurrentWallet(BitcoinGUI::DEFAULT_WALLET); connect(walletModel, SIGNAL(coinsSent(CWallet *, SendCoinsRecipient, QByteArray)), paymentServer, SLOT(fetchPaymentACK(CWallet *, const SendCoinsRecipient &, QByteArray))); } #endif // If -min option passed, start window minimized. - if (GetBoolArg("-min", false)) { + if (gArgs.GetBoolArg("-min", false)) { window->showMinimized(); } else { window->show(); } Q_EMIT splashFinished(window); #ifdef ENABLE_WALLET // Now that initialization/startup is done, process any command-line // bitcoincash: URIs or payment requests: connect(paymentServer, SIGNAL(receivedPaymentRequest(SendCoinsRecipient)), window, SLOT(handlePaymentRequest(SendCoinsRecipient))); connect(window, SIGNAL(receivedURI(QString)), paymentServer, SLOT(handleURIOrFile(QString))); connect(paymentServer, SIGNAL(message(QString, QString, unsigned int)), window, SLOT(message(QString, QString, unsigned int))); QTimer::singleShot(100, paymentServer, SLOT(uiReady())); #endif } else { // Exit main loop. quit(); } } void BitcoinApplication::shutdownResult(int retval) { qDebug() << __func__ << ": Shutdown result: " << retval; // Exit main loop after shutdown finished. 
quit(); } void BitcoinApplication::handleRunawayException(const QString &message) { QMessageBox::critical( 0, "Runaway exception", BitcoinGUI::tr("A fatal error occurred. Bitcoin can no longer continue " "safely and will quit.") + QString("\n\n") + message); ::exit(EXIT_FAILURE); } WId BitcoinApplication::getMainWinId() const { if (!window) return 0; return window->winId(); } #ifndef BITCOIN_QT_TEST static void MigrateSettings() { assert(!QApplication::applicationName().isEmpty()); static const QString legacyAppName("Bitcoin-Qt"), #ifdef Q_OS_DARWIN // Macs and/or iOS et al use a domain-style name for Settings // files. All other platforms use a simple orgname. This // difference is documented in the QSettings class documentation. legacyOrg("bitcoin.org"); #else legacyOrg("Bitcoin"); #endif QSettings // below picks up settings file location based on orgname,appname legacy(legacyOrg, legacyAppName), // default c'tor below picks up settings file location based on // QApplication::applicationName(), et al -- which was already set // in main() abc; #ifdef Q_OS_DARWIN // Disable bogus OSX keys from MacOS system-wide prefs that may cloud our // judgement ;) (this behavior is also documented in QSettings docs) legacy.setFallbacksEnabled(false); abc.setFallbacksEnabled(false); #endif const QStringList legacyKeys(legacy.allKeys()); // We only migrate settings if we have Core settings but no Bitcoin-ABC // settings if (!legacyKeys.isEmpty() && abc.allKeys().isEmpty()) { for (const QString &key : legacyKeys) { // now, copy settings over abc.setValue(key, legacy.value(key)); } } } int main(int argc, char *argv[]) { SetupEnvironment(); /// 1. Parse command-line options. These take precedence over anything else. // Command-line options take precedence: - ParseParameters(argc, argv); + gArgs.ParseParameters(argc, argv); // Do not refer to data directory yet, this can be overridden by // Intro::pickDataDirectory /// 2. Basic Qt initialization (not dependent on parameters or configuration) #if QT_VERSION < 0x050000 // Internal string conversion is all UTF-8 QTextCodec::setCodecForTr(QTextCodec::codecForName("UTF-8")); QTextCodec::setCodecForCStrings(QTextCodec::codecForTr()); #endif Q_INIT_RESOURCE(bitcoin); Q_INIT_RESOURCE(bitcoin_locale); BitcoinApplication app(argc, argv); #if QT_VERSION > 0x050100 // Generate high-dpi pixmaps QApplication::setAttribute(Qt::AA_UseHighDpiPixmaps); #endif #if QT_VERSION >= 0x050600 QGuiApplication::setAttribute(Qt::AA_EnableHighDpiScaling); #endif #ifdef Q_OS_MAC QApplication::setAttribute(Qt::AA_DontShowIconsInMenus); #endif #if QT_VERSION >= 0x050500 // Because of the POODLE attack it is recommended to disable SSLv3 // (https://disablessl3.com/), so set SSL protocols to TLS1.0+. QSslConfiguration sslconf = QSslConfiguration::defaultConfiguration(); sslconf.setProtocol(QSsl::TlsV1_0OrLater); QSslConfiguration::setDefaultConfiguration(sslconf); #endif // Register meta types used for QMetaObject::invokeMethod qRegisterMetaType(); // Need to pass name here as Amount is a typedef (see // http://qt-project.org/doc/qt-5/qmetatype.html#qRegisterMetaType) // IMPORTANT if it is no longer a typedef use the normal variant above qRegisterMetaType("Amount"); qRegisterMetaType>("std::function"); // Need to register any types Qt doesn't know about if you intend // to use them with the signal/slot mechanism Qt provides. Even pointers. 
// Note that class Config is noncopyable and so we can't register a // non-pointer version of it with Qt, because Qt expects to be able to // copy-construct non-pointers to objects for invoking slots // behind-the-scenes in the 'Queued' connection case. qRegisterMetaType(); /// 3. Application identification // must be set before OptionsModel is initialized or translations are // loaded, as it is used to locate QSettings. // Note: If you move these calls somewhere else, be sure to bring // MigrateSettings() below along for the ride. QApplication::setOrganizationName(QAPP_ORG_NAME); QApplication::setOrganizationDomain(QAPP_ORG_DOMAIN); QApplication::setApplicationName(QAPP_APP_NAME_DEFAULT); // Migrate settings from core's/our old GUI settings to Bitcoin ABC // only if core's exist but Bitcoin ABC's doesn't. // NOTE -- this function needs to be called *after* the above 3 lines // that set the app orgname and app name! If you move the above 3 lines // to elsewhere, take this call with you! MigrateSettings(); GUIUtil::SubstituteFonts(GetLangTerritory()); /// 4. Initialization of translations, so that intro dialog is in user's /// language. Now that QSettings are accessible, initialize translations. QTranslator qtTranslatorBase, qtTranslator, translatorBase, translator; initTranslations(qtTranslatorBase, qtTranslator, translatorBase, translator); translationInterface.Translate.connect(Translate); // Show help message immediately after parsing command-line options (for // "-lang") and setting locale, but before showing splash screen. - if (IsArgSet("-?") || IsArgSet("-h") || IsArgSet("-help") || - IsArgSet("-version")) { - HelpMessageDialog help(nullptr, IsArgSet("-version")); + if (gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || + gArgs.IsArgSet("-help") || gArgs.IsArgSet("-version")) { + HelpMessageDialog help(nullptr, gArgs.IsArgSet("-version")); help.showOrPrint(); return EXIT_SUCCESS; } /// 5. Now that settings and translations are available, ask user for data /// directory. User language is set up: pick a data directory. if (!Intro::pickDataDirectory()) return EXIT_SUCCESS; /// 6. Determine availability of data directory and parse bitcoin.conf /// - Do not call GetDataDir(true) before this step finishes. if (!fs::is_directory(GetDataDir(false))) { QMessageBox::critical( 0, QObject::tr(PACKAGE_NAME), QObject::tr( "Error: Specified data directory \"%1\" does not exist.") - .arg(QString::fromStdString(GetArg("-datadir", "")))); + .arg(QString::fromStdString(gArgs.GetArg("-datadir", "")))); return EXIT_FAILURE; } try { - ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)); + gArgs.ReadConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)); } catch (const std::exception &e) { QMessageBox::critical( 0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: Cannot parse configuration file: %1. Only use " "key=value syntax.") .arg(e.what())); return EXIT_FAILURE; } /// 7. Determine network (and switch to network specific options) // - Do not call Params() before this step. // - Do this after parsing the configuration file, as the network can be // switched there. // - QSettings() will use the new application name after this, resulting in // network-specific settings. // - Needs to be done before createOptionsModel. 
// Check for -testnet or -regtest parameter (Params() calls are only valid // after this clause) try { SelectParams(ChainNameFromCommandLine()); } catch (std::exception &e) { QMessageBox::critical(0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: %1").arg(e.what())); return EXIT_FAILURE; } #ifdef ENABLE_WALLET // Parse URIs on command line -- this can affect Params() PaymentServer::ipcParseCommandLine(argc, argv); #endif QScopedPointer networkStyle(NetworkStyle::instantiate( QString::fromStdString(Params().NetworkIDString()))); assert(!networkStyle.isNull()); // Allow for separate UI settings for testnets QApplication::setApplicationName(networkStyle->getAppName()); // Re-initialize translations after changing application name (language in // network-specific settings can be different) initTranslations(qtTranslatorBase, qtTranslator, translatorBase, translator); #ifdef ENABLE_WALLET /// 8. URI IPC sending // - Do this early as we don't want to bother initializing if we are just // calling IPC // - Do this *after* setting up the data directory, as the data directory // hash is used in the name // of the server. // - Do this after creating app and setting up translations, so errors are // translated properly. if (PaymentServer::ipcSendCommandLine()) exit(EXIT_SUCCESS); // Start up the payment server early, too, so impatient users that click on // bitcoincash: links repeatedly have their payment requests routed to this // process: app.createPaymentServer(); #endif /// 9. Main GUI initialization // Install global event filter that makes sure that long tooltips can be // word-wrapped. app.installEventFilter( new GUIUtil::ToolTipToRichTextFilter(TOOLTIP_WRAP_THRESHOLD, &app)); #if QT_VERSION < 0x050000 // Install qDebug() message handler to route to debug.log qInstallMsgHandler(DebugMessageHandler); #else #if defined(Q_OS_WIN) // Install global event filter for processing Windows session related // Windows messages (WM_QUERYENDSESSION and WM_ENDSESSION) qApp->installNativeEventFilter(new WinShutdownMonitor()); #endif // Install qDebug() message handler to route to debug.log qInstallMessageHandler(DebugMessageHandler); #endif // Allow parameter interaction before we create the options model app.parameterSetup(); // Load GUI settings from QSettings - app.createOptionsModel(IsArgSet("-resetguisettings")); + app.createOptionsModel(gArgs.IsArgSet("-resetguisettings")); // Subscribe to global signals from core uiInterface.InitMessage.connect(InitMessage); // Get global config Config &config = const_cast(GetConfig()); - if (GetBoolArg("-splash", DEFAULT_SPLASHSCREEN) && - !GetBoolArg("-min", false)) + if (gArgs.GetBoolArg("-splash", DEFAULT_SPLASHSCREEN) && + !gArgs.GetBoolArg("-min", false)) app.createSplashScreen(networkStyle.data()); try { app.createWindow(&config, networkStyle.data()); app.requestInitialize(config); #if defined(Q_OS_WIN) && QT_VERSION >= 0x050000 WinShutdownMonitor::registerShutdownBlockReason( QObject::tr("%1 didn't yet exit safely...") .arg(QObject::tr(PACKAGE_NAME)), (HWND)app.getMainWinId()); #endif app.exec(); app.requestShutdown(config); app.exec(); } catch (const std::exception &e) { PrintExceptionContinue(&e, "Runaway exception"); app.handleRunawayException(QString::fromStdString(GetWarnings("gui"))); } catch (...) 
{ PrintExceptionContinue(nullptr, "Runaway exception"); app.handleRunawayException(QString::fromStdString(GetWarnings("gui"))); } return app.getReturnValue(); } #endif // BITCOIN_QT_TEST diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index c31ec2d2b6..0a827c058c 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -1,1040 +1,1040 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "guiutil.h" #include "bitcoinaddressvalidator.h" #include "bitcoinunits.h" #include "dstencode.h" #include "qvalidatedlineedit.h" #include "walletmodel.h" #include "cashaddr.h" #include "config.h" #include "dstencode.h" #include "fs.h" #include "init.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "protocol.h" #include "script/script.h" #include "script/standard.h" #include "util.h" #include "utilstrencodings.h" #ifdef WIN32 #ifdef _WIN32_WINNT #undef _WIN32_WINNT #endif #define _WIN32_WINNT 0x0501 #ifdef _WIN32_IE #undef _WIN32_IE #endif #define _WIN32_IE 0x0501 #define WIN32_LEAN_AND_MEAN 1 #ifndef NOMINMAX #define NOMINMAX #endif #include "shellapi.h" #include "shlobj.h" #include "shlwapi.h" #endif #include #if BOOST_FILESYSTEM_VERSION >= 3 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include // for Qt::mightBeRichText #include #if QT_VERSION < 0x050000 #include #else #include #endif #if QT_VERSION >= 0x50200 #include #endif #if BOOST_FILESYSTEM_VERSION >= 3 static fs::detail::utf8_codecvt_facet utf8; #endif #if defined(Q_OS_MAC) // These Mac includes must be done in the global namespace #include #include extern double NSAppKitVersionNumber; #if !defined(NSAppKitVersionNumber10_8) #define NSAppKitVersionNumber10_8 1187 #endif #if !defined(NSAppKitVersionNumber10_9) #define NSAppKitVersionNumber10_9 1265 #endif #endif namespace GUIUtil { QString dateTimeStr(const QDateTime &date) { return date.date().toString(Qt::SystemLocaleShortDate) + QString(" ") + date.toString("hh:mm"); } QString dateTimeStr(qint64 nTime) { return dateTimeStr(QDateTime::fromTime_t((qint32)nTime)); } QFont fixedPitchFont() { #if QT_VERSION >= 0x50200 return QFontDatabase::systemFont(QFontDatabase::FixedFont); #else QFont font("Monospace"); #if QT_VERSION >= 0x040800 font.setStyleHint(QFont::Monospace); #else font.setStyleHint(QFont::TypeWriter); #endif return font; #endif } static std::string MakeAddrInvalid(std::string addr) { if (addr.size() < 2) { return ""; } // Checksum is at the end of the address. Swapping chars to make it invalid. std::swap(addr[addr.size() - 1], addr[addr.size() - 2]); if (!IsValidDestinationString(addr)) { return addr; } return ""; } std::string DummyAddress(const Config &config) { // Just some dummy data to generate an convincing random-looking (but // consistent) address static const std::vector dummydata = { 0xeb, 0x15, 0x23, 0x1d, 0xfc, 0xeb, 0x60, 0x92, 0x58, 0x86, 0xb6, 0x7d, 0x06, 0x52, 0x99, 0x92, 0x59, 0x15, 0xae, 0xb1}; const CTxDestination dstKey = CKeyID(uint160(dummydata)); return MakeAddrInvalid(EncodeDestination(dstKey, config)); } // Addresses are stored in the database with the encoding that the client was // configured with at the time of creation. // // This converts to clients current configuration. 
QString convertToConfiguredAddressFormat(const Config &config, const QString &addr) { if (!IsValidDestinationString(addr.toStdString(), config.GetChainParams())) { // We have something sketchy as input. Do not try to convert. return addr; } CTxDestination dst = DecodeDestination(addr.toStdString(), config.GetChainParams()); return QString::fromStdString(EncodeDestination(dst, config)); } void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent) { parent->setFocusProxy(widget); widget->setFont(fixedPitchFont()); #if QT_VERSION >= 0x040700 // We don't want translators to use own addresses in translations // and this is the only place, where this address is supplied. widget->setPlaceholderText( QObject::tr("Enter a Bitcoin address (e.g. %1)") .arg(QString::fromStdString(DummyAddress(GetConfig())))); #endif widget->setValidator( new BitcoinAddressEntryValidator(Params().CashAddrPrefix(), parent)); widget->setCheckValidator(new BitcoinAddressCheckValidator(parent)); } void setupAmountWidget(QLineEdit *widget, QWidget *parent) { QDoubleValidator *amountValidator = new QDoubleValidator(parent); amountValidator->setDecimals(8); amountValidator->setBottom(0.0); widget->setValidator(amountValidator); widget->setAlignment(Qt::AlignRight | Qt::AlignVCenter); } QString bitcoinURIScheme(const CChainParams ¶ms, bool useCashAddr) { if (!useCashAddr) { return "bitcoincash"; } return QString::fromStdString(params.CashAddrPrefix()); } QString bitcoinURIScheme(const Config &config) { return bitcoinURIScheme(config.GetChainParams(), config.UseCashAddrEncoding()); } static bool IsCashAddrEncoded(const QUrl &uri) { const std::string addr = (uri.scheme() + ":" + uri.path()).toStdString(); auto decoded = cashaddr::Decode(addr, ""); return !decoded.first.empty(); } bool parseBitcoinURI(const QString &scheme, const QUrl &uri, SendCoinsRecipient *out) { // return if URI has wrong scheme. if (!uri.isValid() || uri.scheme() != scheme) { return false; } SendCoinsRecipient rv; if (IsCashAddrEncoded(uri)) { rv.address = uri.scheme() + ":" + uri.path(); } else { // strip out uri scheme for base58 encoded addresses rv.address = uri.path(); } // Trim any following forward slash which may have been added by the OS if (rv.address.endsWith("/")) { rv.address.truncate(rv.address.length() - 1); } rv.amount = Amount(0); #if QT_VERSION < 0x050000 QList> items = uri.queryItems(); #else QUrlQuery uriQuery(uri); QList> items = uriQuery.queryItems(); #endif for (QList>::iterator i = items.begin(); i != items.end(); i++) { bool fShouldReturnFalse = false; if (i->first.startsWith("req-")) { i->first.remove(0, 4); fShouldReturnFalse = true; } if (i->first == "label") { rv.label = i->second; fShouldReturnFalse = false; } if (i->first == "message") { rv.message = i->second; fShouldReturnFalse = false; } else if (i->first == "amount") { if (!i->second.isEmpty()) { if (!BitcoinUnits::parse(BitcoinUnits::BCH, i->second, &rv.amount)) { return false; } } fShouldReturnFalse = false; } if (fShouldReturnFalse) { return false; } } if (out) { *out = rv; } return true; } bool parseBitcoinURI(const QString &scheme, QString uri, SendCoinsRecipient *out) { // // Cannot handle this later, because bitcoincash:// // will cause Qt to see the part after // as host, // which will lower-case it (and thus invalidate the address). 
if (uri.startsWith(scheme + "://", Qt::CaseInsensitive)) { uri.replace(0, scheme.length() + 3, scheme + ":"); } QUrl uriInstance(uri); return parseBitcoinURI(scheme, uriInstance, out); } QString formatBitcoinURI(const Config &config, const SendCoinsRecipient &info) { QString ret = info.address; if (!config.UseCashAddrEncoding()) { // prefix address with uri scheme for base58 encoded addresses. ret = (bitcoinURIScheme(config) + ":%1").arg(ret); } int paramCount = 0; if (info.amount != Amount(0)) { ret += QString("?amount=%1") .arg(BitcoinUnits::format(BitcoinUnits::BCH, info.amount, false, BitcoinUnits::separatorNever)); paramCount++; } if (!info.label.isEmpty()) { QString lbl(QUrl::toPercentEncoding(info.label)); ret += QString("%1label=%2").arg(paramCount == 0 ? "?" : "&").arg(lbl); paramCount++; } if (!info.message.isEmpty()) { QString msg(QUrl::toPercentEncoding(info.message)); ret += QString("%1message=%2").arg(paramCount == 0 ? "?" : "&").arg(msg); paramCount++; } return ret; } bool isDust(const QString &address, const Amount amount) { CTxDestination dest = DecodeDestination(address.toStdString()); CScript script = GetScriptForDestination(dest); CTxOut txOut(amount, script); return txOut.IsDust(dustRelayFee); } QString HtmlEscape(const QString &str, bool fMultiLine) { #if QT_VERSION < 0x050000 QString escaped = Qt::escape(str); #else QString escaped = str.toHtmlEscaped(); #endif if (fMultiLine) { escaped = escaped.replace("\n", "
\n"); } return escaped; } QString HtmlEscape(const std::string &str, bool fMultiLine) { return HtmlEscape(QString::fromStdString(str), fMultiLine); } void copyEntryData(QAbstractItemView *view, int column, int role) { if (!view || !view->selectionModel()) return; QModelIndexList selection = view->selectionModel()->selectedRows(column); if (!selection.isEmpty()) { // Copy first item setClipboard(selection.at(0).data(role).toString()); } } QList getEntryData(QAbstractItemView *view, int column) { if (!view || !view->selectionModel()) return QList(); return view->selectionModel()->selectedRows(column); } QString getSaveFileName(QWidget *parent, const QString &caption, const QString &dir, const QString &filter, QString *selectedSuffixOut) { QString selectedFilter; QString myDir; // Default to user documents location if (dir.isEmpty()) { #if QT_VERSION < 0x050000 myDir = QDesktopServices::storageLocation( QDesktopServices::DocumentsLocation); #else myDir = QStandardPaths::writableLocation(QStandardPaths::DocumentsLocation); #endif } else { myDir = dir; } /* Directly convert path to native OS path separators */ QString result = QDir::toNativeSeparators(QFileDialog::getSaveFileName( parent, caption, myDir, filter, &selectedFilter)); /* Extract first suffix from filter pattern "Description (*.foo)" or * "Description (*.foo *.bar ...) */ QRegExp filter_re(".* \\(\\*\\.(.*)[ \\)]"); QString selectedSuffix; if (filter_re.exactMatch(selectedFilter)) { selectedSuffix = filter_re.cap(1); } /* Add suffix if needed */ QFileInfo info(result); if (!result.isEmpty()) { if (info.suffix().isEmpty() && !selectedSuffix.isEmpty()) { /* No suffix specified, add selected suffix */ if (!result.endsWith(".")) result.append("."); result.append(selectedSuffix); } } /* Return selected suffix if asked to */ if (selectedSuffixOut) { *selectedSuffixOut = selectedSuffix; } return result; } QString getOpenFileName(QWidget *parent, const QString &caption, const QString &dir, const QString &filter, QString *selectedSuffixOut) { QString selectedFilter; QString myDir; if (dir.isEmpty()) // Default to user documents location { #if QT_VERSION < 0x050000 myDir = QDesktopServices::storageLocation( QDesktopServices::DocumentsLocation); #else myDir = QStandardPaths::writableLocation(QStandardPaths::DocumentsLocation); #endif } else { myDir = dir; } /* Directly convert path to native OS path separators */ QString result = QDir::toNativeSeparators(QFileDialog::getOpenFileName( parent, caption, myDir, filter, &selectedFilter)); if (selectedSuffixOut) { /* Extract first suffix from filter pattern "Description (*.foo)" or * "Description (*.foo *.bar ...) 
*/ QRegExp filter_re(".* \\(\\*\\.(.*)[ \\)]"); QString selectedSuffix; if (filter_re.exactMatch(selectedFilter)) { selectedSuffix = filter_re.cap(1); } *selectedSuffixOut = selectedSuffix; } return result; } Qt::ConnectionType blockingGUIThreadConnection() { if (QThread::currentThread() != qApp->thread()) { return Qt::BlockingQueuedConnection; } else { return Qt::DirectConnection; } } bool checkPoint(const QPoint &p, const QWidget *w) { QWidget *atW = QApplication::widgetAt(w->mapToGlobal(p)); if (!atW) return false; return atW->topLevelWidget() == w; } bool isObscured(QWidget *w) { return !(checkPoint(QPoint(0, 0), w) && checkPoint(QPoint(w->width() - 1, 0), w) && checkPoint(QPoint(0, w->height() - 1), w) && checkPoint(QPoint(w->width() - 1, w->height() - 1), w) && checkPoint(QPoint(w->width() / 2, w->height() / 2), w)); } void openDebugLogfile() { fs::path pathDebug = GetDataDir() / "debug.log"; /* Open debug.log with the associated application */ if (fs::exists(pathDebug)) QDesktopServices::openUrl( QUrl::fromLocalFile(boostPathToQString(pathDebug))); } void SubstituteFonts(const QString &language) { #if defined(Q_OS_MAC) // Background: // OSX's default font changed in 10.9 and Qt is unable to find it with its // usual fallback methods when building against the 10.7 sdk or lower. // The 10.8 SDK added a function to let it find the correct fallback font. // If this fallback is not properly loaded, some characters may fail to // render correctly. // // The same thing happened with 10.10. .Helvetica Neue DeskInterface is now // default. // // Solution: If building with the 10.7 SDK or lower and the user's platform // is 10.9 or higher at runtime, substitute the correct font. This needs to // happen before the QApplication is created. #if defined(MAC_OS_X_VERSION_MAX_ALLOWED) && \ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8 if (floor(NSAppKitVersionNumber) > NSAppKitVersionNumber10_8) { if (floor(NSAppKitVersionNumber) <= NSAppKitVersionNumber10_9) /* On a 10.9 - 10.9.x system */ QFont::insertSubstitution(".Lucida Grande UI", "Lucida Grande"); else { /* 10.10 or later system */ if (language == "zh_CN" || language == "zh_TW" || language == "zh_HK") // traditional or simplified Chinese QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Heiti SC"); else if (language == "ja") // Japanesee QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Songti SC"); else QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Lucida Grande"); } } #endif #endif } ToolTipToRichTextFilter::ToolTipToRichTextFilter(int _size_threshold, QObject *parent) : QObject(parent), size_threshold(_size_threshold) {} bool ToolTipToRichTextFilter::eventFilter(QObject *obj, QEvent *evt) { if (evt->type() == QEvent::ToolTipChange) { QWidget *widget = static_cast(obj); QString tooltip = widget->toolTip(); if (tooltip.size() > size_threshold && !tooltip.startsWith(" to make sure Qt detects this as rich text // Escape the current message as HTML and replace \n by
tooltip = "" + HtmlEscape(tooltip, true) + ""; widget->setToolTip(tooltip); return true; } } return QObject::eventFilter(obj, evt); } void TableViewLastColumnResizingFixer::connectViewHeadersSignals() { connect(tableView->horizontalHeader(), SIGNAL(sectionResized(int, int, int)), this, SLOT(on_sectionResized(int, int, int))); connect(tableView->horizontalHeader(), SIGNAL(geometriesChanged()), this, SLOT(on_geometriesChanged())); } // We need to disconnect these while handling the resize events, otherwise we // can enter infinite loops. void TableViewLastColumnResizingFixer::disconnectViewHeadersSignals() { disconnect(tableView->horizontalHeader(), SIGNAL(sectionResized(int, int, int)), this, SLOT(on_sectionResized(int, int, int))); disconnect(tableView->horizontalHeader(), SIGNAL(geometriesChanged()), this, SLOT(on_geometriesChanged())); } // Setup the resize mode, handles compatibility for Qt5 and below as the method // signatures changed. // Refactored here for readability. void TableViewLastColumnResizingFixer::setViewHeaderResizeMode( int logicalIndex, QHeaderView::ResizeMode resizeMode) { #if QT_VERSION < 0x050000 tableView->horizontalHeader()->setResizeMode(logicalIndex, resizeMode); #else tableView->horizontalHeader()->setSectionResizeMode(logicalIndex, resizeMode); #endif } void TableViewLastColumnResizingFixer::resizeColumn(int nColumnIndex, int width) { tableView->setColumnWidth(nColumnIndex, width); tableView->horizontalHeader()->resizeSection(nColumnIndex, width); } int TableViewLastColumnResizingFixer::getColumnsWidth() { int nColumnsWidthSum = 0; for (int i = 0; i < columnCount; i++) { nColumnsWidthSum += tableView->horizontalHeader()->sectionSize(i); } return nColumnsWidthSum; } int TableViewLastColumnResizingFixer::getAvailableWidthForColumn(int column) { int nResult = lastColumnMinimumWidth; int nTableWidth = tableView->horizontalHeader()->width(); if (nTableWidth > 0) { int nOtherColsWidth = getColumnsWidth() - tableView->horizontalHeader()->sectionSize(column); nResult = std::max(nResult, nTableWidth - nOtherColsWidth); } return nResult; } // Make sure we don't make the columns wider than the table's viewport width. void TableViewLastColumnResizingFixer::adjustTableColumnsWidth() { disconnectViewHeadersSignals(); resizeColumn(lastColumnIndex, getAvailableWidthForColumn(lastColumnIndex)); connectViewHeadersSignals(); int nTableWidth = tableView->horizontalHeader()->width(); int nColsWidth = getColumnsWidth(); if (nColsWidth > nTableWidth) { resizeColumn(secondToLastColumnIndex, getAvailableWidthForColumn(secondToLastColumnIndex)); } } // Make column use all the space available, useful during window resizing. void TableViewLastColumnResizingFixer::stretchColumnWidth(int column) { disconnectViewHeadersSignals(); resizeColumn(column, getAvailableWidthForColumn(column)); connectViewHeadersSignals(); } // When a section is resized this is a slot-proxy for ajustAmountColumnWidth(). void TableViewLastColumnResizingFixer::on_sectionResized(int logicalIndex, int oldSize, int newSize) { adjustTableColumnsWidth(); int remainingWidth = getAvailableWidthForColumn(logicalIndex); if (newSize > remainingWidth) { resizeColumn(logicalIndex, remainingWidth); } } // When the table's geometry is ready, we manually perform the stretch of the // "Message" column, // as the "Stretch" resize mode does not allow for interactive resizing. 
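As the comments above note, TableViewLastColumnResizingFixer has to disconnect the header signals around its own programmatic resizes, because resizeSection() re-emits sectionResized() and would otherwise re-enter the handler indefinitely. A hedged sketch of an alternative (not code from this diff): a boolean re-entrancy guard that simply ignores the events the fixer itself triggers.

    // Assumed illustrative helper, not part of the diff.
    #include <functional>
    #include <utility>

    class ReentrancyGuardedResizer {
        bool resizingProgrammatically = false;
        std::function<void()> adjustColumns; // the programmatic resize work

    public:
        explicit ReentrancyGuardedResizer(std::function<void()> adjust)
            : adjustColumns(std::move(adjust)) {}

        // Call this from the sectionResized handler.
        void onSectionResized() {
            if (resizingProgrammatically) {
                return; // event caused by our own resize call; ignore it
            }
            resizingProgrammatically = true;
            adjustColumns(); // may emit sectionResized again, now ignored
            resizingProgrammatically = false;
        }
    };

The disconnect/reconnect approach in the real fixer achieves the same thing; the guard variant is just one way to express it without touching the signal connections.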
void TableViewLastColumnResizingFixer::on_geometriesChanged() { if ((getColumnsWidth() - this->tableView->horizontalHeader()->width()) != 0) { disconnectViewHeadersSignals(); resizeColumn(secondToLastColumnIndex, getAvailableWidthForColumn(secondToLastColumnIndex)); connectViewHeadersSignals(); } } /** * Initializes all internal variables and prepares the * the resize modes of the last 2 columns of the table and */ TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer( QTableView *table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent) : QObject(parent), tableView(table), lastColumnMinimumWidth(lastColMinimumWidth), allColumnsMinimumWidth(allColsMinimumWidth) { columnCount = tableView->horizontalHeader()->count(); lastColumnIndex = columnCount - 1; secondToLastColumnIndex = columnCount - 2; tableView->horizontalHeader()->setMinimumSectionSize( allColumnsMinimumWidth); setViewHeaderResizeMode(secondToLastColumnIndex, QHeaderView::Interactive); setViewHeaderResizeMode(lastColumnIndex, QHeaderView::Interactive); } #ifdef WIN32 static fs::path StartupShortcutPath() { std::string chain = ChainNameFromCommandLine(); if (chain == CBaseChainParams::MAIN) return GetSpecialFolderPath(CSIDL_STARTUP) / "Bitcoin.lnk"; // Remove this special case when CBaseChainParams::TESTNET = "testnet4" if (chain == CBaseChainParams::TESTNET) return GetSpecialFolderPath(CSIDL_STARTUP) / "Bitcoin (testnet).lnk"; return GetSpecialFolderPath(CSIDL_STARTUP) / strprintf("Bitcoin (%s).lnk", chain); } bool GetStartOnSystemStartup() { // check for Bitcoin*.lnk return fs::exists(StartupShortcutPath()); } bool SetStartOnSystemStartup(bool fAutoStart) { // If the shortcut exists already, remove it for updating fs::remove(StartupShortcutPath()); if (fAutoStart) { CoInitialize(nullptr); // Get a pointer to the IShellLink interface. IShellLink *psl = nullptr; HRESULT hres = CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_INPROC_SERVER, IID_IShellLink, reinterpret_cast(&psl)); if (SUCCEEDED(hres)) { // Get the current executable path TCHAR pszExePath[MAX_PATH]; GetModuleFileName(nullptr, pszExePath, sizeof(pszExePath)); // Start client minimized QString strArgs = "-min"; // Set -testnet /-regtest options strArgs += QString::fromStdString(strprintf( - " -testnet=%d -regtest=%d", GetBoolArg("-testnet", false), - GetBoolArg("-regtest", false))); + " -testnet=%d -regtest=%d", gArgs.GetBoolArg("-testnet", false), + gArgs.GetBoolArg("-regtest", false))); #ifdef UNICODE boost::scoped_array args(new TCHAR[strArgs.length() + 1]); // Convert the QString to TCHAR* strArgs.toWCharArray(args.get()); // Add missing '\0'-termination to string args[strArgs.length()] = '\0'; #endif // Set the path to the shortcut target psl->SetPath(pszExePath); PathRemoveFileSpec(pszExePath); psl->SetWorkingDirectory(pszExePath); psl->SetShowCmd(SW_SHOWMINNOACTIVE); #ifndef UNICODE psl->SetArguments(strArgs.toStdString().c_str()); #else psl->SetArguments(args.get()); #endif // Query IShellLink for the IPersistFile interface for // saving the shortcut in persistent storage. IPersistFile *ppf = nullptr; hres = psl->QueryInterface(IID_IPersistFile, reinterpret_cast(&ppf)); if (SUCCEEDED(hres)) { WCHAR pwsz[MAX_PATH]; // Ensure that the string is ANSI. MultiByteToWideChar(CP_ACP, 0, StartupShortcutPath().string().c_str(), -1, pwsz, MAX_PATH); // Save the link by calling IPersistFile::Save. 
hres = ppf->Save(pwsz, TRUE); ppf->Release(); psl->Release(); CoUninitialize(); return true; } psl->Release(); } CoUninitialize(); return false; } return true; } #elif defined(Q_OS_LINUX) // Follow the Desktop Application Autostart Spec: // http://standards.freedesktop.org/autostart-spec/autostart-spec-latest.html static fs::path GetAutostartDir() { char *pszConfigHome = getenv("XDG_CONFIG_HOME"); if (pszConfigHome) return fs::path(pszConfigHome) / "autostart"; char *pszHome = getenv("HOME"); if (pszHome) return fs::path(pszHome) / ".config" / "autostart"; return fs::path(); } static fs::path GetAutostartFilePath() { std::string chain = ChainNameFromCommandLine(); if (chain == CBaseChainParams::MAIN) return GetAutostartDir() / "bitcoin.desktop"; return GetAutostartDir() / strprintf("bitcoin-%s.lnk", chain); } bool GetStartOnSystemStartup() { fs::ifstream optionFile(GetAutostartFilePath()); if (!optionFile.good()) return false; // Scan through file for "Hidden=true": std::string line; while (!optionFile.eof()) { getline(optionFile, line); if (line.find("Hidden") != std::string::npos && line.find("true") != std::string::npos) return false; } optionFile.close(); return true; } bool SetStartOnSystemStartup(bool fAutoStart) { if (!fAutoStart) fs::remove(GetAutostartFilePath()); else { char pszExePath[MAX_PATH + 1]; memset(pszExePath, 0, sizeof(pszExePath)); if (readlink("/proc/self/exe", pszExePath, sizeof(pszExePath) - 1) == -1) return false; fs::create_directories(GetAutostartDir()); fs::ofstream optionFile(GetAutostartFilePath(), std::ios_base::out | std::ios_base::trunc); if (!optionFile.good()) return false; std::string chain = ChainNameFromCommandLine(); // Write a bitcoin.desktop file to the autostart directory: optionFile << "[Desktop Entry]\n"; optionFile << "Type=Application\n"; if (chain == CBaseChainParams::MAIN) optionFile << "Name=Bitcoin\n"; else optionFile << strprintf("Name=Bitcoin (%s)\n", chain); optionFile << "Exec=" << pszExePath << strprintf(" -min -testnet=%d -regtest=%d\n", - GetBoolArg("-testnet", false), - GetBoolArg("-regtest", false)); + gArgs.GetBoolArg("-testnet", false), + gArgs.GetBoolArg("-regtest", false)); optionFile << "Terminal=false\n"; optionFile << "Hidden=false\n"; optionFile.close(); } return true; } #elif defined(Q_OS_MAC) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" // based on: // https://github.com/Mozketo/LaunchAtLoginController/blob/master/LaunchAtLoginController.m // NB: caller must release returned ref if it's not NULL LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl); LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl) { LSSharedFileListItemRef foundItem = nullptr; // loop through the list of startup items and try to find the bitcoin app CFArrayRef listSnapshot = LSSharedFileListCopySnapshot(list, nullptr); for (int i = 0; !foundItem && i < CFArrayGetCount(listSnapshot); ++i) { LSSharedFileListItemRef item = (LSSharedFileListItemRef)CFArrayGetValueAtIndex(listSnapshot, i); UInt32 resolutionFlags = kLSSharedFileListNoUserInteraction | kLSSharedFileListDoNotMountVolumes; CFURLRef currentItemURL = nullptr; #if defined(MAC_OS_X_VERSION_MAX_ALLOWED) && \ MAC_OS_X_VERSION_MAX_ALLOWED >= 10100 if (&LSSharedFileListItemCopyResolvedURL) currentItemURL = LSSharedFileListItemCopyResolvedURL( item, resolutionFlags, nullptr); #if defined(MAC_OS_X_VERSION_MIN_REQUIRED) && \ MAC_OS_X_VERSION_MIN_REQUIRED < 10100 else 
LSSharedFileListItemResolve(item, resolutionFlags, ¤tItemURL, nullptr); #endif #else LSSharedFileListItemResolve(item, resolutionFlags, ¤tItemURL, nullptr); #endif if (currentItemURL && CFEqual(currentItemURL, findUrl)) { // found CFRetain(foundItem = item); } if (currentItemURL) { CFRelease(currentItemURL); } } CFRelease(listSnapshot); return foundItem; } bool GetStartOnSystemStartup() { CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle()); LSSharedFileListRef loginItems = LSSharedFileListCreate( nullptr, kLSSharedFileListSessionLoginItems, nullptr); LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl); // findStartupItemInList retains the item it returned, need to release if (foundItem) { CFRelease(foundItem); } CFRelease(loginItems); CFRelease(bitcoinAppUrl); return foundItem; } bool SetStartOnSystemStartup(bool fAutoStart) { CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle()); LSSharedFileListRef loginItems = LSSharedFileListCreate( nullptr, kLSSharedFileListSessionLoginItems, nullptr); LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl); if (fAutoStart && !foundItem) { // add bitcoin app to startup item list LSSharedFileListInsertItemURL(loginItems, kLSSharedFileListItemBeforeFirst, nullptr, nullptr, bitcoinAppUrl, nullptr, nullptr); } else if (!fAutoStart && foundItem) { // remove item LSSharedFileListItemRemove(loginItems, foundItem); } // findStartupItemInList retains the item it returned, need to release if (foundItem) { CFRelease(foundItem); } CFRelease(loginItems); CFRelease(bitcoinAppUrl); return true; } #pragma GCC diagnostic pop #else bool GetStartOnSystemStartup() { return false; } bool SetStartOnSystemStartup(bool fAutoStart) { return false; } #endif void saveWindowGeometry(const QString &strSetting, QWidget *parent) { QSettings settings; settings.setValue(strSetting + "Pos", parent->pos()); settings.setValue(strSetting + "Size", parent->size()); } void restoreWindowGeometry(const QString &strSetting, const QSize &defaultSize, QWidget *parent) { QSettings settings; QPoint pos = settings.value(strSetting + "Pos").toPoint(); QSize size = settings.value(strSetting + "Size", defaultSize).toSize(); if (!pos.x() && !pos.y()) { QRect screen = QApplication::desktop()->screenGeometry(); pos.setX((screen.width() - size.width()) / 2); pos.setY((screen.height() - size.height()) / 2); } parent->resize(size); parent->move(pos); } void setClipboard(const QString &str) { QApplication::clipboard()->setText(str, QClipboard::Clipboard); QApplication::clipboard()->setText(str, QClipboard::Selection); } #if BOOST_FILESYSTEM_VERSION >= 3 fs::path qstringToBoostPath(const QString &path) { return fs::path(path.toStdString(), utf8); } QString boostPathToQString(const fs::path &path) { return QString::fromStdString(path.string(utf8)); } #else #warning Conversion between boost path and QString can use invalid character encoding with boost_filesystem v2 and older fs::path qstringToBoostPath(const QString &path) { return fs::path(path.toStdString()); } QString boostPathToQString(const fs::path &path) { return QString::fromStdString(path.string()); } #endif QString formatDurationStr(int secs) { QStringList strList; int days = secs / 86400; int hours = (secs % 86400) / 3600; int mins = (secs % 3600) / 60; int seconds = secs % 60; if (days) strList.append(QString(QObject::tr("%1 d")).arg(days)); if (hours) strList.append(QString(QObject::tr("%1 h")).arg(hours)); if (mins) 
strList.append(QString(QObject::tr("%1 m")).arg(mins)); if (seconds || (!days && !hours && !mins)) strList.append(QString(QObject::tr("%1 s")).arg(seconds)); return strList.join(" "); } QString formatServicesStr(quint64 mask) { QStringList strList; // Just scan the last 8 bits for now. for (int i = 0; i < 8; i++) { uint64_t check = 1 << i; if (mask & check) { switch (check) { case NODE_NETWORK: strList.append("NETWORK"); break; case NODE_GETUTXO: strList.append("GETUTXO"); break; case NODE_BLOOM: strList.append("BLOOM"); break; case NODE_XTHIN: strList.append("XTHIN"); break; case NODE_BITCOIN_CASH: strList.append("CASH"); break; default: strList.append(QString("%1[%2]").arg("UNKNOWN").arg(check)); } } } if (strList.size()) return strList.join(" & "); else return QObject::tr("None"); } QString formatPingTime(double dPingTime) { return (dPingTime == std::numeric_limits::max() / 1e6 || dPingTime == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")) .arg(QString::number((int)(dPingTime * 1000), 10)); } QString formatTimeOffset(int64_t nTimeOffset) { return QString(QObject::tr("%1 s")) .arg(QString::number((int)nTimeOffset, 10)); } QString formatNiceTimeOffset(qint64 secs) { // Represent time from last generated block in human readable text QString timeBehindText; const int HOUR_IN_SECONDS = 60 * 60; const int DAY_IN_SECONDS = 24 * 60 * 60; const int WEEK_IN_SECONDS = 7 * 24 * 60 * 60; // Average length of year in Gregorian calendar const int YEAR_IN_SECONDS = 31556952; if (secs < 60) { timeBehindText = QObject::tr("%n second(s)", "", secs); } else if (secs < 2 * HOUR_IN_SECONDS) { timeBehindText = QObject::tr("%n minute(s)", "", secs / 60); } else if (secs < 2 * DAY_IN_SECONDS) { timeBehindText = QObject::tr("%n hour(s)", "", secs / HOUR_IN_SECONDS); } else if (secs < 2 * WEEK_IN_SECONDS) { timeBehindText = QObject::tr("%n day(s)", "", secs / DAY_IN_SECONDS); } else if (secs < YEAR_IN_SECONDS) { timeBehindText = QObject::tr("%n week(s)", "", secs / WEEK_IN_SECONDS); } else { qint64 years = secs / YEAR_IN_SECONDS; qint64 remainder = secs % YEAR_IN_SECONDS; timeBehindText = QObject::tr("%1 and %2") .arg(QObject::tr("%n year(s)", "", years)) .arg(QObject::tr("%n week(s)", "", remainder / WEEK_IN_SECONDS)); } return timeBehindText; } void ClickableLabel::mouseReleaseEvent(QMouseEvent *event) { Q_EMIT clicked(event->pos()); } void ClickableProgressBar::mouseReleaseEvent(QMouseEvent *event) { Q_EMIT clicked(event->pos()); } } // namespace GUIUtil diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp index a1a1db2510..6225042a44 100644 --- a/src/qt/intro.cpp +++ b/src/qt/intro.cpp @@ -1,295 +1,296 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "fs.h" #include "guiutil.h" #include "intro.h" #include "ui_intro.h" #include "util.h" #include #include #include #include static const uint64_t GB_BYTES = 1000000000LL; /* Minimum free space (in GB) needed for data directory */ static const uint64_t BLOCK_CHAIN_SIZE = 150; /* Minimum free space (in GB) needed for data directory when pruned; Does not * include prune target */ static const uint64_t CHAIN_STATE_SIZE = 3; /* Total required space (in GB) depending on user choice (prune, not prune) */ static uint64_t requiredSpace; /* Check free space asynchronously to prevent hanging the UI thread. 
Up to one request to check a path is in flight to this thread; when the check() function runs, the current path is requested from the associated Intro object. The reply is sent back through a signal. This ensures that no queue of checking requests is built up while the user is still entering the path, and that always the most recently entered path is checked as soon as the thread becomes available. */ class FreespaceChecker : public QObject { Q_OBJECT public: FreespaceChecker(Intro *intro); enum Status { ST_OK, ST_ERROR }; public Q_SLOTS: void check(); Q_SIGNALS: void reply(int status, const QString &message, quint64 available); private: Intro *intro; }; #include "intro.moc" FreespaceChecker::FreespaceChecker(Intro *_intro) { this->intro = _intro; } void FreespaceChecker::check() { QString dataDirStr = intro->getPathToCheck(); fs::path dataDir = GUIUtil::qstringToBoostPath(dataDirStr); uint64_t freeBytesAvailable = 0; int replyStatus = ST_OK; QString replyMessage = tr("A new data directory will be created."); /* Find first parent that exists, so that fs::space does not fail */ fs::path parentDir = dataDir; fs::path parentDirOld = fs::path(); while (parentDir.has_parent_path() && !fs::exists(parentDir)) { parentDir = parentDir.parent_path(); /* Check if we make any progress, break if not to prevent an infinite * loop here */ if (parentDirOld == parentDir) break; parentDirOld = parentDir; } try { freeBytesAvailable = fs::space(parentDir).available; if (fs::exists(dataDir)) { if (fs::is_directory(dataDir)) { QString separator = "" + QDir::toNativeSeparators("/") + tr("name") + ""; replyStatus = ST_OK; replyMessage = tr("Directory already exists. Add %1 if you " "intend to create a new directory here.") .arg(separator); } else { replyStatus = ST_ERROR; replyMessage = tr("Path already exists, and is not a directory."); } } } catch (const fs::filesystem_error &) { /* Parent directory does not exist or is not accessible */ replyStatus = ST_ERROR; replyMessage = tr("Cannot create data directory here."); } Q_EMIT reply(replyStatus, replyMessage, freeBytesAvailable); } Intro::Intro(QWidget *parent) : QDialog(parent), ui(new Ui::Intro), thread(0), signalled(false) { ui->setupUi(this); ui->welcomeLabel->setText(ui->welcomeLabel->text().arg(tr(PACKAGE_NAME))); ui->storageLabel->setText(ui->storageLabel->text().arg(tr(PACKAGE_NAME))); - uint64_t pruneTarget = std::max(0, GetArg("-prune", 0)); + uint64_t pruneTarget = std::max(0, gArgs.GetArg("-prune", 0)); requiredSpace = BLOCK_CHAIN_SIZE; if (pruneTarget) { uint64_t prunedGBs = std::ceil(pruneTarget * 1024 * 1024.0 / GB_BYTES); if (prunedGBs <= requiredSpace) { requiredSpace = prunedGBs; } } requiredSpace += CHAIN_STATE_SIZE; ui->sizeWarningLabel->setText( ui->sizeWarningLabel->text().arg(tr(PACKAGE_NAME)).arg(requiredSpace)); startThread(); } Intro::~Intro() { delete ui; /* Ensure thread is finished before it is deleted */ Q_EMIT stopThread(); thread->wait(); } QString Intro::getDataDirectory() { return ui->dataDirectory->text(); } void Intro::setDataDirectory(const QString &dataDir) { ui->dataDirectory->setText(dataDir); if (dataDir == getDefaultDataDirectory()) { ui->dataDirDefault->setChecked(true); ui->dataDirectory->setEnabled(false); ui->ellipsisButton->setEnabled(false); } else { ui->dataDirCustom->setChecked(true); ui->dataDirectory->setEnabled(true); ui->ellipsisButton->setEnabled(true); } } QString Intro::getDefaultDataDirectory() { return GUIUtil::boostPathToQString(GetDefaultDataDir()); } bool Intro::pickDataDirectory() { QSettings 
settings; /* If data directory provided on command line, no need to look at settings or show a picking dialog */ - if (!GetArg("-datadir", "").empty()) return true; + if (!gArgs.GetArg("-datadir", "").empty()) return true; /* 1) Default data directory for operating system */ QString dataDir = getDefaultDataDirectory(); /* 2) Allow QSettings to override default dir */ dataDir = settings.value("strDataDir", dataDir).toString(); if (!fs::exists(GUIUtil::qstringToBoostPath(dataDir)) || - GetBoolArg("-choosedatadir", DEFAULT_CHOOSE_DATADIR) || + gArgs.GetBoolArg("-choosedatadir", DEFAULT_CHOOSE_DATADIR) || settings.value("fReset", false).toBool() || - GetBoolArg("-resetguisettings", false)) { + gArgs.GetBoolArg("-resetguisettings", false)) { /* If current default data directory does not exist, let the user choose * one */ Intro intro; intro.setDataDirectory(dataDir); intro.setWindowIcon(QIcon(":icons/bitcoin")); while (true) { if (!intro.exec()) { /* Cancel clicked */ return false; } dataDir = intro.getDataDirectory(); try { TryCreateDirectories(GUIUtil::qstringToBoostPath(dataDir)); break; } catch (const fs::filesystem_error &) { QMessageBox::critical(0, tr(PACKAGE_NAME), tr("Error: Specified data directory " "\"%1\" cannot be created.") .arg(dataDir)); /* fall through, back to choosing screen */ } } settings.setValue("strDataDir", dataDir); settings.setValue("fReset", false); } /* Only override -datadir if different from the default, to make it possible * to * override -datadir in the bitcoin.conf file in the default data directory * (to be consistent with bitcoind behavior) */ if (dataDir != getDefaultDataDirectory()) { // use OS locale for path setting - SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); + gArgs.SoftSetArg("-datadir", + GUIUtil::qstringToBoostPath(dataDir).string()); } return true; } void Intro::setStatus(int status, const QString &message, quint64 bytesAvailable) { switch (status) { case FreespaceChecker::ST_OK: ui->errorMessage->setText(message); ui->errorMessage->setStyleSheet(""); break; case FreespaceChecker::ST_ERROR: ui->errorMessage->setText(tr("Error") + ": " + message); ui->errorMessage->setStyleSheet("QLabel { color: #800000 }"); break; } /* Indicate number of bytes available */ if (status == FreespaceChecker::ST_ERROR) { ui->freeSpace->setText(""); } else { QString freeString = tr("%n GB of free space available", "", bytesAvailable / GB_BYTES); if (bytesAvailable < requiredSpace * GB_BYTES) { freeString += " " + tr("(of %n GB needed)", "", requiredSpace); ui->freeSpace->setStyleSheet("QLabel { color: #800000 }"); } else { ui->freeSpace->setStyleSheet(""); } ui->freeSpace->setText(freeString + "."); } /* Don't allow confirm in ERROR state */ ui->buttonBox->button(QDialogButtonBox::Ok) ->setEnabled(status != FreespaceChecker::ST_ERROR); } void Intro::on_dataDirectory_textChanged(const QString &dataDirStr) { /* Disable OK button until check result comes in */ ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(false); checkPath(dataDirStr); } void Intro::on_ellipsisButton_clicked() { QString dir = QDir::toNativeSeparators(QFileDialog::getExistingDirectory( 0, "Choose data directory", ui->dataDirectory->text())); if (!dir.isEmpty()) ui->dataDirectory->setText(dir); } void Intro::on_dataDirDefault_clicked() { setDataDirectory(getDefaultDataDirectory()); } void Intro::on_dataDirCustom_clicked() { ui->dataDirectory->setEnabled(true); ui->ellipsisButton->setEnabled(true); } void Intro::startThread() { thread = new QThread(this); FreespaceChecker 
*executor = new FreespaceChecker(this); executor->moveToThread(thread); connect(executor, SIGNAL(reply(int, QString, quint64)), this, SLOT(setStatus(int, QString, quint64))); connect(this, SIGNAL(requestCheck()), executor, SLOT(check())); /* make sure executor object is deleted in its own thread */ connect(this, SIGNAL(stopThread()), executor, SLOT(deleteLater())); connect(this, SIGNAL(stopThread()), thread, SLOT(quit())); thread->start(); } void Intro::checkPath(const QString &dataDir) { mutex.lock(); pathToCheck = dataDir; if (!signalled) { signalled = true; Q_EMIT requestCheck(); } mutex.unlock(); } QString Intro::getPathToCheck() { QString retval; mutex.lock(); retval = pathToCheck; signalled = false; /* new request can be queued now */ mutex.unlock(); return retval; } diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index 832d99a857..eb294a38eb 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -1,527 +1,529 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "optionsmodel.h" #include "bitcoinunits.h" #include "guiutil.h" #include "amount.h" #include "init.h" #include "intro.h" #include "net.h" #include "netbase.h" #include "txdb.h" // for -dbcache defaults #include "validation.h" // For DEFAULT_SCRIPTCHECK_THREADS #ifdef ENABLE_WALLET #include "wallet/wallet.h" #include "wallet/walletdb.h" #endif #include #include #include OptionsModel::OptionsModel(QObject *parent, bool resetSettings) : QAbstractListModel(parent) { Init(resetSettings); } void OptionsModel::addOverriddenOption(const std::string &option) { - strOverriddenByCommandLine += QString::fromStdString(option) + "=" + - QString::fromStdString(GetArg(option, "")) + - " "; + strOverriddenByCommandLine += + QString::fromStdString(option) + "=" + + QString::fromStdString(gArgs.GetArg(option, "")) + " "; } // Writes all missing QSettings with their default values void OptionsModel::Init(bool resetSettings) { if (resetSettings) { Reset(); } checkAndMigrate(); QSettings settings; // Ensure restart flag is unset on client startup setRestartRequired(false); // These are Qt-only settings: // Window if (!settings.contains("fHideTrayIcon")) { settings.setValue("fHideTrayIcon", false); } fHideTrayIcon = settings.value("fHideTrayIcon").toBool(); Q_EMIT hideTrayIconChanged(fHideTrayIcon); if (!settings.contains("fMinimizeToTray")) { settings.setValue("fMinimizeToTray", false); } fMinimizeToTray = settings.value("fMinimizeToTray").toBool() && !fHideTrayIcon; if (!settings.contains("fMinimizeOnClose")) { settings.setValue("fMinimizeOnClose", false); } fMinimizeOnClose = settings.value("fMinimizeOnClose").toBool(); // Display if (!settings.contains("nDisplayUnit")) { settings.setValue("nDisplayUnit", BitcoinUnits::BCH); } nDisplayUnit = settings.value("nDisplayUnit").toInt(); if (!settings.contains("strThirdPartyTxUrls")) { settings.setValue("strThirdPartyTxUrls", ""); } strThirdPartyTxUrls = settings.value("strThirdPartyTxUrls", "").toString(); if (!settings.contains("fCoinControlFeatures")) { settings.setValue("fCoinControlFeatures", false); } fCoinControlFeatures = settings.value("fCoinControlFeatures", false).toBool(); // These are shared with the core or have a command-line parameter // and we want command-line parameters to overwrite the GUI settings. 
// // If setting doesn't exist create it with defaults. // - // If SoftSetArg() or SoftSetBoolArg() return false we were overridden + // If gArgs.SoftSetArg() or gArgs.SoftSetBoolArg() return false we were + // overridden // by command-line and show this in the UI. // Main if (!settings.contains("nDatabaseCache")) { settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache); } - if (!SoftSetArg( + if (!gArgs.SoftSetArg( "-dbcache", settings.value("nDatabaseCache").toString().toStdString())) { addOverriddenOption("-dbcache"); } if (!settings.contains("nThreadsScriptVerif")) { settings.setValue("nThreadsScriptVerif", DEFAULT_SCRIPTCHECK_THREADS); } - if (!SoftSetArg( + if (!gArgs.SoftSetArg( "-par", settings.value("nThreadsScriptVerif").toString().toStdString())) { addOverriddenOption("-par"); } if (!settings.contains("strDataDir")) { settings.setValue("strDataDir", Intro::getDefaultDataDirectory()); } // Wallet #ifdef ENABLE_WALLET if (!settings.contains("bSpendZeroConfChange")) { settings.setValue("bSpendZeroConfChange", true); } - if (!SoftSetBoolArg("-spendzeroconfchange", - settings.value("bSpendZeroConfChange").toBool())) { + if (!gArgs.SoftSetBoolArg( + "-spendzeroconfchange", + settings.value("bSpendZeroConfChange").toBool())) { addOverriddenOption("-spendzeroconfchange"); } #endif // Network if (!settings.contains("fUseUPnP")) { settings.setValue("fUseUPnP", DEFAULT_UPNP); } - if (!SoftSetBoolArg("-upnp", settings.value("fUseUPnP").toBool())) { + if (!gArgs.SoftSetBoolArg("-upnp", settings.value("fUseUPnP").toBool())) { addOverriddenOption("-upnp"); } if (!settings.contains("fListen")) { settings.setValue("fListen", DEFAULT_LISTEN); } - if (!SoftSetBoolArg("-listen", settings.value("fListen").toBool())) { + if (!gArgs.SoftSetBoolArg("-listen", settings.value("fListen").toBool())) { addOverriddenOption("-listen"); } if (!settings.contains("fUseProxy")) { settings.setValue("fUseProxy", false); } if (!settings.contains("addrProxy")) { settings.setValue("addrProxy", "127.0.0.1:9050"); } // Only try to set -proxy, if user has enabled fUseProxy if (settings.value("fUseProxy").toBool() && - !SoftSetArg("-proxy", - settings.value("addrProxy").toString().toStdString())) { + !gArgs.SoftSetArg( + "-proxy", settings.value("addrProxy").toString().toStdString())) { addOverriddenOption("-proxy"); } else if (!settings.value("fUseProxy").toBool() && - !GetArg("-proxy", "").empty()) { + !gArgs.GetArg("-proxy", "").empty()) { addOverriddenOption("-proxy"); } if (!settings.contains("fUseSeparateProxyTor")) { settings.setValue("fUseSeparateProxyTor", false); } if (!settings.contains("addrSeparateProxyTor")) { settings.setValue("addrSeparateProxyTor", "127.0.0.1:9050"); } // Only try to set -onion, if user has enabled fUseSeparateProxyTor if (settings.value("fUseSeparateProxyTor").toBool() && - !SoftSetArg( + !gArgs.SoftSetArg( "-onion", settings.value("addrSeparateProxyTor").toString().toStdString())) { addOverriddenOption("-onion"); } else if (!settings.value("fUseSeparateProxyTor").toBool() && - !GetArg("-onion", "").empty()) { + !gArgs.GetArg("-onion", "").empty()) { addOverriddenOption("-onion"); } // Display if (!settings.contains("language")) { settings.setValue("language", ""); } - if (!SoftSetArg("-lang", - settings.value("language").toString().toStdString())) { + if (!gArgs.SoftSetArg( + "-lang", settings.value("language").toString().toStdString())) { addOverriddenOption("-lang"); } language = settings.value("language").toString(); } void OptionsModel::Reset() { QSettings settings; // Save 
the strDataDir setting QString dataDir = Intro::getDefaultDataDirectory(); dataDir = settings.value("strDataDir", dataDir).toString(); // Remove all entries from our QSettings object settings.clear(); // Set strDataDir settings.setValue("strDataDir", dataDir); // Set that this was reset settings.setValue("fReset", true); // default setting for OptionsModel::StartAtStartup - disabled if (GUIUtil::GetStartOnSystemStartup()) { GUIUtil::SetStartOnSystemStartup(false); } } int OptionsModel::rowCount(const QModelIndex &parent) const { return OptionIDRowCount; } // read QSettings values and return them QVariant OptionsModel::data(const QModelIndex &index, int role) const { if (role == Qt::EditRole) { QSettings settings; switch (index.row()) { case StartAtStartup: return GUIUtil::GetStartOnSystemStartup(); case HideTrayIcon: return fHideTrayIcon; case MinimizeToTray: return fMinimizeToTray; case MapPortUPnP: #ifdef USE_UPNP return settings.value("fUseUPnP"); #else return false; #endif case MinimizeOnClose: return fMinimizeOnClose; // default proxy case ProxyUse: return settings.value("fUseProxy", false); case ProxyIP: { // contains IP at index 0 and port at index 1 QStringList strlIpPort = settings.value("addrProxy") .toString() .split(":", QString::SkipEmptyParts); return strlIpPort.at(0); } case ProxyPort: { // contains IP at index 0 and port at index 1 QStringList strlIpPort = settings.value("addrProxy") .toString() .split(":", QString::SkipEmptyParts); return strlIpPort.at(1); } // separate Tor proxy case ProxyUseTor: return settings.value("fUseSeparateProxyTor", false); case ProxyIPTor: { // contains IP at index 0 and port at index 1 QStringList strlIpPort = settings.value("addrSeparateProxyTor") .toString() .split(":", QString::SkipEmptyParts); return strlIpPort.at(0); } case ProxyPortTor: { // contains IP at index 0 and port at index 1 QStringList strlIpPort = settings.value("addrSeparateProxyTor") .toString() .split(":", QString::SkipEmptyParts); return strlIpPort.at(1); } #ifdef ENABLE_WALLET case SpendZeroConfChange: return settings.value("bSpendZeroConfChange"); #endif case DisplayUnit: return nDisplayUnit; case ThirdPartyTxUrls: return strThirdPartyTxUrls; case Language: return settings.value("language"); case CoinControlFeatures: return fCoinControlFeatures; case DatabaseCache: return settings.value("nDatabaseCache"); case ThreadsScriptVerif: return settings.value("nThreadsScriptVerif"); case Listen: return settings.value("fListen"); default: return QVariant(); } } return QVariant(); } // write QSettings values bool OptionsModel::setData(const QModelIndex &index, const QVariant &value, int role) { bool successful = true; /* set to false on parse error */ if (role == Qt::EditRole) { QSettings settings; switch (index.row()) { case StartAtStartup: successful = GUIUtil::SetStartOnSystemStartup(value.toBool()); break; case HideTrayIcon: fHideTrayIcon = value.toBool(); settings.setValue("fHideTrayIcon", fHideTrayIcon); Q_EMIT hideTrayIconChanged(fHideTrayIcon); break; case MinimizeToTray: fMinimizeToTray = value.toBool(); settings.setValue("fMinimizeToTray", fMinimizeToTray); break; case MapPortUPnP: // core option - can be changed on-the-fly settings.setValue("fUseUPnP", value.toBool()); MapPort(value.toBool()); break; case MinimizeOnClose: fMinimizeOnClose = value.toBool(); settings.setValue("fMinimizeOnClose", fMinimizeOnClose); break; // default proxy case ProxyUse: if (settings.value("fUseProxy") != value) { settings.setValue("fUseProxy", value.toBool()); setRestartRequired(true); } 
break; case ProxyIP: { // contains current IP at index 0 and current port at index 1 QStringList strlIpPort = settings.value("addrProxy") .toString() .split(":", QString::SkipEmptyParts); // if that key doesn't exist or has a changed IP if (!settings.contains("addrProxy") || strlIpPort.at(0) != value.toString()) { // construct new value from new IP and current port QString strNewValue = value.toString() + ":" + strlIpPort.at(1); settings.setValue("addrProxy", strNewValue); setRestartRequired(true); } } break; case ProxyPort: { // contains current IP at index 0 and current port at index 1 QStringList strlIpPort = settings.value("addrProxy") .toString() .split(":", QString::SkipEmptyParts); // if that key doesn't exist or has a changed port if (!settings.contains("addrProxy") || strlIpPort.at(1) != value.toString()) { // construct new value from current IP and new port QString strNewValue = strlIpPort.at(0) + ":" + value.toString(); settings.setValue("addrProxy", strNewValue); setRestartRequired(true); } } break; // separate Tor proxy case ProxyUseTor: if (settings.value("fUseSeparateProxyTor") != value) { settings.setValue("fUseSeparateProxyTor", value.toBool()); setRestartRequired(true); } break; case ProxyIPTor: { // contains current IP at index 0 and current port at index 1 QStringList strlIpPort = settings.value("addrSeparateProxyTor") .toString() .split(":", QString::SkipEmptyParts); // if that key doesn't exist or has a changed IP if (!settings.contains("addrSeparateProxyTor") || strlIpPort.at(0) != value.toString()) { // construct new value from new IP and current port QString strNewValue = value.toString() + ":" + strlIpPort.at(1); settings.setValue("addrSeparateProxyTor", strNewValue); setRestartRequired(true); } } break; case ProxyPortTor: { // contains current IP at index 0 and current port at index 1 QStringList strlIpPort = settings.value("addrSeparateProxyTor") .toString() .split(":", QString::SkipEmptyParts); // if that key doesn't exist or has a changed port if (!settings.contains("addrSeparateProxyTor") || strlIpPort.at(1) != value.toString()) { // construct new value from current IP and new port QString strNewValue = strlIpPort.at(0) + ":" + value.toString(); settings.setValue("addrSeparateProxyTor", strNewValue); setRestartRequired(true); } } break; #ifdef ENABLE_WALLET case SpendZeroConfChange: if (settings.value("bSpendZeroConfChange") != value) { settings.setValue("bSpendZeroConfChange", value); setRestartRequired(true); } break; #endif case DisplayUnit: setDisplayUnit(value); break; case ThirdPartyTxUrls: if (strThirdPartyTxUrls != value.toString()) { strThirdPartyTxUrls = value.toString(); settings.setValue("strThirdPartyTxUrls", strThirdPartyTxUrls); setRestartRequired(true); } break; case Language: if (settings.value("language") != value) { settings.setValue("language", value); setRestartRequired(true); } break; case CoinControlFeatures: fCoinControlFeatures = value.toBool(); settings.setValue("fCoinControlFeatures", fCoinControlFeatures); Q_EMIT coinControlFeaturesChanged(fCoinControlFeatures); break; case DatabaseCache: if (settings.value("nDatabaseCache") != value) { settings.setValue("nDatabaseCache", value); setRestartRequired(true); } break; case ThreadsScriptVerif: if (settings.value("nThreadsScriptVerif") != value) { settings.setValue("nThreadsScriptVerif", value); setRestartRequired(true); } break; case Listen: if (settings.value("fListen") != value) { settings.setValue("fListen", value); setRestartRequired(true); } break; default: break; } } Q_EMIT 
dataChanged(index, index); return successful; } /** Updates current unit in memory, settings and emits * displayUnitChanged(newUnit) signal */ void OptionsModel::setDisplayUnit(const QVariant &value) { if (!value.isNull()) { QSettings settings; nDisplayUnit = value.toInt(); settings.setValue("nDisplayUnit", nDisplayUnit); Q_EMIT displayUnitChanged(nDisplayUnit); } } bool OptionsModel::getProxySettings(QNetworkProxy &proxy) const { // Directly query current base proxy, because // GUI settings can be overridden with -proxy. proxyType curProxy; if (GetProxy(NET_IPV4, curProxy)) { proxy.setType(QNetworkProxy::Socks5Proxy); proxy.setHostName(QString::fromStdString(curProxy.proxy.ToStringIP())); proxy.setPort(curProxy.proxy.GetPort()); return true; } else proxy.setType(QNetworkProxy::NoProxy); return false; } void OptionsModel::setRestartRequired(bool fRequired) { QSettings settings; return settings.setValue("fRestartRequired", fRequired); } bool OptionsModel::isRestartRequired() { QSettings settings; return settings.value("fRestartRequired", false).toBool(); } void OptionsModel::checkAndMigrate() { // Migration of default values // Check if the QSettings container was already loaded with this client // version QSettings settings; static const char strSettingsVersionKey[] = "nSettingsVersion"; int settingsVersion = settings.contains(strSettingsVersionKey) ? settings.value(strSettingsVersionKey).toInt() : 0; if (settingsVersion < CLIENT_VERSION) { // -dbcache was bumped from 100 to 300 in 0.13 // see https://github.com/bitcoin/bitcoin/pull/8273 // force people to upgrade to the new value if they are using 100MB if (settingsVersion < 130000 && settings.contains("nDatabaseCache") && settings.value("nDatabaseCache").toLongLong() == 100) settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache); settings.setValue(strSettingsVersionKey, CLIENT_VERSION); } } diff --git a/src/qt/paymentrequestplus.cpp b/src/qt/paymentrequestplus.cpp index 6302564ad4..e9318a6ffe 100644 --- a/src/qt/paymentrequestplus.cpp +++ b/src/qt/paymentrequestplus.cpp @@ -1,232 +1,232 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
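Stepping back to OptionsModel::checkAndMigrate() above: it follows a version-gated migration pattern, where a stored settings version is compared against the running client version, one-time default bumps happen inside that window, and the stored version is then advanced so the migration never repeats. A reduced, hedged sketch (kClientVersion and kNewDefaultDbCache are placeholder values; the real code compares against CLIENT_VERSION and writes nDefaultDbCache):

    #include <QSettings>

    static const int kClientVersion = 170000;        // assumed placeholder
    static const qint64 kNewDefaultDbCache = 300;    // real code uses nDefaultDbCache

    void migrateSettingsSketch() {
        QSettings settings;
        const char *versionKey = "nSettingsVersion";
        int stored = settings.contains(versionKey)
                         ? settings.value(versionKey).toInt()
                         : 0;
        if (stored < kClientVersion) {
            // Example migration: push an outdated default forward, as the real
            // code does for nDatabaseCache when it still holds the pre-0.13
            // value of 100.
            if (stored < 130000 &&
                settings.value("nDatabaseCache").toLongLong() == 100) {
                settings.setValue("nDatabaseCache", kNewDefaultDbCache);
            }
            settings.setValue(versionKey, kClientVersion);
        }
    }

Keeping the version key in QSettings rather than in gArgs means the migration runs once per installation, independently of whatever command-line options the user passes on a given launch.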
// // Wraps dumb protocol buffer paymentRequest with some extra methods // #include "paymentrequestplus.h" #include "util.h" #include #include #include #include #include class SSLVerifyError : public std::runtime_error { public: SSLVerifyError(std::string err) : std::runtime_error(err) {} }; bool PaymentRequestPlus::parse(const QByteArray &data) { bool parseOK = paymentRequest.ParseFromArray(data.data(), data.size()); if (!parseOK) { qWarning() << "PaymentRequestPlus::parse: Error parsing payment request"; return false; } if (paymentRequest.payment_details_version() > 1) { qWarning() << "PaymentRequestPlus::parse: Received up-version payment " "details, version=" << paymentRequest.payment_details_version(); return false; } parseOK = details.ParseFromString(paymentRequest.serialized_payment_details()); if (!parseOK) { qWarning() << "PaymentRequestPlus::parse: Error parsing payment details"; paymentRequest.Clear(); return false; } return true; } bool PaymentRequestPlus::SerializeToString(std::string *output) const { return paymentRequest.SerializeToString(output); } bool PaymentRequestPlus::IsInitialized() const { return paymentRequest.IsInitialized(); } bool PaymentRequestPlus::getMerchant(X509_STORE *certStore, QString &merchant) const { merchant.clear(); if (!IsInitialized()) return false; // One day we'll support more PKI types, but just x509 for now: const EVP_MD *digestAlgorithm = nullptr; if (paymentRequest.pki_type() == "x509+sha256") { digestAlgorithm = EVP_sha256(); } else if (paymentRequest.pki_type() == "x509+sha1") { digestAlgorithm = EVP_sha1(); } else if (paymentRequest.pki_type() == "none") { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "pki_type == none"; return false; } else { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "unknown pki_type " << QString::fromStdString(paymentRequest.pki_type()); return false; } payments::X509Certificates certChain; if (!certChain.ParseFromString(paymentRequest.pki_data())) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: error " "parsing pki_data"; return false; } std::vector certs; const QDateTime currentTime = QDateTime::currentDateTime(); for (int i = 0; i < certChain.certificate_size(); i++) { QByteArray certData(certChain.certificate(i).data(), certChain.certificate(i).size()); QSslCertificate qCert(certData, QSsl::Der); if (currentTime < qCert.effectiveDate() || currentTime > qCert.expiryDate()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "certificate expired or not yet active: " << qCert; return false; } #if QT_VERSION >= 0x050000 if (qCert.isBlacklisted()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "certificate blacklisted: " << qCert; return false; } #endif const uint8_t *data = (const uint8_t *)certChain.certificate(i).data(); X509 *cert = d2i_X509(nullptr, &data, certChain.certificate(i).size()); if (cert) certs.push_back(cert); } if (certs.empty()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: empty " "certificate chain"; return false; } // The first cert is the signing cert, the rest are untrusted certs that // chain to a valid root authority. OpenSSL needs them separately. STACK_OF(X509) *chain = sk_X509_new_null(); for (int i = certs.size() - 1; i > 0; i--) { sk_X509_push(chain, certs[i]); } X509 *signing_cert = certs[0]; // Now create a "store context", which is a single use object for checking, // load the signing cert into it and verify. 
X509_STORE_CTX *store_ctx = X509_STORE_CTX_new(); if (!store_ctx) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: error " "creating X509_STORE_CTX"; return false; } char *website = nullptr; bool fResult = true; try { if (!X509_STORE_CTX_init(store_ctx, certStore, signing_cert, chain)) { int error = X509_STORE_CTX_get_error(store_ctx); throw SSLVerifyError(X509_verify_cert_error_string(error)); } // Now do the verification! int result = X509_verify_cert(store_ctx); if (result != 1) { int error = X509_STORE_CTX_get_error(store_ctx); // For testing payment requests, we allow self signed root certs! // This option is just shown in the UI options, if -help-debug is // enabled. if (!(error == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT && - GetBoolArg("-allowselfsignedrootcertificates", - DEFAULT_SELFSIGNED_ROOTCERTS))) { + gArgs.GetBoolArg("-allowselfsignedrootcertificates", + DEFAULT_SELFSIGNED_ROOTCERTS))) { throw SSLVerifyError(X509_verify_cert_error_string(error)); } else { qDebug() << "PaymentRequestPlus::getMerchant: Allowing self " "signed root certificate, because " "-allowselfsignedrootcertificates is true."; } } X509_NAME *certname = X509_get_subject_name(signing_cert); // Valid cert; check signature: // Copy payments::PaymentRequest rcopy(paymentRequest); rcopy.set_signature(std::string("")); // Everything but the signature std::string data_to_verify; rcopy.SerializeToString(&data_to_verify); #if HAVE_DECL_EVP_MD_CTX_NEW EVP_MD_CTX *ctx = EVP_MD_CTX_new(); if (!ctx) throw SSLVerifyError("Error allocating OpenSSL context."); #else EVP_MD_CTX _ctx; EVP_MD_CTX *ctx; ctx = &_ctx; #endif EVP_PKEY *pubkey = X509_get_pubkey(signing_cert); EVP_MD_CTX_init(ctx); if (!EVP_VerifyInit_ex(ctx, digestAlgorithm, nullptr) || !EVP_VerifyUpdate(ctx, data_to_verify.data(), data_to_verify.size()) || !EVP_VerifyFinal( ctx, (const uint8_t *)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) { throw SSLVerifyError("Bad signature, invalid payment request."); } #if HAVE_DECL_EVP_MD_CTX_NEW EVP_MD_CTX_free(ctx); #endif // OpenSSL API for getting human printable strings from certs is // baroque. int textlen = X509_NAME_get_text_by_NID(certname, NID_commonName, nullptr, 0); website = new char[textlen + 1]; if (X509_NAME_get_text_by_NID(certname, NID_commonName, website, textlen + 1) == textlen && textlen > 0) { merchant = website; } else { throw SSLVerifyError("Bad certificate, missing common name."); } // TODO: detect EV certificates and set merchant = business name instead // of unfriendly NID_commonName ? 
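// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The only functional change in the
// hunk above is switching the free function GetBoolArg() to a call on the
// gArgs object, consistent with the rest of this diff (gArgs.GetArg,
// gArgs.GetBoolArg, gArgs.ForceSetArg, ...). Below is a much-simplified
// stand-in for that interface, showing only the call-site pattern; the real
// object behind gArgs does considerably more (config file parsing, etc.).
#include <map>
#include <string>

class ArgsManagerSketch {
public:
    bool GetBoolArg(const std::string &arg, bool fDefault) const {
        auto it = mapArgs.find(arg);
        return it == mapArgs.end() ? fDefault : it->second != "0";
    }
    std::string GetArg(const std::string &arg,
                       const std::string &strDefault) const {
        auto it = mapArgs.find(arg);
        return it == mapArgs.end() ? strDefault : it->second;
    }
    void ForceSetArg(const std::string &arg, const std::string &value) {
        mapArgs[arg] = value;
    }

private:
    std::map<std::string, std::string> mapArgs;
};

// One shared instance replaces the old file-scope helpers, e.g.
// gArgsSketch.GetBoolArg("-allowselfsignedrootcertificates", false);
static ArgsManagerSketch gArgsSketch;
// ---------------------------------------------------------------------------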
} catch (const SSLVerifyError &err) { fResult = false; qWarning() << "PaymentRequestPlus::getMerchant: SSL error: " << err.what(); } if (website) delete[] website; X509_STORE_CTX_free(store_ctx); for (unsigned int i = 0; i < certs.size(); i++) X509_free(certs[i]); return fResult; } QList> PaymentRequestPlus::getPayTo() const { QList> result; for (int i = 0; i < details.outputs_size(); i++) { const uint8_t *scriptStr = (const uint8_t *)details.outputs(i).script().data(); CScript s(scriptStr, scriptStr + details.outputs(i).script().size()); result.append(std::make_pair(s, Amount(details.outputs(i).amount()))); } return result; } diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 403dbc78d9..5c2ff7ae26 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -1,899 +1,899 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "paymentserver.h" #include "bitcoinunits.h" #include "guiutil.h" #include "optionsmodel.h" #include "chainparams.h" #include "config.h" #include "dstencode.h" #include "policy/policy.h" #include "ui_interface.h" #include "util.h" #include "wallet/wallet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if QT_VERSION < 0x050000 #include #else #include #endif const int BITCOIN_IPC_CONNECT_TIMEOUT = 1000; // milliseconds // BIP70 payment protocol messages const char *BIP70_MESSAGE_PAYMENTACK = "PaymentACK"; const char *BIP70_MESSAGE_PAYMENTREQUEST = "PaymentRequest"; // BIP71 payment protocol media types const char *BIP71_MIMETYPE_PAYMENT = "application/bitcoincash-payment"; const char *BIP71_MIMETYPE_PAYMENTACK = "application/bitcoincash-paymentack"; const char *BIP71_MIMETYPE_PAYMENTREQUEST = "application/bitcoincash-paymentrequest"; struct X509StoreDeleter { void operator()(X509_STORE *b) { X509_STORE_free(b); } }; struct X509Deleter { void operator()(X509 *b) { X509_free(b); } }; namespace // Anon namespace { std::unique_ptr certStore; } // // Create a name that is unique for: // testnet / non-testnet // data directory // static QString ipcServerName() { QString name("BitcoinQt"); // Append a simple hash of the datadir // Note that GetDataDir(true) returns a different path for -testnet versus // main net QString ddir(GUIUtil::boostPathToQString(GetDataDir(true))); name.append(QString::number(qHash(ddir))); return name; } // // We store payment URIs and requests received before the main GUI window is up // and ready to ask the user to send payment. 
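// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] ipcServerName() above keys the
// local-socket name on a hash of the active data directory, so mainnet and
// testnet instances (which use different datadirs) never grab each other's
// click-to-pay socket. Stripped-down sketch; "ExampleQt" is a placeholder
// prefix.
#include <QHash>
#include <QString>

static QString ExampleIpcServerName(const QString &dataDir) {
    QString name("ExampleQt");
    // Different datadirs hash to different server names.
    name.append(QString::number(qHash(dataDir)));
    return name;
}
// ---------------------------------------------------------------------------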
// static QList savedPaymentRequests; static void ReportInvalidCertificate(const QSslCertificate &cert) { #if QT_VERSION < 0x050000 qDebug() << QString("%1: Payment server found an invalid certificate: ") .arg(__func__) << cert.serialNumber() << cert.subjectInfo(QSslCertificate::CommonName) << cert.subjectInfo(QSslCertificate::OrganizationalUnitName); #else qDebug() << QString("%1: Payment server found an invalid certificate: ") .arg(__func__) << cert.serialNumber() << cert.subjectInfo(QSslCertificate::CommonName) << cert.subjectInfo(QSslCertificate::DistinguishedNameQualifier) << cert.subjectInfo(QSslCertificate::OrganizationalUnitName); #endif } // // Load OpenSSL's list of root certificate authorities // void PaymentServer::LoadRootCAs(X509_STORE *_store) { // Unit tests mostly use this, to pass in fake root CAs: if (_store) { certStore.reset(_store); return; } // Normal execution, use either -rootcertificates or system certs: certStore.reset(X509_STORE_new()); // Note: use "-system-" default here so that users can pass // -rootcertificates="" and get 'I don't like X.509 certificates, don't // trust anybody' behavior: QString certFile = - QString::fromStdString(GetArg("-rootcertificates", "-system-")); + QString::fromStdString(gArgs.GetArg("-rootcertificates", "-system-")); // Empty store if (certFile.isEmpty()) { qDebug() << QString("PaymentServer::%1: Payment request authentication " "via X.509 certificates disabled.") .arg(__func__); return; } QList certList; if (certFile != "-system-") { qDebug() << QString("PaymentServer::%1: Using \"%2\" as trusted root " "certificate.") .arg(__func__) .arg(certFile); certList = QSslCertificate::fromPath(certFile); // Use those certificates when fetching payment requests, too: QSslSocket::setDefaultCaCertificates(certList); } else certList = QSslSocket::systemCaCertificates(); int nRootCerts = 0; const QDateTime currentTime = QDateTime::currentDateTime(); for (const QSslCertificate &cert : certList) { // Don't log nullptr certificates if (cert.isNull()) { continue; } // Not yet active/valid, or expired certificate if (currentTime < cert.effectiveDate() || currentTime > cert.expiryDate()) { ReportInvalidCertificate(cert); continue; } #if QT_VERSION >= 0x050000 // Blacklisted certificate if (cert.isBlacklisted()) { ReportInvalidCertificate(cert); continue; } #endif QByteArray certData = cert.toDer(); const uint8_t *data = (const uint8_t *)certData.data(); std::unique_ptr x509( d2i_X509(0, &data, certData.size())); if (x509 && X509_STORE_add_cert(certStore.get(), x509.get())) { // Note: X509_STORE increases the reference count to the X509 // object, we still have to release our reference to it. ++nRootCerts; } else { ReportInvalidCertificate(cert); continue; } } qWarning() << "PaymentServer::LoadRootCAs: Loaded " << nRootCerts << " root certificates"; // Project for another day: // Fetch certificate revocation lists, and add them to certStore. // Issues to consider: // performance (start a thread to fetch in background?) // privacy (fetch through tor/proxy so IP address isn't revealed) // would it be easier to just use a compiled-in blacklist? // or use Qt's blacklist? 
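// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The X509Deleter / X509StoreDeleter
// functors defined earlier let std::unique_ptr own raw OpenSSL handles, so the
// X509 returned by d2i_X509() in LoadRootCAs() above is freed on every exit
// path. The idiom in isolation:
#include <memory>
#include <openssl/x509.h>

struct X509DeleterSketch {
    void operator()(X509 *cert) const { X509_free(cert); }
};
using X509Ptr = std::unique_ptr<X509, X509DeleterSketch>;

// X509Ptr cert(d2i_X509(nullptr, &derPtr, derLen));  // freed automatically
// ---------------------------------------------------------------------------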
// "certificate stapling" with server-side caching is more efficient } static std::string ipcParseURI(const QString &arg, const CChainParams ¶ms, bool useCashAddr) { const QString scheme = GUIUtil::bitcoinURIScheme(params, useCashAddr); if (!arg.startsWith(scheme + ":", Qt::CaseInsensitive)) { return {}; } SendCoinsRecipient r; if (!GUIUtil::parseBitcoinURI(scheme, arg, &r)) { return {}; } return r.address.toStdString(); } static bool ipcCanParseCashAddrURI(const QString &arg, const std::string &network) { const CChainParams ¶ms(Params(network)); std::string addr = ipcParseURI(arg, params, true); return IsValidDestinationString(addr, params); } static bool ipcCanParseLegacyURI(const QString &arg, const std::string &network) { const CChainParams ¶ms(Params(network)); std::string addr = ipcParseURI(arg, params, false); return IsValidDestinationString(addr, params); } // // Sending to the server is done synchronously, at startup. // If the server isn't already running, startup continues, and the items in // savedPaymentRequest will be handled when uiReady() is called. // // Warning: ipcSendCommandLine() is called early in init, so don't use "Q_EMIT // message()", but "QMessageBox::"! // void PaymentServer::ipcParseCommandLine(int argc, char *argv[]) { std::array networks = {&CBaseChainParams::MAIN, &CBaseChainParams::TESTNET, &CBaseChainParams::REGTEST}; const std::string *chosenNetwork = nullptr; for (int i = 1; i < argc; i++) { QString arg(argv[i]); if (arg.startsWith("-")) { continue; } const std::string *itemNetwork = nullptr; // Try to parse as a URI for (auto net : networks) { if (ipcCanParseCashAddrURI(arg, *net)) { itemNetwork = net; break; } if (ipcCanParseLegacyURI(arg, *net)) { itemNetwork = net; break; } } if (!itemNetwork && QFile::exists(arg)) { // Filename PaymentRequestPlus request; if (readPaymentRequestFromFile(arg, request)) { for (auto net : networks) { if (*net == request.getDetails().network()) { itemNetwork = net; } } } } if (itemNetwork == nullptr) { // Printing to debug.log is about the best we can do here, the GUI // hasn't started yet so we can't pop up a message box. qWarning() << "PaymentServer::ipcSendCommandLine: Payment request " "file or URI does not exist or is invalid: " << arg; continue; } if (chosenNetwork && chosenNetwork != itemNetwork) { qWarning() << "PaymentServer::ipcSendCommandLine: Payment request " "from network " << QString(itemNetwork->c_str()) << " does not match already chosen network " << QString(chosenNetwork->c_str()); continue; } savedPaymentRequests.append(arg); chosenNetwork = itemNetwork; } if (chosenNetwork) { SelectParams(*chosenNetwork); } } // // Sending to the server is done synchronously, at startup. // If the server isn't already running, startup continues, and the items in // savedPaymentRequest will be handled when uiReady() is called. 
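// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] ipcParseCommandLine() above
// classifies every URI/file argument to a network and only calls
// SelectParams() once, skipping items that disagree with the first match.
// Distilled shape of that selection loop, with plain string pointers standing
// in for the CBaseChainParams network ids:
#include <string>
#include <vector>

static const std::string *PickConsistentNetwork(
    const std::vector<const std::string *> &itemNetworks) {
    const std::string *chosen = nullptr;
    for (const std::string *itemNet : itemNetworks) {
        if (!itemNet) continue;                    // unrecognized item: ignore
        if (chosen && chosen != itemNet) continue; // conflicting network: skip
        chosen = itemNet;
    }
    // nullptr means nothing matched; the caller selects params only if
    // non-null.
    return chosen;
}
// ---------------------------------------------------------------------------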
// bool PaymentServer::ipcSendCommandLine() { bool fResult = false; for (const QString &r : savedPaymentRequests) { QLocalSocket *socket = new QLocalSocket(); socket->connectToServer(ipcServerName(), QIODevice::WriteOnly); if (!socket->waitForConnected(BITCOIN_IPC_CONNECT_TIMEOUT)) { delete socket; socket = nullptr; return false; } QByteArray block; QDataStream out(&block, QIODevice::WriteOnly); out.setVersion(QDataStream::Qt_4_0); out << r; out.device()->seek(0); socket->write(block); socket->flush(); socket->waitForBytesWritten(BITCOIN_IPC_CONNECT_TIMEOUT); socket->disconnectFromServer(); delete socket; socket = nullptr; fResult = true; } return fResult; } PaymentServer::PaymentServer(QObject *parent, bool startLocalServer) : QObject(parent), saveURIs(true), uriServer(0), netManager(0), optionsModel(0) { // Verify that the version of the library that we linked against is // compatible with the version of the headers we compiled against. GOOGLE_PROTOBUF_VERIFY_VERSION; // Install global event filter to catch QFileOpenEvents // on Mac: sent when you click bitcoincash: links // other OSes: helpful when dealing with payment request files if (parent) { parent->installEventFilter(this); } QString name = ipcServerName(); // Clean up old socket leftover from a crash: QLocalServer::removeServer(name); if (startLocalServer) { uriServer = new QLocalServer(this); if (!uriServer->listen(name)) { // constructor is called early in init, so don't use "Q_EMIT // message()" here QMessageBox::critical(0, tr("Payment request error"), tr("Cannot start click-to-pay handler")); } else { connect(uriServer, SIGNAL(newConnection()), this, SLOT(handleURIConnection())); connect(this, SIGNAL(receivedPaymentACK(QString)), this, SLOT(handlePaymentACK(QString))); } } } PaymentServer::~PaymentServer() { google::protobuf::ShutdownProtobufLibrary(); } // // OSX-specific way of handling bitcoincash: URIs and PaymentRequest mime types. // Also used by paymentservertests.cpp and when opening a payment request file // via "Open URI..." menu entry. 
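// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The PaymentServer constructor above
// calls QLocalServer::removeServer() before listen(), so a socket file left
// behind by a crashed instance cannot block the click-to-pay handler. The
// pattern in isolation:
#include <QLocalServer>
#include <QString>

static bool StartIpcServerSketch(QLocalServer &server, const QString &name) {
    // Clean up a stale socket from a previous crash, then listen.
    QLocalServer::removeServer(name);
    return server.listen(name);
}
// ---------------------------------------------------------------------------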
// bool PaymentServer::eventFilter(QObject *object, QEvent *event) { if (event->type() == QEvent::FileOpen) { QFileOpenEvent *fileEvent = static_cast(event); if (!fileEvent->file().isEmpty()) { handleURIOrFile(fileEvent->file()); } else if (!fileEvent->url().isEmpty()) { handleURIOrFile(fileEvent->url().toString()); } return true; } return QObject::eventFilter(object, event); } void PaymentServer::initNetManager() { if (!optionsModel) { return; } if (netManager != nullptr) { delete netManager; } // netManager is used to fetch paymentrequests given in bitcoincash: URIs netManager = new QNetworkAccessManager(this); QNetworkProxy proxy; // Query active SOCKS5 proxy if (optionsModel->getProxySettings(proxy)) { netManager->setProxy(proxy); qDebug() << "PaymentServer::initNetManager: Using SOCKS5 proxy" << proxy.hostName() << ":" << proxy.port(); } else { qDebug() << "PaymentServer::initNetManager: No active proxy server found."; } connect(netManager, SIGNAL(finished(QNetworkReply *)), this, SLOT(netRequestFinished(QNetworkReply *))); connect(netManager, SIGNAL(sslErrors(QNetworkReply *, const QList &)), this, SLOT(reportSslErrors(QNetworkReply *, const QList &))); } void PaymentServer::uiReady() { initNetManager(); saveURIs = false; for (const QString &s : savedPaymentRequests) { handleURIOrFile(s); } savedPaymentRequests.clear(); } bool PaymentServer::handleURI(const QString &scheme, const QString &s) { if (!s.startsWith(scheme + ":", Qt::CaseInsensitive)) { return false; } #if QT_VERSION < 0x050000 QUrl uri(s); #else QUrlQuery uri((QUrl(s))); #endif if (uri.hasQueryItem("r")) { // payment request URI QByteArray temp; temp.append(uri.queryItemValue("r")); QString decoded = QUrl::fromPercentEncoding(temp); QUrl fetchUrl(decoded, QUrl::StrictMode); if (fetchUrl.isValid()) { qDebug() << "PaymentServer::handleURIOrFile: fetchRequest(" << fetchUrl << ")"; fetchRequest(fetchUrl); } else { qWarning() << "PaymentServer::handleURIOrFile: Invalid URL: " << fetchUrl; Q_EMIT message(tr("URI handling"), tr("Payment request fetch URL is invalid: %1") .arg(fetchUrl.toString()), CClientUIInterface::ICON_WARNING); } return true; } // normal URI SendCoinsRecipient recipient; if (GUIUtil::parseBitcoinURI(scheme, s, &recipient)) { if (!IsValidDestinationString(recipient.address.toStdString())) { Q_EMIT message( tr("URI handling"), tr("Invalid payment address %1").arg(recipient.address), CClientUIInterface::MSG_ERROR); } else { Q_EMIT receivedPaymentRequest(recipient); } } else { Q_EMIT message( tr("URI handling"), tr("URI cannot be parsed! This can be caused by an invalid " "Bitcoin address or malformed URI parameters."), CClientUIInterface::ICON_WARNING); } return true; } void PaymentServer::handleURIOrFile(const QString &s) { if (saveURIs) { savedPaymentRequests.append(s); return; } // bitcoincash: CashAddr URI QString schemeCash = GUIUtil::bitcoinURIScheme(Params(), true); if (handleURI(schemeCash, s)) { return; } // bitcoincash: Legacy URI QString schemeLegacy = GUIUtil::bitcoinURIScheme(Params(), false); if (handleURI(schemeLegacy, s)) { return; } // payment request file if (QFile::exists(s)) { PaymentRequestPlus request; SendCoinsRecipient recipient; if (!readPaymentRequestFromFile(s, request)) { Q_EMIT message(tr("Payment request file handling"), tr("Payment request file cannot be read! 
This can " "be caused by an invalid payment request file."), CClientUIInterface::ICON_WARNING); } else if (processPaymentRequest(request, recipient)) { Q_EMIT receivedPaymentRequest(recipient); } return; } } void PaymentServer::handleURIConnection() { QLocalSocket *clientConnection = uriServer->nextPendingConnection(); while (clientConnection->bytesAvailable() < (int)sizeof(quint32)) { clientConnection->waitForReadyRead(); } connect(clientConnection, SIGNAL(disconnected()), clientConnection, SLOT(deleteLater())); QDataStream in(clientConnection); in.setVersion(QDataStream::Qt_4_0); if (clientConnection->bytesAvailable() < (int)sizeof(quint16)) { return; } QString msg; in >> msg; handleURIOrFile(msg); } // // Warning: readPaymentRequestFromFile() is used in ipcSendCommandLine() // so don't use "Q_EMIT message()", but "QMessageBox::"! // bool PaymentServer::readPaymentRequestFromFile(const QString &filename, PaymentRequestPlus &request) { QFile f(filename); if (!f.open(QIODevice::ReadOnly)) { qWarning() << QString("PaymentServer::%1: Failed to open %2") .arg(__func__) .arg(filename); return false; } // BIP70 DoS protection if (!verifySize(f.size())) { return false; } QByteArray data = f.readAll(); return request.parse(data); } bool PaymentServer::processPaymentRequest(const PaymentRequestPlus &request, SendCoinsRecipient &recipient) { if (!optionsModel) { return false; } if (request.IsInitialized()) { // Payment request network matches client network? if (!verifyNetwork(request.getDetails())) { Q_EMIT message( tr("Payment request rejected"), tr("Payment request network doesn't match client network."), CClientUIInterface::MSG_ERROR); return false; } // Make sure any payment requests involved are still valid. // This is re-checked just before sending coins in // WalletModel::sendCoins(). if (verifyExpired(request.getDetails())) { Q_EMIT message(tr("Payment request rejected"), tr("Payment request expired."), CClientUIInterface::MSG_ERROR); return false; } } else { Q_EMIT message(tr("Payment request error"), tr("Payment request is not initialized."), CClientUIInterface::MSG_ERROR); return false; } recipient.paymentRequest = request; recipient.message = GUIUtil::HtmlEscape(request.getDetails().memo()); request.getMerchant(certStore.get(), recipient.authenticatedMerchant); QList> sendingTos = request.getPayTo(); QStringList addresses; for (const std::pair &sendingTo : sendingTos) { // Extract and check destination addresses CTxDestination dest; if (ExtractDestination(sendingTo.first, dest)) { // Append destination address addresses.append(QString::fromStdString(EncodeDestination(dest))); } else if (!recipient.authenticatedMerchant.isEmpty()) { // Unauthenticated payment requests to custom bitcoin addresses are // not supported // (there is no good way to tell the user where they are paying in a // way they'd // have a chance of understanding). Q_EMIT message(tr("Payment request rejected"), tr("Unverified payment requests to custom payment " "scripts are unsupported."), CClientUIInterface::MSG_ERROR); return false; } // Bitcoin amounts are stored as (optional) uint64 in the protobuf // messages (see paymentrequest.proto), but Amount is defined as // int64_t. Because of that we need to verify that amounts are in a // valid range and no overflow has happened. 
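// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] The comment above is the reason the
// code that follows re-runs verifyAmount() both per output and on the running
// total: a sum of individually valid uint64 protobuf amounts could still
// overflow the signed Amount type. An overflow-free version of that check,
// with maxMoney standing in for MAX_MONEY:
#include <cstdint>
#include <vector>

static bool SumOutputsChecked(const std::vector<int64_t> &outputs,
                              int64_t maxMoney, int64_t &total) {
    total = 0;
    for (int64_t value : outputs) {
        if (value < 0 || value > maxMoney) return false; // per-output range
        if (total > maxMoney - value) return false;      // sum would exceed cap
        total += value;
    }
    return true;
}
// ---------------------------------------------------------------------------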
if (!verifyAmount(sendingTo.second)) { Q_EMIT message(tr("Payment request rejected"), tr("Invalid payment request."), CClientUIInterface::MSG_ERROR); return false; } // Extract and check amounts CTxOut txOut(Amount(sendingTo.second), sendingTo.first); if (txOut.IsDust(dustRelayFee)) { Q_EMIT message( tr("Payment request error"), tr("Requested payment amount of %1 is too small (considered " "dust).") .arg(BitcoinUnits::formatWithUnit( optionsModel->getDisplayUnit(), sendingTo.second)), CClientUIInterface::MSG_ERROR); return false; } recipient.amount += sendingTo.second; // Also verify that the final amount is still in a valid range after // adding additional amounts. if (!verifyAmount(recipient.amount)) { Q_EMIT message(tr("Payment request rejected"), tr("Invalid payment request."), CClientUIInterface::MSG_ERROR); return false; } } // Store addresses and format them to fit nicely into the GUI recipient.address = addresses.join("
"); if (!recipient.authenticatedMerchant.isEmpty()) { qDebug() << "PaymentServer::processPaymentRequest: Secure payment " "request from " << recipient.authenticatedMerchant; } else { qDebug() << "PaymentServer::processPaymentRequest: Insecure payment " "request to " << addresses.join(", "); } return true; } void PaymentServer::fetchRequest(const QUrl &url) { QNetworkRequest netRequest; netRequest.setAttribute(QNetworkRequest::User, BIP70_MESSAGE_PAYMENTREQUEST); netRequest.setUrl(url); netRequest.setRawHeader("User-Agent", CLIENT_NAME.c_str()); netRequest.setRawHeader("Accept", BIP71_MIMETYPE_PAYMENTREQUEST); netManager->get(netRequest); } void PaymentServer::fetchPaymentACK(CWallet *wallet, SendCoinsRecipient recipient, QByteArray transaction) { const payments::PaymentDetails &details = recipient.paymentRequest.getDetails(); if (!details.has_payment_url()) { return; } QNetworkRequest netRequest; netRequest.setAttribute(QNetworkRequest::User, BIP70_MESSAGE_PAYMENTACK); netRequest.setUrl(QString::fromStdString(details.payment_url())); netRequest.setHeader(QNetworkRequest::ContentTypeHeader, BIP71_MIMETYPE_PAYMENT); netRequest.setRawHeader("User-Agent", CLIENT_NAME.c_str()); netRequest.setRawHeader("Accept", BIP71_MIMETYPE_PAYMENTACK); payments::Payment payment; payment.set_merchant_data(details.merchant_data()); payment.add_transactions(transaction.data(), transaction.size()); // Create a new refund address, or re-use: QString account = tr("Refund from %1").arg(recipient.authenticatedMerchant); std::string strAccount = account.toStdString(); std::set refundAddresses = wallet->GetAccountAddresses(strAccount); if (!refundAddresses.empty()) { CScript s = GetScriptForDestination(*refundAddresses.begin()); payments::Output *refund_to = payment.add_refund_to(); refund_to->set_script(&s[0], s.size()); } else { CPubKey newKey; if (wallet->GetKeyFromPool(newKey)) { CKeyID keyID = newKey.GetID(); wallet->SetAddressBook(keyID, strAccount, "refund"); CScript s = GetScriptForDestination(keyID); payments::Output *refund_to = payment.add_refund_to(); refund_to->set_script(&s[0], s.size()); } else { // This should never happen, because sending coins should have just // unlocked the wallet and refilled the keypool. qWarning() << "PaymentServer::fetchPaymentACK: Error getting " "refund key, refund_to not set"; } } int length = payment.ByteSize(); netRequest.setHeader(QNetworkRequest::ContentLengthHeader, length); QByteArray serData(length, '\0'); if (payment.SerializeToArray(serData.data(), length)) { netManager->post(netRequest, serData); } else { // This should never happen, either. 
qWarning() << "PaymentServer::fetchPaymentACK: Error serializing " "payment message"; } } void PaymentServer::netRequestFinished(QNetworkReply *reply) { reply->deleteLater(); // BIP70 DoS protection if (!verifySize(reply->size())) { Q_EMIT message( tr("Payment request rejected"), tr("Payment request %1 is too large (%2 bytes, allowed %3 bytes).") .arg(reply->request().url().toString()) .arg(reply->size()) .arg(BIP70_MAX_PAYMENTREQUEST_SIZE), CClientUIInterface::MSG_ERROR); return; } if (reply->error() != QNetworkReply::NoError) { QString msg = tr("Error communicating with %1: %2") .arg(reply->request().url().toString()) .arg(reply->errorString()); qWarning() << "PaymentServer::netRequestFinished: " << msg; Q_EMIT message(tr("Payment request error"), msg, CClientUIInterface::MSG_ERROR); return; } QByteArray data = reply->readAll(); QString requestType = reply->request().attribute(QNetworkRequest::User).toString(); if (requestType == BIP70_MESSAGE_PAYMENTREQUEST) { PaymentRequestPlus request; SendCoinsRecipient recipient; if (!request.parse(data)) { qWarning() << "PaymentServer::netRequestFinished: Error parsing " "payment request"; Q_EMIT message(tr("Payment request error"), tr("Payment request cannot be parsed!"), CClientUIInterface::MSG_ERROR); } else if (processPaymentRequest(request, recipient)) { Q_EMIT receivedPaymentRequest(recipient); } return; } else if (requestType == BIP70_MESSAGE_PAYMENTACK) { payments::PaymentACK paymentACK; if (!paymentACK.ParseFromArray(data.data(), data.size())) { QString msg = tr("Bad response from server %1") .arg(reply->request().url().toString()); qWarning() << "PaymentServer::netRequestFinished: " << msg; Q_EMIT message(tr("Payment request error"), msg, CClientUIInterface::MSG_ERROR); } else { Q_EMIT receivedPaymentACK(GUIUtil::HtmlEscape(paymentACK.memo())); } } } void PaymentServer::reportSslErrors(QNetworkReply *reply, const QList &errs) { Q_UNUSED(reply); QString errString; for (const QSslError &err : errs) { qWarning() << "PaymentServer::reportSslErrors: " << err; errString += err.errorString() + "\n"; } Q_EMIT message(tr("Network request error"), errString, CClientUIInterface::MSG_ERROR); } void PaymentServer::setOptionsModel(OptionsModel *_optionsModel) { this->optionsModel = _optionsModel; } void PaymentServer::handlePaymentACK(const QString &paymentACKMsg) { // currently we don't further process or store the paymentACK message Q_EMIT message(tr("Payment acknowledged"), paymentACKMsg, CClientUIInterface::ICON_INFORMATION | CClientUIInterface::MODAL); } bool PaymentServer::verifyNetwork( const payments::PaymentDetails &requestDetails) { bool fVerified = requestDetails.network() == Params().NetworkIDString(); if (!fVerified) { qWarning() << QString("PaymentServer::%1: Payment request network " "\"%2\" doesn't match client network \"%3\".") .arg(__func__) .arg(QString::fromStdString(requestDetails.network())) .arg(QString::fromStdString( Params().NetworkIDString())); } return fVerified; } bool PaymentServer::verifyExpired( const payments::PaymentDetails &requestDetails) { bool fVerified = (requestDetails.has_expires() && (int64_t)requestDetails.expires() < GetTime()); if (fVerified) { const QString requestExpires = QString::fromStdString(DateTimeStrFormat( "%Y-%m-%d %H:%M:%S", (int64_t)requestDetails.expires())); qWarning() << QString( "PaymentServer::%1: Payment request expired \"%2\".") .arg(__func__) .arg(requestExpires); } return fVerified; } bool PaymentServer::verifySize(qint64 requestSize) { bool fVerified = (requestSize <= 
BIP70_MAX_PAYMENTREQUEST_SIZE); if (!fVerified) { qWarning() << QString("PaymentServer::%1: Payment request too large " "(%2 bytes, allowed %3 bytes).") .arg(__func__) .arg(requestSize) .arg(BIP70_MAX_PAYMENTREQUEST_SIZE); } return fVerified; } bool PaymentServer::verifyAmount(const Amount requestAmount) { bool fVerified = MoneyRange(Amount(requestAmount)); if (!fVerified) { qWarning() << QString("PaymentServer::%1: Payment request amount out " "of allowed range (%2, allowed 0 - %3).") .arg(__func__) .arg(requestAmount.GetSatoshis()) .arg(MAX_MONEY.GetSatoshis()); } return fVerified; } X509_STORE *PaymentServer::getCertStore() { return certStore.get(); } diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp index 70d13908b8..ed9bf22f98 100644 --- a/src/qt/test/rpcnestedtests.cpp +++ b/src/qt/test/rpcnestedtests.cpp @@ -1,221 +1,221 @@ // Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpcnestedtests.h" #include "chainparams.h" #include "config.h" #include "consensus/validation.h" #include "fs.h" #include "rpc/register.h" #include "rpc/server.h" #include "rpcconsole.h" #include "test/testutil.h" #include "univalue.h" #include "util.h" #include "validation.h" #include #include static UniValue rpcNestedTest_rpc(const Config &config, const JSONRPCRequest &request) { if (request.fHelp) { return "help message"; } return request.params.write(0, 0); } static const CRPCCommand vRPCCommands[] = { {"test", "rpcNestedTest", rpcNestedTest_rpc, true, {}}, }; void RPCNestedTests::rpcNestedTests() { UniValue jsonRPCError; // Do some test setup could be moved to a more generic place when we add // more tests on QT level const Config &config = GetConfig(); RegisterAllRPCCommands(tableRPC); tableRPC.appendCommand("rpcNestedTest", &vRPCCommands[0]); ClearDatadirCache(); std::string path = QDir::tempPath().toStdString() + "/" + strprintf("test_bitcoin_qt_%lu_%i", (unsigned long)GetTime(), (int)(GetRand(100000))); QDir dir(QString::fromStdString(path)); dir.mkpath("."); - ForceSetArg("-datadir", path); + gArgs.ForceSetArg("-datadir", path); // mempool.setSanityCheck(1.0); pblocktree = new CBlockTreeDB(1 << 20, true); pcoinsdbview = new CCoinsViewDB(1 << 23, true); pcoinsTip = new CCoinsViewCache(pcoinsdbview); InitBlockIndex(config); { CValidationState state; bool ok = ActivateBestChain(config, state); QVERIFY(ok); } SetRPCWarmupFinished(); std::string result; std::string result2; std::string filtered; // Simple result filtering with path. RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()[chain]", &filtered); QVERIFY(result == "main"); QVERIFY(filtered == "getblockchaininfo()[chain]"); // Simple 2 level nesting. RPCConsole::RPCExecuteCommandLine(result, "getblock(getbestblockhash())"); RPCConsole::RPCExecuteCommandLine( result, "getblock(getblock(getbestblockhash())[hash], true)"); // 4 level nesting with whitespace, filtering path and boolean parameter. RPCConsole::RPCExecuteCommandLine( result, "getblock( getblock( getblock(getbestblockhash())[hash] " ")[hash], true)"); RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo"); QVERIFY(result.substr(0, 1) == "{"); RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()"); QVERIFY(result.substr(0, 1) == "{"); // Whitespace at the end will be tolerated. 
RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo "); QVERIFY(result.substr(0, 1) == "{"); // Quote path identifier are allowed, but look after a child contaning the // quotes in the key. (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()[\"chain\"]")); QVERIFY(result == "null"); // parameter not in brackets are allowed. (RPCConsole::RPCExecuteCommandLine(result, "createrawtransaction [] {} 0")); // Parameter in brackets are allowed. (RPCConsole::RPCExecuteCommandLine(result2, "createrawtransaction([],{},0)")); QVERIFY(result == result2); // Whitespace between parametres is allowed. (RPCConsole::RPCExecuteCommandLine( result2, "createrawtransaction( [], {} , 0 )")); QVERIFY(result == result2); RPCConsole::RPCExecuteCommandLine( result, "getblock(getbestblockhash())[tx][0]", &filtered); QVERIFY(result == "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"); QVERIFY(filtered == "getblock(getbestblockhash())[tx][0]"); RPCConsole::RPCParseCommandLine(result, "importprivkey", false, &filtered); QVERIFY(filtered == "importprivkey(…)"); RPCConsole::RPCParseCommandLine(result, "signmessagewithprivkey abc", false, &filtered); QVERIFY(filtered == "signmessagewithprivkey(…)"); RPCConsole::RPCParseCommandLine(result, "signmessagewithprivkey abc,def", false, &filtered); QVERIFY(filtered == "signmessagewithprivkey(…)"); RPCConsole::RPCParseCommandLine(result, "signrawtransaction(abc)", false, &filtered); QVERIFY(filtered == "signrawtransaction(…)"); RPCConsole::RPCParseCommandLine(result, "walletpassphrase(help())", false, &filtered); QVERIFY(filtered == "walletpassphrase(…)"); RPCConsole::RPCParseCommandLine( result, "walletpassphrasechange(help(walletpassphrasechange(abc)))", false, &filtered); QVERIFY(filtered == "walletpassphrasechange(…)"); RPCConsole::RPCParseCommandLine(result, "help(encryptwallet(abc, def))", false, &filtered); QVERIFY(filtered == "help(encryptwallet(…))"); RPCConsole::RPCParseCommandLine(result, "help(importprivkey())", false, &filtered); QVERIFY(filtered == "help(importprivkey(…))"); RPCConsole::RPCParseCommandLine(result, "help(importprivkey(help()))", false, &filtered); QVERIFY(filtered == "help(importprivkey(…))"); RPCConsole::RPCParseCommandLine( result, "help(importprivkey(abc), walletpassphrase(def))", false, &filtered); QVERIFY(filtered == "help(importprivkey(…), walletpassphrase(…))"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest"); QVERIFY(result == "[]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest ''"); QVERIFY(result == "[\"\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest \"\""); QVERIFY(result == "[\"\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest '' abc"); QVERIFY(result == "[\"\",\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc '' abc"); QVERIFY(result == "[\"abc\",\"\",\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc abc"); QVERIFY(result == "[\"abc\",\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc\t\tabc"); QVERIFY(result == "[\"abc\",\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc )"); QVERIFY(result == "[\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest( abc )"); QVERIFY(result == "[\"abc\"]"); RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest( abc , cba )"); QVERIFY(result == "[\"abc\",\"cba\"]"); #if QT_VERSION >= 0x050300 // do the QVERIFY_EXCEPTION_THROWN checks only with Qt5.3 and higher // (QVERIFY_EXCEPTION_THROWN was introduced in 
Qt5.3) // invalid syntax QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo() .\n"), std::runtime_error); // invalid syntax QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine( result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); // tolerate non closing brackets if we have no arguments. (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(")); // tolerate non command brackts (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()()()")); // invalid argument QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(True)"), UniValue); // method not found QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "a(getblockchaininfo(True))"), UniValue); // don't tollerate empty arguments when using , QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc,,abc"), std::runtime_error); // don't tollerate empty arguments when using , QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc,,abc)"), std::runtime_error); // don't tollerate empty arguments when using , QVERIFY_EXCEPTION_THROWN( RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc,,)"), std::runtime_error); #endif UnloadBlockIndex(); delete pcoinsTip; pcoinsTip = nullptr; delete pcoinsdbview; pcoinsdbview = nullptr; delete pblocktree; pblocktree = nullptr; fs::remove_all(fs::path(path)); } diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index f2efe32744..4140203a8d 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -1,209 +1,209 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "utilitydialog.h" #include "ui_helpmessagedialog.h" #include "bitcoingui.h" #include "clientmodel.h" #include "guiconstants.h" #include "guiutil.h" #include "intro.h" #include "paymentrequestplus.h" #include "clientversion.h" #include "init.h" #include "util.h" #include #include #include #include #include #include #include /** "Help message" or "About" dialog box */ HelpMessageDialog::HelpMessageDialog(QWidget *parent, bool about) : QDialog(parent), ui(new Ui::HelpMessageDialog) { ui->setupUi(this); QString version = tr(PACKAGE_NAME) + " " + tr("version") + " " + QString::fromStdString(FormatFullVersion()); /** * On x86 add a bit specifier to the version so that users can distinguish * between 32 and 64 bit builds. On other architectures, 32/64 bit may be more * ambiguous. */ #if defined(__x86_64__) version += " " + tr("(%1-bit)").arg(64); #elif defined(__i386__) version += " " + tr("(%1-bit)").arg(32); #endif if (about) { setWindowTitle(tr("About %1").arg(tr(PACKAGE_NAME))); /// HTML-format the license message from the core QString licenseInfo = QString::fromStdString(LicenseInfo()); QString licenseInfoHTML = licenseInfo; // Make URLs clickable QRegExp uri("<(.*)>", Qt::CaseSensitive, QRegExp::RegExp2); uri.setMinimal(true); // use non-greedy matching licenseInfoHTML.replace(uri, "\\1"); // Replace newlines with HTML breaks licenseInfoHTML.replace("\n", "
"); ui->aboutMessage->setTextFormat(Qt::RichText); ui->scrollArea->setVerticalScrollBarPolicy(Qt::ScrollBarAsNeeded); text = version + "\n" + licenseInfo; ui->aboutMessage->setText(version + "
<br><br>
" + licenseInfoHTML); ui->aboutMessage->setWordWrap(true); ui->helpMessage->setVisible(false); } else { setWindowTitle(tr("Command-line options")); QString header = tr("Usage:") + "\n" + " bitcoin-qt [" + tr("command-line options") + "] " + "\n"; QTextCursor cursor(ui->helpMessage->document()); cursor.insertText(version); cursor.insertBlock(); cursor.insertText(header); cursor.insertBlock(); std::string strUsage = HelpMessage(HMM_BITCOIN_QT); - const bool showDebug = GetBoolArg("-help-debug", false); + const bool showDebug = gArgs.GetBoolArg("-help-debug", false); strUsage += HelpMessageGroup(tr("UI Options:").toStdString()); if (showDebug) { strUsage += HelpMessageOpt( "-allowselfsignedrootcertificates", strprintf("Allow self signed root certificates (default: %d)", DEFAULT_SELFSIGNED_ROOTCERTS)); } strUsage += HelpMessageOpt( "-choosedatadir", strprintf(tr("Choose data directory on startup (default: %d)") .toStdString(), DEFAULT_CHOOSE_DATADIR)); strUsage += HelpMessageOpt( "-lang=", tr("Set language, for example \"de_DE\" (default: system locale)") .toStdString()); strUsage += HelpMessageOpt("-min", tr("Start minimized").toStdString()); strUsage += HelpMessageOpt("-rootcertificates=", tr("Set SSL root certificates for payment " "request (default: -system-)") .toStdString()); strUsage += HelpMessageOpt( "-splash", strprintf( tr("Show splash screen on startup (default: %d)").toStdString(), DEFAULT_SPLASHSCREEN)); strUsage += HelpMessageOpt( "-resetguisettings", tr("Reset all settings changed in the GUI").toStdString()); if (showDebug) { strUsage += HelpMessageOpt( "-uiplatform", strprintf("Select platform to customize UI for (one of " "windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM)); } QString coreOptions = QString::fromStdString(strUsage); text = version + "\n" + header + "\n" + coreOptions; QTextTableFormat tf; tf.setBorderStyle(QTextFrameFormat::BorderStyle_None); tf.setCellPadding(2); QVector widths; widths << QTextLength(QTextLength::PercentageLength, 35); widths << QTextLength(QTextLength::PercentageLength, 65); tf.setColumnWidthConstraints(widths); QTextCharFormat bold; bold.setFontWeight(QFont::Bold); for (const QString &line : coreOptions.split("\n")) { if (line.startsWith(" -")) { cursor.currentTable()->appendRows(1); cursor.movePosition(QTextCursor::PreviousCell); cursor.movePosition(QTextCursor::NextRow); cursor.insertText(line.trimmed()); cursor.movePosition(QTextCursor::NextCell); } else if (line.startsWith(" ")) { cursor.insertText(line.trimmed() + ' '); } else if (line.size() > 0) { // Title of a group if (cursor.currentTable()) cursor.currentTable()->appendRows(1); cursor.movePosition(QTextCursor::Down); cursor.insertText(line.trimmed(), bold); cursor.insertTable(1, 2, tf); } } ui->helpMessage->moveCursor(QTextCursor::Start); ui->scrollArea->setVisible(false); ui->aboutLogo->setVisible(false); } } HelpMessageDialog::~HelpMessageDialog() { delete ui; } void HelpMessageDialog::printToConsole() { // On other operating systems, the expected action is to print the message // to the console. 
fprintf(stdout, "%s\n", qPrintable(text)); } void HelpMessageDialog::showOrPrint() { #if defined(WIN32) // On Windows, show a message box, as there is no stderr/stdout in windowed // applications exec(); #else // On other operating systems, print help text to console printToConsole(); #endif } void HelpMessageDialog::on_okButton_accepted() { close(); } /** "Shutdown" window */ ShutdownWindow::ShutdownWindow(QWidget *parent, Qt::WindowFlags f) : QWidget(parent, f) { QVBoxLayout *layout = new QVBoxLayout(); layout->addWidget(new QLabel( tr("%1 is shutting down...").arg(tr(PACKAGE_NAME)) + "
<br /><br />
" + tr("Do not shut down the computer until this window disappears."))); setLayout(layout); } QWidget *ShutdownWindow::showShutdownWindow(BitcoinGUI *window) { if (!window) return nullptr; // Show a simple window indicating shutdown status QWidget *shutdownWindow = new ShutdownWindow(); shutdownWindow->setWindowTitle(window->windowTitle()); // Center shutdown window at where main window was const QPoint global = window->mapToGlobal(window->rect().center()); shutdownWindow->move(global.x() - shutdownWindow->width() / 2, global.y() - shutdownWindow->height() / 2); shutdownWindow->show(); return shutdownWindow; } void ShutdownWindow::closeEvent(QCloseEvent *event) { event->ignore(); } diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 46d311e707..6b7f4f9092 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -1,681 +1,681 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "walletmodel.h" #include "addresstablemodel.h" #include "consensus/validation.h" #include "guiconstants.h" #include "guiutil.h" #include "paymentserver.h" #include "recentrequeststablemodel.h" #include "transactiontablemodel.h" #include "dstencode.h" #include "keystore.h" #include "net.h" // for g_connman #include "sync.h" #include "ui_interface.h" #include "util.h" // for GetBoolArg #include "validation.h" #include "wallet/wallet.h" #include "wallet/walletdb.h" // for BackupWallet #include #include #include #include WalletModel::WalletModel(const PlatformStyle *platformStyle, CWallet *_wallet, OptionsModel *_optionsModel, QObject *parent) : QObject(parent), wallet(_wallet), optionsModel(_optionsModel), addressTableModel(0), transactionTableModel(0), recentRequestsTableModel(0), cachedBalance(0), cachedUnconfirmedBalance(0), cachedImmatureBalance(0), cachedEncryptionStatus(Unencrypted), cachedNumBlocks(0) { fHaveWatchOnly = wallet->HaveWatchOnly(); fForceCheckBalanceChanged = false; addressTableModel = new AddressTableModel(wallet, this); transactionTableModel = new TransactionTableModel(platformStyle, wallet, this); recentRequestsTableModel = new RecentRequestsTableModel(wallet, this); // This timer will be fired repeatedly to update the balance pollTimer = new QTimer(this); connect(pollTimer, SIGNAL(timeout()), this, SLOT(pollBalanceChanged())); pollTimer->start(MODEL_UPDATE_DELAY); subscribeToCoreSignals(); } WalletModel::~WalletModel() { unsubscribeFromCoreSignals(); } Amount WalletModel::getBalance(const CCoinControl *coinControl) const { if (coinControl) { Amount nBalance(0); std::vector vCoins; wallet->AvailableCoins(vCoins, true, coinControl); for (const COutput &out : vCoins) { if (out.fSpendable) nBalance += out.tx->tx->vout[out.i].nValue; } return nBalance; } return wallet->GetBalance(); } Amount WalletModel::getUnconfirmedBalance() const { return wallet->GetUnconfirmedBalance(); } Amount WalletModel::getImmatureBalance() const { return wallet->GetImmatureBalance(); } bool WalletModel::haveWatchOnly() const { return fHaveWatchOnly; } Amount WalletModel::getWatchBalance() const { return wallet->GetWatchOnlyBalance(); } Amount WalletModel::getWatchUnconfirmedBalance() const { return wallet->GetUnconfirmedWatchOnlyBalance(); } Amount WalletModel::getWatchImmatureBalance() const { return wallet->GetImmatureWatchOnlyBalance(); } void WalletModel::updateStatus() { EncryptionStatus newEncryptionStatus = getEncryptionStatus(); if 
(cachedEncryptionStatus != newEncryptionStatus) Q_EMIT encryptionStatusChanged(newEncryptionStatus); } void WalletModel::pollBalanceChanged() { // Get required locks upfront. This avoids the GUI from getting stuck on // periodical polls if the core is holding the locks for a longer time - for // example, during a wallet rescan. TRY_LOCK(cs_main, lockMain); if (!lockMain) return; TRY_LOCK(wallet->cs_wallet, lockWallet); if (!lockWallet) return; if (fForceCheckBalanceChanged || chainActive.Height() != cachedNumBlocks) { fForceCheckBalanceChanged = false; // Balance and number of transactions might have changed cachedNumBlocks = chainActive.Height(); checkBalanceChanged(); if (transactionTableModel) transactionTableModel->updateConfirmations(); } } void WalletModel::checkBalanceChanged() { Amount newBalance(getBalance()); Amount newUnconfirmedBalance(getUnconfirmedBalance()); Amount newImmatureBalance(getImmatureBalance()); Amount newWatchOnlyBalance(0); Amount newWatchUnconfBalance(0); Amount newWatchImmatureBalance(0); if (haveWatchOnly()) { newWatchOnlyBalance = getWatchBalance(); newWatchUnconfBalance = getWatchUnconfirmedBalance(); newWatchImmatureBalance = getWatchImmatureBalance(); } if (cachedBalance != newBalance || cachedUnconfirmedBalance != newUnconfirmedBalance || cachedImmatureBalance != newImmatureBalance || cachedWatchOnlyBalance != newWatchOnlyBalance || cachedWatchUnconfBalance != newWatchUnconfBalance || cachedWatchImmatureBalance != newWatchImmatureBalance) { cachedBalance = newBalance; cachedUnconfirmedBalance = newUnconfirmedBalance; cachedImmatureBalance = newImmatureBalance; cachedWatchOnlyBalance = newWatchOnlyBalance; cachedWatchUnconfBalance = newWatchUnconfBalance; cachedWatchImmatureBalance = newWatchImmatureBalance; Q_EMIT balanceChanged(newBalance, newUnconfirmedBalance, newImmatureBalance, newWatchOnlyBalance, newWatchUnconfBalance, newWatchImmatureBalance); } } void WalletModel::updateTransaction() { // Balance and number of transactions might have changed fForceCheckBalanceChanged = true; } void WalletModel::updateAddressBook(const QString &address, const QString &label, bool isMine, const QString &purpose, int status) { if (addressTableModel) addressTableModel->updateEntry(address, label, isMine, purpose, status); } void WalletModel::updateWatchOnlyFlag(bool fHaveWatchonly) { fHaveWatchOnly = fHaveWatchonly; Q_EMIT notifyWatchonlyChanged(fHaveWatchonly); } bool WalletModel::validateAddress(const QString &address) { return IsValidDestinationString(address.toStdString()); } WalletModel::SendCoinsReturn WalletModel::prepareTransaction(WalletModelTransaction &transaction, const CCoinControl *coinControl) { Amount total(0); bool fSubtractFeeFromAmount = false; QList recipients = transaction.getRecipients(); std::vector vecSend; if (recipients.empty()) { return OK; } // Used to detect duplicates QSet setAddress; int nAddresses = 0; // Pre-check input data for validity for (const SendCoinsRecipient &rcp : recipients) { if (rcp.fSubtractFeeFromAmount) fSubtractFeeFromAmount = true; // PaymentRequest... 
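// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] pollBalanceChanged() earlier in
// this hunk takes cs_main / cs_wallet with TRY_LOCK and simply returns when
// they are contended, so a long core operation (e.g. a rescan) cannot freeze
// the GUI's polling timer. Same idea with the standard library:
#include <mutex>

static std::mutex g_exampleLock;

static void PollSketch() {
    std::unique_lock<std::mutex> lock(g_exampleLock, std::try_to_lock);
    if (!lock.owns_lock()) {
        // Core is busy; skip this poll instead of blocking the UI thread.
        return;
    }
    // ... read shared state and refresh cached balances ...
}
// ---------------------------------------------------------------------------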
if (rcp.paymentRequest.IsInitialized()) { Amount subtotal(0); const payments::PaymentDetails &details = rcp.paymentRequest.getDetails(); for (int i = 0; i < details.outputs_size(); i++) { const payments::Output &out = details.outputs(i); if (out.amount() <= 0) continue; subtotal += Amount(out.amount()); const uint8_t *scriptStr = (const uint8_t *)out.script().data(); CScript scriptPubKey(scriptStr, scriptStr + out.script().size()); Amount nAmount = Amount(out.amount()); CRecipient recipient = {scriptPubKey, Amount(nAmount), rcp.fSubtractFeeFromAmount}; vecSend.push_back(recipient); } if (subtotal <= Amount(0)) { return InvalidAmount; } total += subtotal; } else { // User-entered bitcoin address / amount: if (!validateAddress(rcp.address)) { return InvalidAddress; } if (rcp.amount <= Amount(0)) { return InvalidAmount; } setAddress.insert(rcp.address); ++nAddresses; CScript scriptPubKey = GetScriptForDestination( DecodeDestination(rcp.address.toStdString())); CRecipient recipient = {scriptPubKey, Amount(rcp.amount), rcp.fSubtractFeeFromAmount}; vecSend.push_back(recipient); total += rcp.amount; } } if (setAddress.size() != nAddresses) { return DuplicateAddress; } Amount nBalance = getBalance(coinControl); if (total > nBalance) { return AmountExceedsBalance; } { LOCK2(cs_main, wallet->cs_wallet); transaction.newPossibleKeyChange(wallet); Amount nFeeRequired(0); int nChangePosRet = -1; std::string strFailReason; CWalletTx *newTx = transaction.getTransaction(); CReserveKey *keyChange = transaction.getPossibleKeyChange(); bool fCreated = wallet->CreateTransaction(vecSend, *newTx, *keyChange, nFeeRequired, nChangePosRet, strFailReason, coinControl); transaction.setTransactionFee(nFeeRequired); if (fSubtractFeeFromAmount && fCreated) transaction.reassignAmounts(nChangePosRet); if (!fCreated) { if (!fSubtractFeeFromAmount && (total + nFeeRequired) > nBalance) { return SendCoinsReturn(AmountWithFeeExceedsBalance); } Q_EMIT message(tr("Send Coins"), QString::fromStdString(strFailReason), CClientUIInterface::MSG_ERROR); return TransactionCreationFailed; } // reject absurdly high fee. (This can never happen because the wallet // caps the fee at maxTxFee. This merely serves as a belt-and-suspenders // check) if (nFeeRequired > Amount(maxTxFee)) return AbsurdFee; } return SendCoinsReturn(OK); } WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &transaction) { /* store serialized transaction */ QByteArray transaction_array; { LOCK2(cs_main, wallet->cs_wallet); CWalletTx *newTx = transaction.getTransaction(); for (const SendCoinsRecipient &rcp : transaction.getRecipients()) { if (rcp.paymentRequest.IsInitialized()) { // Make sure any payment requests involved are still valid. if (PaymentServer::verifyExpired( rcp.paymentRequest.getDetails())) { return PaymentRequestExpired; } // Store PaymentRequests in wtx.vOrderForm in wallet. 
std::string key("PaymentRequest"); std::string value; rcp.paymentRequest.SerializeToString(&value); newTx->vOrderForm.push_back(make_pair(key, value)); } else if (!rcp.message.isEmpty()) { // Message from normal bitcoincash:URI // (bitcoincash:123...?message=example) newTx->vOrderForm.push_back( make_pair("Message", rcp.message.toStdString())); } } CReserveKey *keyChange = transaction.getPossibleKeyChange(); CValidationState state; if (!wallet->CommitTransaction(*newTx, *keyChange, g_connman.get(), state)) return SendCoinsReturn( TransactionCommitFailed, QString::fromStdString(state.GetRejectReason())); CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << *newTx->tx; transaction_array.append(&(ssTx[0]), ssTx.size()); } // Add addresses / update labels that we've sent to to the address book, and // emit coinsSent signal for each recipient for (const SendCoinsRecipient &rcp : transaction.getRecipients()) { // Don't touch the address book when we have a payment request if (!rcp.paymentRequest.IsInitialized()) { std::string strAddress = rcp.address.toStdString(); CTxDestination dest = DecodeDestination(strAddress); std::string strLabel = rcp.label.toStdString(); { LOCK(wallet->cs_wallet); std::map::iterator mi = wallet->mapAddressBook.find(dest); // Check if we have a new address or an updated label if (mi == wallet->mapAddressBook.end()) { wallet->SetAddressBook(dest, strLabel, "send"); } else if (mi->second.name != strLabel) { // "" means don't change purpose wallet->SetAddressBook(dest, strLabel, ""); } } } Q_EMIT coinsSent(wallet, rcp, transaction_array); } // update balance immediately, otherwise there could be a short noticeable // delay until pollBalanceChanged hits checkBalanceChanged(); return SendCoinsReturn(OK); } OptionsModel *WalletModel::getOptionsModel() { return optionsModel; } AddressTableModel *WalletModel::getAddressTableModel() { return addressTableModel; } TransactionTableModel *WalletModel::getTransactionTableModel() { return transactionTableModel; } RecentRequestsTableModel *WalletModel::getRecentRequestsTableModel() { return recentRequestsTableModel; } WalletModel::EncryptionStatus WalletModel::getEncryptionStatus() const { if (!wallet->IsCrypted()) { return Unencrypted; } else if (wallet->IsLocked()) { return Locked; } else { return Unlocked; } } bool WalletModel::setWalletEncrypted(bool encrypted, const SecureString &passphrase) { if (encrypted) { // Encrypt return wallet->EncryptWallet(passphrase); } else { // Decrypt -- TODO; not supported yet return false; } } bool WalletModel::setWalletLocked(bool locked, const SecureString &passPhrase) { if (locked) { // Lock return wallet->Lock(); } else { // Unlock return wallet->Unlock(passPhrase); } } bool WalletModel::changePassphrase(const SecureString &oldPass, const SecureString &newPass) { bool retval; { LOCK(wallet->cs_wallet); // Make sure wallet is locked before attempting pass change wallet->Lock(); retval = wallet->ChangeWalletPassphrase(oldPass, newPass); } return retval; } bool WalletModel::backupWallet(const QString &filename) { return wallet->BackupWallet(filename.toLocal8Bit().data()); } // Handlers for core signals static void NotifyKeyStoreStatusChanged(WalletModel *walletmodel, CCryptoKeyStore *wallet) { qDebug() << "NotifyKeyStoreStatusChanged"; QMetaObject::invokeMethod(walletmodel, "updateStatus", Qt::QueuedConnection); } static void NotifyAddressBookChanged(WalletModel *walletmodel, CWallet *wallet, const CTxDestination &address, const std::string &label, bool isMine, const std::string &purpose, 
ChangeType status) { QString strAddress = QString::fromStdString(EncodeDestination(address)); QString strLabel = QString::fromStdString(label); QString strPurpose = QString::fromStdString(purpose); qDebug() << "NotifyAddressBookChanged: " + strAddress + " " + strLabel + " isMine=" + QString::number(isMine) + " purpose=" + strPurpose + " status=" + QString::number(status); QMetaObject::invokeMethod(walletmodel, "updateAddressBook", Qt::QueuedConnection, Q_ARG(QString, strAddress), Q_ARG(QString, strLabel), Q_ARG(bool, isMine), Q_ARG(QString, strPurpose), Q_ARG(int, status)); } static void NotifyTransactionChanged(WalletModel *walletmodel, CWallet *wallet, const uint256 &hash, ChangeType status) { Q_UNUSED(wallet); Q_UNUSED(hash); Q_UNUSED(status); QMetaObject::invokeMethod(walletmodel, "updateTransaction", Qt::QueuedConnection); } static void ShowProgress(WalletModel *walletmodel, const std::string &title, int nProgress) { // emits signal "showProgress" QMetaObject::invokeMethod(walletmodel, "showProgress", Qt::QueuedConnection, Q_ARG(QString, QString::fromStdString(title)), Q_ARG(int, nProgress)); } static void NotifyWatchonlyChanged(WalletModel *walletmodel, bool fHaveWatchonly) { QMetaObject::invokeMethod(walletmodel, "updateWatchOnlyFlag", Qt::QueuedConnection, Q_ARG(bool, fHaveWatchonly)); } void WalletModel::subscribeToCoreSignals() { // Connect signals to wallet wallet->NotifyStatusChanged.connect( boost::bind(&NotifyKeyStoreStatusChanged, this, _1)); wallet->NotifyAddressBookChanged.connect( boost::bind(NotifyAddressBookChanged, this, _1, _2, _3, _4, _5, _6)); wallet->NotifyTransactionChanged.connect( boost::bind(NotifyTransactionChanged, this, _1, _2, _3)); wallet->ShowProgress.connect(boost::bind(ShowProgress, this, _1, _2)); wallet->NotifyWatchonlyChanged.connect( boost::bind(NotifyWatchonlyChanged, this, _1)); } void WalletModel::unsubscribeFromCoreSignals() { // Disconnect signals from wallet wallet->NotifyStatusChanged.disconnect( boost::bind(&NotifyKeyStoreStatusChanged, this, _1)); wallet->NotifyAddressBookChanged.disconnect( boost::bind(NotifyAddressBookChanged, this, _1, _2, _3, _4, _5, _6)); wallet->NotifyTransactionChanged.disconnect( boost::bind(NotifyTransactionChanged, this, _1, _2, _3)); wallet->ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2)); wallet->NotifyWatchonlyChanged.disconnect( boost::bind(NotifyWatchonlyChanged, this, _1)); } // WalletModel::UnlockContext implementation WalletModel::UnlockContext WalletModel::requestUnlock() { bool was_locked = getEncryptionStatus() == Locked; if (was_locked) { // Request UI to unlock wallet Q_EMIT requireUnlock(); } // If wallet is still locked, unlock was failed or cancelled, mark context // as invalid bool valid = getEncryptionStatus() != Locked; return UnlockContext(this, valid, was_locked); } WalletModel::UnlockContext::UnlockContext(WalletModel *_wallet, bool _valid, bool _relock) : wallet(_wallet), valid(_valid), relock(_relock) {} WalletModel::UnlockContext::~UnlockContext() { if (valid && relock) { wallet->setWalletLocked(true); } } void WalletModel::UnlockContext::CopyFrom(const UnlockContext &rhs) { // Transfer context; old object no longer relocks wallet *this = rhs; rhs.relock = false; } bool WalletModel::getPubKey(const CKeyID &address, CPubKey &vchPubKeyOut) const { return wallet->GetPubKey(address, vchPubKeyOut); } bool WalletModel::IsSpendable(const CTxDestination &dest) const { return IsMine(*wallet, dest) & ISMINE_SPENDABLE; } bool WalletModel::getPrivKey(const CKeyID &address, CKey 
&vchPrivKeyOut) const { return wallet->GetKey(address, vchPrivKeyOut); } // returns a list of COutputs from COutPoints void WalletModel::getOutputs(const std::vector &vOutpoints, std::vector &vOutputs) { LOCK2(cs_main, wallet->cs_wallet); for (const COutPoint &outpoint : vOutpoints) { if (!wallet->mapWallet.count(outpoint.hash)) continue; int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain(); if (nDepth < 0) continue; COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true); vOutputs.push_back(out); } } bool WalletModel::isSpent(const COutPoint &outpoint) const { LOCK2(cs_main, wallet->cs_wallet); return wallet->IsSpent(outpoint.hash, outpoint.n); } // AvailableCoins + LockedCoins grouped by wallet address (put change in one // group with wallet address) void WalletModel::listCoins( std::map> &mapCoins) const { std::vector vCoins; wallet->AvailableCoins(vCoins); // ListLockedCoins, mapWallet LOCK2(cs_main, wallet->cs_wallet); std::vector vLockedCoins; wallet->ListLockedCoins(vLockedCoins); // add locked coins for (const COutPoint &outpoint : vLockedCoins) { if (!wallet->mapWallet.count(outpoint.hash)) continue; int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain(); if (nDepth < 0) continue; COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true); if (outpoint.n < out.tx->tx->vout.size() && wallet->IsMine(out.tx->tx->vout[outpoint.n]) == ISMINE_SPENDABLE) vCoins.push_back(out); } for (const COutput &out : vCoins) { COutput cout = out; while (wallet->IsChange(cout.tx->tx->vout[cout.i]) && cout.tx->tx->vin.size() > 0 && wallet->IsMine(cout.tx->tx->vin[0])) { if (!wallet->mapWallet.count(cout.tx->tx->vin[0].prevout.hash)) break; cout = COutput(&wallet->mapWallet[cout.tx->tx->vin[0].prevout.hash], cout.tx->tx->vin[0].prevout.n, 0, true, true); } CTxDestination address; if (!out.fSpendable || !ExtractDestination(cout.tx->tx->vout[cout.i].scriptPubKey, address)) continue; mapCoins[QString::fromStdString(EncodeDestination(address))].push_back( out); } } bool WalletModel::isLockedCoin(uint256 hash, unsigned int n) const { LOCK2(cs_main, wallet->cs_wallet); return wallet->IsLockedCoin(hash, n); } void WalletModel::lockCoin(COutPoint &output) { LOCK2(cs_main, wallet->cs_wallet); wallet->LockCoin(output); } void WalletModel::unlockCoin(COutPoint &output) { LOCK2(cs_main, wallet->cs_wallet); wallet->UnlockCoin(output); } void WalletModel::listLockedCoins(std::vector &vOutpts) { LOCK2(cs_main, wallet->cs_wallet); wallet->ListLockedCoins(vOutpts); } void WalletModel::loadReceiveRequests( std::vector &vReceiveRequests) { LOCK(wallet->cs_wallet); for (const std::pair &item : wallet->mapAddressBook) { for (const std::pair &item2 : item.second.destdata) if (item2.first.size() > 2 && item2.first.substr(0, 2) == "rr") // receive request vReceiveRequests.push_back(item2.second); } } bool WalletModel::saveReceiveRequest(const std::string &sAddress, const int64_t nId, const std::string &sRequest) { CTxDestination dest = DecodeDestination(sAddress); std::stringstream ss; ss << nId; // "rr" prefix = "receive request" in destdata std::string key = "rr" + ss.str(); LOCK(wallet->cs_wallet); if (sRequest.empty()) return wallet->EraseDestData(dest, key); else return wallet->AddDestData(dest, key, sRequest); } bool WalletModel::transactionCanBeAbandoned(uint256 hash) const { LOCK2(cs_main, wallet->cs_wallet); const CWalletTx *wtx = wallet->GetWalletTx(hash); if (!wtx || wtx->isAbandoned() || wtx->GetDepthInMainChain() > 0 || wtx->InMempool()) return 
false; return true; } bool WalletModel::abandonTransaction(uint256 hash) const { LOCK2(cs_main, wallet->cs_wallet); return wallet->AbandonTransaction(hash); } bool WalletModel::isWalletEnabled() { - return !GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET); + return !gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET); } bool WalletModel::hdEnabled() const { return wallet->IsHDEnabled(); } int WalletModel::getDefaultConfirmTarget() const { return nTxConfirmTarget; } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 4fc4e88b72..f47c98a155 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1,1762 +1,1762 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/blockchain.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "coins.h" #include "config.h" #include "consensus/validation.h" #include "hash.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "rpc/server.h" #include "rpc/tojson.h" #include "streams.h" #include "sync.h" #include "txmempool.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include // boost::thread::interrupt #include #include #include struct CUpdatedBlock { uint256 hash; int height; }; static std::mutex cs_blockchange; static std::condition_variable cond_blockchange; static CUpdatedBlock latestblock; static double GetDifficultyFromBits(uint32_t nBits) { int nShift = (nBits >> 24) & 0xff; double dDiff = 0x0000ffff / double(nBits & 0x00ffffff); while (nShift < 29) { dDiff *= 256.0; nShift++; } while (nShift > 29) { dDiff /= 256.0; nShift--; } return dDiff; } double GetDifficulty(const CBlockIndex *blockindex) { // Floating point number that is a multiple of the minimum difficulty, // minimum difficulty = 1.0. 
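
As a rough standalone illustration of the compact-bits conversion implemented by GetDifficultyFromBits above, the following self-contained sketch (the helper name DifficultyFromCompactBits and the sample inputs are only illustrative) reproduces the same arithmetic: the exponent byte of the minimum-difficulty target 0x1d00ffff is 29, so that encoding maps to exactly 1.0 and lower targets map to proportionally higher difficulties.

    #include <cstdint>
    #include <cstdio>

    // Convert a compact "nBits" target encoding into a difficulty value,
    // mirroring GetDifficultyFromBits: divide 0xffff by the 24-bit mantissa,
    // then scale by 256 for every step the exponent byte differs from 29
    // (the exponent of the minimum-difficulty target 0x1d00ffff).
    static double DifficultyFromCompactBits(uint32_t nBits) {
        int nShift = (nBits >> 24) & 0xff;
        double dDiff = double(0x0000ffff) / double(nBits & 0x00ffffff);
        while (nShift < 29) {
            dDiff *= 256.0;
            nShift++;
        }
        while (nShift > 29) {
            dDiff /= 256.0;
            nShift--;
        }
        return dDiff;
    }

    int main() {
        printf("%f\n", DifficultyFromCompactBits(0x1d00ffff)); // 1.000000
        printf("%f\n", DifficultyFromCompactBits(0x1b0404cb)); // roughly 16307.4
        return 0;
    }
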
if (blockindex == nullptr) { return 1.0; } return GetDifficultyFromBits(blockindex->nBits); } UniValue blockheaderToJSON(const CBlockIndex *blockindex) { UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; // Only report confirmations if the block is on the main chain if (chainActive.Contains(blockindex)) { confirmations = chainActive.Height() - blockindex->nHeight + 1; } result.push_back(Pair("confirmations", confirmations)); result.push_back(Pair("height", blockindex->nHeight)); result.push_back(Pair("version", blockindex->nVersion)); result.push_back( Pair("versionHex", strprintf("%08x", blockindex->nVersion))); result.push_back(Pair("merkleroot", blockindex->hashMerkleRoot.GetHex())); result.push_back(Pair("time", int64_t(blockindex->nTime))); result.push_back( Pair("mediantime", int64_t(blockindex->GetMedianTimePast()))); result.push_back(Pair("nonce", uint64_t(blockindex->nNonce))); result.push_back(Pair("bits", strprintf("%08x", blockindex->nBits))); result.push_back(Pair("difficulty", GetDifficulty(blockindex))); result.push_back(Pair("chainwork", blockindex->nChainWork.GetHex())); if (blockindex->pprev) { result.push_back(Pair("previousblockhash", blockindex->pprev->GetBlockHash().GetHex())); } CBlockIndex *pnext = chainActive.Next(blockindex); if (pnext) { result.push_back(Pair("nextblockhash", pnext->GetBlockHash().GetHex())); } return result; } UniValue blockToJSON(const Config &config, const CBlock &block, const CBlockIndex *blockindex, bool txDetails) { UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; // Only report confirmations if the block is on the main chain if (chainActive.Contains(blockindex)) { confirmations = chainActive.Height() - blockindex->nHeight + 1; } result.push_back(Pair("confirmations", confirmations)); result.push_back(Pair( "size", (int)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION))); result.push_back(Pair("height", blockindex->nHeight)); result.push_back(Pair("version", block.nVersion)); result.push_back(Pair("versionHex", strprintf("%08x", block.nVersion))); result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex())); UniValue txs(UniValue::VARR); for (const auto &tx : block.vtx) { if (txDetails) { UniValue objTx(UniValue::VOBJ); TxToJSON(config, *tx, uint256(), objTx); txs.push_back(objTx); } else { txs.push_back(tx->GetId().GetHex()); } } result.push_back(Pair("tx", txs)); result.push_back(Pair("time", block.GetBlockTime())); result.push_back( Pair("mediantime", int64_t(blockindex->GetMedianTimePast()))); result.push_back(Pair("nonce", uint64_t(block.nNonce))); result.push_back(Pair("bits", strprintf("%08x", block.nBits))); result.push_back(Pair("difficulty", GetDifficulty(blockindex))); result.push_back(Pair("chainwork", blockindex->nChainWork.GetHex())); if (blockindex->pprev) { result.push_back(Pair("previousblockhash", blockindex->pprev->GetBlockHash().GetHex())); } CBlockIndex *pnext = chainActive.Next(blockindex); if (pnext) { result.push_back(Pair("nextblockhash", pnext->GetBlockHash().GetHex())); } return result; } UniValue getblockcount(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockcount\n" "\nReturns the number of blocks in the longest blockchain.\n" "\nResult:\n" "n (numeric) The current block count\n" "\nExamples:\n" + HelpExampleCli("getblockcount", "") + 
HelpExampleRpc("getblockcount", "")); } LOCK(cs_main); return chainActive.Height(); } UniValue getbestblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getbestblockhash\n" "\nReturns the hash of the best (tip) block in the " "longest blockchain.\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n" "\nExamples:\n" + HelpExampleCli("getbestblockhash", "") + HelpExampleRpc("getbestblockhash", "")); } LOCK(cs_main); return chainActive.Tip()->GetBlockHash().GetHex(); } void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex) { if (pindex) { std::lock_guard lock(cs_blockchange); latestblock.hash = pindex->GetBlockHash(); latestblock.height = pindex->nHeight; } cond_blockchange.notify_all(); } UniValue waitfornewblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "waitfornewblock (timeout)\n" "\nWaits for a specific new block and returns " "useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. timeout (int, optional, default=0) Time in " "milliseconds to wait for a response. 0 indicates " "no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitfornewblock", "1000") + HelpExampleRpc("waitfornewblock", "1000")); } int timeout = 0; if (request.params.size() > 0) { timeout = request.params[0].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); block = latestblock; if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue waitforblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblock (timeout)\n" "\nWaits for a specific new block and returns useful info about " "it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. \"blockhash\" (required, string) Block hash to wait for.\n" "2. timeout (int, optional, default=0) Time in milliseconds " "to wait for a response. 
0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000") + HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000")); } int timeout = 0; uint256 hash = uint256S(request.params[0].get_str()); if (request.params.size() > 1) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue waitforblockheight(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblockheight (timeout)\n" "\nWaits for (at least) block height and returns the height and " "hash\n" "of the current tip.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. height (required, int) Block height to wait for (int)\n" "2. timeout (int, optional, default=0) Time in milliseconds to " "wait for a response. 0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblockheight", "\"100\", 1000") + HelpExampleRpc("waitforblockheight", "\"100\", 1000")); } int timeout = 0; int height = request.params[0].get_int(); if (request.params.size() > 1) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue getdifficulty(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("getdifficulty\n" "\nReturns the proof-of-work difficulty as a " "multiple of the minimum difficulty.\n" "\nResult:\n" "n.nnn (numeric) the proof-of-work " "difficulty as a multiple of the minimum " "difficulty.\n" "\nExamples:\n" + HelpExampleCli("getdifficulty", "") + HelpExampleRpc("getdifficulty", "")); } LOCK(cs_main); return GetDifficulty(chainActive.Tip()); } std::string EntryDescriptionString() { return " \"size\" : n, (numeric) transaction size.\n" " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" " \"modifiedfee\" : n, (numeric) transaction fee with fee " "deltas used for mining priority\n" " \"time\" : n, (numeric) local time transaction " "entered pool in seconds since 1 Jan 1970 GMT\n" " \"height\" : n, (numeric) block height when " "transaction entered pool\n" " \"startingpriority\" : n, (numeric) DEPRECATED. 
Priority when " "transaction entered pool\n" " \"currentpriority\" : n, (numeric) DEPRECATED. Transaction " "priority now\n" " \"descendantcount\" : n, (numeric) number of in-mempool " "descendant transactions (including this one)\n" " \"descendantsize\" : n, (numeric) virtual transaction size " "of in-mempool descendants (including this one)\n" " \"descendantfees\" : n, (numeric) modified fees (see above) " "of in-mempool descendants (including this one)\n" " \"ancestorcount\" : n, (numeric) number of in-mempool " "ancestor transactions (including this one)\n" " \"ancestorsize\" : n, (numeric) virtual transaction size " "of in-mempool ancestors (including this one)\n" " \"ancestorfees\" : n, (numeric) modified fees (see above) " "of in-mempool ancestors (including this one)\n" " \"depends\" : [ (array) unconfirmed transactions " "used as inputs for this transaction\n" " \"transactionid\", (string) parent transaction id\n" " ... ]\n"; } void entryToJSON(UniValue &info, const CTxMemPoolEntry &e) { AssertLockHeld(mempool.cs); info.push_back(Pair("size", (int)e.GetTxSize())); info.push_back(Pair("fee", ValueFromAmount(e.GetFee()))); info.push_back(Pair("modifiedfee", ValueFromAmount(e.GetModifiedFee()))); info.push_back(Pair("time", e.GetTime())); info.push_back(Pair("height", (int)e.GetHeight())); info.push_back(Pair("startingpriority", e.GetPriority(e.GetHeight()))); info.push_back( Pair("currentpriority", e.GetPriority(chainActive.Height()))); info.push_back(Pair("descendantcount", e.GetCountWithDescendants())); info.push_back(Pair("descendantsize", e.GetSizeWithDescendants())); info.push_back( Pair("descendantfees", e.GetModFeesWithDescendants().GetSatoshis())); info.push_back(Pair("ancestorcount", e.GetCountWithAncestors())); info.push_back(Pair("ancestorsize", e.GetSizeWithAncestors())); info.push_back( Pair("ancestorfees", e.GetModFeesWithAncestors().GetSatoshis())); const CTransaction &tx = e.GetTx(); std::set setDepends; for (const CTxIn &txin : tx.vin) { if (mempool.exists(txin.prevout.hash)) { setDepends.insert(txin.prevout.hash.ToString()); } } UniValue depends(UniValue::VARR); for (const std::string &dep : setDepends) { depends.push_back(dep); } info.push_back(Pair("depends", depends)); } UniValue mempoolToJSON(bool fVerbose = false) { if (fVerbose) { LOCK(mempool.cs); UniValue o(UniValue::VOBJ); for (const CTxMemPoolEntry &e : mempool.mapTx) { const uint256 &txid = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(txid.ToString(), info)); } return o; } else { std::vector vtxids; mempool.queryHashes(vtxids); UniValue a(UniValue::VARR); for (const uint256 &txid : vtxids) { a.push_back(txid.ToString()); } return a; } } UniValue getrawmempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getrawmempool ( verbose )\n" "\nReturns all transaction ids in memory pool as a json array of " "string transaction ids.\n" "\nArguments:\n" "1. 
verbose (boolean, optional, default=false) True for a json " "object, false for array of transaction ids\n" "\nResult: (for verbose = false):\n" "[ (json array of string)\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" "]\n" "\nResult: (for verbose = true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getrawmempool", "true") + HelpExampleRpc("getrawmempool", "true")); } bool fVerbose = false; if (request.params.size() > 0) { fVerbose = request.params[0].get_bool(); } return mempoolToJSON(fVerbose); } UniValue getmempoolancestors(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempoolancestors txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool ancestors.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool ancestor transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolancestors", "\"mytxid\"") + HelpExampleRpc("getmempoolancestors", "\"mytxid\"")); } bool fVerbose = false; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setAncestors; uint64_t noLimit = std::numeric_limits::max(); std::string dummy; mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit, noLimit, noLimit, dummy, false); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter ancestorIt : setAncestors) { o.push_back(ancestorIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter ancestorIt : setAncestors) { const CTxMemPoolEntry &e = *ancestorIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(_hash.ToString(), info)); } return o; } } UniValue getmempooldescendants(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempooldescendants txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool descendants.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool descendant transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempooldescendants", "\"mytxid\"") + HelpExampleRpc("getmempooldescendants", "\"mytxid\"")); } bool fVerbose = false; if (request.params.size() > 1) fVerbose = request.params[1].get_bool(); uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setDescendants; mempool.CalculateDescendants(it, setDescendants); // CTxMemPool::CalculateDescendants will include the given tx setDescendants.erase(it); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter descendantIt : setDescendants) { o.push_back(descendantIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter descendantIt : setDescendants) { const CTxMemPoolEntry &e = *descendantIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(_hash.ToString(), info)); } return o; } } UniValue getmempoolentry(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getmempoolentry txid\n" "\nReturns mempool data for given transaction\n" "\nArguments:\n" "1. \"txid\" (string, required) " "The transaction id (must be in mempool)\n" "\nResult:\n" "{ (json object)\n" + EntryDescriptionString() + "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"")); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } const CTxMemPoolEntry &e = *it; UniValue info(UniValue::VOBJ); entryToJSON(info, e); return info; } UniValue getblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getblockhash height\n" "\nReturns hash of block in best-block-chain at height provided.\n" "\nArguments:\n" "1. 
height (numeric, required) The height index\n" "\nResult:\n" "\"hash\" (string) The block hash\n" "\nExamples:\n" + HelpExampleCli("getblockhash", "1000") + HelpExampleRpc("getblockhash", "1000")); } LOCK(cs_main); int nHeight = request.params[0].get_int(); if (nHeight < 0 || nHeight > chainActive.Height()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range"); } CBlockIndex *pblockindex = chainActive[nHeight]; return pblockindex->GetBlockHash().GetHex(); } UniValue getblockheader(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblockheader \"hash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for blockheader 'hash'.\n" "If verbose is true, returns an Object with information about " "blockheader .\n" "\nArguments:\n" "1. \"hash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true for a " "json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"0000...1f3\" (string) Expected number of " "hashes required to produce the current chain (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\", (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"") + HelpExampleRpc("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); ssBlock << pblockindex->GetBlockHeader(); std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockheaderToJSON(pblockindex); } UniValue getblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblock \"blockhash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for block 'hash'.\n" "If verbose is true, returns an Object with information about " "block .\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true " "for a json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"size\" : n, (numeric) The block size\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"tx\" : [ (array of string) The transaction ids\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" " ],\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"xxxx\", (string) Expected number of hashes " "required to produce the chain up to this block (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\" (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"") + HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlock block; CBlockIndex *pblockindex = mapBlockIndex[hash]; if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0) { throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config)) { // Block not found on disk. This could be because we have the block // header in our index but don't have the block (for example if a // non-whitelisted node sends us an unrequested long chain of valid // blocks, we add the headers to our index, but don't accept the block). 
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); } if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockToJSON(config, block, pblockindex); } struct CCoinsStats { int nHeight; uint256 hashBlock; uint64_t nTransactions; uint64_t nTransactionOutputs; uint64_t nBogoSize; uint256 hashSerialized; uint64_t nDiskSize; Amount nTotalAmount; CCoinsStats() : nHeight(0), nTransactions(0), nTransactionOutputs(0), nBogoSize(0), nDiskSize(0), nTotalAmount(0) {} }; static void ApplyStats(CCoinsStats &stats, CHashWriter &ss, const uint256 &hash, const std::map &outputs) { assert(!outputs.empty()); ss << hash; ss << VARINT(outputs.begin()->second.GetHeight() * 2 + outputs.begin()->second.IsCoinBase()); stats.nTransactions++; for (const auto output : outputs) { ss << VARINT(output.first + 1); ss << output.second.GetTxOut().scriptPubKey; ss << VARINT(output.second.GetTxOut().nValue.GetSatoshis()); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.GetTxOut().nValue; stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + 2 /* scriptPubKey len */ + output.second.GetTxOut().scriptPubKey.size() /* scriptPubKey */; } ss << VARINT(0); } //! Calculate statistics about the unspent transaction output set static bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats) { std::unique_ptr pcursor(view->Cursor()); CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); stats.nHeight = mapBlockIndex.find(stats.hashBlock)->second->nHeight; } ss << stats.hashBlock; uint256 prevkey; std::map outputs; while (pcursor->Valid()) { boost::this_thread::interruption_point(); COutPoint key; Coin coin; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { if (!outputs.empty() && key.hash != prevkey) { ApplyStats(stats, ss, prevkey, outputs); outputs.clear(); } prevkey = key.hash; outputs[key.n] = std::move(coin); } else { return error("%s: unable to read value", __func__); } pcursor->Next(); } if (!outputs.empty()) { ApplyStats(stats, ss, prevkey, outputs); } stats.hashSerialized = ss.GetHash(); stats.nDiskSize = view->EstimateSize(); return true; } UniValue pruneblockchain(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "pruneblockchain\n" "\nArguments:\n" "1. \"height\" (numeric, required) The block height to prune " "up to. May be set to a discrete height, or a unix timestamp\n" " to prune blocks whose block time is at least 2 " "hours older than the provided timestamp.\n" "\nResult:\n" "n (numeric) Height of the last block pruned.\n" "\nExamples:\n" + HelpExampleCli("pruneblockchain", "1000") + HelpExampleRpc("pruneblockchain", "1000")); } if (!fPruneMode) { throw JSONRPCError( RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode."); } LOCK(cs_main); int heightParam = request.params[0].get_int(); if (heightParam < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height."); } // Height value more than a billion is too high to be a block height, and // too low to be a block time (corresponds to timestamp from Sep 2001). 
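
A self-contained sketch of the height-versus-timestamp disambiguation described in the comment above: values over one billion are read as Unix timestamps, widened by the two-hour buffer, and mapped to the earliest block at or after that time. ResolvePruneHeight and FindEarliestHeightAtLeast are hypothetical stand-ins for the logic around chainActive.FindEarliestAtLeast.

    #include <cstdint>
    #include <cstdio>

    static const int64_t kTimestampWindow = 2 * 60 * 60; // two-hour buffer

    static int64_t FindEarliestHeightAtLeast(int64_t time) {
        // Placeholder: a real node scans the block index for the first block
        // whose timestamp is >= time. Here we just return a fake height.
        (void)time;
        return 500000;
    }

    static int64_t ResolvePruneHeight(int64_t heightParam) {
        if (heightParam > 1000000000) {
            // Too large to be a height: treat it as a timestamp and include
            // blocks that may carry slightly older times.
            return FindEarliestHeightAtLeast(heightParam - kTimestampWindow);
        }
        return heightParam; // already a block height
    }

    int main() {
        printf("%lld\n", (long long)ResolvePruneHeight(120000));     // a height
        printf("%lld\n", (long long)ResolvePruneHeight(1504000000)); // a timestamp
        return 0;
    }
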
if (heightParam > 1000000000) { // Add a 2 hour buffer to include blocks which might have had old // timestamps CBlockIndex *pindex = chainActive.FindEarliestAtLeast(heightParam - TIMESTAMP_WINDOW); if (!pindex) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Could not find block with at least the specified timestamp."); } heightParam = pindex->nHeight; } unsigned int height = (unsigned int)heightParam; unsigned int chainHeight = (unsigned int)chainActive.Height(); if (chainHeight < Params().PruneAfterHeight()) { throw JSONRPCError(RPC_MISC_ERROR, "Blockchain is too short for pruning."); } else if (height > chainHeight) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height."); } else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) { LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. " "Retaining the minimum number of blocks."); height = chainHeight - MIN_BLOCKS_TO_KEEP; } PruneBlockFilesManual(height); return uint64_t(height); } UniValue gettxoutsetinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "gettxoutsetinfo\n" "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n" "\nResult:\n" "{\n" " \"height\":n, (numeric) The current block height (index)\n" " \"bestblock\": \"hex\", (string) the best block hash hex\n" " \"transactions\": n, (numeric) The number of transactions\n" " \"txouts\": n, (numeric) The number of output " "transactions\n" " \"bogosize\": n, (numeric) A database-independent " "metric for UTXO set size\n" " \"hash_serialized\": \"hash\", (string) The serialized hash\n" " \"disk_size\": n, (numeric) The estimated size of the " "chainstate on disk\n" " \"total_amount\": x.xxx (numeric) The total amount\n" "}\n" "\nExamples:\n" + HelpExampleCli("gettxoutsetinfo", "") + HelpExampleRpc("gettxoutsetinfo", "")); } UniValue ret(UniValue::VOBJ); CCoinsStats stats; FlushStateToDisk(); if (GetUTXOStats(pcoinsTip, stats)) { ret.push_back(Pair("height", int64_t(stats.nHeight))); ret.push_back(Pair("bestblock", stats.hashBlock.GetHex())); ret.push_back(Pair("transactions", int64_t(stats.nTransactions))); ret.push_back(Pair("txouts", int64_t(stats.nTransactionOutputs))); ret.push_back(Pair("bogosize", int64_t(stats.nBogoSize))); ret.push_back(Pair("hash_serialized", stats.hashSerialized.GetHex())); ret.push_back(Pair("disk_size", stats.nDiskSize)); ret.push_back( Pair("total_amount", ValueFromAmount(stats.nTotalAmount))); } else { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } return ret; } UniValue gettxout(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "gettxout \"txid\" n ( include_mempool )\n" "\nReturns details about an unspent transaction output.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" "2. n (numeric, required) vout number\n" "3. 
include_mempool (boolean, optional) Whether to include the " "mempool\n" "\nResult:\n" "{\n" " \"bestblock\" : \"hash\", (string) the block hash\n" " \"confirmations\" : n, (numeric) The number of " "confirmations\n" " \"value\" : x.xxx, (numeric) The transaction value " "in " + CURRENCY_UNIT + "\n" " \"scriptPubKey\" : { (json object)\n" " \"asm\" : \"code\", (string) \n" " \"hex\" : \"hex\", (string) \n" " \"reqSigs\" : n, (numeric) Number of required " "signatures\n" " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n" " \"addresses\" : [ (array of string) array of " "bitcoin addresses\n" " \"address\" (string) bitcoin address\n" " ,...\n" " ]\n" " },\n" " \"coinbase\" : true|false (boolean) Coinbase or not\n" "}\n" "\nExamples:\n" "\nGet unspent transactions\n" + HelpExampleCli("listunspent", "") + "\nView the details\n" + HelpExampleCli("gettxout", "\"txid\" 1") + "\nAs a json rpc call\n" + HelpExampleRpc("gettxout", "\"txid\", 1")); } LOCK(cs_main); UniValue ret(UniValue::VOBJ); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); int n = request.params[1].get_int(); COutPoint out(hash, n); bool fMempool = true; if (request.params.size() > 2) { fMempool = request.params[2].get_bool(); } Coin coin; if (fMempool) { LOCK(mempool.cs); CCoinsViewMemPool view(pcoinsTip, mempool); if (!view.GetCoin(out, coin) || mempool.isSpent(out)) { // TODO: this should be done by the CCoinsViewMemPool return NullUniValue; } } else { if (!pcoinsTip->GetCoin(out, coin)) { return NullUniValue; } } BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); CBlockIndex *pindex = it->second; ret.push_back(Pair("bestblock", pindex->GetBlockHash().GetHex())); if (coin.GetHeight() == MEMPOOL_HEIGHT) { ret.push_back(Pair("confirmations", 0)); } else { ret.push_back(Pair("confirmations", int64_t(pindex->nHeight - coin.GetHeight() + 1))); } ret.push_back(Pair("value", ValueFromAmount(coin.GetTxOut().nValue))); UniValue o(UniValue::VOBJ); ScriptPubKeyToJSON(config, coin.GetTxOut().scriptPubKey, o, true); ret.push_back(Pair("scriptPubKey", o)); ret.push_back(Pair("coinbase", coin.IsCoinBase())); return ret; } UniValue verifychain(const Config &config, const JSONRPCRequest &request) { - int nCheckLevel = GetArg("-checklevel", DEFAULT_CHECKLEVEL); - int nCheckDepth = GetArg("-checkblocks", DEFAULT_CHECKBLOCKS); + int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL); + int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS); if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "verifychain ( checklevel nblocks )\n" "\nVerifies blockchain database.\n" "\nArguments:\n" "1. checklevel (numeric, optional, 0-4, default=" + strprintf("%d", nCheckLevel) + ") How thorough the block verification is.\n" "2. 
nblocks (numeric, optional, default=" + strprintf("%d", nCheckDepth) + ", 0=all) The number of blocks to check.\n" "\nResult:\n" "true|false (boolean) Verified or not\n" "\nExamples:\n" + HelpExampleCli("verifychain", "") + HelpExampleRpc("verifychain", "")); } LOCK(cs_main); if (request.params.size() > 0) { nCheckLevel = request.params[0].get_int(); } if (request.params.size() > 1) { nCheckDepth = request.params[1].get_int(); } return CVerifyDB().VerifyDB(config, pcoinsTip, nCheckLevel, nCheckDepth); } /** Implementation of IsSuperMajority with better feedback */ static UniValue SoftForkMajorityDesc(int version, CBlockIndex *pindex, const Consensus::Params &consensusParams) { UniValue rv(UniValue::VOBJ); bool activated = false; switch (version) { case 2: activated = pindex->nHeight >= consensusParams.BIP34Height; break; case 3: activated = pindex->nHeight >= consensusParams.BIP66Height; break; case 4: activated = pindex->nHeight >= consensusParams.BIP65Height; break; } rv.push_back(Pair("status", activated)); return rv; } static UniValue SoftForkDesc(const std::string &name, int version, CBlockIndex *pindex, const Consensus::Params &consensusParams) { UniValue rv(UniValue::VOBJ); rv.push_back(Pair("id", name)); rv.push_back(Pair("version", version)); rv.push_back( Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams))); return rv; } static UniValue BIP9SoftForkDesc(const Consensus::Params &consensusParams, Consensus::DeploymentPos id) { UniValue rv(UniValue::VOBJ); const ThresholdState thresholdState = VersionBitsTipState(consensusParams, id); switch (thresholdState) { case THRESHOLD_DEFINED: rv.push_back(Pair("status", "defined")); break; case THRESHOLD_STARTED: rv.push_back(Pair("status", "started")); break; case THRESHOLD_LOCKED_IN: rv.push_back(Pair("status", "locked_in")); break; case THRESHOLD_ACTIVE: rv.push_back(Pair("status", "active")); break; case THRESHOLD_FAILED: rv.push_back(Pair("status", "failed")); break; } if (THRESHOLD_STARTED == thresholdState) { rv.push_back(Pair("bit", consensusParams.vDeployments[id].bit)); } rv.push_back( Pair("startTime", consensusParams.vDeployments[id].nStartTime)); rv.push_back(Pair("timeout", consensusParams.vDeployments[id].nTimeout)); rv.push_back( Pair("since", VersionBitsTipStateSinceHeight(consensusParams, id))); return rv; } void BIP9SoftForkDescPushBack(UniValue &bip9_softforks, const std::string &name, const Consensus::Params &consensusParams, Consensus::DeploymentPos id) { // Deployments with timeout value of 0 are hidden. // A timeout value of 0 guarantees a softfork will never be activated. // This is used when softfork codes are merged without specifying the // deployment schedule. 
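
A standalone sketch of how the bip9_softforks section of getblockchaininfo is shaped from this data: each reportable deployment maps its threshold state to one of the documented status strings, while deployments with a zero timeout are skipped entirely. The enum, struct, and sample start/timeout numbers below are illustrative stand-ins, not the real consensus types or schedule.

    #include <cstdint>
    #include <cstdio>

    enum class ThresholdStateSketch { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

    struct DeploymentSketch {
        const char *name;
        int64_t nStartTime;
        int64_t nTimeout;
        ThresholdStateSketch state;
    };

    // Map a threshold state to the status string reported by the RPC.
    static const char *StatusString(ThresholdStateSketch s) {
        switch (s) {
            case ThresholdStateSketch::DEFINED:   return "defined";
            case ThresholdStateSketch::STARTED:   return "started";
            case ThresholdStateSketch::LOCKED_IN: return "locked_in";
            case ThresholdStateSketch::ACTIVE:    return "active";
            case ThresholdStateSketch::FAILED:    return "failed";
        }
        return "unknown";
    }

    int main() {
        DeploymentSketch deployments[] = {
            {"csv", 1462060800, 1493596800, ThresholdStateSketch::ACTIVE},
            {"hidden-fork", 0, 0, ThresholdStateSketch::DEFINED},
        };
        for (const DeploymentSketch &d : deployments) {
            if (d.nTimeout <= 0) continue; // zero timeout: never reported
            printf("%s: %s\n", d.name, StatusString(d.state));
        }
        return 0;
    }
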
if (consensusParams.vDeployments[id].nTimeout > 0) { bip9_softforks.push_back( Pair(name, BIP9SoftForkDesc(consensusParams, id))); } } UniValue getblockchaininfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockchaininfo\n" "Returns an object containing various state info regarding " "blockchain processing.\n" "\nResult:\n" "{\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"headers\": xxxxxx, (numeric) the current number of " "headers we have validated\n" " \"bestblockhash\": \"...\", (string) the hash of the currently " "best block\n" " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" " \"mediantime\": xxxxxx, (numeric) median time for the " "current best block\n" " \"verificationprogress\": xxxx, (numeric) estimate of " "verification progress [0..1]\n" " \"chainwork\": \"xxxx\" (string) total amount of work in " "active chain, in hexadecimal\n" " \"pruned\": xx, (boolean) if the blocks are subject " "to pruning\n" " \"pruneheight\": xxxxxx, (numeric) lowest-height complete " "block stored\n" " \"softforks\": [ (array) status of softforks in " "progress\n" " {\n" " \"id\": \"xxxx\", (string) name of softfork\n" " \"version\": xx, (numeric) block version\n" " \"reject\": { (object) progress toward " "rejecting pre-softfork blocks\n" " \"status\": xx, (boolean) true if threshold " "reached\n" " },\n" " }, ...\n" " ],\n" " \"bip9_softforks\": { (object) status of BIP9 " "softforks in progress\n" " \"xxxx\" : { (string) name of the softfork\n" " \"status\": \"xxxx\", (string) one of \"defined\", " "\"started\", \"locked_in\", \"active\", \"failed\"\n" " \"bit\": xx, (numeric) the bit (0-28) in the " "block version field used to signal this softfork (only for " "\"started\" status)\n" " \"startTime\": xx, (numeric) the minimum median " "time past of a block at which the bit gains its meaning\n" " \"timeout\": xx, (numeric) the median time past " "of a block at which the deployment is considered failed if not " "yet locked in\n" " \"since\": xx (numeric) height of the first " "block to which the status applies\n" " }\n" " }\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockchaininfo", "") + HelpExampleRpc("getblockchaininfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("chain", Params().NetworkIDString())); obj.push_back(Pair("blocks", int(chainActive.Height()))); obj.push_back( Pair("headers", pindexBestHeader ? 
pindexBestHeader->nHeight : -1)); obj.push_back( Pair("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex())); obj.push_back(Pair("difficulty", double(GetDifficulty(chainActive.Tip())))); obj.push_back( Pair("mediantime", int64_t(chainActive.Tip()->GetMedianTimePast()))); obj.push_back( Pair("verificationprogress", GuessVerificationProgress(Params().TxData(), chainActive.Tip()))); obj.push_back(Pair("chainwork", chainActive.Tip()->nChainWork.GetHex())); obj.push_back(Pair("pruned", fPruneMode)); const Consensus::Params &consensusParams = Params().GetConsensus(); CBlockIndex *tip = chainActive.Tip(); UniValue softforks(UniValue::VARR); UniValue bip9_softforks(UniValue::VOBJ); softforks.push_back(SoftForkDesc("bip34", 2, tip, consensusParams)); softforks.push_back(SoftForkDesc("bip66", 3, tip, consensusParams)); softforks.push_back(SoftForkDesc("bip65", 4, tip, consensusParams)); BIP9SoftForkDescPushBack(bip9_softforks, "csv", consensusParams, Consensus::DEPLOYMENT_CSV); obj.push_back(Pair("softforks", softforks)); obj.push_back(Pair("bip9_softforks", bip9_softforks)); if (fPruneMode) { CBlockIndex *block = chainActive.Tip(); while (block && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { block = block->pprev; } obj.push_back(Pair("pruneheight", block->nHeight)); } return obj; } /** Comparison function for sorting the getchaintips heads. */ struct CompareBlocksByHeight { bool operator()(const CBlockIndex *a, const CBlockIndex *b) const { // Make sure that unequal blocks with the same height do not compare // equal. Use the pointers themselves to make a distinction. if (a->nHeight != b->nHeight) { return (a->nHeight > b->nHeight); } return a < b; } }; UniValue getchaintips(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getchaintips\n" "Return information about all known tips in the block tree," " including the main chain as well as orphaned branches.\n" "\nResult:\n" "[\n" " {\n" " \"height\": xxxx, (numeric) height of the chain tip\n" " \"hash\": \"xxxx\", (string) block hash of the tip\n" " \"branchlen\": 0 (numeric) zero for main chain\n" " \"status\": \"active\" (string) \"active\" for the main " "chain\n" " },\n" " {\n" " \"height\": xxxx,\n" " \"hash\": \"xxxx\",\n" " \"branchlen\": 1 (numeric) length of branch " "connecting the tip to the main chain\n" " \"status\": \"xxxx\" (string) status of the chain " "(active, valid-fork, valid-headers, headers-only, invalid)\n" " }\n" "]\n" "Possible values for status:\n" "1. \"invalid\" This branch contains at least one " "invalid block\n" "2. \"headers-only\" Not all blocks for this branch are " "available, but the headers are valid\n" "3. \"valid-headers\" All blocks are available for this " "branch, but they were never fully validated\n" "4. \"valid-fork\" This branch is not part of the " "active chain, but is fully validated\n" "5. \"active\" This is the tip of the active main " "chain, which is certainly valid\n" "\nExamples:\n" + HelpExampleCli("getchaintips", "") + HelpExampleRpc("getchaintips", "")); } LOCK(cs_main); /** * Idea: the set of chain tips is chainActive.tip, plus orphan blocks which * do not have another orphan building off of them. * Algorithm: * - Make one pass through mapBlockIndex, picking out the orphan blocks, * and also storing a set of the orphan block's pprev pointers. * - Iterate through the orphan blocks. If the block isn't pointed to by * another orphan, it is a chain tip. 
* - add chainActive.Tip() */ std::set setTips; std::set setOrphans; std::set setPrevs; for (const std::pair &item : mapBlockIndex) { if (!chainActive.Contains(item.second)) { setOrphans.insert(item.second); setPrevs.insert(item.second->pprev); } } for (std::set::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) { if (setPrevs.erase(*it) == 0) { setTips.insert(*it); } } // Always report the currently active tip. setTips.insert(chainActive.Tip()); /* Construct the output array. */ UniValue res(UniValue::VARR); for (const CBlockIndex *block : setTips) { UniValue obj(UniValue::VOBJ); obj.push_back(Pair("height", block->nHeight)); obj.push_back(Pair("hash", block->phashBlock->GetHex())); const int branchLen = block->nHeight - chainActive.FindFork(block)->nHeight; obj.push_back(Pair("branchlen", branchLen)); std::string status; if (chainActive.Contains(block)) { // This block is part of the currently active chain. status = "active"; } else if (block->nStatus & BLOCK_FAILED_MASK) { // This block or one of its ancestors is invalid. status = "invalid"; } else if (block->nChainTx == 0) { // This block cannot be connected because full block data for it or // one of its parents is missing. status = "headers-only"; } else if (block->IsValid(BLOCK_VALID_SCRIPTS)) { // This block is fully validated, but no longer part of the active // chain. It was probably the active block once, but was // reorganized. status = "valid-fork"; } else if (block->IsValid(BLOCK_VALID_TREE)) { // The headers for this block are valid, but it has not been // validated. It was probably never part of the most-work chain. status = "valid-headers"; } else { // No clue. status = "unknown"; } obj.push_back(Pair("status", status)); res.push_back(obj); } return res; } UniValue mempoolInfoToJSON() { UniValue ret(UniValue::VOBJ); ret.push_back(Pair("size", (int64_t)mempool.size())); ret.push_back(Pair("bytes", (int64_t)mempool.GetTotalTxSize())); ret.push_back(Pair("usage", (int64_t)mempool.DynamicMemoryUsage())); size_t maxmempool = - GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.push_back(Pair("maxmempool", (int64_t)maxmempool)); ret.push_back( Pair("mempoolminfee", ValueFromAmount(mempool.GetMinFee(maxmempool).GetFeePerK()))); return ret; } UniValue getmempoolinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmempoolinfo\n" "\nReturns details on the active state of the TX memory pool.\n" "\nResult:\n" "{\n" " \"size\": xxxxx, (numeric) Current tx count\n" " \"bytes\": xxxxx, (numeric) Transaction size.\n" " \"usage\": xxxxx, (numeric) Total memory usage for " "the mempool\n" " \"maxmempool\": xxxxx, (numeric) Maximum memory usage " "for the mempool\n" " \"mempoolminfee\": xxxxx (numeric) Minimum fee for tx to " "be accepted\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolinfo", "") + HelpExampleRpc("getmempoolinfo", "")); } return mempoolInfoToJSON(); } UniValue preciousblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "preciousblock \"blockhash\"\n" "\nTreats a block as if it were received before others with the " "same work.\n" "\nA later preciousblock call can override the effect of an " "earlier one.\n" "\nThe effects of preciousblock are not retained across restarts.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the hash of the block to " "mark as precious\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("preciousblock", "\"blockhash\"") + HelpExampleRpc("preciousblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CBlockIndex *pblockindex; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } pblockindex = mapBlockIndex[hash]; } CValidationState state; PreciousBlock(config, state, pblockindex); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue invalidateblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "invalidateblock \"blockhash\"\n" "\nPermanently marks a block as invalid, as if it " "violated a consensus rule.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of " "the block to mark as invalid\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("invalidateblock", "\"blockhash\"") + HelpExampleRpc("invalidateblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; InvalidateBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue reconsiderblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "reconsiderblock \"blockhash\"\n" "\nRemoves invalidity status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of invalidateblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "reconsider\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("reconsiderblock", "\"blockhash\"") + HelpExampleRpc("reconsiderblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; ResetBlockFailureFlags(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue getchaintxstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getchaintxstats ( nblocks blockhash )\n" "\nCompute statistics about the total number and rate of " "transactions in the chain.\n" "\nArguments:\n" "1. nblocks (numeric, optional) Size of the window in number " "of blocks (default: one month).\n" "2. 
\"blockhash\" (string, optional) The hash of the block that " "ends the window.\n" "\nResult:\n" "{\n" " \"time\": xxxxx, (numeric) The timestamp for the " "final block in the window in UNIX format.\n" " \"txcount\": xxxxx, (numeric) The total number of " "transactions in the chain up to that point.\n" " \"window_block_count\": xxxxx, (numeric) Size of the window in " "number of blocks.\n" " \"window_tx_count\": xxxxx, (numeric) The number of " "transactions in the window. Only returned if " "\"window_block_count\" is > 0.\n" " \"window_interval\": xxxxx, (numeric) The elapsed time in " "the window in seconds. Only returned if \"window_block_count\" is " "> 0.\n" " \"txrate\": x.xx, (numeric) The average rate of " "transactions per second in the window. Only returned if " "\"window_interval\" is > 0.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getchaintxstats", "") + HelpExampleRpc("getchaintxstats", "2016")); } const CBlockIndex *pindex; // By default: 1 month int blockcount = 30 * 24 * 60 * 60 / Params().GetConsensus().nPowTargetSpacing; bool havehash = !request.params[1].isNull(); uint256 hash; if (havehash) { hash = uint256S(request.params[1].get_str()); } { LOCK(cs_main); if (havehash) { auto it = mapBlockIndex.find(hash); if (it == mapBlockIndex.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } pindex = it->second; if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block is not in main chain"); } } else { pindex = chainActive.Tip(); } } assert(pindex != nullptr); if (request.params[0].isNull()) { blockcount = std::max(0, std::min(blockcount, pindex->nHeight - 1)); } else { blockcount = request.params[0].get_int(); if (blockcount < 0 || (blockcount > 0 && blockcount >= pindex->nHeight)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid block count: " "should be between 0 and " "the block's height - 1"); } } const CBlockIndex *pindexPast = pindex->GetAncestor(pindex->nHeight - blockcount); int nTimeDiff = pindex->GetMedianTimePast() - pindexPast->GetMedianTimePast(); int nTxDiff = pindex->nChainTx - pindexPast->nChainTx; UniValue ret(UniValue::VOBJ); ret.push_back(Pair("time", (int64_t)pindex->nTime)); ret.push_back(Pair("txcount", (int64_t)pindex->nChainTx)); ret.push_back(Pair("window_block_count", blockcount)); if (blockcount > 0) { ret.push_back(Pair("window_tx_count", nTxDiff)); ret.push_back(Pair("window_interval", nTimeDiff)); if (nTimeDiff > 0) { ret.push_back(Pair("txrate", ((double)nTxDiff) / nTimeDiff)); } } return ret; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) okSafe argNames // ------------------- ------------------------ ---------------------- ------ ---------- { "blockchain", "getblockchaininfo", getblockchaininfo, true, {} }, { "blockchain", "getchaintxstats", &getchaintxstats, true, {"nblocks", "blockhash"} }, { "blockchain", "getbestblockhash", getbestblockhash, true, {} }, { "blockchain", "getblockcount", getblockcount, true, {} }, { "blockchain", "getblock", getblock, true, {"blockhash","verbose"} }, { "blockchain", "getblockhash", getblockhash, true, {"height"} }, { "blockchain", "getblockheader", getblockheader, true, {"blockhash","verbose"} }, { "blockchain", "getchaintips", getchaintips, true, {} }, { "blockchain", "getdifficulty", getdifficulty, true, {} }, { "blockchain", "getmempoolancestors", getmempoolancestors, true, {"txid","verbose"} }, { "blockchain", "getmempooldescendants", getmempooldescendants, true, {"txid","verbose"} }, { "blockchain", 
"getmempoolentry", getmempoolentry, true, {"txid"} }, { "blockchain", "getmempoolinfo", getmempoolinfo, true, {} }, { "blockchain", "getrawmempool", getrawmempool, true, {"verbose"} }, { "blockchain", "gettxout", gettxout, true, {"txid","n","include_mempool"} }, { "blockchain", "gettxoutsetinfo", gettxoutsetinfo, true, {} }, { "blockchain", "pruneblockchain", pruneblockchain, true, {"height"} }, { "blockchain", "verifychain", verifychain, true, {"checklevel","nblocks"} }, { "blockchain", "preciousblock", preciousblock, true, {"blockhash"} }, /* Not shown in help */ { "hidden", "invalidateblock", invalidateblock, true, {"blockhash"} }, { "hidden", "reconsiderblock", reconsiderblock, true, {"blockhash"} }, { "hidden", "waitfornewblock", waitfornewblock, true, {"timeout"} }, { "hidden", "waitforblock", waitforblock, true, {"blockhash","timeout"} }, { "hidden", "waitforblockheight", waitforblockheight, true, {"height","timeout"} }, }; // clang-format on void RegisterBlockchainRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 5490f8195c..daa7c5ca04 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1,1061 +1,1062 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/mining.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/params.h" #include "consensus/validation.h" #include "core_io.h" #include "dstencode.h" #include "init.h" #include "miner.h" #include "net.h" #include "policy/policy.h" #include "pow.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "txmempool.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include "validationinterface.h" #include #include #include /** * Return average network hashes per second based on the last 'lookup' blocks, * or from the last difficulty change if 'lookup' is nonpositive. If 'height' is * nonnegative, compute the estimate at the time when a given block was found. */ static UniValue GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = chainActive.Tip(); if (height >= 0 && height < chainActive.Height()) { pb = chainActive[height]; } if (pb == nullptr || !pb->nHeight) { return 0; } // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) { lookup = pb->nHeight % Params().GetConsensus().DifficultyAdjustmentInterval() + 1; } // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) { lookup = pb->nHeight; } CBlockIndex *pb0 = pb; int64_t minTime = pb0->GetBlockTime(); int64_t maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64_t time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a // divide by zero exception. 
if (minTime == maxTime) { return 0; } arith_uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64_t timeDiff = maxTime - minTime; return workDiff.getdouble() / timeDiff; } static UniValue getnetworkhashps(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getnetworkhashps ( nblocks height )\n" "\nReturns the estimated network hashes per second based on the " "last n blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last " "difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a " "certain block was found.\n" "\nArguments:\n" "1. nblocks (numeric, optional, default=120) The number of " "blocks, or -1 for blocks since last difficulty change.\n" "2. height (numeric, optional, default=-1) To estimate at the " "time of the given height.\n" "\nResult:\n" "x (numeric) Hashes per second estimated\n" "\nExamples:\n" + HelpExampleCli("getnetworkhashps", "") + HelpExampleRpc("getnetworkhashps", "")); } LOCK(cs_main); return GetNetworkHashPS( request.params.size() > 0 ? request.params[0].get_int() : 120, request.params.size() > 1 ? request.params[1].get_int() : -1); } UniValue generateBlocks(const Config &config, std::shared_ptr coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript) { static const int nInnerLoopCount = 0x100000; int nHeightStart = 0; int nHeightEnd = 0; int nHeight = 0; { // Don't keep cs_main locked. LOCK(cs_main); nHeightStart = chainActive.Height(); nHeight = nHeightStart; nHeightEnd = nHeightStart + nGenerate; } unsigned int nExtraNonce = 0; UniValue blockHashes(UniValue::VARR); while (nHeight < nHeightEnd) { std::unique_ptr pblocktemplate( BlockAssembler(config).CreateNewBlock( coinbaseScript->reserveScript)); if (!pblocktemplate.get()) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); IncrementExtraNonce(config, pblock, chainActive.Tip(), nExtraNonce); } while (nMaxTries > 0 && pblock->nNonce < nInnerLoopCount && !CheckProofOfWork(pblock->GetHash(), pblock->nBits, config)) { ++pblock->nNonce; --nMaxTries; } if (nMaxTries == 0) { break; } if (pblock->nNonce == nInnerLoopCount) { continue; } std::shared_ptr shared_pblock = std::make_shared(*pblock); if (!ProcessNewBlock(config, shared_pblock, true, nullptr)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); } ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); // Mark script as important because it was used at least for one // coinbase output if the script came from the wallet. if (keepScript) { coinbaseScript->KeepScript(); } } return blockHashes; } static UniValue generatetoaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "generatetoaddress nblocks address (maxtries)\n" "\nMine blocks immediately to a specified address (before the RPC " "call returns)\n" "\nArguments:\n" "1. nblocks (numeric, required) How many blocks are generated " "immediately.\n" "2. address (string, required) The address to send the newly " "generated bitcoin to.\n" "3. 
maxtries (numeric, optional) How many iterations to try " "(default = 1000000).\n" "\nResult:\n" "[ blockhashes ] (array) hashes of blocks generated\n" "\nExamples:\n" "\nGenerate 11 blocks to myaddress\n" + HelpExampleCli("generatetoaddress", "11 \"myaddress\"")); } int nGenerate = request.params[0].get_int(); uint64_t nMaxTries = 1000000; if (request.params.size() > 2) { nMaxTries = request.params[2].get_int(); } CTxDestination destination = DecodeDestination(request.params[1].get_str()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address"); } std::shared_ptr coinbaseScript = std::make_shared(); coinbaseScript->reserveScript = GetScriptForDestination(destination); return generateBlocks(config, coinbaseScript, nGenerate, nMaxTries, false); } static UniValue getmininginfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmininginfo\n" "\nReturns a json object containing mining-related information." "\nResult:\n" "{\n" " \"blocks\": nnn, (numeric) The current block\n" " \"currentblocksize\": nnn, (numeric) The last block size\n" " \"currentblocktx\": nnn, (numeric) The last block " "transaction\n" " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"errors\": \"...\" (string) Current errors\n" " \"networkhashps\": nnn, (numeric) The network hashes per " "second\n" " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmininginfo", "") + HelpExampleRpc("getmininginfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("blocks", int(chainActive.Height()))); obj.push_back(Pair("currentblocksize", uint64_t(nLastBlockSize))); obj.push_back(Pair("currentblocktx", uint64_t(nLastBlockTx))); obj.push_back(Pair("difficulty", double(GetDifficulty(chainActive.Tip())))); - obj.push_back(Pair("blockprioritypercentage", - uint8_t(GetArg("-blockprioritypercentage", - DEFAULT_BLOCK_PRIORITY_PERCENTAGE)))); + obj.push_back( + Pair("blockprioritypercentage", + uint8_t(gArgs.GetArg("-blockprioritypercentage", + DEFAULT_BLOCK_PRIORITY_PERCENTAGE)))); obj.push_back(Pair("errors", GetWarnings("statusbar"))); obj.push_back(Pair("networkhashps", getnetworkhashps(config, request))); obj.push_back(Pair("pooledtx", uint64_t(mempool.size()))); obj.push_back(Pair("chain", Params().NetworkIDString())); return obj; } // NOTE: Unlike wallet RPC (which use BCH values), mining RPCs follow GBT (BIP // 22) in using satoshi amounts static UniValue prioritisetransaction(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "prioritisetransaction \n" "Accepts the transaction into mined blocks at a higher (or lower) " "priority\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id.\n" "2. priority_delta (numeric, required) The priority to add or " "subtract.\n" " The transaction selection algorithm considers " "the tx as it would have a higher priority.\n" " (priority of a transaction is calculated: " "coinage * value_in_satoshis / txsize) \n" "3. 
fee_delta (numeric, required) The fee value (in satoshis) " "to add (or subtract, if negative).\n" " The fee is not actually paid, only the " "algorithm for selecting transactions into a block\n" " considers the transaction as it would have paid " "a higher (or lower) fee.\n" "\nResult:\n" "true (boolean) Returns true\n" "\nExamples:\n" + HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000") + HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")); } LOCK(cs_main); uint256 hash = ParseHashStr(request.params[0].get_str(), "txid"); Amount nAmount(request.params[2].get_int64()); mempool.PrioritiseTransaction(hash, request.params[0].get_str(), request.params[1].get_real(), nAmount); return true; } // NOTE: Assumes a conclusive result; if result is inconclusive, it must be // handled by caller static UniValue BIP22ValidationResult(const Config &config, const CValidationState &state) { if (state.IsValid()) { return NullUniValue; } std::string strRejectReason = state.GetRejectReason(); if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, strRejectReason); } if (state.IsInvalid()) { if (strRejectReason.empty()) { return "rejected"; } return strRejectReason; } // Should be impossible. return "valid?"; } std::string gbt_vb_name(const Consensus::DeploymentPos pos) { const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; std::string s = vbinfo.name; if (!vbinfo.gbt_force) { s.insert(s.begin(), '!'); } return s; } static UniValue getblocktemplate(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to " "explicitly select between the default 'template' request or a " "'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/" "bip-0009.mediawiki#getblocktemplate_changes\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object " "in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be " "set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', " "'serverlist', 'workid'\n" " ,...\n" " ],\n" " \"rules\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "softfork deployment\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred " "block version\n" " \"rules\" : [ \"rulename\", ... 
], (array of strings) " "specific block rules that are to be enforced\n" " \"vbavailable\" : { (json object) set of " "pending, supported versionbit (BIP 9) softfork deployments\n" " \"rulename\" : bitnumber (numeric) identifies the " "bit number as indicating acceptance and readiness for the named " "softfork rule\n" " ,...\n" " },\n" " \"vbrequired\" : n, (numeric) bit mask of " "versionbits the server requires set in submissions\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of " "current highest block\n" " \"transactions\" : [ (array) contents of " "non-coinbase transactions that should be included in the next " "block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction " "data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id " "encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded " "in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers " "\n" " n (numeric) transactions " "before this one (by 1-based index in 'transactions' list) that " "must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in " "value between transaction inputs and outputs (in Satoshis); for " "coinbase transactions, this is a negative Number of the total " "collected block fees (ie, not including the block subsidy); if " "key is not present, fee is unknown and clients MUST NOT assume " "there isn't one\n" " \"sigops\" : n, (numeric) total SigOps " "cost, as counted for purposes of block limits; if key is not " "present, sigop cost is unknown and clients MUST NOT assume it is " "zero\n" " \"required\" : true|false (boolean) if provided and " "true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that " "should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to " "be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable " "input to coinbase transaction, including the generation award and " "transaction fees (in Satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information " "for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum " "timestamp appropriate for next block time in seconds since epoch " "(Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of " "ways the block template may be changed \n" " \"value\" (string) A way the block " "template may be changed, e.g. 
'time', 'transactions', " "'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid " "nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops " "in blocks\n" " \"sizelimit\" : n, (numeric) limit of block " "size\n" " \"curtime\" : ttt, (numeric) current timestamp " "in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed " "target of next block\n" " \"height\" : n (numeric) The height of the " "next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "")); } LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set setClientRules; int64_t nMaxVersionPreVB = -1; if (request.params.size() > 0) { const UniValue &oparam = request.params[0].get_obj(); const UniValue &modeval = find_value(oparam, "mode"); if (modeval.isStr()) { strMode = modeval.get_str(); } else if (modeval.isNull()) { /* Do nothing */ } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue &dataval = find_value(oparam, "data"); if (!dataval.isStr()) { throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); } CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } uint256 hash = block.GetHash(); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) { return "duplicate"; } if (pindex->nStatus & BLOCK_FAILED_MASK) { return "duplicate-invalid"; } return "duplicate-inconclusive"; } CBlockIndex *const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) { return "inconclusive-not-best-prevblk"; } CValidationState state; TestBlockValidity(config, state, block, pindexPrev, false, true); return BIP22ValidationResult(config, state); } const UniValue &aClientRules = find_value(oparam, "rules"); if (aClientRules.isArray()) { for (size_t i = 0; i < aClientRules.size(); ++i) { const UniValue &v = aClientRules[i]; setClientRules.insert(v.get_str()); } } else { // NOTE: It is important that this NOT be read if versionbits is // supported const UniValue &uvMaxVersion = find_value(oparam, "maxversion"); if (uvMaxVersion.isNum()) { nMaxVersionPreVB = uvMaxVersion.get_int64(); } } } if (strMode != "template") { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (!g_connman) { throw JSONRPCError( RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); } if (IsInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); } static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has // passed and there are more transactions uint256 hashWatchedChain; boost::system_time checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, // but this makes 
testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = boost::get_system_time() + boost::posix_time::minutes(1); boost::unique_lock lock(csBestBlock); while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning()) { if (!cvBlockChange.timed_wait(lock, checktxtime)) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) { break; } checktxtime += boost::posix_time::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); } // TODO: Maybe recheck connections/IBD and (if something wrong) send an // expires-immediately template to stop miners? } // Update block static CBlockIndex *pindexPrev; static int64_t nStart; static std::unique_ptr pblocktemplate; if (pindexPrev != chainActive.Tip() || (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any // failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = mempool.GetTransactionsUpdated(); CBlockIndex *pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptDummy); if (!pblocktemplate) { throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); } // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } // pointer for convenience CBlock *pblock = &pblocktemplate->block; const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Update nTime UpdateTime(pblock, config, pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); std::map setTxIndex; int i = 0; for (const auto &it : pblock->vtx) { const CTransaction &tx = *it; uint256 txId = tx.GetId(); setTxIndex[txId] = i++; if (tx.IsCoinBase()) { continue; } UniValue entry(UniValue::VOBJ); entry.push_back(Pair("data", EncodeHexTx(tx))); entry.push_back(Pair("txid", txId.GetHex())); entry.push_back(Pair("hash", tx.GetHash().GetHex())); UniValue deps(UniValue::VARR); for (const CTxIn &in : tx.vin) { if (setTxIndex.count(in.prevout.hash)) deps.push_back(setTxIndex[in.prevout.hash]); } entry.push_back(Pair("depends", deps)); int index_in_template = i - 1; entry.push_back(Pair( "fee", pblocktemplate->vTxFees[index_in_template].GetSatoshis())); int64_t nTxSigOps = pblocktemplate->vTxSigOpsCount[index_in_template]; entry.push_back(Pair("sigops", nTxSigOps)); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.push_back( Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end()))); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.push_back(Pair("capabilities", aCaps)); UniValue aRules(UniValue::VARR); UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, 
versionbitscache); switch (state) { case THRESHOLD_DEFINED: case THRESHOLD_FAILED: // Not exposed to GBT at all break; case THRESHOLD_LOCKED_IN: { // Ensure bit is set in block version, then fallthrough to get // vbavailable set. pblock->nVersion |= VersionBitsMask(consensusParams, pos); } // FALLTHROUGH case THRESHOLD_STARTED: { const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.push_back(Pair( gbt_vb_name(pos), consensusParams.vDeployments[pos].bit)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it // in the [default] version pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); } } break; } case THRESHOLD_ACTIVE: { // Add to rules only const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; aRules.push_back(gbt_vb_name(pos)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { // Not supported by the client; make sure it's safe to // proceed if (!vbinfo.gbt_force) { // If we do anything other than throw an exception here, // be sure version/force isn't sent to old clients throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Support for '%s' rule requires explicit " "client support", vbinfo.name)); } } break; } } } result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("rules", aRules)); result.push_back(Pair("vbavailable", vbavailable)); result.push_back(Pair("vbrequired", int(0))); if (nMaxVersionPreVB >= 2) { // If VB is supported by the client, nMaxVersionPreVB is -1, so we won't // get here. Because BIP 34 changed how the generation transaction is // serialized, we can only use version/force back to v2 blocks. This is // safe to do [otherwise-]unconditionally only because we are throwing // an exception above if a non-force deployment gets activated. Note // that this can probably also be removed entirely after the first BIP9 // non-force deployment (ie, probably segwit) gets activated. aMutable.push_back("version/force"); } result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); result.push_back( Pair("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue.GetSatoshis())); result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast))); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back( Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast() + 1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); // FIXME: Allow for mining block greater than 1M. 
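// (Illustrative sketch of what the FIXME above is pointing at, assuming the
//  Config object continues to expose an accessor for the configured maximum
//  block size as it does elsewhere in this codebase; the two limits pushed
//  just below could then come from configuration instead of the compile-time
//  DEFAULT_MAX_BLOCK_SIZE:
//
//      uint64_t nMaxBlockSize = config.GetMaxBlockSize();  // assumed accessor
//      result.push_back(Pair("sigoplimit",
//                            GetMaxBlockSigOpsCount(nMaxBlockSize)));
//      result.push_back(Pair("sizelimit", nMaxBlockSize));
// )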
result.push_back( Pair("sigoplimit", GetMaxBlockSigOpsCount(DEFAULT_MAX_BLOCK_SIZE))); result.push_back(Pair("sizelimit", DEFAULT_MAX_BLOCK_SIZE)); result.push_back(Pair("curtime", pblock->GetBlockTime())); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight + 1))); return result; } class submitblock_StateCatcher : public CValidationInterface { public: uint256 hash; bool found; CValidationState state; submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {} protected: void BlockChecked(const CBlock &block, const CValidationState &stateIn) override { if (block.GetHash() != hash) { return; } found = true; state = stateIn; } }; static UniValue submitblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "submitblock \"hexdata\" ( \"jsonparametersobject\" )\n" "\nAttempts to submit new block to network.\n" "The 'jsonparametersobject' parameter is currently ignored.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n" "\nArguments\n" "1. \"hexdata\" (string, required) the hex-encoded block " "data to submit\n" "2. \"parameters\" (string, optional) object of optional " "parameters\n" " {\n" " \"workid\" : \"id\" (string, optional) if the server " "provided a workid, it MUST be included with submissions\n" " }\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("submitblock", "\"mydata\"") + HelpExampleRpc("submitblock", "\"mydata\"")); } std::shared_ptr blockptr = std::make_shared(); CBlock &block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase"); } uint256 hash = block.GetHash(); bool fBlockPresent = false; { LOCK(cs_main); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) { return "duplicate"; } if (pindex->nStatus & BLOCK_FAILED_MASK) { return "duplicate-invalid"; } // Otherwise, we might only have the header - process the block // before returning fBlockPresent = true; } } submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); bool fAccepted = ProcessNewBlock(config, blockptr, true, nullptr); UnregisterValidationInterface(&sc); if (fBlockPresent) { if (fAccepted && !sc.found) { return "duplicate-inconclusive"; } return "duplicate"; } if (!sc.found) { return "inconclusive"; } return BIP22ValidationResult(config, sc.state); } static UniValue estimatefee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatefee nblocks\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. 
nblocks (numeric, required)\n" "\nResult:\n" "n (numeric) estimated fee-per-kilobyte\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate.\n" "-1 is always returned for nblocks == 1 as it is impossible to " "calculate\n" "a fee that is high enough to get reliably included in the next " "block.\n" "\nExample:\n" + HelpExampleCli("estimatefee", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); if (nBlocks < 1) { nBlocks = 1; } CFeeRate feeRate = mempool.estimateFee(nBlocks); if (feeRate == CFeeRate(Amount(0))) { return -1.0; } return ValueFromAmount(feeRate.GetFeePerK()); } static UniValue estimatepriority(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatepriority nblocks\n" "\nDEPRECATED. Estimates the approximate priority " "a zero-fee transaction needs to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. nblocks (numeric, required)\n" "\nResult:\n" "n (numeric) estimated priority\n" "\n" "A negative value is returned if not enough " "transactions and blocks\n" "have been observed to make an estimate.\n" "\nExample:\n" + HelpExampleCli("estimatepriority", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); if (nBlocks < 1) { nBlocks = 1; } return mempool.estimatePriority(nBlocks); } static UniValue estimatesmartfee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatesmartfee nblocks\n" "\nWARNING: This interface is unstable and may disappear or " "change!\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction to begin\n" "confirmation within nblocks blocks if possible and return the " "number of blocks\n" "for which the estimate is valid.\n" "\nArguments:\n" "1. nblocks (numeric)\n" "\nResult:\n" "{\n" " \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in " "BCH)\n" " \"blocks\" : n (numeric) block number where estimate " "was found\n" "}\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate for any number of blocks.\n" "However it will not return a value below the mempool reject fee.\n" "\nExample:\n" + HelpExampleCli("estimatesmartfee", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); UniValue result(UniValue::VOBJ); int answerFound; CFeeRate feeRate = mempool.estimateSmartFee(nBlocks, &answerFound); result.push_back(Pair("feerate", feeRate == CFeeRate(Amount(0)) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK()))); result.push_back(Pair("blocks", answerFound)); return result; } static UniValue estimatesmartpriority(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatesmartpriority nblocks\n" "\nDEPRECATED. WARNING: This interface is unstable and may " "disappear or change!\n" "\nEstimates the approximate priority a zero-fee transaction needs " "to begin\n" "confirmation within nblocks blocks if possible and return the " "number of blocks\n" "for which the estimate is valid.\n" "\nArguments:\n" "1. 
nblocks (numeric, required)\n" "\nResult:\n" "{\n" " \"priority\" : x.x, (numeric) estimated priority\n" " \"blocks\" : n (numeric) block number where estimate " "was found\n" "}\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate for any number of blocks.\n" "However if the mempool reject fee is set it will return 1e9 * " "MAX_MONEY.\n" "\nExample:\n" + HelpExampleCli("estimatesmartpriority", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); UniValue result(UniValue::VOBJ); int answerFound; double priority = mempool.estimateSmartPriority(nBlocks, &answerFound); result.push_back(Pair("priority", priority)); result.push_back(Pair("blocks", answerFound)); return result; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // ---------- ------------------------ ---------------------- ---------- {"mining", "getnetworkhashps", getnetworkhashps, true, {"nblocks", "height"}}, {"mining", "getmininginfo", getmininginfo, true, {}}, {"mining", "prioritisetransaction", prioritisetransaction, true, {"txid", "priority_delta", "fee_delta"}}, {"mining", "getblocktemplate", getblocktemplate, true, {"template_request"}}, {"mining", "submitblock", submitblock, true, {"hexdata", "parameters"}}, {"generating", "generatetoaddress", generatetoaddress, true, {"nblocks", "address", "maxtries"}}, {"util", "estimatefee", estimatefee, true, {"nblocks"}}, {"util", "estimatepriority", estimatepriority, true, {"nblocks"}}, {"util", "estimatesmartfee", estimatesmartfee, true, {"nblocks"}}, {"util", "estimatesmartpriority", estimatesmartpriority, true, {"nblocks"}}, }; // clang-format on void RegisterMiningRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); } diff --git a/src/rpc/protocol.cpp b/src/rpc/protocol.cpp index eca9fad2f9..b6a5896a42 100644 --- a/src/rpc/protocol.cpp +++ b/src/rpc/protocol.cpp @@ -1,120 +1,120 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/protocol.h" #include "random.h" #include "tinyformat.h" #include "util.h" #include "utilstrencodings.h" #include "utiltime.h" #include "version.h" #include #include /** * JSON-RPC protocol. Bitcoin speaks version 1.0 for maximum compatibility, but * uses JSON-RPC 1.1/2.0 standards for parts of the 1.0 standard that were * unspecified (HTTP errors and contents of 'error'). 
* * 1.0 spec: http://json-rpc.org/wiki/specification * 1.2 spec: http://jsonrpc.org/historical/json-rpc-over-http.html */ UniValue JSONRPCRequestObj(const std::string &strMethod, const UniValue &params, const UniValue &id) { UniValue request(UniValue::VOBJ); request.push_back(Pair("method", strMethod)); request.push_back(Pair("params", params)); request.push_back(Pair("id", id)); return request; } UniValue JSONRPCReplyObj(const UniValue &result, const UniValue &error, const UniValue &id) { UniValue reply(UniValue::VOBJ); if (!error.isNull()) reply.push_back(Pair("result", NullUniValue)); else reply.push_back(Pair("result", result)); reply.push_back(Pair("error", error)); reply.push_back(Pair("id", id)); return reply; } std::string JSONRPCReply(const UniValue &result, const UniValue &error, const UniValue &id) { UniValue reply = JSONRPCReplyObj(result, error, id); return reply.write() + "\n"; } UniValue JSONRPCError(int code, const std::string &message) { UniValue error(UniValue::VOBJ); error.push_back(Pair("code", code)); error.push_back(Pair("message", message)); return error; } /** Username used when cookie authentication is in use (arbitrary, only for * recognizability in debugging/logging purposes) */ static const std::string COOKIEAUTH_USER = "__cookie__"; /** Default name for auth cookie file */ static const std::string COOKIEAUTH_FILE = ".cookie"; fs::path GetAuthCookieFile() { - fs::path path(GetArg("-rpccookiefile", COOKIEAUTH_FILE)); + fs::path path(gArgs.GetArg("-rpccookiefile", COOKIEAUTH_FILE)); if (!path.is_complete()) path = GetDataDir() / path; return path; } bool GenerateAuthCookie(std::string *cookie_out) { const size_t COOKIE_SIZE = 32; uint8_t rand_pwd[COOKIE_SIZE]; GetRandBytes(rand_pwd, COOKIE_SIZE); std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd, rand_pwd + COOKIE_SIZE); /** the umask determines what permissions are used to create this file - * these are set to 077 in init.cpp unless overridden with -sysperms. */ std::ofstream file; fs::path filepath = GetAuthCookieFile(); file.open(filepath.string().c_str()); if (!file.is_open()) { LogPrintf("Unable to open cookie authentication file %s for writing\n", filepath.string()); return false; } file << cookie; file.close(); LogPrintf("Generated RPC authentication cookie %s\n", filepath.string()); if (cookie_out) *cookie_out = cookie; return true; } bool GetAuthCookie(std::string *cookie_out) { std::ifstream file; std::string cookie; fs::path filepath = GetAuthCookieFile(); file.open(filepath.string().c_str()); if (!file.is_open()) return false; std::getline(file, cookie); file.close(); if (cookie_out) *cookie_out = cookie; return true; } void DeleteAuthCookie() { try { fs::remove(GetAuthCookieFile()); } catch (const fs::filesystem_error &e) { LogPrintf("%s: Unable to remove random auth cookie file: %s\n", __func__, e.what()); } } diff --git a/src/script/scriptcache.cpp b/src/script/scriptcache.cpp index 1820fce60e..e089238c06 100644 --- a/src/script/scriptcache.cpp +++ b/src/script/scriptcache.cpp @@ -1,60 +1,61 @@ // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php.
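// (Worked example for the cache sizing below, illustrative only: the
//  -maxscriptcachesize value is clamped to [0, MAX_MAX_SCRIPT_CACHE_SIZE] and
//  multiplied by 2^20, so -maxscriptcachesize=32 requests 32 MiB =
//  33,554,432 bytes; with 32-byte uint256 entries, setup_bytes() can then hold
//  on the order of a million elements, which is what the LogPrintf call
//  reports back in MiB and element count.)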
#include "scriptcache.h" #include "crypto/sha256.h" #include "cuckoocache.h" #include "primitives/transaction.h" #include "random.h" #include "script/sigcache.h" #include "sync.h" #include "util.h" #include "validation.h" static CuckooCache::cache scriptExecutionCache; static uint256 scriptExecutionCacheNonce(GetRandHash()); void InitScriptExecutionCache() { // nMaxCacheSize is unsigned. If -maxscriptcachesize is set to zero, // setup_bytes creates the minimum possible cache (2 elements). size_t nMaxCacheSize = - std::min(std::max(int64_t(0), GetArg("-maxscriptcachesize", - DEFAULT_MAX_SCRIPT_CACHE_SIZE)), + std::min(std::max(int64_t(0), + gArgs.GetArg("-maxscriptcachesize", + DEFAULT_MAX_SCRIPT_CACHE_SIZE)), MAX_MAX_SCRIPT_CACHE_SIZE) * (size_t(1) << 20); size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize); LogPrintf("Using %zu MiB out of %zu requested for script execution cache, " "able to store %zu elements\n", (nElems * sizeof(uint256)) >> 20, nMaxCacheSize >> 20, nElems); } uint256 GetScriptCacheKey(const CTransaction &tx, uint32_t flags) { uint256 key; // We only use the first 19 bytes of nonce to avoid a second SHA round - // giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64) static_assert(55 - sizeof(flags) - 32 >= 128 / 8, "Want at least 128 bits of nonce for script execution cache"); CSHA256() .Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32) .Write(tx.GetHash().begin(), 32) .Write((uint8_t *)&flags, sizeof(flags)) .Finalize(key.begin()); return key; } bool IsKeyInScriptCache(uint256 key, bool erase) { // TODO: Remove this requirement by making CuckooCache not require external // locks AssertLockHeld(cs_main); return scriptExecutionCache.contains(key, erase); } void AddKeyInScriptCache(uint256 key) { // TODO: Remove this requirement by making CuckooCache not require external // locks AssertLockHeld(cs_main); scriptExecutionCache.insert(key); } diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp index 389c24cb0b..cc62a8e6ec 100644 --- a/src/script/sigcache.cpp +++ b/src/script/sigcache.cpp @@ -1,99 +1,100 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "sigcache.h" #include "cuckoocache.h" #include "memusage.h" #include "pubkey.h" #include "random.h" #include "uint256.h" #include "util.h" #include namespace { /** * Valid signature cache, to avoid doing expensive ECDSA signature checking * twice for every transaction (once when accepted into memory pool, and * again when accepted into the block chain) */ class CSignatureCache { private: //! 
Entries are SHA256(nonce || signature hash || public key || signature): uint256 nonce; typedef CuckooCache::cache map_type; map_type setValid; boost::shared_mutex cs_sigcache; public: CSignatureCache() { GetRandBytes(nonce.begin(), 32); } void ComputeEntry(uint256 &entry, const uint256 &hash, const std::vector &vchSig, const CPubKey &pubkey) { CSHA256() .Write(nonce.begin(), 32) .Write(hash.begin(), 32) .Write(&pubkey[0], pubkey.size()) .Write(&vchSig[0], vchSig.size()) .Finalize(entry.begin()); } bool Get(const uint256 &entry, const bool erase) { boost::shared_lock lock(cs_sigcache); return setValid.contains(entry, erase); } void Set(uint256 &entry) { boost::unique_lock lock(cs_sigcache); setValid.insert(entry); } uint32_t setup_bytes(size_t n) { return setValid.setup_bytes(n); } }; /** * In previous versions of this code, signatureCache was a local static variable * in CachingTransactionSignatureChecker::VerifySignature. We initialize * signatureCache outside of VerifySignature to avoid the atomic operation per * call overhead associated with local static variables even though * signatureCache could be made local to VerifySignature. */ static CSignatureCache signatureCache; } // namespace // To be called once in AppInit2/TestingSetup to initialize the signatureCache void InitSignatureCache() { // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero, // setup_bytes creates the minimum possible cache (2 elements). size_t nMaxCacheSize = - std::min(std::max(int64_t(0), GetArg("-maxsigcachesize", - DEFAULT_MAX_SIG_CACHE_SIZE)), + std::min(std::max(int64_t(0), + gArgs.GetArg("-maxsigcachesize", + DEFAULT_MAX_SIG_CACHE_SIZE)), MAX_MAX_SIG_CACHE_SIZE) * (size_t(1) << 20); size_t nElems = signatureCache.setup_bytes(nMaxCacheSize); LogPrintf("Using %zu MiB out of %zu requested for signature cache, able to " "store %zu elements\n", (nElems * sizeof(uint256)) >> 20, nMaxCacheSize >> 20, nElems); } bool CachingTransactionSignatureChecker::VerifySignature( const std::vector &vchSig, const CPubKey &pubkey, const uint256 &sighash) const { uint256 entry; signatureCache.ComputeEntry(entry, sighash, vchSig, pubkey); if (signatureCache.Get(entry, !store)) { return true; } if (!TransactionSignatureChecker::VerifySignature(vchSig, pubkey, sighash)) { return false; } if (store) { signatureCache.Set(entry); } return true; } diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp index d57ef92436..9cf67154ba 100644 --- a/src/test/DoS_tests.cpp +++ b/src/test/DoS_tests.cpp @@ -1,221 +1,221 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
// Unit tests for denial-of-service detection/prevention code #include "chainparams.h" #include "config.h" #include "keystore.h" #include "net.h" #include "net_processing.h" #include "pow.h" #include "script/sign.h" #include "serialize.h" #include "util.h" #include "validation.h" #include "test/test_bitcoin.h" #include #include #include // Tests these internal-to-net_processing.cpp methods: extern bool AddOrphanTx(const CTransactionRef &tx, NodeId peer); extern void EraseOrphansFor(NodeId peer); extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans); struct COrphanTx { CTransactionRef tx; NodeId fromPeer; int64_t nTimeExpire; }; extern std::map mapOrphanTransactions; CService ip(uint32_t i) { struct in_addr s; s.s_addr = i; return CService(CNetAddr(s), Params().GetDefaultPort()); } static NodeId id = 0; BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup) BOOST_AUTO_TEST_CASE(DoS_banning) { const Config &config = GetConfig(); std::atomic interruptDummy(false); connman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, "", true); dummyNode1.SetSendVersion(PROTOCOL_VERSION); GetNodeSignals().InitializeNode(config, &dummyNode1, *connman); dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; // Should get banned. Misbehaving(dummyNode1.GetId(), 100, ""); SendMessages(config, &dummyNode1, *connman, interruptDummy); BOOST_CHECK(connman->IsBanned(addr1)); // Different IP, not banned. BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001 | 0x0000ff00))); CAddress addr2(ip(0xa0b0c002), NODE_NONE); CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, "", true); dummyNode2.SetSendVersion(PROTOCOL_VERSION); GetNodeSignals().InitializeNode(config, &dummyNode2, *connman); dummyNode2.nVersion = 1; dummyNode2.fSuccessfullyConnected = true; Misbehaving(dummyNode2.GetId(), 50, ""); SendMessages(config, &dummyNode2, *connman, interruptDummy); // 2 not banned yet... BOOST_CHECK(!connman->IsBanned(addr2)); // ... but 1 still should be. BOOST_CHECK(connman->IsBanned(addr1)); Misbehaving(dummyNode2.GetId(), 50, ""); SendMessages(config, &dummyNode2, *connman, interruptDummy); BOOST_CHECK(connman->IsBanned(addr2)); } BOOST_AUTO_TEST_CASE(DoS_banscore) { const Config &config = GetConfig(); std::atomic interruptDummy(false); connman->ClearBanned(); // because 11 is my favorite number. 
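// (Illustrative arithmetic for the checks below: with the ban threshold forced
//  to 111 via gArgs.ForceSetArg, a peer at 100 misbehaviour points stays below
//  the threshold, 100 + 10 = 110 still does, and 110 + 1 = 111 finally trips
//  the ban; the default threshold is restored at the end of the test.)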
- ForceSetArg("-banscore", "111"); + gArgs.ForceSetArg("-banscore", "111"); CAddress addr1(ip(0xa0b0c001), NODE_NONE); CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, "", true); dummyNode1.SetSendVersion(PROTOCOL_VERSION); GetNodeSignals().InitializeNode(config, &dummyNode1, *connman); dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; Misbehaving(dummyNode1.GetId(), 100, ""); SendMessages(config, &dummyNode1, *connman, interruptDummy); BOOST_CHECK(!connman->IsBanned(addr1)); Misbehaving(dummyNode1.GetId(), 10, ""); SendMessages(config, &dummyNode1, *connman, interruptDummy); BOOST_CHECK(!connman->IsBanned(addr1)); Misbehaving(dummyNode1.GetId(), 1, ""); SendMessages(config, &dummyNode1, *connman, interruptDummy); BOOST_CHECK(connman->IsBanned(addr1)); - ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD)); + gArgs.ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD)); } BOOST_AUTO_TEST_CASE(DoS_bantime) { const Config &config = GetConfig(); std::atomic interruptDummy(false); connman->ClearBanned(); int64_t nStartTime = GetTime(); // Overrides future calls to GetTime() SetMockTime(nStartTime); CAddress addr(ip(0xa0b0c001), NODE_NONE); CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, "", true); dummyNode.SetSendVersion(PROTOCOL_VERSION); GetNodeSignals().InitializeNode(config, &dummyNode, *connman); dummyNode.nVersion = 1; dummyNode.fSuccessfullyConnected = true; Misbehaving(dummyNode.GetId(), 100, ""); SendMessages(config, &dummyNode, *connman, interruptDummy); BOOST_CHECK(connman->IsBanned(addr)); SetMockTime(nStartTime + 60 * 60); BOOST_CHECK(connman->IsBanned(addr)); SetMockTime(nStartTime + 60 * 60 * 24 + 1); BOOST_CHECK(!connman->IsBanned(addr)); } CTransactionRef RandomOrphan() { std::map::iterator it; it = mapOrphanTransactions.lower_bound(InsecureRand256()); if (it == mapOrphanTransactions.end()) it = mapOrphanTransactions.begin(); return it->second.tx; } BOOST_AUTO_TEST_CASE(DoS_mapOrphans) { CKey key; key.MakeNewKey(true); CBasicKeyStore keystore; keystore.AddKey(key); // 50 orphan transactions: for (int i = 0; i < 50; i++) { CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].prevout.n = 0; tx.vin[0].prevout.hash = InsecureRand256(); tx.vin[0].scriptSig << OP_1; tx.vout.resize(1); tx.vout[0].nValue = 1 * CENT; tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID()); AddOrphanTx(MakeTransactionRef(tx), i); } // ... 
and 50 that depend on other orphans: for (int i = 0; i < 50; i++) { CTransactionRef txPrev = RandomOrphan(); CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].prevout.n = 0; tx.vin[0].prevout.hash = txPrev->GetId(); tx.vout.resize(1); tx.vout[0].nValue = 1 * CENT; tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID()); SignSignature(keystore, *txPrev, tx, 0, SigHashType()); AddOrphanTx(MakeTransactionRef(tx), i); } // This really-big orphan should be ignored: for (int i = 0; i < 10; i++) { CTransactionRef txPrev = RandomOrphan(); CMutableTransaction tx; tx.vout.resize(1); tx.vout[0].nValue = 1 * CENT; tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID()); tx.vin.resize(2777); for (unsigned int j = 0; j < tx.vin.size(); j++) { tx.vin[j].prevout.n = j; tx.vin[j].prevout.hash = txPrev->GetId(); } SignSignature(keystore, *txPrev, tx, 0, SigHashType()); // Re-use same signature for other inputs // (they don't have to be valid for this test) for (unsigned int j = 1; j < tx.vin.size(); j++) tx.vin[j].scriptSig = tx.vin[0].scriptSig; BOOST_CHECK(!AddOrphanTx(MakeTransactionRef(tx), i)); } // Test EraseOrphansFor: for (NodeId i = 0; i < 3; i++) { size_t sizeBefore = mapOrphanTransactions.size(); EraseOrphansFor(i); BOOST_CHECK(mapOrphanTransactions.size() < sizeBefore); } // Test LimitOrphanTxSize() function: LimitOrphanTxSize(40); BOOST_CHECK(mapOrphanTransactions.size() <= 40); LimitOrphanTxSize(10); BOOST_CHECK(mapOrphanTransactions.size() <= 10); LimitOrphanTxSize(0); BOOST_CHECK(mapOrphanTransactions.empty()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/getarg_tests.cpp b/src/test/getarg_tests.cpp index d55834a15b..530fde7756 100644 --- a/src/test/getarg_tests.cpp +++ b/src/test/getarg_tests.cpp @@ -1,160 +1,160 @@ // Copyright (c) 2012-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
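// (Usage sketch, illustrative only and matching the assertions below, of the
//  gArgs accessors this test file now exercises:
//
//      gArgs.ParseParameters(argc, argv);
//      gArgs.GetBoolArg("-foo", false);  // true after "-foo", "-foo=1" or "--foo"
//      gArgs.GetBoolArg("-foo", true);   // false after "-foo=0" or "-nofoo"
//      gArgs.GetArg("-foo", "eleven");   // "" after a bare "-foo", "11" after "-foo=11"
//      gArgs.GetArg("-foo", 1);          // 0 after "-foo=NaN" (non-numeric parses to 0)
//
//  and when "-foo" and "-nofoo" are both given, the assertions expect the later
//  of the two settings on the command line to take effect.)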
#include "test/test_bitcoin.h" #include "util.h" #include #include #include #include BOOST_FIXTURE_TEST_SUITE(getarg_tests, BasicTestingSetup) static void ResetArgs(const std::string &strArg) { std::vector vecArg; if (strArg.size()) boost::split(vecArg, strArg, boost::is_space(), boost::token_compress_on); // Insert dummy executable name: vecArg.insert(vecArg.begin(), "testbitcoin"); // Convert to char*: std::vector vecChar; for (std::string &s : vecArg) { vecChar.push_back(s.c_str()); } - ParseParameters(vecChar.size(), &vecChar[0]); + gArgs.ParseParameters(vecChar.size(), &vecChar[0]); } BOOST_AUTO_TEST_CASE(boolarg) { ResetArgs("-foo"); - BOOST_CHECK(GetBoolArg("-foo", false)); - BOOST_CHECK(GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); - BOOST_CHECK(!GetBoolArg("-fo", false)); - BOOST_CHECK(GetBoolArg("-fo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-fo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-fo", true)); - BOOST_CHECK(!GetBoolArg("-fooo", false)); - BOOST_CHECK(GetBoolArg("-fooo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-fooo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-fooo", true)); ResetArgs("-foo=0"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); ResetArgs("-foo=1"); - BOOST_CHECK(GetBoolArg("-foo", false)); - BOOST_CHECK(GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); // New 0.6 feature: auto-map -nosomething to !-something: ResetArgs("-nofoo"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); ResetArgs("-nofoo=1"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); // -nofoo should win ResetArgs("-foo -nofoo"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); // -nofoo should win ResetArgs("-foo=1 -nofoo=1"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); // -nofoo=0 should win ResetArgs("-foo=0 -nofoo=0"); - BOOST_CHECK(GetBoolArg("-foo", false)); - BOOST_CHECK(GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); // New 0.6 feature: treat -- same as -: ResetArgs("--foo=1"); - BOOST_CHECK(GetBoolArg("-foo", false)); - BOOST_CHECK(GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); ResetArgs("--nofoo=1"); - BOOST_CHECK(!GetBoolArg("-foo", false)); - BOOST_CHECK(!GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); } BOOST_AUTO_TEST_CASE(stringarg) { ResetArgs(""); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), ""); - BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "eleven"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), ""); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "eleven"); ResetArgs("-foo -bar"); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), ""); - 
BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), ""); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), ""); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), ""); ResetArgs("-foo="); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), ""); - BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), ""); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), ""); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), ""); ResetArgs("-foo=11"); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), "11"); - BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "11"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "11"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "11"); ResetArgs("-foo=eleven"); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), "eleven"); - BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "eleven"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "eleven"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "eleven"); } BOOST_AUTO_TEST_CASE(intarg) { ResetArgs(""); - BOOST_CHECK_EQUAL(GetArg("-foo", 11), 11); - BOOST_CHECK_EQUAL(GetArg("-foo", 0), 0); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 11), 11); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 0), 0); ResetArgs("-foo -bar"); - BOOST_CHECK_EQUAL(GetArg("-foo", 11), 0); - BOOST_CHECK_EQUAL(GetArg("-bar", 11), 0); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 11), 0); + BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 0); ResetArgs("-foo=11 -bar=12"); - BOOST_CHECK_EQUAL(GetArg("-foo", 0), 11); - BOOST_CHECK_EQUAL(GetArg("-bar", 11), 12); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 0), 11); + BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 12); ResetArgs("-foo=NaN -bar=NotANumber"); - BOOST_CHECK_EQUAL(GetArg("-foo", 1), 0); - BOOST_CHECK_EQUAL(GetArg("-bar", 11), 0); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 1), 0); + BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 0); } BOOST_AUTO_TEST_CASE(doubledash) { ResetArgs("--foo"); - BOOST_CHECK_EQUAL(GetBoolArg("-foo", false), true); + BOOST_CHECK_EQUAL(gArgs.GetBoolArg("-foo", false), true); ResetArgs("--foo=verbose --bar=1"); - BOOST_CHECK_EQUAL(GetArg("-foo", ""), "verbose"); - BOOST_CHECK_EQUAL(GetArg("-bar", 0), 1); + BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "verbose"); + BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 0), 1); } BOOST_AUTO_TEST_CASE(boolargno) { ResetArgs("-nofoo"); - BOOST_CHECK(!GetBoolArg("-foo", true)); - BOOST_CHECK(!GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); ResetArgs("-nofoo=1"); - BOOST_CHECK(!GetBoolArg("-foo", true)); - BOOST_CHECK(!GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); ResetArgs("-nofoo=0"); - BOOST_CHECK(GetBoolArg("-foo", true)); - BOOST_CHECK(GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); // --nofoo should win ResetArgs("-foo --nofoo"); - BOOST_CHECK(!GetBoolArg("-foo", true)); - BOOST_CHECK(!GetBoolArg("-foo", false)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", true)); + BOOST_CHECK(!gArgs.GetBoolArg("-foo", false)); // foo always wins: ResetArgs("-nofoo -foo"); - BOOST_CHECK(GetBoolArg("-foo", true)); - BOOST_CHECK(GetBoolArg("-foo", false)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", true)); + BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index 400d3d02a3..f80c773f35 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -1,770 +1,770 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed 
under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "miner.h" #include "chainparams.h" #include "coins.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/validation.h" #include "policy/policy.h" #include "pubkey.h" #include "script/standard.h" #include "txmempool.h" #include "uint256.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include "test/test_bitcoin.h" #include #include BOOST_FIXTURE_TEST_SUITE(miner_tests, TestingSetup) static CFeeRate blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE); static struct { uint8_t extranonce; unsigned int nonce; } blockinfo[] = { {4, 0xa4a3e223}, {2, 0x15c32f9e}, {1, 0x0375b547}, {1, 0x7004a8a5}, {2, 0xce440296}, {2, 0x52cfe198}, {1, 0x77a72cd0}, {2, 0xbb5d6f84}, {2, 0x83f30c2c}, {1, 0x48a73d5b}, {1, 0xef7dcd01}, {2, 0x6809c6c4}, {2, 0x0883ab3c}, {1, 0x087bbbe2}, {2, 0x2104a814}, {2, 0xdffb6daa}, {1, 0xee8a0a08}, {2, 0xba4237c1}, {1, 0xa70349dc}, {1, 0x344722bb}, {3, 0xd6294733}, {2, 0xec9f5c94}, {2, 0xca2fbc28}, {1, 0x6ba4f406}, {2, 0x015d4532}, {1, 0x6e119b7c}, {2, 0x43e8f314}, {2, 0x27962f38}, {2, 0xb571b51b}, {2, 0xb36bee23}, {2, 0xd17924a8}, {2, 0x6bc212d9}, {1, 0x630d4948}, {2, 0x9a4c4ebb}, {2, 0x554be537}, {1, 0xd63ddfc7}, {2, 0xa10acc11}, {1, 0x759a8363}, {2, 0xfb73090d}, {1, 0xe82c6a34}, {1, 0xe33e92d7}, {3, 0x658ef5cb}, {2, 0xba32ff22}, {5, 0x0227a10c}, {1, 0xa9a70155}, {5, 0xd096d809}, {1, 0x37176174}, {1, 0x830b8d0f}, {1, 0xc6e3910e}, {2, 0x823f3ca8}, {1, 0x99850849}, {1, 0x7521fb81}, {1, 0xaacaabab}, {1, 0xd645a2eb}, {5, 0x7aea1781}, {5, 0x9d6e4b78}, {1, 0x4ce90fd8}, {1, 0xabdc832d}, {6, 0x4a34f32a}, {2, 0xf2524c1c}, {2, 0x1bbeb08a}, {1, 0xad47f480}, {1, 0x9f026aeb}, {1, 0x15a95049}, {2, 0xd1cb95b2}, {2, 0xf84bbda5}, {1, 0x0fa62cd1}, {1, 0xe05f9169}, {1, 0x78d194a9}, {5, 0x3e38147b}, {5, 0x737ba0d4}, {1, 0x63378e10}, {1, 0x6d5f91cf}, {2, 0x88612eb8}, {2, 0xe9639484}, {1, 0xb7fabc9d}, {2, 0x19b01592}, {1, 0x5a90dd31}, {2, 0x5bd7e028}, {2, 0x94d00323}, {1, 0xa9b9c01a}, {1, 0x3a40de61}, {1, 0x56e7eec7}, {5, 0x859f7ef6}, {1, 0xfd8e5630}, {1, 0x2b0c9f7f}, {1, 0xba700e26}, {1, 0x7170a408}, {1, 0x70de86a8}, {1, 0x74d64cd5}, {1, 0x49e738a1}, {2, 0x6910b602}, {0, 0x643c565f}, {1, 0x54264b3f}, {2, 0x97ea6396}, {2, 0x55174459}, {2, 0x03e8779a}, {1, 0x98f34d8f}, {1, 0xc07b2b07}, {1, 0xdfe29668}, {1, 0x3141c7c1}, {1, 0xb3b595f4}, {1, 0x735abf08}, {5, 0x623bfbce}, {2, 0xd351e722}, {1, 0xf4ca48c9}, {1, 0x5b19c670}, {1, 0xa164bf0e}, {2, 0xbbbeb305}, {2, 0xfe1c810a}, }; CBlockIndex CreateBlockIndex(int nHeight) { CBlockIndex index; index.nHeight = nHeight; index.pprev = chainActive.Tip(); return index; } bool TestSequenceLocks(const CTransaction &tx, int flags) { LOCK(mempool.cs); return CheckSequenceLocks(tx, flags); } // Test suite for ancestor feerate transaction selection. // Implemented as an additional function, rather than a separate test case, to // allow reusing the blockchain created in CreateNewBlock_validity. // Note that this test assumes blockprioritypercentage is 0. void TestPackageSelection(const CChainParams &chainparams, CScript scriptPubKey, std::vector &txFirst) { // Test the ancestor feerate transaction selection. TestMemPoolEntryHelper entry; GlobalConfig config; // these 3 tests assume blockprioritypercentage is 0. config.SetBlockPriorityPercentage(0); // Test that a medium fee transaction will be selected after a higher fee // rate package with a low fee rate parent. 
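// A minimal sketch of the ancestor-feerate comparison this test relies on,
// assuming equal transaction sizes (200 bytes is a placeholder; the real
// selector uses the serialized sizes tracked by the mempool entries).
#include <cassert>
#include <cstdint>

static double PackageFeeRateSketch(int64_t feeSatoshis, int64_t sizeBytes) {
    // Fee rate of a transaction or package, in satoshis per byte.
    return double(feeSatoshis) / double(sizeBytes);
}

static void PackageOrderingSketch() {
    const int64_t txSize = 200; // assumed size of every tx in this sketch
    // Parent alone (1000 sat) rates worse than the medium tx (10000 sat)...
    assert(PackageFeeRateSketch(1000, txSize) < PackageFeeRateSketch(10000, txSize));
    // ...but parent + 50000 sat child, scored as one package, rates better,
    // so the expected order is parent, high-fee child, then the medium tx.
    assert(PackageFeeRateSketch(1000 + 50000, 2 * txSize) >
           PackageFeeRateSketch(10000, txSize));
}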
CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vin[0].prevout.n = 0; tx.vout.resize(1); tx.vout[0].nValue = Amount(5000000000LL - 1000); // This tx has a low fee: 1000 satoshis. // Save this txid for later use. uint256 hashParentTx = tx.GetId(); mempool.addUnchecked(hashParentTx, entry.Fee(Amount(1000)) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a medium fee: 10000 satoshis. tx.vin[0].prevout.hash = txFirst[1]->GetId(); tx.vout[0].nValue = Amount(5000000000LL - 10000); uint256 hashMediumFeeTx = tx.GetId(); mempool.addUnchecked(hashMediumFeeTx, entry.Fee(Amount(10000)) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a high fee, but depends on the first transaction. tx.vin[0].prevout.hash = hashParentTx; // 50k satoshi fee. tx.vout[0].nValue = Amount(5000000000LL - 1000 - 50000); uint256 hashHighFeeTx = tx.GetId(); mempool.addUnchecked(hashHighFeeTx, entry.Fee(Amount(50000)) .Time(GetTime()) .SpendsCoinbase(false) .FromTx(tx)); std::unique_ptr pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[1]->GetId() == hashParentTx); BOOST_CHECK(pblocktemplate->block.vtx[2]->GetId() == hashHighFeeTx); BOOST_CHECK(pblocktemplate->block.vtx[3]->GetId() == hashMediumFeeTx); // Test that a package below the block min tx fee doesn't get included tx.vin[0].prevout.hash = hashHighFeeTx; // 0 fee. tx.vout[0].nValue = Amount(5000000000LL - 1000 - 50000); uint256 hashFreeTx = tx.GetId(); mempool.addUnchecked(hashFreeTx, entry.Fee(Amount(0)).FromTx(tx)); size_t freeTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); // Calculate a fee on child transaction that will put the package just // below the block min tx fee (assuming 1 child tx of the same size). Amount feeToUse = blockMinFeeRate.GetFee(2 * freeTxSize) - Amount(1); tx.vin[0].prevout.hash = hashFreeTx; tx.vout[0].nValue = Amount(5000000000LL - 1000 - 50000) - feeToUse; uint256 hashLowFeeTx = tx.GetId(); mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); // Verify that the free tx and the low fee tx didn't get selected. for (size_t i = 0; i < pblocktemplate->block.vtx.size(); ++i) { BOOST_CHECK(pblocktemplate->block.vtx[i]->GetId() != hashFreeTx); BOOST_CHECK(pblocktemplate->block.vtx[i]->GetId() != hashLowFeeTx); } // Test that packages above the min relay fee do get included, even if one // of the transactions is below the min relay fee. Remove the low fee // transaction and replace with a higher fee transaction mempool.removeRecursive(tx); // Now we should be just over the min relay fee. tx.vout[0].nValue -= Amount(2); hashLowFeeTx = tx.GetId(); mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse + Amount(2)).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[4]->GetId() == hashFreeTx); BOOST_CHECK(pblocktemplate->block.vtx[5]->GetId() == hashLowFeeTx); // Test that transaction selection properly updates ancestor fee // calculations as ancestor transactions get included in a block. Add a // 0-fee transaction that has 2 outputs. tx.vin[0].prevout.hash = txFirst[2]->GetId(); tx.vout.resize(2); tx.vout[0].nValue = Amount(5000000000LL - 100000000); // 1BCC output. 
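// A minimal sketch of the "just below the block min tx fee" arithmetic used
// above, assuming the simple rule fee = ratePerKB * bytes / 1000; the exact
// rounding of CFeeRate::GetFee may differ, and the rate and size below are
// placeholders chosen only to make the inequalities visible.
#include <cassert>
#include <cstdint>

static int64_t FeeForSizeSketch(int64_t ratePerKB, int64_t bytes) {
    return ratePerKB * bytes / 1000;
}

static void BlockMinFeeSketch() {
    const int64_t ratePerKB = 1000; // assumed block min feerate (sat/kB)
    const int64_t freeTxSize = 250; // assumed size of the 0-fee parent
    // Child fee is picked 1 satoshi short of what the 2-tx package needs:
    const int64_t feeToUse = FeeForSizeSketch(ratePerKB, 2 * freeTxSize) - 1;
    assert(feeToUse < FeeForSizeSketch(ratePerKB, 2 * freeTxSize));     // skipped
    // Re-adding the child with 2 more satoshis puts the package just over:
    assert(feeToUse + 2 > FeeForSizeSketch(ratePerKB, 2 * freeTxSize)); // selected
}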
tx.vout[1].nValue = Amount(100000000); uint256 hashFreeTx2 = tx.GetId(); mempool.addUnchecked(hashFreeTx2, entry.Fee(Amount(0)).SpendsCoinbase(true).FromTx(tx)); // This tx can't be mined by itself. tx.vin[0].prevout.hash = hashFreeTx2; tx.vout.resize(1); feeToUse = blockMinFeeRate.GetFee(freeTxSize); tx.vout[0].nValue = Amount(5000000000LL) - Amount(100000000) - feeToUse; uint256 hashLowFeeTx2 = tx.GetId(); mempool.addUnchecked(hashLowFeeTx2, entry.Fee(feeToUse).SpendsCoinbase(false).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); // Verify that this tx isn't selected. for (size_t i = 0; i < pblocktemplate->block.vtx.size(); ++i) { BOOST_CHECK(pblocktemplate->block.vtx[i]->GetId() != hashFreeTx2); BOOST_CHECK(pblocktemplate->block.vtx[i]->GetId() != hashLowFeeTx2); } // This tx will be mineable, and should cause hashLowFeeTx2 to be selected // as well. tx.vin[0].prevout.n = 1; // 10k satoshi fee. tx.vout[0].nValue = Amount(100000000 - 10000); mempool.addUnchecked(tx.GetId(), entry.Fee(Amount(10000)).FromTx(tx)); pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[8]->GetId() == hashLowFeeTx2); } void TestCoinbaseMessageEB(uint64_t eb, std::string cbmsg) { GlobalConfig config; config.SetMaxBlockSize(eb); CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112" "de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; std::unique_ptr pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); CBlock *pblock = &pblocktemplate->block; // IncrementExtraNonce creates a valid coinbase and merkleRoot unsigned int extraNonce = 0; IncrementExtraNonce(config, pblock, chainActive.Tip(), extraNonce); unsigned int nHeight = chainActive.Tip()->nHeight + 1; std::vector vec(cbmsg.begin(), cbmsg.end()); BOOST_CHECK(pblock->vtx[0]->vin[0].scriptSig == ((CScript() << nHeight << CScriptNum(extraNonce) << vec) + COINBASE_FLAGS)); } // Coinbase scriptSig has to contains the correct EB value // converted to MB, rounded down to the first decimal BOOST_AUTO_TEST_CASE(CheckCoinbase_EB) { TestCoinbaseMessageEB(1000001, "/EB1.0/"); TestCoinbaseMessageEB(2000000, "/EB2.0/"); TestCoinbaseMessageEB(8000000, "/EB8.0/"); TestCoinbaseMessageEB(8320000, "/EB8.3/"); } // NOTE: These tests rely on CreateNewBlock doing its own self-validation! BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) { // Note that by default, these tests run with size accounting enabled. CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112" "de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; std::unique_ptr pblocktemplate; CMutableTransaction tx, tx2; CScript script; uint256 hash; TestMemPoolEntryHelper entry; entry.nFee = Amount(11); entry.dPriority = 111.0; entry.nHeight = 11; GlobalConfig config; LOCK(cs_main); fCheckpointsEnabled = false; // Simple block creation, nothing special yet: BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // We can't make transactions until we have inputs. Therefore, load 100 // blocks :) int baseheight = 0; std::vector txFirst; for (unsigned int i = 0; i < sizeof(blockinfo) / sizeof(*blockinfo); ++i) { // pointer for convenience. 
CBlock *pblock = &pblocktemplate->block; pblock->nVersion = 1; pblock->nTime = chainActive.Tip()->GetMedianTimePast() + 1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); txCoinbase.vin[0].scriptSig.push_back(chainActive.Height()); // Ignore the (optional) segwit commitment added by CreateNewBlock (as // the hardcoded nonces don't account for this) txCoinbase.vout.resize(1); txCoinbase.vout[0].scriptPubKey = CScript(); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); if (txFirst.size() == 0) baseheight = chainActive.Height(); if (txFirst.size() < 4) txFirst.push_back(pblock->vtx[0]); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); pblock->nNonce = blockinfo[i].nonce; std::shared_ptr shared_pblock = std::make_shared(*pblock); BOOST_CHECK(ProcessNewBlock(GetConfig(), shared_pblock, true, nullptr)); pblock->hashPrevBlock = pblock->GetHash(); } // Just to make sure we can still make simple blocks. BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); const Amount BLOCKSUBSIDY = 50 * COIN; const Amount LOWFEE = CENT; const Amount HIGHFEE = COIN; const Amount HIGHERFEE = 4 * COIN; // block sigops > limit: 1000 CHECKMULTISIG + 1 tx.vin.resize(1); // NOTE: OP_NOP is used to force 20 SigOps for the CHECKMULTISIG tx.vin[0].scriptSig = CScript() << OP_0 << OP_0 << OP_0 << OP_NOP << OP_CHECKMULTISIG << OP_1; tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vin[0].prevout.n = 0; tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; // If we don't set the # of sig ops in the CTxMemPoolEntry, template // creation fails. mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout.hash = hash; } BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; // If we do set the # of sig ops in the CTxMemPoolEntry, template // creation passes. mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .SigOpsCost(80) .FromTx(tx)); tx.vin[0].prevout.hash = hash; } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // block size > limit tx.vin[0].scriptSig = CScript(); // 18 * (520char + DROP) + OP_1 = 9433 bytes std::vector vchData(520); for (unsigned int i = 0; i < 18; ++i) tx.vin[0].scriptSig << vchData << OP_DROP; tx.vin[0].scriptSig << OP_1; tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 128; ++i) { tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = (i == 0) ? true : false; mempool.addUnchecked(hash, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout.hash = hash; } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // Orphan in mempool, template creation fails. 
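// Sketch of the script-size arithmetic behind the "block size > limit" case
// above: each 520-byte chunk is pushed with OP_PUSHDATA2 (1 opcode byte plus
// a 2-byte length), followed by the data and an OP_DROP, and the script ends
// with a single OP_1, which is where the 9433-byte figure comes from.
#include <cassert>

static void ScriptSizeSketch() {
    const int perChunk = 1 + 2 + 520 + 1; // OP_PUSHDATA2 + len + data + OP_DROP
    assert(18 * perChunk + 1 == 9433);    // + trailing OP_1
}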
hash = tx.GetId(); mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Child with higher priority than parent. tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout.hash = txFirst[1]->GetId(); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout.hash = hash; tx.vin.resize(2); tx.vin[1].scriptSig = CScript() << OP_1; tx.vin[1].prevout.hash = txFirst[0]->GetId(); tx.vin[1].prevout.n = 0; // First txn output + fresh coinbase - new txn fee. tx.vout[0].nValue = tx.vout[0].nValue + BLOCKSUBSIDY - HIGHERFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHERFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); mempool.clear(); // Coinbase in mempool, template creation fails. tx.vin.resize(1); tx.vin[0].prevout.SetNull(); tx.vin[0].scriptSig = CScript() << OP_0 << OP_1; tx.vout[0].nValue = Amount(0); hash = tx.GetId(); // Give it a fee so it'll get mined. mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Invalid (pre-p2sh) txn in mempool, template creation fails. tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vin[0].prevout.n = 0; tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - LOWFEE; script = CScript() << OP_0; tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script)); hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout.hash = hash; tx.vin[0].scriptSig = CScript() << std::vector(script.begin(), script.end()); tx.vout[0].nValue -= LOWFEE; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Double spend txn pair in mempool, template creation fails. tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vout[0].scriptPubKey = CScript() << OP_2; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK_THROW(BlockAssembler(config).CreateNewBlock(scriptPubKey), std::runtime_error); mempool.clear(); // Subsidy changing. int nHeight = chainActive.Height(); // Create an actual 209999-long block chain (without valid blocks). while (chainActive.Tip()->nHeight < 209999) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // Extend to a 210000-long block chain. 
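// Sketch of why the heights 209999 and 210000 matter here: the coinbase
// subsidy halves every 210000 blocks, so CreateNewBlock has to compute the
// height-dependent value on both sides of the boundary. Simplified formula;
// the real interval and amounts come from the consensus parameters.
#include <cassert>
#include <cstdint>

static int64_t SubsidySketch(int height) {
    const int64_t COIN_SKETCH = 100000000; // satoshis per coin
    int halvings = height / 210000;
    if (halvings >= 64) return 0;          // entirely shifted away
    return (50 * COIN_SKETCH) >> halvings;
}

static void SubsidyBoundarySketch() {
    assert(SubsidySketch(209999) == 50LL * 100000000); // last full-subsidy block
    assert(SubsidySketch(210000) == 25LL * 100000000); // first halved block
}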
while (chainActive.Tip()->nHeight < 210000) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // Delete the dummy blocks again. while (chainActive.Tip()->nHeight > nHeight) { CBlockIndex *del = chainActive.Tip(); chainActive.SetTip(del->pprev); pcoinsTip->SetBestBlock(del->pprev->GetBlockHash()); delete del->phashBlock; delete del; } // non-final txs in mempool SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); int flags = LOCKTIME_VERIFY_SEQUENCE | LOCKTIME_MEDIAN_TIME_PAST; // height map std::vector prevheights; // Relative height locked. tx.nVersion = 2; tx.vin.resize(1); prevheights.resize(1); // Only 1 transaction. tx.vin[0].prevout.hash = txFirst[0]->GetId(); tx.vin[0].prevout.n = 0; tx.vin[0].scriptSig = CScript() << OP_1; // txFirst[0] is the 2nd block tx.vin[0].nSequence = chainActive.Tip()->nHeight + 1; prevheights[0] = baseheight + 1; tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; tx.nLockTime = 0; hash = tx.GetId(); mempool.addUnchecked( hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); { // Locktime passes. GlobalConfig config; CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock(config, tx, state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(tx, flags)); // Sequence locks pass on 2nd block. BOOST_CHECK( SequenceLocks(tx, flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 2))); // Relative time locked. tx.vin[0].prevout.hash = txFirst[1]->GetId(); // txFirst[1] is the 3rd block. tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | (((chainActive.Tip()->GetMedianTimePast() + 1 - chainActive[1]->GetMedianTimePast()) >> CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) + 1); prevheights[0] = baseheight + 2; hash = tx.GetId(); mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx)); { // Locktime passes. GlobalConfig config; CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock(config, tx, state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(tx, flags)); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } // Sequence locks pass 512 seconds later. BOOST_CHECK( SequenceLocks(tx, flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 1))); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Undo tricked MTP. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime -= 512; } // Absolute height locked. tx.vin[0].prevout.hash = txFirst[2]->GetId(); tx.vin[0].nSequence = CTxIn::SEQUENCE_FINAL - 1; prevheights[0] = baseheight + 3; tx.nLockTime = chainActive.Tip()->nHeight + 1; hash = tx.GetId(); mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx)); { // Locktime fails. GlobalConfig config; CValidationState state; BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock(config, tx, state, flags)); BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal"); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(tx, flags)); { // Locktime passes on 2nd block. 
GlobalConfig config; CValidationState state; BOOST_CHECK(ContextualCheckTransaction( config, tx, state, chainActive.Tip()->nHeight + 2, chainActive.Tip()->GetMedianTimePast())); } // Absolute time locked. tx.vin[0].prevout.hash = txFirst[3]->GetId(); tx.nLockTime = chainActive.Tip()->GetMedianTimePast(); prevheights.resize(1); prevheights[0] = baseheight + 4; hash = tx.GetId(); mempool.addUnchecked(hash, entry.Time(GetTime()).FromTx(tx)); { // Locktime fails. GlobalConfig config; CValidationState state; BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock(config, tx, state, flags)); BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal"); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(tx, flags)); { // Locktime passes 1 second later. GlobalConfig config; CValidationState state; BOOST_CHECK(ContextualCheckTransaction( config, tx, state, chainActive.Tip()->nHeight + 1, chainActive.Tip()->GetMedianTimePast() + 1)); } // mempool-dependent transactions (not added) tx.vin[0].prevout.hash = hash; prevheights[0] = chainActive.Tip()->nHeight + 1; tx.nLockTime = 0; tx.vin[0].nSequence = 0; { // Locktime passes. GlobalConfig config; CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock(config, tx, state, flags)); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(tx, flags)); tx.vin[0].nSequence = 1; // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(tx, flags)); tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG; // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(tx, flags)); tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1; // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(tx, flags)); BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); // None of the of the absolute height/time locked tx should have made it // into the template because we still check IsFinalTx in CreateNewBlock, but // relative locked txs will if inconsistently added to mempool. For now // these will still generate a valid template until BIP68 soft fork. BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 3UL); // However if we advance height by 1 and time by 512, all of them should be // mined. for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } chainActive.Tip()->nHeight++; SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); BOOST_CHECK(pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey)); BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5UL); chainActive.Tip()->nHeight--; SetMockTime(0); mempool.clear(); const CChainParams &chainparams = Params(CBaseChainParams::MAIN); TestPackageSelection(chainparams, scriptPubKey, txFirst); fCheckpointsEnabled = true; } void CheckBlockMaxSize(const CChainParams &chainparams, uint64_t size, uint64_t expected) { GlobalConfig config; - ForceSetArg("-blockmaxsize", std::to_string(size)); + gArgs.ForceSetArg("-blockmaxsize", std::to_string(size)); BlockAssembler ba(config); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), expected); } BOOST_AUTO_TEST_CASE(BlockAssembler_construction) { GlobalConfig config; const CChainParams &chainparams = Params(); // We are working on a fake chain and need to protect ourselves. 
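// Sketch of the clamp that the CheckBlockMaxSize expectations below are
// consistent with: the -blockmaxsize value set via gArgs.ForceSetArg appears
// to be kept between a 1000-byte floor and (max block size - 1000) of
// headroom. This is an inference from the test values, not the literal body
// of GetMaxGeneratedBlockSize.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint64_t MaxGeneratedSizeSketch(uint64_t requested, uint64_t maxBlockSize) {
    return std::max<uint64_t>(1000, std::min(requested, maxBlockSize - 1000));
}

static void MaxGeneratedSizeSketchChecks() {
    const uint64_t ONE_MB = 1000000;
    assert(MaxGeneratedSizeSketch(0, ONE_MB + 1) == 1000);
    assert(MaxGeneratedSizeSketch(12345, ONE_MB + 1) == 12345);
    assert(MaxGeneratedSizeSketch(ONE_MB, ONE_MB + 1) == ONE_MB - 999);
    assert(MaxGeneratedSizeSketch(8 * ONE_MB, 8 * ONE_MB) == 8 * ONE_MB - 1000);
}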
LOCK(cs_main); // Activate UAHF the dirty way const int64_t uahfHeight = config.GetChainParams().GetConsensus().uahfHeight; auto pindex = chainActive.Tip(); for (size_t i = 0; pindex && i < 5; i++) { pindex->nHeight = uahfHeight + 5 - i; pindex = pindex->pprev; } // Test around historical 1MB (plus one byte because that's mandatory) config.SetMaxBlockSize(ONE_MEGABYTE + 1); CheckBlockMaxSize(chainparams, 0, 1000); CheckBlockMaxSize(chainparams, 1000, 1000); CheckBlockMaxSize(chainparams, 1001, 1001); CheckBlockMaxSize(chainparams, 12345, 12345); CheckBlockMaxSize(chainparams, ONE_MEGABYTE - 1001, ONE_MEGABYTE - 1001); CheckBlockMaxSize(chainparams, ONE_MEGABYTE - 1000, ONE_MEGABYTE - 1000); CheckBlockMaxSize(chainparams, ONE_MEGABYTE - 999, ONE_MEGABYTE - 999); CheckBlockMaxSize(chainparams, ONE_MEGABYTE, ONE_MEGABYTE - 999); // Test around higher limit such as 8MB static const auto EIGHT_MEGABYTES = 8 * ONE_MEGABYTE; config.SetMaxBlockSize(EIGHT_MEGABYTES); CheckBlockMaxSize(chainparams, EIGHT_MEGABYTES - 1001, EIGHT_MEGABYTES - 1001); CheckBlockMaxSize(chainparams, EIGHT_MEGABYTES - 1000, EIGHT_MEGABYTES - 1000); CheckBlockMaxSize(chainparams, EIGHT_MEGABYTES - 999, EIGHT_MEGABYTES - 1000); CheckBlockMaxSize(chainparams, EIGHT_MEGABYTES, EIGHT_MEGABYTES - 1000); // Test around default cap config.SetMaxBlockSize(DEFAULT_MAX_BLOCK_SIZE); CheckBlockMaxSize(chainparams, DEFAULT_MAX_BLOCK_SIZE - 1001, DEFAULT_MAX_BLOCK_SIZE - 1001); CheckBlockMaxSize(chainparams, DEFAULT_MAX_BLOCK_SIZE - 1000, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(chainparams, DEFAULT_MAX_BLOCK_SIZE - 999, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(chainparams, DEFAULT_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE - 1000); // If the parameter is not specified, we use // DEFAULT_MAX_GENERATED_BLOCK_SIZE { - ClearArg("-blockmaxsize"); + gArgs.ClearArg("-blockmaxsize"); BlockAssembler ba(config); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), DEFAULT_MAX_GENERATED_BLOCK_SIZE); } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 062b65fb46..33303d148f 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -1,205 +1,205 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "addrman.h" #include "chainparams.h" #include "config.h" #include "hash.h" #include "net.h" #include "netbase.h" #include "serialize.h" #include "streams.h" #include "test/test_bitcoin.h" #include #include class CAddrManSerializationMock : public CAddrMan { public: virtual void Serialize(CDataStream &s) const = 0; //! Ensure that bucket placement is always the same for testing purposes. void MakeDeterministic() { nKey.SetNull(); insecure_rand = FastRandomContext(true); } }; class CAddrManUncorrupted : public CAddrManSerializationMock { public: void Serialize(CDataStream &s) const override { CAddrMan::Serialize(s); } }; class CAddrManCorrupted : public CAddrManSerializationMock { public: void Serialize(CDataStream &s) const override { // Produces corrupt output that claims addrman has 20 addrs when it only // has one addr. 
uint8_t nVersion = 1; s << nVersion; s << uint8_t(32); s << nKey; s << 10; // nNew s << 10; // nTried int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30); s << nUBuckets; CService serv; Lookup("252.1.1.1", serv, 7777, false); CAddress addr = CAddress(serv, NODE_NONE); CNetAddr resolved; LookupHost("252.2.2.2", resolved, false); CAddrInfo info = CAddrInfo(addr, resolved); s << info; } }; CDataStream AddrmanToStream(CAddrManSerializationMock &_addrman) { CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION); ssPeersIn << FLATDATA(Params().DiskMagic()); ssPeersIn << _addrman; std::string str = ssPeersIn.str(); std::vector vchData(str.begin(), str.end()); return CDataStream(vchData, SER_DISK, CLIENT_VERSION); } BOOST_FIXTURE_TEST_SUITE(net_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(caddrdb_read) { CAddrManUncorrupted addrmanUncorrupted; addrmanUncorrupted.MakeDeterministic(); CService addr1, addr2, addr3; Lookup("250.7.1.1", addr1, 8333, false); Lookup("250.7.2.2", addr2, 9999, false); Lookup("250.7.3.3", addr3, 9999, false); // Add three addresses to new table. CService source; Lookup("252.5.1.1", source, 8333, false); addrmanUncorrupted.Add(CAddress(addr1, NODE_NONE), source); addrmanUncorrupted.Add(CAddress(addr2, NODE_NONE), source); addrmanUncorrupted.Add(CAddress(addr3, NODE_NONE), source); // Test that the de-serialization does not throw an exception. CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted); bool exceptionThrown = false; CAddrMan addrman1; BOOST_CHECK(addrman1.size() == 0); try { uint8_t pchMsgTmp[4]; ssPeers1 >> FLATDATA(pchMsgTmp); ssPeers1 >> addrman1; } catch (const std::exception &e) { exceptionThrown = true; } BOOST_CHECK(addrman1.size() == 3); BOOST_CHECK(exceptionThrown == false); // Test that CAddrDB::Read creates an addrman with the correct number of // addrs. CDataStream ssPeers2 = AddrmanToStream(addrmanUncorrupted); CAddrMan addrman2; CAddrDB adb; BOOST_CHECK(addrman2.size() == 0); adb.Read(addrman2, ssPeers2); BOOST_CHECK(addrman2.size() == 3); } BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted) { CAddrManCorrupted addrmanCorrupted; addrmanCorrupted.MakeDeterministic(); // Test that the de-serialization of corrupted addrman throws an exception. CDataStream ssPeers1 = AddrmanToStream(addrmanCorrupted); bool exceptionThrown = false; CAddrMan addrman1; BOOST_CHECK(addrman1.size() == 0); try { uint8_t pchMsgTmp[4]; ssPeers1 >> FLATDATA(pchMsgTmp); ssPeers1 >> addrman1; } catch (const std::exception &e) { exceptionThrown = true; } // Even through de-serialization failed addrman is not left in a clean // state. BOOST_CHECK(addrman1.size() == 1); BOOST_CHECK(exceptionThrown); // Test that CAddrDB::Read leaves addrman in a clean state if // de-serialization fails. CDataStream ssPeers2 = AddrmanToStream(addrmanCorrupted); CAddrMan addrman2; CAddrDB adb; BOOST_CHECK(addrman2.size() == 0); adb.Read(addrman2, ssPeers2); BOOST_CHECK(addrman2.size() == 0); } BOOST_AUTO_TEST_CASE(cnode_simple_test) { SOCKET hSocket = INVALID_SOCKET; NodeId id = 0; int height = 0; in_addr ipv4Addr; ipv4Addr.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK); std::string pszDest = ""; bool fInboundIn = false; // Test that fFeeler is false by default. 
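// Sketch of the truncation that the getSubVersionEB checks further below
// expect: the excessive block size in bytes is reported in megabytes, cut
// off (not rounded) after one decimal digit. Dividing by 100000 gives tenths
// of a megabyte; the real helper may format the string differently.
#include <cassert>
#include <cstdint>
#include <string>

static std::string SubVersionEBSketch(uint64_t ebBytes) {
    uint64_t tenths = ebBytes / 100000; // tenths of a megabyte, truncated
    return std::to_string(tenths / 10) + "." + std::to_string(tenths % 10);
}

static void SubVersionEBSketchChecks() {
    assert(SubVersionEBSketch(1560000) == "1.5");        // 1.56 MB -> "1.5"
    assert(SubVersionEBSketch(210000) == "0.2");
    assert(SubVersionEBSketch(13800000000ULL) == "13800.0");
    assert(SubVersionEBSketch(0) == "0.0");
}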
std::unique_ptr pnode1(new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, pszDest, fInboundIn)); BOOST_CHECK(pnode1->fInbound == false); BOOST_CHECK(pnode1->fFeeler == false); fInboundIn = true; std::unique_ptr pnode2(new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, pszDest, fInboundIn)); BOOST_CHECK(pnode2->fInbound == true); BOOST_CHECK(pnode2->fFeeler == false); } BOOST_AUTO_TEST_CASE(test_getSubVersionEB) { BOOST_CHECK_EQUAL(getSubVersionEB(13800000000), "13800.0"); BOOST_CHECK_EQUAL(getSubVersionEB(3800000000), "3800.0"); BOOST_CHECK_EQUAL(getSubVersionEB(14000000), "14.0"); BOOST_CHECK_EQUAL(getSubVersionEB(1540000), "1.5"); BOOST_CHECK_EQUAL(getSubVersionEB(1560000), "1.5"); BOOST_CHECK_EQUAL(getSubVersionEB(210000), "0.2"); BOOST_CHECK_EQUAL(getSubVersionEB(10000), "0.0"); BOOST_CHECK_EQUAL(getSubVersionEB(0), "0.0"); } BOOST_AUTO_TEST_CASE(test_userAgentLength) { GlobalConfig config; config.SetMaxBlockSize(8000000); std::string long_uacomment = "very very very very very very very very very " "very very very very very very very very very " "very very very very very very very very very " "very very very very very very very very very " "very very very very very very very very very " "very very very very very very very very very " "very very very very very very very very very " "very very very very very very long comment"; - ForceSetMultiArg("-uacomment", long_uacomment); + gArgs.ForceSetMultiArg("-uacomment", long_uacomment); BOOST_CHECK_EQUAL(userAgent(config).size(), MAX_SUBVERSION_LENGTH); BOOST_CHECK_EQUAL(userAgent(config), "/Bitcoin ABC:0.16.3(EB8.0; very very very very very " "very very very very very very very very very very very " "very very very very very very very very very very very " "very very very very very very very very very very very " "very very very very very very very ve)/"); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp index 4300d811a4..27cfbbcfc1 100644 --- a/src/test/test_bitcoin.cpp +++ b/src/test/test_bitcoin.cpp @@ -1,247 +1,247 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "test_bitcoin.h" #include "chainparams.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/validation.h" #include "crypto/sha256.h" #include "fs.h" #include "key.h" #include "miner.h" #include "net_processing.h" #include "pubkey.h" #include "random.h" #include "rpc/register.h" #include "rpc/server.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "validation.h" #include "test/testutil.h" #include #include #include #include #include #include #include #include uint256 insecure_rand_seed = GetRandHash(); FastRandomContext insecure_rand_ctx(insecure_rand_seed); extern bool fPrintToConsole; extern void noui_connect(); BasicTestingSetup::BasicTestingSetup(const std::string &chainName) { SHA256AutoDetect(); RandomInit(); ECC_Start(); SetupEnvironment(); SetupNetworking(); InitSignatureCache(); InitScriptExecutionCache(); // Don't want to write to debug.log file. fPrintToDebugLog = false; fCheckBlockIndex = true; SelectParams(chainName); noui_connect(); // Set config parameters to default. 
GlobalConfig config; config.SetMaxBlockSize(DEFAULT_MAX_BLOCK_SIZE); } BasicTestingSetup::~BasicTestingSetup() { ECC_Stop(); g_connman.reset(); } TestingSetup::TestingSetup(const std::string &chainName) : BasicTestingSetup(chainName) { // Ideally we'd move all the RPC tests to the functional testing framework // instead of unit tests, but for now we need these here. const Config &config = GetConfig(); RegisterAllRPCCommands(tableRPC); ClearDatadirCache(); pathTemp = GetTempPath() / strprintf("test_bitcoin_%lu_%i", (unsigned long)GetTime(), (int)(InsecureRandRange(100000))); fs::create_directories(pathTemp); - ForceSetArg("-datadir", pathTemp.string()); + gArgs.ForceSetArg("-datadir", pathTemp.string()); mempool.setSanityCheck(1.0); pblocktree = new CBlockTreeDB(1 << 20, true); pcoinsdbview = new CCoinsViewDB(1 << 23, true); pcoinsTip = new CCoinsViewCache(pcoinsdbview); if (!InitBlockIndex(config)) { throw std::runtime_error("InitBlockIndex failed."); } { CValidationState state; if (!ActivateBestChain(config, state)) { throw std::runtime_error("ActivateBestChain failed."); } } nScriptCheckThreads = 3; for (int i = 0; i < nScriptCheckThreads - 1; i++) { threadGroup.create_thread(&ThreadScriptCheck); } // Deterministic randomness for tests. g_connman = std::unique_ptr(new CConnman(config, 0x1337, 0x1337)); connman = g_connman.get(); RegisterNodeSignals(GetNodeSignals()); } TestingSetup::~TestingSetup() { UnregisterNodeSignals(GetNodeSignals()); threadGroup.interrupt_all(); threadGroup.join_all(); UnloadBlockIndex(); delete pcoinsTip; delete pcoinsdbview; delete pblocktree; fs::remove_all(pathTemp); } TestChain100Setup::TestChain100Setup() : TestingSetup(CBaseChainParams::REGTEST) { // Generate a 100-block chain: coinbaseKey.MakeNewKey(true); CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG; for (int i = 0; i < COINBASE_MATURITY; i++) { std::vector noTxns; CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey); coinbaseTxns.push_back(*b.vtx[0]); } } // // Create a new block with just given transactions, coinbase paying to // scriptPubKey, and try to add it to the current chain. // CBlock TestChain100Setup::CreateAndProcessBlock( const std::vector &txns, const CScript &scriptPubKey) { const Config &config = GetConfig(); std::unique_ptr pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptPubKey); CBlock &block = pblocktemplate->block; // Replace mempool-selected txns with just coinbase plus passed-in txns: block.vtx.resize(1); for (const CMutableTransaction &tx : txns) { block.vtx.push_back(MakeTransactionRef(tx)); } // IncrementExtraNonce creates a valid coinbase and merkleRoot unsigned int extraNonce = 0; IncrementExtraNonce(config, &block, chainActive.Tip(), extraNonce); while (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { ++block.nNonce; } std::shared_ptr shared_pblock = std::make_shared(block); ProcessNewBlock(GetConfig(), shared_pblock, true, nullptr); CBlock result = block; return result; } TestChain100Setup::~TestChain100Setup() {} CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx, CTxMemPool *pool) { CTransaction txn(tx); return FromTx(txn, pool); } CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CTransaction &txn, CTxMemPool *pool) { // Hack to assume either it's completely dependent on other mempool txs or // not at all. Amount inChainValue = pool && pool->HasNoInputsOf(txn) ? 
txn.GetValueOut() : Amount(0); return CTxMemPoolEntry(MakeTransactionRef(txn), nFee, nTime, dPriority, nHeight, inChainValue, spendsCoinbase, sigOpCost, lp); } namespace { // A place to put misc. setup code eg "the travis workaround" that needs to run // at program startup and exit struct Init { Init(); ~Init(); std::list> cleanup; }; Init init; Init::Init() { if (getenv("TRAVIS_NOHANG_WORKAROUND")) { // This is a workaround for MinGW/Win32 builds on Travis sometimes // hanging due to no output received by Travis after a 10-minute // timeout. // The strategy here is to let the jobs finish however long they take // on Travis, by feeding Travis output. We start a parallel thread // that just prints out '.' once per second. struct Private { Private() : stop(false) {} std::atomic_bool stop; std::thread thr; std::condition_variable cond; std::mutex mut; } *p = new Private; p->thr = std::thread([p] { // thread func.. print dots std::unique_lock lock(p->mut); unsigned ctr = 0; while (!p->stop) { if (ctr) { // skip first period to allow app to print first std::cerr << "." << std::flush; } if (!(++ctr % 79)) { // newline once in a while to keep travis happy std::cerr << std::endl; } p->cond.wait_for(lock, std::chrono::milliseconds(1000)); } }); cleanup.emplace_back([p]() { // cleanup function to kill the thread and delete the struct p->mut.lock(); p->stop = true; p->cond.notify_all(); p->mut.unlock(); if (p->thr.joinable()) { p->thr.join(); } delete p; }); } } Init::~Init() { for (auto &f : cleanup) { if (f) { f(); } } } } // end anonymous namespace diff --git a/src/timedata.cpp b/src/timedata.cpp index f9c275f84c..d0d2349fe5 100644 --- a/src/timedata.cpp +++ b/src/timedata.cpp @@ -1,119 +1,120 @@ // Copyright (c) 2014-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "timedata.h" #include "netaddress.h" #include "sync.h" #include "ui_interface.h" #include "util.h" #include "utilstrencodings.h" #include "warnings.h" static CCriticalSection cs_nTimeOffset; static int64_t nTimeOffset = 0; /** * "Never go to sea with two chronometers; take one or three." * Our three time sources are: * - System clock * - Median of other nodes clocks * - The user (asking the user to fix the system clock if the first two * disagree) */ int64_t GetTimeOffset() { LOCK(cs_nTimeOffset); return nTimeOffset; } int64_t GetAdjustedTime() { return GetTime() + GetTimeOffset(); } static int64_t abs64(int64_t n) { return (n >= 0 ? n : -n); } #define BITCOIN_TIMEDATA_MAX_SAMPLES 200 void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample) { LOCK(cs_nTimeOffset); // Ignore duplicates static std::set setKnown; if (setKnown.size() == BITCOIN_TIMEDATA_MAX_SAMPLES) return; if (!setKnown.insert(ip).second) return; // Add data static CMedianFilter vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0); vTimeOffsets.input(nOffsetSample); LogPrint(BCLog::NET, "added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample / 60); // There is a known issue here (see issue #4521): // // - The structure vTimeOffsets contains up to 200 elements, after which any // new element added to it will not increase its size, replacing the oldest // element. 
// // - The condition to update nTimeOffset includes checking whether the // number of elements in vTimeOffsets is odd, which will never happen after // there are 200 elements. // // But in this case the 'bug' is protective against some attacks, and may // actually explain why we've never seen attacks which manipulate the clock // offset. // // So we should hold off on fixing this and clean it up as part of a timing // cleanup that strengthens it in a number of other ways. // if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1) { int64_t nMedian = vTimeOffsets.median(); std::vector vSorted = vTimeOffsets.sorted(); // Only let other nodes change our time by so much if (abs64(nMedian) <= - std::max( - 0, GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT))) { + std::max(0, + gArgs.GetArg("-maxtimeadjustment", + DEFAULT_MAX_TIME_ADJUSTMENT))) { nTimeOffset = nMedian; } else { nTimeOffset = 0; static bool fDone; if (!fDone) { // If nobody has a time different than ours but within 5 minutes // of ours, give a warning bool fMatch = false; for (int64_t nOffset : vSorted) { if (nOffset != 0 && abs64(nOffset) < 5 * 60) fMatch = true; } if (!fMatch) { fDone = true; std::string strMessage = strprintf(_("Please check that your computer's date " "and time are correct! If your clock is " "wrong, %s will not work properly."), _(PACKAGE_NAME)); SetMiscWarning(strMessage); uiInterface.ThreadSafeMessageBox( strMessage, "", CClientUIInterface::MSG_WARNING); } } } if (LogAcceptCategory(BCLog::NET)) { for (int64_t n : vSorted) { LogPrint(BCLog::NET, "%+d ", n); } LogPrint(BCLog::NET, "| "); LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset / 60); } } } diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index cf181d4647..00f2327822 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -1,772 +1,772 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "torcontrol.h" #include "crypto/hmac_sha256.h" #include "net.h" #include "netbase.h" #include "util.h" #include "utilstrencodings.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** Default control port */ const std::string DEFAULT_TOR_CONTROL = "127.0.0.1:9051"; /** Tor cookie size (from control-spec.txt) */ static const int TOR_COOKIE_SIZE = 32; /** Size of client/server nonce for SAFECOOKIE */ static const int TOR_NONCE_SIZE = 32; /** For computing serverHash in SAFECOOKIE */ static const std::string TOR_SAFE_SERVERKEY = "Tor safe cookie authentication server-to-controller hash"; /** For computing clientHash in SAFECOOKIE */ static const std::string TOR_SAFE_CLIENTKEY = "Tor safe cookie authentication controller-to-server hash"; /** Exponential backoff configuration - initial timeout in seconds */ static const float RECONNECT_TIMEOUT_START = 1.0; /** Exponential backoff configuration - growth factor */ static const float RECONNECT_TIMEOUT_EXP = 1.5; /** * Maximum length for lines received on TorControlConnection. * tor-control-spec.txt mentions that there is explicitly no limit defined to * line length, this is belt-and-suspenders sanity limit to prevent memory * exhaustion. 
*/ static const int MAX_LINE_LENGTH = 100000; /****** Low-level TorControlConnection ********/ /** Reply from Tor, can be single or multi-line */ class TorControlReply { public: TorControlReply() { Clear(); } int code; std::vector lines; void Clear() { code = 0; lines.clear(); } }; /** * Low-level handling for Tor control connection. * Speaks the SMTP-like protocol as defined in torspec/control-spec.txt */ class TorControlConnection { public: typedef std::function ConnectionCB; typedef std::function ReplyHandlerCB; /** Create a new TorControlConnection. */ TorControlConnection(struct event_base *base); ~TorControlConnection(); /** * Connect to a Tor control port. * target is address of the form host:port. * connected is the handler that is called when connection is successfully * established. * disconnected is a handler that is called when the connection is broken. * Return true on success. */ bool Connect(const std::string &target, const ConnectionCB &connected, const ConnectionCB &disconnected); /** * Disconnect from Tor control port. */ bool Disconnect(); /** * Send a command, register a handler for the reply. * A trailing CRLF is automatically added. * Return true on success. */ bool Command(const std::string &cmd, const ReplyHandlerCB &reply_handler); /** Response handlers for async replies */ boost::signals2::signal async_handler; private: /** Callback when ready for use */ std::function connected; /** Callback when connection lost */ std::function disconnected; /** Libevent event base */ struct event_base *base; /** Connection to control socket */ struct bufferevent *b_conn; /** Message being received */ TorControlReply message; /** Response handlers */ std::deque reply_handlers; /** Libevent handlers: internal */ static void readcb(struct bufferevent *bev, void *ctx); static void eventcb(struct bufferevent *bev, short what, void *ctx); }; TorControlConnection::TorControlConnection(struct event_base *_base) : base(_base), b_conn(0) {} TorControlConnection::~TorControlConnection() { if (b_conn) bufferevent_free(b_conn); } void TorControlConnection::readcb(struct bufferevent *bev, void *ctx) { TorControlConnection *self = (TorControlConnection *)ctx; struct evbuffer *input = bufferevent_get_input(bev); size_t n_read_out = 0; char *line; assert(input); // If there is not a whole line to read, evbuffer_readln returns nullptr while ((line = evbuffer_readln(input, &n_read_out, EVBUFFER_EOL_CRLF)) != nullptr) { std::string s(line, n_read_out); free(line); // Short line if (s.size() < 4) continue; // (-|+| ) self->message.code = atoi(s.substr(0, 3)); self->message.lines.push_back(s.substr(4)); // '-','+' or ' ' char ch = s[3]; if (ch == ' ') { // Final line, dispatch reply and clean up if (self->message.code >= 600) { // Dispatch async notifications to async handler. // Synchronous and asynchronous messages are never interleaved self->async_handler(*self, self->message); } else { if (!self->reply_handlers.empty()) { // Invoke reply handler with message self->reply_handlers.front()(*self, self->message); self->reply_handlers.pop_front(); } else { LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code); } } self->message.Clear(); } } // Check for size of buffer - protect against memory exhaustion with very // long lines. Do this after evbuffer_readln to make sure all full lines // have been removed from the buffer. Everything left is an incomplete line. 
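// Sketch of the reply-line framing readcb relies on above: a control-port
// line is a 3-digit status code, a fourth character that is '-' (more lines
// follow), '+' (data follows) or ' ' (final line of the reply), and then the
// payload. The example strings are illustrative only.
#include <cassert>
#include <string>

struct ReplyLineSketch {
    int code;
    char kind; // '-', '+' or ' '
    std::string payload;
};

static ReplyLineSketch ParseReplyLineSketch(const std::string &s) {
    // readcb already skips lines shorter than 4 characters.
    return {std::stoi(s.substr(0, 3)), s[3], s.substr(4)};
}

static void ReplyLineSketchChecks() {
    ReplyLineSketch mid = ParseReplyLineSketch("250-AUTH METHODS=NULL");
    ReplyLineSketch fin = ParseReplyLineSketch("250 OK");
    assert(mid.code == 250 && mid.kind == '-');
    assert(fin.code == 250 && fin.kind == ' ' && fin.payload == "OK");
}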
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) { LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n"); self->Disconnect(); } } void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ctx) { TorControlConnection *self = (TorControlConnection *)ctx; if (what & BEV_EVENT_CONNECTED) { LogPrint(BCLog::TOR, "tor: Successfully connected!\n"); self->connected(*self); } else if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) { if (what & BEV_EVENT_ERROR) LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n"); else LogPrint(BCLog::TOR, "tor: End of stream\n"); self->Disconnect(); self->disconnected(*self); } } bool TorControlConnection::Connect(const std::string &target, const ConnectionCB &_connected, const ConnectionCB &_disconnected) { if (b_conn) Disconnect(); // Parse target address:port struct sockaddr_storage connect_to_addr; int connect_to_addrlen = sizeof(connect_to_addr); if (evutil_parse_sockaddr_port(target.c_str(), (struct sockaddr *)&connect_to_addr, &connect_to_addrlen) < 0) { LogPrintf("tor: Error parsing socket address %s\n", target); return false; } // Create a new socket, set up callbacks and enable notification bits b_conn = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE); if (!b_conn) return false; bufferevent_setcb(b_conn, TorControlConnection::readcb, nullptr, TorControlConnection::eventcb, this); bufferevent_enable(b_conn, EV_READ | EV_WRITE); this->connected = _connected; this->disconnected = _disconnected; // Finally, connect to target if (bufferevent_socket_connect(b_conn, (struct sockaddr *)&connect_to_addr, connect_to_addrlen) < 0) { LogPrintf("tor: Error connecting to address %s\n", target); return false; } return true; } bool TorControlConnection::Disconnect() { if (b_conn) bufferevent_free(b_conn); b_conn = 0; return true; } bool TorControlConnection::Command(const std::string &cmd, const ReplyHandlerCB &reply_handler) { if (!b_conn) return false; struct evbuffer *buf = bufferevent_get_output(b_conn); if (!buf) return false; evbuffer_add(buf, cmd.data(), cmd.size()); evbuffer_add(buf, "\r\n", 2); reply_handlers.push_back(reply_handler); return true; } /****** General parsing utilities ********/ /* Split reply line in the form 'AUTH METHODS=...' into a type * 'AUTH' and arguments 'METHODS=...'. */ static std::pair SplitTorReplyLine(const std::string &s) { size_t ptr = 0; std::string type; while (ptr < s.size() && s[ptr] != ' ') { type.push_back(s[ptr]); ++ptr; } if (ptr < s.size()) ++ptr; // skip ' ' return make_pair(type, s.substr(ptr)); } /** * Parse reply arguments in the form 'METHODS=COOKIE,SAFECOOKIE * COOKIEFILE=".../control_auth_cookie"'. */ static std::map ParseTorReplyMapping(const std::string &s) { std::map mapping; size_t ptr = 0; while (ptr < s.size()) { std::string key, value; while (ptr < s.size() && s[ptr] != '=') { key.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) return std::map(); // skip '=' ++ptr; // Quoted string if (ptr < s.size() && s[ptr] == '"') { // skip '=' ++ptr; bool escape_next = false; while (ptr < s.size() && (!escape_next && s[ptr] != '"')) { escape_next = (s[ptr] == '\\'); value.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) return std::map(); // skip closing '"' ++ptr; /* TODO: unescape value - according to the spec this depends on the * context, some strings use C-LogPrintf style escape codes, some * don't. So may be better handled at the call site. */ } else { // Unquoted value. 
Note that values can contain '=' at will, just no // spaces while (ptr < s.size() && s[ptr] != ' ') { value.push_back(s[ptr]); ++ptr; } } if (ptr < s.size() && s[ptr] == ' ') { // skip ' ' after key=value ++ptr; } mapping[key] = value; } return mapping; } /** * Read full contents of a file and return them in a std::string. * Returns a pair . * If an error occurred, status will be false, otherwise status will be true and * the data will be returned in string. * * @param maxsize Puts a maximum size limit on the file that is read. If the * file is larger than this, truncated data * (with len > maxsize) will be returned. */ static std::pair ReadBinaryFile(const fs::path &filename, size_t maxsize = std::numeric_limits::max()) { FILE *f = fsbridge::fopen(filename, "rb"); if (f == nullptr) { return std::make_pair(false, ""); } std::string retval; char buffer[128]; size_t n; while ((n = fread(buffer, 1, sizeof(buffer), f)) > 0) { retval.append(buffer, buffer + n); if (retval.size() > maxsize) break; } fclose(f); return std::make_pair(true, retval); } /** * Write contents of std::string to a file. * @return true on success. */ static bool WriteBinaryFile(const fs::path &filename, const std::string &data) { FILE *f = fsbridge::fopen(filename, "wb"); if (f == nullptr) return false; if (fwrite(data.data(), 1, data.size(), f) != data.size()) { fclose(f); return false; } fclose(f); return true; } /****** Bitcoin specific TorController implementation ********/ /** * Controller that connects to Tor control socket, authenticate, then create * and maintain a ephemeral hidden service. */ class TorController { public: TorController(struct event_base *base, const std::string &target); ~TorController(); /** Get name fo file to store private key in */ fs::path GetPrivateKeyFile(); /** Reconnect, after getting disconnected */ void Reconnect(); private: struct event_base *base; std::string target; TorControlConnection conn; std::string private_key; std::string service_id; bool reconnect; struct event *reconnect_ev; float reconnect_timeout; CService service; /** Cookie for SAFECOOKIE auth */ std::vector cookie; /** ClientNonce for SAFECOOKIE auth */ std::vector clientNonce; /** Callback for ADD_ONION result */ void add_onion_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHENTICATE result */ void auth_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHCHALLENGE result */ void authchallenge_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for PROTOCOLINFO result */ void protocolinfo_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback after successful connection */ void connected_cb(TorControlConnection &conn); /** Callback after connection lost or failed connection attempt */ void disconnected_cb(TorControlConnection &conn); /** Callback for reconnect timer */ static void reconnect_cb(evutil_socket_t fd, short what, void *arg); }; TorController::TorController(struct event_base *_base, const std::string &_target) : base(_base), target(_target), conn(base), reconnect(true), reconnect_ev(0), reconnect_timeout(RECONNECT_TIMEOUT_START) { reconnect_ev = event_new(base, -1, 0, reconnect_cb, this); if (!reconnect_ev) LogPrintf( "tor: Failed to create event for reconnection: out of memory?\n"); // Start connection attempts immediately if (!conn.Connect(_target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) { LogPrintf("tor: Initiating connection to 
Tor control port %s failed\n", _target); } // Read service private key if cached std::pair pkf = ReadBinaryFile(GetPrivateKeyFile()); if (pkf.first) { LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile()); private_key = pkf.second; } } TorController::~TorController() { if (reconnect_ev) { event_free(reconnect_ev); reconnect_ev = 0; } if (service.IsValid()) { RemoveLocal(service); } } void TorController::add_onion_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n"); for (const std::string &s : reply.lines) { std::map m = ParseTorReplyMapping(s); std::map::iterator i; if ((i = m.find("ServiceID")) != m.end()) service_id = i->second; if ((i = m.find("PrivateKey")) != m.end()) private_key = i->second; } service = LookupNumeric(std::string(service_id + ".onion").c_str(), GetListenPort()); LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString()); if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) { LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile()); } else { LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile()); } AddLocal(service, LOCAL_MANUAL); // ... onion requested - keep connection open } else if (reply.code == 510) { // 510 Unrecognized command LogPrintf("tor: Add onion failed with unrecognized command (You " "probably need to upgrade Tor)\n"); } else { LogPrintf("tor: Add onion failed; error code %d\n", reply.code); } } void TorController::auth_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: Authentication successful\n"); // Now that we know Tor is running setup the proxy for onion addresses // if -onion isn't set to something else. - if (GetArg("-onion", "") == "") { + if (gArgs.GetArg("-onion", "") == "") { CService resolved(LookupNumeric("127.0.0.1", 9050)); proxyType addrOnion = proxyType(resolved, true); SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } // Finally - now create the service // No private key, generate one if (private_key.empty()) { // Explicitly request RSA1024 - see issue #9214 private_key = "NEW:RSA1024"; } // Request hidden service, redirect port. // Note that the 'virtual' port doesn't have to be the same as our // internal port, but this is just a convenient choice. TODO; refactor // the shutdown sequence some day. _conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, GetListenPort(), GetListenPort()), boost::bind(&TorController::add_onion_cb, this, _1, _2)); } else { LogPrintf("tor: Authentication failed\n"); } } /** Compute Tor SAFECOOKIE response. 
* * ServerHash is computed as: * HMAC-SHA256("Tor safe cookie authentication server-to-controller hash", * CookieString | ClientNonce | ServerNonce) * (with the HMAC key as its first argument) * * After a controller sends a successful AUTHCHALLENGE command, the * next command sent on the connection must be an AUTHENTICATE command, * and the only authentication string which that AUTHENTICATE command * will accept is: * * HMAC-SHA256("Tor safe cookie authentication controller-to-server hash", * CookieString | ClientNonce | ServerNonce) * */ static std::vector ComputeResponse(const std::string &key, const std::vector &cookie, const std::vector &clientNonce, const std::vector &serverNonce) { CHMAC_SHA256 computeHash((const uint8_t *)key.data(), key.size()); std::vector computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0); computeHash.Write(cookie.data(), cookie.size()); computeHash.Write(clientNonce.data(), clientNonce.size()); computeHash.Write(serverNonce.data(), serverNonce.size()); computeHash.Finalize(computedHash.data()); return computedHash; } void TorController::authchallenge_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n"); std::pair l = SplitTorReplyLine(reply.lines[0]); if (l.first == "AUTHCHALLENGE") { std::map m = ParseTorReplyMapping(l.second); std::vector serverHash = ParseHex(m["SERVERHASH"]); std::vector serverNonce = ParseHex(m["SERVERNONCE"]); LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); if (serverNonce.size() != 32) { LogPrintf( "tor: ServerNonce is not 32 bytes, as required by spec\n"); return; } std::vector computedServerHash = ComputeResponse( TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce); if (computedServerHash != serverHash) { LogPrintf("tor: ServerHash %s does not match expected " "ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash)); return; } std::vector computedClientHash = ComputeResponse( TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce); _conn.Command("AUTHENTICATE " + HexStr(computedClientHash), boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n"); } } else { LogPrintf("tor: SAFECOOKIE authentication challenge failed\n"); } } void TorController::protocolinfo_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { std::set methods; std::string cookiefile; /* * 250-AUTH METHODS=COOKIE,SAFECOOKIE * COOKIEFILE="/home/x/.tor/control_auth_cookie" * 250-AUTH METHODS=NULL * 250-AUTH METHODS=HASHEDPASSWORD */ for (const std::string &s : reply.lines) { std::pair l = SplitTorReplyLine(s); if (l.first == "AUTH") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("METHODS")) != m.end()) boost::split(methods, i->second, boost::is_any_of(",")); if ((i = m.find("COOKIEFILE")) != m.end()) cookiefile = i->second; } else if (l.first == "VERSION") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("Tor")) != m.end()) { LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second); } } } for (const std::string &s : methods) { LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s); } // Prefer NULL, otherwise SAFECOOKIE. 
If a password is provided, use // HASHEDPASSWORD /* Authentication: * cookie: hex-encoded ~/.tor/control_auth_cookie * password: "password" */ - std::string torpassword = GetArg("-torpassword", ""); + std::string torpassword = gArgs.GetArg("-torpassword", ""); if (!torpassword.empty()) { if (methods.count("HASHEDPASSWORD")) { LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n"); boost::replace_all(torpassword, "\"", "\\\""); _conn.Command( "AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Password provided with -torpassword, but " "HASHEDPASSWORD authentication is not available\n"); } } else if (methods.count("NULL")) { LogPrint(BCLog::TOR, "tor: Using NULL authentication\n"); _conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2)); } else if (methods.count("SAFECOOKIE")) { // Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, " "reading cookie authentication from %s\n", cookiefile); std::pair status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE); if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) { // _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), // boost::bind(&TorController::auth_cb, this, _1, _2)); cookie = std::vector(status_cookie.second.begin(), status_cookie.second.end()); clientNonce = std::vector(TOR_NONCE_SIZE, 0); GetRandBytes(&clientNonce[0], TOR_NONCE_SIZE); _conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), boost::bind(&TorController::authchallenge_cb, this, _1, _2)); } else { if (status_cookie.first) { LogPrintf("tor: Authentication cookie %s is not exactly %i " "bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE); } else { LogPrintf("tor: Authentication cookie %s could not be " "opened (check permissions)\n", cookiefile); } } } else if (methods.count("HASHEDPASSWORD")) { LogPrintf("tor: The only supported authentication mechanism left " "is password, but no password provided with " "-torpassword\n"); } else { LogPrintf("tor: No supported authentication method\n"); } } else { LogPrintf("tor: Requesting protocol info failed\n"); } } void TorController::connected_cb(TorControlConnection &_conn) { reconnect_timeout = RECONNECT_TIMEOUT_START; // First send a PROTOCOLINFO command to figure out what authentication is // expected if (!_conn.Command( "PROTOCOLINFO 1", boost::bind(&TorController::protocolinfo_cb, this, _1, _2))) LogPrintf("tor: Error sending initial protocolinfo command\n"); } void TorController::disconnected_cb(TorControlConnection &_conn) { // Stop advertising service when disconnected if (service.IsValid()) RemoveLocal(service); service = CService(); if (!reconnect) return; LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target); // Single-shot timer for reconnect. Use exponential backoff. struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0)); if (reconnect_ev) event_add(reconnect_ev, &time); reconnect_timeout *= RECONNECT_TIMEOUT_EXP; } void TorController::Reconnect() { /* Try to reconnect and reestablish if we get booted - for example, Tor may * be restarting. 
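 *
 * For illustration only, with hypothetical values (the real constants are
 * defined near the top of this file and may differ): if
 * RECONNECT_TIMEOUT_START were 1.0 and RECONNECT_TIMEOUT_EXP were 1.5,
 * disconnected_cb() above would schedule retries after roughly 1 s, 1.5 s,
 * 2.25 s, 3.4 s, ..., since it multiplies reconnect_timeout by
 * RECONNECT_TIMEOUT_EXP each time it arms the single-shot timer, while
 * connected_cb() resets it to RECONNECT_TIMEOUT_START.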
*/ if (!conn.Connect(target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) { LogPrintf( "tor: Re-initiating connection to Tor control port %s failed\n", target); } } fs::path TorController::GetPrivateKeyFile() { return GetDataDir() / "onion_private_key"; } void TorController::reconnect_cb(evutil_socket_t fd, short what, void *arg) { TorController *self = (TorController *)arg; self->Reconnect(); } /****** Thread ********/ struct event_base *base; boost::thread torControlThread; static void TorControlThread() { - TorController ctrl(base, GetArg("-torcontrol", DEFAULT_TOR_CONTROL)); + TorController ctrl(base, gArgs.GetArg("-torcontrol", DEFAULT_TOR_CONTROL)); event_base_dispatch(base); } void StartTorControl(boost::thread_group &threadGroup, CScheduler &scheduler) { assert(!base); #ifdef WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif base = event_base_new(); if (!base) { LogPrintf("tor: Unable to create event_base\n"); return; } torControlThread = boost::thread( boost::bind(&TraceThread, "torcontrol", &TorControlThread)); } void InterruptTorControl() { if (base) { LogPrintf("tor: Thread interrupt\n"); event_base_loopbreak(base); } } void StopTorControl() { if (base) { torControlThread.join(); event_base_free(base); base = 0; } } diff --git a/src/txdb.cpp b/src/txdb.cpp index 7f84c532e2..c19b587466 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -1,454 +1,455 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "txdb.h" #include "chainparams.h" #include "config.h" #include "hash.h" #include "init.h" #include "pow.h" -#include "ui_interface.h" #include "random.h" +#include "ui_interface.h" #include "uint256.h" #include "util.h" #include #include static const char DB_COIN = 'C'; static const char DB_COINS = 'c'; static const char DB_BLOCK_FILES = 'f'; static const char DB_TXINDEX = 't'; static const char DB_BLOCK_INDEX = 'b'; static const char DB_BEST_BLOCK = 'B'; static const char DB_HEAD_BLOCKS = 'H'; static const char DB_FLAG = 'F'; static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; namespace { struct CoinEntry { COutPoint *outpoint; char key; CoinEntry(const COutPoint *ptr) : outpoint(const_cast(ptr)), key(DB_COIN) {} template void Serialize(Stream &s) const { s << key; s << outpoint->hash; s << VARINT(outpoint->n); } template void Unserialize(Stream &s) { s >> key; s >> outpoint->hash; s >> VARINT(outpoint->n); } }; } // namespace CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true) {} bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { return db.Read(CoinEntry(&outpoint), coin); } bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { return db.Exists(CoinEntry(&outpoint)); } uint256 CCoinsViewDB::GetBestBlock() const { uint256 hashBestChain; if (!db.Read(DB_BEST_BLOCK, hashBestChain)) return uint256(); return hashBestChain; } std::vector CCoinsViewDB::GetHeadBlocks() const { std::vector vhashHeadBlocks; if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { return std::vector(); } return vhashHeadBlocks; } bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { CDBBatch batch(db); size_t count = 0; size_t changed = 0; - size_t batch_size = 
(size_t)GetArg("-dbbatchsize", nDefaultDbBatchSize); - int crash_simulate = GetArg("-dbcrashratio", 0); + size_t batch_size = + (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize); + int crash_simulate = gArgs.GetArg("-dbcrashratio", 0); assert(!hashBlock.IsNull()); uint256 old_tip = GetBestBlock(); if (old_tip.IsNull()) { // We may be in the middle of replaying. std::vector old_heads = GetHeadBlocks(); if (old_heads.size() == 2) { assert(old_heads[0] == hashBlock); old_tip = old_heads[1]; } } // In the first batch, mark the database as being in the middle of a // transition from old_tip to hashBlock. // A vector is used for future extensibility, as we may want to support // interrupting after partial writes from multiple independent reorgs. batch.Erase(DB_BEST_BLOCK); batch.Write(DB_HEAD_BLOCKS, std::vector{hashBlock, old_tip}); for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { CoinEntry entry(&it->first); if (it->second.coin.IsSpent()) { batch.Erase(entry); } else { batch.Write(entry, it->second.coin); } changed++; } count++; CCoinsMap::iterator itOld = it++; mapCoins.erase(itOld); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); db.WriteBatch(batch); batch.Clear(); if (crash_simulate) { static FastRandomContext rng; if (rng.randrange(crash_simulate) == 0) { LogPrintf("Simulating a crash. Goodbye.\n"); _Exit(0); } } } } // In the last batch, mark the database as consistent with hashBlock again. batch.Erase(DB_HEAD_BLOCKS); batch.Write(DB_BEST_BLOCK, hashBlock); LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); bool ret = db.WriteBatch(batch); LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of " "%u) to coin database...\n", (unsigned int)changed, (unsigned int)count); return ret; } size_t CCoinsViewDB::EstimateSize() const { return db.EstimateSize(DB_COIN, char(DB_COIN + 1)); } CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {} bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); } bool CBlockTreeDB::WriteReindexing(bool fReindexing) { if (fReindexing) return Write(DB_REINDEX_FLAG, '1'); else return Erase(DB_REINDEX_FLAG); } bool CBlockTreeDB::ReadReindexing(bool &fReindexing) { fReindexing = Exists(DB_REINDEX_FLAG); return true; } bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { return Read(DB_LAST_BLOCK, nFile); } CCoinsViewCursor *CCoinsViewDB::Cursor() const { CCoinsViewDBCursor *i = new CCoinsViewDBCursor( const_cast(db).NewIterator(), GetBestBlock()); /** * It seems that there are no "const iterators" for LevelDB. Since we only * need read operations on it, use a const-cast to get around that * restriction. 
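 *
 * A minimal sketch of how a hypothetical caller (names here are
 * illustrative) would walk the returned cursor:
 *
 *     std::unique_ptr<CCoinsViewCursor> cur(view.Cursor()); // view: a CCoinsViewDB
 *     while (cur->Valid()) {
 *         COutPoint key;
 *         Coin coin;
 *         if (cur->GetKey(key) && cur->GetValue(coin)) {
 *             // one unspent txout per iteration
 *         }
 *         cur->Next();
 *     }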
*/ i->pcursor->Seek(DB_COIN); // Cache key of first record if (i->pcursor->Valid()) { CoinEntry entry(&i->keyTmp.second); i->pcursor->GetKey(entry); i->keyTmp.first = entry.key; } else { // Make sure Valid() and GetKey() return false i->keyTmp.first = 0; } return i; } bool CCoinsViewDBCursor::GetKey(COutPoint &key) const { // Return cached key if (keyTmp.first == DB_COIN) { key = keyTmp.second; return true; } return false; } bool CCoinsViewDBCursor::GetValue(Coin &coin) const { return pcursor->GetValue(coin); } unsigned int CCoinsViewDBCursor::GetValueSize() const { return pcursor->GetValueSize(); } bool CCoinsViewDBCursor::Valid() const { return keyTmp.first == DB_COIN; } void CCoinsViewDBCursor::Next() { pcursor->Next(); CoinEntry entry(&keyTmp.second); if (!pcursor->Valid() || !pcursor->GetKey(entry)) { // Invalidate cached key after last record so that Valid() and GetKey() // return false keyTmp.first = 0; } else { keyTmp.first = entry.key; } } bool CBlockTreeDB::WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo) { CDBBatch batch(*this); for (std::vector>::const_iterator it = fileInfo.begin(); it != fileInfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second); } batch.Write(DB_LAST_BLOCK, nLastFile); for (std::vector::const_iterator it = blockinfo.begin(); it != blockinfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it)); } return WriteBatch(batch, true); } bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) { return Read(std::make_pair(DB_TXINDEX, txid), pos); } bool CBlockTreeDB::WriteTxIndex( const std::vector> &vect) { CDBBatch batch(*this); for (std::vector>::const_iterator it = vect.begin(); it != vect.end(); it++) batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second); return WriteBatch(batch); } bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) { return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0'); } bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) { char ch; if (!Read(std::make_pair(DB_FLAG, name), ch)) return false; fValue = ch == '1'; return true; } bool CBlockTreeDB::LoadBlockIndexGuts( std::function insertBlockIndex) { const Config &config = GetConfig(); std::unique_ptr pcursor(NewIterator()); pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); // Load mapBlockIndex while (pcursor->Valid()) { boost::this_thread::interruption_point(); std::pair key; if (!pcursor->GetKey(key) || key.first != DB_BLOCK_INDEX) { break; } CDiskBlockIndex diskindex; if (!pcursor->GetValue(diskindex)) { return error("LoadBlockIndex() : failed to read value"); } // Construct block index object CBlockIndex *pindexNew = insertBlockIndex(diskindex.GetBlockHash()); pindexNew->pprev = insertBlockIndex(diskindex.hashPrev); pindexNew->nHeight = diskindex.nHeight; pindexNew->nFile = diskindex.nFile; pindexNew->nDataPos = diskindex.nDataPos; pindexNew->nUndoPos = diskindex.nUndoPos; pindexNew->nVersion = diskindex.nVersion; pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; pindexNew->nTime = diskindex.nTime; pindexNew->nBits = diskindex.nBits; pindexNew->nNonce = diskindex.nNonce; pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, config)) { return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString()); } pcursor->Next(); } return true; } namespace { //! 
Legacy class to deserialize pre-pertxout database entries without reindex. class CCoins { public: //! whether transaction is a coinbase bool fCoinBase; //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs //! at the end of the array are dropped std::vector vout; //! at which height this transaction was included in the active block chain int nHeight; //! empty constructor CCoins() : fCoinBase(false), vout(0), nHeight(0) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; // version int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); // header code ::Unserialize(s, VARINT(nCode)); fCoinBase = nCode & 1; std::vector vAvail(2, false); vAvail[0] = (nCode & 2) != 0; vAvail[1] = (nCode & 4) != 0; uint32_t nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1); // spentness bitmask while (nMaskCode > 0) { uint8_t chAvail = 0; ::Unserialize(s, chAvail); for (unsigned int p = 0; p < 8; p++) { bool f = (chAvail & (1 << p)) != 0; vAvail.push_back(f); } if (chAvail != 0) { nMaskCode--; } } // txouts themself vout.assign(vAvail.size(), CTxOut()); for (size_t i = 0; i < vAvail.size(); i++) { if (vAvail[i]) { ::Unserialize(s, REF(CTxOutCompressor(vout[i]))); } } // coinbase height ::Unserialize(s, VARINT(nHeight)); } }; } // namespace /** * Upgrade the database from older formats. * * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout. */ bool CCoinsViewDB::Upgrade() { std::unique_ptr pcursor(db.NewIterator()); pcursor->Seek(std::make_pair(DB_COINS, uint256())); if (!pcursor->Valid()) { return true; } int64_t count = 0; LogPrintf("Upgrading utxo-set database...\n"); LogPrintf("[0%%]..."); size_t batch_size = 1 << 24; CDBBatch batch(db); uiInterface.SetProgressBreakAction(StartShutdown); int reportDone = 0; std::pair key; std::pair prev_key = {DB_COINS, uint256()}; while (pcursor->Valid()) { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } if (!pcursor->GetKey(key) || key.first != DB_COINS) { break; } if (count++ % 256 == 0) { uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1); int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5); uiInterface.ShowProgress( _("Upgrading UTXO database") + "\n" + _("(press q to shutdown and continue later)") + "\n", percentageDone); if (reportDone < percentageDone / 10) { // report max. every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } } CCoins old_coins; if (!pcursor->GetValue(old_coins)) { return error("%s: cannot parse CCoins record", __func__); } COutPoint outpoint(key.second, 0); for (size_t i = 0; i < old_coins.vout.size(); ++i) { if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) { Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase); outpoint.n = i; CoinEntry entry(&outpoint); batch.Write(entry, newcoin); } } batch.Erase(key); if (batch.SizeEstimate() > batch_size) { db.WriteBatch(batch); batch.Clear(); db.CompactRange(prev_key, key); prev_key = key; } pcursor->Next(); } db.WriteBatch(batch); db.CompactRange({DB_COINS, uint256()}, key); uiInterface.SetProgressBreakAction(std::function()); LogPrintf("[%s].\n", ShutdownRequested() ? 
"CANCELLED" : "DONE"); return !ShutdownRequested(); } diff --git a/src/util.cpp b/src/util.cpp index bf2caed91e..fa607939c8 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -1,875 +1,875 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "util.h" #include "chainparamsbase.h" #include "fs.h" #include "random.h" #include "serialize.h" #include "utilstrencodings.h" #include "utiltime.h" #include #if (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)) #include #include #endif #ifndef WIN32 // for posix_fallocate #ifdef __linux__ #ifdef _POSIX_C_SOURCE #undef _POSIX_C_SOURCE #endif #define _POSIX_C_SOURCE 200112L #endif // __linux__ #include #include #include #include #else #ifdef _MSC_VER #pragma warning(disable : 4786) #pragma warning(disable : 4804) #pragma warning(disable : 4805) #pragma warning(disable : 4717) #endif #ifdef _WIN32_WINNT #undef _WIN32_WINNT #endif #define _WIN32_WINNT 0x0501 #ifdef _WIN32_IE #undef _WIN32_IE #endif #define _WIN32_IE 0x0501 #define WIN32_LEAN_AND_MEAN 1 #ifndef NOMINMAX #define NOMINMAX #endif #include /* for _commit */ #include #endif #ifdef HAVE_SYS_PRCTL_H #include #endif #ifdef HAVE_MALLOPT_ARENA_MAX #include #endif #include // for to_lower() #include #include // for startswith() and endswith() #include #include #include #include #include #include // Application startup time (used for uptime calculation) const int64_t nStartupTime = GetTime(); const char *const BITCOIN_CONF_FILENAME = "bitcoin.conf"; const char *const BITCOIN_PID_FILENAME = "bitcoind.pid"; ArgsManager gArgs; bool fPrintToConsole = false; bool fPrintToDebugLog = true; bool fLogTimestamps = DEFAULT_LOGTIMESTAMPS; bool fLogTimeMicros = DEFAULT_LOGTIMEMICROS; bool fLogIPs = DEFAULT_LOGIPS; std::atomic fReopenDebugLog(false); CTranslationInterface translationInterface; /** * Log categories bitfield. Leveldb/libevent need special handling if their * flags are changed at runtime. */ std::atomic logCategories(0); /** Init OpenSSL library multithreading support */ static CCriticalSection **ppmutexOpenSSL; void locking_callback(int mode, int i, const char *file, int line) NO_THREAD_SAFETY_ANALYSIS { if (mode & CRYPTO_LOCK) { ENTER_CRITICAL_SECTION(*ppmutexOpenSSL[i]); } else { LEAVE_CRITICAL_SECTION(*ppmutexOpenSSL[i]); } } // Init class CInit { public: CInit() { // Init OpenSSL library multithreading support. ppmutexOpenSSL = (CCriticalSection **)OPENSSL_malloc( CRYPTO_num_locks() * sizeof(CCriticalSection *)); for (int i = 0; i < CRYPTO_num_locks(); i++) ppmutexOpenSSL[i] = new CCriticalSection(); CRYPTO_set_locking_callback(locking_callback); // OpenSSL can optionally load a config file which lists optional // loadable modules and engines. We don't use them so we don't require // the config. However some of our libs may call functions which attempt // to load the config file, possibly resulting in an exit() or crash if // it is missing or corrupt. Explicitly tell OpenSSL not to try to load // the file. The result for our libs will be that the config appears to // have been loaded and there are no modules/engines available. OPENSSL_no_config(); #ifdef WIN32 // Seed OpenSSL PRNG with current contents of the screen. RAND_screen(); #endif // Seed OpenSSL PRNG with performance counter. 
RandAddSeed(); } ~CInit() { // Securely erase the memory used by the PRNG. RAND_cleanup(); // Shutdown OpenSSL library multithreading support. CRYPTO_set_locking_callback(nullptr); for (int i = 0; i < CRYPTO_num_locks(); i++) delete ppmutexOpenSSL[i]; OPENSSL_free(ppmutexOpenSSL); } } instance_of_cinit; /** * LogPrintf() has been broken a couple of times now by well-meaning people * adding mutexes in the most straightforward way. It breaks because it may be * called by global destructors during shutdown. Since the order of destruction * of static/global objects is undefined, defining a mutex as a global object * doesn't work (the mutex gets destroyed, and then some later destructor calls * OutputDebugStringF, maybe indirectly, and you get a core dump at shutdown * trying to lock the mutex). */ static boost::once_flag debugPrintInitFlag = BOOST_ONCE_INIT; /** * We use boost::call_once() to make sure mutexDebugLog and vMsgsBeforeOpenLog * are initialized in a thread-safe manner. * * NOTE: fileout, mutexDebugLog and sometimes vMsgsBeforeOpenLog are leaked on * exit. This is ugly, but will be cleaned up by the OS/libc. When the shutdown * sequence is fully audited and tested, explicit destruction of these objects * can be implemented. */ static FILE *fileout = nullptr; static boost::mutex *mutexDebugLog = nullptr; static std::list *vMsgsBeforeOpenLog; static int FileWriteStr(const std::string &str, FILE *fp) { return fwrite(str.data(), 1, str.size(), fp); } static void DebugPrintInit() { assert(mutexDebugLog == nullptr); mutexDebugLog = new boost::mutex(); vMsgsBeforeOpenLog = new std::list; } void OpenDebugLog() { boost::call_once(&DebugPrintInit, debugPrintInitFlag); boost::mutex::scoped_lock scoped_lock(*mutexDebugLog); assert(fileout == nullptr); assert(vMsgsBeforeOpenLog); fs::path pathDebug = GetDataDir() / "debug.log"; fileout = fsbridge::fopen(pathDebug, "a"); if (fileout) { // Unbuffered. setbuf(fileout, nullptr); // Dump buffered messages from before we opened the log. while (!vMsgsBeforeOpenLog->empty()) { FileWriteStr(vMsgsBeforeOpenLog->front(), fileout); vMsgsBeforeOpenLog->pop_front(); } } delete vMsgsBeforeOpenLog; vMsgsBeforeOpenLog = nullptr; } struct CLogCategoryDesc { uint32_t flag; std::string category; }; const CLogCategoryDesc LogCategories[] = { {BCLog::NONE, "0"}, {BCLog::NET, "net"}, {BCLog::TOR, "tor"}, {BCLog::MEMPOOL, "mempool"}, {BCLog::HTTP, "http"}, {BCLog::BENCH, "bench"}, {BCLog::ZMQ, "zmq"}, {BCLog::DB, "db"}, {BCLog::RPC, "rpc"}, {BCLog::ESTIMATEFEE, "estimatefee"}, {BCLog::ADDRMAN, "addrman"}, {BCLog::SELECTCOINS, "selectcoins"}, {BCLog::REINDEX, "reindex"}, {BCLog::CMPCTBLOCK, "cmpctblock"}, {BCLog::RAND, "rand"}, {BCLog::PRUNE, "prune"}, {BCLog::PROXY, "proxy"}, {BCLog::MEMPOOLREJ, "mempoolrej"}, {BCLog::LIBEVENT, "libevent"}, {BCLog::COINDB, "coindb"}, {BCLog::QT, "qt"}, {BCLog::LEVELDB, "leveldb"}, {BCLog::ALL, "1"}, {BCLog::ALL, "all"}, }; bool GetLogCategory(uint32_t *f, const std::string *str) { if (f && str) { if (*str == "") { *f = BCLog::ALL; return true; } for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) { if (LogCategories[i].category == *str) { *f = LogCategories[i].flag; return true; } } } return false; } std::string ListLogCategories() { std::string ret; int outcount = 0; for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) { // Omit the special cases. 
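// An illustrative sketch (hypothetical caller): init code that wants to
// honour a "-debug=<category>" token would typically resolve it with
// GetLogCategory() above and merge it into the logCategories bitfield:
//
//     uint32_t flag = 0;
//     std::string cat = "tor"; // e.g. from -debug=tor
//     if (GetLogCategory(&flag, &cat)) {
//         logCategories |= flag; // enables LogPrint(BCLog::TOR, ...)
//     }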
if (LogCategories[i].flag != BCLog::NONE && LogCategories[i].flag != BCLog::ALL) { if (outcount != 0) ret += ", "; ret += LogCategories[i].category; outcount++; } } return ret; } /** * fStartedNewLine is a state variable held by the calling context that will * suppress printing of the timestamp when multiple calls are made that don't * end in a newline. Initialize it to true, and hold it, in the calling context. */ static std::string LogTimestampStr(const std::string &str, std::atomic_bool *fStartedNewLine) { std::string strStamped; if (!fLogTimestamps) return str; if (*fStartedNewLine) { int64_t nTimeMicros = GetLogTimeMicros(); strStamped = DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nTimeMicros / 1000000); if (fLogTimeMicros) strStamped += strprintf(".%06d", nTimeMicros % 1000000); strStamped += ' ' + str; } else strStamped = str; if (!str.empty() && str[str.size() - 1] == '\n') *fStartedNewLine = true; else *fStartedNewLine = false; return strStamped; } int LogPrintStr(const std::string &str) { // Returns total number of characters written. int ret = 0; static std::atomic_bool fStartedNewLine(true); std::string strTimestamped = LogTimestampStr(str, &fStartedNewLine); if (fPrintToConsole) { // Print to console. ret = fwrite(strTimestamped.data(), 1, strTimestamped.size(), stdout); fflush(stdout); } else if (fPrintToDebugLog) { boost::call_once(&DebugPrintInit, debugPrintInitFlag); boost::mutex::scoped_lock scoped_lock(*mutexDebugLog); // Buffer if we haven't opened the log yet. if (fileout == nullptr) { assert(vMsgsBeforeOpenLog); ret = strTimestamped.length(); vMsgsBeforeOpenLog->push_back(strTimestamped); } else { // Reopen the log file, if requested. if (fReopenDebugLog) { fReopenDebugLog = false; fs::path pathDebug = GetDataDir() / "debug.log"; if (fsbridge::freopen(pathDebug, "a", fileout) != nullptr) { // unbuffered. setbuf(fileout, nullptr); } } ret = FileWriteStr(strTimestamped, fileout); } } return ret; } /** Interpret string as boolean, for argument parsing */ static bool InterpretBool(const std::string &strValue) { if (strValue.empty()) return true; return (atoi(strValue) != 0); } /** Turn -noX into -X=0 */ static void InterpretNegativeSetting(std::string &strKey, std::string &strValue) { if (strKey.length() > 3 && strKey[0] == '-' && strKey[1] == 'n' && strKey[2] == 'o') { strKey = "-" + strKey.substr(3); strValue = InterpretBool(strValue) ? "0" : "1"; } } void ArgsManager::ParseParameters(int argc, const char *const argv[]) { LOCK(cs_args); mapArgs.clear(); mapMultiArgs.clear(); for (int i = 1; i < argc; i++) { std::string str(argv[i]); std::string strValue; size_t is_index = str.find('='); if (is_index != std::string::npos) { strValue = str.substr(is_index + 1); str = str.substr(0, is_index); } #ifdef WIN32 boost::to_lower(str); if (boost::algorithm::starts_with(str, "/")) str = "-" + str.substr(1); #endif if (str[0] != '-') break; // Interpret --foo as -foo. // If both --foo and -foo are set, the last takes effect. 
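// A small worked example (hypothetical command line) of the normalization
// performed here and by InterpretNegativeSetting() above:
//
//     const char *argv[] = {"bitcoind", "--testnet", "-nolisten", "-par=2"};
//     gArgs.ParseParameters(4, argv);
//     // gArgs.GetBoolArg("-testnet", false) -> true
//     // gArgs.GetBoolArg("-listen", true)   -> false ("-nolisten" stored as "-listen=0")
//     // gArgs.GetArg("-par", "")            -> "2"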
if (str.length() > 1 && str[1] == '-') str = str.substr(1); InterpretNegativeSetting(str, strValue); mapArgs[str] = strValue; mapMultiArgs[str].push_back(strValue); } } std::vector ArgsManager::GetArgs(const std::string &strArg) { LOCK(cs_args); return mapMultiArgs.at(strArg); } bool ArgsManager::IsArgSet(const std::string &strArg) { LOCK(cs_args); return mapArgs.count(strArg); } std::string ArgsManager::GetArg(const std::string &strArg, const std::string &strDefault) { LOCK(cs_args); if (mapArgs.count(strArg)) return mapArgs[strArg]; return strDefault; } int64_t ArgsManager::GetArg(const std::string &strArg, int64_t nDefault) { LOCK(cs_args); if (mapArgs.count(strArg)) return atoi64(mapArgs[strArg]); return nDefault; } bool ArgsManager::GetBoolArg(const std::string &strArg, bool fDefault) { LOCK(cs_args); if (mapArgs.count(strArg)) return InterpretBool(mapArgs[strArg]); return fDefault; } bool ArgsManager::SoftSetArg(const std::string &strArg, const std::string &strValue) { LOCK(cs_args); if (mapArgs.count(strArg)) { return false; } ForceSetArg(strArg, strValue); return true; } bool ArgsManager::SoftSetBoolArg(const std::string &strArg, bool fValue) { if (fValue) return SoftSetArg(strArg, std::string("1")); else return SoftSetArg(strArg, std::string("0")); } void ArgsManager::ForceSetArg(const std::string &strArg, const std::string &strValue) { LOCK(cs_args); mapArgs[strArg] = strValue; mapMultiArgs[strArg].push_back(strValue); } /** * This function is only used for testing purpose so * so we should not worry about element uniqueness and * integrity of mapMultiArgs data structure */ void ArgsManager::ForceSetMultiArg(const std::string &strArg, const std::string &strValue) { LOCK(cs_args); if (mapArgs.count(strArg) == 0) { mapArgs[strArg] = strValue; } mapMultiArgs[strArg].push_back(strValue); } void ArgsManager::ClearArg(const std::string &strArg) { LOCK(cs_args); mapArgs.erase(strArg); } static const int screenWidth = 79; static const int optIndent = 2; static const int msgIndent = 7; std::string HelpMessageGroup(const std::string &message) { return std::string(message) + std::string("\n\n"); } std::string HelpMessageOpt(const std::string &option, const std::string &message) { return std::string(optIndent, ' ') + std::string(option) + std::string("\n") + std::string(msgIndent, ' ') + FormatParagraph(message, screenWidth - msgIndent, msgIndent) + std::string("\n\n"); } static std::string FormatException(const std::exception *pex, const char *pszThread) { #ifdef WIN32 char pszModule[MAX_PATH] = ""; GetModuleFileNameA(nullptr, pszModule, sizeof(pszModule)); #else const char *pszModule = "bitcoin"; #endif if (pex) return strprintf("EXCEPTION: %s \n%s \n%s in %s \n", typeid(*pex).name(), pex->what(), pszModule, pszThread); else return strprintf("UNKNOWN EXCEPTION \n%s in %s \n", pszModule, pszThread); } void PrintExceptionContinue(const std::exception *pex, const char *pszThread) { std::string message = FormatException(pex, pszThread); LogPrintf("\n\n************************\n%s\n", message); fprintf(stderr, "\n\n************************\n%s\n", message.c_str()); } fs::path GetDefaultDataDir() { // Windows < Vista: C:\Documents and Settings\Username\Application Data\Bitcoin // Windows >= Vista: C:\Users\Username\AppData\Roaming\Bitcoin // Mac: ~/Library/Application Support/Bitcoin // Unix: ~/.bitcoin #ifdef WIN32 // Windows return GetSpecialFolderPath(CSIDL_APPDATA) / "Bitcoin"; #else fs::path pathRet; char *pszHome = getenv("HOME"); if (pszHome == nullptr || strlen(pszHome) == 0) pathRet = 
fs::path("/"); else pathRet = fs::path(pszHome); #ifdef MAC_OSX // Mac return pathRet / "Library/Application Support/Bitcoin"; #else // Unix return pathRet / ".bitcoin"; #endif #endif } static fs::path pathCached; static fs::path pathCachedNetSpecific; static CCriticalSection csPathCached; const fs::path &GetDataDir(bool fNetSpecific) { LOCK(csPathCached); fs::path &path = fNetSpecific ? pathCachedNetSpecific : pathCached; // This can be called during exceptions by LogPrintf(), so we cache the // value so we don't have to do memory allocations after that. if (!path.empty()) return path; - if (IsArgSet("-datadir")) { - path = fs::system_complete(GetArg("-datadir", "")); + if (gArgs.IsArgSet("-datadir")) { + path = fs::system_complete(gArgs.GetArg("-datadir", "")); if (!fs::is_directory(path)) { path = ""; return path; } } else { path = GetDefaultDataDir(); } if (fNetSpecific) path /= BaseParams().DataDir(); fs::create_directories(path); return path; } void ClearDatadirCache() { LOCK(csPathCached); pathCached = fs::path(); pathCachedNetSpecific = fs::path(); } fs::path GetConfigFile(const std::string &confPath) { fs::path pathConfigFile(confPath); if (!pathConfigFile.is_complete()) pathConfigFile = GetDataDir(false) / pathConfigFile; return pathConfigFile; } void ArgsManager::ReadConfigFile(const std::string &confPath) { fs::ifstream streamConfig(GetConfigFile(confPath)); // No bitcoin.conf file is OK if (!streamConfig.good()) return; { LOCK(cs_args); std::set setOptions; setOptions.insert("*"); for (boost::program_options::detail::config_file_iterator it(streamConfig, setOptions), end; it != end; ++it) { // Don't overwrite existing settings so command line settings // override bitcoin.conf std::string strKey = std::string("-") + it->string_key; std::string strValue = it->value[0]; InterpretNegativeSetting(strKey, strValue); if (mapArgs.count(strKey) == 0) { mapArgs[strKey] = strValue; } mapMultiArgs[strKey].push_back(strValue); } } // If datadir is changed in .conf file: ClearDatadirCache(); } #ifndef WIN32 fs::path GetPidFile() { - fs::path pathPidFile(GetArg("-pid", BITCOIN_PID_FILENAME)); + fs::path pathPidFile(gArgs.GetArg("-pid", BITCOIN_PID_FILENAME)); if (!pathPidFile.is_complete()) pathPidFile = GetDataDir() / pathPidFile; return pathPidFile; } void CreatePidFile(const fs::path &path, pid_t pid) { FILE *file = fsbridge::fopen(path, "w"); if (file) { fprintf(file, "%d\n", pid); fclose(file); } } #endif bool RenameOver(fs::path src, fs::path dest) { #ifdef WIN32 return MoveFileExA(src.string().c_str(), dest.string().c_str(), MOVEFILE_REPLACE_EXISTING) != 0; #else int rc = std::rename(src.string().c_str(), dest.string().c_str()); return (rc == 0); #endif /* WIN32 */ } /** * Ignores exceptions thrown by Boost's create_directories if the requested * directory exists. Specifically handles case where path p exists, but it * wasn't possible for the user to write to the parent directory. */ bool TryCreateDirectories(const fs::path &p) { try { return fs::create_directories(p); } catch (const fs::filesystem_error &) { if (!fs::exists(p) || !fs::is_directory(p)) { throw; } } // create_directory didn't create the directory, it had to have existed // already. return false; } void FileCommit(FILE *file) { // Harmless if redundantly called. 
fflush(file); #ifdef WIN32 HANDLE hFile = (HANDLE)_get_osfhandle(_fileno(file)); FlushFileBuffers(hFile); #else #if defined(__linux__) || defined(__NetBSD__) fdatasync(fileno(file)); #elif defined(__APPLE__) && defined(F_FULLFSYNC) fcntl(fileno(file), F_FULLFSYNC, 0); #else fsync(fileno(file)); #endif #endif } bool TruncateFile(FILE *file, unsigned int length) { #if defined(WIN32) return _chsize(_fileno(file), length) == 0; #else return ftruncate(fileno(file), length) == 0; #endif } /** * This function tries to raise the file descriptor limit to the requested * number. It returns the actual file descriptor limit (which may be more or * less than nMinFD) */ int RaiseFileDescriptorLimit(int nMinFD) { #if defined(WIN32) return 2048; #else struct rlimit limitFD; if (getrlimit(RLIMIT_NOFILE, &limitFD) != -1) { if (limitFD.rlim_cur < (rlim_t)nMinFD) { limitFD.rlim_cur = nMinFD; if (limitFD.rlim_cur > limitFD.rlim_max) limitFD.rlim_cur = limitFD.rlim_max; setrlimit(RLIMIT_NOFILE, &limitFD); getrlimit(RLIMIT_NOFILE, &limitFD); } return limitFD.rlim_cur; } // getrlimit failed, assume it's fine. return nMinFD; #endif } /** * This function tries to make a particular range of a file allocated * (corresponding to disk space) it is advisory, and the range specified in the * arguments will never contain live data. */ void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length) { #if defined(WIN32) // Windows-specific version. HANDLE hFile = (HANDLE)_get_osfhandle(_fileno(file)); LARGE_INTEGER nFileSize; int64_t nEndPos = (int64_t)offset + length; nFileSize.u.LowPart = nEndPos & 0xFFFFFFFF; nFileSize.u.HighPart = nEndPos >> 32; SetFilePointerEx(hFile, nFileSize, 0, FILE_BEGIN); SetEndOfFile(hFile); #elif defined(MAC_OSX) // OSX specific version. fstore_t fst; fst.fst_flags = F_ALLOCATECONTIG; fst.fst_posmode = F_PEOFPOSMODE; fst.fst_offset = 0; fst.fst_length = (off_t)offset + length; fst.fst_bytesalloc = 0; if (fcntl(fileno(file), F_PREALLOCATE, &fst) == -1) { fst.fst_flags = F_ALLOCATEALL; fcntl(fileno(file), F_PREALLOCATE, &fst); } ftruncate(fileno(file), fst.fst_length); #elif defined(__linux__) // Version using posix_fallocate. off_t nEndPos = (off_t)offset + length; posix_fallocate(fileno(file), 0, nEndPos); #else // Fallback version // TODO: just write one byte per block static const char buf[65536] = {}; fseek(file, offset, SEEK_SET); while (length > 0) { unsigned int now = 65536; if (length < now) now = length; // Allowed to fail; this function is advisory anyway. fwrite(buf, 1, now, file); length -= now; } #endif } void ShrinkDebugFile() { // Amount of debug.log to save at end when shrinking (must fit in memory) constexpr size_t RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000; // Scroll debug.log if it's getting too big. fs::path pathLog = GetDataDir() / "debug.log"; FILE *file = fsbridge::fopen(pathLog, "r"); // If debug.log file is more than 10% bigger the RECENT_DEBUG_HISTORY_SIZE // trim it down by saving only the last RECENT_DEBUG_HISTORY_SIZE bytes. if (file && fs::file_size(pathLog) > 11 * (RECENT_DEBUG_HISTORY_SIZE / 10)) { // Restart the file with some of the end. 
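// For concreteness, the numbers implied by the constant above: with
// RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000, the trim only triggers once
// debug.log exceeds 11 * (10000000 / 10) = 11,000,000 bytes, and the rewrite
// below keeps just the most recent 10,000,000 bytes.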
std::vector vch(RECENT_DEBUG_HISTORY_SIZE, 0); fseek(file, -((long)vch.size()), SEEK_END); int nBytes = fread(vch.data(), 1, vch.size(), file); fclose(file); file = fsbridge::fopen(pathLog, "w"); if (file) { fwrite(vch.data(), 1, nBytes, file); fclose(file); } } else if (file != nullptr) fclose(file); } #ifdef WIN32 fs::path GetSpecialFolderPath(int nFolder, bool fCreate) { char pszPath[MAX_PATH] = ""; if (SHGetSpecialFolderPathA(nullptr, pszPath, nFolder, fCreate)) { return fs::path(pszPath); } LogPrintf( "SHGetSpecialFolderPathA() failed, could not obtain requested path.\n"); return fs::path(""); } #endif void runCommand(const std::string &strCommand) { int nErr = ::system(strCommand.c_str()); if (nErr) LogPrintf("runCommand error: system(%s) returned %d\n", strCommand, nErr); } void RenameThread(const char *name) { #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) ::prctl(PR_SET_NAME, name, 0, 0, 0); #elif (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)) pthread_set_name_np(pthread_self(), name); #elif defined(MAC_OSX) pthread_setname_np(name); #else // Prevent warnings for unused parameters... (void)name; #endif } void SetupEnvironment() { #ifdef HAVE_MALLOPT_ARENA_MAX // glibc-specific: On 32-bit systems set the number of arenas to 1. By // default, since glibc 2.10, the C library will create up to two heap // arenas per core. This is known to cause excessive virtual address space // usage in our usage. Work around it by setting the maximum number of // arenas to 1. if (sizeof(void *) == 4) { mallopt(M_ARENA_MAX, 1); } #endif // On most POSIX systems (e.g. Linux, but not BSD) the environment's locale may // be invalid, in which case the "C" locale is used as fallback. #if !defined(WIN32) && !defined(MAC_OSX) && !defined(__FreeBSD__) && \ !defined(__OpenBSD__) try { // Raises a runtime error if current locale is invalid. std::locale(""); } catch (const std::runtime_error &) { setenv("LC_ALL", "C", 1); } #endif // The path locale is lazy initialized and to avoid deinitialization errors // in multithreading environments, it is set explicitly by the main thread. // A dummy locale is used to extract the internal default locale, used by // fs::path, which is then used to explicitly imbue the path. std::locale loc = fs::path::imbue(std::locale::classic()); fs::path::imbue(loc); } bool SetupNetworking() { #ifdef WIN32 // Initialize Windows Sockets. WSADATA wsadata; int ret = WSAStartup(MAKEWORD(2, 2), &wsadata); if (ret != NO_ERROR || LOBYTE(wsadata.wVersion) != 2 || HIBYTE(wsadata.wVersion) != 2) return false; #endif return true; } int GetNumCores() { #if BOOST_VERSION >= 105600 return boost::thread::physical_concurrency(); #else // Must fall back to hardware_concurrency, which unfortunately counts // virtual cores. return boost::thread::hardware_concurrency(); #endif } std::string CopyrightHolders(const std::string &strPrefix) { std::string strCopyrightHolders = strPrefix + strprintf(_(COPYRIGHT_HOLDERS), _(COPYRIGHT_HOLDERS_SUBSTITUTION)); // Check for untranslated substitution to make sure Bitcoin ABC copyright // is not removed by accident. 
if (strprintf(COPYRIGHT_HOLDERS, COPYRIGHT_HOLDERS_SUBSTITUTION) .find("Bitcoin ABC") == std::string::npos) { strCopyrightHolders += "\n" + strPrefix + "The Bitcoin ABC developers"; } return strCopyrightHolders; } // Obtain the application startup time (used for uptime calculation) int64_t GetStartupTime() { return nStartupTime; } diff --git a/src/util.h b/src/util.h index 79ae47b405..fac4b60bae 100644 --- a/src/util.h +++ b/src/util.h @@ -1,341 +1,292 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. /** * Server/client environment: argument handling, config file parsing, logging, * thread wrappers, startup time */ #ifndef BITCOIN_UTIL_H #define BITCOIN_UTIL_H #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "compat.h" #include "fs.h" #include "sync.h" #include "tinyformat.h" #include "utiltime.h" #include #include #include #include #include #include #include #include // Application startup time (used for uptime calculation) int64_t GetStartupTime(); static const bool DEFAULT_LOGTIMEMICROS = false; static const bool DEFAULT_LOGIPS = false; static const bool DEFAULT_LOGTIMESTAMPS = true; /** Signals for translation. */ class CTranslationInterface { public: /** Translate a message to the native language of the user. */ boost::signals2::signal Translate; }; extern bool fPrintToConsole; extern bool fPrintToDebugLog; extern bool fLogTimestamps; extern bool fLogTimeMicros; extern bool fLogIPs; extern std::atomic fReopenDebugLog; extern CTranslationInterface translationInterface; extern const char *const BITCOIN_CONF_FILENAME; extern const char *const BITCOIN_PID_FILENAME; extern std::atomic logCategories; /** * Translation function: Call Translate signal on UI interface, which returns a * boost::optional result. If no translation slot is registered, nothing is * returned, and simply return the input. */ inline std::string _(const char *psz) { boost::optional rv = translationInterface.Translate(psz); return rv ? (*rv) : psz; } void SetupEnvironment(); bool SetupNetworking(); namespace BCLog { enum LogFlags : uint32_t { NONE = 0, NET = (1 << 0), TOR = (1 << 1), MEMPOOL = (1 << 2), HTTP = (1 << 3), BENCH = (1 << 4), ZMQ = (1 << 5), DB = (1 << 6), RPC = (1 << 7), ESTIMATEFEE = (1 << 8), ADDRMAN = (1 << 9), SELECTCOINS = (1 << 10), REINDEX = (1 << 11), CMPCTBLOCK = (1 << 12), RAND = (1 << 13), PRUNE = (1 << 14), PROXY = (1 << 15), MEMPOOLREJ = (1 << 16), LIBEVENT = (1 << 17), COINDB = (1 << 18), QT = (1 << 19), LEVELDB = (1 << 20), ALL = ~uint32_t(0), }; } /** Return true if log accepts specified category */ static inline bool LogAcceptCategory(uint32_t category) { return (logCategories.load(std::memory_order_relaxed) & category) != 0; } /** Returns a string with the supported log categories */ std::string ListLogCategories(); /** Return true if str parses as a log category and set the flags in f */ bool GetLogCategory(uint32_t *f, const std::string *str); /** Send a string to the log output */ int LogPrintStr(const std::string &str); #define LogPrint(category, ...) \ do { \ if (LogAcceptCategory((category))) { \ LogPrintStr(tfm::format(__VA_ARGS__)); \ } \ } while (0) #define LogPrintf(...) \ do { \ LogPrintStr(tfm::format(__VA_ARGS__)); \ } while (0) template bool error(const char *fmt, const Args &... args) { LogPrintStr("ERROR: " + tfm::format(fmt, args...) 
+ "\n"); return false; } void PrintExceptionContinue(const std::exception *pex, const char *pszThread); void FileCommit(FILE *file); bool TruncateFile(FILE *file, unsigned int length); int RaiseFileDescriptorLimit(int nMinFD); void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length); bool RenameOver(fs::path src, fs::path dest); bool TryCreateDirectories(const fs::path &p); fs::path GetDefaultDataDir(); const fs::path &GetDataDir(bool fNetSpecific = true); void ClearDatadirCache(); fs::path GetConfigFile(const std::string &confPath); #ifndef WIN32 fs::path GetPidFile(); void CreatePidFile(const fs::path &path, pid_t pid); #endif #ifdef WIN32 fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true); #endif void OpenDebugLog(); void ShrinkDebugFile(); void runCommand(const std::string &strCommand); inline bool IsSwitchChar(char c) { #ifdef WIN32 return c == '-' || c == '/'; #else return c == '-'; #endif } class ArgsManager { protected: CCriticalSection cs_args; std::map mapArgs; std::map> mapMultiArgs; public: void ParseParameters(int argc, const char *const argv[]); void ReadConfigFile(const std::string &confPath); std::vector GetArgs(const std::string &strArg); /** * Return true if the given argument has been manually set. * * @param strArg Argument to get (e.g. "-foo") * @return true if the argument has been set */ bool IsArgSet(const std::string &strArg); /** * Return string argument or default value. * * @param strArg Argument to get (e.g. "-foo") * @param default (e.g. "1") * @return command-line argument or default value */ std::string GetArg(const std::string &strArg, const std::string &strDefault); /** * Return integer argument or default value. * * @param strArg Argument to get (e.g. "-foo") * @param default (e.g. 1) * @return command-line argument (0 if invalid number) or default value */ int64_t GetArg(const std::string &strArg, int64_t nDefault); /** * Return boolean argument or default value. * * @param strArg Argument to get (e.g. "-foo") * @param default (true or false) * @return command-line argument or default value */ bool GetBoolArg(const std::string &strArg, bool fDefault); /** * Set an argument if it doesn't already have a value. * * @param strArg Argument to set (e.g. "-foo") * @param strValue Value (e.g. "1") * @return true if argument gets set, false if it already had a value */ bool SoftSetArg(const std::string &strArg, const std::string &strValue); /** * Set a boolean argument if it doesn't already have a value. * * @param strArg Argument to set (e.g. "-foo") * @param fValue Value (e.g. 
false) * @return true if argument gets set, false if it already had a value */ bool SoftSetBoolArg(const std::string &strArg, bool fValue); // Forces a arg setting, used only in testing void ForceSetArg(const std::string &strArg, const std::string &strValue); // Forces a multi arg setting, used only in testing void ForceSetMultiArg(const std::string &strArg, const std::string &strValue); // Remove an arg setting, used only in testing void ClearArg(const std::string &strArg); }; extern ArgsManager gArgs; -// wrappers using the global ArgsManager: -static inline void ParseParameters(int argc, const char *const argv[]) { - gArgs.ParseParameters(argc, argv); -} - -static inline void ReadConfigFile(const std::string &confPath) { - gArgs.ReadConfigFile(confPath); -} - -static inline bool SoftSetArg(const std::string &strArg, - const std::string &strValue) { - return gArgs.SoftSetArg(strArg, strValue); -} - -static inline void ForceSetArg(const std::string &strArg, - const std::string &strValue) { - gArgs.ForceSetArg(strArg, strValue); -} - -static inline void ForceSetMultiArg(const std::string &strArg, - const std::string &strValue) { - gArgs.ForceSetMultiArg(strArg, strValue); -} - -static inline void ClearArg(const std::string &strArg) { - gArgs.ClearArg(strArg); -} - -static inline bool IsArgSet(const std::string &strArg) { - return gArgs.IsArgSet(strArg); -} - -static inline std::string GetArg(const std::string &strArg, - const std::string &strDefault) { - return gArgs.GetArg(strArg, strDefault); -} - -static inline int64_t GetArg(const std::string &strArg, int64_t nDefault) { - return gArgs.GetArg(strArg, nDefault); -} - -static inline bool GetBoolArg(const std::string &strArg, bool fDefault) { - return gArgs.GetBoolArg(strArg, fDefault); -} - -static inline bool SoftSetBoolArg(const std::string &strArg, bool fValue) { - return gArgs.SoftSetBoolArg(strArg, fValue); -} - /** * Format a string to be used as group of options in help messages. * * @param message Group name (e.g. "RPC server options:") * @return the formatted string */ std::string HelpMessageGroup(const std::string &message); /** * Format a string to be used as option description in help messages. * * @param option Option message (e.g. "-rpcuser=") * @param message Option description (e.g. "Username for JSON-RPC connections") * @return the formatted string */ std::string HelpMessageOpt(const std::string &option, const std::string &message); /** * Return the number of physical cores available on the current system. * @note This does not count virtual cores, such as those provided by * HyperThreading when boost is newer than 1.56. */ int GetNumCores(); void RenameThread(const char *name); /** * .. and a wrapper that just calls func once */ template void TraceThread(const char *name, Callable func) { std::string s = strprintf("bitcoin-%s", name); RenameThread(s.c_str()); try { LogPrintf("%s thread start\n", name); func(); LogPrintf("%s thread exit\n", name); } catch (const boost::thread_interrupted &) { LogPrintf("%s thread interrupt\n", name); throw; } catch (const std::exception &e) { PrintExceptionContinue(&e, name); throw; } catch (...) 
{ PrintExceptionContinue(nullptr, name); throw; } } std::string CopyrightHolders(const std::string &strPrefix); #endif // BITCOIN_UTIL_H diff --git a/src/validation.cpp b/src/validation.cpp index f0144d2851..7d671dc8c9 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,5304 +1,5311 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "validation.h" #include "arith_uint256.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/validation.h" #include "fs.h" #include "hash.h" #include "init.h" #include "policy/fees.h" #include "policy/policy.h" #include "pow.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "random.h" #include "script/script.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "tinyformat.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "undo.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include "validationinterface.h" #include "versionbits.h" #include "warnings.h" #include #include #include #include #include #include #include #include #if defined(NDEBUG) #error "Bitcoin cannot be compiled without assertions." #endif /** * Global state */ CCriticalSection cs_main; BlockMap mapBlockIndex; CChain chainActive; CBlockIndex *pindexBestHeader = nullptr; CWaitableCriticalSection csBestBlock; CConditionVariable cvBlockChange; int nScriptCheckThreads = 0; std::atomic_bool fImporting(false); bool fReindex = false; bool fTxIndex = false; bool fHavePruned = false; bool fPruneMode = false; bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; bool fRequireStandard = true; bool fCheckBlockIndex = false; bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; uint256 hashAssumeValid; CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); Amount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CTxMemPool mempool(::minRelayTxFee); static void CheckBlockIndex(const Consensus::Params &consensusParams); /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; const std::string strMessageMagic = "Bitcoin Signed Message:\n"; // Internal stuff namespace { struct CBlockIndexWorkComparator { bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const { // First sort by most total work, ... if (pa->nChainWork > pb->nChainWork) return false; if (pa->nChainWork < pb->nChainWork) return true; // ... then by earliest time received, ... if (pa->nSequenceId < pb->nSequenceId) return false; if (pa->nSequenceId > pb->nSequenceId) return true; // Use pointer address as tie breaker (should only happen with blocks // loaded from disk, as those all have id 0). if (pa < pb) return false; if (pa > pb) return true; // Identical blocks. return false; } }; CBlockIndex *pindexBestInvalid; /** * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself * and all ancestors) and as good as our current tip or better. Entries may be * failed, though, and pruning nodes may be missing the data for the block. 
*/ std::set setBlockIndexCandidates; /** * All pairs A->B, where A (or one of its ancestors) misses transactions, but B * has transactions. Pruned nodes may have entries where B is missing data. */ std::multimap mapBlocksUnlinked; CCriticalSection cs_LastBlockFile; std::vector vinfoBlockFile; int nLastBlockFile = 0; /** * Global flag to indicate we should check to see if there are block/undo files * that should be deleted. Set on startup or if we allocate more file space when * we're in prune mode. */ bool fCheckForPruning = false; /** * Every received block is assigned a unique and increasing identifier, so we * know which one to give priority in case of a fork. */ CCriticalSection cs_nBlockSequenceId; /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */ int32_t nBlockSequenceId = 1; /** Decreasing counter (used by subsequent preciousblock calls). */ int32_t nBlockReverseSequenceId = -1; /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; /** Dirty block index entries. */ std::set setDirtyBlockIndex; /** Dirty block file entries. */ std::set setDirtyFileInfo; } // namespace CBlockIndex *FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator) { // Find the first block the caller has in the main chain for (const uint256 &hash : locator.vHave) { BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = (*mi).second; if (chain.Contains(pindex)) return pindex; if (pindex->GetAncestor(chain.Height()) == chain.Tip()) { return chain.Tip(); } } } return chain.Genesis(); } CCoinsViewCache *pcoinsTip = nullptr; CBlockTreeDB *pblocktree = nullptr; enum FlushStateMode { FLUSH_STATE_NONE, FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS }; // See definition for documentation static bool FlushStateToDisk(const CChainParams &chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight = 0); static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight); static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight); static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false); static uint32_t GetBlockScriptFlags(const CBlockIndex *pindex, const Config &config); static bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) { if (tx.nLockTime == 0) { return true; } int64_t lockTime = tx.nLockTime; int64_t lockTimeLimit = (lockTime < LOCKTIME_THRESHOLD) ? nBlockHeight : nBlockTime; if (lockTime < lockTimeLimit) { return true; } for (const auto &txin : tx.vin) { if (txin.nSequence != CTxIn::SEQUENCE_FINAL) { return false; } } return true; } /** * Calculates the block height and previous block's median time past at * which the transaction will be considered final in the context of BIP 68. * Also removes from the vector of input heights any entries which did not * correspond to sequence locked inputs as they do not affect the calculation. */ static std::pair CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector *prevHeights, const CBlockIndex &block) { assert(prevHeights->size() == tx.vin.size()); // Will be set to the equivalent height- and time-based nLockTime // values that would be necessary to satisfy all relative lock- // time constraints given our view of block chain history. // The semantics of nLockTime are the last invalid height/time, so // use -1 to have the effect of any height or time being valid. 
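// A worked example (hypothetical input) of the computation below: if an
// input's prevout was confirmed at height 100 and carries nSequence = 16
// (height-based, disable flag clear), it contributes
//     nMinHeight = 100 + 16 - 1 = 115,
// so the spend is final only in a block of height 116 or higher. A
// time-based sequence value of 2 (type flag set) instead contributes
//     nMinTime = MTP(block before the prevout's block) + 2 * 512 - 1,
// matching the "last invalid height/time" semantics described above.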
int nMinHeight = -1; int64_t nMinTime = -1; // tx.nVersion is signed integer so requires cast to unsigned otherwise // we would be doing a signed comparison and half the range of nVersion // wouldn't support BIP 68. bool fEnforceBIP68 = static_cast(tx.nVersion) >= 2 && flags & LOCKTIME_VERIFY_SEQUENCE; // Do not enforce sequence numbers as a relative lock time // unless we have been instructed to if (!fEnforceBIP68) { return std::make_pair(nMinHeight, nMinTime); } for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { const CTxIn &txin = tx.vin[txinIndex]; // Sequence numbers with the most significant bit set are not // treated as relative lock-times, nor are they given any // consensus-enforced meaning at this point. if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) { // The height of this input is not relevant for sequence locks (*prevHeights)[txinIndex] = 0; continue; } int nCoinHeight = (*prevHeights)[txinIndex]; if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) { int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight - 1, 0)) ->GetMedianTimePast(); // NOTE: Subtract 1 to maintain nLockTime semantics. // BIP 68 relative lock times have the semantics of calculating the // first block or time at which the transaction would be valid. When // calculating the effective block time or height for the entire // transaction, we switch to using the semantics of nLockTime which // is the last invalid block time or height. Thus we subtract 1 from // the calculated time or height. // Time-based relative lock-times are measured from the smallest // allowed timestamp of the block containing the txout being spent, // which is the median time past of the block prior. nMinTime = std::max( nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1); } else { nMinHeight = std::max( nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1); } } return std::make_pair(nMinHeight, nMinTime); } static bool EvaluateSequenceLocks(const CBlockIndex &block, std::pair lockPair) { assert(block.pprev); int64_t nBlockTime = block.pprev->GetMedianTimePast(); if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime) return false; return true; } bool SequenceLocks(const CTransaction &tx, int flags, std::vector *prevHeights, const CBlockIndex &block) { return EvaluateSequenceLocks( block, CalculateSequenceLocks(tx, flags, prevHeights, block)); } bool TestLockPointValidity(const LockPoints *lp) { AssertLockHeld(cs_main); assert(lp); // If there are relative lock times then the maxInputBlock will be set // If there are no relative lock times, the LockPoints don't depend on the // chain if (lp->maxInputBlock) { // Check whether chainActive is an extension of the block at which the // LockPoints // calculation was valid. If not LockPoints are no longer valid if (!chainActive.Contains(lp->maxInputBlock)) { return false; } } // LockPoints still valid return true; } bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints *lp, bool useExistingLockPoints) { AssertLockHeld(cs_main); AssertLockHeld(mempool.cs); CBlockIndex *tip = chainActive.Tip(); CBlockIndex index; index.pprev = tip; // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate height based // locks because when SequenceLocks() is called within ConnectBlock(), the // height of the block *being* evaluated is what is used. 
Thus if we want to // know if a transaction can be part of the *next* block, we need to use one // more than chainActive.Height() index.nHeight = tip->nHeight + 1; std::pair lockPair; if (useExistingLockPoints) { assert(lp); lockPair.first = lp->height; lockPair.second = lp->time; } else { // pcoinsTip contains the UTXO set for chainActive.Tip() CCoinsViewMemPool viewMemPool(pcoinsTip, mempool); std::vector prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { const CTxIn &txin = tx.vin[txinIndex]; Coin coin; if (!viewMemPool.GetCoin(txin.prevout, coin)) { return error("%s: Missing input", __func__); } if (coin.GetHeight() == MEMPOOL_HEIGHT) { // Assume all mempool transaction confirm in the next block prevheights[txinIndex] = tip->nHeight + 1; } else { prevheights[txinIndex] = coin.GetHeight(); } } lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index); if (lp) { lp->height = lockPair.first; lp->time = lockPair.second; // Also store the hash of the block with the highest height of all // the blocks which have sequence locked prevouts. This hash needs // to still be on the chain for these LockPoint calculations to be // valid. // Note: It is impossible to correctly calculate a maxInputBlock if // any of the sequence locked inputs depend on unconfirmed txs, // except in the special case where the relative lock time/height is // 0, which is equivalent to no sequence lock. Since we assume input // height of tip+1 for mempool txs and test the resulting lockPair // from CalculateSequenceLocks against tip+1. We know // EvaluateSequenceLocks will fail if there was a non-zero sequence // lock on a mempool input, so we can use the return value of // CheckSequenceLocks to indicate the LockPoints validity int maxInputHeight = 0; for (int height : prevheights) { // Can ignore mempool inputs since we'll fail if they had // non-zero locks if (height != tip->nHeight + 1) { maxInputHeight = std::max(maxInputHeight, height); } } lp->maxInputBlock = tip->GetAncestor(maxInputHeight); } } return EvaluateSequenceLocks(index, lockPair); } uint64_t GetSigOpCountWithoutP2SH(const CTransaction &tx) { uint64_t nSigOps = 0; for (const auto &txin : tx.vin) { nSigOps += txin.scriptSig.GetSigOpCount(false); } for (const auto &txout : tx.vout) { nSigOps += txout.scriptPubKey.GetSigOpCount(false); } return nSigOps; } uint64_t GetP2SHSigOpCount(const CTransaction &tx, const CCoinsViewCache &inputs) { if (tx.IsCoinBase()) { return 0; } uint64_t nSigOps = 0; for (auto &i : tx.vin) { const CTxOut &prevout = inputs.GetOutputFor(i); if (prevout.scriptPubKey.IsPayToScriptHash()) { nSigOps += prevout.scriptPubKey.GetSigOpCount(i.scriptSig); } } return nSigOps; } uint64_t GetTransactionSigOpCount(const CTransaction &tx, const CCoinsViewCache &inputs, int flags) { uint64_t nSigOps = GetSigOpCountWithoutP2SH(tx); if (tx.IsCoinBase()) { return nSigOps; } if (flags & SCRIPT_VERIFY_P2SH) { nSigOps += GetP2SHSigOpCount(tx, inputs); } return nSigOps; } static bool CheckTransactionCommon(const CTransaction &tx, CValidationState &state, bool fCheckDuplicateInputs) { // Basic checks that don't depend on any context if (tx.vin.empty()) { return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty"); } if (tx.vout.empty()) { return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty"); } // Size limit if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize"); } // Check for 
negative or overflow output values Amount nValueOut(0); for (const auto &txout : tx.vout) { if (txout.nValue < Amount(0)) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative"); } if (txout.nValue > MAX_MONEY) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge"); } nValueOut += txout.nValue; if (!MoneyRange(nValueOut)) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge"); } } if (GetSigOpCountWithoutP2SH(tx) > MAX_TX_SIGOPS_COUNT) { return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops"); } // Check for duplicate inputs - note that this check is slow so we skip it // in CheckBlock if (fCheckDuplicateInputs) { std::set vInOutPoints; for (const auto &txin : tx.vin) { if (!vInOutPoints.insert(txin.prevout).second) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate"); } } } return true; } bool CheckCoinbase(const CTransaction &tx, CValidationState &state, bool fCheckDuplicateInputs) { if (!tx.IsCoinBase()) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase"); } if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) { // CheckTransactionCommon fill in the state. return false; } if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-length"); } return true; } bool CheckRegularTransaction(const CTransaction &tx, CValidationState &state, bool fCheckDuplicateInputs) { if (tx.IsCoinBase()) { return state.DoS(100, false, REJECT_INVALID, "bad-tx-coinbase"); } if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) { // CheckTransactionCommon fill in the state. return false; } for (const auto &txin : tx.vin) { if (txin.prevout.IsNull()) { return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null"); } } return true; } static void LimitMempoolSize(CTxMemPool &pool, size_t limit, unsigned long age) { int expired = pool.Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector vNoSpendsRemaining; pool.TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint &removed : vNoSpendsRemaining) { pcoinsTip->Uncache(removed); } } /** Convert CValidationState to a human-readable message for logging */ std::string FormatStateMessage(const CValidationState &state) { return strprintf( "%s%s (code %i)", state.GetRejectReason(), state.GetDebugMessage().empty() ? 
"" : ", " + state.GetDebugMessage(), state.GetRejectCode()); } static bool IsCurrentForFeeEstimation() { AssertLockHeld(cs_main); if (IsInitialBlockDownload()) { return false; } if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) { return false; } if (chainActive.Height() < pindexBestHeader->nHeight - 1) { return false; } return true; } static bool IsUAHFenabled(const Config &config, int nHeight) { return nHeight >= config.GetChainParams().GetConsensus().uahfHeight; } bool IsUAHFenabled(const Config &config, const CBlockIndex *pindexPrev) { if (pindexPrev == nullptr) { return false; } return IsUAHFenabled(config, pindexPrev->nHeight); } static bool IsDAAEnabled(const Config &config, int nHeight) { return nHeight >= config.GetChainParams().GetConsensus().daaHeight; } bool IsDAAEnabled(const Config &config, const CBlockIndex *pindexPrev) { if (pindexPrev == nullptr) { return false; } return IsDAAEnabled(config, pindexPrev->nHeight); } /** * Make mempool consistent after a reorg, by re-adding or recursively erasing * disconnected block transactions from the mempool, and also removing any other * transactions from the mempool that are no longer valid given the new * tip/height. * * Note: we assume that disconnectpool only contains transactions that are NOT * confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). * * Passing fAddToMempool=false will skip trying to add the transactions back, * and instead just erase from the mempool as needed. */ void UpdateMempoolForReorg(const Config &config, DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector vHashUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. auto it = disconnectpool.queuedTx.get().rbegin(); while (it != disconnectpool.queuedTx.get().rend()) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || (*it)->IsCoinBase() || !AcceptToMemoryPool(config, mempool, stateDummy, *it, false, nullptr, nullptr, true)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); } else if (mempool.exists((*it)->GetId())) { vHashUpdate.push_back((*it)->GetId()); } ++it; } disconnectpool.queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. 
mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions mempool.removeForReorg(config, pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions LimitMempoolSize( - mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, - GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); + mempool, + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, + gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool // were somehow broken and returning the wrong scriptPubKeys static bool CheckInputsFromMempoolAndCache(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool &pool, uint32_t flags, bool cacheSigStore, PrecomputedTransactionData &txdata) { AssertLockHeld(cs_main); // pool.cs should be locked already, but go ahead and re-take the lock here // to enforce that mempool doesn't change between when we check the view and // when we actually call through to CheckInputs LOCK(pool.cs); assert(!tx.IsCoinBase()); for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); // At this point we haven't actually checked if the coins are all // available (or shouldn't assume we have, since CheckInputs does). So // we just return failure if the inputs are not available here, and then // only have to check equivalence for available inputs. if (coin.IsSpent()) { return false; } const CTransactionRef &txFrom = pool.get(txin.prevout.hash); if (txFrom) { assert(txFrom->GetHash() == txin.prevout.hash); assert(txFrom->vout.size() > txin.prevout.n); assert(txFrom->vout[txin.prevout.n] == coin.GetTxOut()); } else { const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout); assert(!coinFromDisk.IsSpent()); assert(coinFromDisk.GetTxOut() == coin.GetTxOut()); } } return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata); } static bool AcceptToMemoryPoolWorker( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &ptx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, std::list *plTxnReplaced, bool fOverrideMempoolLimit, const Amount nAbsurdFee, std::vector &coins_to_uncache) { AssertLockHeld(cs_main); const CTransaction &tx = *ptx; const uint256 txid = tx.GetId(); if (pfMissingInputs) { *pfMissingInputs = false; } // Coinbase is only valid in a block, not as a loose transaction. if (!CheckRegularTransaction(tx, state, true)) { // state filled in by CheckRegularTransaction. return false; } // Rather not work on nonstandard transactions (unless -testnet/-regtest) std::string reason; if (fRequireStandard && !IsStandardTx(tx, reason)) { return state.DoS(0, false, REJECT_NONSTANDARD, reason); } // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. CValidationState ctxState; if (!ContextualCheckTransactionForCurrentBlock( config, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) { // We copy the state from a dummy to ensure we don't increase the // ban score of peer for transaction that could be valid in the future. return state.DoS( 0, false, REJECT_NONSTANDARD, ctxState.GetRejectReason(), ctxState.CorruptionPossible(), ctxState.GetDebugMessage()); } // Is it already in the memory pool? 
if (pool.exists(txid)) { return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool"); } // Check for conflicts with in-memory transactions { // Protect pool.mapNextTx LOCK(pool.cs); for (const CTxIn &txin : tx.vin) { auto itConflicting = pool.mapNextTx.find(txin.prevout); if (itConflicting != pool.mapNextTx.end()) { // Disable replacement feature for good return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict"); } } } { CCoinsView dummy; CCoinsViewCache view(&dummy); Amount nValueIn(0); LockPoints lp; { LOCK(pool.cs); CCoinsViewMemPool viewMemPool(pcoinsTip, pool); view.SetBackend(viewMemPool); // Do we already have it? for (size_t out = 0; out < tx.vout.size(); out++) { COutPoint outpoint(txid, out); bool had_coin_in_cache = pcoinsTip->HaveCoinInCache(outpoint); if (view.HaveCoin(outpoint)) { if (!had_coin_in_cache) { coins_to_uncache.push_back(outpoint); } return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known"); } } // Do all inputs exist? for (const CTxIn txin : tx.vin) { if (!pcoinsTip->HaveCoinInCache(txin.prevout)) { coins_to_uncache.push_back(txin.prevout); } if (!view.HaveCoin(txin.prevout)) { if (pfMissingInputs) { *pfMissingInputs = true; } // fMissingInputs and !state.IsInvalid() is used to detect // this condition, don't set state.Invalid() return false; } } // Are the actual inputs available? if (!view.HaveInputs(tx)) { return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent"); } // Bring the best block into scope. view.GetBestBlock(); nValueIn = view.GetValueIn(tx); // We have all inputs cached now, so switch back to dummy, so we // don't need to keep lock on mempool. view.SetBackend(dummy); // Only accept BIP68 sequence locked transactions that can be mined // in the next block; we don't want our mempool filled up with // transactions that can't be mined yet. Must keep pool.cs for this // unless we change CheckSequenceLocks to take a CoinsViewCache // instead of create its own. if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) { return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final"); } } // Check for non-standard pay-to-script-hash in inputs if (fRequireStandard && !AreInputsStandard(tx, view)) { return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs"); } int64_t nSigOpsCount = GetTransactionSigOpCount(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS); Amount nValueOut = tx.GetValueOut(); Amount nFees = nValueIn - nValueOut; // nModifiedFees includes any fee deltas from PrioritiseTransaction Amount nModifiedFees = nFees; double nPriorityDummy = 0; pool.ApplyDeltas(txid, nPriorityDummy, nModifiedFees); Amount inChainInputValue; double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue); // Keep track of transactions that spend a coinbase, which we re-scan // during reorgs to ensure COINBASE_MATURITY is still met. bool fSpendsCoinbase = false; for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); if (coin.IsCoinBase()) { fSpendsCoinbase = true; break; } } CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, dPriority, chainActive.Height(), inChainInputValue, fSpendsCoinbase, nSigOpsCount, lp); unsigned int nSize = entry.GetTxSize(); // Check that the transaction doesn't have an excessive number of // sigops, making it impossible to mine. 
Since the coinbase transaction // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than // MAX_BLOCK_SIGOPS_PER_MB; we still consider this an invalid rather // than merely non-standard transaction. if (nSigOpsCount > MAX_STANDARD_TX_SIGOPS) { return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false, strprintf("%d", nSigOpsCount)); } Amount mempoolRejectFee = - pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * - 1000000) + pool.GetMinFee( + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * + 1000000) .GetFee(nSize); if (mempoolRejectFee > Amount(0) && nModifiedFees < mempoolRejectFee) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee)); } - if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && + if (gArgs.GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) { // Require that free transactions have sufficient priority to be // mined in the next block. return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); } // Continuously rate-limit free (really, very-low-fee) transactions. // This mitigates 'penny-flooding' -- sending thousands of free // transactions just to be annoying or make others' transactions take // longer to confirm. if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) { static CCriticalSection csFreeLimiter; static double dFreeCount; static int64_t nLastTime; int64_t nNow = GetTime(); LOCK(csFreeLimiter); // Use an exponentially decaying ~10-minute window: dFreeCount *= pow(1.0 - 1.0 / 600.0, double(nNow - nLastTime)); nLastTime = nNow; // -limitfreerelay unit is thousand-bytes-per-minute // At default rate it would take over a month to fill 1GB if (dFreeCount + nSize >= - GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000) { + gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * + 1000) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction"); } LogPrint(BCLog::MEMPOOL, "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount + nSize); dFreeCount += nSize; } if (nAbsurdFee != Amount(0) && nFees > nAbsurdFee) { return state.Invalid(false, REJECT_HIGHFEE, "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee)); } // Calculate in-mempool ancestors, up to a limit. 
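// A minimal sketch of the decaying counter used by the penny-flooding
// limiter above: the running total leaks away with a ~10-minute time
// constant, and a free transaction is admitted only while the decayed total
// stays under the budget. FreeRelayLimiter is a standalone illustration;
// limitKBperMin mirrors the thousand-bytes-per-minute unit of -limitfreerelay.
#include <cmath>
#include <cstdint>

class FreeRelayLimiter {
    double count = 0.0;   // decayed running total of free bytes accepted
    int64_t lastTime = 0; // unix time of the previous update
public:
    bool Allow(int64_t now, double txSize, int64_t limitKBperMin) {
        // Exponential decay with a 600-second time constant (~10 minutes).
        count *= std::pow(1.0 - 1.0 / 600.0, double(now - lastTime));
        lastTime = now;
        // The limit is thousand-bytes-per-minute applied over the ~10-minute
        // window, hence the * 10 * 1000 seen in the code above.
        if (count + txSize >= double(limitKBperMin) * 10 * 1000) {
            return false; // rate limited
        }
        count += txSize;
        return true;
    }
};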
CTxMemPool::setEntries setAncestors; size_t nLimitAncestors = - GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); + gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); size_t nLimitAncestorSize = - GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000; + gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * + 1000; size_t nLimitDescendants = - GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); + gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); size_t nLimitDescendantSize = - GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * + gArgs.GetArg("-limitdescendantsize", + DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000; std::string errString; if (!pool.CalculateMemPoolAncestors( entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString); } uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; if (!config.GetChainParams().RequireStandard()) { scriptVerifyFlags = - GetArg("-promiscuousmempoolflags", scriptVerifyFlags); + gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags); } // Check against previous transactions. This is done last to help // prevent CPU exhaustion denial-of-service attacks. PrecomputedTransactionData txdata(tx); if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) { // State filled in by CheckInputs. return false; } // Check again against the current block tip's script verification flags // to cache our script execution flags. This is, of course, useless if // the next block has different script flags from the previous one, but // because the cache tracks script flags for us it will auto-invalidate // and we'll just have a few blocks of extra misses on soft-fork // activation. // // This is also useful in case of bugs in the standard flags that cause // transactions to pass as valid when they're actually invalid. For // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG // NOT scripts to pass, even though they were invalid. // // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. uint32_t currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), config); if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) { // If we're using promiscuousmempoolflags, we may hit this normally. // Check if current block has some flags that scriptVerifyFlags does // not before printing an ominous warning. if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) { return error( "%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against " "MANDATORY but not STANDARD flags %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, false, txdata)) { return error( "%s: ConnectInputs failed against MANDATORY but not " "STANDARD flags due to promiscuous mempool %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } LogPrintf("Warning: -promiscuousmempool flags set to not include " "currently enforced soft forks, this may break mining or " "otherwise cause instability!\n"); } // This transaction should only count for fee estimation if // the node is not behind and it is not dependent on any other // transactions in the mempool. 
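// A minimal worked example of the bit test used above to decide whether the
// tip's script flags demand anything the mempool's scriptVerifyFlags did not:
// (~mempoolFlags & blockFlags) is non-zero exactly when the block enforces a
// flag the mempool check skipped. The FLAG_* values are illustrative, not the
// SCRIPT_VERIFY_* constants.
#include <cassert>
#include <cstdint>

int main() {
    const uint32_t FLAG_A = 1u << 0, FLAG_B = 1u << 1, FLAG_C = 1u << 2;
    uint32_t mempoolFlags = FLAG_A | FLAG_B;
    uint32_t blockFlags = FLAG_A | FLAG_C;

    // FLAG_C is enforced for the block but was not checked against the
    // mempool policy, so the "extra flags" mask is non-zero and the stricter
    // re-check explains the failure.
    assert((~mempoolFlags & blockFlags) == FLAG_C);

    // When the block flags are a subset of the mempool flags the mask is
    // zero, and a failure at that point indicates a genuine bug rather than
    // a laxer -promiscuousmempoolflags setting.
    blockFlags = FLAG_A;
    assert((~mempoolFlags & blockFlags) == 0);
    return 0;
}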
bool validForFeeEstimation = IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); // Store transaction in memory. pool.addUnchecked(txid, entry, setAncestors, validForFeeEstimation); // Trim mempool and check if tx was trimmed. if (!fOverrideMempoolLimit) { LimitMempoolSize( - pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, - GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); + pool, + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, + gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * + 60); if (!pool.exists(txid)) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full"); } } } GetMainSignals().TransactionAddedToMempool(ptx); return true; } /** * (try to) add transaction to memory pool with a specified acceptance time. */ static bool AcceptToMemoryPoolWithTime( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, std::list *plTxnReplaced = nullptr, bool fOverrideMempoolLimit = false, const Amount nAbsurdFee = Amount(0)) { std::vector coins_to_uncache; bool res = AcceptToMemoryPoolWorker( config, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache); if (!res) { for (const COutPoint &outpoint : coins_to_uncache) { pcoinsTip->Uncache(outpoint); } } // After we've (potentially) uncached entries, ensure our coins cache is // still within its size limits CValidationState stateDummy; FlushStateToDisk(config.GetChainParams(), stateDummy, FLUSH_STATE_PERIODIC); return res; } bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, std::list *plTxnReplaced, bool fOverrideMempoolLimit, const Amount nAbsurdFee) { return AcceptToMemoryPoolWithTime(config, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee); } /** Return transaction in txOut, and if it was found inside a block, its hash is * placed in hashBlock */ bool GetTransaction(const Config &config, const uint256 &txid, CTransactionRef &txOut, uint256 &hashBlock, bool fAllowSlow) { CBlockIndex *pindexSlow = nullptr; LOCK(cs_main); CTransactionRef ptx = mempool.get(txid); if (ptx) { txOut = ptx; return true; } if (fTxIndex) { CDiskTxPos postx; if (pblocktree->ReadTxIndex(txid, postx)) { CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); if (file.IsNull()) return error("%s: OpenBlockFile failed", __func__); CBlockHeader header; try { file >> header; fseek(file.Get(), postx.nTxOffset, SEEK_CUR); file >> txOut; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } hashBlock = header.GetHash(); if (txOut->GetId() != txid) return error("%s: txid mismatch", __func__); return true; } } // use coin database to locate block that contains transaction, and scan it if (fAllowSlow) { const Coin &coin = AccessByTxid(*pcoinsTip, txid); if (!coin.IsSpent()) { pindexSlow = chainActive[coin.GetHeight()]; } } if (pindexSlow) { CBlock block; if (ReadBlockFromDisk(block, pindexSlow, config)) { for (const auto &tx : block.vtx) { if (tx->GetId() == txid) { txOut = tx; hashBlock = pindexSlow->GetBlockHash(); return true; } } } } return false; } ////////////////////////////////////////////////////////////////////////////// // // CBlock and CBlockIndex // static bool WriteBlockToDisk(const CBlock &block, 
CDiskBlockPos &pos, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("WriteBlockToDisk: OpenBlockFile failed"); } // Write index header unsigned int nSize = GetSerializeSize(fileout, block); fileout << FLATDATA(messageStart) << nSize; // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("WriteBlockToDisk: ftell failed"); } pos.nPos = (unsigned int)fileOutPos; fileout << block; return true; } bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos, const Config &config) { block.SetNull(); // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); } // Read block try { filein >> block; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); } return true; } bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex, const Config &config) { if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), config)) { return false; } if (block.GetHash() != pindex->GetBlockHash()) { return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() " "doesn't match index for %s at %s", pindex->ToString(), pindex->GetBlockPos().ToString()); } return true; } Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) { int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; // Force block reward to zero when right shift is undefined. if (halvings >= 64) return Amount(0); Amount nSubsidy = 50 * COIN; // Subsidy is cut in half every 210,000 blocks which will occur // approximately every 4 years. return Amount(nSubsidy.GetSatoshis() >> halvings); } bool IsInitialBlockDownload() { const CChainParams &chainParams = Params(); // Once this function has returned false, it must remain false. static std::atomic latchToFalse{false}; // Optimization: pre-test latch before taking the lock. 
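// A minimal sketch of the latch optimization noted just above: an expensive,
// mutex-protected evaluation that, once it answers "no longer syncing",
// latches and is served from a relaxed atomic load from then on.
// ExpensiveCheckStillSyncing() is a placeholder assumption of this sketch.
#include <atomic>
#include <mutex>

static bool ExpensiveCheckStillSyncing() { return false; } // placeholder

bool IsStillSyncing() {
    static std::atomic<bool> latchedDone{false};
    static std::mutex m;

    // Fast path: once latched, skip the lock entirely.
    if (latchedDone.load(std::memory_order_relaxed)) return false;

    std::lock_guard<std::mutex> lock(m);
    // Re-check under the lock in case another thread latched meanwhile.
    if (latchedDone.load(std::memory_order_relaxed)) return false;

    if (ExpensiveCheckStillSyncing()) return true;

    // The answer only ever transitions syncing -> done, so latch it.
    latchedDone.store(true, std::memory_order_relaxed);
    return false;
}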
if (latchToFalse.load(std::memory_order_relaxed)) return false; LOCK(cs_main); if (latchToFalse.load(std::memory_order_relaxed)) return false; if (fImporting || fReindex) return true; if (chainActive.Tip() == nullptr) return true; if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork)) return true; if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) return true; LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); latchToFalse.store(true, std::memory_order_relaxed); return false; } CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr; static void AlertNotify(const std::string &strMessage) { uiInterface.NotifyAlertChanged(); - std::string strCmd = GetArg("-alertnotify", ""); + std::string strCmd = gArgs.GetArg("-alertnotify", ""); if (strCmd.empty()) return; // Alert text should be plain ascii coming from a trusted source, but to be // safe we first strip anything not in safeChars, then add single quotes // around the whole string before passing it to the shell: std::string singleQuote("'"); std::string safeStatus = SanitizeString(strMessage); safeStatus = singleQuote + safeStatus + singleQuote; boost::replace_all(strCmd, "%s", safeStatus); boost::thread t(runCommand, strCmd); // thread runs free } static void CheckForkWarningConditions() { AssertLockHeld(cs_main); // Before we get past initial download, we cannot reliably alert about forks // (we assume we don't get stuck on a fork before finishing our initial // sync) if (IsInitialBlockDownload()) return; // If our best fork is no longer within 72 blocks (+/- 12 hours if no one // mines it) of our head, drop it if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72) pindexBestForkTip = nullptr; if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6))) { if (!GetfLargeWorkForkFound() && pindexBestForkBase) { std::string warning = std::string("'Warning: Large-work fork detected, forking after " "block ") + pindexBestForkBase->phashBlock->ToString() + std::string("'"); AlertNotify(warning); } if (pindexBestForkTip && pindexBestForkBase) { LogPrintf("%s: Warning: Large valid fork found\n forking the " "chain at height %d (%s)\n lasting to height %d " "(%s).\nChain state database corruption likely.\n", __func__, pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(), pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString()); SetfLargeWorkForkFound(true); } else { LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks " "longer than our best chain.\nChain state database " "corruption likely.\n", __func__); SetfLargeWorkInvalidChainFound(true); } } else { SetfLargeWorkForkFound(false); SetfLargeWorkInvalidChainFound(false); } } static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip) { AssertLockHeld(cs_main); // If we are on a fork that is sufficiently large, set a warning flag CBlockIndex *pfork = pindexNewForkTip; CBlockIndex *plonger = chainActive.Tip(); while (pfork && pfork != plonger) { while (plonger && plonger->nHeight > pfork->nHeight) plonger = plonger->pprev; if (pfork == plonger) break; pfork = pfork->pprev; } // We define a condition where we should warn the user about as a fork of at // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines // it) of ours. 
We use 7 blocks rather arbitrarily as it represents just // under 10% of sustained network hash rate operating on the fork, or a // chain that is entirely longer than ours and invalid (note that this // should be detected by both). We define it this way because it allows us // to only store the highest fork tip (+ base) which meets the 7-block // condition and from this always have the most-likely-to-cause-warning fork if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) && pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) && chainActive.Height() - pindexNewForkTip->nHeight < 72) { pindexBestForkTip = pindexNewForkTip; pindexBestForkBase = pfork; } CheckForkWarningConditions(); } static void InvalidChainFound(CBlockIndex *pindexNew) { if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) pindexBestInvalid = pindexNew; LogPrintf( "%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, log(pindexNew->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime())); CBlockIndex *tip = chainActive.Tip(); assert(tip); LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__, tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime())); CheckForkWarningConditions(); } static void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (!state.CorruptionPossible()) { pindex->nStatus |= BLOCK_FAILED_VALID; setDirtyBlockIndex.insert(pindex); setBlockIndexCandidates.erase(pindex); InvalidChainFound(pindex); } } void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight) { // Mark inputs spent. if (!tx.IsCoinBase()) { txundo.vprevout.reserve(tx.vin.size()); for (const CTxIn &txin : tx.vin) { txundo.vprevout.emplace_back(); bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back()); assert(is_spent); } } // Add outputs. AddCoins(inputs, tx, nHeight); } void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs, int nHeight) { CTxUndo txundo; UpdateCoins(tx, inputs, txundo, nHeight); } bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; return VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, txdata), &error); } int GetSpendHeight(const CCoinsViewCache &inputs) { LOCK(cs_main); CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; return pindexPrev->nHeight + 1; } namespace Consensus { bool CheckTxInputs(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &inputs, int nSpendHeight) { // This doesn't trigger the DoS code on purpose; if it did, it would make it // easier for an attacker to attempt to split the network. 
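// A minimal sketch of the coinbase-maturity rule checked just below: a
// coinbase output is spendable only once the spending height is at least
// COINBASE_MATURITY blocks past the height that created it. The helper and
// harness are illustrative; 100 matches the consensus maturity depth.
#include <cassert>

static const int COINBASE_MATURITY_BLOCKS = 100;

bool CoinbaseSpendable(int coinbaseHeight, int spendHeight) {
    // Depth of the coinbase at the spending height must reach maturity.
    return spendHeight - coinbaseHeight >= COINBASE_MATURITY_BLOCKS;
}

int main() {
    assert(!CoinbaseSpendable(1000, 1099)); // depth 99: still immature
    assert(CoinbaseSpendable(1000, 1100));  // depth 100: spendable
    return 0;
}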
if (!inputs.HaveInputs(tx)) { return state.Invalid(false, 0, "", "Inputs unavailable"); } Amount nValueIn(0); Amount nFees(0); for (size_t i = 0; i < tx.vin.size(); i++) { const COutPoint &prevout = tx.vin[i].prevout; const Coin &coin = inputs.AccessCoin(prevout); assert(!coin.IsSpent()); // If prev is coinbase, check that it's matured if (coin.IsCoinBase()) { if (nSpendHeight - coin.GetHeight() < COINBASE_MATURITY) { return state.Invalid( false, REJECT_INVALID, "bad-txns-premature-spend-of-coinbase", strprintf("tried to spend coinbase at depth %d", nSpendHeight - coin.GetHeight())); } } // Check for negative or overflow input values nValueIn += coin.GetTxOut().nValue; if (!MoneyRange(coin.GetTxOut().nValue) || !MoneyRange(nValueIn)) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange"); } } if (nValueIn < tx.GetValueOut()) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false, strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut()))); } // Tally transaction fees Amount nTxFee = nValueIn - tx.GetValueOut(); if (nTxFee < Amount(0)) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative"); } nFees += nTxFee; if (!MoneyRange(nFees)) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange"); } return true; } } // namespace Consensus bool CheckInputs(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, uint32_t flags, bool sigCacheStore, bool scriptCacheStore, const PrecomputedTransactionData &txdata, std::vector *pvChecks) { assert(!tx.IsCoinBase()); if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) { return false; } if (pvChecks) { pvChecks->reserve(tx.vin.size()); } // The first loop above does all the inexpensive checks. Only if ALL inputs // pass do we perform expensive ECDSA signature checks. Helps prevent CPU // exhaustion attacks. // Skip script verification when connecting blocks under the assumedvalid // block. Assuming the assumedvalid block is valid this is safe because // block merkle hashes are still computed and checked, of course, if an // assumed valid block is invalid due to false scriptSigs this optimization // would allow an invalid chain to be accepted. if (!fScriptChecks) { return true; } // First check if script executions have been cached with the same flags. // Note that this assumes that the inputs provided are correct (ie that the // transaction hash which is in tx's prevouts properly commits to the // scriptPubKey in the inputs view of that transaction). uint256 hashCacheEntry = GetScriptCacheKey(tx, flags); if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore)) { return true; } for (size_t i = 0; i < tx.vin.size(); i++) { const COutPoint &prevout = tx.vin[i].prevout; const Coin &coin = inputs.AccessCoin(prevout); assert(!coin.IsSpent()); // We very carefully only pass in things to CScriptCheck which are // clearly committed to by tx' witness hash. This provides a sanity // check that our caching is not introducing consensus failures through // additional data in, eg, the coins being spent being checked as a part // of CScriptCheck. 
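// A minimal sketch of the idea behind the script-execution cache consulted
// above: successful verifications are remembered under a key committing to
// both the transaction and the exact flag set, so a change in flags is
// automatically a cache miss. ScriptResultCache keys on a txid string for
// illustration only; the real cache derives its key from the transaction and
// the flags.
#include <cstdint>
#include <set>
#include <string>
#include <utility>

class ScriptResultCache {
    std::set<std::pair<std::string, uint32_t>> verified; // (txid, flags)
public:
    bool AlreadyVerified(const std::string &txid, uint32_t flags) const {
        return verified.count({txid, flags}) != 0;
    }
    void MarkVerified(const std::string &txid, uint32_t flags) {
        verified.insert({txid, flags});
    }
};

// Usage mirroring the early return above: skip signature checks when the
// same transaction already passed under the same flags.
bool NeedsScriptChecks(const ScriptResultCache &cache, const std::string &txid,
                       uint32_t flags) {
    return !cache.AlreadyVerified(txid, flags);
}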
const CScript &scriptPubKey = coin.GetTxOut().scriptPubKey; const Amount amount = coin.GetTxOut().nValue; // Verify signature CScriptCheck check(scriptPubKey, amount, tx, i, flags, sigCacheStore, txdata); if (pvChecks) { pvChecks->push_back(std::move(check)); } else if (!check()) { if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { // Check whether the failure was caused by a non-mandatory // script verification check, such as non-standard DER encodings // or non-null dummy arguments; if so, don't trigger DoS // protection to avoid splitting the network between upgraded // and non-upgraded nodes. CScriptCheck check2(scriptPubKey, amount, tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, sigCacheStore, txdata); if (check2()) { return state.Invalid( false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); } } // Failures of other flags indicate a transaction that is invalid in // new blocks, e.g. a invalid P2SH. We DoS ban such nodes as they // are not following the protocol. That said during an upgrade // careful thought should be taken as to the correct behavior - we // may want to continue peering with non-upgraded nodes even after // soft-fork super-majority signaling has occurred. return state.DoS( 100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); } } if (scriptCacheStore && !pvChecks) { // We executed all of the provided scripts, and were told to cache the // result. Do so now. AddKeyInScriptCache(hashCacheEntry); } return true; } namespace { bool UndoWriteToDisk(const CBlockUndo &blockundo, CDiskBlockPos &pos, const uint256 &hashBlock, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) return error("%s: OpenUndoFile failed", __func__); // Write index header unsigned int nSize = GetSerializeSize(fileout, blockundo); fileout << FLATDATA(messageStart) << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) return error("%s: ftell failed", __func__); pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; // calculate & write checksum CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); hasher << hashBlock; hasher << blockundo; fileout << hasher.GetHash(); return true; } bool UndoReadFromDisk(CBlockUndo &blockundo, const CDiskBlockPos &pos, const uint256 &hashBlock) { // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Read block uint256 hashChecksum; // We need a CHashVerifier as reserializing may lose data CHashVerifier verifier(&filein); try { verifier << hashBlock; verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } // Verify checksum if (hashChecksum != verifier.GetHash()) { return error("%s: Checksum mismatch", __func__); } return true; } /** Abort with a message */ bool AbortNode(const std::string &strMessage, const std::string &userMessage = "") { SetMiscWarning(strMessage); LogPrintf("*** %s\n", strMessage); uiInterface.ThreadSafeMessageBox( userMessage.empty() ? 
_("Error: A fatal internal error occurred, see " "debug.log for details") : userMessage, "", CClientUIInterface::MSG_ERROR); StartShutdown(); return false; } bool AbortNode(CValidationState &state, const std::string &strMessage, const std::string &userMessage = "") { AbortNode(strMessage, userMessage); return state.Error(strMessage); } } // namespace /** Restore the UTXO in a Coin at a given COutPoint. */ DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view, const COutPoint &out) { bool fClean = true; if (view.HaveCoin(out)) { // Overwriting transaction output. fClean = false; } if (undo.GetHeight() == 0) { // Missing undo metadata (height and coinbase). Older versions included // this information only in undo records for the last spend of a // transactions' outputs. This implies that it must be present for some // other output of the same tx. const Coin &alternate = AccessByTxid(view, out.hash); if (alternate.IsSpent()) { // Adding output for transaction without known metadata return DISCONNECT_FAILED; } // This is somewhat ugly, but hopefully utility is limited. This is only // useful when working from legacy on disck data. In any case, putting // the correct information in there doesn't hurt. const_cast(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(), alternate.IsCoinBase()); } // The potential_overwrite parameter to AddCoin is only allowed to be false // if we know for sure that the coin did not already exist in the cache. As // we have queried for that above using HaveCoin, we don't need to guess. // When fClean is false, a coin already existed and it is an overwrite. view.AddCoin(out, std::move(undo), !fClean); return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; } /** * Undo the effects of this block (with given index) on the UTXO set represented * by coins. When FAILED is returned, view is left in an indeterminate state. */ static DisconnectResult DisconnectBlock(const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { CBlockUndo blockUndo; CDiskBlockPos pos = pindex->GetUndoPos(); if (pos.IsNull()) { error("DisconnectBlock(): no undo data available"); return DISCONNECT_FAILED; } if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) { error("DisconnectBlock(): failure reading undo data"); return DISCONNECT_FAILED; } return ApplyBlockUndo(blockUndo, block, pindex, view); } DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo, const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { bool fClean = true; if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { error("DisconnectBlock(): block and undo data inconsistent"); return DISCONNECT_FAILED; } // Undo transactions in reverse order. size_t i = block.vtx.size(); while (i-- > 0) { const CTransaction &tx = *(block.vtx[i]); uint256 txid = tx.GetId(); // Check that all outputs are available and match the outputs in the // block itself exactly. for (size_t o = 0; o < tx.vout.size(); o++) { if (tx.vout[o].scriptPubKey.IsUnspendable()) { continue; } COutPoint out(txid, o); Coin coin; bool is_spent = view.SpendCoin(out, &coin); if (!is_spent || tx.vout[o] != coin.GetTxOut()) { // transaction output mismatch fClean = false; } } // Restore inputs. if (i < 1) { // Skip the coinbase. 
continue; } const CTxUndo &txundo = blockUndo.vtxundo[i - 1]; if (txundo.vprevout.size() != tx.vin.size()) { error("DisconnectBlock(): transaction and undo data inconsistent"); return DISCONNECT_FAILED; } for (size_t j = tx.vin.size(); j-- > 0;) { const COutPoint &out = tx.vin[j].prevout; const Coin &undo = txundo.vprevout[j]; DisconnectResult res = UndoCoinSpend(undo, view, out); if (res == DISCONNECT_FAILED) { return DISCONNECT_FAILED; } fClean = fClean && res != DISCONNECT_UNCLEAN; } } // Move best block pointer to previous block. view.SetBestBlock(block.hashPrevBlock); return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; } static void FlushBlockFile(bool fFinalize = false) { LOCK(cs_LastBlockFile); CDiskBlockPos posOld(nLastBlockFile, 0); FILE *fileOld = OpenBlockFile(posOld); if (fileOld) { if (fFinalize) TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); FileCommit(fileOld); fclose(fileOld); } fileOld = OpenUndoFile(posOld); if (fileOld) { if (fFinalize) TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); FileCommit(fileOld); fclose(fileOld); } } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); static CCheckQueue scriptcheckqueue(128); void ThreadScriptCheck() { RenameThread("bitcoin-scriptch"); scriptcheckqueue.Thread(); } // Protected by cs_main VersionBitsCache versionbitscache; int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev, const Consensus::Params ¶ms) { LOCK(cs_main); int32_t nVersion = VERSIONBITS_TOP_BITS; for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { ThresholdState state = VersionBitsState( pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache); if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) { nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i); } } return nVersion; } /** * Threshold condition checker that triggers when unknown versionbits are seen * on the network. */ class WarningBitsConditionChecker : public AbstractThresholdConditionChecker { private: int bit; public: WarningBitsConditionChecker(int bitIn) : bit(bitIn) {} int64_t BeginTime(const Consensus::Params ¶ms) const override { return 0; } int64_t EndTime(const Consensus::Params ¶ms) const override { return std::numeric_limits::max(); } int Period(const Consensus::Params ¶ms) const override { return params.nMinerConfirmationWindow; } int Threshold(const Consensus::Params ¶ms) const override { return params.nRuleChangeActivationThreshold; } bool Condition(const CBlockIndex *pindex, const Consensus::Params ¶ms) const override { return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && ((pindex->nVersion >> bit) & 1) != 0 && ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0; } }; // Protected by cs_main static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS]; // Returns the script flags which should be checked for a given block static uint32_t GetBlockScriptFlags(const CBlockIndex *pindex, const Config &config) { AssertLockHeld(cs_main); const Consensus::Params &consensusparams = config.GetChainParams().GetConsensus(); // BIP16 didn't become active until Apr 1 2012 int64_t nBIP16SwitchTime = 1333238400; bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime); uint32_t flags = fStrictPayToScriptHash ? 
SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE; // Start enforcing the DERSIG (BIP66) rule if (pindex->nHeight >= consensusparams.BIP66Height) { flags |= SCRIPT_VERIFY_DERSIG; } // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule if (pindex->nHeight >= consensusparams.BIP65Height) { flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; } // Start enforcing BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } // If the UAHF is enabled, we start accepting replay protected txns if (IsUAHFenabled(config, pindex->pprev)) { flags |= SCRIPT_VERIFY_STRICTENC; flags |= SCRIPT_ENABLE_SIGHASH_FORKID; } // If the DAA HF is enabled, we start rejecting transaction that use a high // s in their signature. We also make sure that signature that are supposed // to fail (for instance in multisig or other forms of smart contracts) are // null. if (IsDAAEnabled(config, pindex->pprev)) { flags |= SCRIPT_VERIFY_LOW_S; flags |= SCRIPT_VERIFY_NULLFAIL; } return flags; } static int64_t nTimeCheck = 0; static int64_t nTimeForks = 0; static int64_t nTimeVerify = 0; static int64_t nTimeConnect = 0; static int64_t nTimeIndex = 0; static int64_t nTimeCallbacks = 0; static int64_t nTimeTotal = 0; /** * Apply the effects of this block (with given index) on the UTXO set * represented by coins. Validity checks that depend on the UTXO set are also * done; ConnectBlock() can fail if those validity checks fail (among other * reasons). */ static bool ConnectBlock(const Config &config, const CBlock &block, CValidationState &state, CBlockIndex *pindex, CCoinsViewCache &view, bool fJustCheck = false) { AssertLockHeld(cs_main); int64_t nTimeStart = GetTimeMicros(); // Check it again in case a previous version let a bad block in if (!CheckBlock(config, block, state, !fJustCheck, !fJustCheck)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } // Verify that the view's current state corresponds to the previous block uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); // Special case for the genesis block, skipping connection of its // transactions (its coinbase is unspendable) const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); if (block.GetHash() == consensusParams.hashGenesisBlock) { if (!fJustCheck) { view.SetBestBlock(pindex->GetBlockHash()); } return true; } bool fScriptChecks = true; if (!hashAssumeValid.IsNull()) { // We've been configured with the hash of a block which has been // externally verified to have a valid history. A suitable default value // is included with the software and updated from time to time. Because // validity relative to a piece of software is an objective fact these // defaults can be easily reviewed. This setting doesn't force the // selection of any particular chain but makes validating some faster by // effectively caching the result of part of the verification. BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); if (it != mapBlockIndex.end()) { if (it->second->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->nChainWork >= UintToArith256(consensusParams.nMinimumChainWork)) { // This block is a member of the assumed verified chain and an // ancestor of the best header. 
The equivalent time check // discourages hashpower from extorting the network via DOS // attack into accepting an invalid block through telling users // they must manually set assumevalid. Requiring a software // change or burying the invalid block, regardless of the // setting, makes it hard to hide the implication of the demand. // This also avoids having release candidates that are hardly // doing any signature verification at all in testing without // having to artificially set the default assumed verified block // further back. The test against nMinimumChainWork prevents the // skipping when denied access to any chain at least as good as // the expected chain. fScriptChecks = (GetBlockProofEquivalentTime( *pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) <= 60 * 60 * 24 * 7 * 2); } } } int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001); // Do not allow blocks that contain transactions which 'overwrite' older // transactions, unless those are already completely spent. If such // overwrites are allowed, coinbases and transactions depending upon those // can be duplicated to remove the ability to spend the first instance -- // even after being sent to another address. See BIP30 and // http://r6.ca/blog/20120206T005236Z.html for more information. This logic // is not necessary for memory pool transactions, as AcceptToMemoryPool // already refuses previously-known transaction ids entirely. This rule was // originally applied to all blocks with a timestamp after March 15, 2012, // 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is // applied to all blocks except the two in the chain that violate it. This // prevents exploiting the issue against nodes during their initial block // download. bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock // invocations which don't // have a hash. !((pindex->nHeight == 91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763" "b1f4360639393e0e4c8e300e0caec")) || (pindex->nHeight == 91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f" "610ae9601ac046a38084ccb7cd721"))); // Once BIP34 activated it was not possible to create new duplicate // coinbases and thus other than starting with the 2 existing duplicate // coinbase pairs, not possible to create overwriting txs. But by the time // BIP34 activated, in each of the existing pairs the duplicate coinbase had // overwritten the first before the first had been spent. Since those // coinbases are sufficiently buried its no longer possible to create // further duplicate transactions descending from the known pairs either. If // we're on the known chain at height greater than where BIP34 activated, we // can save the db accesses needed for the BIP30 check. CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(consensusParams.BIP34Height); // Only continue to enforce if we're below BIP34 activation height or the // block hash at that height doesn't correspond. 
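// A minimal sketch of the BIP30 gating computed right below: the duplicate
// txid scan runs unless the block is one of the two historical exceptions at
// heights 91842 and 91880, or the chain is already past a verified BIP34
// activation point, after which new duplicate coinbases cannot occur. The
// BlockRef type and parameters are assumptions of this sketch.
#include <string>

struct BlockRef {
    int height;
    std::string hash;
};

bool ShouldEnforceBIP30(const BlockRef &block, bool pastVerifiedBIP34,
                        const std::string &exceptionHash91842,
                        const std::string &exceptionHash91880) {
    const bool historicalException =
        (block.height == 91842 && block.hash == exceptionHash91842) ||
        (block.height == 91880 && block.hash == exceptionHash91880);
    if (historicalException) {
        return false;
    }
    // Once BIP34 is known active on this chain, the per-output HaveCoin()
    // scan can safely be skipped.
    return !pastVerifiedBIP34;
}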
    fEnforceBIP30 =
        fEnforceBIP30 &&
        (!pindexBIP34height ||
         !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));

    if (fEnforceBIP30) {
        for (const auto &tx : block.vtx) {
            for (size_t o = 0; o < tx->vout.size(); o++) {
                if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
                    return state.DoS(
                        100,
                        error("ConnectBlock(): tried to overwrite transaction"),
                        REJECT_INVALID, "bad-txns-BIP30");
                }
            }
        }
    }

    // Start enforcing BIP68 (sequence locks) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindex->pprev, consensusParams,
                         Consensus::DEPLOYMENT_CSV,
                         versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    uint32_t flags = GetBlockScriptFlags(pindex, config);

    int64_t nTime2 = GetTimeMicros();
    nTimeForks += nTime2 - nTime1;
    LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n",
             0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);

    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
                                                           : nullptr);

    std::vector<int> prevheights;
    Amount nFees(0);
    int nInputs = 0;

    // Sigops counting. We need to do it again because of P2SH.
    uint64_t nSigOpsCount = 0;
    const uint64_t currentBlockSize =
        ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    const uint64_t nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);

    CDiskTxPos pos(pindex->GetBlockPos(),
                   GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos>> vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);

    for (size_t i = 0; i < block.vtx.size(); i++) {
        const CTransaction &tx = *(block.vtx[i]);

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase()) {
            if (!view.HaveInputs(tx)) {
                return state.DoS(
                    100, error("ConnectBlock(): inputs missing/spent"),
                    REJECT_INVALID, "bad-txns-inputs-missingorspent");
            }

            // Check that the transaction is BIP68 final. BIP68 lock checks
            // (as opposed to nLockTime checks) must be in ConnectBlock
            // because they require the UTXO set.
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] =
                    view.AccessCoin(tx.vin[j].prevout).GetHeight();
            }

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(
                    100,
                    error("%s: contains a non-BIP68-final transaction",
                          __func__),
                    REJECT_INVALID, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCount counts 2 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        auto txSigOpsCount = GetTransactionSigOpCount(tx, view, flags);
        if (txSigOpsCount > MAX_TX_SIGOPS_COUNT) {
            return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops");
        }

        nSigOpsCount += txSigOpsCount;
        if (nSigOpsCount > nMaxSigOpsCount) {
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");
        }

        if (!tx.IsCoinBase()) {
            Amount fee = view.GetValueIn(tx) - tx.GetValueOut();
            nFees += fee;

            // Don't cache results if we're actually connecting blocks (still
            // consult the cache, though).
            bool fCacheResults = fJustCheck;

            std::vector<CScriptCheck> vChecks;
            if (!CheckInputs(tx, state, view, fScriptChecks, flags,
                             fCacheResults, fCacheResults,
                             PrecomputedTransactionData(tx), &vChecks)) {
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                             tx.GetId().ToString(), FormatStateMessage(state));
            }

            control.Add(vChecks);
        }

        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ?
undoDummy : blockundo.vtxundo.back(), pindex->nHeight); vPos.push_back(std::make_pair(tx.GetId(), pos)); pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); } int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, " "%.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs - 1), nTimeConnect * 0.000001); Amount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, consensusParams); if (block.vtx[0]->GetValueOut() > blockReward) { return state.DoS(100, error("ConnectBlock(): coinbase pays too much " "(actual=%d vs limit=%d)", block.vtx[0]->GetValueOut(), blockReward), REJECT_INVALID, "bad-cb-amount"); } if (!control.Wait()) { return state.DoS(100, false, REJECT_INVALID, "blk-bad-inputs", false, "parallel script check failed"); } int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs - 1), nTimeVerify * 0.000001); if (fJustCheck) { return true; } // Write undo information to disk if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS)) { if (pindex->GetUndoPos().IsNull()) { CDiskBlockPos _pos; if (!FindUndoPos( state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) { return error("ConnectBlock(): FindUndoPos failed"); } if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), config.GetChainParams().DiskMagic())) { return AbortNode(state, "Failed to write undo data"); } // update nUndoPos in block index pindex->nUndoPos = _pos.nPos; pindex->nStatus |= BLOCK_HAVE_UNDO; } pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); setDirtyBlockIndex.insert(pindex); } if (fTxIndex && !pblocktree->WriteTxIndex(vPos)) { return AbortNode(state, "Failed to write transaction index"); } // add this block to the view's block chain view.SetBestBlock(pindex->GetBlockHash()); int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001); int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001); return true; } /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with if * they're too large, if it's been a while since the last write, or always and * in all cases if we're in prune mode and are deleting files. 
*/ static bool FlushStateToDisk(const CChainParams &chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); LOCK2(cs_main, cs_LastBlockFile); static int64_t nLastWrite = 0; static int64_t nLastFlush = 0; static int64_t nLastSetChain = 0; std::set setFilesToPrune; bool fFlushForPrune = false; try { if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { if (nManualPruneHeight > 0) { FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); } else { FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); fCheckForPruning = false; } if (!setFilesToPrune.empty()) { fFlushForPrune = true; if (!fHavePruned) { pblocktree->WriteFlag("prunedblockfiles", true); fHavePruned = true; } } } int64_t nNow = GetTimeMicros(); // Avoid writing/flushing immediately after startup. if (nLastWrite == 0) { nLastWrite = nNow; } if (nLastFlush == 0) { nLastFlush = nNow; } if (nLastSetChain == 0) { nLastSetChain = nNow; } int64_t nMempoolSizeMax = - GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; + gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, but // we have time now (not in the middle of a block processing). bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); // The cache is over the limit, we have to write now. bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; // It's been a while since we wrote the block index to disk. Do this // frequently, so we don't need to redownload after a crash. bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; // It's been very long since we flushed the cache. Do this infrequently, // to optimize cache usage. bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; // Combine all conditions that result in a full cache flush. bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; // Write blocks and block index to disk. if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index if (!CheckDiskSpace(0)) return state.Error("out of disk space"); // First make sure all block and undo data is flushed to disk. FlushBlockFile(); // Then update all block file information (which may refer to block // and undo files). { std::vector> vFiles; vFiles.reserve(setDirtyFileInfo.size()); for (std::set::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end();) { vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it])); setDirtyFileInfo.erase(it++); } std::vector vBlocks; vBlocks.reserve(setDirtyBlockIndex.size()); for (std::set::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end();) { vBlocks.push_back(*it); setDirtyBlockIndex.erase(it++); } if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { return AbortNode(state, "Failed to write to block index database"); } } // Finally remove any pruned files if (fFlushForPrune) UnlinkPrunedFiles(setFilesToPrune); nLastWrite = nNow; } // Flush best chain related state. 
This can only be done if the blocks / // block index write was also done. if (fDoFullFlush) { // Typical Coin structures on disk are around 48 bytes in size. // Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is already // an overestimation, as most will delete an existing entry or // overwrite one. Still, use a conservative safety factor of 2. if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) { return state.Error("out of disk space"); } // Flush the chainstate (which may refer to block index entries). if (!pcoinsTip->Flush()) { return AbortNode(state, "Failed to write to coin database"); } nLastFlush = nNow; } if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). GetMainSignals().SetBestChain(chainActive.GetLocator()); nLastSetChain = nNow; } } catch (const std::runtime_error &e) { return AbortNode( state, std::string("System error while flushing: ") + e.what()); } return true; } void FlushStateToDisk() { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS); } void PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); } /** Update chainActive and related internal data structures. */ static void UpdateTip(const Config &config, CBlockIndex *pindexNew) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); chainActive.SetTip(pindexNew); // New best block mempool.AddTransactionsUpdated(1); cvBlockChange.notify_all(); static bool fWarned = false; std::vector warningMessages; if (!IsInitialBlockDownload()) { int nUpgraded = 0; const CBlockIndex *pindex = chainActive.Tip(); for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { WarningBitsConditionChecker checker(bit); ThresholdState state = checker.GetStateFor(pindex, consensusParams, warningcache[bit]); if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) { if (state == THRESHOLD_ACTIVE) { std::string strWarning = strprintf(_("Warning: unknown new rules activated " "(versionbit %i)"), bit); SetMiscWarning(strWarning); if (!fWarned) { AlertNotify(strWarning); fWarned = true; } } else { warningMessages.push_back( strprintf("unknown new rules are about to activate " "(versionbit %i)", bit)); } } } // Check the version of the last 100 blocks to see if we need to // upgrade: for (int i = 0; i < 100 && pindex != nullptr; i++) { int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, consensusParams); if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) ++nUpgraded; pindex = pindex->pprev; } if (nUpgraded > 0) warningMessages.push_back(strprintf( "%d of last 100 blocks have unexpected version", nUpgraded)); if (nUpgraded > 100 / 2) { std::string strWarning = _("Warning: Unknown block versions being mined! 
It's possible " "unknown rules are in effect"); // notify GetWarnings(), called by Qt and the JSON-RPC code to warn // the user: SetMiscWarning(strWarning); if (!fWarned) { AlertNotify(strWarning); fWarned = true; } } } LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu " "date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion, log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0), (unsigned long)chainActive.Tip()->nChainTx, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(config.GetChainParams().TxData(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)), pcoinsTip->GetCacheSize()); if (!warningMessages.empty()) LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", ")); LogPrintf("\n"); } /** * Disconnect chainActive's tip. * After calling, the mempool will be in an inconsistent state, with * transactions from disconnected blocks being added to disconnectpool. You * should make the mempool consistent again by calling UpdateMempoolForReorg. * with cs_main held. * * If disconnectpool is nullptr, then no disconnected transactions are added to * disconnectpool (note that the caller is responsible for mempool consistency * in any case). */ static bool DisconnectTip(const Config &config, CValidationState &state, DisconnectedBlockTransactions *disconnectpool) { CBlockIndex *pindexDelete = chainActive.Tip(); assert(pindexDelete); // Read block from disk. std::shared_ptr pblock = std::make_shared(); CBlock &block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, config)) { return AbortNode(state, "Failed to read block"); } // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); { CCoinsViewCache view(pcoinsTip); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); } bool flushed = view.Flush(); assert(flushed); } LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } if (disconnectpool) { // Save transactions to re-add to mempool at end of reorg for (const auto &tx : boost::adaptors::reverse(block.vtx)) { disconnectpool->addTransaction(tx); } while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. auto it = disconnectpool->queuedTx.get().begin(); mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); disconnectpool->removeEntry(it); } } // Update chainActive and related variables. 
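    // UpdateTip() (defined above) points chainActive at the previous block,
    // bumps the mempool's transactions-updated counter and re-evaluates the
    // version-bit warning state for the new tip.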
UpdateTip(config, pindexDelete->pprev); // Let wallets know transactions went from 1-confirmed to // 0-confirmed or conflicted: GetMainSignals().BlockDisconnected(pblock); return true; } static int64_t nTimeReadFromDisk = 0; static int64_t nTimeConnectTotal = 0; static int64_t nTimeFlush = 0; static int64_t nTimeChainState = 0; static int64_t nTimePostConnect = 0; struct PerBlockConnectTrace { CBlockIndex *pindex = nullptr; std::shared_ptr pblock; std::shared_ptr> conflictedTxs; PerBlockConnectTrace() : conflictedTxs(std::make_shared>()) {} }; /** * Used to track blocks whose transactions were applied to the UTXO state as a * part of a single ActivateBestChainStep call. * * This class also tracks transactions that are removed from the mempool as * conflicts (per block) and can be used to pass all those transactions through * SyncTransaction. * * This class assumes (and asserts) that the conflicted transactions for a given * block are added via mempool callbacks prior to the BlockConnected() * associated with those transactions. If any transactions are marked * conflicted, it is assumed that an associated block will always be added. * * This class is single-use, once you call GetBlocksConnected() you have to * throw it away and make a new one. */ class ConnectTrace { private: std::vector blocksConnected; CTxMemPool &pool; public: ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { pool.NotifyEntryRemoved.connect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } ~ConnectTrace() { pool.NotifyEntryRemoved.disconnect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } void BlockConnected(CBlockIndex *pindex, std::shared_ptr pblock) { assert(!blocksConnected.back().pindex); assert(pindex); assert(pblock); blocksConnected.back().pindex = pindex; blocksConnected.back().pblock = std::move(pblock); blocksConnected.emplace_back(); } std::vector &GetBlocksConnected() { // We always keep one extra block at the end of our list because blocks // are added after all the conflicted transactions have been filled in. // Thus, the last entry should always be an empty one waiting for the // transactions from the next block. We pop the last entry here to make // sure the list we return is sane. assert(!blocksConnected.back().pindex); assert(blocksConnected.back().conflictedTxs->empty()); blocksConnected.pop_back(); return blocksConnected; } void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) { assert(!blocksConnected.back().pindex); if (reason == MemPoolRemovalReason::CONFLICT) { blocksConnected.back().conflictedTxs->emplace_back( std::move(txRemoved)); } } }; /** * Connect a new block to chainActive. pblock is either nullptr or a pointer to * a CBlock corresponding to pindexNew, to bypass loading it again from disk. * * The block is always added to connectTrace (either after loading from disk or * by copying pblock) - if that is not intended, care must be taken to remove * the last entry in blocksConnected in case of failure. */ static bool ConnectTip(const Config &config, CValidationState &state, CBlockIndex *pindexNew, const std::shared_ptr &pblock, ConnectTrace &connectTrace, DisconnectedBlockTransactions &disconnectpool) { assert(pindexNew->pprev == chainActive.Tip()); // Read block from disk. 
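    // If the caller did not hand us the block in memory (pblock == nullptr),
    // it is read from disk into a locally owned shared_ptr; either way,
    // pthisBlock keeps the block alive for the rest of the connect and for
    // connectTrace.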
int64_t nTime1 = GetTimeMicros(); std::shared_ptr pthisBlock; if (!pblock) { std::shared_ptr pblockNew = std::make_shared(); if (!ReadBlockFromDisk(*pblockNew, pindexNew, config)) { return AbortNode(state, "Failed to read block"); } pthisBlock = pblockNew; } else { pthisBlock = pblock; } const CBlock &blockConnecting = *pthisBlock; // Apply the block atomically to the chain state. int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; int64_t nTime3; LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); { CCoinsViewCache view(pcoinsTip); bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { if (state.IsInvalid()) { InvalidBlockFound(pindexNew, state); } return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); bool flushed = view.Flush(); assert(flushed); } int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); // Remove conflicting transactions from the mempool.; mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); disconnectpool.removeForBlock(blockConnecting.vtx); // Update chainActive & related variables. UpdateTip(config, pindexNew); int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); return true; } /** * Return the tip of the chain with the most work in it, that isn't known to be * invalid (it's however far from certain to be valid). */ static CBlockIndex *FindMostWorkChain() { do { CBlockIndex *pindexNew = nullptr; // Find the best candidate header. { std::set::reverse_iterator it = setBlockIndexCandidates.rbegin(); if (it == setBlockIndexCandidates.rend()) return nullptr; pindexNew = *it; } // Check whether all blocks on the path between the currently active // chain and the candidate are valid. Just going until the active chain // is an optimization, as we know all blocks in it are valid already. CBlockIndex *pindexTest = pindexNew; bool fInvalidAncestor = false; while (pindexTest && !chainActive.Contains(pindexTest)) { assert(pindexTest->nChainTx || pindexTest->nHeight == 0); // Pruned nodes may have entries in setBlockIndexCandidates for // which block files have been deleted. Remove those as candidates // for the most work chain if we come across them; we can't switch // to a chain unless we have all the non-active-chain parent blocks. 
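            // A BLOCK_FAILED_* ancestor disqualifies the whole branch, while
            // a missing-data ancestor only postpones it: such blocks are
            // moved back to mapBlocksUnlinked so they can become candidates
            // again once their data arrives.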
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); if (fFailedChain || fMissingData) { // Candidate chain is not usable (either invalid or missing // data) if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) pindexBestInvalid = pindexNew; CBlockIndex *pindexFailed = pindexNew; // Remove the entire chain from the set. while (pindexTest != pindexFailed) { if (fFailedChain) { pindexFailed->nStatus |= BLOCK_FAILED_CHILD; } else if (fMissingData) { // If we're missing data, then add back to // mapBlocksUnlinked, so that if the block arrives in // the future we can try adding to // setBlockIndexCandidates again. mapBlocksUnlinked.insert( std::make_pair(pindexFailed->pprev, pindexFailed)); } setBlockIndexCandidates.erase(pindexFailed); pindexFailed = pindexFailed->pprev; } setBlockIndexCandidates.erase(pindexTest); fInvalidAncestor = true; break; } pindexTest = pindexTest->pprev; } if (!fInvalidAncestor) return pindexNew; } while (true); } /** Delete all entries in setBlockIndexCandidates that are worse than the * current tip. */ static void PruneBlockIndexCandidates() { // Note that we can't delete the current block itself, as we may need to // return to it later in case a reorganization to a better block fails. std::set::iterator it = setBlockIndexCandidates.begin(); while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) { setBlockIndexCandidates.erase(it++); } // Either the current tip or a successor of it we're working towards is left // in setBlockIndexCandidates. assert(!setBlockIndexCandidates.empty()); } /** * Try to make some progress towards making pindexMostWork the active block. * pblock is either nullptr or a pointer to a CBlock corresponding to * pindexMostWork. */ static bool ActivateBestChainStep(const Config &config, CValidationState &state, CBlockIndex *pindexMostWork, const std::shared_ptr &pblock, bool &fInvalidFound, ConnectTrace &connectTrace) { AssertLockHeld(cs_main); const CBlockIndex *pindexOldTip = chainActive.Tip(); const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); // Disconnect active blocks which are no longer in the best chain. bool fBlocksDisconnected = false; DisconnectedBlockTransactions disconnectpool; while (chainActive.Tip() && chainActive.Tip() != pindexFork) { if (!DisconnectTip(config, state, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, // just in case. Only remove from the mempool in this case. UpdateMempoolForReorg(config, disconnectpool, false); return false; } fBlocksDisconnected = true; } // Build list of new blocks to connect. std::vector vpindexToConnect; bool fContinue = true; int nHeight = pindexFork ? pindexFork->nHeight : -1; while (fContinue && nHeight != pindexMostWork->nHeight) { // Don't iterate the entire list of potential improvements toward the // best tip, as we likely only need a few blocks along the way. int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); vpindexToConnect.clear(); vpindexToConnect.reserve(nTargetHeight - nHeight); CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); while (pindexIter && pindexIter->nHeight != nHeight) { vpindexToConnect.push_back(pindexIter); pindexIter = pindexIter->pprev; } nHeight = nTargetHeight; // Connect new blocks. 
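        // Blocks are connected oldest-first in batches of at most 32
        // (nTargetHeight above), which is why vpindexToConnect is walked in
        // reverse below.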
for (CBlockIndex *pindexConnect : boost::adaptors::reverse(vpindexToConnect)) { if (!ConnectTip(config, state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr(), connectTrace, disconnectpool)) { if (state.IsInvalid()) { // The block violates a consensus rule. if (!state.CorruptionPossible()) InvalidChainFound(vpindexToConnect.back()); state = CValidationState(); fInvalidFound = true; fContinue = false; break; } else { // A system error occurred (disk space, database error, // ...). // Make the mempool consistent with the current tip, just in // case any observers try to use it before shutdown. UpdateMempoolForReorg(config, disconnectpool, false); return false; } } else { PruneBlockIndexCandidates(); if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) { // We're in a better position than we were. Return // temporarily to release the lock. fContinue = false; break; } } } } if (fBlocksDisconnected) { // If any blocks were disconnected, disconnectpool may be non empty. Add // any disconnected transactions back to the mempool. UpdateMempoolForReorg(config, disconnectpool, true); } mempool.check(pcoinsTip); // Callbacks/notifications for a new best chain. if (fInvalidFound) CheckForkWarningConditionsOnNewFork(vpindexToConnect.back()); else CheckForkWarningConditions(); return true; } static void NotifyHeaderTip() { bool fNotify = false; bool fInitialBlockDownload = false; static CBlockIndex *pindexHeaderOld = nullptr; CBlockIndex *pindexHeader = nullptr; { LOCK(cs_main); pindexHeader = pindexBestHeader; if (pindexHeader != pindexHeaderOld) { fNotify = true; fInitialBlockDownload = IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } } // Send block tip changed notifications without cs_main if (fNotify) { uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader); } } /** * Make the best chain active, in multiple steps. The result is either failure * or an activated best chain. pblock is either nullptr or a pointer to a block * that is already loaded (to avoid loading it again from disk). */ bool ActivateBestChain(const Config &config, CValidationState &state, std::shared_ptr pblock) { // Note that while we're often called here from ProcessNewBlock, this is // far from a guarantee. Things in the P2P/RPC will often end up calling // us in the middle of ProcessNewBlock - do not assume pblock is set // sanely for performance or correctness! CBlockIndex *pindexMostWork = nullptr; CBlockIndex *pindexNewTip = nullptr; do { boost::this_thread::interruption_point(); if (ShutdownRequested()) break; const CBlockIndex *pindexFork; bool fInitialDownload; { LOCK(cs_main); // Destructed before cs_main is unlocked. ConnectTrace connectTrace(mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == nullptr) { pindexMostWork = FindMostWorkChain(); } // Whether we have anything to do at all. if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) { return true; } bool fInvalidFound = false; std::shared_ptr nullBlockPtr; if (!ActivateBestChainStep( config, state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) { return false; } if (fInvalidFound) { // Wipe cache, we may need another branch now. 
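                // Forget the previously computed best candidate so that
                // FindMostWorkChain() is evaluated again on the next pass,
                // now that this branch is marked invalid.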
pindexMostWork = nullptr; } pindexNewTip = chainActive.Tip(); pindexFork = chainActive.FindFork(pindexOldTip); fInitialDownload = IsInitialBlockDownload(); for (const PerBlockConnectTrace &trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); GetMainSignals().BlockConnected(trace.pblock, trace.pindex, *trace.conflictedTxs); } } // When we reach this point, we switched to a new tip (stored in // pindexNewTip). // Notifications/callbacks that can run without cs_main // Notify external listeners about the new tip. GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); // Always notify the UI if a new block tip was connected if (pindexFork != pindexNewTip) { uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip); } } while (pindexNewTip != pindexMostWork); const CChainParams ¶ms = config.GetChainParams(); CheckBlockIndex(params.GetConsensus()); // Write changes periodically to disk, after relay. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } - int nStopAtHeight = GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); + int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) { StartShutdown(); } return true; } bool PreciousBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { { LOCK(cs_main); if (pindex->nChainWork < chainActive.Tip()->nChainWork) { // Nothing to do, this block is not at the tip. return true; } if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) { // The chain has been extended since the last call, reset the // counter. nBlockReverseSequenceId = -1; } nLastPreciousChainwork = chainActive.Tip()->nChainWork; setBlockIndexCandidates.erase(pindex); pindex->nSequenceId = nBlockReverseSequenceId; if (nBlockReverseSequenceId > std::numeric_limits::min()) { // We can't keep reducing the counter if somebody really wants to // call preciousblock 2**31-1 times on the same set of tips... nBlockReverseSequenceId--; } if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) { setBlockIndexCandidates.insert(pindex); PruneBlockIndexCandidates(); } } return ActivateBestChain(config, state); } bool InvalidateBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { AssertLockHeld(cs_main); // Mark the block itself as invalid. pindex->nStatus |= BLOCK_FAILED_VALID; setDirtyBlockIndex.insert(pindex); setBlockIndexCandidates.erase(pindex); DisconnectedBlockTransactions disconnectpool; while (chainActive.Contains(pindex)) { CBlockIndex *pindexWalk = chainActive.Tip(); pindexWalk->nStatus |= BLOCK_FAILED_CHILD; setDirtyBlockIndex.insert(pindexWalk); setBlockIndexCandidates.erase(pindexWalk); // ActivateBestChain considers blocks already in chainActive // unconditionally valid already, so force disconnect away from it. if (!DisconnectTip(config, state, &disconnectpool)) { // It's probably hopeless to try to make the mempool consistent // here if DisconnectTip failed, but we can try. UpdateMempoolForReorg(config, disconnectpool, false); return false; } } // DisconnectTip will add transactions to disconnectpool; try to add these // back to the mempool. UpdateMempoolForReorg(config, disconnectpool, true); // The resulting new best tip may not be in setBlockIndexCandidates anymore, // so add it again. 
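    // Rescan the whole block index and re-add every entry with complete
    // transaction data that does not sort below the new, shorter tip under
    // the chain-work comparator; this puts the new tip itself back into the
    // set if it had been dropped.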
BlockMap::iterator it = mapBlockIndex.begin(); while (it != mapBlockIndex.end()) { if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) { setBlockIndexCandidates.insert(it->second); } it++; } InvalidChainFound(pindex); uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev); return true; } bool ResetBlockFailureFlags(CBlockIndex *pindex) { AssertLockHeld(cs_main); int nHeight = pindex->nHeight; // Remove the invalidity flag from this block and all its descendants. BlockMap::iterator it = mapBlockIndex.begin(); while (it != mapBlockIndex.end()) { if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { it->second->nStatus &= ~BLOCK_FAILED_MASK; setDirtyBlockIndex.insert(it->second); if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) { setBlockIndexCandidates.insert(it->second); } if (it->second == pindexBestInvalid) { // Reset invalid block marker if it was pointing to one of // those. pindexBestInvalid = nullptr; } } it++; } // Remove the invalidity flag from all ancestors too. while (pindex != nullptr) { if (pindex->nStatus & BLOCK_FAILED_MASK) { pindex->nStatus &= ~BLOCK_FAILED_MASK; setDirtyBlockIndex.insert(pindex); } pindex = pindex->pprev; } return true; } static CBlockIndex *AddToBlockIndex(const CBlockHeader &block) { // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end()) return it->second; // Construct new block index object CBlockIndex *pindexNew = new CBlockIndex(block); assert(pindexNew); // We assign the sequence id to blocks only when the full data is available, // to avoid miners withholding blocks but broadcasting headers, to get a // competitive advantage. pindexNew->nSequenceId = 0; BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock); if (miPrev != mapBlockIndex.end()) { pindexNew->pprev = (*miPrev).second; pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->BuildSkip(); } pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BLOCK_VALID_TREE); if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) { pindexBestHeader = pindexNew; } setDirtyBlockIndex.insert(pindexNew); return pindexNew; } /** * Mark a block as having its data received and checked (up to * BLOCK_VALID_TRANSACTIONS). */ bool ReceivedBlockTransactions(const CBlock &block, CValidationState &state, CBlockIndex *pindexNew, const CDiskBlockPos &pos) { pindexNew->nTx = block.vtx.size(); pindexNew->nChainTx = 0; pindexNew->nFile = pos.nFile; pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; pindexNew->nStatus |= BLOCK_HAVE_DATA; pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) { // If pindexNew is the genesis block or all parents are // BLOCK_VALID_TRANSACTIONS. std::deque queue; queue.push_back(pindexNew); // Recursively process any descendant blocks that now may be eligible to // be connected. 
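        // Breadth-first walk: each block taken off the queue gets its
        // nChainTx and sequence id filled in, is considered as a tip
        // candidate, and releases any descendants that were parked in
        // mapBlocksUnlinked waiting for it.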
while (!queue.empty()) { CBlockIndex *pindex = queue.front(); queue.pop_front(); pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx; { LOCK(cs_nBlockSequenceId); pindex->nSequenceId = nBlockSequenceId++; } if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { setBlockIndexCandidates.insert(pindex); } std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex); while (range.first != range.second) { std::multimap::iterator it = range.first; queue.push_back(it->second); range.first++; mapBlocksUnlinked.erase(it); } } } else { if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { mapBlocksUnlinked.insert( std::make_pair(pindexNew->pprev, pindexNew)); } } return true; } static bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false) { LOCK(cs_LastBlockFile); unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } if (!fKnown) { while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { nFile++; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } } pos.nFile = nFile; pos.nPos = vinfoBlockFile[nFile].nSize; } if ((int)nFile != nLastBlockFile) { if (!fKnown) { LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); } FlushBlockFile(!fKnown); nLastBlockFile = nFile; } vinfoBlockFile[nFile].AddBlock(nHeight, nTime); if (fKnown) vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize); else vinfoBlockFile[nFile].nSize += nAddSize; if (!fKnown) { unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenBlockFile(pos); if (file) { LogPrintf( "Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else return state.Error("out of disk space"); } } setDirtyFileInfo.insert(nFile); return true; } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize) { pos.nFile = nFile; LOCK(cs_LastBlockFile); unsigned int nNewSize; pos.nPos = vinfoBlockFile[nFile].nUndoSize; nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize; setDirtyFileInfo.insert(nFile); unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else return state.Error("out of disk space"); } return true; } static bool CheckBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, bool fCheckPOW = true) { // Check proof of work matches claimed amount if 
    (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, config)) {
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false,
                         "proof of work failed");
    }

    return true;
}

bool CheckBlock(const Config &config, const CBlock &block,
                CValidationState &state, bool fCheckPOW,
                bool fCheckMerkleRoot) {
    // These are checks that are independent of context.
    if (block.fChecked) {
        return true;
    }

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(config, block, state, fCheckPOW)) {
        return false;
    }

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2) {
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot",
                             true, "hashMerkleRoot mismatch");
        }

        // Check for merkle tree malleability (CVE-2012-2459): repeating
        // sequences of transactions in a block without affecting the merkle
        // root of a block, while still invalidating it.
        if (mutated) {
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate",
                             true, "duplicate transaction");
        }
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.

    // First transaction must be coinbase.
    if (block.vtx.empty()) {
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false,
                         "first tx is not coinbase");
    }

    // Size limits.
    auto nMaxBlockSize = config.GetMaxBlockSize();

    // Bail early if there is no way this block is of reasonable size.
    if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
                         "size limits failed");
    }

    auto currentBlockSize =
        ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    if (currentBlockSize > nMaxBlockSize) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
                         "size limits failed");
    }

    // And a valid coinbase.
    if (!CheckCoinbase(*block.vtx[0], state, false)) {
        return state.Invalid(false, state.GetRejectCode(),
                             state.GetRejectReason(),
                             strprintf("Coinbase check failed (txid %s) %s",
                                       block.vtx[0]->GetId().ToString(),
                                       state.GetDebugMessage()));
    }

    // Keep track of the sigops count.
    uint64_t nSigOps = 0;
    auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);

    // Check transactions
    auto txCount = block.vtx.size();
    auto *tx = block.vtx[0].get();

    size_t i = 0;
    while (true) {
        // Count the sigops for the current transaction. If the total sigops
        // count is too high, the block is invalid.
        nSigOps += GetSigOpCountWithoutP2SH(*tx);
        if (nSigOps > nMaxSigOpsCount) {
            return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops",
                             false, "out-of-bounds SigOpCount");
        }

        // Go to the next transaction.
        i++;

        // We reached the end of the block, success.
        if (i >= txCount) {
            break;
        }

        // Check that the transaction is valid. Because this check differs for
        // the coinbase, the loop is arranged so that it only runs after at
        // least one increment.
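        // (The coinbase itself was already validated by CheckCoinbase()
        // above; only the non-coinbase transactions are run through
        // CheckRegularTransaction() here.)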
tx = block.vtx[i].get(); if (!CheckRegularTransaction(*tx, state, false)) { return state.Invalid( false, state.GetRejectCode(), state.GetRejectReason(), strprintf("Transaction check failed (txid %s) %s", tx->GetId().ToString(), state.GetDebugMessage())); } } if (fCheckPOW && fCheckMerkleRoot) { block.fChecked = true; } return true; } static bool CheckIndexAgainstCheckpoint(const CBlockIndex *pindexPrev, CValidationState &state, const CChainParams &chainparams, const uint256 &hash) { if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock) { return true; } int nHeight = pindexPrev->nHeight + 1; // Don't accept any forks from the main chain prior to last checkpoint. // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in // our MapBlockIndex. CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints()); if (pcheckpoint && nHeight < pcheckpoint->nHeight) { return state.DoS( 100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint"); } return true; } static bool ContextualCheckBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, const CBlockIndex *pindexPrev, int64_t nAdjustedTime) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; // Check proof of work if (block.nBits != GetNextWorkRequired(pindexPrev, &block, config)) { LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight); return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work"); } // Check timestamp against prev if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) { return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early"); } // Check timestamp if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) { return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future"); } // Reject outdated version blocks when 95% (75% on testnet) of the network // has upgraded: // check for version 2, 3 and 4 upgrades if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) || (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) || (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) { return state.Invalid( false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion), strprintf("rejected nVersion=0x%08x block", block.nVersion)); } return true; } bool ContextualCheckTransaction(const Config &config, const CTransaction &tx, CValidationState &state, int nHeight, int64_t nLockTimeCutoff) { if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) { // While this is only one transaction, we use txns in the error to // ensure continuity with other clients. 
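        // A transaction counts as final here when its nLockTime is zero, is
        // below the height/time cutoff passed in, or when every input
        // already carries the maximal nSequence value.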
return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction"); } const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); if (IsUAHFenabled(config, nHeight) && nHeight <= consensusParams.antiReplayOpReturnSunsetHeight) { for (const CTxOut &o : tx.vout) { if (o.scriptPubKey.IsCommitment( consensusParams.antiReplayOpReturnCommitment)) { return state.DoS(10, false, REJECT_INVALID, "bad-txn-replay", false, "non playable transaction"); } } } return true; } bool ContextualCheckTransactionForCurrentBlock(const Config &config, const CTransaction &tx, CValidationState &state, int flags) { AssertLockHeld(cs_main); // By convention a negative value for flags indicates that the current // network-enforced consensus rules should be used. In a future soft-fork // scenario that would mean checking which rules would be enforced for the // next block and setting the appropriate flags. At the present time no // soft-forks are scheduled, so no flags are set. flags = std::max(flags, 0); // ContextualCheckTransactionForCurrentBlock() uses chainActive.Height()+1 // to evaluate nLockTime because when IsFinalTx() is called within // CBlock::AcceptBlock(), the height of the block *being* evaluated is what // is used. Thus if we want to know if a transaction can be part of the // *next* block, we need to call ContextualCheckTransaction() with one more // than chainActive.Height(). const int nBlockHeight = chainActive.Height() + 1; // BIP113 will require that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. // When the next block is created its previous block will be the current // chain tip, so we use that to calculate the median time passed to // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set. const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST) ? chainActive.Tip()->GetMedianTimePast() : GetAdjustedTime(); return ContextualCheckTransaction(config, tx, state, nBlockHeight, nLockTimeCutoff); } static bool ContextualCheckBlock(const Config &config, const CBlock &block, CValidationState &state, const CBlockIndex *pindexPrev) { const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Start enforcing BIP113 (Median Time Past) using versionbits logic. int nLockTimeFlags = 0; if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST; } const int64_t nMedianTimePast = pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast(); const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST) ? nMedianTimePast : block.GetBlockTime(); // Check that all transactions are finalized for (const auto &tx : block.vtx) { if (!ContextualCheckTransaction(config, *tx, state, nHeight, nLockTimeCutoff)) { // state set by ContextualCheckTransaction. 
return false; } } // Enforce rule that the coinbase starts with serialized block height if (nHeight >= consensusParams.BIP34Height) { CScript expect = CScript() << nHeight; if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase"); } } return true; } static bool AcceptBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, CBlockIndex **ppindex) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator miSelf = mapBlockIndex.find(hash); CBlockIndex *pindex = nullptr; if (hash != chainparams.GetConsensus().hashGenesisBlock) { if (miSelf != mapBlockIndex.end()) { // Block header is already known. pindex = miSelf->second; if (ppindex) { *ppindex = pindex; } if (pindex->nStatus & BLOCK_FAILED_MASK) { return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate"); } return true; } if (!CheckBlockHeader(config, block, state)) { return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } // Get prev block index CBlockIndex *pindexPrev = nullptr; BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); if (mi == mapBlockIndex.end()) { return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found"); } pindexPrev = (*mi).second; if (pindexPrev->nStatus & BLOCK_FAILED_MASK) { return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); } assert(pindexPrev); if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash)) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } } if (pindex == nullptr) { pindex = AddToBlockIndex(block); } if (ppindex) { *ppindex = pindex; } CheckBlockIndex(chainparams.GetConsensus()); return true; } // Exposed wrapper for AcceptBlockHeader bool ProcessNewBlockHeaders(const Config &config, const std::vector &headers, CValidationState &state, const CBlockIndex **ppindex) { { LOCK(cs_main); for (const CBlockHeader &header : headers) { // Use a temp pindex instead of ppindex to avoid a const_cast CBlockIndex *pindex = nullptr; if (!AcceptBlockHeader(config, header, state, &pindex)) { return false; } if (ppindex) { *ppindex = pindex; } } } NotifyHeaderTip(); return true; } /** * Store block on disk. If dbp is non-null, the file is known to already reside * on disk. */ static bool AcceptBlock(const Config &config, const std::shared_ptr &pblock, CValidationState &state, CBlockIndex **ppindex, bool fRequested, const CDiskBlockPos *dbp, bool *fNewBlock) { AssertLockHeld(cs_main); const CBlock &block = *pblock; if (fNewBlock) { *fNewBlock = false; } CBlockIndex *pindexDummy = nullptr; CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy; if (!AcceptBlockHeader(config, block, state, &pindex)) { return false; } // Try to process all requested blocks that we don't have, but only // process an unrequested block if it's new and has enough work to // advance our tip, and isn't too many blocks ahead. 
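    // "Requested" means the block was explicitly asked for by the block
    // download logic; unrequested blocks are only stored when they are new,
    // have more work than the current tip and are not more than
    // MIN_BLOCKS_TO_KEEP blocks ahead of it.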
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); // Blocks that are too out-of-order needlessly limit the effectiveness of // pruning, because pruning will not delete block files that contain any // blocks which are too close in height to the tip. Apply this test // regardless of whether pruning is enabled; it should generally be safe to // not process unrequested blocks. bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP)); // TODO: Decouple this function from the block download logic by removing // fRequested // This requires some new chain datastructure to efficiently look up if a // block is in a chain leading to a candidate for best tip, despite not // being such a candidate itself. // TODO: deal better with return value and error conditions for duplicate // and unrequested blocks. if (fAlreadyHave) { return true; } // If we didn't ask for it: if (!fRequested) { // This is a previously-processed block that was pruned. if (pindex->nTx != 0) { return true; } // Don't process less-work chains. if (!fHasMoreWork) { return true; } // Block height is too high. if (fTooFarAhead) { return true; } } if (fNewBlock) { *fNewBlock = true; } if (!CheckBlock(config, block, state) || !ContextualCheckBlock(config, block, state, pindex->pprev)) { if (state.IsInvalid() && !state.CorruptionPossible()) { pindex->nStatus |= BLOCK_FAILED_VALID; setDirtyBlockIndex.insert(pindex); } return error("%s: %s (block %s)", __func__, FormatStateMessage(state), block.GetHash().ToString()); } // Header is valid/has work, merkle tree and segwit merkle tree are // good...RELAY NOW (but if it does not build on our best tip, let the // SendMessages loop relay it) if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) { GetMainSignals().NewPoWValidBlock(pindex, pblock); } int nHeight = pindex->nHeight; const CChainParams &chainparams = config.GetChainParams(); // Write block to history file try { unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; if (dbp != nullptr) { blockPos = *dbp; } if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(), dbp != nullptr)) { return error("AcceptBlock(): FindBlockPos failed"); } if (dbp == nullptr) { if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { AbortNode(state, "Failed to write block"); } } if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("AcceptBlock(): ReceivedBlockTransactions failed"); } } catch (const std::runtime_error &e) { return AbortNode(state, std::string("System error: ") + e.what()); } if (fCheckForPruning) { // we just allocated more disk space for block files. FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_NONE); } return true; } bool ProcessNewBlock(const Config &config, const std::shared_ptr pblock, bool fForceProcessing, bool *fNewBlock) { { CBlockIndex *pindex = nullptr; if (fNewBlock) *fNewBlock = false; const CChainParams &chainparams = config.GetChainParams(); CValidationState state; // Ensure that CheckBlock() passes before calling AcceptBlock, as // belt-and-suspenders. 
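        // CheckBlock() is context-independent, so it can run before cs_main
        // is taken; the contextual checks happen inside AcceptBlock() under
        // the lock below.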
bool ret = CheckBlock(config, *pblock, state); LOCK(cs_main); if (ret) { // Store to disk ret = AcceptBlock(config, pblock, state, &pindex, fForceProcessing, nullptr, fNewBlock); } CheckBlockIndex(chainparams.GetConsensus()); if (!ret) { GetMainSignals().BlockChecked(*pblock, state); return error("%s: AcceptBlock FAILED", __func__); } } NotifyHeaderTip(); // Only used to report errors, not invalidity - ignore it CValidationState state; if (!ActivateBestChain(config, state, pblock)) return error("%s: ActivateBestChain failed", __func__); return true; } bool TestBlockValidity(const Config &config, CValidationState &state, const CBlock &block, CBlockIndex *pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); assert(pindexPrev && pindexPrev == chainActive.Tip()); if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash())) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } CCoinsViewCache viewNew(pcoinsTip); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; // NOTE: CheckBlockHeader is called by CheckBlock if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state)); } if (!CheckBlock(config, block, state, fCheckPOW, fCheckMerkleRoot)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ContextualCheckBlock(config, block, state, pindexPrev)) { return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ConnectBlock(config, block, state, &indexDummy, viewNew, true)) { return false; } assert(state.IsValid()); return true; } /** * BLOCK PRUNING CODE */ /* Calculate the amount of disk space the block & undo files currently use */ static uint64_t CalculateCurrentUsage() { uint64_t retval = 0; for (const CBlockFileInfo &file : vinfoBlockFile) { retval += file.nSize + file.nUndoSize; } return retval; } /* Prune a block file (modify associated database entries)*/ void PruneOneBlockFile(const int fileNumber) { for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) { CBlockIndex *pindex = it->second; if (pindex->nFile == fileNumber) { pindex->nStatus &= ~BLOCK_HAVE_DATA; pindex->nStatus &= ~BLOCK_HAVE_UNDO; pindex->nFile = 0; pindex->nDataPos = 0; pindex->nUndoPos = 0; setDirtyBlockIndex.insert(pindex); // Prune from mapBlocksUnlinked -- any block we prune would have // to be downloaded again in order to consider its chain, at which // point it would be considered as a candidate for // mapBlocksUnlinked or setBlockIndexCandidates. 
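            // Only mapBlocksUnlinked entries keyed on this block's parent can
            // reference it, so the equal_range(pindex->pprev) scan below is
            // sufficient; unrelated siblings stay in the map.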
std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); while (range.first != range.second) { std::multimap::iterator _it = range.first; range.first++; if (_it->second == pindex) { mapBlocksUnlinked.erase(_it); } } } } vinfoBlockFile[fileNumber].SetNull(); setDirtyFileInfo.insert(fileNumber); } void UnlinkPrunedFiles(const std::set &setFilesToPrune) { for (std::set::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { CDiskBlockPos pos(*it, 0); fs::remove(GetBlockPosFilename(pos, "blk")); fs::remove(GetBlockPosFilename(pos, "rev")); LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it); } } /** * Calculate the block/rev files to delete based on height specified by user * with RPC command pruneblockchain */ static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight) { assert(fPruneMode && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr) { return; } // last block to prune is the lesser of (user-specified height, // MIN_BLOCKS_TO_KEEP from the tip) unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP); int count = 0; for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); } /* This function is called from the RPC code for pruneblockchain */ void PruneBlockFilesManual(int nManualPruneHeight) { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight); } /** * Prune block and undo files (blk???.dat and undo???.dat) so that the disk * space used is less than a user-defined target. The user sets the target (in * MB) on the command line or in config file. This will be run on startup and * whenever new space is allocated in a block or undo file, staying below the * target. Changing back to unpruned requires a reindex (which in this case * means the blockchain must be re-downloaded.) * * Pruning functions are called from FlushStateToDisk when the global * fCheckForPruning flag has been set. Block and undo files are deleted in * lock-step (when blk00003.dat is deleted, so is rev00003.dat.). Pruning cannot * take place until the longest chain is at least a certain length (100000 on * mainnet, 1000 on testnet, 1000 on regtest). Pruning will never delete a block * within a defined distance (currently 288) from the active chain's tip. The * block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks * that were stored in the deleted files. A db flag records the fact that at * least some block files have been pruned. 
* * @param[out] setFilesToPrune The set of file indices that can be unlinked * will be returned */ static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight) { LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr || nPruneTarget == 0) { return; } if (uint64_t(chainActive.Tip()->nHeight) <= nPruneAfterHeight) { return; } unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files, // so we should leave a buffer under our target to account for another // allocation before the next pruning. uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; uint64_t nBytesToPrune; int count = 0; if (nCurrentUsage + nBuffer >= nPruneTarget) { for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; if (vinfoBlockFile[fileNumber].nSize == 0) { continue; } // are we below our target? if (nCurrentUsage + nBuffer < nPruneTarget) { break; } // don't prune files that could have a block within // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); // Queue up the files for removal setFilesToPrune.insert(fileNumber); nCurrentUsage -= nBytesToPrune; count++; } } LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB " "max_prune_height=%d removed %d blk/rev pairs\n", nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024, ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024, nLastBlockWeCanPrune, count); } bool CheckDiskSpace(uint64_t nAdditionalBytes) { uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) return AbortNode("Disk space is low!", _("Error: Disk space is low!")); return true; } static FILE *OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) { if (pos.IsNull()) { return nullptr; } fs::path path = GetBlockPosFilename(pos, prefix); fs::create_directories(path.parent_path()); FILE *file = fsbridge::fopen(path, "rb+"); if (!file && !fReadOnly) { file = fsbridge::fopen(path, "wb+"); } if (!file) { LogPrintf("Unable to open file %s\n", path.string()); return nullptr; } if (pos.nPos) { if (fseek(file, pos.nPos, SEEK_SET)) { LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); fclose(file); return nullptr; } } return file; } FILE *OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "blk", fReadOnly); } /** Open an undo file (rev?????.dat) */ static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "rev", fReadOnly); } fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) { return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); } CBlockIndex *InsertBlockIndex(uint256 hash) { if (hash.IsNull()) return nullptr; // Return existing BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) return (*mi).second; // Create new CBlockIndex *pindexNew = new CBlockIndex(); if (!pindexNew) throw std::runtime_error(std::string(__func__) + ": new CBlockIndex failed"); mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); return pindexNew; 
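FindFilesToPrune above walks the block files oldest-first and deletes until the projected usage, padded by one allocation's worth of buffer, drops back under the target, while skipping files that still hold blocks within MIN_BLOCKS_TO_KEEP of the tip. A simplified sketch of that selection loop; FileInfo and SelectFilesToPrune are stand-ins, not the real vinfoBlockFile bookkeeping:

// Oldest-first selection of prunable files; stops once usage plus a
// one-allocation buffer is back under the configured target.
#include <cstdint>
#include <set>
#include <vector>

struct FileInfo {
    uint64_t nSize = 0;
    uint64_t nUndoSize = 0;
    int nHeightLast = 0;  // highest block height stored in this file
};

std::set<int> SelectFilesToPrune(const std::vector<FileInfo> &files,
                                 uint64_t currentUsage, uint64_t pruneTarget,
                                 uint64_t buffer, int lastPrunableHeight) {
    std::set<int> toPrune;
    for (int n = 0; n < (int)files.size(); ++n) {
        if (currentUsage + buffer < pruneTarget) break;  // back under target
        if (files[n].nSize == 0) continue;               // already pruned
        if (files[n].nHeightLast > lastPrunableHeight) continue;  // too recent
        currentUsage -= files[n].nSize + files[n].nUndoSize;
        toPrune.insert(n);
    }
    return toPrune;
}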
} static bool LoadBlockIndexDB(const CChainParams &chainparams) { if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex)) return false; boost::this_thread::interruption_point(); // Calculate nChainWork std::vector> vSortedByHeight; vSortedByHeight.reserve(mapBlockIndex.size()); for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex)); } sort(vSortedByHeight.begin(), vSortedByHeight.end()); for (const std::pair &item : vSortedByHeight) { CBlockIndex *pindex = item.second; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); // We can link the chain of blocks for which we've received transactions // at some point. Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { if (pindex->pprev->nChainTx) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; } else { pindex->nChainTx = 0; mapBlocksUnlinked.insert( std::make_pair(pindex->pprev, pindex)); } } else { pindex->nChainTx = pindex->nTx; } } if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr)) { setBlockIndexCandidates.insert(pindex); } if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork)) { pindexBestInvalid = pindex; } if (pindex->pprev) { pindex->BuildSkip(); } if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) { pindexBestHeader = pindex; } } // Load block file info pblocktree->ReadLastBlockFile(nLastBlockFile); vinfoBlockFile.resize(nLastBlockFile + 1); LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile); for (int nFile = 0; nFile <= nLastBlockFile; nFile++) { pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]); } LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString()); for (int nFile = nLastBlockFile + 1; true; nFile++) { CBlockFileInfo info; if (pblocktree->ReadBlockFileInfo(nFile, info)) { vinfoBlockFile.push_back(info); } else { break; } } // Check presence of blk files LogPrintf("Checking all blk files are present...\n"); std::set setBlkDataFiles; for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; if (pindex->nStatus & BLOCK_HAVE_DATA) { setBlkDataFiles.insert(pindex->nFile); } } for (std::set::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) { CDiskBlockPos pos(*it, 0); if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION) .IsNull()) { return false; } } // Check whether we have ever pruned block & undo files pblocktree->ReadFlag("prunedblockfiles", fHavePruned); if (fHavePruned) { LogPrintf( "LoadBlockIndexDB(): Block files have previously been pruned\n"); } // Check whether we need to continue reindexing bool fReindexing = false; pblocktree->ReadReindexing(fReindexing); fReindex |= fReindexing; // Check whether we have a transaction index pblocktree->ReadFlag("txindex", fTxIndex); LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? 
"enabled" : "disabled"); return true; } void LoadChainTip(const CChainParams &chainparams) { if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) { return; } // Load pointer to end of best chain BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); if (it == mapBlockIndex.end()) { return; } chainActive.SetTip(it->second); PruneBlockIndexCandidates(); LogPrintf( "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n", chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(chainparams.TxData(), chainActive.Tip())); } CVerifyDB::CVerifyDB() { uiInterface.ShowProgress(_("Verifying blocks..."), 0); } CVerifyDB::~CVerifyDB() { uiInterface.ShowProgress("", 100); } bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) { LOCK(cs_main); if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) { return true; } // Verify blocks in the best chain if (nCheckDepth <= 0) { // suffices until the year 19000 nCheckDepth = 1000000000; } if (nCheckDepth > chainActive.Height()) { nCheckDepth = chainActive.Height(); } nCheckLevel = std::max(0, std::min(4, nCheckLevel)); LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); CCoinsViewCache coins(coinsview); CBlockIndex *pindexState = chainActive.Tip(); CBlockIndex *pindexFailure = nullptr; int nGoodTransactions = 0; CValidationState state; int reportDone = 0; LogPrintf("[0%%]..."); for (CBlockIndex *pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { boost::this_thread::interruption_point(); int percentageDone = std::max( 1, std::min( 99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); if (reportDone < percentageDone / 10) { // report every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone); if (pindex->nHeight < chainActive.Height() - nCheckDepth) { break; } if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) { // If pruning, only go back as far as we have data. 
LogPrintf("VerifyDB(): block verification stopping at height %d " "(pruning, no data)\n", pindex->nHeight); break; } CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(config, block, state)) { return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state)); } // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; CDiskBlockPos pos = pindex->GetUndoPos(); if (!pos.IsNull()) { if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) { return error( "VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } // check level 3: check for inconsistencies during memory-only // disconnect of tip blocks if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { return error("VerifyDB(): *** irrecoverable inconsistency in " "block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } pindexState = pindex->pprev; if (res == DISCONNECT_UNCLEAN) { nGoodTransactions = 0; pindexFailure = pindex; } else { nGoodTransactions += block.vtx.size(); } } if (ShutdownRequested()) { return true; } } if (pindexFailure) { return error("VerifyDB(): *** coin database inconsistencies found " "(last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); } // check level 4: try reconnecting blocks if (nCheckLevel >= 4) { CBlockIndex *pindex = pindexState; while (pindex != chainActive.Tip()) { boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } if (!ConnectBlock(config, block, state, pindex, coins)) { return error( "VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } LogPrintf("[DONE].\n"); LogPrintf("No coin database inconsistencies in last %i blocks (%i " "transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions); return true; } /** * Apply the effects of a block on the utxo cache, ignoring that it may already * have been applied. */ static bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &inputs, const Config &config) { // TODO: merge with ConnectBlock CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } for (const CTransactionRef &tx : block.vtx) { if (!tx->IsCoinBase()) { for (const CTxIn &txin : tx->vin) { inputs.SpendCoin(txin.prevout); } } // Pass check = true as every addition may be an overwrite. 
AddCoins(inputs, *tx, pindex->nHeight, true); } return true; } bool ReplayBlocks(const Config &config, CCoinsView *view) { LOCK(cs_main); CCoinsViewCache cache(view); std::vector hashHeads = view->GetHeadBlocks(); if (hashHeads.empty()) { // We're already in a consistent state. return true; } if (hashHeads.size() != 2) { return error("ReplayBlocks(): unknown inconsistent state"); } uiInterface.ShowProgress(_("Replaying blocks..."), 0); LogPrintf("Replaying blocks\n"); // Old tip during the interrupted flush. const CBlockIndex *pindexOld = nullptr; // New tip during the interrupted flush. const CBlockIndex *pindexNew; // Latest block common to both the old and the new tip. const CBlockIndex *pindexFork = nullptr; if (mapBlockIndex.count(hashHeads[0]) == 0) { return error( "ReplayBlocks(): reorganization to unknown block requested"); } pindexNew = mapBlockIndex[hashHeads[0]]; if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. if (mapBlockIndex.count(hashHeads[1]) == 0) { return error( "ReplayBlocks(): reorganization from unknown block requested"); } pindexOld = mapBlockIndex[hashHeads[1]]; pindexFork = LastCommonAncestor(pindexOld, pindexNew); assert(pindexFork != nullptr); } // Rollback along the old branch. while (pindexOld != pindexFork) { if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. CBlock block; if (!ReadBlockFromDisk(block, pindexOld, config)) { return error("RollbackBlock(): ReadBlockFromDisk() failed at " "%d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); DisconnectResult res = DisconnectBlock(block, pindexOld, cache); if (res == DISCONNECT_FAILED) { return error( "RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO // was deleted, or an existing UTXO was overwritten. It corresponds // to cases where the block-to-be-disconnect never had all its // operations applied to the UTXO set. However, as both writing a // UTXO and deleting a UTXO are idempotent operations, the result is // still a version of the UTXO set with the effects of that block // undone. } pindexOld = pindexOld->pprev; } // Roll forward from the forking point to the new tip. int nForkHeight = pindexFork ? pindexFork->nHeight : 0; for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) { const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight); LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight); if (!RollforwardBlock(pindex, cache, config)) { return false; } } cache.SetBestBlock(pindexNew->GetBlockHash()); cache.Flush(); uiInterface.ShowProgress("", 100); return true; } bool RewindBlockIndex(const Config &config) { LOCK(cs_main); const CChainParams ¶ms = config.GetChainParams(); int nHeight = chainActive.Height() + 1; // nHeight is now the height of the first insufficiently-validated block, or // tipheight + 1 CValidationState state; CBlockIndex *pindex = chainActive.Tip(); while (chainActive.Height() >= nHeight) { if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) { // If pruning, don't try rewinding past the HAVE_DATA point; since // older blocks can't be served anyway, there's no need to walk // further, and trying to DisconnectTip() will fail (and require a // needless reindex/redownload of the blockchain). 
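ReplayBlocks above disconnects the abandoned branch back to the fork point shared with the new tip and then reconnects the new branch in height order. A generic sketch of that walk over a parent-pointer chain (IndexNode stands in for CBlockIndex, and both chains are assumed to share a genesis):

// Generic rollback/rollforward walk over a parent-pointer chain.
#include <functional>
#include <vector>

struct IndexNode {
    const IndexNode *prev = nullptr;
    int height = 0;
};

// Assumes both nodes sit on chains that share a common root.
const IndexNode *LastCommonAncestor(const IndexNode *a, const IndexNode *b) {
    while (a->height > b->height) a = a->prev;
    while (b->height > a->height) b = b->prev;
    while (a != b) { a = a->prev; b = b->prev; }
    return a;
}

void Replay(const IndexNode *oldTip, const IndexNode *newTip,
            const std::function<void(const IndexNode *)> &disconnect,
            const std::function<void(const IndexNode *)> &connect) {
    const IndexNode *fork =
        oldTip ? LastCommonAncestor(oldTip, newTip) : nullptr;

    // Undo the abandoned branch, never disconnecting the genesis block.
    for (const IndexNode *p = oldTip; p != fork; p = p->prev) {
        if (p->height > 0) disconnect(p);
    }

    // Re-apply the new branch from just above the fork up to the new tip.
    std::vector<const IndexNode *> ahead;
    for (const IndexNode *p = newTip; p != fork && p->height > 0; p = p->prev) {
        ahead.push_back(p);
    }
    for (auto it = ahead.rbegin(); it != ahead.rend(); ++it) {
        connect(*it);
    }
}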
break; } if (!DisconnectTip(config, state, nullptr)) { return error( "RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } } // Reduce validity flag and have-data flags. // We do this after actual disconnecting, otherwise we'll end up writing the // lack of data to disk before writing the chainstate, resulting in a // failure to continue if interrupted. for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { CBlockIndex *pindexIter = it->second; if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) { setBlockIndexCandidates.insert(pindexIter); } } PruneBlockIndexCandidates(); CheckBlockIndex(params.GetConsensus()); if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { return false; } return true; } // May NOT be used after any connections are up as much of the peer-processing // logic assumes a consistent block index state void UnloadBlockIndex() { LOCK(cs_main); setBlockIndexCandidates.clear(); chainActive.SetTip(nullptr); pindexBestInvalid = nullptr; pindexBestHeader = nullptr; mempool.clear(); mapBlocksUnlinked.clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; nBlockSequenceId = 1; setDirtyBlockIndex.clear(); setDirtyFileInfo.clear(); versionbitscache.Clear(); for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) { warningcache[b].clear(); } for (BlockMap::value_type &entry : mapBlockIndex) { delete entry.second; } mapBlockIndex.clear(); fHavePruned = false; } bool LoadBlockIndex(const CChainParams &chainparams) { // Load block index from databases if (!fReindex && !LoadBlockIndexDB(chainparams)) { return false; } return true; } bool InitBlockIndex(const Config &config) { LOCK(cs_main); // Check whether we're already initialized if (chainActive.Genesis() != nullptr) { return true; } // Use the provided setting for -txindex in the new database - fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX); + fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX); pblocktree->WriteFlag("txindex", fTxIndex); LogPrintf("Initializing databases...\n"); // Only add the genesis block if not reindexing (in which case we reuse the // one already on disk) if (!fReindex) { try { const CChainParams &chainparams = config.GetChainParams(); CBlock &block = const_cast(chainparams.GenesisBlock()); // Start new block file unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; CValidationState state; if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0, block.GetBlockTime())) { return error("LoadBlockIndex(): FindBlockPos failed"); } if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { return error( "LoadBlockIndex(): writing genesis block to disk failed"); } CBlockIndex *pindex = AddToBlockIndex(block); if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("LoadBlockIndex(): genesis block not accepted"); } } catch (const std::runtime_error &e) { return error( "LoadBlockIndex(): failed to initialize block database: %s", e.what()); } } return true; } bool LoadExternalBlockFile(const Config &config, FILE *fileIn, CDiskBlockPos *dbp) { // Map of disk positions for blocks with unknown parent (only used for // reindex) static std::multimap mapBlocksUnknownParent; int64_t nStart = GetTimeMillis(); const CChainParams &chainparams = config.GetChainParams(); int nLoaded = 0; try { // This takes over fileIn and calls fclose() on it in the 
CBufferedFile // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there // so any transaction can fit in the buffer. CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK, CLIENT_VERSION); uint64_t nRewind = blkdat.GetPos(); while (!blkdat.eof()) { boost::this_thread::interruption_point(); blkdat.SetPos(nRewind); // Start one byte further next time, in case of failure. nRewind++; // Remove former limit. blkdat.SetLimit(); unsigned int nSize = 0; try { // Locate a header. uint8_t buf[CMessageHeader::MESSAGE_START_SIZE]; blkdat.FindByte(chainparams.DiskMagic()[0]); nRewind = blkdat.GetPos() + 1; blkdat >> FLATDATA(buf); if (memcmp(buf, std::begin(chainparams.DiskMagic()), CMessageHeader::MESSAGE_START_SIZE)) { continue; } // Read size. blkdat >> nSize; if (nSize < 80) { continue; } } catch (const std::exception &) { // No valid block header found; don't complain. break; } try { // read block uint64_t nBlockPos = blkdat.GetPos(); if (dbp) { dbp->nPos = nBlockPos; } blkdat.SetLimit(nBlockPos + nSize); blkdat.SetPos(nBlockPos); std::shared_ptr pblock = std::make_shared(); CBlock &block = *pblock; blkdat >> block; nRewind = blkdat.GetPos(); // detect out of order blocks, and store them for later uint256 hash = block.GetHash(); if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) { LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), block.hashPrevBlock.ToString()); if (dbp) { mapBlocksUnknownParent.insert( std::make_pair(block.hashPrevBlock, *dbp)); } continue; } // process in case the block isn't known yet if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) { LOCK(cs_main); CValidationState state; if (AcceptBlock(config, pblock, state, nullptr, true, dbp, nullptr)) { nLoaded++; } if (state.IsError()) { break; } } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) { LogPrint( BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight); } // Activate the genesis block so normal node progress can // continue if (hash == chainparams.GetConsensus().hashGenesisBlock) { CValidationState state; if (!ActivateBestChain(config, state)) { break; } } NotifyHeaderTip(); // Recursively process earlier encountered successors of this // block std::deque queue; queue.push_back(hash); while (!queue.empty()) { uint256 head = queue.front(); queue.pop_front(); std::pair::iterator, std::multimap::iterator> range = mapBlocksUnknownParent.equal_range(head); while (range.first != range.second) { std::multimap::iterator it = range.first; std::shared_ptr pblockrecursive = std::make_shared(); if (ReadBlockFromDisk(*pblockrecursive, it->second, config)) { LogPrint( BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(), head.ToString()); LOCK(cs_main); CValidationState dummy; if (AcceptBlock(config, pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) { nLoaded++; queue.push_back(pblockrecursive->GetHash()); } } range.first++; mapBlocksUnknownParent.erase(it); NotifyHeaderTip(); } } } catch (const std::exception &e) { LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what()); } } } catch (const std::runtime_error &e) { AbortNode(std::string("System error: ") + e.what()); } if (nLoaded > 0) { LogPrintf("Loaded %i blocks from external file in %dms\n", 
nLoaded, GetTimeMillis() - nStart); } return nLoaded > 0; } static void CheckBlockIndex(const Consensus::Params &consensusParams) { if (!fCheckBlockIndex) { return; } LOCK(cs_main); // During a reindex, we read the genesis block and call CheckBlockIndex // before ActivateBestChain, so we have the genesis block in mapBlockIndex // but no active chain. (A few of the tests when iterating the block tree // require that chainActive has been initialized.) if (chainActive.Height() < 0) { assert(mapBlockIndex.size() <= 1); return; } // Build forward-pointing map of the entire block tree. std::multimap forward; for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { forward.insert(std::make_pair(it->second->pprev, it->second)); } assert(forward.size() == mapBlockIndex.size()); std::pair::iterator, std::multimap::iterator> rangeGenesis = forward.equal_range(nullptr); CBlockIndex *pindex = rangeGenesis.first->second; rangeGenesis.first++; // There is only one index entry with parent nullptr. assert(rangeGenesis.first == rangeGenesis.second); // Iterate over the entire block tree, using depth-first search. // Along the way, remember whether there are blocks on the path from genesis // block being explored which are the first to have certain properties. size_t nNodes = 0; int nHeight = 0; // Oldest ancestor of pindex which is invalid. CBlockIndex *pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. CBlockIndex *pindexFirstMissing = nullptr; // Oldest ancestor of pindex for which nTx == 0. CBlockIndex *pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE // (regardless of being valid or not). CBlockIndex *pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS // (regardless of being valid or not). CBlockIndex *pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN // (regardless of being valid or not). CBlockIndex *pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS // (regardless of being valid or not). CBlockIndex *pindexFirstNotScriptsValid = nullptr; while (pindex != nullptr) { nNodes++; if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) { pindexFirstInvalid = pindex; } if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) { pindexFirstMissing = pindex; } if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) { pindexFirstNeverProcessed = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) { pindexFirstNotTreeValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) { pindexFirstNotTransactionsValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) { pindexFirstNotChainValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) { pindexFirstNotScriptsValid = pindex; } // Begin: actual consistency checks. if (pindex->pprev == nullptr) { // Genesis block checks. // Genesis block's hash must match. 
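CheckBlockIndex above inverts the pprev pointers into a forward multimap and then walks the whole tree depth-first, descending into the first child and climbing back up through siblings. A stripped-down version of that traversal on a generic node type, keeping only a visit counter in place of the real per-node checks:

// Depth-first walk over a parent-pointer tree via a forward multimap,
// mirroring the traversal structure used by CheckBlockIndex.
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

struct Node {
    Node *parent = nullptr;
};

void WalkTree(const std::vector<Node *> &nodes) {
    if (nodes.empty()) {
        return;
    }
    std::multimap<Node *, Node *> forward;  // parent -> children
    for (Node *n : nodes) {
        forward.emplace(n->parent, n);
    }
    // Exactly one entry has a null parent: the genesis/root node.
    assert(forward.count(nullptr) == 1);

    Node *cur = forward.equal_range(nullptr).first->second;
    std::size_t visited = 0;
    while (cur != nullptr) {
        ++visited;  // the per-node consistency checks would run here
        auto kids = forward.equal_range(cur);
        if (kids.first != kids.second) {
            cur = kids.first->second;  // descend into the first child
            continue;
        }
        // Leaf: climb until an unvisited sibling is found or the tree ends.
        while (cur != nullptr) {
            auto sib = forward.equal_range(cur->parent);
            while (sib.first->second != cur) {
                ++sib.first;  // find the child we just came from
            }
            ++sib.first;      // step past it
            if (sib.first != sib.second) {
                cur = sib.first->second;  // move to the next sibling
                break;
            }
            cur = cur->parent;            // no more siblings: move up
        }
    }
    assert(visited == forward.size());    // every node seen exactly once
}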
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // The current active chain's genesis block must be this block. assert(pindex == chainActive.Genesis()); } if (pindex->nChainTx == 0) { // nSequenceId can't be set positive for blocks that aren't linked // (negative is used for preciousblock) assert(pindex->nSequenceId <= 0); } // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0 // (or VALID_TRANSACTIONS) if no pruning has occurred. if (!fHavePruned) { // If we've never pruned, then HAVE_DATA should be equivalent to nTx // > 0 assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); assert(pindexFirstMissing == pindexFirstNeverProcessed); } else { // If we have pruned, then we can only say that HAVE_DATA implies // nTx > 0 if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); } if (pindex->nStatus & BLOCK_HAVE_UNDO) { assert(pindex->nStatus & BLOCK_HAVE_DATA); } // This is pruning-independent. assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // All parents having had data (at some point) is equivalent to all // parents being VALID_TRANSACTIONS, which is equivalent to nChainTx // being set. // nChainTx != 0 is used to signal that all parent blocks have been // processed (but may have been pruned). assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0)); // nHeight must be consistent. assert(pindex->nHeight == nHeight); // For every block except the genesis block, the chainwork must be // larger than the parent's. assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // The pskip pointer must point back for all but the first 2 blocks. assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // All mapBlockIndex entries must at least be TREE valid assert(pindexFirstNotTreeValid == nullptr); if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) { // TREE valid implies all parents are TREE valid assert(pindexFirstNotTreeValid == nullptr); } if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) { // CHAIN valid implies all parents are CHAIN valid assert(pindexFirstNotChainValid == nullptr); } if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) { // SCRIPTS valid implies all parents are SCRIPTS valid assert(pindexFirstNotScriptsValid == nullptr); } if (pindexFirstInvalid == nullptr) { // Checks for not-invalid blocks. // The failed mask cannot be set for blocks without invalid parents. assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); } if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) { if (pindexFirstInvalid == nullptr) { // If this block sorts at least as good as the current tip and // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates. chainActive.Tip() must also be there // even if some data has been pruned. if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) { assert(setBlockIndexCandidates.count(pindex)); } // If some parent is missing, then it could be that this block // was in setBlockIndexCandidates but had to be removed because // of the missing data. In this case it must be in // mapBlocksUnlinked -- see test below. 
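Several of the assertions above are pure functions of one node's status bits and transaction counts. A small standalone encoding of three of them, with NodeState as a stand-in for the relevant CBlockIndex fields (the mapBlocksUnlinked membership test they refer to continues just below):

// Encodes a few of the CheckBlockIndex invariants as plain predicates.
#include <cassert>
#include <cstdint>

struct NodeState {
    bool haveData = false;  // BLOCK_HAVE_DATA
    bool haveUndo = false;  // BLOCK_HAVE_UNDO
    bool txValid = false;   // reached BLOCK_VALID_TRANSACTIONS
    uint64_t nTx = 0;
};

void CheckNode(const NodeState &s, bool everPruned) {
    // Undo data is only ever stored alongside the block data itself.
    assert(!s.haveUndo || s.haveData);
    // VALID_TRANSACTIONS is equivalent to nTx > 0, pruned or not.
    assert(s.txValid == (s.nTx > 0));
    if (!everPruned) {
        // Without pruning, having the data on disk is equivalent to nTx > 0.
        assert(s.haveData == (s.nTx > 0));
    } else if (s.haveData) {
        // With pruning we only keep the one-way implication.
        assert(s.nTx > 0);
    }
}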
} } else { // If this block sorts worse than the current tip or some ancestor's // block has never been seen, it cannot be in // setBlockIndexCandidates. assert(setBlockIndexCandidates.count(pindex) == 0); } // Check whether this block is in mapBlocksUnlinked. std::pair::iterator, std::multimap::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev); bool foundInUnlinked = false; while (rangeUnlinked.first != rangeUnlinked.second) { assert(rangeUnlinked.first->first == pindex->pprev); if (rangeUnlinked.first->second == pindex) { foundInUnlinked = true; break; } rangeUnlinked.first++; } if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) { // If this block has block data available, some parent was never // received, and has no invalid parents, it must be in // mapBlocksUnlinked. assert(foundInUnlinked); } if (!(pindex->nStatus & BLOCK_HAVE_DATA)) { // Can't be in mapBlocksUnlinked if we don't HAVE_DATA assert(!foundInUnlinked); } if (pindexFirstMissing == nullptr) { // We aren't missing data for any parent -- cannot be in // mapBlocksUnlinked. assert(!foundInUnlinked); } if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) { // We HAVE_DATA for this block, have received data for all parents // at some point, but we're currently missing data for some parent. // We must have pruned. assert(fHavePruned); // This block may have entered mapBlocksUnlinked if: // - it has a descendant that at some point had more work than the // tip, and // - we tried switching to that descendant but were missing // data for some intermediate block between chainActive and the // tip. // So if this block is itself better than chainActive.Tip() and it // wasn't in // setBlockIndexCandidates, then it must be in mapBlocksUnlinked. if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) { if (pindexFirstInvalid == nullptr) { assert(foundInUnlinked); } } } // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // // Perhaps too slow // End: actual consistency checks. // Try descending into the first subnode. std::pair::iterator, std::multimap::iterator> range = forward.equal_range(pindex); if (range.first != range.second) { // A subnode was found. pindex = range.first->second; nHeight++; continue; } // This is a leaf node. Move upwards until we reach a node of which we // have not yet visited the last child. while (pindex) { // We are going to either move to a parent or a sibling of pindex. // If pindex was the first with a certain property, unset the // corresponding variable. if (pindex == pindexFirstInvalid) { pindexFirstInvalid = nullptr; } if (pindex == pindexFirstMissing) { pindexFirstMissing = nullptr; } if (pindex == pindexFirstNeverProcessed) { pindexFirstNeverProcessed = nullptr; } if (pindex == pindexFirstNotTreeValid) { pindexFirstNotTreeValid = nullptr; } if (pindex == pindexFirstNotTransactionsValid) { pindexFirstNotTransactionsValid = nullptr; } if (pindex == pindexFirstNotChainValid) { pindexFirstNotChainValid = nullptr; } if (pindex == pindexFirstNotScriptsValid) { pindexFirstNotScriptsValid = nullptr; } // Find our parent. CBlockIndex *pindexPar = pindex->pprev; // Find which child we just visited. 
std::pair::iterator, std::multimap::iterator> rangePar = forward.equal_range(pindexPar); while (rangePar.first->second != pindex) { // Our parent must have at least the node we're coming from as // child. assert(rangePar.first != rangePar.second); rangePar.first++; } // Proceed to the next one. rangePar.first++; if (rangePar.first != rangePar.second) { // Move to the sibling. pindex = rangePar.first->second; break; } else { // Move up further. pindex = pindexPar; nHeight--; continue; } } } // Check that we actually traversed the entire map. assert(nNodes == forward.size()); } std::string CBlockFileInfo::ToString() const { return strprintf( "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); } CBlockFileInfo *GetBlockFileInfo(size_t n) { return &vinfoBlockFile.at(n); } ThresholdState VersionBitsTipState(const Consensus::Params ¶ms, Consensus::DeploymentPos pos) { LOCK(cs_main); return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache); } int VersionBitsTipStateSinceHeight(const Consensus::Params ¶ms, Consensus::DeploymentPos pos) { LOCK(cs_main); return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache); } static const uint64_t MEMPOOL_DUMP_VERSION = 1; bool LoadMempool(const Config &config) { int64_t nExpiryTimeout = - GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; + gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb"); CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); if (file.IsNull()) { LogPrintf( "Failed to open mempool file from disk. Continuing anyway.\n"); return false; } int64_t count = 0; int64_t skipped = 0; int64_t failed = 0; int64_t nNow = GetTime(); try { uint64_t version; file >> version; if (version != MEMPOOL_DUMP_VERSION) { return false; } uint64_t num; file >> num; double prioritydummy = 0; while (num--) { CTransactionRef tx; int64_t nTime; int64_t nFeeDelta; file >> tx; file >> nTime; file >> nFeeDelta; Amount amountdelta(nFeeDelta); if (amountdelta != Amount(0)) { mempool.PrioritiseTransaction(tx->GetId(), tx->GetId().ToString(), prioritydummy, amountdelta); } CValidationState state; if (nTime + nExpiryTimeout > nNow) { LOCK(cs_main); AcceptToMemoryPoolWithTime(config, mempool, state, tx, true, nullptr, nTime); if (state.IsValid()) { ++count; } else { ++failed; } } else { ++skipped; } if (ShutdownRequested()) return false; } std::map mapDeltas; file >> mapDeltas; for (const auto &i : mapDeltas) { mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second); } } catch (const std::exception &e) { LogPrintf("Failed to deserialize mempool data on disk: %s. 
Continuing " "anyway.\n", e.what()); return false; } LogPrintf("Imported mempool transactions from disk: %i successes, %i " "failed, %i expired\n", count, failed, skipped); return true; } void DumpMempool(void) { int64_t start = GetTimeMicros(); std::map mapDeltas; std::vector vinfo; { LOCK(mempool.cs); for (const auto &i : mempool.mapDeltas) { mapDeltas[i.first] = i.second.second; } vinfo = mempool.infoAll(); } int64_t mid = GetTimeMicros(); try { FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb"); if (!filestr) { return; } CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); uint64_t version = MEMPOOL_DUMP_VERSION; file << version; file << (uint64_t)vinfo.size(); for (const auto &i : vinfo) { file << *(i.tx); file << (int64_t)i.nTime; file << (int64_t)i.nFeeDelta.GetSatoshis(); mapDeltas.erase(i.tx->GetId()); } file << mapDeltas; FileCommit(file.Get()); file.fclose(); RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat"); int64_t last = GetTimeMicros(); LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid - start) * 0.000001, (last - mid) * 0.000001); } catch (const std::exception &e) { LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what()); } } //! Guess how far we are in the verification process at the given block index double GuessVerificationProgress(const ChainTxData &data, CBlockIndex *pindex) { if (pindex == nullptr) return 0.0; int64_t nNow = time(nullptr); double fTxTotal; if (pindex->nChainTx <= data.nTxCount) { fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate; } else { fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate; } return pindex->nChainTx / fTxTotal; } class CMainCleanup { public: CMainCleanup() {} ~CMainCleanup() { // block headers BlockMap::iterator it1 = mapBlockIndex.begin(); for (; it1 != mapBlockIndex.end(); it1++) delete (*it1).second; mapBlockIndex.clear(); } } instance_of_cmaincleanup; diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index 19053a6a4f..4df6561e7c 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -1,751 +1,752 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "db.h" #include "addrman.h" #include "fs.h" #include "hash.h" #include "protocol.h" #include "util.h" #include "utilstrencodings.h" #include #include #include #ifndef WIN32 #include #endif // // CDB // CDBEnv bitdb; void CDBEnv::EnvShutdown() { if (!fDbEnvInit) { return; } fDbEnvInit = false; int ret = dbenv->close(0); if (ret != 0) { LogPrintf("CDBEnv::EnvShutdown: Error %d shutting down database " "environment: %s\n", ret, DbEnv::strerror(ret)); } if (!fMockDb) { DbEnv(uint32_t(0)).remove(strPath.c_str(), 0); } } void CDBEnv::Reset() { delete dbenv; dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS); fDbEnvInit = false; fMockDb = false; } CDBEnv::CDBEnv() : dbenv(nullptr) { Reset(); } CDBEnv::~CDBEnv() { EnvShutdown(); delete dbenv; dbenv = nullptr; } void CDBEnv::Close() { EnvShutdown(); } bool CDBEnv::Open(const fs::path &pathIn) { if (fDbEnvInit) { return true; } boost::this_thread::interruption_point(); strPath = pathIn.string(); fs::path pathLogDir = pathIn / "database"; TryCreateDirectories(pathLogDir); fs::path pathErrorFile = pathIn / "db.log"; LogPrintf("CDBEnv::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string()); unsigned int nEnvFlags = 0; - if (GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB)) { + if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB)) { nEnvFlags |= DB_PRIVATE; } dbenv->set_lg_dir(pathLogDir.string().c_str()); // 1 MiB should be enough for just the wallet dbenv->set_cachesize(0, 0x100000, 1); dbenv->set_lg_bsize(0x10000); dbenv->set_lg_max(1048576); dbenv->set_lk_max_locks(40000); dbenv->set_lk_max_objects(40000); /// debug dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1); dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1); int ret = dbenv->open(strPath.c_str(), DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_RECOVER | nEnvFlags, S_IRUSR | S_IWUSR); if (ret != 0) { return error( "CDBEnv::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret)); } fDbEnvInit = true; fMockDb = false; return true; } void CDBEnv::MakeMock() { if (fDbEnvInit) { throw std::runtime_error("CDBEnv::MakeMock: Already initialized"); } boost::this_thread::interruption_point(); LogPrint(BCLog::DB, "CDBEnv::MakeMock\n"); dbenv->set_cachesize(1, 0, 1); dbenv->set_lg_bsize(10485760 * 4); dbenv->set_lg_max(10485760); dbenv->set_lk_max_locks(10000); dbenv->set_lk_max_objects(10000); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->log_set_config(DB_LOG_IN_MEMORY, 1); int ret = dbenv->open(nullptr, DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_PRIVATE, S_IRUSR | S_IWUSR); if (ret > 0) { throw std::runtime_error(strprintf( "CDBEnv::MakeMock: Error %d opening database environment.", ret)); } fDbEnvInit = true; fMockDb = true; } CDBEnv::VerifyResult CDBEnv::Verify(const std::string &strFile, recoverFunc_type recoverFunc, std::string &out_backup_filename) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); Db db(dbenv, 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) { return VERIFY_OK; } else if (recoverFunc == nullptr) { return RECOVER_FAIL; } // Try to recover: bool fRecovered = (*recoverFunc)(strFile, out_backup_filename); return (fRecovered ? 
RECOVER_OK : RECOVER_FAIL); } bool CDB::Recover(const std::string &filename, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &newFilename) { // Recovery procedure: // Move wallet file to walletfilename.timestamp.bak // Call Salvage with fAggressive=true to get as much data as possible. // Rewrite salvaged data to fresh wallet file. // Set -rescan so any missing transactions will be found. int64_t now = GetTime(); newFilename = strprintf("%s.%d.bak", filename, now); int result = bitdb.dbenv->dbrename(nullptr, filename.c_str(), nullptr, newFilename.c_str(), DB_AUTO_COMMIT); if (result == 0) { LogPrintf("Renamed %s to %s\n", filename, newFilename); } else { LogPrintf("Failed to rename %s to %s\n", filename, newFilename); return false; } std::vector salvagedData; bool fSuccess = bitdb.Salvage(newFilename, true, salvagedData); if (salvagedData.empty()) { LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename); return false; } LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size()); std::unique_ptr pdbCopy(new Db(bitdb.dbenv, 0)); int ret = pdbCopy->open(nullptr, // Txn pointer filename.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf("Cannot create database file %s\n", filename); return false; } DbTxn *ptxn = bitdb.TxnBegin(); for (CDBEnv::KeyValPair &row : salvagedData) { if (recoverKVcallback) { CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION); CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION); std::string strType, strErr; if (!(*recoverKVcallback)(callbackDataIn, ssKey, ssValue)) { continue; } } Dbt datKey(&row.first[0], row.first.size()); Dbt datValue(&row.second[0], row.second.size()); int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } ptxn->commit(0); pdbCopy->close(0); return fSuccess; } bool CDB::VerifyEnvironment(const std::string &walletFile, const fs::path &dataDir, std::string &errorStr) { LogPrintf("Using BerkeleyDB version %s\n", DbEnv::version(0, 0, 0)); LogPrintf("Using wallet %s\n", walletFile); // Wallet file must be a plain filename without a directory if (walletFile != fs::basename(walletFile) + fs::extension(walletFile)) { errorStr = strprintf(_("Wallet %s resides outside data directory %s"), walletFile, dataDir.string()); return false; } if (!bitdb.Open(dataDir)) { // try moving the database env out of the way fs::path pathDatabase = dataDir / "database"; fs::path pathDatabaseBak = dataDir / strprintf("database.%d.bak", GetTime()); try { fs::rename(pathDatabase, pathDatabaseBak); LogPrintf("Moved old %s to %s. 
Retrying.\n", pathDatabase.string(), pathDatabaseBak.string()); } catch (const fs::filesystem_error &) { // failure is ok (well, not really, but it's not worse than what we // started with) } // try again if (!bitdb.Open(dataDir)) { // if it still fails, it probably means we can't even create the // database env errorStr = strprintf( _("Error initializing wallet database environment %s!"), GetDataDir()); return false; } } return true; } bool CDB::VerifyDatabaseFile(const std::string &walletFile, const fs::path &dataDir, std::string &warningStr, std::string &errorStr, CDBEnv::recoverFunc_type recoverFunc) { if (fs::exists(dataDir / walletFile)) { std::string backup_filename; CDBEnv::VerifyResult r = bitdb.Verify(walletFile, recoverFunc, backup_filename); if (r == CDBEnv::RECOVER_OK) { warningStr = strprintf( _("Warning: Wallet file corrupt, data salvaged!" " Original %s saved as %s in %s; if" " your balance or transactions are incorrect you should" " restore from a backup."), walletFile, backup_filename, dataDir); } if (r == CDBEnv::RECOVER_FAIL) { errorStr = strprintf(_("%s corrupt, salvage failed"), walletFile); return false; } } // also return true if files does not exists return true; } /* End of headers, beginning of key/value data */ static const char *HEADER_END = "HEADER=END"; /* End of key/value data */ static const char *DATA_END = "DATA=END"; bool CDBEnv::Salvage(const std::string &strFile, bool fAggressive, std::vector &vResult) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); u_int32_t flags = DB_SALVAGE; if (fAggressive) { flags |= DB_AGGRESSIVE; } std::stringstream strDump; Db db(dbenv, 0); int result = db.verify(strFile.c_str(), nullptr, &strDump, flags); if (result == DB_VERIFY_BAD) { LogPrintf("CDBEnv::Salvage: Database salvage found errors, all data " "may not be recoverable.\n"); if (!fAggressive) { LogPrintf("CDBEnv::Salvage: Rerun with aggressive mode to ignore " "errors and continue.\n"); return false; } } if (result != 0 && result != DB_VERIFY_BAD) { LogPrintf("CDBEnv::Salvage: Database salvage failed with result %d.\n", result); return false; } // Format of bdb dump is ascii lines: // header lines... // HEADER=END // hexadecimal key // hexadecimal value // ... 
repeated // DATA=END std::string strLine; while (!strDump.eof() && strLine != HEADER_END) { // Skip past header getline(strDump, strLine); } std::string keyHex, valueHex; while (!strDump.eof() && keyHex != DATA_END) { getline(strDump, keyHex); if (keyHex != DATA_END) { if (strDump.eof()) { break; } getline(strDump, valueHex); if (valueHex == DATA_END) { LogPrintf("CDBEnv::Salvage: WARNING: Number of keys in data " "does not match number of values.\n"); break; } vResult.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex))); } } if (keyHex != DATA_END) { LogPrintf("CDBEnv::Salvage: WARNING: Unexpected end of file while " "reading salvage output.\n"); return false; } return (result == 0); } void CDBEnv::CheckpointLSN(const std::string &strFile) { dbenv->txn_checkpoint(0, 0, 0); if (fMockDb) { return; } dbenv->lsn_reset(strFile.c_str(), 0); } CDB::CDB(CWalletDBWrapper &dbw, const char *pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr) { int ret; fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); fFlushOnClose = fFlushOnCloseIn; env = dbw.env; if (dbw.IsDummy()) { return; } const std::string &strFilename = dbw.strFile; bool fCreate = strchr(pszMode, 'c') != nullptr; unsigned int nFlags = DB_THREAD; if (fCreate) { nFlags |= DB_CREATE; } { LOCK(env->cs_db); if (!env->Open(GetDataDir())) { throw std::runtime_error( "CDB: Failed to open database environment."); } strFile = strFilename; ++env->mapFileUseCount[strFile]; pdb = env->mapDb[strFile]; if (pdb == nullptr) { pdb = new Db(env->dbenv, 0); bool fMockDb = env->IsMock(); if (fMockDb) { DbMpoolFile *mpf = pdb->get_mpf(); ret = mpf->set_flags(DB_MPOOL_NOFILE, 1); if (ret != 0) { throw std::runtime_error( strprintf("CDB: Failed to configure for no temp file " "backing for database %s", strFile)); } } ret = pdb->open(nullptr, // Txn pointer fMockDb ? nullptr : strFile.c_str(), // Filename fMockDb ? strFile.c_str() : "main", // Logical db name DB_BTREE, // Database type nFlags, // Flags 0); if (ret != 0) { delete pdb; pdb = nullptr; --env->mapFileUseCount[strFile]; strFile = ""; throw std::runtime_error(strprintf( "CDB: Error %d, can't open database %s", ret, strFilename)); } if (fCreate && !Exists(std::string("version"))) { bool fTmp = fReadOnly; fReadOnly = false; WriteVersion(CLIENT_VERSION); fReadOnly = fTmp; } env->mapDb[strFile] = pdb; } } } void CDB::Flush() { if (activeTxn) { return; } // Flush database activity from memory pool to disk log unsigned int nMinutes = 0; if (fReadOnly) { nMinutes = 1; } env->dbenv->txn_checkpoint( - nMinutes ? GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 : 0, + nMinutes ? 
gArgs.GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 + : 0, nMinutes, 0); } void CWalletDBWrapper::IncrementUpdateCounter() { ++nUpdateCounter; } void CDB::Close() { if (!pdb) { return; } if (activeTxn) { activeTxn->abort(); } activeTxn = nullptr; pdb = nullptr; if (fFlushOnClose) { Flush(); } LOCK(env->cs_db); --env->mapFileUseCount[strFile]; } void CDBEnv::CloseDb(const std::string &strFile) { LOCK(cs_db); if (mapDb[strFile] != nullptr) { // Close the database handle Db *pdb = mapDb[strFile]; pdb->close(0); delete pdb; mapDb[strFile] = nullptr; } } bool CDBEnv::RemoveDb(const std::string &strFile) { this->CloseDb(strFile); LOCK(cs_db); int rc = dbenv->dbremove(nullptr, strFile.c_str(), nullptr, DB_AUTO_COMMIT); return (rc == 0); } bool CDB::Rewrite(CWalletDBWrapper &dbw, const char *pszSkip) { if (dbw.IsDummy()) { return true; } CDBEnv *env = dbw.env; const std::string &strFile = dbw.strFile; while (true) { { LOCK(env->cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); bool fSuccess = true; LogPrintf("CDB::Rewrite: Rewriting %s...\n", strFile); std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} CDB db(dbw, "r"); Db *pdbCopy = new Db(env->dbenv, 0); int ret = pdbCopy->open(nullptr, // Txn pointer strFileRes.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf( "CDB::Rewrite: Can't create database file %s\n", strFileRes); fSuccess = false; } Dbc *pcursor = db.GetCursor(); if (pcursor) while (fSuccess) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue); if (ret1 == DB_NOTFOUND) { pcursor->close(); break; } if (ret1 != 0) { pcursor->close(); fSuccess = false; break; } if (pszSkip && strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0) { continue; } if (strncmp(ssKey.data(), "\x07version", 8) == 0) { // Update version: ssValue.clear(); ssValue << CLIENT_VERSION; } Dbt datKey(ssKey.data(), ssKey.size()); Dbt datValue(ssValue.data(), ssValue.size()); int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } if (fSuccess) { db.Close(); env->CloseDb(strFile); if (pdbCopy->close(0)) { fSuccess = false; } delete pdbCopy; } } if (fSuccess) { Db dbA(env->dbenv, 0); if (dbA.remove(strFile.c_str(), nullptr, 0)) { fSuccess = false; } Db dbB(env->dbenv, 0); if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0)) { fSuccess = false; } } if (!fSuccess) { LogPrintf( "CDB::Rewrite: Failed to rewrite database file %s\n", strFileRes); } return fSuccess; } } MilliSleep(100); } return false; } void CDBEnv::Flush(bool fShutdown) { int64_t nStart = GetTimeMillis(); // Flush log data to the actual data file on all files that are not in use LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? 
"" : " database not started"); if (!fDbEnvInit) { return; } { LOCK(cs_db); std::map::iterator mi = mapFileUseCount.begin(); while (mi != mapFileUseCount.end()) { std::string strFile = (*mi).first; int nRefCount = (*mi).second; LogPrint(BCLog::DB, "CDBEnv::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount); if (nRefCount == 0) { // Move log data to the dat file CloseDb(strFile); LogPrint(BCLog::DB, "CDBEnv::Flush: %s checkpoint\n", strFile); dbenv->txn_checkpoint(0, 0, 0); LogPrint(BCLog::DB, "CDBEnv::Flush: %s detach\n", strFile); if (!fMockDb) { dbenv->lsn_reset(strFile.c_str(), 0); } LogPrint(BCLog::DB, "CDBEnv::Flush: %s closed\n", strFile); mapFileUseCount.erase(mi++); } else { mi++; } } LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart); if (fShutdown) { char **listp; if (mapFileUseCount.empty()) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(); if (!fMockDb) { fs::remove_all(fs::path(strPath) / "database"); } } } } } bool CDB::PeriodicFlush(CWalletDBWrapper &dbw) { if (dbw.IsDummy()) { return true; } bool ret = false; CDBEnv *env = dbw.env; const std::string &strFile = dbw.strFile; TRY_LOCK(bitdb.cs_db, lockDb); if (lockDb) { // Don't do this if any databases are in use int nRefCount = 0; std::map::iterator mit = env->mapFileUseCount.begin(); while (mit != env->mapFileUseCount.end()) { nRefCount += (*mit).second; mit++; } if (nRefCount == 0) { boost::this_thread::interruption_point(); std::map::iterator mi = env->mapFileUseCount.find(strFile); if (mi != env->mapFileUseCount.end()) { LogPrint(BCLog::DB, "Flushing %s\n", strFile); int64_t nStart = GetTimeMillis(); // Flush wallet file so it's self contained env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(mi++); LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart); ret = true; } } } return ret; } bool CWalletDBWrapper::Rewrite(const char *pszSkip) { return CDB::Rewrite(*this, pszSkip); } bool CWalletDBWrapper::Backup(const std::string &strDest) { if (IsDummy()) { return false; } while (true) { { LOCK(env->cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); // Copy wallet file. fs::path pathSrc = GetDataDir() / strFile; fs::path pathDest(strDest); if (fs::is_directory(pathDest)) { pathDest /= strFile; } try { if (fs::equivalent(pathSrc, pathDest)) { LogPrintf("cannot backup to wallet source file %s\n", pathDest.string()); return false; } fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists); LogPrintf("copied %s to %s\n", strFile, pathDest.string()); return true; } catch (const fs::filesystem_error &e) { LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), e.what()); return false; } } } MilliSleep(100); } return false; } void CWalletDBWrapper::Flush(bool shutdown) { if (!IsDummy()) { env->Flush(shutdown); } } diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 55560ab6e8..d41d790563 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -1,1331 +1,1331 @@ // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "base58.h" #include "chain.h" #include "core_io.h" #include "dstencode.h" #include "init.h" #include "merkleblock.h" #include "rpc/server.h" #include "rpcwallet.h" #include "script/script.h" #include "script/standard.h" #include "sync.h" #include "util.h" #include "utiltime.h" #include "validation.h" #include "wallet.h" #include #include #include #include #include #include static std::string EncodeDumpTime(int64_t nTime) { return DateTimeStrFormat("%Y-%m-%dT%H:%M:%SZ", nTime); } static int64_t DecodeDumpTime(const std::string &str) { static const boost::posix_time::ptime epoch = boost::posix_time::from_time_t(0); static const std::locale loc( std::locale::classic(), new boost::posix_time::time_input_facet("%Y-%m-%dT%H:%M:%SZ")); std::istringstream iss(str); iss.imbue(loc); boost::posix_time::ptime ptime(boost::date_time::not_a_date_time); iss >> ptime; if (ptime.is_not_a_date_time()) return 0; return (ptime - epoch).total_seconds(); } static std::string EncodeDumpString(const std::string &str) { std::stringstream ret; for (uint8_t c : str) { if (c <= 32 || c >= 128 || c == '%') { ret << '%' << HexStr(&c, &c + 1); } else { ret << c; } } return ret.str(); } std::string DecodeDumpString(const std::string &str) { std::stringstream ret; for (unsigned int pos = 0; pos < str.length(); pos++) { uint8_t c = str[pos]; if (c == '%' && pos + 2 < str.length()) { c = (((str[pos + 1] >> 6) * 9 + ((str[pos + 1] - '0') & 15)) << 4) | ((str[pos + 2] >> 6) * 9 + ((str[pos + 2] - '0') & 15)); pos += 2; } ret << c; } return ret.str(); } UniValue importprivkey(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() < 1 || request.params.size() > 3) throw std::runtime_error( "importprivkey \"bitcoinprivkey\" ( \"label\" ) ( rescan )\n" "\nAdds a private key (as returned by dumpprivkey) to your " "wallet.\n" "\nArguments:\n" "1. \"bitcoinprivkey\" (string, required) The private key (see " "dumpprivkey)\n" "2. \"label\" (string, optional, default=\"\") An " "optional label\n" "3. 
rescan (boolean, optional, default=true) Rescan " "the wallet for transactions\n" "\nNote: This call can take minutes to complete if rescan is " "true.\n" "\nExamples:\n" "\nDump a private key\n" + HelpExampleCli("dumpprivkey", "\"myaddress\"") + "\nImport the private key with rescan\n" + HelpExampleCli("importprivkey", "\"mykey\"") + "\nImport using a label and without rescan\n" + HelpExampleCli("importprivkey", "\"mykey\" \"testing\" false") + "\nImport using default blank label and without rescan\n" + HelpExampleCli("importprivkey", "\"mykey\" \"\" false") + "\nAs a JSON-RPC call\n" + HelpExampleRpc("importprivkey", "\"mykey\", \"testing\", false")); LOCK2(cs_main, pwallet->cs_wallet); EnsureWalletIsUnlocked(pwallet); std::string strSecret = request.params[0].get_str(); std::string strLabel = ""; if (request.params.size() > 1) { strLabel = request.params[1].get_str(); } // Whether to perform rescan after import bool fRescan = true; if (request.params.size() > 2) { fRescan = request.params[2].get_bool(); } if (fRescan && fPruneMode) { throw JSONRPCError(RPC_WALLET_ERROR, "Rescan is disabled in pruned mode"); } CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(strSecret); if (!fGood) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key encoding"); } CKey key = vchSecret.GetKey(); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Private key outside allowed range"); } CPubKey pubkey = key.GetPubKey(); assert(key.VerifyPubKey(pubkey)); CKeyID vchAddress = pubkey.GetID(); { pwallet->MarkDirty(); pwallet->SetAddressBook(vchAddress, strLabel, "receive"); // Don't throw error in case a key is already there if (pwallet->HaveKey(vchAddress)) { return NullUniValue; } pwallet->mapKeyMetadata[vchAddress].nCreateTime = 1; if (!pwallet->AddKeyPubKey(key, pubkey)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet"); } // whenever a key is imported, we need to scan the whole chain pwallet->UpdateTimeFirstKey(1); if (fRescan) { pwallet->ScanForWalletTransactions(chainActive.Genesis(), true); } } return NullUniValue; } void ImportAddress(CWallet *, const CTxDestination &dest, const std::string &strLabel); void ImportScript(CWallet *const pwallet, const CScript &script, const std::string &strLabel, bool isRedeemScript) { if (!isRedeemScript && ::IsMine(*pwallet, script) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the " "private key for this address or " "script"); } pwallet->MarkDirty(); if (!pwallet->HaveWatchOnly(script) && !pwallet->AddWatchOnly(script, 0 /* nCreateTime */)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } if (isRedeemScript) { if (!pwallet->HaveCScript(script) && !pwallet->AddCScript(script)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding p2sh redeemScript to wallet"); } ImportAddress(pwallet, CScriptID(script), strLabel); } else { CTxDestination destination; if (ExtractDestination(script, destination)) { pwallet->SetAddressBook(destination, strLabel, "receive"); } } } void ImportAddress(CWallet *const pwallet, const CTxDestination &dest, const std::string &strLabel) { CScript script = GetScriptForDestination(dest); ImportScript(pwallet, script, strLabel, false); // add to address book or update label if (IsValidDestination(dest)) { pwallet->SetAddressBook(dest, strLabel, "receive"); } } UniValue importaddress(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if 
(!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) { throw std::runtime_error( "importaddress \"address\" ( \"label\" rescan p2sh )\n" "\nAdds a script (in hex) or address that can be watched as if it " "were in your wallet but cannot be used to spend.\n" "\nArguments:\n" "1. \"script\" (string, required) The hex-encoded script " "(or address)\n" "2. \"label\" (string, optional, default=\"\") An " "optional label\n" "3. rescan (boolean, optional, default=true) Rescan " "the wallet for transactions\n" "4. p2sh (boolean, optional, default=false) Add " "the P2SH version of the script as well\n" "\nNote: This call can take minutes to complete if rescan is " "true.\n" "If you have the full public key, you should call importpubkey " "instead of this.\n" "\nNote: If you import a non-standard raw script in hex form, " "outputs sending to it will be treated\n" "as change, and not show up in many RPCs.\n" "\nExamples:\n" "\nImport a script with rescan\n" + HelpExampleCli("importaddress", "\"myscript\"") + "\nImport using a label without rescan\n" + HelpExampleCli("importaddress", "\"myscript\" \"testing\" false") + "\nAs a JSON-RPC call\n" + HelpExampleRpc("importaddress", "\"myscript\", \"testing\", false")); } std::string strLabel = ""; if (request.params.size() > 1) { strLabel = request.params[1].get_str(); } // Whether to perform rescan after import bool fRescan = true; if (request.params.size() > 2) { fRescan = request.params[2].get_bool(); } if (fRescan && fPruneMode) { throw JSONRPCError(RPC_WALLET_ERROR, "Rescan is disabled in pruned mode"); } // Whether to import a p2sh version, too bool fP2SH = false; if (request.params.size() > 3) { fP2SH = request.params[3].get_bool(); } LOCK2(cs_main, pwallet->cs_wallet); CTxDestination dest = DecodeDestination(request.params[0].get_str()); if (IsValidDestination(dest)) { if (fP2SH) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Cannot use the p2sh flag with an address - use " "a script instead"); } ImportAddress(pwallet, dest, strLabel); } else if (IsHex(request.params[0].get_str())) { std::vector data(ParseHex(request.params[0].get_str())); ImportScript(pwallet, CScript(data.begin(), data.end()), strLabel, fP2SH); } else { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address or script"); } if (fRescan) { pwallet->ScanForWalletTransactions(chainActive.Genesis(), true); pwallet->ReacceptWalletTransactions(); } return NullUniValue; } UniValue importprunedfunds(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() != 2) { throw std::runtime_error( "importprunedfunds\n" "\nImports funds without rescan. Corresponding address or script " "must previously be included in wallet. Aimed towards pruned " "wallets. The end-user is responsible to import additional " "transactions that subsequently spend the imported outputs or " "rescan after the point in the blockchain the transaction is " "included.\n" "\nArguments:\n" "1. \"rawtransaction\" (string, required) A raw transaction in hex " "funding an already-existing address in wallet\n" "2. 
\"txoutproof\" (string, required) The hex output from " "gettxoutproof that contains the transaction\n"); } CMutableTransaction tx; if (!DecodeHexTx(tx, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); } uint256 txid = tx.GetId(); CWalletTx wtx(pwallet, MakeTransactionRef(std::move(tx))); CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION); CMerkleBlock merkleBlock; ssMB >> merkleBlock; // Search partial merkle tree in proof for our transaction and index in // valid block std::vector vMatch; std::vector vIndex; unsigned int txnIndex = 0; if (merkleBlock.txn.ExtractMatches(vMatch, vIndex) == merkleBlock.header.hashMerkleRoot) { LOCK(cs_main); if (!mapBlockIndex.count(merkleBlock.header.GetHash()) || !chainActive.Contains( mapBlockIndex[merkleBlock.header.GetHash()])) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found in chain"); } std::vector::const_iterator it; if ((it = std::find(vMatch.begin(), vMatch.end(), txid)) == vMatch.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction given doesn't exist in proof"); } txnIndex = vIndex[it - vMatch.begin()]; } else { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Something wrong with merkleblock"); } wtx.nIndex = txnIndex; wtx.hashBlock = merkleBlock.header.GetHash(); LOCK2(cs_main, pwallet->cs_wallet); if (pwallet->IsMine(wtx)) { pwallet->AddToWallet(wtx, false); return NullUniValue; } throw JSONRPCError( RPC_INVALID_ADDRESS_OR_KEY, "No addresses in wallet correspond to included transaction"); } UniValue removeprunedfunds(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "removeprunedfunds \"txid\"\n" "\nDeletes the specified transaction from the wallet. Meant for " "use with pruned wallets and as a companion to importprunedfunds. " "This will effect wallet balances.\n" "\nArguments:\n" "1. \"txid\" (string, required) The hex-encoded id of " "the transaction you are deleting\n" "\nExamples:\n" + HelpExampleCli("removeprunedfunds", "\"a8d0c0184dde994a09ec054286f1" "ce581bebf46446a512166eae762873" "4ea0a5\"") + "\nAs a JSON-RPC call\n" + HelpExampleRpc("removprunedfunds", "\"a8d0c0184dde994a09ec054286f1c" "e581bebf46446a512166eae7628734e" "a0a5\"")); } LOCK2(cs_main, pwallet->cs_wallet); uint256 hash; hash.SetHex(request.params[0].get_str()); std::vector vHash; vHash.push_back(hash); std::vector vHashOut; if (pwallet->ZapSelectTx(vHash, vHashOut) != DB_LOAD_OK) { throw JSONRPCError(RPC_WALLET_ERROR, "Could not properly delete the transaction."); } if (vHashOut.empty()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Transaction does not exist in wallet."); } return NullUniValue; } UniValue importpubkey(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) { throw std::runtime_error( "importpubkey \"pubkey\" ( \"label\" rescan )\n" "\nAdds a public key (in hex) that can be watched as if it were in " "your wallet but cannot be used to spend.\n" "\nArguments:\n" "1. \"pubkey\" (string, required) The hex-encoded public " "key\n" "2. \"label\" (string, optional, default=\"\") An " "optional label\n" "3. 
UniValue importpubkey(const Config &config, const JSONRPCRequest &request) {
    CWallet *const pwallet = GetWalletForJSONRPCRequest(request);
    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() < 1 ||
        request.params.size() > 4) {
        throw std::runtime_error(
            "importpubkey \"pubkey\" ( \"label\" rescan )\n"
            "\nAdds a public key (in hex) that can be watched as if it were in "
            "your wallet but cannot be used to spend.\n"
            "\nArguments:\n"
            "1. \"pubkey\" (string, required) The hex-encoded public "
            "key\n"
            "2. \"label\" (string, optional, default=\"\") An "
            "optional label\n"
            "3. rescan (boolean, optional, default=true) Rescan "
            "the wallet for transactions\n"
            "\nNote: This call can take minutes to complete if rescan is "
            "true.\n"
            "\nExamples:\n"
            "\nImport a public key with rescan\n" +
            HelpExampleCli("importpubkey", "\"mypubkey\"") +
            "\nImport using a label without rescan\n" +
            HelpExampleCli("importpubkey", "\"mypubkey\" \"testing\" false") +
            "\nAs a JSON-RPC call\n" +
            HelpExampleRpc("importpubkey", "\"mypubkey\", \"testing\", false"));
    }

    std::string strLabel = "";
    if (request.params.size() > 1) {
        strLabel = request.params[1].get_str();
    }

    // Whether to perform rescan after import
    bool fRescan = true;
    if (request.params.size() > 2) {
        fRescan = request.params[2].get_bool();
    }

    if (fRescan && fPruneMode) {
        throw JSONRPCError(RPC_WALLET_ERROR,
                           "Rescan is disabled in pruned mode");
    }

    if (!IsHex(request.params[0].get_str())) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                           "Pubkey must be a hex string");
    }
    std::vector<uint8_t> data(ParseHex(request.params[0].get_str()));
    CPubKey pubKey(data.begin(), data.end());
    if (!pubKey.IsFullyValid()) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                           "Pubkey is not a valid public key");
    }

    LOCK2(cs_main, pwallet->cs_wallet);

    ImportAddress(pwallet, pubKey.GetID(), strLabel);
    ImportScript(pwallet, GetScriptForRawPubKey(pubKey), strLabel, false);

    if (fRescan) {
        pwallet->ScanForWalletTransactions(chainActive.Genesis(), true);
        pwallet->ReacceptWalletTransactions();
    }

    return NullUniValue;
}

UniValue importwallet(const Config &config, const JSONRPCRequest &request) {
    CWallet *const pwallet = GetWalletForJSONRPCRequest(request);
    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 1) {
        throw std::runtime_error(
            "importwallet \"filename\"\n"
            "\nImports keys from a wallet dump file (see dumpwallet).\n"
            "\nArguments:\n"
            "1. 
\"filename\" (string, required) The wallet file\n" "\nExamples:\n" "\nDump the wallet\n" + HelpExampleCli("dumpwallet", "\"test\"") + "\nImport the wallet\n" + HelpExampleCli("importwallet", "\"test\"") + "\nImport using the json rpc call\n" + HelpExampleRpc("importwallet", "\"test\"")); } if (fPruneMode) { throw JSONRPCError(RPC_WALLET_ERROR, "Importing wallets is disabled in pruned mode"); } LOCK2(cs_main, pwallet->cs_wallet); EnsureWalletIsUnlocked(pwallet); std::ifstream file; file.open(request.params[0].get_str().c_str(), std::ios::in | std::ios::ate); if (!file.is_open()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file"); } int64_t nTimeBegin = chainActive.Tip()->GetBlockTime(); bool fGood = true; int64_t nFilesize = std::max((int64_t)1, (int64_t)file.tellg()); file.seekg(0, file.beg); // show progress dialog in GUI pwallet->ShowProgress(_("Importing..."), 0); while (file.good()) { pwallet->ShowProgress( "", std::max(1, std::min(99, (int)(((double)file.tellg() / (double)nFilesize) * 100)))); std::string line; std::getline(file, line); if (line.empty() || line[0] == '#') { continue; } std::vector vstr; boost::split(vstr, line, boost::is_any_of(" ")); if (vstr.size() < 2) { continue; } CBitcoinSecret vchSecret; if (!vchSecret.SetString(vstr[0])) { continue; } CKey key = vchSecret.GetKey(); CPubKey pubkey = key.GetPubKey(); assert(key.VerifyPubKey(pubkey)); CKeyID keyid = pubkey.GetID(); if (pwallet->HaveKey(keyid)) { LogPrintf("Skipping import of %s (key already present)\n", EncodeDestination(keyid)); continue; } int64_t nTime = DecodeDumpTime(vstr[1]); std::string strLabel; bool fLabel = true; for (unsigned int nStr = 2; nStr < vstr.size(); nStr++) { if (boost::algorithm::starts_with(vstr[nStr], "#")) { break; } if (vstr[nStr] == "change=1") { fLabel = false; } if (vstr[nStr] == "reserve=1") { fLabel = false; } if (boost::algorithm::starts_with(vstr[nStr], "label=")) { strLabel = DecodeDumpString(vstr[nStr].substr(6)); fLabel = true; } } LogPrintf("Importing %s...\n", EncodeDestination(keyid)); if (!pwallet->AddKeyPubKey(key, pubkey)) { fGood = false; continue; } pwallet->mapKeyMetadata[keyid].nCreateTime = nTime; if (fLabel) { pwallet->SetAddressBook(keyid, strLabel, "receive"); } nTimeBegin = std::min(nTimeBegin, nTime); } file.close(); // hide progress dialog in GUI pwallet->ShowProgress("", 100); pwallet->UpdateTimeFirstKey(nTimeBegin); CBlockIndex *pindex = chainActive.FindEarliestAtLeast(nTimeBegin - TIMESTAMP_WINDOW); LogPrintf("Rescanning last %i blocks\n", chainActive.Height() - pindex->nHeight + 1); pwallet->ScanForWalletTransactions(pindex); pwallet->MarkDirty(); if (!fGood) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding some keys to wallet"); } return NullUniValue; } UniValue dumpprivkey(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "dumpprivkey \"address\"\n" "\nReveals the private key corresponding to 'address'.\n" "Then the importprivkey can be used with this output\n" "\nArguments:\n" "1. 
\"address\" (string, required) The bitcoin address for the " "private key\n" "\nResult:\n" "\"key\" (string) The private key\n" "\nExamples:\n" + HelpExampleCli("dumpprivkey", "\"myaddress\"") + HelpExampleCli("importprivkey", "\"mykey\"") + HelpExampleRpc("dumpprivkey", "\"myaddress\"")); } LOCK2(cs_main, pwallet->cs_wallet); EnsureWalletIsUnlocked(pwallet); std::string strAddress = request.params[0].get_str(); CTxDestination dest = DecodeDestination(strAddress); if (!IsValidDestination(dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address"); } const CKeyID *keyID = boost::get(&dest); if (!keyID) { throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to a key"); } CKey vchSecret; if (!pwallet->GetKey(*keyID, vchSecret)) { throw JSONRPCError(RPC_WALLET_ERROR, "Private key for address " + strAddress + " is not known"); } return CBitcoinSecret(vchSecret).ToString(); } UniValue dumpwallet(const Config &config, const JSONRPCRequest &request) { CWallet *const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { return NullUniValue; } if (request.fHelp || request.params.size() != 1) throw std::runtime_error( "dumpwallet \"filename\"\n" "\nDumps all wallet keys in a human-readable format.\n" "\nArguments:\n" "1. \"filename\" (string, required) The filename\n" "\nExamples:\n" + HelpExampleCli("dumpwallet", "\"test\"") + HelpExampleRpc("dumpwallet", "\"test\"")); LOCK2(cs_main, pwallet->cs_wallet); EnsureWalletIsUnlocked(pwallet); std::ofstream file; file.open(request.params[0].get_str().c_str()); if (!file.is_open()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file"); } std::map mapKeyBirth; std::set setKeyPool; pwallet->GetKeyBirthTimes(mapKeyBirth); pwallet->GetAllReserveKeys(setKeyPool); // sort time/key pairs std::vector> vKeyBirth; for (const auto &entry : mapKeyBirth) { if (const CKeyID *keyID = boost::get(&entry.first)) { // set and test vKeyBirth.push_back(std::make_pair(entry.second, *keyID)); } } mapKeyBirth.clear(); std::sort(vKeyBirth.begin(), vKeyBirth.end()); // produce output file << strprintf("# Wallet dump created by Bitcoin %s\n", CLIENT_BUILD); file << strprintf("# * Created on %s\n", EncodeDumpTime(GetTime())); file << strprintf("# * Best block at time of backup was %i (%s),\n", chainActive.Height(), chainActive.Tip()->GetBlockHash().ToString()); file << strprintf("# mined on %s\n", EncodeDumpTime(chainActive.Tip()->GetBlockTime())); file << "\n"; // add the base58check encoded extended master if the wallet uses HD CKeyID masterKeyID = pwallet->GetHDChain().masterKeyID; if (!masterKeyID.IsNull()) { CKey key; if (pwallet->GetKey(masterKeyID, key)) { CExtKey masterKey; masterKey.SetMaster(key.begin(), key.size()); CBitcoinExtKey b58extkey; b58extkey.SetKey(masterKey); file << "# extended private masterkey: " << b58extkey.ToString() << "\n\n"; } } for (std::vector>::const_iterator it = vKeyBirth.begin(); it != vKeyBirth.end(); it++) { const CKeyID &keyid = it->second; std::string strTime = EncodeDumpTime(it->first); std::string strAddr = EncodeDestination(keyid); CKey key; if (pwallet->GetKey(keyid, key)) { file << strprintf("%s %s ", CBitcoinSecret(key).ToString(), strTime); if (pwallet->mapAddressBook.count(keyid)) { file << strprintf( "label=%s", EncodeDumpString(pwallet->mapAddressBook[keyid].name)); } else if (keyid == masterKeyID) { file << "hdmaster=1"; } else if (setKeyPool.count(keyid)) { file << "reserve=1"; } else if (pwallet->mapKeyMetadata[keyid].hdKeypath == 
"m") { file << "inactivehdmaster=1"; } else { file << "change=1"; } file << strprintf( " # addr=%s%s\n", strAddr, (pwallet->mapKeyMetadata[keyid].hdKeypath.size() > 0 ? " hdkeypath=" + pwallet->mapKeyMetadata[keyid].hdKeypath : "")); } } file << "\n"; file << "# End of dump\n"; file.close(); return NullUniValue; } UniValue ProcessImport(CWallet *const pwallet, const UniValue &data, const int64_t timestamp) { try { bool success = false; // Required fields. const UniValue &scriptPubKey = data["scriptPubKey"]; // Should have script or JSON with "address". if (!(scriptPubKey.getType() == UniValue::VOBJ && scriptPubKey.exists("address")) && !(scriptPubKey.getType() == UniValue::VSTR)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid scriptPubKey"); } // Optional fields. const std::string &strRedeemScript = data.exists("redeemscript") ? data["redeemscript"].get_str() : ""; const UniValue &pubKeys = data.exists("pubkeys") ? data["pubkeys"].get_array() : UniValue(); const UniValue &keys = data.exists("keys") ? data["keys"].get_array() : UniValue(); const bool &internal = data.exists("internal") ? data["internal"].get_bool() : false; const bool &watchOnly = data.exists("watchonly") ? data["watchonly"].get_bool() : false; const std::string &label = data.exists("label") && !internal ? data["label"].get_str() : ""; bool isScript = scriptPubKey.getType() == UniValue::VSTR; bool isP2SH = strRedeemScript.length() > 0; const std::string &output = isScript ? scriptPubKey.get_str() : scriptPubKey["address"].get_str(); // Parse the output. CScript script; CTxDestination dest; if (!isScript) { dest = DecodeDestination(output); if (!IsValidDestination(dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address"); } script = GetScriptForDestination(dest); } else { if (!IsHex(output)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid scriptPubKey"); } std::vector vData(ParseHex(output)); script = CScript(vData.begin(), vData.end()); } // Watchonly and private keys if (watchOnly && keys.size()) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Incompatibility found between watchonly and keys"); } // Internal + Label if (internal && data.exists("label")) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Incompatibility found between internal and label"); } // Not having Internal + Script if (!internal && isScript) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Internal must be set for hex scriptPubKey"); } // Keys / PubKeys size check. if (!isP2SH && (keys.size() > 1 || pubKeys.size() > 1)) { // Address / scriptPubKey throw JSONRPCError(RPC_INVALID_PARAMETER, "More than private key given for one address"); } // Invalid P2SH redeemScript if (isP2SH && !IsHex(strRedeemScript)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid redeem script"); } // Process. // // P2SH if (isP2SH) { // Import redeem script. 
            std::vector<uint8_t> vData(ParseHex(strRedeemScript));
            CScript redeemScript = CScript(vData.begin(), vData.end());

            // Invalid P2SH address
            if (!script.IsPayToScriptHash()) {
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                   "Invalid P2SH address / script");
            }

            pwallet->MarkDirty();

            if (!pwallet->AddWatchOnly(redeemScript, timestamp)) {
                throw JSONRPCError(RPC_WALLET_ERROR,
                                   "Error adding address to wallet");
            }

            if (!pwallet->HaveCScript(redeemScript) &&
                !pwallet->AddCScript(redeemScript)) {
                throw JSONRPCError(RPC_WALLET_ERROR,
                                   "Error adding p2sh redeemScript to wallet");
            }

            CTxDestination redeem_dest = CScriptID(redeemScript);
            CScript redeemDestination = GetScriptForDestination(redeem_dest);

            if (::IsMine(*pwallet, redeemDestination) == ISMINE_SPENDABLE) {
                throw JSONRPCError(RPC_WALLET_ERROR,
                                   "The wallet already contains the private "
                                   "key for this address or script");
            }

            pwallet->MarkDirty();

            if (!pwallet->AddWatchOnly(redeemDestination, timestamp)) {
                throw JSONRPCError(RPC_WALLET_ERROR,
                                   "Error adding address to wallet");
            }

            // add to address book or update label
            if (IsValidDestination(dest)) {
                pwallet->SetAddressBook(dest, label, "receive");
            }

            // Import private keys.
            if (keys.size()) {
                for (size_t i = 0; i < keys.size(); i++) {
                    const std::string &privkey = keys[i].get_str();

                    CBitcoinSecret vchSecret;
                    bool fGood = vchSecret.SetString(privkey);

                    if (!fGood) {
                        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                           "Invalid private key encoding");
                    }

                    CKey key = vchSecret.GetKey();
                    if (!key.IsValid()) {
                        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                           "Private key outside allowed range");
                    }

                    CPubKey pubkey = key.GetPubKey();
                    assert(key.VerifyPubKey(pubkey));

                    CKeyID vchAddress = pubkey.GetID();
                    pwallet->MarkDirty();
                    pwallet->SetAddressBook(vchAddress, label, "receive");

                    if (pwallet->HaveKey(vchAddress)) {
                        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                           "Already have this key");
                    }

                    pwallet->mapKeyMetadata[vchAddress].nCreateTime = timestamp;

                    if (!pwallet->AddKeyPubKey(key, pubkey)) {
                        throw JSONRPCError(RPC_WALLET_ERROR,
                                           "Error adding key to wallet");
                    }

                    pwallet->UpdateTimeFirstKey(timestamp);
                }
            }

            success = true;
        } else {
            // Import public keys.
            if (pubKeys.size() && keys.size() == 0) {
                const std::string &strPubKey = pubKeys[0].get_str();

                if (!IsHex(strPubKey)) {
                    throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                       "Pubkey must be a hex string");
                }

                std::vector<uint8_t> vData(ParseHex(strPubKey));
                CPubKey pubKey(vData.begin(), vData.end());

                if (!pubKey.IsFullyValid()) {
                    throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                       "Pubkey is not a valid public key");
                }

                CTxDestination pubkey_dest = pubKey.GetID();

                // Consistency check.
                if (!isScript && !(pubkey_dest == dest)) {
                    throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                       "Consistency check failed");
                }

                // Consistency check.
                if (isScript) {
                    CTxDestination destination;

                    if (ExtractDestination(script, destination)) {
                        if (!(destination == pubkey_dest)) {
                            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                               "Consistency check failed");
                        }
                    }
                }

                CScript pubKeyScript = GetScriptForDestination(pubkey_dest);

                if (::IsMine(*pwallet, pubKeyScript) == ISMINE_SPENDABLE) {
                    throw JSONRPCError(RPC_WALLET_ERROR,
                                       "The wallet already contains the "
                                       "private key for this address "
                                       "or script");
                }

                pwallet->MarkDirty();

                if (!pwallet->AddWatchOnly(pubKeyScript, timestamp)) {
                    throw JSONRPCError(RPC_WALLET_ERROR,
                                       "Error adding address to wallet");
                }

                // add to address book or update label
                if (IsValidDestination(pubkey_dest)) {
                    pwallet->SetAddressBook(pubkey_dest, label, "receive");
                }

                // TODO Is this necessary?
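                // (Watching the raw-pubkey (P2PK) script in addition to the
                // P2PKH script added above presumably lets the wallet detect
                // outputs that pay the bare public key as well; hence the
                // TODO above.)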
CScript scriptRawPubKey = GetScriptForRawPubKey(pubKey); if (::IsMine(*pwallet, scriptRawPubKey) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already " "contains the private " "key for this address " "or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(scriptRawPubKey, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } success = true; } // Import private keys. if (keys.size()) { const std::string &strPrivkey = keys[0].get_str(); // Checks. CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(strPrivkey); if (!fGood) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key encoding"); } CKey key = vchSecret.GetKey(); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Private key outside allowed range"); } CPubKey pubKey = key.GetPubKey(); assert(key.VerifyPubKey(pubKey)); CTxDestination pubkey_dest = pubKey.GetID(); // Consistency check. if (!isScript && !(pubkey_dest == dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } // Consistency check. if (isScript) { CTxDestination destination; if (ExtractDestination(script, destination)) { if (!(destination == pubkey_dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } } } CKeyID vchAddress = pubKey.GetID(); pwallet->MarkDirty(); pwallet->SetAddressBook(vchAddress, label, "receive"); if (pwallet->HaveKey(vchAddress)) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already " "contains the private " "key for this address " "or script"); } pwallet->mapKeyMetadata[vchAddress].nCreateTime = timestamp; if (!pwallet->AddKeyPubKey(key, pubKey)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet"); } pwallet->UpdateTimeFirstKey(timestamp); success = true; } // Import scriptPubKey only. if (pubKeys.size() == 0 && keys.size() == 0) { if (::IsMine(*pwallet, script) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already " "contains the private " "key for this address " "or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(script, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } if (scriptPubKey.getType() == UniValue::VOBJ) { // add to address book or update label if (IsValidDestination(dest)) { pwallet->SetAddressBook(dest, label, "receive"); } } success = true; } } UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(success)); return result; } catch (const UniValue &e) { UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(false)); result.pushKV("error", e); return result; } catch (...) { UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(false)); result.pushKV("error", JSONRPCError(RPC_MISC_ERROR, "Missing required fields")); return result; } } int64_t GetImportTimestamp(const UniValue &data, int64_t now) { if (data.exists("timestamp")) { const UniValue ×tamp = data["timestamp"]; if (timestamp.isNum()) { return timestamp.get_int64(); } else if (timestamp.isStr() && timestamp.get_str() == "now") { return now; } throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Expected number or \"now\" timestamp " "value for key. 
got type %s", uvTypeName(timestamp.type()))); } throw JSONRPCError(RPC_TYPE_ERROR, "Missing required timestamp field for key"); } UniValue importmulti(const Config &config, const JSONRPCRequest &mainRequest) { CWallet *const pwallet = GetWalletForJSONRPCRequest(mainRequest); if (!EnsureWalletIsAvailable(pwallet, mainRequest.fHelp)) { return NullUniValue; } // clang-format off if (mainRequest.fHelp || mainRequest.params.size() < 1 || mainRequest.params.size() > 2) { throw std::runtime_error( "importmulti \"requests\" \"options\"\n\n" "Import addresses/scripts (with private or public keys, redeem script (P2SH)), rescanning all addresses in one-shot-only (rescan can be disabled via options).\n\n" "Arguments:\n" "1. requests (array, required) Data to be imported\n" " [ (array of json objects)\n" " {\n" " \"scriptPubKey\": \"