diff --git a/doc/release-notes.md b/doc/release-notes.md
index 8bb899988..e1e42e029 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,10 +1,11 @@
Bitcoin ABC version 0.17.2 is now available from:
This release includes the following features and fixes:
- Remove deprecated `estimatepriority` RPC.
- Remove deprecated `estimatesmartpriority` RPC.
- Remove support for `-sendfreetransactions`.
- Remove unstable `estimatesmartfee` RPC.
 - Update Berkeley DB to 5.3 minimum. Developers should update their build environment accordingly.
 + - Remove `-incrementalrelayfee` option.
diff --git a/src/init.cpp b/src/init.cpp
index 5cf826e80..f8b997965 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,2325 +1,2302 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif
#include "init.h"
#include "addrman.h"
#include "amount.h"
#include "chain.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "compat/sanity.h"
#include "config.h"
#include "consensus/validation.h"
#include "fs.h"
#include "httprpc.h"
#include "httpserver.h"
#include "key.h"
#include "miner.h"
#include "net.h"
#include "net_processing.h"
#include "netbase.h"
#include "policy/policy.h"
#include "rpc/register.h"
#include "rpc/server.h"
#include "scheduler.h"
#include "script/scriptcache.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "timedata.h"
#include "torcontrol.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validation.h"
#include "validationinterface.h"
#ifdef ENABLE_WALLET
#include "wallet/rpcdump.h"
#include "wallet/wallet.h"
#endif
#include "warnings.h"
#include
#include
#include
#ifndef WIN32
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#if ENABLE_ZMQ
#include "zmq/zmqnotificationinterface.h"
#endif
// Whether fee-estimate state was initialized at startup, so Shutdown() knows
// it has estimates worth writing back to fee_estimates.dat.
bool fFeeEstimatesInitialized = false;
static const bool DEFAULT_PROXYRANDOMIZE = true;
static const bool DEFAULT_REST_ENABLE = false;
static const bool DEFAULT_DISABLE_SAFEMODE = false;
static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
// Global connection manager and peer message-processing logic; both are reset
// during Shutdown(). NOTE(review): the template arguments of these unique_ptr
// declarations appear to have been stripped in this copy (likely <CConnman>
// and <PeerLogicValidation>) — confirm against the upstream file.
std::unique_ptr g_connman;
std::unique_ptr peerLogic;
#if ENABLE_ZMQ
static CZMQNotificationInterface *pzmqNotificationInterface = nullptr;
#endif
#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing
// block files don't count towards the fd_set size limit anyway.
#define MIN_CORE_FILEDESCRIPTORS 0
#else
#define MIN_CORE_FILEDESCRIPTORS 150
#endif
/** Used to pass flags to the Bind() function */
enum BindFlags {
BF_NONE = 0,
// Bind even to addresses on networks otherwise excluded by IsLimited().
BF_EXPLICIT = (1U << 0),
// Report a bind failure to the user via InitError instead of returning
// false silently.
BF_REPORT_ERROR = (1U << 1),
// Passed through to BindListenPort as the whitelist flag for peers
// connecting on this address.
BF_WHITELIST = (1U << 2),
};
// File (relative to the data directory) used to persist fee estimates.
static const char *FEE_ESTIMATES_FILENAME = "fee_estimates.dat";
//////////////////////////////////////////////////////////////////////////////
//
// Shutdown
//
//
// Thread management and startup/shutdown:
//
// The network-processing threads are all part of a thread group created by
// AppInit() or the Qt main() function.
//
// A clean exit happens when StartShutdown() or the SIGTERM signal handler sets
// fRequestShutdown, which triggers the DetectShutdownThread(), which interrupts
// the main thread group. DetectShutdownThread() then exits, which causes
// AppInit() to continue (it .joins the shutdown thread). Shutdown() is then
// called to clean up database connections, and stop other threads that should
// only be stopped after the main network-processing threads have exited.
//
// Note that if running -daemon the parent process returns from AppInit2 before
// adding any threads to the threadGroup, so .join_all() returns immediately and
// the parent exits from main().
//
// Shutdown for Qt is very similar, only it uses a QTimer to detect
// fRequestShutdown getting set, and then does the normal Qt shutdown thing.
//
// Set by StartShutdown() and the SIGTERM handler; polled by the rest of the
// node to begin a clean shutdown. NOTE(review): the <bool> template arguments
// appear stripped from these two declarations in this copy.
std::atomic fRequestShutdown(false);
// Checked in Shutdown(): when set (and -persistmempool is enabled) the
// mempool is dumped to disk.
std::atomic fDumpMempoolLater(false);
/** Request a clean shutdown; safe to call from any thread (atomic flag). */
void StartShutdown() {
fRequestShutdown = true;
}
/** Whether a shutdown has been requested via StartShutdown()/SIGTERM. */
bool ShutdownRequested() {
return fRequestShutdown;
}
/**
 * This is a minimally invasive approach to shutdown on LevelDB read errors from
 * the chainstate, while keeping user interface out of the common library, which
 * is shared between bitcoind, and bitcoin-qt and non-server tools.
 */
class CCoinsViewErrorCatcher final : public CCoinsViewBacked {
public:
CCoinsViewErrorCatcher(CCoinsView *view) : CCoinsViewBacked(view) {}
// Forwards to the backing view's GetCoin; on a database read error it
// notifies the user, logs, and aborts rather than returning (see comment
// in the catch block for why returning false would be wrong).
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override {
try {
return CCoinsViewBacked::GetCoin(outpoint, coin);
} catch (const std::runtime_error &e) {
uiInterface.ThreadSafeMessageBox(
_("Error reading from database, shutting down."), "",
CClientUIInterface::MSG_ERROR);
LogPrintf("Error reading from database: %s\n", e.what());
// Starting the shutdown sequence and returning false to the caller
// would be interpreted as 'entry not found' (as opposed to unable
// to read data), and could lead to invalid interpretation. Just
// exit immediately, as we can't continue anyway, and all writes
// should be atomic.
abort();
}
}
// Writes do not need similar protection, as failure to write is handled by
// the caller.
};
// Chainstate database view and its error-catching wrapper; owned here and
// deleted in Shutdown() under cs_main.
static CCoinsViewDB *pcoinsdbview = nullptr;
static CCoinsViewErrorCatcher *pcoinscatcher = nullptr;
// Reset in Shutdown() just before ECC_Stop(). NOTE(review): the template
// argument appears stripped in this copy (likely <ECCVerifyHandle>) —
// confirm upstream.
static std::unique_ptr globalVerifyHandle;
/**
 * Interrupt long-running subsystems so Shutdown() can complete: the
 * HTTP/RPC/REST servers, Tor control, the connection manager (if created),
 * and all threads in the given worker group.
 */
void Interrupt(boost::thread_group &threadGroup) {
InterruptHTTPServer();
InterruptHTTPRPC();
InterruptRPC();
InterruptREST();
InterruptTorControl();
if (g_connman) g_connman->Interrupt();
threadGroup.interrupt_all();
}
/**
 * Tear the node down in dependency order: stop servers, disconnect the
 * network, persist in-memory state, release the databases, and destroy the
 * wallets. Only the first caller performs the work; concurrent callers
 * return immediately.
 */
void Shutdown() {
LogPrintf("%s: In progress...\n", __func__);
// Serialize shutdown: if another thread already holds the lock, bail out.
static CCriticalSection cs_Shutdown;
TRY_LOCK(cs_Shutdown, lockShutdown);
if (!lockShutdown) return;
/// Note: Shutdown() must be able to handle cases in which AppInit2() failed
/// part of the way, for example if the data directory was found to be
/// locked. Be sure that anything that writes files or flushes caches only
/// does this if the respective module was initialized.
RenameThread("bitcoin-shutoff");
mempool.AddTransactionsUpdated(1);
// Stop the RPC/REST/HTTP servers first so no new requests arrive while
// the rest of the node is being dismantled.
StopHTTPRPC();
StopREST();
StopRPC();
StopHTTPServer();
#ifdef ENABLE_WALLET
// First wallet flush (argument false). NOTE(review): presumably writes
// wallet state while keeping the DB environment open — confirm against
// CWallet::Flush.
for (CWalletRef pwallet : vpwallets) {
pwallet->Flush(false);
}
#endif
// Stop port mapping, then tear down peer logic and the connection manager.
MapPort(false);
UnregisterValidationInterface(peerLogic.get());
peerLogic.reset();
g_connman.reset();
StopTorControl();
UnregisterNodeSignals(GetNodeSignals());
// Persist the mempool if it was loaded earlier and -persistmempool is on.
if (fDumpMempoolLater &&
gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
DumpMempool();
}
// Write fee estimates back to disk, but only if they were initialized.
if (fFeeEstimatesInitialized) {
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK,
CLIENT_VERSION);
if (!est_fileout.IsNull())
mempool.WriteFeeEstimates(est_fileout);
else
LogPrintf("%s: Failed to write fee estimates to %s\n", __func__,
est_path.string());
fFeeEstimatesInitialized = false;
}
// Flush the chainstate and release the coins/block-index database objects
// under cs_main.
{
LOCK(cs_main);
if (pcoinsTip != nullptr) {
FlushStateToDisk();
}
delete pcoinsTip;
pcoinsTip = nullptr;
delete pcoinscatcher;
pcoinscatcher = nullptr;
delete pcoinsdbview;
pcoinsdbview = nullptr;
delete pblocktree;
pblocktree = nullptr;
}
#ifdef ENABLE_WALLET
// Second wallet flush (argument true), after validation state is gone.
for (CWalletRef pwallet : vpwallets) {
pwallet->Flush(true);
}
#endif
#if ENABLE_ZMQ
if (pzmqNotificationInterface) {
UnregisterValidationInterface(pzmqNotificationInterface);
delete pzmqNotificationInterface;
pzmqNotificationInterface = nullptr;
}
#endif
#ifndef WIN32
// Best-effort removal of the pid file; a failure is only logged.
try {
fs::remove(GetPidFile());
} catch (const fs::filesystem_error &e) {
LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what());
}
#endif
UnregisterAllValidationInterfaces();
#ifdef ENABLE_WALLET
// Destroy wallet objects last, once nothing can reference them anymore.
for (CWalletRef pwallet : vpwallets) {
delete pwallet;
}
vpwallets.clear();
#endif
globalVerifyHandle.reset();
ECC_Stop();
LogPrintf("%s: done\n", __func__);
}
/**
 * Signal handlers are very limited in what they are allowed to do, so these
 * only set flags that are polled elsewhere.
 */
void HandleSIGTERM(int) {
fRequestShutdown = true;
}
// SIGHUP: ask the logger to reopen its debug log file on the next write.
void HandleSIGHUP(int) {
GetLogger().fReopenDebugLog = true;
}
/**
 * Bind the connection manager to listen on `addr`.
 *
 * Implicit binds (no BF_EXPLICIT) are skipped on limited networks. On a
 * failed bind, BF_REPORT_ERROR surfaces the error to the user via
 * InitError; otherwise the failure is silent. BF_WHITELIST marks peers
 * connecting through this bind as whitelisted.
 *
 * @return true when the listen port was bound successfully.
 */
static bool Bind(CConnman &connman, const CService &addr, unsigned int flags) {
    const bool fExplicit = (flags & BF_EXPLICIT) != 0;
    if (!fExplicit && IsLimited(addr)) {
        return false;
    }

    const bool fWhitelisted = (flags & BF_WHITELIST) != 0;
    std::string strError;
    if (connman.BindListenPort(addr, strError, fWhitelisted)) {
        return true;
    }

    // Bind failed: optionally report it, otherwise fail quietly.
    return (flags & BF_REPORT_ERROR) ? InitError(strError) : false;
}
// Once the RPC server starts, forward block-tip notifications into the RPC
// subsystem.
void OnRPCStarted() {
uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange);
}
// On RPC stop: disconnect the notification, push one final (null) change,
// and wake any threads blocked on cvBlockChange so they can exit.
void OnRPCStopped() {
uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange);
RPCNotifyBlockChange(false, nullptr);
cvBlockChange.notify_all();
LogPrint(BCLog::RPC, "RPC stopped.\n");
}
/**
 * Pre-command RPC hook enforcing safe mode: while a warning is active,
 * commands not flagged okSafeMode are rejected, unless safe mode has been
 * disabled with -disablesafemode.
 *
 * @throws JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE) when the command is
 *         blocked by safe mode.
 */
void OnRPCPreCommand(const CRPCCommand &cmd) {
    // No active warning means safe mode is not in effect at all.
    const std::string strWarning = GetWarnings("rpc");
    if (strWarning.empty()) {
        return;
    }
    const bool fSafeModeDisabled =
        gArgs.GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE);
    if (!fSafeModeDisabled && !cmd.okSafeMode) {
        throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE,
                           std::string("Safe mode: ") + strWarning);
    }
}
/**
 * Build the complete -? / --help text, section by section (general options,
 * connection, ZMQ, debugging/testing, node relay, block creation, RPC
 * server). Debug-only entries are appended when -help-debug is set.
 * @param mode distinguishes bitcoind from other frontends (the -daemon
 *        option is shown only for HMM_BITCOIND).
 * @return the formatted usage string.
 *
 * NOTE(review): throughout this copy, the angle-bracketed argument
 * placeholders of options (e.g. "-conf=<file>", "-dbcache=<n>") and similar
 * <...> text inside help strings appear to have been stripped; confirm the
 * literal strings against the upstream file before editing them.
 */
std::string HelpMessage(HelpMessageMode mode) {
const auto defaultBaseParams =
CreateBaseChainParams(CBaseChainParams::MAIN);
const auto testnetBaseParams =
CreateBaseChainParams(CBaseChainParams::TESTNET);
const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN);
const auto testnetChainParams =
CreateChainParams(CBaseChainParams::TESTNET);
const bool showDebug = gArgs.GetBoolArg("-help-debug", false);
// When adding new options to the categories, please keep and ensure
// alphabetical ordering. Do not translate _(...) for -help-debug options:
// they use many technical terms and reach only a very small audience, so
// translating them is unnecessary stress for translators.
std::string strUsage = HelpMessageGroup(_("Options:"));
strUsage += HelpMessageOpt("-?", _("Print this help message and exit"));
strUsage += HelpMessageOpt("-version", _("Print version and exit"));
strUsage += HelpMessageOpt(
"-alertnotify=",
_("Execute command when a relevant alert is received or we see a "
"really long fork (%s in cmd is replaced by message)"));
strUsage += HelpMessageOpt("-blocknotify=",
_("Execute command when the best block changes "
"(%s in cmd is replaced by block hash)"));
if (showDebug)
strUsage += HelpMessageOpt(
"-blocksonly",
strprintf(
_("Whether to operate in a blocks only mode (default: %d)"),
DEFAULT_BLOCKSONLY));
strUsage += HelpMessageOpt(
"-assumevalid=",
strprintf(
_("If this block is in the chain assume that it and its ancestors "
"are valid and potentially skip their script verification (0 to "
"verify all, default: %s, testnet: %s)"),
defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(),
testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()));
strUsage += HelpMessageOpt(
"-conf=", strprintf(_("Specify configuration file (default: %s)"),
BITCOIN_CONF_FILENAME));
if (mode == HMM_BITCOIND) {
#if HAVE_DECL_DAEMON
strUsage += HelpMessageOpt(
"-daemon",
_("Run in the background as a daemon and accept commands"));
#endif
}
strUsage += HelpMessageOpt("-datadir=", _("Specify data directory"));
if (showDebug) {
strUsage += HelpMessageOpt(
"-dbbatchsize",
strprintf(
"Maximum database write batch size in bytes (default: %u)",
nDefaultDbBatchSize));
}
strUsage += HelpMessageOpt(
"-dbcache=",
strprintf(
_("Set database cache size in megabytes (%d to %d, default: %d)"),
nMinDbCache, nMaxDbCache, nDefaultDbCache));
if (showDebug) {
strUsage += HelpMessageOpt(
"-feefilter", strprintf("Tell other nodes to filter invs to us by "
"our mempool min fee (default: %d)",
DEFAULT_FEEFILTER));
}
strUsage += HelpMessageOpt(
"-loadblock=",
_("Imports blocks from external blk000??.dat file on startup"));
strUsage += HelpMessageOpt(
"-maxorphantx=", strprintf(_("Keep at most unconnectable "
"transactions in memory (default: %u)"),
DEFAULT_MAX_ORPHAN_TRANSACTIONS));
strUsage += HelpMessageOpt("-maxmempool=",
strprintf(_("Keep the transaction memory pool "
"below megabytes (default: %u)"),
DEFAULT_MAX_MEMPOOL_SIZE));
strUsage +=
HelpMessageOpt("-mempoolexpiry=",
strprintf(_("Do not keep transactions in the mempool "
"longer than hours (default: %u)"),
DEFAULT_MEMPOOL_EXPIRY));
if (showDebug) {
strUsage += HelpMessageOpt(
"-minimumchainwork=",
strprintf(
"Minimum work assumed to exist on a valid chain in hex "
"(default: %s, testnet: %s)",
defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(),
testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()));
}
strUsage +=
HelpMessageOpt("-persistmempool",
strprintf(_("Whether to save the mempool on shutdown "
"and load on restart (default: %u)"),
DEFAULT_PERSIST_MEMPOOL));
strUsage += HelpMessageOpt(
"-blockreconstructionextratxn=",
strprintf(_("Extra transactions to keep in memory for compact block "
"reconstructions (default: %u)"),
DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN));
strUsage += HelpMessageOpt(
"-par=",
strprintf(_("Set the number of script verification threads (%u to %d, "
"0 = auto, <0 = leave that many cores free, default: %d)"),
-GetNumCores(), MAX_SCRIPTCHECK_THREADS,
DEFAULT_SCRIPTCHECK_THREADS));
#ifndef WIN32
strUsage += HelpMessageOpt(
"-pid=",
strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME));
#endif
strUsage += HelpMessageOpt(
"-prune=",
strprintf(
_("Reduce storage requirements by enabling pruning (deleting) of "
"old blocks. This allows the pruneblockchain RPC to be called to "
"delete specific blocks, and enables automatic pruning of old "
"blocks if a target size in MiB is provided. This mode is "
"incompatible with -txindex and -rescan. "
"Warning: Reverting this setting requires re-downloading the "
"entire blockchain. "
"(default: 0 = disable pruning blocks, 1 = allow manual pruning "
"via RPC, >%u = automatically prune block files to stay under "
"the specified target size in MiB)"),
MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
strUsage += HelpMessageOpt(
"-reindex-chainstate",
_("Rebuild chain state from the currently indexed blocks"));
strUsage +=
HelpMessageOpt("-reindex", _("Rebuild chain state and block index from "
"the blk*.dat files on disk"));
#ifndef WIN32
strUsage += HelpMessageOpt(
"-sysperms",
_("Create new files with system default permissions, instead of umask "
"077 (only effective with disabled wallet functionality)"));
#endif
strUsage += HelpMessageOpt(
"-txindex", strprintf(_("Maintain a full transaction index, used by "
"the getrawtransaction rpc call (default: %d)"),
DEFAULT_TXINDEX));
strUsage += HelpMessageOpt(
"-usecashaddr", _("Use Cash Address for destination encoding instead "
"of base58 (activate by default on Jan, 14)"));
strUsage += HelpMessageGroup(_("Connection options:"));
strUsage += HelpMessageOpt(
"-addnode=",
_("Add a node to connect to and attempt to keep the connection open"));
strUsage += HelpMessageOpt(
"-banscore=",
strprintf(
_("Threshold for disconnecting misbehaving peers (default: %u)"),
DEFAULT_BANSCORE_THRESHOLD));
strUsage += HelpMessageOpt(
"-bantime=", strprintf(_("Number of seconds to keep misbehaving "
"peers from reconnecting (default: %u)"),
DEFAULT_MISBEHAVING_BANTIME));
strUsage += HelpMessageOpt("-bind=",
_("Bind to given address and always listen on "
"it. Use [host]:port notation for IPv6"));
strUsage +=
HelpMessageOpt("-connect=",
_("Connect only to the specified node(s); -noconnect or "
"-connect=0 alone to disable automatic connections"));
strUsage += HelpMessageOpt("-discover",
_("Discover own IP addresses (default: 1 when "
"listening and no -externalip or -proxy)"));
strUsage += HelpMessageOpt(
"-dns",
_("Allow DNS lookups for -addnode, -seednode and -connect") + " " +
strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP));
strUsage += HelpMessageOpt(
"-dnsseed", _("Query for peer addresses via DNS lookup, if low on "
"addresses (default: 1 unless -connect/-noconnect)"));
strUsage += HelpMessageOpt("-externalip=",
_("Specify your own public address"));
strUsage += HelpMessageOpt(
"-forcednsseed",
strprintf(
_("Always query for peer addresses via DNS lookup (default: %d)"),
DEFAULT_FORCEDNSSEED));
strUsage +=
HelpMessageOpt("-listen", _("Accept connections from outside (default: "
"1 if no -proxy or -connect/-noconnect)"));
strUsage += HelpMessageOpt(
"-listenonion",
strprintf(_("Automatically create Tor hidden service (default: %d)"),
DEFAULT_LISTEN_ONION));
strUsage += HelpMessageOpt(
"-maxconnections=",
strprintf(_("Maintain at most connections to peers (default: %u)"),
DEFAULT_MAX_PEER_CONNECTIONS));
strUsage +=
HelpMessageOpt("-maxreceivebuffer=",
strprintf(_("Maximum per-connection receive buffer, "
"*1000 bytes (default: %u)"),
DEFAULT_MAXRECEIVEBUFFER));
strUsage += HelpMessageOpt(
"-maxsendbuffer=", strprintf(_("Maximum per-connection send buffer, "
"*1000 bytes (default: %u)"),
DEFAULT_MAXSENDBUFFER));
strUsage += HelpMessageOpt(
"-maxtimeadjustment",
strprintf(_("Maximum allowed median peer time offset adjustment. Local "
"perspective of time may be influenced by peers forward or "
"backward by this amount. (default: %u seconds)"),
DEFAULT_MAX_TIME_ADJUSTMENT));
strUsage +=
HelpMessageOpt("-onion=",
strprintf(_("Use separate SOCKS5 proxy to reach peers "
"via Tor hidden services (default: %s)"),
"-proxy"));
strUsage += HelpMessageOpt(
"-onlynet=",
_("Only connect to nodes in network (ipv4, ipv6 or onion)"));
strUsage +=
HelpMessageOpt("-permitbaremultisig",
strprintf(_("Relay non-P2SH multisig (default: %d)"),
DEFAULT_PERMIT_BAREMULTISIG));
strUsage += HelpMessageOpt(
"-peerbloomfilters",
strprintf(_("Support filtering of blocks and transaction with bloom "
"filters (default: %d)"),
DEFAULT_PEERBLOOMFILTERS));
strUsage += HelpMessageOpt(
"-port=",
strprintf(
_("Listen for connections on (default: %u or testnet: %u)"),
defaultChainParams->GetDefaultPort(),
testnetChainParams->GetDefaultPort()));
strUsage +=
HelpMessageOpt("-proxy=", _("Connect through SOCKS5 proxy"));
strUsage += HelpMessageOpt(
"-proxyrandomize",
strprintf(_("Randomize credentials for every proxy connection. This "
"enables Tor stream isolation (default: %d)"),
DEFAULT_PROXYRANDOMIZE));
strUsage += HelpMessageOpt(
"-seednode=",
_("Connect to a node to retrieve peer addresses, and disconnect"));
strUsage += HelpMessageOpt(
"-timeout=", strprintf(_("Specify connection timeout in "
"milliseconds (minimum: 1, default: %d)"),
DEFAULT_CONNECT_TIMEOUT));
strUsage += HelpMessageOpt("-torcontrol=:",
strprintf(_("Tor control port to use if onion "
"listening enabled (default: %s)"),
DEFAULT_TOR_CONTROL));
strUsage += HelpMessageOpt("-torpassword=",
_("Tor control port password (default: empty)"));
#ifdef USE_UPNP
#if USE_UPNP
strUsage +=
HelpMessageOpt("-upnp", _("Use UPnP to map the listening port "
"(default: 1 when listening and no -proxy)"));
#else
strUsage += HelpMessageOpt(
"-upnp",
strprintf(_("Use UPnP to map the listening port (default: %u)"), 0));
#endif
#endif
strUsage +=
HelpMessageOpt("-whitebind=",
_("Bind to given address and whitelist peers connecting "
"to it. Use [host]:port notation for IPv6"));
strUsage += HelpMessageOpt(
"-whitelist=",
_("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) "
"or CIDR notated network (e.g. 1.2.3.0/24). Can be specified "
"multiple times.") +
" " + _("Whitelisted peers cannot be DoS banned and their "
"transactions are always relayed, even if they are already "
"in the mempool, useful e.g. for a gateway"));
strUsage += HelpMessageOpt(
"-whitelistrelay",
strprintf(_("Accept relayed transactions received from whitelisted "
"peers even when not relaying transactions (default: %d)"),
DEFAULT_WHITELISTRELAY));
strUsage += HelpMessageOpt(
"-whitelistforcerelay",
strprintf(_("Force relay of transactions from whitelisted peers even "
"if they violate local relay policy (default: %d)"),
DEFAULT_WHITELISTFORCERELAY));
strUsage += HelpMessageOpt(
"-maxuploadtarget=",
strprintf(_("Tries to keep outbound traffic under the given target (in "
"MiB per 24h), 0 = no limit (default: %d)"),
DEFAULT_MAX_UPLOAD_TARGET));
#ifdef ENABLE_WALLET
strUsage += CWallet::GetWalletHelpString(showDebug);
#endif
#if ENABLE_ZMQ
strUsage += HelpMessageGroup(_("ZeroMQ notification options:"));
strUsage += HelpMessageOpt("-zmqpubhashblock=",
_("Enable publish hash block in "));
strUsage +=
HelpMessageOpt("-zmqpubhashtx=",
_("Enable publish hash transaction in "));
strUsage += HelpMessageOpt("-zmqpubrawblock=",
_("Enable publish raw block in "));
strUsage +=
HelpMessageOpt("-zmqpubrawtx=",
_("Enable publish raw transaction in "));
#endif
strUsage += HelpMessageGroup(_("Debugging/Testing options:"));
strUsage += HelpMessageOpt("-uacomment=",
_("Append comment to the user agent string"));
if (showDebug) {
strUsage += HelpMessageOpt(
"-checkblocks=",
strprintf(
_("How many blocks to check at startup (default: %u, 0 = all)"),
DEFAULT_CHECKBLOCKS));
strUsage +=
HelpMessageOpt("-checklevel=",
strprintf(_("How thorough the block verification of "
"-checkblocks is (0-4, default: %u)"),
DEFAULT_CHECKLEVEL));
strUsage += HelpMessageOpt(
"-checkblockindex",
strprintf("Do a full consistency check for mapBlockIndex, "
"setBlockIndexCandidates, chainActive and "
"mapBlocksUnlinked occasionally. Also sets -checkmempool "
"(default: %u)",
defaultChainParams->DefaultConsistencyChecks()));
strUsage += HelpMessageOpt(
"-checkmempool=",
strprintf("Run checks every transactions (default: %u)",
defaultChainParams->DefaultConsistencyChecks()));
strUsage += HelpMessageOpt(
"-checkpoints", strprintf("Only accept block chain matching "
"built-in checkpoints (default: %d)",
DEFAULT_CHECKPOINTS_ENABLED));
strUsage += HelpMessageOpt(
"-disablesafemode", strprintf("Disable safemode, override a real "
"safe mode event (default: %d)",
DEFAULT_DISABLE_SAFEMODE));
strUsage += HelpMessageOpt(
"-testsafemode",
strprintf("Force safe mode (default: %d)", DEFAULT_TESTSAFEMODE));
strUsage +=
HelpMessageOpt("-dropmessagestest=",
"Randomly drop 1 of every network messages");
strUsage +=
HelpMessageOpt("-fuzzmessagestest=",
"Randomly fuzz 1 of every network messages");
strUsage += HelpMessageOpt(
"-stopafterblockimport",
strprintf(
"Stop running after importing blocks from disk (default: %d)",
DEFAULT_STOPAFTERBLOCKIMPORT));
strUsage += HelpMessageOpt(
"-stopatheight", strprintf("Stop running after reaching the given "
"height in the main chain (default: %u)",
DEFAULT_STOPATHEIGHT));
strUsage += HelpMessageOpt(
"-limitancestorcount=",
strprintf("Do not accept transactions if number of in-mempool "
"ancestors is or more (default: %u)",
DEFAULT_ANCESTOR_LIMIT));
strUsage +=
HelpMessageOpt("-limitancestorsize=",
strprintf("Do not accept transactions whose size "
"with all in-mempool ancestors exceeds "
" kilobytes (default: %u)",
DEFAULT_ANCESTOR_SIZE_LIMIT));
strUsage += HelpMessageOpt(
"-limitdescendantcount=",
strprintf("Do not accept transactions if any ancestor would have "
" or more in-mempool descendants (default: %u)",
DEFAULT_DESCENDANT_LIMIT));
strUsage += HelpMessageOpt(
"-limitdescendantsize=",
strprintf("Do not accept transactions if any ancestor would have "
"more than kilobytes of in-mempool descendants "
"(default: %u).",
DEFAULT_DESCENDANT_SIZE_LIMIT));
strUsage += HelpMessageOpt("-bip9params=deployment:start:end",
"Use given start/end times for specified "
"BIP9 deployment (regtest-only)");
}
strUsage += HelpMessageOpt(
"-debug=",
strprintf(_("Output debugging information (default: %u, supplying "
" is optional)"),
0) +
". " + _("If is not supplied or if = 1, "
"output all debugging information.") +
_(" can be:") + " " + ListLogCategories() + ".");
strUsage += HelpMessageOpt(
"-debugexclude=",
strprintf(_("Exclude debugging information for a category. Can be used "
"in conjunction with -debug=1 to output debug logs for all "
"categories except one or more specified categories.")));
if (showDebug) {
strUsage += HelpMessageOpt(
"-nodebug", "Turn off debugging messages, same as -debug=0");
}
strUsage += HelpMessageOpt(
"-help-debug",
_("Show all debugging options (usage: --help -help-debug)"));
strUsage += HelpMessageOpt(
"-logips",
strprintf(_("Include IP addresses in debug output (default: %d)"),
DEFAULT_LOGIPS));
strUsage += HelpMessageOpt(
"-logtimestamps",
strprintf(_("Prepend debug output with timestamp (default: %d)"),
DEFAULT_LOGTIMESTAMPS));
if (showDebug) {
strUsage += HelpMessageOpt(
"-logtimemicros",
strprintf(
"Add microsecond precision to debug timestamps (default: %d)",
DEFAULT_LOGTIMEMICROS));
strUsage += HelpMessageOpt(
"-mocktime=",
"Replace actual time with seconds since epoch (default: 0)");
strUsage += HelpMessageOpt(
"-limitfreerelay=",
strprintf("Continuously rate-limit free transactions to *1000 "
"bytes per minute (default: %u)",
DEFAULT_LIMITFREERELAY));
strUsage +=
HelpMessageOpt("-relaypriority",
strprintf("Require high priority for relaying free "
"or low-fee transactions (default: %d)",
DEFAULT_RELAYPRIORITY));
strUsage += HelpMessageOpt(
"-maxsigcachesize=",
strprintf("Limit size of signature cache to MiB (default: %u)",
DEFAULT_MAX_SIG_CACHE_SIZE));
strUsage += HelpMessageOpt(
"-maxscriptcachesize=",
strprintf("Limit size of script cache to MiB (default: %u)",
DEFAULT_MAX_SCRIPT_CACHE_SIZE));
strUsage += HelpMessageOpt(
"-maxtipage=",
strprintf("Maximum tip age in seconds to consider node in initial "
"block download (default: %u)",
DEFAULT_MAX_TIP_AGE));
}
strUsage += HelpMessageOpt(
"-excessutxocharge=",
strprintf(_("Fees (in %s/kB) to charge per utxo created for"
"relaying, and mining (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_UTXO_FEE)));
strUsage += HelpMessageOpt(
"-minrelaytxfee=",
strprintf(
_("Fees (in %s/kB) smaller than this are considered zero fee for "
"relaying, mining and transaction creation (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)));
strUsage += HelpMessageOpt(
"-maxtxfee=",
strprintf(_("Maximum total fees (in %s) to use in a single wallet "
"transaction or raw transaction; setting this too low may "
"abort large transactions (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)));
strUsage += HelpMessageOpt(
"-printtoconsole",
_("Send trace/debug info to console instead of debug.log file"));
if (showDebug) {
strUsage += HelpMessageOpt(
"-printpriority", strprintf("Log transaction priority and fee per "
"kB when mining blocks (default: %d)",
DEFAULT_PRINTPRIORITY));
}
strUsage += HelpMessageOpt("-shrinkdebugfile",
_("Shrink debug.log file on client startup "
"(default: 1 when no -debug)"));
AppendParamsHelpMessages(strUsage, showDebug);
strUsage += HelpMessageGroup(_("Node relay options:"));
if (showDebug) {
strUsage += HelpMessageOpt(
"-acceptnonstdtxn",
strprintf(
"Relay and mine \"non-standard\" transactions (%sdefault: %u)",
"testnet/regtest only; ",
defaultChainParams->RequireStandard()));
strUsage +=
HelpMessageOpt("-excessiveblocksize=",
strprintf(_("Do not accept blocks larger than this "
"limit, in bytes (default: %d)"),
DEFAULT_MAX_BLOCK_SIZE));
// NOTE(review): the six '-'-prefixed lines below are unified-diff
// removal markers (the -incrementalrelayfee help entry being deleted by
// this patch); they are not compilable C++ and are preserved verbatim.
- strUsage += HelpMessageOpt(
- "-incrementalrelayfee=",
- strprintf(
- "Fee rate (in %s/kB) used to define cost of relay, used for "
- "mempool limiting and BIP 125 replacement. (default: %s)",
- CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)));
strUsage += HelpMessageOpt(
"-dustrelayfee=",
strprintf("Fee rate (in %s/kB) used to defined dust, the value of "
"an output such that it will cost about 1/3 of its value "
"in fees at this fee rate to spend it. (default: %s)",
CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)));
}
strUsage +=
HelpMessageOpt("-bytespersigop",
strprintf(_("Equivalent bytes per sigop in transactions "
"for relay and mining (default: %u)"),
DEFAULT_BYTES_PER_SIGOP));
strUsage += HelpMessageOpt(
"-datacarrier",
strprintf(_("Relay and mine data carrier transactions (default: %d)"),
DEFAULT_ACCEPT_DATACARRIER));
strUsage += HelpMessageOpt(
"-datacarriersize",
strprintf(_("Maximum size of data in data carrier transactions we "
"relay and mine (default: %u)"),
MAX_OP_RETURN_RELAY));
strUsage += HelpMessageGroup(_("Block creation options:"));
strUsage += HelpMessageOpt(
"-blockmaxsize=",
strprintf(_("Set maximum block size in bytes (default: %d)"),
DEFAULT_MAX_GENERATED_BLOCK_SIZE));
strUsage += HelpMessageOpt(
"-blockprioritypercentage=",
strprintf(_("Set maximum percentage of a block reserved to "
"high-priority/low-fee transactions (default: %d)"),
DEFAULT_BLOCK_PRIORITY_PERCENTAGE));
strUsage += HelpMessageOpt(
"-blockmintxfee=",
strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be "
"included in block creation. (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)));
if (showDebug) {
strUsage +=
HelpMessageOpt("-blockversion=",
"Override block version to test forking scenarios");
}
strUsage += HelpMessageGroup(_("RPC server options:"));
strUsage += HelpMessageOpt("-server",
_("Accept command line and JSON-RPC commands"));
strUsage += HelpMessageOpt(
"-rest", strprintf(_("Accept public REST requests (default: %d)"),
DEFAULT_REST_ENABLE));
strUsage += HelpMessageOpt(
"-rpcbind=",
_("Bind to given address to listen for JSON-RPC connections. Use "
"[host]:port notation for IPv6. This option can be specified "
"multiple times (default: bind to all interfaces)"));
strUsage +=
HelpMessageOpt("-rpccookiefile=",
_("Location of the auth cookie (default: data dir)"));
strUsage += HelpMessageOpt("-rpcuser=",
_("Username for JSON-RPC connections"));
strUsage += HelpMessageOpt("-rpcpassword=",
_("Password for JSON-RPC connections"));
strUsage += HelpMessageOpt(
"-rpcauth=",
_("Username and hashed password for JSON-RPC connections. The field "
" comes in the format: :$. A canonical "
"python script is included in share/rpcuser. The client then "
"connects normally using the "
"rpcuser=/rpcpassword= pair of arguments. This "
"option can be specified multiple times"));
strUsage += HelpMessageOpt(
"-rpcport=",
strprintf(_("Listen for JSON-RPC connections on (default: %u or "
"testnet: %u)"),
defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort()));
strUsage += HelpMessageOpt(
"-rpcallowip=",
_("Allow JSON-RPC connections from specified source. Valid for "
"are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. "
"1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This "
"option can be specified multiple times"));
strUsage += HelpMessageOpt(
"-rpcthreads=",
strprintf(
_("Set the number of threads to service RPC calls (default: %d)"),
DEFAULT_HTTP_THREADS));
strUsage += HelpMessageOpt(
"-rpccorsdomain=value",
"Domain from which to accept cross origin requests (browser enforced)");
if (showDebug) {
strUsage += HelpMessageOpt(
"-rpcworkqueue=", strprintf("Set the depth of the work queue to "
"service RPC calls (default: %d)",
DEFAULT_HTTP_WORKQUEUE));
strUsage += HelpMessageOpt(
"-rpcservertimeout=",
strprintf("Timeout during HTTP requests (default: %d)",
DEFAULT_HTTP_SERVER_TIMEOUT));
}
return strUsage;
}
/**
 * Return the multi-paragraph copyright/license text shown by -version.
 * NOTE(review): the URL string literals below are empty ("") — they appear
 * to have been stripped from this copy (likely angle-bracketed URLs);
 * confirm against the upstream file.
 */
std::string LicenseInfo() {
const std::string URL_SOURCE_CODE =
"";
const std::string URL_WEBSITE = "";
return CopyrightHolders(
strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) +
" ") +
"\n" + "\n" +
strprintf(_("Please contribute if you find %s useful. "
"Visit %s for further information about the software."),
PACKAGE_NAME, URL_WEBSITE) +
"\n" + strprintf(_("The source code is available from %s."),
URL_SOURCE_CODE) +
"\n" + "\n" + _("This is experimental software.") + "\n" +
strprintf(_("Distributed under the MIT software license, see the "
"accompanying file %s or %s"),
"COPYING", "") +
"\n" + "\n" +
strprintf(_("This product includes software developed by the "
"OpenSSL Project for use in the OpenSSL Toolkit %s and "
"cryptographic software written by Eric Young and UPnP "
"software written by Thomas Bernard."),
"") +
"\n";
}
// Run the user's -blocknotify command (with %s replaced by the new tip's
// block hash) whenever the best block changes after initial sync. The
// command is executed in a detached thread.
static void BlockNotifyCallback(bool initialSync,
const CBlockIndex *pBlockIndex) {
if (initialSync || !pBlockIndex) return;
std::string strCmd = gArgs.GetArg("-blocknotify", "");
boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
// Rendezvous state for the genesis block: a waiter (presumably init — confirm
// against AppInitMain) blocks on condvar_GenesisWait until fHaveGenesis is
// set by the callback below.
static bool fHaveGenesis = false;
static boost::mutex cs_GenesisWait;
static CConditionVariable condvar_GenesisWait;
// Block-tip callback used only while waiting for the genesis block: set the
// flag under the mutex, then wake all waiters.
static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) {
if (pBlockIndex != nullptr) {
{
// NOTE(review): the lock's template argument (<boost::mutex>) appears
// stripped in this copy.
boost::unique_lock lock_GenesisWait(cs_GenesisWait);
fHaveGenesis = true;
}
condvar_GenesisWait.notify_all();
}
}
// RAII guard that marks a block-import operation as in progress by flipping
// the global fImporting flag. The asserts enforce that imports never nest.
struct CImportingNow {
    CImportingNow() {
        assert(!fImporting);
        fImporting = true;
    }

    ~CImportingNow() {
        assert(fImporting);
        fImporting = false;
    }
};
// If we're using -prune with -reindex, then delete block files that will be
// ignored by the reindex. Since reindexing works by starting at block file 0
// and looping until a blockfile is missing, do the same here to delete any
// later block files after a gap. Also delete all rev files since they'll be
// rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync
// with what's actually on disk by the time we start downloading, so that
// pruning works correctly.
void CleanupBlockRevFiles() {
std::map mapBlockFiles;
// Glob all blk?????.dat and rev?????.dat files from the blocks directory.
// Remove the rev files immediately and insert the blk file paths into an
// ordered map keyed by block file index.
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for "
"-reindex with -prune\n");
fs::path blocksdir = GetDataDir() / "blocks";
for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator();
it++) {
if (is_regular_file(*it) &&
it->path().filename().string().length() == 12 &&
it->path().filename().string().substr(8, 4) == ".dat") {
if (it->path().filename().string().substr(0, 3) == "blk")
mapBlockFiles[it->path().filename().string().substr(3, 5)] =
it->path();
else if (it->path().filename().string().substr(0, 3) == "rev")
remove(it->path());
}
}
// Remove all block files that aren't part of a contiguous set starting at
// zero by walking the ordered map (keys are block file indices) by keeping
// a separate counter. Once we hit a gap (or if 0 doesn't exist) start
// removing block files.
int nContigCounter = 0;
for (const std::pair &item : mapBlockFiles) {
if (atoi(item.first) == nContigCounter) {
nContigCounter++;
continue;
}
remove(item.second);
}
}
void ThreadImport(const Config &config, std::vector vImportFiles) {
RenameThread("bitcoin-loadblk");
{
CImportingNow imp;
// -reindex
if (fReindex) {
int nFile = 0;
while (true) {
CDiskBlockPos pos(nFile, 0);
if (!fs::exists(GetBlockPosFilename(pos, "blk"))) {
// No block files left to reindex
break;
}
FILE *file = OpenBlockFile(pos, true);
if (!file) {
// This error is logged in OpenBlockFile
break;
}
LogPrintf("Reindexing block file blk%05u.dat...\n",
(unsigned int)nFile);
LoadExternalBlockFile(config, file, &pos);
nFile++;
}
pblocktree->WriteReindexing(false);
fReindex = false;
LogPrintf("Reindexing finished\n");
// To avoid ending up in a situation without genesis block, re-try
// initializing (no-op if reindexing worked):
InitBlockIndex(config);
}
// hardcoded $DATADIR/bootstrap.dat
fs::path pathBootstrap = GetDataDir() / "bootstrap.dat";
if (fs::exists(pathBootstrap)) {
FILE *file = fsbridge::fopen(pathBootstrap, "rb");
if (file) {
fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
LogPrintf("Importing bootstrap.dat...\n");
LoadExternalBlockFile(config, file);
RenameOver(pathBootstrap, pathBootstrapOld);
} else {
LogPrintf("Warning: Could not open bootstrap file %s\n",
pathBootstrap.string());
}
}
// -loadblock=
for (const fs::path &path : vImportFiles) {
FILE *file = fsbridge::fopen(path, "rb");
if (file) {
LogPrintf("Importing blocks file %s...\n", path.string());
LoadExternalBlockFile(config, file);
} else {
LogPrintf("Warning: Could not open blocks file %s\n",
path.string());
}
}
// scan for better chains in the block chain database, that are not yet
// connected in the active best chain
CValidationState state;
if (!ActivateBestChain(config, state)) {
LogPrintf("Failed to connect best block");
StartShutdown();
}
if (gArgs.GetBoolArg("-stopafterblockimport",
DEFAULT_STOPAFTERBLOCKIMPORT)) {
LogPrintf("Stopping after block import\n");
StartShutdown();
}
} // End scope of CImportingNow
if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
LoadMempool(config);
fDumpMempoolLater = !fRequestShutdown;
}
}
/** Sanity checks
 *  Ensure that Bitcoin is running in a usable environment with all
 *  necessary library support. Returns false (after reporting, where a
 *  message is useful) if any check fails.
 */
bool InitSanityCheck(void) {
    // Elliptic-curve crypto must behave correctly before anything else runs.
    if (!ECC_InitSanityCheck()) {
        InitError(
            "Elliptic curve cryptography sanity check failure. Aborting.");
        return false;
    }

    // C and C++ runtime library checks; these report their own diagnostics.
    if (!(glibc_sanity_test() && glibcxx_sanity_test())) {
        return false;
    }

    // The OS random number generator must be functional.
    if (!Random_SanityCheck()) {
        InitError("OS cryptographic RNG sanity check failure. Aborting.");
        return false;
    }

    return true;
}
/**
 * Wire up RPC lifecycle callbacks and bring up the HTTP/RPC/REST servers.
 * Returns false as soon as any startup step fails.
 */
static bool AppInitServers(Config &config, boost::thread_group &threadGroup) {
    RPCServer::OnStarted(&OnRPCStarted);
    RPCServer::OnStopped(&OnRPCStopped);
    RPCServer::OnPreCommand(&OnRPCPreCommand);

    if (!InitHTTPServer(config)) {
        return false;
    }
    if (!StartRPC()) {
        return false;
    }
    if (!StartHTTPRPC()) {
        return false;
    }
    // REST is optional and rides on the HTTP server.
    if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) {
        return false;
    }
    return StartHTTPServer();
}
// Parameter interaction based on rules
//
// Derives implied settings from explicitly supplied options using
// SoftSetBoolArg (which only applies when the user did not set the target
// option themselves) and logs every derivation. Order matters: later rules
// read values that earlier rules may have soft-set (e.g. -listen).
void InitParameterInteraction() {
    // when specifying an explicit binding address, you want to listen on it
    // even when -connect or -proxy is specified.
    if (gArgs.IsArgSet("-bind")) {
        if (gArgs.SoftSetBoolArg("-listen", true))
            LogPrintf(
                "%s: parameter interaction: -bind set -> setting -listen=1\n",
                __func__);
    }
    if (gArgs.IsArgSet("-whitebind")) {
        if (gArgs.SoftSetBoolArg("-listen", true))
            LogPrintf("%s: parameter interaction: -whitebind set -> setting "
                      "-listen=1\n",
                      __func__);
    }

    if (gArgs.IsArgSet("-connect")) {
        // when only connecting to trusted nodes, do not seed via DNS, or listen
        // by default.
        if (gArgs.SoftSetBoolArg("-dnsseed", false))
            LogPrintf("%s: parameter interaction: -connect set -> setting "
                      "-dnsseed=0\n",
                      __func__);
        if (gArgs.SoftSetBoolArg("-listen", false))
            LogPrintf("%s: parameter interaction: -connect set -> setting "
                      "-listen=0\n",
                      __func__);
    }

    if (gArgs.IsArgSet("-proxy")) {
        // to protect privacy, do not listen by default if a default proxy
        // server is specified.
        if (gArgs.SoftSetBoolArg("-listen", false))
            LogPrintf(
                "%s: parameter interaction: -proxy set -> setting -listen=0\n",
                __func__);
        // to protect privacy, do not use UPNP when a proxy is set. The user may
        // still specify -listen=1 to listen locally, so don't rely on this
        // happening through -listen below.
        if (gArgs.SoftSetBoolArg("-upnp", false))
            LogPrintf(
                "%s: parameter interaction: -proxy set -> setting -upnp=0\n",
                __func__);
        // to protect privacy, do not discover addresses by default
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf("%s: parameter interaction: -proxy set -> setting "
                      "-discover=0\n",
                      __func__);
    }

    // Note: this reads the final -listen value, possibly soft-set above.
    if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
        // do not map ports or try to retrieve public IP when not listening
        // (pointless)
        if (gArgs.SoftSetBoolArg("-upnp", false))
            LogPrintf(
                "%s: parameter interaction: -listen=0 -> setting -upnp=0\n",
                __func__);
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf(
                "%s: parameter interaction: -listen=0 -> setting -discover=0\n",
                __func__);
        if (gArgs.SoftSetBoolArg("-listenonion", false))
            LogPrintf("%s: parameter interaction: -listen=0 -> setting "
                      "-listenonion=0\n",
                      __func__);
    }

    if (gArgs.IsArgSet("-externalip")) {
        // if an explicit public IP is specified, do not try to find others
        if (gArgs.SoftSetBoolArg("-discover", false))
            LogPrintf("%s: parameter interaction: -externalip set -> setting "
                      "-discover=0\n",
                      __func__);
    }

    // disable whitelistrelay in blocksonly mode
    if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
        if (gArgs.SoftSetBoolArg("-whitelistrelay", false))
            LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting "
                      "-whitelistrelay=0\n",
                      __func__);
    }

    // Forcing relay from whitelisted hosts implies we will accept relays from
    // them in the first place.
    if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
        if (gArgs.SoftSetBoolArg("-whitelistrelay", true))
            LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> "
                      "setting -whitelistrelay=1\n",
                      __func__);
    }
}
/// Compose the standard "cannot resolve" init error for a -<optname> value.
static std::string ResolveErrMsg(const char *const optname,
                                 const std::string &strBind) {
    const std::string fmt = _("Cannot resolve -%s address: '%s'");
    return strprintf(fmt, optname, strBind);
}
// Configure the global logger from command-line options and emit the
// session banner. Runs before most other init so subsequent startup steps
// are logged with the requested settings.
void InitLogging() {
    BCLog::Logger &logger = GetLogger();
    logger.fPrintToConsole = gArgs.GetBoolArg("-printtoconsole", false);
    logger.fLogTimestamps =
        gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
    logger.fLogTimeMicros =
        gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);

    fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);

    // Blank run so a new session is visually separated in debug.log.
    LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
    LogPrintf("%s version %s\n", CLIENT_NAME, FormatFullVersion());
}
namespace { // Variables internal to initialization process only

// Services we expect remote peers to offer.
ServiceFlags nRelevantServices = NODE_NETWORK;
// Final connection budget after applying system fd limits.
int nMaxConnections;
// Connection count requested via -maxconnections, before trimming.
int nUserMaxConnections;
// Number of file descriptors obtained from RaiseFileDescriptorLimit().
int nFD;
// Services this node advertises to peers.
ServiceFlags nLocalServices = NODE_NETWORK;
} // namespace
/**
 * Installed via std::set_new_handler: on allocation failure, log once and
 * terminate instead of throwing, to (try to) avoid chain corruption.
 *
 * Fix: removed the stray semicolon after the function body.
 */
[[noreturn]] static void new_handler_terminate() {
    // Rather than throwing std::bad-alloc if allocation fails, terminate
    // immediately to (try to) avoid chain corruption. Since LogPrintf may
    // itself allocate memory, set the handler directly to terminate first.
    std::set_new_handler(std::terminate);
    LogPrintf("Error: Out of memory. Terminating.\n");

    // The log was successful, terminate now.
    std::terminate();
}
// Step 1 of initialization: process-level, platform-specific setup that must
// happen before anything else (CRT noise suppression, DEP, networking,
// umask, POSIX signal handlers, OOM handler). Returns false only if
// networking cannot be initialized.
bool AppInitBasicSetup() {
// Step 1: setup
#ifdef _MSC_VER
    // Turn off Microsoft heap dump noise
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr,
                                             OPEN_EXISTING, 0, 0));
#endif
#if _MSC_VER >= 1400
    // Disable confusing "helpful" text message on abort, Ctrl-C
    _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
#ifdef WIN32
// Enable Data Execution Prevention (DEP)
// Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008
// A failure is non-critical and needs no further attention!
#ifndef PROCESS_DEP_ENABLE
// We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >=
// 0x0601 (Windows 7), which is not correct. Can be removed, when GCCs winbase.h
// is fixed!
#define PROCESS_DEP_ENABLE 0x00000001
#endif
    // Resolve SetProcessDEPPolicy dynamically since it may not exist on
    // older Windows versions.
    typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD);
    PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(
        GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy");
    if (setProcDEPPol != nullptr) setProcDEPPol(PROCESS_DEP_ENABLE);
#endif

    if (!SetupNetworking()) return InitError("Initializing networking failed");

#ifndef WIN32
    if (!gArgs.GetBoolArg("-sysperms", false)) {
        // Restrict created files to owner access unless -sysperms is set.
        umask(077);
    }

    // Clean shutdown on SIGTERM
    struct sigaction sa;
    sa.sa_handler = HandleSIGTERM;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGTERM, &sa, nullptr);
    sigaction(SIGINT, &sa, nullptr);

    // Reopen debug.log on SIGHUP
    struct sigaction sa_hup;
    sa_hup.sa_handler = HandleSIGHUP;
    sigemptyset(&sa_hup.sa_mask);
    sa_hup.sa_flags = 0;
    sigaction(SIGHUP, &sa_hup, nullptr);

    // Ignore SIGPIPE, otherwise it will bring the daemon down if the client
    // closes unexpectedly
    signal(SIGPIPE, SIG_IGN);
#endif

    // Terminate (rather than throw) on allocation failure; see
    // new_handler_terminate for rationale.
    std::set_new_handler(new_handler_terminate);

    return true;
}
/**
 * Step 2/3 of initialization: validate command-line parameters, resolve
 * interactions between them, and translate them into internal flags and
 * globals. Returns false (after InitError) on any invalid configuration.
 *
 * Fixes applied here:
 *  - Removed leftover unified-diff removal lines for the dropped
 *    -incrementalrelayfee option (see release notes: "Remove
 *    -incrementalrelayfee option") that had been pasted into the file.
 *  - Restored template arguments stripped by extraction
 *    (std::vector<std::string>, std::numeric_limits<uint64_t>::max()).
 *  - Added `continue;` after the unsupported-log-category warnings so an
 *    indeterminate BCLog::LogFlags value is never enabled/disabled.
 */
bool AppInitParameterInteraction(Config &config) {
    const CChainParams &chainparams = config.GetChainParams();

    // Step 2: parameter interactions

    // also see: InitParameterInteraction()

    // if using block pruning, then disallow txindex
    if (gArgs.GetArg("-prune", 0)) {
        if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX))
            return InitError(_("Prune mode is incompatible with -txindex."));
    }

    // if space reserved for high priority transactions is misconfigured
    // stop program execution and warn the user with a proper error message
    const int64_t blkprio = gArgs.GetArg("-blockprioritypercentage",
                                         DEFAULT_BLOCK_PRIORITY_PERCENTAGE);
    if (!config.SetBlockPriorityPercentage(blkprio)) {
        return InitError(_("Block priority percentage has to belong to the "
                           "[0..100] interval."));
    }

    // Make sure enough file descriptors are available
    int nBind = std::max(
        (gArgs.IsArgSet("-bind") ? gArgs.GetArgs("-bind").size() : 0) +
            (gArgs.IsArgSet("-whitebind") ? gArgs.GetArgs("-whitebind").size()
                                          : 0),
        size_t(1));
    nUserMaxConnections =
        gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
    nMaxConnections = std::max(nUserMaxConnections, 0);

    // Trim requested connection counts, to fit into system limitations
    nMaxConnections =
        std::max(std::min(nMaxConnections,
                          (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS -
                                MAX_ADDNODE_CONNECTIONS)),
                 0);
    nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS +
                                   MAX_ADDNODE_CONNECTIONS);
    if (nFD < MIN_CORE_FILEDESCRIPTORS)
        return InitError(_("Not enough file descriptors available."));
    nMaxConnections =
        std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS,
                 nMaxConnections);

    if (nMaxConnections < nUserMaxConnections) {
        InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, "
                                "because of system limitations."),
                              nUserMaxConnections, nMaxConnections));
    }

    // Step 3: parameter-to-internal-flags
    if (gArgs.IsArgSet("-debug")) {
        // Special-case: if -debug=0/-nodebug is set, turn off debugging
        // messages
        const std::vector<std::string> &categories = gArgs.GetArgs("-debug");
        if (find(categories.begin(), categories.end(), std::string("0")) ==
            categories.end()) {
            for (const auto &cat : categories) {
                BCLog::LogFlags flag;
                if (!GetLogCategory(flag, cat)) {
                    InitWarning(
                        strprintf(_("Unsupported logging category %s=%s."),
                                  "-debug", cat));
                    // Skip: flag was not set for an unknown category.
                    continue;
                }
                GetLogger().EnableCategory(flag);
            }
        }
    }

    // Now remove the logging categories which were explicitly excluded
    if (gArgs.IsArgSet("-debugexclude")) {
        for (const std::string &cat : gArgs.GetArgs("-debugexclude")) {
            BCLog::LogFlags flag;
            if (!GetLogCategory(flag, cat)) {
                InitWarning(strprintf(_("Unsupported logging category %s=%s."),
                                      "-debugexclude", cat));
                // Skip: flag was not set for an unknown category.
                continue;
            }
            GetLogger().DisableCategory(flag);
        }
    }

    // Check for -debugnet
    if (gArgs.GetBoolArg("-debugnet", false))
        InitWarning(
            _("Unsupported argument -debugnet ignored, use -debug=net."));
    // Check for -socks - as this is a privacy risk to continue, exit here
    if (gArgs.IsArgSet("-socks"))
        return InitError(
            _("Unsupported argument -socks found. Setting SOCKS version isn't "
              "possible anymore, only SOCKS5 proxies are supported."));
    // Check for -tor - as this is a privacy risk to continue, exit here
    if (gArgs.GetBoolArg("-tor", false))
        return InitError(_("Unsupported argument -tor found, use -onion."));

    if (gArgs.GetBoolArg("-benchmark", false))
        InitWarning(
            _("Unsupported argument -benchmark ignored, use -debug=bench."));

    if (gArgs.GetBoolArg("-whitelistalwaysrelay", false))
        InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use "
                      "-whitelistrelay and/or -whitelistforcerelay."));

    if (gArgs.IsArgSet("-blockminsize"))
        InitWarning("Unsupported argument -blockminsize ignored.");

    // Checkmempool and checkblockindex default to true in regtest mode
    int ratio = std::min(
        std::max(
            gArgs.GetArg("-checkmempool",
                         chainparams.DefaultConsistencyChecks() ? 1 : 0),
            0),
        1000000);
    if (ratio != 0) {
        mempool.setSanityCheck(1.0 / ratio);
    }
    fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex",
                                        chainparams.DefaultConsistencyChecks());
    fCheckpointsEnabled =
        gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);

    hashAssumeValid = uint256S(
        gArgs.GetArg("-assumevalid",
                     chainparams.GetConsensus().defaultAssumeValid.GetHex()));
    if (!hashAssumeValid.IsNull())
        LogPrintf("Assuming ancestors of block %s have valid signatures.\n",
                  hashAssumeValid.GetHex());
    else
        LogPrintf("Validating signatures for all blocks.\n");

    if (gArgs.IsArgSet("-minimumchainwork")) {
        const std::string minChainWorkStr =
            gArgs.GetArg("-minimumchainwork", "");
        if (!IsHexNumber(minChainWorkStr)) {
            return InitError(strprintf(
                "Invalid non-hex (%s) minimum chain work value specified",
                minChainWorkStr));
        }
        nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr));
    } else {
        nMinimumChainWork =
            UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
    }
    LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex());
    if (nMinimumChainWork <
        UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
        LogPrintf("Warning: nMinimumChainWork set below default value of %s\n",
                  chainparams.GetConsensus().nMinimumChainWork.GetHex());
    }

    // mempool limits
    int64_t nMempoolSizeMax =
        gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
    int64_t nMempoolSizeMin =
        gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) *
        1000 * 40;
    if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin)
        return InitError(strprintf(_("-maxmempool must be at least %d MB"),
                                   std::ceil(nMempoolSizeMin / 1000000.0)));

    // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency
    nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
    if (nScriptCheckThreads <= 0) nScriptCheckThreads += GetNumCores();
    if (nScriptCheckThreads <= 1)
        nScriptCheckThreads = 0;
    else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS)
        nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;

    // Configure excessive block size.
    const uint64_t nProposedExcessiveBlockSize =
        gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE);
    if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) {
        return InitError(
            _("Excessive block size must be > 1,000,000 bytes (1MB)"));
    }

    // Check blockmaxsize does not exceed maximum accepted block size.
    const uint64_t nProposedMaxGeneratedBlockSize =
        gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE);
    if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) {
        auto msg = _("Max generated block size (blockmaxsize) cannot exceed "
                     "the excessive block size (excessiveblocksize)");
        return InitError(msg);
    }

    // block pruning; get the amount of disk space (in MiB) to allot for block &
    // undo files
    int64_t nPruneArg = gArgs.GetArg("-prune", 0);
    if (nPruneArg < 0) {
        return InitError(
            _("Prune cannot be configured with a negative value."));
    }
    nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024;
    if (nPruneArg == 1) { // manual pruning: -prune=1
        LogPrintf("Block pruning enabled. Use RPC call "
                  "pruneblockchain(height) to manually prune block and undo "
                  "files.\n");
        nPruneTarget = std::numeric_limits<uint64_t>::max();
        fPruneMode = true;
    } else if (nPruneTarget) {
        if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
            return InitError(
                strprintf(_("Prune configured below the minimum of %d MiB. "
                            "Please use a higher number."),
                          MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
        }
        LogPrintf("Prune configured to target %uMiB on disk for block and undo "
                  "files.\n",
                  nPruneTarget / 1024 / 1024);
        fPruneMode = true;
    }

    RegisterAllRPCCommands(tableRPC);
#ifdef ENABLE_WALLET
    RegisterWalletRPCCommands(tableRPC);
    RegisterDumpRPCCommands(tableRPC);
#endif

    nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
    if (nConnectTimeout <= 0) nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;

    // Obtain the amount to charge excess UTXO
    if (gArgs.IsArgSet("-excessutxocharge")) {
        Amount n(0);
        auto parsed = ParseMoney(gArgs.GetArg("-excessutxocharge", ""), n);
        if (!parsed || Amount(0) > n)
            return InitError(AmountErrMsg(
                "excessutxocharge", gArgs.GetArg("-excessutxocharge", "")));
        config.SetExcessUTXOCharge(n);
    } else {
        config.SetExcessUTXOCharge(DEFAULT_UTXO_FEE);
    }

    // Fee-per-kilobyte amount considered the same as "free". If you are mining,
    // be careful setting this: if you set it to zero then a transaction spammer
    // can cheaply fill blocks using 1-satoshi-fee transactions. It should be
    // set above the real cost to you of processing a transaction.
    if (gArgs.IsArgSet("-minrelaytxfee")) {
        Amount n(0);
        auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n);
        if (!parsed || Amount(0) == n)
            return InitError(AmountErrMsg("minrelaytxfee",
                                          gArgs.GetArg("-minrelaytxfee", "")));
        // High fee check is done afterward in CWallet::ParameterInteraction()
        ::minRelayTxFee = CFeeRate(n);
    }

    // Sanity check argument for min fee for including tx in block
    // TODO: Harmonize which arguments need sanity checking and where that
    // happens.
    if (gArgs.IsArgSet("-blockmintxfee")) {
        Amount n(0);
        if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n))
            return InitError(AmountErrMsg("blockmintxfee",
                                          gArgs.GetArg("-blockmintxfee", "")));
    }

    // Feerate used to define dust. Shouldn't be changed lightly as old
    // implementations may inadvertently create non-standard transactions.
    if (gArgs.IsArgSet("-dustrelayfee")) {
        Amount n(0);
        auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n);
        if (!parsed || Amount(0) == n)
            return InitError(AmountErrMsg("dustrelayfee",
                                          gArgs.GetArg("-dustrelayfee", "")));
        dustRelayFee = CFeeRate(n);
    }

    fRequireStandard =
        !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
    if (chainparams.RequireStandard() && !fRequireStandard)
        return InitError(
            strprintf("acceptnonstdtxn is not currently supported for %s chain",
                      chainparams.NetworkIDString()));
    nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);

#ifdef ENABLE_WALLET
    if (!CWallet::ParameterInteraction()) return false;
#endif

    fIsBareMultisigStd =
        gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
    fAcceptDatacarrier =
        gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);

    // Option to startup with mocktime set (used for regression testing):
    SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op

    if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
        nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);

    // Signal Bitcoin Cash support.
    // TODO: remove some time after the hardfork when no longer needed
    // to differentiate the network nodes.
    nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH);

    nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);

    if (gArgs.IsArgSet("-bip9params")) {
        // Allow overriding BIP9 parameters for testing
        if (!chainparams.MineBlocksOnDemand()) {
            return InitError(
                "BIP9 parameters may only be overridden on regtest.");
        }
        for (const std::string &strDeployment : gArgs.GetArgs("-bip9params")) {
            std::vector<std::string> vDeploymentParams;
            boost::split(vDeploymentParams, strDeployment,
                         boost::is_any_of(":"));
            if (vDeploymentParams.size() != 3) {
                return InitError("BIP9 parameters malformed, expecting "
                                 "deployment:start:end");
            }
            int64_t nStartTime, nTimeout;
            if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
                return InitError(
                    strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
            }
            if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
                return InitError(
                    strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
            }
            bool found = false;
            for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS;
                 ++j) {
                if (vDeploymentParams[0].compare(
                        VersionBitsDeploymentInfo[j].name) == 0) {
                    UpdateBIP9Parameters(Consensus::DeploymentPos(j),
                                         nStartTime, nTimeout);
                    found = true;
                    LogPrintf("Setting BIP9 activation parameters for %s to "
                              "start=%ld, timeout=%ld\n",
                              vDeploymentParams[0], nStartTime, nTimeout);
                    break;
                }
            }
            if (!found) {
                return InitError(
                    strprintf("Invalid deployment (%s)", vDeploymentParams[0]));
            }
        }
    }
    return true;
}
// Take (or, with probeOnly, merely test) an advisory file lock on
// $DATADIR/.lock so that only one process uses the data directory at a time.
// The lock object is function-local static on purpose: when probeOnly is
// false the lock must outlive this call and be held until process exit.
static bool LockDataDirectory(bool probeOnly) {
    std::string strDataDir = GetDataDir().string();

    // Make sure only a single Bitcoin process is using the data directory.
    fs::path pathLockFile = GetDataDir() / ".lock";
    // empty lock file; created if it doesn't exist.
    FILE *file = fsbridge::fopen(pathLockFile, "a");
    if (file) fclose(file);

    try {
        static boost::interprocess::file_lock lock(
            pathLockFile.string().c_str());
        if (!lock.try_lock()) {
            return InitError(
                strprintf(_("Cannot obtain a lock on data directory %s. %s is "
                            "probably already running."),
                          strDataDir, _(PACKAGE_NAME)));
        }
        if (probeOnly) {
            // Probe succeeded; release so the real lock can be taken later.
            lock.unlock();
        }
    } catch (const boost::interprocess::interprocess_exception &e) {
        // e.g. the lock file could not be opened at the OS level.
        return InitError(strprintf(_("Cannot obtain a lock on data directory "
                                     "%s. %s is probably already running.") +
                                       " %s.",
                                   strDataDir, _(PACKAGE_NAME), e.what()));
    }
    return true;
}
/**
 * Step 4 of initialization: bring up the crypto subsystems, run the
 * environment sanity checks, and probe the data directory lock so the user
 * gets an early error if another instance is running.
 */
bool AppInitSanityChecks() {
    // Step 4: sanity checks

    // Initialize elliptic curve code
    const std::string sha256_algo = SHA256AutoDetect();
    LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo);
    RandomInit();
    ECC_Start();
    globalVerifyHandle.reset(new ECCVerifyHandle());

    // Sanity check
    if (!InitSanityCheck()) {
        return InitError(strprintf(
            _("Initialization sanity check failed. %s is shutting down."),
            _(PACKAGE_NAME)));
    }

    // Probe the data directory lock to give an early error message, if possible
    return LockDataDirectory(true);
}
bool AppInitMain(Config &config, boost::thread_group &threadGroup,
CScheduler &scheduler) {
const CChainParams &chainparams = config.GetChainParams();
// Step 4a: application initialization
// After daemonization get the data directory lock again and hold on to it
// until exit. This creates a slight window for a race condition to happen,
// however this condition is harmless: it will at most make us exit without
// printing a message to console.
if (!LockDataDirectory(false)) {
// Detailed error printed inside LockDataDirectory
return false;
}
#ifndef WIN32
CreatePidFile(GetPidFile(), getpid());
#endif
BCLog::Logger &logger = GetLogger();
bool default_shrinkdebugfile = logger.DefaultShrinkDebugFile();
if (gArgs.GetBoolArg("-shrinkdebugfile", default_shrinkdebugfile)) {
// Do this first since it both loads a bunch of debug.log into memory,
// and because this needs to happen before any other debug.log printing.
logger.ShrinkDebugFile();
}
if (logger.fPrintToDebugLog) {
logger.OpenDebugLog();
}
if (!logger.fLogTimestamps) {
LogPrintf("Startup time: %s\n",
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
}
LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
LogPrintf("Using data directory %s\n", GetDataDir().string());
LogPrintf(
"Using config file %s\n",
GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string());
LogPrintf("Using at most %i automatic connections (%i file descriptors "
"available)\n",
nMaxConnections, nFD);
InitSignatureCache();
InitScriptExecutionCache();
LogPrintf("Using %u threads for script verification\n",
nScriptCheckThreads);
if (nScriptCheckThreads) {
for (int i = 0; i < nScriptCheckThreads - 1; i++) {
threadGroup.create_thread(&ThreadScriptCheck);
}
}
// Start the lightweight task scheduler thread
CScheduler::Function serviceLoop =
boost::bind(&CScheduler::serviceQueue, &scheduler);
threadGroup.create_thread(boost::bind(&TraceThread,
"scheduler", serviceLoop));
/* Start the RPC server already. It will be started in "warmup" mode
* and not really process calls already (but it will signify connections
* that the server is there and will be ready later). Warmup mode will
* be disabled when initialisation is finished.
*/
if (gArgs.GetBoolArg("-server", false)) {
uiInterface.InitMessage.connect(SetRPCWarmupStatus);
if (!AppInitServers(config, threadGroup)) {
return InitError(
_("Unable to start HTTP server. See debug log for details."));
}
}
int64_t nStart;
// Step 5: verify wallet database integrity
#ifdef ENABLE_WALLET
if (!CWallet::Verify(chainparams)) {
return false;
}
#endif
// Step 6: network initialization
// Note that we absolutely cannot open any actual connections
// until the very end ("start node") as the UTXO/block state
// is not yet setup and may end up being set up twice if we
// need to reindex later.
assert(!g_connman);
g_connman = std::unique_ptr(
new CConnman(config, GetRand(std::numeric_limits::max()),
GetRand(std::numeric_limits::max())));
CConnman &connman = *g_connman;
peerLogic.reset(new PeerLogicValidation(&connman));
RegisterValidationInterface(peerLogic.get());
RegisterNodeSignals(GetNodeSignals());
if (gArgs.IsArgSet("-onlynet")) {
std::set nets;
for (const std::string &snet : gArgs.GetArgs("-onlynet")) {
enum Network net = ParseNetwork(snet);
if (net == NET_UNROUTABLE)
return InitError(strprintf(
_("Unknown network specified in -onlynet: '%s'"), snet));
nets.insert(net);
}
for (int n = 0; n < NET_MAX; n++) {
enum Network net = (enum Network)n;
if (!nets.count(net)) SetLimited(net);
}
}
if (gArgs.IsArgSet("-whitelist")) {
for (const std::string &net : gArgs.GetArgs("-whitelist")) {
CSubNet subnet;
LookupSubNet(net.c_str(), subnet);
if (!subnet.IsValid())
return InitError(strprintf(
_("Invalid netmask specified in -whitelist: '%s'"), net));
connman.AddWhitelistedRange(subnet);
}
}
bool proxyRandomize =
gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
// -proxy sets a proxy for all outgoing network traffic
// -noproxy (or -proxy=0) as well as the empty string can be used to not set
// a proxy, this is the default
std::string proxyArg = gArgs.GetArg("-proxy", "");
SetLimited(NET_TOR);
if (proxyArg != "" && proxyArg != "0") {
CService resolved(LookupNumeric(proxyArg.c_str(), 9050));
proxyType addrProxy = proxyType(resolved, proxyRandomize);
if (!addrProxy.IsValid()) {
return InitError(
strprintf(_("Invalid -proxy address: '%s'"), proxyArg));
}
SetProxy(NET_IPV4, addrProxy);
SetProxy(NET_IPV6, addrProxy);
SetProxy(NET_TOR, addrProxy);
SetNameProxy(addrProxy);
SetLimited(NET_TOR, false); // by default, -proxy sets onion as
// reachable, unless -noonion later
}
// -onion can be used to set only a proxy for .onion, or override normal
// proxy for .onion addresses.
// -noonion (or -onion=0) disables connecting to .onion entirely. An empty
// string is used to not override the onion proxy (in which case it defaults
// to -proxy set above, or none)
std::string onionArg = gArgs.GetArg("-onion", "");
if (onionArg != "") {
if (onionArg == "0") { // Handle -noonion/-onion=0
SetLimited(NET_TOR); // set onions as unreachable
} else {
CService resolved(LookupNumeric(onionArg.c_str(), 9050));
proxyType addrOnion = proxyType(resolved, proxyRandomize);
if (!addrOnion.IsValid()) {
return InitError(
strprintf(_("Invalid -onion address: '%s'"), onionArg));
}
SetProxy(NET_TOR, addrOnion);
SetLimited(NET_TOR, false);
}
}
// see Step 2: parameter interactions for more information about these
fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN);
fDiscover = gArgs.GetBoolArg("-discover", true);
fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
fRelayTxes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
if (fListen) {
bool fBound = false;
if (gArgs.IsArgSet("-bind")) {
for (const std::string &strBind : gArgs.GetArgs("-bind")) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, GetListenPort(),
false)) {
return InitError(ResolveErrMsg("bind", strBind));
}
fBound |=
Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR));
}
}
if (gArgs.IsArgSet("-whitebind")) {
for (const std::string &strBind : gArgs.GetArgs("-whitebind")) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
return InitError(ResolveErrMsg("whitebind", strBind));
}
if (addrBind.GetPort() == 0) {
return InitError(strprintf(
_("Need to specify a port with -whitebind: '%s'"),
strBind));
}
fBound |= Bind(connman, addrBind,
(BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST));
}
}
if (!gArgs.IsArgSet("-bind") && !gArgs.IsArgSet("-whitebind")) {
struct in_addr inaddr_any;
inaddr_any.s_addr = INADDR_ANY;
fBound |=
Bind(connman, CService(in6addr_any, GetListenPort()), BF_NONE);
fBound |= Bind(connman, CService(inaddr_any, GetListenPort()),
!fBound ? BF_REPORT_ERROR : BF_NONE);
}
if (!fBound) {
return InitError(_("Failed to listen on any port. Use -listen=0 if "
"you want this."));
}
}
if (gArgs.IsArgSet("-externalip")) {
for (const std::string &strAddr : gArgs.GetArgs("-externalip")) {
CService addrLocal;
if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(),
fNameLookup) &&
addrLocal.IsValid()) {
AddLocal(addrLocal, LOCAL_MANUAL);
} else {
return InitError(ResolveErrMsg("externalip", strAddr));
}
}
}
if (gArgs.IsArgSet("-seednode")) {
for (const std::string &strDest : gArgs.GetArgs("-seednode")) {
connman.AddOneShot(strDest);
}
}
#if ENABLE_ZMQ
pzmqNotificationInterface = CZMQNotificationInterface::Create();
if (pzmqNotificationInterface) {
RegisterValidationInterface(pzmqNotificationInterface);
}
#endif
// unlimited unless -maxuploadtarget is set
uint64_t nMaxOutboundLimit = 0;
uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME;
if (gArgs.IsArgSet("-maxuploadtarget")) {
nMaxOutboundLimit =
gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 *
1024;
}
// Step 7: load block chain
fReindex = gArgs.GetBoolArg("-reindex", false);
bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);
// cache size calculations
int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
// total cache cannot be less than nMinDbCache
nTotalCache = std::max(nTotalCache, nMinDbCache << 20);
// total cache cannot be greater than nMaxDbcache
nTotalCache = std::min(nTotalCache, nMaxDbCache << 20);
int64_t nBlockTreeDBCache = nTotalCache / 8;
nBlockTreeDBCache = std::min(nBlockTreeDBCache,
(gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)
? nMaxBlockDBAndTxIndexCache
: nMaxBlockDBCache)
<< 20);
nTotalCache -= nBlockTreeDBCache;
// use 25%-50% of the remainder for disk cache
int64_t nCoinDBCache =
std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23));
// cap total coins db cache
nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20);
nTotalCache -= nCoinDBCache;
// the rest goes to in-memory cache
nCoinCacheUsage = nTotalCache;
int64_t nMempoolSizeMax =
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1fMiB for block index database\n",
nBlockTreeDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for chain state database\n",
nCoinDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of "
"unused mempool space)\n",
nCoinCacheUsage * (1.0 / 1024 / 1024),
nMempoolSizeMax * (1.0 / 1024 / 1024));
bool fLoaded = false;
while (!fLoaded && !fRequestShutdown) {
bool fReset = fReindex;
std::string strLoadError;
uiInterface.InitMessage(_("Loading block index..."));
nStart = GetTimeMillis();
do {
try {
UnloadBlockIndex();
delete pcoinsTip;
delete pcoinsdbview;
delete pcoinscatcher;
delete pblocktree;
pblocktree =
new CBlockTreeDB(nBlockTreeDBCache, false, fReindex);
pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false,
fReindex || fReindexChainState);
pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview);
if (fReindex) {
pblocktree->WriteReindexing(true);
// If we're reindexing in prune mode, wipe away unusable
// block files and all undo data files
if (fPruneMode) {
CleanupBlockRevFiles();
}
} else if (!pcoinsdbview->Upgrade()) {
strLoadError = _("Error upgrading chainstate database");
break;
}
if (fRequestShutdown) break;
if (!LoadBlockIndex(chainparams)) {
strLoadError = _("Error loading block database");
break;
}
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way
// around).
if (!mapBlockIndex.empty() &&
mapBlockIndex.count(
chainparams.GetConsensus().hashGenesisBlock) == 0) {
return InitError(_("Incorrect or no genesis block found. "
"Wrong datadir for network?"));
}
// Initialize the block index (no-op if non-empty database was
// already loaded)
if (!InitBlockIndex(config)) {
strLoadError = _("Error initializing block database");
break;
}
// Check for changed -txindex state
if (fTxIndex != gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
strLoadError = _("You need to rebuild the database using "
"-reindex-chainstate to change -txindex");
break;
}
// Check for changed -prune state. What we are concerned about
// is a user who has pruned blocks in the past, but is now
// trying to run unpruned.
if (fHavePruned && !fPruneMode) {
strLoadError =
_("You need to rebuild the database using -reindex to "
"go back to unpruned mode. This will redownload the "
"entire blockchain");
break;
}
if (!ReplayBlocks(config, pcoinsdbview)) {
strLoadError =
_("Unable to replay blocks. You will need to rebuild "
"the database using -reindex-chainstate.");
break;
}
pcoinsTip = new CCoinsViewCache(pcoinscatcher);
LoadChainTip(chainparams);
if (!fReindex && chainActive.Tip() != nullptr) {
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(config)) {
strLoadError = _("Unable to rewind the database to a "
"pre-fork state. You will need to "
"redownload the blockchain");
break;
}
}
uiInterface.InitMessage(_("Verifying blocks..."));
if (fHavePruned &&
gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) >
MIN_BLOCKS_TO_KEEP) {
LogPrintf("Prune: pruned datadir may not have more than %d "
"blocks; only checking available blocks",
MIN_BLOCKS_TO_KEEP);
}
{
LOCK(cs_main);
CBlockIndex *tip = chainActive.Tip();
RPCNotifyBlockChange(true, tip);
if (tip &&
tip->nTime >
GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME) {
strLoadError =
_("The block database contains a block which "
"appears to be from the future. "
"This may be due to your computer's date and "
"time being set incorrectly. "
"Only rebuild the block database if you are sure "
"that your computer's date and time are correct");
break;
}
}
if (!CVerifyDB().VerifyDB(
config, pcoinsdbview,
gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
strLoadError = _("Corrupted block database detected");
break;
}
} catch (const std::exception &e) {
LogPrintf("%s\n", e.what());
strLoadError = _("Error opening block database");
break;
}
fLoaded = true;
} while (false);
if (!fLoaded && !fRequestShutdown) {
// first suggest a reindex
if (!fReset) {
bool fRet = uiInterface.ThreadSafeQuestion(
strLoadError + ".\n\n" +
_("Do you want to rebuild the block database now?"),
strLoadError + ".\nPlease restart with -reindex or "
"-reindex-chainstate to recover.",
"",
CClientUIInterface::MSG_ERROR |
CClientUIInterface::BTN_ABORT);
if (fRet) {
fReindex = true;
fRequestShutdown = false;
} else {
LogPrintf("Aborted block database rebuild. Exiting.\n");
return false;
}
} else {
return InitError(strLoadError);
}
}
}
// As LoadBlockIndex can take several minutes, it's possible the user
// requested to kill the GUI during the last operation. If so, exit.
// As the program has not fully started yet, Shutdown() is possibly
// overkill.
if (fRequestShutdown) {
LogPrintf("Shutdown requested. Exiting.\n");
return false;
}
LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart);
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK,
CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
if (!est_filein.IsNull()) mempool.ReadFeeEstimates(est_filein);
fFeeEstimatesInitialized = true;
// Encoded addresses using cashaddr instead of base58
// Activates by default on Jan, 14
config.SetCashAddrEncoding(
gArgs.GetBoolArg("-usecashaddr", GetAdjustedTime() > 1515900000));
// Step 8: load wallet
#ifdef ENABLE_WALLET
if (!CWallet::InitLoadWallet(chainparams)) return false;
#else
LogPrintf("No wallet support compiled in!\n");
#endif
// Step 9: data directory maintenance
// if pruning, unset the service bit and perform the initial blockstore
// prune after any wallet rescanning has taken place.
if (fPruneMode) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
uiInterface.InitMessage(_("Pruning blockstore..."));
PruneAndFlush();
}
}
// Step 10: import blocks
if (!CheckDiskSpace()) {
return false;
}
// Either install a handler to notify us when genesis activates, or set
// fHaveGenesis directly.
// No locking, as this happens before any background thread is started.
if (chainActive.Tip() == nullptr) {
uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait);
} else {
fHaveGenesis = true;
}
if (gArgs.IsArgSet("-blocknotify")) {
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
}
std::vector vImportFiles;
if (gArgs.IsArgSet("-loadblock")) {
for (const std::string &strFile : gArgs.GetArgs("-loadblock")) {
vImportFiles.push_back(strFile);
}
}
threadGroup.create_thread(
boost::bind(&ThreadImport, std::ref(config), vImportFiles));
// Wait for genesis block to be processed
{
boost::unique_lock lock(cs_GenesisWait);
while (!fHaveGenesis) {
condvar_GenesisWait.wait(lock);
}
uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait);
}
// Step 11: start node
//// debug print
LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
LogPrintf("nBestHeight = %d\n", chainActive.Height());
if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
StartTorControl(threadGroup, scheduler);
}
Discover(threadGroup);
// Map ports with UPnP
MapPort(gArgs.GetBoolArg("-upnp", DEFAULT_UPNP));
std::string strNodeError;
CConnman::Options connOptions;
connOptions.nLocalServices = nLocalServices;
connOptions.nRelevantServices = nRelevantServices;
connOptions.nMaxConnections = nMaxConnections;
connOptions.nMaxOutbound =
std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections);
connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
connOptions.nMaxFeeler = 1;
connOptions.nBestHeight = chainActive.Height();
connOptions.uiInterface = &uiInterface;
connOptions.nSendBufferMaxSize =
1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
connOptions.nReceiveFloodSize =
1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
if (!connman.Start(scheduler, strNodeError, connOptions)) {
return InitError(strNodeError);
}
// Step 12: finished
SetRPCWarmupFinished();
uiInterface.InitMessage(_("Done loading"));
#ifdef ENABLE_WALLET
for (CWalletRef pwallet : vpwallets) {
pwallet->postInitProcess(scheduler);
}
#endif
return !fRequestShutdown;
}
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 668a3fe0b..93db33b25 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -1,160 +1,159 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
// NOTE: This file is intended to be customised by the end user, and includes
// only local node policy logic
#include "policy/policy.h"
#include "tinyformat.h"
#include "util.h"
#include "utilstrencodings.h"
#include "validation.h"
/**
* Check transaction inputs to mitigate two potential denial-of-service attacks:
*
* 1. scriptSigs with extra data stuffed into them, not consumed by scriptPubKey
* (or P2SH script)
* 2. P2SH scripts with a crazy number of expensive CHECKSIG/CHECKMULTISIG
* operations
*
* Why bother? To avoid denial-of-service attacks; an attacker can submit a
* standard HASH... OP_EQUAL transaction, which will get accepted into blocks.
* The redemption script can be anything; an attacker could use a very
* expensive-to-check-upon-redemption script like:
* DUP CHECKSIG DROP ... repeated 100 times... OP_1
*/
/**
 * Check whether a scriptPubKey matches one of the standard templates we are
 * willing to relay/mine, and apply the extra per-template policy limits.
 *
 * @param[in]  scriptPubKey  Output script to classify.
 * @param[out] whichType     Template recognised by Solver()
 *                           (TX_NONSTANDARD if none matched).
 * @return true if the script is a standard template and passes the
 *         multisig / data-carrier policy limits below.
 */
bool IsStandard(const CScript &scriptPubKey, txnouttype &whichType) {
    // NOTE(review): restored the template arguments that were stripped from
    // this file; Solver() fills a vector of byte vectors.
    std::vector<std::vector<uint8_t>> vSolutions;
    if (!Solver(scriptPubKey, whichType, vSolutions)) {
        return false;
    }

    if (whichType == TX_MULTISIG) {
        uint8_t m = vSolutions.front()[0];
        uint8_t n = vSolutions.back()[0];
        // Support up to x-of-3 multisig txns as standard
        if (n < 1 || n > 3) return false;
        if (m < 1 || m > n) return false;
    } else if (whichType == TX_NULL_DATA) {
        // Data-carrier (OP_RETURN) outputs are only standard when enabled,
        // and only up to the configured size limit.
        if (!fAcceptDatacarrier) {
            return false;
        }
        unsigned nMaxDatacarrierBytes =
            gArgs.GetArg("-datacarriersize", MAX_OP_RETURN_RELAY);
        if (scriptPubKey.size() > nMaxDatacarrierBytes) {
            return false;
        }
    }

    return whichType != TX_NONSTANDARD;
}
bool IsStandardTx(const CTransaction &tx, std::string &reason) {
if (tx.nVersion > CTransaction::MAX_STANDARD_VERSION || tx.nVersion < 1) {
reason = "version";
return false;
}
// Extremely large transactions with lots of inputs can cost the network
// almost as much to process as they cost the sender in fees, because
// computing signature hashes is O(ninputs*txsize). Limiting transactions
// to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
unsigned int sz = tx.GetTotalSize();
if (sz >= MAX_STANDARD_TX_SIZE) {
reason = "tx-size";
return false;
}
for (const CTxIn &txin : tx.vin) {
// Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
// keys (remember the 520 byte limit on redeemScript size). That works
// out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
// bytes of scriptSig, which we round off to 1650 bytes for some minor
// future-proofing. That's also enough to spend a 20-of-20 CHECKMULTISIG
// scriptPubKey, though such a scriptPubKey is not considered standard.
if (txin.scriptSig.size() > 1650) {
reason = "scriptsig-size";
return false;
}
if (!txin.scriptSig.IsPushOnly()) {
reason = "scriptsig-not-pushonly";
return false;
}
}
unsigned int nDataOut = 0;
txnouttype whichType;
for (const CTxOut &txout : tx.vout) {
if (!::IsStandard(txout.scriptPubKey, whichType)) {
reason = "scriptpubkey";
return false;
}
if (whichType == TX_NULL_DATA) {
nDataOut++;
} else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
reason = "bare-multisig";
return false;
} else if (txout.IsDust(dustRelayFee)) {
reason = "dust";
return false;
}
}
// only one OP_RETURN txout is permitted
if (nDataOut > 1) {
reason = "multi-op-return";
return false;
}
return true;
}
/**
 * Check that every input of `tx` spends an output whose scriptPubKey is a
 * standard template, and that P2SH redemptions stay within the sigop limit.
 *
 * @param[in] tx         Transaction whose inputs are checked.
 * @param[in] mapInputs  Coins view providing the previous outputs spent.
 * @return true if all inputs are standard (coinbases pass trivially).
 */
bool AreInputsStandard(const CTransaction &tx,
                       const CCoinsViewCache &mapInputs) {
    if (tx.IsCoinBase()) {
        // Coinbases don't use vin normally.
        return true;
    }

    for (size_t i = 0; i < tx.vin.size(); i++) {
        const CTxOut &prev = mapInputs.GetOutputFor(tx.vin[i]);

        // NOTE(review): restored the template arguments that were stripped
        // from this file.
        std::vector<std::vector<uint8_t>> vSolutions;
        txnouttype whichType;
        // get the scriptPubKey corresponding to this input:
        const CScript &prevScript = prev.scriptPubKey;
        if (!Solver(prevScript, whichType, vSolutions)) {
            return false;
        }

        if (whichType == TX_SCRIPTHASH) {
            std::vector<std::vector<uint8_t>> stack;
            // convert the scriptSig into a stack, so we can inspect the
            // redeemScript
            if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE,
                            BaseSignatureChecker())) {
                return false;
            }
            if (stack.empty()) {
                return false;
            }
            // The last stack element is the serialized redeemScript; enforce
            // the standardness cap on its (accurate) sigop count.
            CScript subscript(stack.back().begin(), stack.back().end());
            if (subscript.GetSigOpCount(true) > MAX_P2SH_SIGOPS) {
                return false;
            }
        }
    }

    return true;
}
-CFeeRate incrementalRelayFee = CFeeRate(DEFAULT_INCREMENTAL_RELAY_FEE);
// Feerate used by IsStandardTx() to decide whether an output is dust
// (initialized from DUST_RELAY_TX_FEE; see policy.h).
CFeeRate dustRelayFee = CFeeRate(DUST_RELAY_TX_FEE);
// Bytes charged per signature operation (default for -bytespersigop).
unsigned int nBytesPerSigOp = DEFAULT_BYTES_PER_SIGOP;
diff --git a/src/policy/policy.h b/src/policy/policy.h
index 9346837bb..0517f0d60 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -1,92 +1,92 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_POLICY_POLICY_H
#define BITCOIN_POLICY_POLICY_H
#include "consensus/consensus.h"
#include "script/interpreter.h"
#include "script/standard.h"
#include
class CCoinsViewCache;
/** Default for -blockmaxsize, which controls the maximum size of block the
* mining code will create **/
static const uint64_t DEFAULT_MAX_GENERATED_BLOCK_SIZE = 2 * ONE_MEGABYTE;
/** Default for -blockprioritypercentage, define the amount of block space
* reserved to high priority transactions **/
static const uint64_t DEFAULT_BLOCK_PRIORITY_PERCENTAGE = 5;
/** Default for -blockmintxfee, which sets the minimum feerate for a transaction
* in blocks created by mining code **/
static const Amount DEFAULT_BLOCK_MIN_TX_FEE(1000);
/** The maximum size for transactions we're willing to relay/mine */
static const unsigned int MAX_STANDARD_TX_SIZE = 100000;
/** Maximum number of signature check operations in an IsStandard() P2SH script
*/
static const unsigned int MAX_P2SH_SIGOPS = 15;
/** The maximum number of sigops we're willing to relay/mine in a single tx */
static const unsigned int MAX_STANDARD_TX_SIGOPS = MAX_TX_SIGOPS_COUNT / 5;
/** Default for -maxmempool, maximum megabytes of mempool memory usage */
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE = 300;
/** Minimum feerate increase required for mempool limiting or BIP 125
 * replacement (the configurable -incrementalrelayfee option was removed) **/
-static const Amount DEFAULT_INCREMENTAL_RELAY_FEE(1000);
+static const CFeeRate MEMPOOL_FULL_FEE_INCREMENT(Amount(1000));
/** Default for -bytespersigop */
static const unsigned int DEFAULT_BYTES_PER_SIGOP = 20;
/**
* Min feerate for defining dust. Historically this has been the same as the
* minRelayTxFee, however changing the dust limit changes which transactions are
* standard and should be done with care and ideally rarely. It makes sense to
* only increase the dust limit after prior releases were already not creating
* outputs below the new threshold.
*/
static const Amount DUST_RELAY_TX_FEE(1000);
/**
* Standard script verification flags that standard transactions will comply
* with. However scripts violating these flags may still be present in valid
* blocks and we must accept those blocks.
*/
static const unsigned int STANDARD_SCRIPT_VERIFY_FLAGS =
MANDATORY_SCRIPT_VERIFY_FLAGS | SCRIPT_VERIFY_DERSIG |
SCRIPT_VERIFY_MINIMALDATA | SCRIPT_VERIFY_NULLDUMMY |
SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS | SCRIPT_VERIFY_CLEANSTACK |
SCRIPT_VERIFY_NULLFAIL | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY |
SCRIPT_VERIFY_CHECKSEQUENCEVERIFY | SCRIPT_VERIFY_LOW_S;
/** For convenience, standard but not mandatory verify flags. */
static const unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS =
STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS;
/** Used as the flags parameter to sequence and nLocktime checks in
* non-consensus code. */
static const unsigned int STANDARD_LOCKTIME_VERIFY_FLAGS =
LOCKTIME_VERIFY_SEQUENCE | LOCKTIME_MEDIAN_TIME_PAST;
bool IsStandard(const CScript &scriptPubKey, txnouttype &whichType);
/**
* Check for standard transaction types
* @return True if all outputs (scriptPubKeys) use only standard transaction
* forms
*/
bool IsStandardTx(const CTransaction &tx, std::string &reason);
/**
* Check for standard transaction types
* @param[in] mapInputs Map of previous transactions that have outputs we're
* spending
* @return True if all inputs (scriptSigs) use only standard transaction forms
*/
bool AreInputsStandard(const CTransaction &tx,
const CCoinsViewCache &mapInputs);
// NOTE(review): the incrementalRelayFee global was removed together with the
// -incrementalrelayfee option (its definition in policy.cpp is gone in this
// patch), so the stale extern declaration is dropped here to avoid dangling
// references to a symbol that no longer exists.
extern CFeeRate dustRelayFee;
extern unsigned int nBytesPerSigOp;
#endif // BITCOIN_POLICY_POLICY_H
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index f385faad3..43c0934f8 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -1,775 +1,769 @@
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "rpc/server.h"
#include "chainparams.h"
#include "clientversion.h"
#include "config.h"
#include "net.h"
#include "net_processing.h"
#include "netbase.h"
#include "policy/policy.h"
#include "protocol.h"
#include "sync.h"
#include "timedata.h"
#include "ui_interface.h"
#include "util.h"
#include "utilstrencodings.h"
#include "validation.h"
#include "version.h"
#include
/**
 * RPC handler for "getconnectioncount": report how many peers we are
 * currently connected to.
 */
static UniValue getconnectioncount(const Config &config,
                                   const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "getconnectioncount\n"
            "\nReturns the number of connections to other nodes.\n"
            "\nResult:\n"
            "n (numeric) The connection count\n"
            "\nExamples:\n" +
            HelpExampleCli("getconnectioncount", "") +
            HelpExampleRpc("getconnectioncount", ""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    return static_cast<int>(g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL));
}
/**
 * RPC handler for "ping": queue a ping to every connected peer. The results
 * appear later in getpeerinfo (pingtime/pingwait fields).
 */
static UniValue ping(const Config &config, const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "ping\n"
            "\nRequests that a ping be sent to all other nodes, to measure "
            "ping time.\n"
            "Results provided in getpeerinfo, pingtime and pingwait fields are "
            "decimal seconds.\n"
            "Ping command is handled in queue with all other commands, so it "
            "measures processing backlog, not just network ping.\n"
            "\nExamples:\n" +
            HelpExampleCli("ping", "") + HelpExampleRpc("ping", ""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    // Flag every peer so a ping goes out on its next message-processing pass.
    g_connman->ForEachNode([](CNode *node) { node->fPingQueued = true; });

    return NullUniValue;
}
/**
 * RPC handler for "getpeerinfo": return per-peer network statistics as a
 * JSON array, one object per connected node.
 */
static UniValue getpeerinfo(const Config &config,
                            const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "getpeerinfo\n"
            "\nReturns data about each connected network node as a json array "
            "of objects.\n"
            "\nResult:\n"
            "[\n"
            "  {\n"
            "    \"id\": n, (numeric) Peer index\n"
            "    \"addr\":\"host:port\", (string) The ip address and port "
            "of the peer\n"
            "    \"addrlocal\":\"ip:port\", (string) local address\n"
            "    \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services "
            "offered\n"
            "    \"relaytxes\":true|false, (boolean) Whether peer has asked "
            "us to relay transactions to it\n"
            "    \"lastsend\": ttt, (numeric) The time in seconds "
            "since epoch (Jan 1 1970 GMT) of the last send\n"
            "    \"lastrecv\": ttt, (numeric) The time in seconds "
            "since epoch (Jan 1 1970 GMT) of the last receive\n"
            "    \"bytessent\": n, (numeric) The total bytes sent\n"
            "    \"bytesrecv\": n, (numeric) The total bytes "
            "received\n"
            "    \"conntime\": ttt, (numeric) The connection time in "
            "seconds since epoch (Jan 1 1970 GMT)\n"
            "    \"timeoffset\": ttt, (numeric) The time offset in "
            "seconds\n"
            "    \"pingtime\": n, (numeric) ping time (if "
            "available)\n"
            "    \"minping\": n, (numeric) minimum observed ping "
            "time (if any at all)\n"
            "    \"pingwait\": n, (numeric) ping wait (if "
            "non-zero)\n"
            "    \"version\": v, (numeric) The peer version, such "
            "as 7001\n"
            "    \"subver\": \"/Satoshi:0.8.5/\", (string) The string "
            "version\n"
            "    \"inbound\": true|false, (boolean) Inbound (true) or "
            "Outbound (false)\n"
            "    \"addnode\": true|false, (boolean) Whether connection was "
            "due to addnode and is using an addnode slot\n"
            "    \"startingheight\": n, (numeric) The starting height "
            "(block) of the peer\n"
            "    \"banscore\": n, (numeric) The ban score\n"
            "    \"synced_headers\": n, (numeric) The last header we "
            "have in common with this peer\n"
            "    \"synced_blocks\": n, (numeric) The last block we have "
            "in common with this peer\n"
            "    \"inflight\": [\n"
            "       n, (numeric) The heights of blocks "
            "we're currently asking from this peer\n"
            "       ...\n"
            "    ],\n"
            "    \"whitelisted\": true|false, (boolean) Whether the peer is "
            "whitelisted\n"
            "    \"bytessent_per_msg\": {\n"
            "       \"addr\": n, (numeric) The total bytes sent "
            "aggregated by message type\n"
            "       ...\n"
            "    },\n"
            "    \"bytesrecv_per_msg\": {\n"
            "       \"addr\": n, (numeric) The total bytes "
            "received aggregated by message type\n"
            "       ...\n"
            "    }\n"
            "  }\n"
            "  ,...\n"
            "]\n"
            "\nExamples:\n" +
            HelpExampleCli("getpeerinfo", "") +
            HelpExampleRpc("getpeerinfo", ""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    // NOTE(review): restored the element type stripped from this file;
    // GetNodeStats() fills a vector of CNodeStats.
    std::vector<CNodeStats> vstats;
    g_connman->GetNodeStats(vstats);

    UniValue ret(UniValue::VARR);

    for (const CNodeStats &stats : vstats) {
        UniValue obj(UniValue::VOBJ);
        CNodeStateStats statestats;
        // State stats are only available while net_processing still tracks
        // the node; guard the dependent fields below.
        bool fStateStats = GetNodeStateStats(stats.nodeid, statestats);
        obj.push_back(Pair("id", stats.nodeid));
        obj.push_back(Pair("addr", stats.addrName));
        if (!(stats.addrLocal.empty())) {
            obj.push_back(Pair("addrlocal", stats.addrLocal));
        }
        obj.push_back(Pair("services", strprintf("%016x", stats.nServices)));
        obj.push_back(Pair("relaytxes", stats.fRelayTxes));
        obj.push_back(Pair("lastsend", stats.nLastSend));
        obj.push_back(Pair("lastrecv", stats.nLastRecv));
        obj.push_back(Pair("bytessent", stats.nSendBytes));
        obj.push_back(Pair("bytesrecv", stats.nRecvBytes));
        obj.push_back(Pair("conntime", stats.nTimeConnected));
        obj.push_back(Pair("timeoffset", stats.nTimeOffset));
        if (stats.dPingTime > 0.0) {
            obj.push_back(Pair("pingtime", stats.dPingTime));
        }
        // NOTE(review): restored the <int64_t> template argument stripped
        // from this file — TODO confirm against upstream.
        if (stats.dMinPing < std::numeric_limits<int64_t>::max() / 1e6) {
            obj.push_back(Pair("minping", stats.dMinPing));
        }
        if (stats.dPingWait > 0.0) {
            obj.push_back(Pair("pingwait", stats.dPingWait));
        }
        obj.push_back(Pair("version", stats.nVersion));
        // Use the sanitized form of subver here, to avoid tricksy remote peers
        // from corrupting or modifying the JSON output by putting special
        // characters in their ver message.
        obj.push_back(Pair("subver", stats.cleanSubVer));
        obj.push_back(Pair("inbound", stats.fInbound));
        obj.push_back(Pair("addnode", stats.fAddnode));
        obj.push_back(Pair("startingheight", stats.nStartingHeight));
        if (fStateStats) {
            obj.push_back(Pair("banscore", statestats.nMisbehavior));
            obj.push_back(Pair("synced_headers", statestats.nSyncHeight));
            obj.push_back(Pair("synced_blocks", statestats.nCommonHeight));
            UniValue heights(UniValue::VARR);
            for (int height : statestats.vHeightInFlight) {
                heights.push_back(height);
            }
            obj.push_back(Pair("inflight", heights));
        }
        obj.push_back(Pair("whitelisted", stats.fWhitelisted));

        // Only report message types with non-zero byte counts.
        UniValue sendPerMsgCmd(UniValue::VOBJ);
        for (const mapMsgCmdSize::value_type &i : stats.mapSendBytesPerMsgCmd) {
            if (i.second > 0) {
                sendPerMsgCmd.push_back(Pair(i.first, i.second));
            }
        }
        obj.push_back(Pair("bytessent_per_msg", sendPerMsgCmd));

        UniValue recvPerMsgCmd(UniValue::VOBJ);
        for (const mapMsgCmdSize::value_type &i : stats.mapRecvBytesPerMsgCmd) {
            if (i.second > 0) {
                recvPerMsgCmd.push_back(Pair(i.first, i.second));
            }
        }
        obj.push_back(Pair("bytesrecv_per_msg", recvPerMsgCmd));

        ret.push_back(obj);
    }

    return ret;
}
/**
 * RPC handler for "addnode": manage the manual-connection ("addnode") list,
 * or attempt a single one-shot connection.
 */
static UniValue addnode(const Config &config, const JSONRPCRequest &request) {
    std::string strCommand;
    if (request.params.size() == 2) {
        strCommand = request.params[1].get_str();
    }

    if (request.fHelp || request.params.size() != 2 ||
        (strCommand != "onetry" && strCommand != "add" &&
         strCommand != "remove")) {
        throw std::runtime_error(
            "addnode \"node\" \"add|remove|onetry\"\n"
            "\nAttempts add or remove a node from the addnode list.\n"
            "Or try a connection to a node once.\n"
            "\nArguments:\n"
            "1. \"node\" (string, required) The node (see getpeerinfo for "
            "nodes)\n"
            "2. \"command\" (string, required) 'add' to add a node to the "
            "list, 'remove' to remove a node from the list, 'onetry' to try a "
            "connection to the node once\n"
            "\nExamples:\n" +
            HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\"") +
            HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    const std::string strNode = request.params[0].get_str();

    if (strCommand == "onetry") {
        // One-shot attempt; the node is not added to the persistent list.
        CAddress addr;
        g_connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str());
        return NullUniValue;
    }

    if (strCommand == "add") {
        if (!g_connman->AddNode(strNode)) {
            throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED,
                               "Error: Node already added");
        }
    } else if (strCommand == "remove") {
        if (!g_connman->RemoveAddedNode(strNode)) {
            throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED,
                               "Error: Node has not been added.");
        }
    }

    return NullUniValue;
}
/**
 * RPC handler for "disconnectnode": drop the connection to a single peer,
 * selected either by address or by node id (exactly one of the two).
 */
static UniValue disconnectnode(const Config &config,
                               const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() == 0 ||
        request.params.size() >= 3) {
        throw std::runtime_error(
            "disconnectnode \"[address]\" [nodeid]\n"
            "\nImmediately disconnects from the specified peer node.\n"
            "\nStrictly one out of 'address' and 'nodeid' can be provided to "
            "identify the node.\n"
            "\nTo disconnect by nodeid, either set 'address' to the empty "
            "string, or call using the named 'nodeid' argument only.\n"
            "\nArguments:\n"
            "1. \"address\" (string, optional) The IP address/port of the "
            "node\n"
            "2. \"nodeid\" (number, optional) The node ID (see "
            "getpeerinfo for node IDs)\n"
            "\nExamples:\n" +
            HelpExampleCli("disconnectnode", "\"192.168.0.6:8333\"") +
            HelpExampleCli("disconnectnode", "\"\" 1") +
            HelpExampleRpc("disconnectnode", "\"192.168.0.6:8333\"") +
            HelpExampleRpc("disconnectnode", "\"\", 1"));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    const UniValue &addrParam = request.params[0];
    const UniValue &idParam =
        request.params.size() < 2 ? NullUniValue : request.params[1];

    bool removed;
    if (!addrParam.isNull() && idParam.isNull()) {
        /* handle disconnect-by-address */
        removed = g_connman->DisconnectNode(addrParam.get_str());
    } else if (!idParam.isNull() &&
               (addrParam.isNull() ||
                (addrParam.isStr() && addrParam.get_str().empty()))) {
        /* handle disconnect-by-id; the address must be absent or empty */
        removed = g_connman->DisconnectNode(NodeId(idParam.get_int64()));
    } else {
        throw JSONRPCError(
            RPC_INVALID_PARAMS,
            "Only one of address and nodeid should be provided.");
    }

    if (!removed) {
        throw JSONRPCError(RPC_CLIENT_NODE_NOT_CONNECTED,
                           "Node not found in connected nodes");
    }

    return NullUniValue;
}
/**
 * RPC handler for "getaddednodeinfo": report connection state for one or all
 * nodes added via addnode (one-shot "onetry" nodes are not listed).
 */
static UniValue getaddednodeinfo(const Config &config,
                                 const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() > 1) {
        throw std::runtime_error(
            "getaddednodeinfo ( \"node\" )\n"
            "\nReturns information about the given added node, or all added "
            "nodes\n"
            "(note that onetry addnodes are not listed here)\n"
            "\nArguments:\n"
            "1. \"node\" (string, optional) If provided, return information "
            "about this specific node, otherwise all nodes are returned.\n"
            "\nResult:\n"
            "[\n"
            "  {\n"
            "    \"addednode\" : \"192.168.0.201\", (string) The node ip "
            "address or name (as provided to addnode)\n"
            "    \"connected\" : true|false, (boolean) If connected\n"
            "    \"addresses\" : [ (list of objects) Only "
            "when connected = true\n"
            "       {\n"
            "         \"address\" : \"192.168.0.201:8333\", (string) The "
            "bitcoin server IP and port we're connected to\n"
            "         \"connected\" : \"outbound\" (string) "
            "connection, inbound or outbound\n"
            "       }\n"
            "     ]\n"
            "  }\n"
            "  ,...\n"
            "]\n"
            "\nExamples:\n" +
            HelpExampleCli("getaddednodeinfo", "true") +
            HelpExampleCli("getaddednodeinfo", "true \"192.168.0.201\"") +
            HelpExampleRpc("getaddednodeinfo", "true, \"192.168.0.201\""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    // NOTE(review): restored the element type stripped from this file;
    // GetAddedNodeInfo() returns a vector of AddedNodeInfo.
    std::vector<AddedNodeInfo> vInfo = g_connman->GetAddedNodeInfo();

    // With a "node" argument, narrow the list down to that single entry.
    if (request.params.size() == 1) {
        bool found = false;
        for (const AddedNodeInfo &info : vInfo) {
            if (info.strAddedNode == request.params[0].get_str()) {
                vInfo.assign(1, info);
                found = true;
                break;
            }
        }
        if (!found) {
            throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED,
                               "Error: Node has not been added.");
        }
    }

    UniValue ret(UniValue::VARR);

    for (const AddedNodeInfo &info : vInfo) {
        UniValue obj(UniValue::VOBJ);
        obj.push_back(Pair("addednode", info.strAddedNode));
        obj.push_back(Pair("connected", info.fConnected));
        UniValue addresses(UniValue::VARR);
        if (info.fConnected) {
            UniValue address(UniValue::VOBJ);
            address.push_back(Pair("address", info.resolvedAddress.ToString()));
            address.push_back(
                Pair("connected", info.fInbound ? "inbound" : "outbound"));
            addresses.push_back(address);
        }
        obj.push_back(Pair("addresses", addresses));
        ret.push_back(obj);
    }

    return ret;
}
static UniValue getnettotals(const Config &config,
                             const JSONRPCRequest &request) {
    // RPC: report cumulative network traffic counters and the state of the
    // outbound upload target.
    if (request.fHelp || request.params.size() > 0) {
        throw std::runtime_error(
            "getnettotals\n"
            "\nReturns information about network traffic, including bytes in, "
            "bytes out,\n"
            "and current time.\n"
            "\nResult:\n"
            "{\n"
            " \"totalbytesrecv\": n, (numeric) Total bytes received\n"
            " \"totalbytessent\": n, (numeric) Total bytes sent\n"
            " \"timemillis\": t, (numeric) Current UNIX time in "
            "milliseconds\n"
            " \"uploadtarget\":\n"
            " {\n"
            " \"timeframe\": n, (numeric) Length of "
            "the measuring timeframe in seconds\n"
            " \"target\": n, (numeric) Target in "
            "bytes\n"
            " \"target_reached\": true|false, (boolean) True if "
            "target is reached\n"
            " \"serve_historical_blocks\": true|false, (boolean) True if "
            "serving historical blocks\n"
            " \"bytes_left_in_cycle\": t, (numeric) Bytes "
            "left in current time cycle\n"
            " \"time_left_in_cycle\": t (numeric) Seconds "
            "left in current time cycle\n"
            " }\n"
            "}\n"
            "\nExamples:\n" +
            HelpExampleCli("getnettotals", "") +
            HelpExampleRpc("getnettotals", ""));
    }

    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    UniValue result(UniValue::VOBJ);
    result.push_back(Pair("totalbytesrecv", g_connman->GetTotalBytesRecv()));
    result.push_back(Pair("totalbytessent", g_connman->GetTotalBytesSent()));
    result.push_back(Pair("timemillis", GetTimeMillis()));

    // Nested object describing the -maxuploadtarget bandwidth budget.
    UniValue uploadTarget(UniValue::VOBJ);
    uploadTarget.push_back(
        Pair("timeframe", g_connman->GetMaxOutboundTimeframe()));
    uploadTarget.push_back(Pair("target", g_connman->GetMaxOutboundTarget()));
    uploadTarget.push_back(
        Pair("target_reached", g_connman->OutboundTargetReached(false)));
    uploadTarget.push_back(Pair("serve_historical_blocks",
                                !g_connman->OutboundTargetReached(true)));
    uploadTarget.push_back(
        Pair("bytes_left_in_cycle", g_connman->GetOutboundTargetBytesLeft()));
    uploadTarget.push_back(
        Pair("time_left_in_cycle", g_connman->GetMaxOutboundTimeLeftInCycle()));
    result.push_back(Pair("uploadtarget", uploadTarget));
    return result;
}
// Build the "networks" array reported by getnetworkinfo: one object per
// routable network class, skipping NET_UNROUTABLE. Each entry records the
// network name, its -onlynet/reachability status, and any configured proxy.
static UniValue GetNetworksInfo() {
    UniValue networks(UniValue::VARR);
    for (int n = 0; n < NET_MAX; ++n) {
        // Fix: the extracted source read "static_cast(n)" — the template
        // argument list was stripped; restored here.
        enum Network network = static_cast<enum Network>(n);
        if (network == NET_UNROUTABLE) {
            continue;
        }
        proxyType proxy;
        UniValue obj(UniValue::VOBJ);
        GetProxy(network, proxy);
        obj.push_back(Pair("name", GetNetworkName(network)));
        obj.push_back(Pair("limited", IsLimited(network)));
        obj.push_back(Pair("reachable", IsReachable(network)));
        // Empty string when no proxy is configured for this network.
        obj.push_back(Pair("proxy",
                           proxy.IsValid() ? proxy.proxy.ToStringIPPort()
                                           : std::string()));
        obj.push_back(
            Pair("proxy_randomize_credentials", proxy.randomize_credentials));
        networks.push_back(obj);
    }
    return networks;
}
// RPC: return an object describing the node's P2P networking state
// (version, services, connection counts, per-network info, fee settings,
// local addresses, warnings). This is the post-patch form: the removed
// "incrementalfee" help text and field (diff '-' lines) are dropped.
static UniValue getnetworkinfo(const Config &config,
                               const JSONRPCRequest &request) {
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "getnetworkinfo\n"
            "Returns an object containing various state info regarding P2P "
            "networking.\n"
            "\nResult:\n"
            "{\n"
            " \"version\": xxxxx, (numeric) the server "
            "version\n"
            " \"subversion\": \"/Satoshi:x.x.x/\", (string) the server "
            "subversion string\n"
            " \"protocolversion\": xxxxx, (numeric) the protocol "
            "version\n"
            " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services "
            "we offer to the network\n"
            " \"localrelay\": true|false, (bool) true if "
            "transaction relay is requested from peers\n"
            " \"timeoffset\": xxxxx, (numeric) the time "
            "offset\n"
            " \"connections\": xxxxx, (numeric) the number "
            "of connections\n"
            " \"networkactive\": true|false, (bool) whether p2p "
            "networking is enabled\n"
            " \"networks\": [ (array) information "
            "per network\n"
            " {\n"
            " \"name\": \"xxx\", (string) network "
            "(ipv4, ipv6 or onion)\n"
            " \"limited\": true|false, (boolean) is the "
            "network limited using -onlynet?\n"
            " \"reachable\": true|false, (boolean) is the "
            "network reachable?\n"
            " \"proxy\": \"host:port\" (string) the proxy "
            "that is used for this network, or empty if none\n"
            " \"proxy_randomize_credentials\": true|false, (string) "
            "Whether randomized credentials are used\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"relayfee\": x.xxxxxxxx, (numeric) minimum "
            "relay fee for non-free transactions in " +
            CURRENCY_UNIT +
            "/kB\n"
            " \"excessutxocharge\": x.xxxxxxxx, (numeric) minimum "
            "charge for excess utxos in " +
            CURRENCY_UNIT + "\n"
            " \"localaddresses\": [ "
            "(array) list of local addresses\n"
            " {\n"
            " \"address\": \"xxxx\", "
            "(string) network address\n"
            " \"port\": xxx, "
            "(numeric) network port\n"
            " \"score\": xxx "
            "(numeric) relative score\n"
            " }\n"
            " ,...\n"
            " ]\n"
            " \"warnings\": \"...\" "
            "(string) any network warnings\n"
            "}\n"
            "\nExamples:\n" +
            HelpExampleCli("getnetworkinfo", "") +
            HelpExampleRpc("getnetworkinfo", ""));
    }

    LOCK(cs_main);
    UniValue obj(UniValue::VOBJ);
    obj.push_back(Pair("version", CLIENT_VERSION));
    obj.push_back(Pair("subversion", userAgent(config)));
    obj.push_back(Pair("protocolversion", PROTOCOL_VERSION));
    // Connection-manager-dependent fields are only present when networking
    // is enabled (g_connman exists).
    if (g_connman) {
        obj.push_back(Pair("localservices",
                           strprintf("%016x", g_connman->GetLocalServices())));
    }
    obj.push_back(Pair("localrelay", fRelayTxes));
    obj.push_back(Pair("timeoffset", GetTimeOffset()));
    if (g_connman) {
        obj.push_back(Pair("networkactive", g_connman->GetNetworkActive()));
        obj.push_back(
            Pair("connections",
                 (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL)));
    }
    obj.push_back(Pair("networks", GetNetworksInfo()));
    obj.push_back(
        Pair("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())));
    obj.push_back(Pair("excessutxocharge",
                       ValueFromAmount(config.GetExcessUTXOCharge())));
    UniValue localAddresses(UniValue::VARR);
    {
        LOCK(cs_mapLocalHost);
        // Fix: the pair's template arguments were stripped in the extracted
        // source ("std::pair &item"); iterate with auto instead.
        for (const auto &item : mapLocalHost) {
            UniValue rec(UniValue::VOBJ);
            rec.push_back(Pair("address", item.first.ToString()));
            rec.push_back(Pair("port", item.second.nPort));
            rec.push_back(Pair("score", item.second.nScore));
            localAddresses.push_back(rec);
        }
    }
    obj.push_back(Pair("localaddresses", localAddresses));
    obj.push_back(Pair("warnings", GetWarnings("statusbar")));
    return obj;
}
static UniValue setban(const Config &config, const JSONRPCRequest &request) {
    // RPC: manually ban ("add") or unban ("remove") an IP address or subnet.
    std::string command;
    if (request.params.size() >= 2) {
        command = request.params[1].get_str();
    }
    if (request.fHelp || request.params.size() < 2 ||
        (command != "add" && command != "remove")) {
        throw std::runtime_error(
            "setban \"subnet\" \"add|remove\" (bantime) (absolute)\n"
            "\nAttempts add or remove a IP/Subnet from the banned list.\n"
            "\nArguments:\n"
            "1. \"subnet\" (string, required) The IP/Subnet (see "
            "getpeerinfo for nodes ip) with a optional netmask (default is /32 "
            "= single ip)\n"
            "2. \"command\" (string, required) 'add' to add a IP/Subnet "
            "to the list, 'remove' to remove a IP/Subnet from the list\n"
            "3. \"bantime\" (numeric, optional) time in seconds how long "
            "(or until when if [absolute] is set) the ip is banned (0 or empty "
            "means using the default time of 24h which can also be overwritten "
            "by the -bantime startup argument)\n"
            "4. \"absolute\" (boolean, optional) If set, the bantime must "
            "be a absolute timestamp in seconds since epoch (Jan 1 1970 GMT)\n"
            "\nExamples:\n" +
            HelpExampleCli("setban", "\"192.168.0.6\" \"add\" 86400") +
            HelpExampleCli("setban", "\"192.168.0.0/24\" \"add\"") +
            HelpExampleRpc("setban", "\"192.168.0.6\", \"add\", 86400"));
    }
    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    CSubNet subNet;
    CNetAddr netAddr;
    // A '/' in the first argument marks it as a subnet spec rather than a
    // plain address.
    const bool isSubnet =
        request.params[0].get_str().find("/") != std::string::npos;
    if (isSubnet) {
        LookupSubNet(request.params[0].get_str().c_str(), subNet);
    } else {
        CNetAddr resolved;
        LookupHost(request.params[0].get_str().c_str(), resolved, false);
        netAddr = resolved;
    }

    const bool valid = isSubnet ? subNet.IsValid() : netAddr.IsValid();
    if (!valid) {
        throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET,
                           "Error: Invalid IP/Subnet");
    }

    if (command == "add") {
        const bool alreadyBanned = isSubnet ? g_connman->IsBanned(subNet)
                                            : g_connman->IsBanned(netAddr);
        if (alreadyBanned) {
            throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED,
                               "Error: IP/Subnet already banned");
        }
        // Use standard bantime if not specified.
        int64_t banTime = 0;
        if (request.params.size() >= 3 && !request.params[2].isNull()) {
            banTime = request.params[2].get_int64();
        }
        const bool absolute =
            request.params.size() == 4 && request.params[3].isTrue();
        if (isSubnet) {
            g_connman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute);
        } else {
            g_connman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute);
        }
    } else if (command == "remove") {
        const bool unbanned =
            isSubnet ? g_connman->Unban(subNet) : g_connman->Unban(netAddr);
        if (!unbanned) {
            throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET,
                               "Error: Unban failed. Requested address/subnet "
                               "was not previously banned.");
        }
    }
    return NullUniValue;
}
static UniValue listbanned(const Config &config,
                           const JSONRPCRequest &request) {
    // RPC: dump the ban table as an array of entry objects.
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error("listbanned\n"
                                 "\nList all banned IPs/Subnets.\n"
                                 "\nExamples:\n" +
                                 HelpExampleCli("listbanned", "") +
                                 HelpExampleRpc("listbanned", ""));
    }
    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }

    banmap_t banMap;
    g_connman->GetBanned(banMap);

    UniValue bannedAddresses(UniValue::VARR);
    for (const auto &banned : banMap) {
        CBanEntry banEntry = banned.second;
        UniValue rec(UniValue::VOBJ);
        rec.push_back(Pair("address", banned.first.ToString()));
        rec.push_back(Pair("banned_until", banEntry.nBanUntil));
        rec.push_back(Pair("ban_created", banEntry.nCreateTime));
        rec.push_back(Pair("ban_reason", banEntry.banReasonToString()));
        bannedAddresses.push_back(rec);
    }
    return bannedAddresses;
}
static UniValue clearbanned(const Config &config,
                            const JSONRPCRequest &request) {
    // RPC: wipe the entire ban list.
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error("clearbanned\n"
                                 "\nClear all banned IPs.\n"
                                 "\nExamples:\n" +
                                 HelpExampleCli("clearbanned", "") +
                                 HelpExampleRpc("clearbanned", ""));
    }
    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }
    g_connman->ClearBanned();
    return NullUniValue;
}
static UniValue setnetworkactive(const Config &config,
                                 const JSONRPCRequest &request) {
    // RPC: toggle all P2P network activity; returns the resulting state.
    if (request.fHelp || request.params.size() != 1) {
        throw std::runtime_error(
            "setnetworkactive true|false\n"
            "\nDisable/enable all p2p network activity.\n"
            "\nArguments:\n"
            "1. \"state\" (boolean, required) true to "
            "enable networking, false to disable\n");
    }
    if (!g_connman) {
        throw JSONRPCError(
            RPC_CLIENT_P2P_DISABLED,
            "Error: Peer-to-peer functionality missing or disabled");
    }
    const bool enable = request.params[0].get_bool();
    g_connman->SetNetworkActive(enable);
    return g_connman->GetNetworkActive();
}
// clang-format off
// Dispatch table for the "network" RPC category, registered with the server
// by RegisterNetRPCCommands() below. Columns: category, RPC name, handler
// function, okSafeMode flag, and named-argument list for JSON-RPC calls
// using named parameters.
static const CRPCCommand commands[] = {
    // category name actor (function) okSafeMode
    // ------------------- ------------------------ ---------------------- ----------
    { "network", "getconnectioncount", getconnectioncount, true, {} },
    { "network", "ping", ping, true, {} },
    { "network", "getpeerinfo", getpeerinfo, true, {} },
    { "network", "addnode", addnode, true, {"node","command"} },
    { "network", "disconnectnode", disconnectnode, true, {"address", "nodeid"} },
    { "network", "getaddednodeinfo", getaddednodeinfo, true, {"node"} },
    { "network", "getnettotals", getnettotals, true, {} },
    { "network", "getnetworkinfo", getnetworkinfo, true, {} },
    { "network", "setban", setban, true, {"subnet", "command", "bantime", "absolute"} },
    { "network", "listbanned", listbanned, true, {} },
    { "network", "clearbanned", clearbanned, true, {} },
    { "network", "setnetworkactive", setnetworkactive, true, {"state"} },
};
// clang-format on
// Register every entry of the table above with the RPC dispatch table.
void RegisterNetRPCCommands(CRPCTable &t) {
    for (const auto &cmd : commands) {
        t.appendCommand(cmd.name, &cmd);
    }
}
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 123640172..b689ad6e0 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -1,666 +1,655 @@
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "policy/policy.h"
#include "txmempool.h"
#include "util.h"
#include "test/test_bitcoin.h"
#include
#include
#include
BOOST_FIXTURE_TEST_SUITE(mempool_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(MempoolRemoveTest) {
    // Exercise CTxMemPool::removeRecursive on a small ancestry tree.
    TestMemPoolEntryHelper entry;

    // One parent paying three children; each child pays one grand-child.
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (int n = 0; n < 3; n++) {
        txParent.vout[n].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[n].nValue = Amount(33000LL);
    }
    CMutableTransaction txChild[3];
    for (int n = 0; n < 3; n++) {
        txChild[n].vin.resize(1);
        txChild[n].vin[0].scriptSig = CScript() << OP_11;
        txChild[n].vin[0].prevout = COutPoint(txParent.GetId(), n);
        txChild[n].vout.resize(1);
        txChild[n].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txChild[n].vout[0].nValue = Amount(11000LL);
    }
    CMutableTransaction txGrandChild[3];
    for (int n = 0; n < 3; n++) {
        txGrandChild[n].vin.resize(1);
        txGrandChild[n].vin[0].scriptSig = CScript() << OP_11;
        txGrandChild[n].vin[0].prevout = COutPoint(txChild[n].GetId(), 0);
        txGrandChild[n].vout.resize(1);
        txGrandChild[n].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txGrandChild[n].vout[0].nValue = Amount(11000LL);
    }

    CTxMemPool pool;

    // Removing from an empty pool is a no-op.
    unsigned int sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore);

    // Pool holding only the parent: removal drops exactly one entry.
    pool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore - 1);

    // Parent, children and grandchildren all present.
    pool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    for (int n = 0; n < 3; n++) {
        pool.addUnchecked(txChild[n].GetId(), entry.FromTx(txChild[n]));
        pool.addUnchecked(txGrandChild[n].GetId(),
                          entry.FromTx(txGrandChild[n]));
    }

    // Removing Child[0] also removes GrandChild[0].
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore - 2);
    // Both are gone, so removing either again changes nothing.
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txGrandChild[0]));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore);
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore);

    // Removing the parent sweeps the remaining five descendants.
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore - 5);
    BOOST_CHECK_EQUAL(pool.size(), 0UL);

    // Children and grandchildren only, without the parent (as if the parent
    // were confirmed in a block).
    for (int n = 0; n < 3; n++) {
        pool.addUnchecked(txChild[n].GetId(), entry.FromTx(txChild[n]));
        pool.addUnchecked(txGrandChild[n].GetId(),
                          entry.FromTx(txGrandChild[n]));
    }
    // Removing the (absent) parent must still evict all six descendants, as
    // after a re-org where the parent cannot re-enter the mempool (e.g. it is
    // non-standard).
    sizeBefore = pool.size();
    pool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(pool.size(), sizeBefore - 6);
    BOOST_CHECK_EQUAL(pool.size(), 0UL);
}
BOOST_AUTO_TEST_CASE(MempoolClearTest) {
    // Exercise CTxMemPool::clear and verify every internal index empties.
    TestMemPoolEntryHelper entry;

    // A simple transaction with three outputs.
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (int n = 0; n < 3; n++) {
        txParent.vout[n].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[n].nValue = Amount(33000LL);
    }

    CTxMemPool pool;

    // Clearing an empty pool is a no-op.
    pool.clear();
    BOOST_CHECK_EQUAL(pool.size(), 0UL);

    // Adding one transaction populates each internal container.
    pool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    BOOST_CHECK_EQUAL(pool.size(), 1UL);
    BOOST_CHECK_EQUAL(pool.mapTx.size(), 1UL);
    BOOST_CHECK_EQUAL(pool.mapNextTx.size(), 1UL);
    BOOST_CHECK_EQUAL(pool.vTxHashes.size(), 1UL);

    // clear() must empty every member.
    pool.clear();
    BOOST_CHECK_EQUAL(pool.size(), 0UL);
    BOOST_CHECK_EQUAL(pool.mapTx.size(), 0UL);
    BOOST_CHECK_EQUAL(pool.mapNextTx.size(), 0UL);
    BOOST_CHECK_EQUAL(pool.vTxHashes.size(), 0UL);
}
template
void CheckSort(CTxMemPool &pool, std::vector &sortedOrder) {
BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size());
typename CTxMemPool::indexed_transaction_set::index::type::iterator
it = pool.mapTx.get().begin();
int count = 0;
for (; it != pool.mapTx.get().end(); ++it, ++count) {
BOOST_CHECK_EQUAL(it->GetTx().GetId().ToString(), sortedOrder[count]);
}
}
// Exercise the mempool's descendant-score and mining-score sort indexes.
// NOTE(review): template argument lists were stripped in the extracted
// source ("std::vector sortedOrder", "CheckSort(pool, ...)"); restored as
// std::vector<std::string> and CheckSort<descendant_score>/<mining_score>
// — confirm the tag names against txmempool.h.
BOOST_AUTO_TEST_CASE(MempoolIndexingTest) {
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;

    /* 3rd highest fee */
    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(Amount(10000LL)).Priority(10.0).FromTx(tx1));

    /* highest fee */
    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx2.vout[0].nValue = 2 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(Amount(20000LL)).Priority(9.0).FromTx(tx2));

    /* lowest fee */
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx3.vout[0].nValue = 5 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(Amount(0LL)).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vout.resize(1);
    tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx4.vout[0].nValue = 6 * COIN;
    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(Amount(15000LL)).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vout.resize(1);
    tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx5.vout[0].nValue = 11 * COIN;
    entry.nTime = 1;
    entry.dPriority = 10.0;
    pool.addUnchecked(tx5.GetId(), entry.Fee(Amount(10000LL)).FromTx(tx5));
    BOOST_CHECK_EQUAL(pool.size(), 5UL);

    std::vector<std::string> sortedOrder;
    sortedOrder.resize(5);
    sortedOrder[0] = tx3.GetId().ToString(); // 0
    sortedOrder[1] = tx5.GetId().ToString(); // 10000
    sortedOrder[2] = tx1.GetId().ToString(); // 10000
    sortedOrder[3] = tx4.GetId().ToString(); // 15000
    sortedOrder[4] = tx2.GetId().ToString(); // 20000
    CheckSort<descendant_score>(pool, sortedOrder);

    /* low fee but with high fee child */
    /* tx6 -> tx7 -> tx8, tx9 -> tx10 */
    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vout.resize(1);
    tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx6.vout[0].nValue = 20 * COIN;
    pool.addUnchecked(tx6.GetId(), entry.Fee(Amount(0LL)).FromTx(tx6));
    BOOST_CHECK_EQUAL(pool.size(), 6UL);
    // Check that at this point, tx6 is sorted low
    sortedOrder.insert(sortedOrder.begin(), tx6.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder);

    CTxMemPool::setEntries setAncestors;
    setAncestors.insert(pool.mapTx.find(tx6.GetId()));
    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(1);
    tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_11;
    tx7.vout.resize(2);
    tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    tx7.vout[1].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[1].nValue = 1 * COIN;

    CTxMemPool::setEntries setAncestorsCalculated;
    std::string dummy;
    BOOST_CHECK_EQUAL(
        pool.CalculateMemPoolAncestors(entry.Fee(Amount(2000000LL)).FromTx(tx7),
                                       setAncestorsCalculated, 100, 1000000,
                                       1000, 1000000, dummy),
        true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx7.GetId(), entry.FromTx(tx7), setAncestors);
    BOOST_CHECK_EQUAL(pool.size(), 7UL);

    // Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ...
    sortedOrder.erase(sortedOrder.begin());
    sortedOrder.push_back(tx6.GetId().ToString());
    sortedOrder.push_back(tx7.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder);

    /* low fee child of tx7 */
    CMutableTransaction tx8 = CMutableTransaction();
    tx8.vin.resize(1);
    tx8.vin[0].prevout = COutPoint(tx7.GetId(), 0);
    tx8.vin[0].scriptSig = CScript() << OP_11;
    tx8.vout.resize(1);
    tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx8.vout[0].nValue = 10 * COIN;
    setAncestors.insert(pool.mapTx.find(tx7.GetId()));
    pool.addUnchecked(tx8.GetId(), entry.Fee(Amount(0LL)).Time(2).FromTx(tx8),
                      setAncestors);

    // Now tx8 should be sorted low, but tx6/tx both high
    sortedOrder.insert(sortedOrder.begin(), tx8.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder);

    /* low fee child of tx7 */
    CMutableTransaction tx9 = CMutableTransaction();
    tx9.vin.resize(1);
    tx9.vin[0].prevout = COutPoint(tx7.GetId(), 1);
    tx9.vin[0].scriptSig = CScript() << OP_11;
    tx9.vout.resize(1);
    tx9.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx9.vout[0].nValue = 1 * COIN;
    pool.addUnchecked(tx9.GetId(), entry.Fee(Amount(0LL)).Time(3).FromTx(tx9),
                      setAncestors);

    // tx9 should be sorted low
    BOOST_CHECK_EQUAL(pool.size(), 9UL);
    sortedOrder.insert(sortedOrder.begin(), tx9.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder);

    std::vector<std::string> snapshotOrder = sortedOrder;

    setAncestors.insert(pool.mapTx.find(tx8.GetId()));
    setAncestors.insert(pool.mapTx.find(tx9.GetId()));
    /* tx10 depends on tx8 and tx9 and has a high fee*/
    CMutableTransaction tx10 = CMutableTransaction();
    tx10.vin.resize(2);
    tx10.vin[0].prevout = COutPoint(tx8.GetId(), 0);
    tx10.vin[0].scriptSig = CScript() << OP_11;
    tx10.vin[1].prevout = COutPoint(tx9.GetId(), 0);
    tx10.vin[1].scriptSig = CScript() << OP_11;
    tx10.vout.resize(1);
    tx10.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx10.vout[0].nValue = 10 * COIN;

    setAncestorsCalculated.clear();
    BOOST_CHECK_EQUAL(pool.CalculateMemPoolAncestors(
                          entry.Fee(Amount(200000LL)).Time(4).FromTx(tx10),
                          setAncestorsCalculated, 100, 1000000, 1000, 1000000,
                          dummy),
                      true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx10.GetId(), entry.FromTx(tx10), setAncestors);

    /**
     * tx8 and tx9 should both now be sorted higher
     * Final order after tx10 is added:
     *
     * tx3 = 0 (1)
     * tx5 = 10000 (1)
     * tx1 = 10000 (1)
     * tx4 = 15000 (1)
     * tx2 = 20000 (1)
     * tx9 = 200k (2 txs)
     * tx8 = 200k (2 txs)
     * tx10 = 200k (1 tx)
     * tx6 = 2.2M (5 txs)
     * tx7 = 2.2M (4 txs)
     */
    // take out tx9, tx8 from the beginning
    sortedOrder.erase(sortedOrder.begin(), sortedOrder.begin() + 2);
    sortedOrder.insert(sortedOrder.begin() + 5, tx9.GetId().ToString());
    sortedOrder.insert(sortedOrder.begin() + 6, tx8.GetId().ToString());
    // tx10 is just before tx6
    sortedOrder.insert(sortedOrder.begin() + 7, tx10.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder);

    // there should be 10 transactions in the mempool
    BOOST_CHECK_EQUAL(pool.size(), 10UL);

    // Now try removing tx10 and verify the sort order returns to normal
    pool.removeRecursive(pool.mapTx.find(tx10.GetId())->GetTx());
    CheckSort<descendant_score>(pool, snapshotOrder);

    pool.removeRecursive(pool.mapTx.find(tx9.GetId())->GetTx());
    pool.removeRecursive(pool.mapTx.find(tx8.GetId())->GetTx());
    /* Now check the sort on the mining score index.
     * Final order should be:
     *
     * tx7 (2M)
     * tx2 (20k)
     * tx4 (15000)
     * tx1/tx5 (10000)
     * tx3/6 (0)
     * (Ties resolved by hash)
     */
    sortedOrder.clear();
    sortedOrder.push_back(tx7.GetId().ToString());
    sortedOrder.push_back(tx2.GetId().ToString());
    sortedOrder.push_back(tx4.GetId().ToString());
    if (tx1.GetId() < tx5.GetId()) {
        sortedOrder.push_back(tx5.GetId().ToString());
        sortedOrder.push_back(tx1.GetId().ToString());
    } else {
        sortedOrder.push_back(tx1.GetId().ToString());
        sortedOrder.push_back(tx5.GetId().ToString());
    }
    if (tx3.GetId() < tx6.GetId()) {
        sortedOrder.push_back(tx6.GetId().ToString());
        sortedOrder.push_back(tx3.GetId().ToString());
    } else {
        sortedOrder.push_back(tx3.GetId().ToString());
        sortedOrder.push_back(tx6.GetId().ToString());
    }
    CheckSort<mining_score>(pool, sortedOrder);
}
// Exercise the mempool's ancestor-score sort index.
// NOTE(review): template argument lists were stripped in the extracted
// source; restored as std::vector<std::string>, std::vector<CTransactionRef>
// and CheckSort<ancestor_score> — confirm tags against txmempool.h.
BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) {
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;

    /* 3rd highest fee */
    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(Amount(10000LL)).Priority(10.0).FromTx(tx1));

    /* highest fee */
    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx2.vout[0].nValue = 2 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(Amount(20000LL)).Priority(9.0).FromTx(tx2));
    uint64_t tx2Size = CTransaction(tx2).GetTotalSize();

    /* lowest fee */
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx3.vout[0].nValue = 5 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(Amount(0LL)).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vout.resize(1);
    tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx4.vout[0].nValue = 6 * COIN;
    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(Amount(15000LL)).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vout.resize(1);
    tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx5.vout[0].nValue = 11 * COIN;
    pool.addUnchecked(tx5.GetId(), entry.Fee(Amount(10000LL)).FromTx(tx5));
    BOOST_CHECK_EQUAL(pool.size(), 5UL);

    std::vector<std::string> sortedOrder;
    sortedOrder.resize(5);
    sortedOrder[0] = tx2.GetId().ToString(); // 20000
    sortedOrder[1] = tx4.GetId().ToString(); // 15000
    // tx1 and tx5 are both 10000
    // Ties are broken by hash, not timestamp, so determine which hash comes
    // first.
    if (tx1.GetId() < tx5.GetId()) {
        sortedOrder[2] = tx1.GetId().ToString();
        sortedOrder[3] = tx5.GetId().ToString();
    } else {
        sortedOrder[2] = tx5.GetId().ToString();
        sortedOrder[3] = tx1.GetId().ToString();
    }
    sortedOrder[4] = tx3.GetId().ToString(); // 0
    CheckSort<ancestor_score>(pool, sortedOrder);

    /* low fee parent with high fee child */
    /* tx6 (0) -> tx7 (high) */
    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vout.resize(1);
    tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx6.vout[0].nValue = 20 * COIN;
    uint64_t tx6Size = CTransaction(tx6).GetTotalSize();
    pool.addUnchecked(tx6.GetId(), entry.Fee(Amount(0LL)).FromTx(tx6));
    BOOST_CHECK_EQUAL(pool.size(), 6UL);
    // Ties are broken by hash
    if (tx3.GetId() < tx6.GetId()) {
        sortedOrder.push_back(tx6.GetId().ToString());
    } else {
        sortedOrder.insert(sortedOrder.end() - 1, tx6.GetId().ToString());
    }
    CheckSort<ancestor_score>(pool, sortedOrder);

    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(1);
    tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_11;
    tx7.vout.resize(1);
    tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    uint64_t tx7Size = CTransaction(tx7).GetTotalSize();

    /* set the fee to just below tx2's feerate when including ancestor */
    Amount fee((20000 / tx2Size) * (tx7Size + tx6Size) - 1);

    // CTxMemPoolEntry entry7(tx7, fee, 2, 10.0, 1, true);
    pool.addUnchecked(tx7.GetId(), entry.Fee(Amount(fee)).FromTx(tx7));
    BOOST_CHECK_EQUAL(pool.size(), 7UL);
    sortedOrder.insert(sortedOrder.begin() + 1, tx7.GetId().ToString());
    CheckSort<ancestor_score>(pool, sortedOrder);

    /* after tx6 is mined, tx7 should move up in the sort */
    std::vector<CTransactionRef> vtx;
    vtx.push_back(MakeTransactionRef(tx6));
    pool.removeForBlock(vtx, 1);

    sortedOrder.erase(sortedOrder.begin() + 1);
    // Ties are broken by hash
    if (tx3.GetId() < tx6.GetId())
        sortedOrder.pop_back();
    else
        sortedOrder.erase(sortedOrder.end() - 2);
    sortedOrder.insert(sortedOrder.begin(), tx7.GetId().ToString());
    CheckSort<ancestor_score>(pool, sortedOrder);
}
BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) {
CTxMemPool pool;
TestMemPoolEntryHelper entry;
entry.dPriority = 10.0;
+ Amount feeIncrement = MEMPOOL_FULL_FEE_INCREMENT.GetFeePerK();
CMutableTransaction tx1 = CMutableTransaction();
tx1.vin.resize(1);
tx1.vin[0].scriptSig = CScript() << OP_1;
tx1.vout.resize(1);
tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL;
tx1.vout[0].nValue = 10 * COIN;
pool.addUnchecked(tx1.GetId(),
entry.Fee(Amount(10000LL)).FromTx(tx1, &pool));
CMutableTransaction tx2 = CMutableTransaction();
tx2.vin.resize(1);
tx2.vin[0].scriptSig = CScript() << OP_2;
tx2.vout.resize(1);
tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL;
tx2.vout[0].nValue = 10 * COIN;
pool.addUnchecked(tx2.GetId(),
entry.Fee(Amount(5000LL)).FromTx(tx2, &pool));
// should do nothing
pool.TrimToSize(pool.DynamicMemoryUsage());
BOOST_CHECK(pool.exists(tx1.GetId()));
BOOST_CHECK(pool.exists(tx2.GetId()));
// should remove the lower-feerate transaction
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
BOOST_CHECK(pool.exists(tx1.GetId()));
BOOST_CHECK(!pool.exists(tx2.GetId()));
pool.addUnchecked(tx2.GetId(), entry.FromTx(tx2, &pool));
CMutableTransaction tx3 = CMutableTransaction();
tx3.vin.resize(1);
tx3.vin[0].prevout = COutPoint(tx2.GetId(), 0);
tx3.vin[0].scriptSig = CScript() << OP_2;
tx3.vout.resize(1);
tx3.vout[0].scriptPubKey = CScript() << OP_3 << OP_EQUAL;
tx3.vout[0].nValue = 10 * COIN;
pool.addUnchecked(tx3.GetId(),
entry.Fee(Amount(20000LL)).FromTx(tx3, &pool));
// tx3 should pay for tx2 (CPFP)
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
BOOST_CHECK(!pool.exists(tx1.GetId()));
BOOST_CHECK(pool.exists(tx2.GetId()));
BOOST_CHECK(pool.exists(tx3.GetId()));
// mempool is limited to tx1's size in memory usage, so nothing fits
pool.TrimToSize(CTransaction(tx1).GetTotalSize());
BOOST_CHECK(!pool.exists(tx1.GetId()));
BOOST_CHECK(!pool.exists(tx2.GetId()));
BOOST_CHECK(!pool.exists(tx3.GetId()));
CFeeRate maxFeeRateRemoved(Amount(25000),
CTransaction(tx3).GetTotalSize() +
CTransaction(tx2).GetTotalSize());
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
- maxFeeRateRemoved.GetFeePerK() + Amount(1000));
+ maxFeeRateRemoved.GetFeePerK() + feeIncrement);
CMutableTransaction tx4 = CMutableTransaction();
tx4.vin.resize(2);
tx4.vin[0].prevout = COutPoint();
tx4.vin[0].scriptSig = CScript() << OP_4;
tx4.vin[1].prevout = COutPoint();
tx4.vin[1].scriptSig = CScript() << OP_4;
tx4.vout.resize(2);
tx4.vout[0].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
tx4.vout[0].nValue = 10 * COIN;
tx4.vout[1].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
tx4.vout[1].nValue = 10 * COIN;
CMutableTransaction tx5 = CMutableTransaction();
tx5.vin.resize(2);
tx5.vin[0].prevout = COutPoint(tx4.GetId(), 0);
tx5.vin[0].scriptSig = CScript() << OP_4;
tx5.vin[1].prevout = COutPoint();
tx5.vin[1].scriptSig = CScript() << OP_5;
tx5.vout.resize(2);
tx5.vout[0].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
tx5.vout[0].nValue = 10 * COIN;
tx5.vout[1].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
tx5.vout[1].nValue = 10 * COIN;
CMutableTransaction tx6 = CMutableTransaction();
tx6.vin.resize(2);
tx6.vin[0].prevout = COutPoint(tx4.GetId(), 1);
tx6.vin[0].scriptSig = CScript() << OP_4;
tx6.vin[1].prevout = COutPoint();
tx6.vin[1].scriptSig = CScript() << OP_6;
tx6.vout.resize(2);
tx6.vout[0].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
tx6.vout[0].nValue = 10 * COIN;
tx6.vout[1].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
tx6.vout[1].nValue = 10 * COIN;
CMutableTransaction tx7 = CMutableTransaction();
tx7.vin.resize(2);
tx7.vin[0].prevout = COutPoint(tx5.GetId(), 0);
tx7.vin[0].scriptSig = CScript() << OP_5;
tx7.vin[1].prevout = COutPoint(tx6.GetId(), 0);
tx7.vin[1].scriptSig = CScript() << OP_6;
tx7.vout.resize(2);
tx7.vout[0].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
tx7.vout[0].nValue = 10 * COIN;
tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
tx7.vout[1].nValue = 10 * COIN;
pool.addUnchecked(tx4.GetId(),
entry.Fee(Amount(7000LL)).FromTx(tx4, &pool));
pool.addUnchecked(tx5.GetId(),
entry.Fee(Amount(1000LL)).FromTx(tx5, &pool));
pool.addUnchecked(tx6.GetId(),
entry.Fee(Amount(1100LL)).FromTx(tx6, &pool));
pool.addUnchecked(tx7.GetId(),
entry.Fee(Amount(9000LL)).FromTx(tx7, &pool));
// we only require this remove, at max, 2 txn, because its not clear what
// we're really optimizing for aside from that
pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
BOOST_CHECK(pool.exists(tx4.GetId()));
BOOST_CHECK(pool.exists(tx6.GetId()));
BOOST_CHECK(!pool.exists(tx7.GetId()));
if (!pool.exists(tx5.GetId()))
pool.addUnchecked(tx5.GetId(),
entry.Fee(Amount(1000LL)).FromTx(tx5, &pool));
pool.addUnchecked(tx7.GetId(),
entry.Fee(Amount(9000LL)).FromTx(tx7, &pool));
// should maximize mempool size by only removing 5/7
pool.TrimToSize(pool.DynamicMemoryUsage() / 2);
BOOST_CHECK(pool.exists(tx4.GetId()));
BOOST_CHECK(!pool.exists(tx5.GetId()));
BOOST_CHECK(pool.exists(tx6.GetId()));
BOOST_CHECK(!pool.exists(tx7.GetId()));
pool.addUnchecked(tx5.GetId(),
entry.Fee(Amount(1000LL)).FromTx(tx5, &pool));
pool.addUnchecked(tx7.GetId(),
entry.Fee(Amount(9000LL)).FromTx(tx7, &pool));
std::vector vtx;
SetMockTime(42);
SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
- maxFeeRateRemoved.GetFeePerK() + Amount(1000));
+ maxFeeRateRemoved.GetFeePerK() + feeIncrement);
// ... we should keep the same min fee until we get a block
pool.removeForBlock(vtx, 1);
SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
- (maxFeeRateRemoved.GetFeePerK() + Amount(1000)) / 2);
+ (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 2);
// ... then feerate should drop 1/2 each halflife
SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
CTxMemPool::ROLLING_FEE_HALFLIFE / 2);
BOOST_CHECK_EQUAL(
pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(),
- (maxFeeRateRemoved.GetFeePerK() + Amount(1000)) / 4);
+ (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 4);
// ... with a 1/2 halflife when mempool is < 1/2 its target size
SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
CTxMemPool::ROLLING_FEE_HALFLIFE / 2 +
CTxMemPool::ROLLING_FEE_HALFLIFE / 4);
BOOST_CHECK_EQUAL(
pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(),
- (maxFeeRateRemoved.GetFeePerK() + Amount(1000)) / 8);
+ (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 8);
// ... with a 1/4 halflife when mempool is < 1/4 its target size
- SetMockTime(42 + 7 * CTxMemPool::ROLLING_FEE_HALFLIFE +
- CTxMemPool::ROLLING_FEE_HALFLIFE / 2 +
- CTxMemPool::ROLLING_FEE_HALFLIFE / 4);
- BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), Amount(1000));
- // ... but feerate should never drop below 1000
-
- SetMockTime(42 + 8 * CTxMemPool::ROLLING_FEE_HALFLIFE +
- CTxMemPool::ROLLING_FEE_HALFLIFE / 2 +
- CTxMemPool::ROLLING_FEE_HALFLIFE / 4);
- BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), Amount(0));
- // ... unless it has gone all the way to 0 (after getting past 1000/2)
-
SetMockTime(0);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 04950adff..e17caa61e 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -1,1260 +1,1254 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "txmempool.h"
#include "chainparams.h" // for GetConsensus.
#include "clientversion.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "streams.h"
#include "timedata.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utiltime.h"
#include "validation.h"
#include "version.h"
#include <boost/range/adaptor/reversed.hpp>
// Construct a mempool entry and precompute the cached per-entry values
// (serialized size, modified size, dynamic memory usage) plus the initial
// package state, which starts out containing only this transaction.
CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee,
int64_t _nTime, double _entryPriority,
unsigned int _entryHeight,
Amount _inChainInputValue,
bool _spendsCoinbase, int64_t _sigOpsCount,
LockPoints lp)
: tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority),
entryHeight(_entryHeight), inChainInputValue(_inChainInputValue),
spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount),
lockPoints(lp) {
// Values derived from the transaction itself, cached once.
nTxSize = tx->GetTotalSize();
nModSize = tx->CalculateModifiedSize(GetTxSize());
nUsageSize = RecursiveDynamicUsage(tx);
// A fresh entry is its own (single-element) ancestor/descendant package.
nCountWithDescendants = 1;
nSizeWithDescendants = GetTxSize();
nModFeesWithDescendants = nFee;
// Sanity: value already confirmed on-chain cannot exceed total input
// value (outputs plus fee).
Amount nValueIn = tx->GetValueOut() + nFee;
assert(inChainInputValue <= nValueIn);
feeDelta = Amount(0);
nCountWithAncestors = 1;
nSizeWithAncestors = GetTxSize();
nModFeesWithAncestors = nFee;
nSigOpCountWithAncestors = sigOpCount;
}
// Copy construction delegates to copy assignment so every cached field is
// duplicated in a single place.
CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry &other) {
*this = other;
}
double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const {
double deltaPriority = double((currentHeight - entryHeight) *
inChainInputValue.GetSatoshis()) /
nModSize;
double dResult = entryPriority + deltaPriority;
// This should only happen if it was called with a height below entry height
if (dResult < 0) {
dResult = 0;
}
return dResult;
}
// Replace the entry's fee delta with newFeeDelta, keeping the cached
// package fee totals (ancestors and descendants) in sync.
void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) {
    const Amount adjustment = newFeeDelta - feeDelta;
    nModFeesWithDescendants += adjustment;
    nModFeesWithAncestors += adjustment;
    feeDelta = newFeeDelta;
}
// Overwrite the cached lock points (re-evaluated by removeForReorg via
// CheckSequenceLocks when a reorg may have invalidated them).
void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) {
lockPoints = lp;
}
// Update the given tx for any in-mempool descendants.
// Assumes that setMemPoolChildren is correct for the given tx and all
// descendants.
// NOTE: the stripped template argument on setExclude is restored here
// (keys compared against CTransaction ids, hence std::set<uint256>).
void CTxMemPool::UpdateForDescendants(txiter updateIt,
                                      cacheMap &cachedDescendants,
                                      const std::set<uint256> &setExclude) {
    // Breadth-first walk over in-mempool children, collecting every
    // descendant exactly once; reuse cached descendant sets when available.
    setEntries stageEntries, setAllDescendants;
    stageEntries = GetMemPoolChildren(updateIt);
    while (!stageEntries.empty()) {
        const txiter cit = *stageEntries.begin();
        setAllDescendants.insert(cit);
        stageEntries.erase(cit);
        const setEntries &setChildren = GetMemPoolChildren(cit);
        for (const txiter childEntry : setChildren) {
            cacheMap::iterator cacheIt = cachedDescendants.find(childEntry);
            if (cacheIt != cachedDescendants.end()) {
                // We've already calculated this one, just add the entries for
                // this set but don't traverse again.
                for (const txiter cacheEntry : cacheIt->second) {
                    setAllDescendants.insert(cacheEntry);
                }
            } else if (!setAllDescendants.count(childEntry)) {
                // Schedule for later processing
                stageEntries.insert(childEntry);
            }
        }
    }
    // setAllDescendants now contains all in-mempool descendants of updateIt.
    // Update and add to cached descendant map
    int64_t modifySize = 0;
    Amount modifyFee(0);
    int64_t modifyCount = 0;
    for (txiter cit : setAllDescendants) {
        if (!setExclude.count(cit->GetTx().GetId())) {
            modifySize += cit->GetTxSize();
            modifyFee += cit->GetModifiedFee();
            modifyCount++;
            cachedDescendants[updateIt].insert(cit);
            // Update ancestor state for each descendant
            mapTx.modify(cit,
                         update_ancestor_state(updateIt->GetTxSize(),
                                               updateIt->GetModifiedFee(), 1,
                                               updateIt->GetSigOpCount()));
        }
    }
    mapTx.modify(updateIt,
                 update_descendant_state(modifySize, modifyFee, modifyCount));
}
// vHashesToUpdate is the set of transaction hashes from a disconnected block
// which has been re-added to the mempool. For each entry, look for descendants
// that are outside hashesToUpdate, and add fee/size information for such
// descendants to the parent. For each such descendant, also update the ancestor
// state to include the parent.
void CTxMemPool::UpdateTransactionsFromBlock(
const std::vector &vHashesToUpdate) {
LOCK(cs);
// For each entry in vHashesToUpdate, store the set of in-mempool, but not
// in-vHashesToUpdate transactions, so that we don't have to recalculate
// descendants when we come across a previously seen entry.
cacheMap mapMemPoolDescendantsToUpdate;
// Use a set for lookups into vHashesToUpdate (these entries are already
// accounted for in the state of their ancestors)
std::set setAlreadyIncluded(vHashesToUpdate.begin(),
vHashesToUpdate.end());
// Iterate in reverse, so that whenever we are looking at at a transaction
// we are sure that all in-mempool descendants have already been processed.
// This maximizes the benefit of the descendant cache and guarantees that
// setMemPoolChildren will be updated, an assumption made in
// UpdateForDescendants.
for (const uint256 &hash : boost::adaptors::reverse(vHashesToUpdate)) {
// we cache the in-mempool children to avoid duplicate updates
setEntries setChildren;
// calculate children from mapNextTx
txiter it = mapTx.find(hash);
if (it == mapTx.end()) {
continue;
}
auto iter = mapNextTx.lower_bound(COutPoint(hash, 0));
// First calculate the children, and update setMemPoolChildren to
// include them, and update their setMemPoolParents to include this tx.
for (; iter != mapNextTx.end() && iter->first->GetTxId() == hash;
++iter) {
const uint256 &childHash = iter->second->GetId();
txiter childIter = mapTx.find(childHash);
assert(childIter != mapTx.end());
// We can skip updating entries we've encountered before or that are
// in the block (which are already accounted for).
if (setChildren.insert(childIter).second &&
!setAlreadyIncluded.count(childHash)) {
UpdateChild(it, childIter, true);
UpdateParent(childIter, it, true);
}
}
UpdateForDescendants(it, mapMemPoolDescendantsToUpdate,
setAlreadyIncluded);
}
}
/**
 * Compute the set of in-mempool ancestors of `entry` into setAncestors.
 * Returns false (with a human-readable reason in errString) as soon as any
 * ancestor/descendant package limit would be exceeded. When
 * fSearchForParents is true the parents are found by looking up the
 * entry's inputs in mapTx; otherwise the entry must already be in the
 * mempool and the cached mapLinks parents are used.
 */
bool CTxMemPool::CalculateMemPoolAncestors(
const CTxMemPoolEntry &entry, setEntries &setAncestors,
uint64_t limitAncestorCount, uint64_t limitAncestorSize,
uint64_t limitDescendantCount, uint64_t limitDescendantSize,
std::string &errString, bool fSearchForParents /* = true */) const {
LOCK(cs);
setEntries parentHashes;
const CTransaction &tx = entry.GetTx();
if (fSearchForParents) {
// Get parents of this transaction that are in the mempool
// GetMemPoolParents() is only valid for entries in the mempool, so we
// iterate mapTx to find parents.
for (const CTxIn &in : tx.vin) {
txiter piter = mapTx.find(in.prevout.GetTxId());
if (piter == mapTx.end()) {
continue;
}
parentHashes.insert(piter);
if (parentHashes.size() + 1 > limitAncestorCount) {
errString =
strprintf("too many unconfirmed parents [limit: %u]",
limitAncestorCount);
return false;
}
}
} else {
// If we're not searching for parents, we require this to be an entry in
// the mempool already.
txiter it = mapTx.iterator_to(entry);
parentHashes = GetMemPoolParents(it);
}
size_t totalSizeWithAncestors = entry.GetTxSize();
// Walk up the ancestor graph breadth-first, enforcing all four package
// limits as each new ancestor is staged.
while (!parentHashes.empty()) {
txiter stageit = *parentHashes.begin();
setAncestors.insert(stageit);
parentHashes.erase(stageit);
totalSizeWithAncestors += stageit->GetTxSize();
if (stageit->GetSizeWithDescendants() + entry.GetTxSize() >
limitDescendantSize) {
errString = strprintf(
"exceeds descendant size limit for tx %s [limit: %u]",
stageit->GetTx().GetId().ToString(), limitDescendantSize);
return false;
}
if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) {
errString = strprintf("too many descendants for tx %s [limit: %u]",
stageit->GetTx().GetId().ToString(),
limitDescendantCount);
return false;
}
if (totalSizeWithAncestors > limitAncestorSize) {
errString = strprintf("exceeds ancestor size limit [limit: %u]",
limitAncestorSize);
return false;
}
const setEntries &setMemPoolParents = GetMemPoolParents(stageit);
for (const txiter &phash : setMemPoolParents) {
// If this is a new ancestor, add it.
if (setAncestors.count(phash) == 0) {
parentHashes.insert(phash);
}
// Conservative bound: staged + accepted ancestors plus the entry
// itself must stay within the ancestor count limit.
if (parentHashes.size() + setAncestors.size() + 1 >
limitAncestorCount) {
errString =
strprintf("too many unconfirmed ancestors [limit: %u]",
limitAncestorCount);
return false;
}
}
}
return true;
}
// Register (add=true) or unregister (add=false) `it` as a child of each of
// its direct in-mempool parents, and adjust every ancestor's cached
// with-descendants totals by this entry's size and modified fee.
void CTxMemPool::UpdateAncestorsOf(bool add, txiter it,
                                   setEntries &setAncestors) {
    // Copy the parent set first, then fix up each parent's child links.
    setEntries directParents = GetMemPoolParents(it);
    for (txiter parentIt : directParents) {
        UpdateChild(parentIt, it, add);
    }
    const int64_t direction = add ? 1 : -1;
    const int64_t sizeAdjustment = direction * it->GetTxSize();
    const Amount feeAdjustment = direction * it->GetModifiedFee();
    for (txiter ancestorIt : setAncestors) {
        mapTx.modify(ancestorIt,
                     update_descendant_state(sizeAdjustment, feeAdjustment,
                                             direction));
    }
}
// Fold the size/fee/sigop totals of all of `it`'s ancestors into its own
// cached with-ancestors state.
void CTxMemPool::UpdateEntryForAncestors(txiter it,
                                         const setEntries &setAncestors) {
    int64_t totalSize = 0;
    Amount totalFees(0);
    int64_t totalSigOps = 0;
    for (txiter ancestorIt : setAncestors) {
        totalSize += ancestorIt->GetTxSize();
        totalFees += ancestorIt->GetModifiedFee();
        totalSigOps += ancestorIt->GetSigOpCount();
    }
    mapTx.modify(it, update_ancestor_state(
                         totalSize, totalFees,
                         static_cast<int64_t>(setAncestors.size()),
                         totalSigOps));
}
// Sever the parent link that each in-mempool child of `it` holds back to
// it (used while staging `it` for removal).
void CTxMemPool::UpdateChildrenForRemoval(txiter it) {
    for (txiter childIt : GetMemPoolChildren(it)) {
        UpdateParent(childIt, it, false);
    }
}
// Adjust ancestor/descendant package state for every entry that is about
// to be removed, then sever the mapLinks edges.
// NOTE: restores the stripped template argument on std::numeric_limits
// (the ancestor-calculation limits are uint64_t).
void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove,
                                            bool updateDescendants) {
    // For each entry, walk back all ancestors and decrement size associated
    // with this transaction.
    const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    if (updateDescendants) {
        // updateDescendants should be true whenever we're not recursively
        // removing a tx and all its descendants, eg when a transaction is
        // confirmed in a block. Here we only update statistics and not data in
        // mapLinks (which we need to preserve until we're finished with all
        // operations that need to traverse the mempool).
        for (txiter removeIt : entriesToRemove) {
            setEntries setDescendants;
            CalculateDescendants(removeIt, setDescendants);
            setDescendants.erase(removeIt); // don't update state for self
            int64_t modifySize = -((int64_t)removeIt->GetTxSize());
            Amount modifyFee = -1 * removeIt->GetModifiedFee();
            int modifySigOps = -removeIt->GetSigOpCount();
            for (txiter dit : setDescendants) {
                mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee,
                                                        -1, modifySigOps));
            }
        }
    }
    for (txiter removeIt : entriesToRemove) {
        setEntries setAncestors;
        const CTxMemPoolEntry &entry = *removeIt;
        std::string dummy;
        // Since this is a tx that is already in the mempool, we can call CMPA
        // with fSearchForParents = false. If the mempool is in a consistent
        // state, then using true or false should both be correct, though false
        // should be a bit faster.
        // However, if we happen to be in the middle of processing a reorg, then
        // the mempool can be in an inconsistent state. In this case, the set of
        // ancestors reachable via mapLinks will be the same as the set of
        // ancestors whose packages include this transaction, because when we
        // add a new transaction to the mempool in addUnchecked(), we assume it
        // has no children, and in the case of a reorg where that assumption is
        // false, the in-mempool children aren't linked to the in-block tx's
        // until UpdateTransactionsFromBlock() is called. So if we're being
        // called during a reorg, ie before UpdateTransactionsFromBlock() has
        // been called, then mapLinks[] will differ from the set of mempool
        // parents we'd calculate by searching, and it's important that we use
        // the mapLinks[] notion of ancestor transactions as the set of things
        // to update for removal.
        CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit,
                                  nNoLimit, nNoLimit, dummy, false);
        // Note that UpdateAncestorsOf severs the child links that point to
        // removeIt in the entries for the parents of removeIt.
        UpdateAncestorsOf(false, removeIt, setAncestors);
    }
    // After updating all the ancestor sizes, we can now sever the link between
    // each transaction being removed and any mempool children (ie, update
    // setMemPoolParents for each direct child of a transaction being removed).
    for (txiter removeIt : entriesToRemove) {
        UpdateChildrenForRemoval(removeIt);
    }
}
// Apply deltas to the cached with-descendants totals; size and count must
// remain strictly positive (the entry always counts itself).
void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize,
Amount modifyFee,
int64_t modifyCount) {
nSizeWithDescendants += modifySize;
assert(int64_t(nSizeWithDescendants) > 0);
nModFeesWithDescendants += modifyFee;
nCountWithDescendants += modifyCount;
assert(int64_t(nCountWithDescendants) > 0);
}
// Apply deltas to the cached with-ancestors totals; size and count must
// stay strictly positive and the sigop count non-negative.
void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee,
int64_t modifyCount,
int modifySigOps) {
nSizeWithAncestors += modifySize;
assert(int64_t(nSizeWithAncestors) > 0);
nModFeesWithAncestors += modifyFee;
nCountWithAncestors += modifyCount;
assert(int64_t(nCountWithAncestors) > 0);
nSigOpCountWithAncestors += modifySigOps;
assert(int(nSigOpCountWithAncestors) >= 0);
}
// Construct an empty pool with sanity-checking disabled.
CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) {
// lock free clear
_clear();
// Sanity checks off by default for performance, because otherwise accepting
// transactions becomes O(N^2) where N is the number of transactions in the
// pool
nCheckFrequency = 0;
// Owning raw pointer; released in ~CTxMemPool().
minerPolicyEstimator = new CBlockPolicyEstimator();
}
// Release the fee estimator allocated in the constructor.
CTxMemPool::~CTxMemPool() {
delete minerPolicyEstimator;
}
// True when some transaction already in the pool spends `outpoint`.
bool CTxMemPool::isSpent(const COutPoint &outpoint) {
    LOCK(cs);
    return mapNextTx.count(outpoint) != 0;
}
// Counter of pool mutations, used by callers to detect changes cheaply.
unsigned int CTxMemPool::GetTransactionsUpdated() const {
LOCK(cs);
return nTransactionsUpdated;
}
// Bump the mutation counter by n (e.g. for external invalidation events).
void CTxMemPool::AddTransactionsUpdated(unsigned int n) {
LOCK(cs);
nTransactionsUpdated += n;
}
bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry,
setEntries &setAncestors, bool validFeeEstimate) {
NotifyEntryAdded(entry.GetSharedTx());
// Add to memory pool without checking anything.
// Used by AcceptToMemoryPool(), which DOES do all the appropriate checks.
LOCK(cs);
indexed_transaction_set::iterator newit = mapTx.insert(entry).first;
mapLinks.insert(make_pair(newit, TxLinks()));
// Update transaction for any feeDelta created by PrioritiseTransaction
// TODO: refactor so that the fee delta is calculated before inserting into
// mapTx.
std::map>::const_iterator pos =
mapDeltas.find(hash);
if (pos != mapDeltas.end()) {
const std::pair &deltas = pos->second;
if (deltas.second != Amount(0)) {
mapTx.modify(newit, update_fee_delta(deltas.second));
}
}
// Update cachedInnerUsage to include contained transaction's usage.
// (When we update the entry for in-mempool parents, memory usage will be
// further updated.)
cachedInnerUsage += entry.DynamicMemoryUsage();
const CTransaction &tx = newit->GetTx();
std::set setParentTransactions;
for (const CTxIn &in : tx.vin) {
mapNextTx.insert(std::make_pair(&in.prevout, &tx));
setParentTransactions.insert(in.prevout.GetTxId());
}
// Don't bother worrying about child transactions of this one. Normal case
// of a new transaction arriving is that there can't be any children,
// because such children would be orphans. An exception to that is if a
// transaction enters that used to be in a block. In that case, our
// disconnect block logic will call UpdateTransactionsFromBlock to clean up
// the mess we're leaving here.
// Update ancestors with information about this tx
for (const uint256 &phash : setParentTransactions) {
txiter pit = mapTx.find(phash);
if (pit != mapTx.end()) {
UpdateParent(newit, pit, true);
}
}
UpdateAncestorsOf(true, newit, setAncestors);
UpdateEntryForAncestors(newit, setAncestors);
nTransactionsUpdated++;
totalTxSize += entry.GetTxSize();
minerPolicyEstimator->processTransaction(entry, validFeeEstimate);
vTxHashes.emplace_back(tx.GetHash(), newit);
newit->vTxHashesIdx = vTxHashes.size() - 1;
return true;
}
// Erase a single entry from mapTx/mapNextTx/mapLinks/vTxHashes and update
// cached totals. NOTE(review): no LOCK here — callers appear to be
// expected to hold cs already; confirm against RemoveStaged.
void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) {
NotifyEntryRemoved(it->GetSharedTx(), reason);
const uint256 txid = it->GetTx().GetId();
for (const CTxIn &txin : it->GetTx().vin) {
mapNextTx.erase(txin.prevout);
}
// Keep vTxHashes dense: swap the last element into the vacated slot and
// fix up its stored index; shrink when at most half the capacity is used.
if (vTxHashes.size() > 1) {
vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back());
vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx;
vTxHashes.pop_back();
if (vTxHashes.size() * 2 < vTxHashes.capacity()) {
vTxHashes.shrink_to_fit();
}
} else {
vTxHashes.clear();
}
totalTxSize -= it->GetTxSize();
cachedInnerUsage -= it->DynamicMemoryUsage();
// The entry's mapLinks sets are part of cachedInnerUsage as well.
cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) +
memusage::DynamicUsage(mapLinks[it].children);
mapLinks.erase(it);
mapTx.erase(it);
nTransactionsUpdated++;
minerPolicyEstimator->removeTx(txid);
}
// Add every in-mempool descendant of entryit to setDescendants. Assumes
// entryit is in the mempool and that setMemPoolChildren is correct for it
// and all of its descendants. Entries already present in setDescendants
// are assumed to have had their own descendants added previously, so the
// walk prunes at them.
void CTxMemPool::CalculateDescendants(txiter entryit,
                                      setEntries &setDescendants) {
    setEntries work;
    if (!setDescendants.count(entryit)) {
        work.insert(entryit);
    }
    // Breadth-first traversal of the child links, skipping anything that
    // is already accounted for.
    while (!work.empty()) {
        const txiter current = *work.begin();
        work.erase(work.begin());
        setDescendants.insert(current);
        for (const txiter &child : GetMemPoolChildren(current)) {
            if (!setDescendants.count(child)) {
                work.insert(child);
            }
        }
    }
}
// Remove origTx and all of its in-mempool descendants. If origTx itself is
// not in the pool, still remove any pool transactions spending its outputs
// (possible during reorgs).
void CTxMemPool::removeRecursive(const CTransaction &origTx,
MemPoolRemovalReason reason) {
// Remove transaction from memory pool.
LOCK(cs);
setEntries txToRemove;
txiter origit = mapTx.find(origTx.GetId());
if (origit != mapTx.end()) {
txToRemove.insert(origit);
} else {
// When recursively removing but origTx isn't in the mempool be sure to
// remove any children that are in the pool. This can happen during
// chain re-orgs if origTx isn't re-accepted into the mempool for any
// reason.
for (size_t i = 0; i < origTx.vout.size(); i++) {
auto it = mapNextTx.find(COutPoint(origTx.GetId(), i));
if (it == mapNextTx.end()) {
continue;
}
txiter nextit = mapTx.find(it->second->GetId());
assert(nextit != mapTx.end());
txToRemove.insert(nextit);
}
}
// Expand the removal set to cover every descendant, then remove in one
// staged batch.
setEntries setAllRemoves;
for (txiter it : txToRemove) {
CalculateDescendants(it, setAllRemoves);
}
RemoveStaged(setAllRemoves, false, reason);
}
// After a reorg, evict (with descendants) transactions that are no longer
// valid at the new tip: non-final ones, ones failing sequence locks, and
// spends of coinbases that are no longer mature.
void CTxMemPool::removeForReorg(const Config &config,
const CCoinsViewCache *pcoins,
unsigned int nMemPoolHeight, int flags) {
// Remove transactions spending a coinbase which are now immature and
// no-longer-final transactions.
LOCK(cs);
setEntries txToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin();
it != mapTx.end(); it++) {
const CTransaction &tx = it->GetTx();
LockPoints lp = it->GetLockPoints();
bool validLP = TestLockPointValidity(&lp);
CValidationState state;
if (!ContextualCheckTransactionForCurrentBlock(config, tx, state,
flags) ||
!CheckSequenceLocks(tx, flags, &lp, validLP)) {
// Note if CheckSequenceLocks fails the LockPoints may still be
// invalid. So it's critical that we remove the tx and not depend on
// the LockPoints.
txToRemove.insert(it);
} else if (it->GetSpendsCoinbase()) {
// Check each coinbase-spending input: any prevout not supplied by
// another mempool tx must still be a mature coin in the view.
for (const CTxIn &txin : tx.vin) {
indexed_transaction_set::const_iterator it2 =
mapTx.find(txin.prevout.GetTxId());
if (it2 != mapTx.end()) {
continue;
}
const Coin &coin = pcoins->AccessCoin(txin.prevout);
if (nCheckFrequency != 0) {
assert(!coin.IsSpent());
}
if (coin.IsSpent() ||
(coin.IsCoinBase() &&
int64_t(nMemPoolHeight) - coin.GetHeight() <
COINBASE_MATURITY)) {
txToRemove.insert(it);
break;
}
}
}
// Refresh lock points that were computed against a now-stale chain.
if (!validLP) {
mapTx.modify(it, update_lock_points(lp));
}
}
setEntries setAllRemoves;
for (txiter it : txToRemove) {
CalculateDescendants(it, setAllRemoves);
}
RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG);
}
// Recursively remove every mempool transaction (other than tx itself)
// that spends one of tx's inputs — they conflict with tx.
void CTxMemPool::removeConflicts(const CTransaction &tx) {
    LOCK(cs);
    for (const CTxIn &txin : tx.vin) {
        auto spenderIt = mapNextTx.find(txin.prevout);
        if (spenderIt == mapNextTx.end()) {
            continue;
        }
        const CTransaction &txConflict = *spenderIt->second;
        if (txConflict != tx) {
            ClearPrioritisation(txConflict.GetId());
            removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT);
        }
    }
}
/**
* Called when a block is connected. Removes from mempool and updates the miner
* fee estimator.
*/
void CTxMemPool::removeForBlock(const std::vector &vtx,
unsigned int nBlockHeight) {
LOCK(cs);
std::vector entries;
for (const auto &tx : vtx) {
uint256 txid = tx->GetId();
indexed_transaction_set::iterator i = mapTx.find(txid);
if (i != mapTx.end()) {
entries.push_back(&*i);
}
}
// Before the txs in the new block have been removed from the mempool,
// update policy estimates
minerPolicyEstimator->processBlock(nBlockHeight, entries);
for (const auto &tx : vtx) {
txiter it = mapTx.find(tx->GetId());
if (it != mapTx.end()) {
setEntries stage;
stage.insert(it);
RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK);
}
removeConflicts(*tx);
ClearPrioritisation(tx->GetId());
}
lastRollingFeeUpdate = GetTime();
blockSinceLastRollingFeeBump = true;
}
// Reset all containers and cached statistics. Does no locking itself
// (the constructor calls it before the pool is shared; clear() wraps it
// with cs).
void CTxMemPool::_clear() {
mapLinks.clear();
mapTx.clear();
mapNextTx.clear();
vTxHashes.clear();
totalTxSize = 0;
cachedInnerUsage = 0;
lastRollingFeeUpdate = GetTime();
blockSinceLastRollingFeeBump = false;
rollingMinimumFeeRate = 0;
++nTransactionsUpdated;
}
// Thread-safe wrapper around _clear().
void CTxMemPool::clear() {
LOCK(cs);
_clear();
}
void CTxMemPool::check(const CCoinsViewCache *pcoins) const {
if (nCheckFrequency == 0) {
return;
}
if (GetRand(std::numeric_limits::max()) >= nCheckFrequency) {
return;
}
LogPrint(BCLog::MEMPOOL,
"Checking mempool with %u transactions and %u inputs\n",
(unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
uint64_t checkTotal = 0;
uint64_t innerUsage = 0;
CCoinsViewCache mempoolDuplicate(const_cast(pcoins));
const int64_t nSpendHeight = GetSpendHeight(mempoolDuplicate);
LOCK(cs);
std::list waitingOnDependants;
for (indexed_transaction_set::const_iterator it = mapTx.begin();
it != mapTx.end(); it++) {
unsigned int i = 0;
checkTotal += it->GetTxSize();
innerUsage += it->DynamicMemoryUsage();
const CTransaction &tx = it->GetTx();
txlinksMap::const_iterator linksiter = mapLinks.find(it);
assert(linksiter != mapLinks.end());
const TxLinks &links = linksiter->second;
innerUsage += memusage::DynamicUsage(links.parents) +
memusage::DynamicUsage(links.children);
bool fDependsWait = false;
setEntries setParentCheck;
int64_t parentSizes = 0;
int64_t parentSigOpCount = 0;
for (const CTxIn &txin : tx.vin) {
// Check that every mempool transaction's inputs refer to available
// coins, or other mempool tx's.
indexed_transaction_set::const_iterator it2 =
mapTx.find(txin.prevout.GetTxId());
if (it2 != mapTx.end()) {
const CTransaction &tx2 = it2->GetTx();
assert(tx2.vout.size() > txin.prevout.GetN() &&
!tx2.vout[txin.prevout.GetN()].IsNull());
fDependsWait = true;
if (setParentCheck.insert(it2).second) {
parentSizes += it2->GetTxSize();
parentSigOpCount += it2->GetSigOpCount();
}
} else {
assert(pcoins->HaveCoin(txin.prevout));
}
// Check whether its inputs are marked in mapNextTx.
auto it3 = mapNextTx.find(txin.prevout);
assert(it3 != mapNextTx.end());
assert(it3->first == &txin.prevout);
assert(it3->second == &tx);
i++;
}
assert(setParentCheck == GetMemPoolParents(it));
// Verify ancestor state is correct.
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits::max();
std::string dummy;
CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
nNoLimit, nNoLimit, dummy);
uint64_t nCountCheck = setAncestors.size() + 1;
uint64_t nSizeCheck = it->GetTxSize();
Amount nFeesCheck = it->GetModifiedFee();
int64_t nSigOpCheck = it->GetSigOpCount();
for (txiter ancestorIt : setAncestors) {
nSizeCheck += ancestorIt->GetTxSize();
nFeesCheck += ancestorIt->GetModifiedFee();
nSigOpCheck += ancestorIt->GetSigOpCount();
}
assert(it->GetCountWithAncestors() == nCountCheck);
assert(it->GetSizeWithAncestors() == nSizeCheck);
assert(it->GetSigOpCountWithAncestors() == nSigOpCheck);
assert(it->GetModFeesWithAncestors() == nFeesCheck);
// Check children against mapNextTx
CTxMemPool::setEntries setChildrenCheck;
auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0));
int64_t childSizes = 0;
for (; iter != mapNextTx.end() &&
iter->first->GetTxId() == it->GetTx().GetId();
++iter) {
txiter childit = mapTx.find(iter->second->GetId());
// mapNextTx points to in-mempool transactions
assert(childit != mapTx.end());
if (setChildrenCheck.insert(childit).second) {
childSizes += childit->GetTxSize();
}
}
assert(setChildrenCheck == GetMemPoolChildren(it));
// Also check to make sure size is greater than sum with immediate
// children. Just a sanity check, not definitive that this calc is
// correct...
assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
if (fDependsWait) {
waitingOnDependants.push_back(&(*it));
} else {
CValidationState state;
bool fCheckResult = tx.IsCoinBase() ||
Consensus::CheckTxInputs(
tx, state, mempoolDuplicate, nSpendHeight);
assert(fCheckResult);
UpdateCoins(tx, mempoolDuplicate, 1000000);
}
}
unsigned int stepsSinceLastRemove = 0;
while (!waitingOnDependants.empty()) {
const CTxMemPoolEntry *entry = waitingOnDependants.front();
waitingOnDependants.pop_front();
CValidationState state;
if (!mempoolDuplicate.HaveInputs(entry->GetTx())) {
waitingOnDependants.push_back(entry);
stepsSinceLastRemove++;
assert(stepsSinceLastRemove < waitingOnDependants.size());
} else {
bool fCheckResult =
entry->GetTx().IsCoinBase() ||
Consensus::CheckTxInputs(entry->GetTx(), state,
mempoolDuplicate, nSpendHeight);
assert(fCheckResult);
UpdateCoins(entry->GetTx(), mempoolDuplicate, 1000000);
stepsSinceLastRemove = 0;
}
}
for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) {
uint256 txid = it->second->GetId();
indexed_transaction_set::const_iterator it2 = mapTx.find(txid);
const CTransaction &tx = it2->GetTx();
assert(it2 != mapTx.end());
assert(&tx == it->second);
}
assert(totalTxSize == checkTotal);
assert(innerUsage == cachedInnerUsage);
}
bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha,
const uint256 &hashb) {
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hasha);
if (i == mapTx.end()) {
return false;
}
indexed_transaction_set::const_iterator j = mapTx.find(hashb);
if (j == mapTx.end()) {
return true;
}
uint64_t counta = i->GetCountWithAncestors();
uint64_t countb = j->GetCountWithAncestors();
if (counta == countb) {
return CompareTxMemPoolEntryByScore()(*i, *j);
}
return counta < countb;
}
namespace {
// Functor counterpart of CompareDepthAndScore for sorting mapTx
// iterators: fewer in-mempool ancestors first, ties broken by score.
class DepthAndScoreComparator {
public:
    bool
    operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a,
               const CTxMemPool::indexed_transaction_set::const_iterator &b) {
        const uint64_t depthA = a->GetCountWithAncestors();
        const uint64_t depthB = b->GetCountWithAncestors();
        if (depthA != depthB) {
            return depthA < depthB;
        }
        return CompareTxMemPoolEntryByScore()(*a, *b);
    }
};
} // namespace
std::vector
CTxMemPool::GetSortedDepthAndScore() const {
std::vector iters;
AssertLockHeld(cs);
iters.reserve(mapTx.size());
for (indexed_transaction_set::iterator mi = mapTx.begin();
mi != mapTx.end(); ++mi) {
iters.push_back(mi);
}
std::sort(iters.begin(), iters.end(), DepthAndScoreComparator());
return iters;
}
void CTxMemPool::queryHashes(std::vector &vtxid) {
LOCK(cs);
auto iters = GetSortedDepthAndScore();
vtxid.clear();
vtxid.reserve(mapTx.size());
for (auto it : iters) {
vtxid.push_back(it->GetTx().GetId());
}
}
// Build a TxMempoolInfo snapshot (shared tx, entry time, feerate, and the
// prioritisation fee delta) from a mapTx iterator.
static TxMempoolInfo
GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) {
return TxMempoolInfo{it->GetSharedTx(), it->GetTime(),
CFeeRate(it->GetFee(), it->GetTxSize()),
it->GetModifiedFee() - it->GetFee()};
}
std::vector CTxMemPool::infoAll() const {
LOCK(cs);
auto iters = GetSortedDepthAndScore();
std::vector ret;
ret.reserve(mapTx.size());
for (auto it : iters) {
ret.push_back(GetInfo(it));
}
return ret;
}
// Look a transaction up by id; returns nullptr when it is not in the pool.
CTransactionRef CTxMemPool::get(const uint256 &txid) const {
    LOCK(cs);
    auto entryIt = mapTx.find(txid);
    return entryIt == mapTx.end() ? nullptr : entryIt->GetSharedTx();
}
// Return a TxMempoolInfo snapshot for txid, or a default-constructed one
// when the transaction is not in the pool.
TxMempoolInfo CTxMemPool::info(const uint256 &txid) const {
    LOCK(cs);
    auto entryIt = mapTx.find(txid);
    return entryIt == mapTx.end() ? TxMempoolInfo() : GetInfo(entryIt);
}
// Delegate a feerate estimate for confirmation within nBlocks to the
// policy estimator.
CFeeRate CTxMemPool::estimateFee(int nBlocks) const {
LOCK(cs);
return minerPolicyEstimator->estimateFee(nBlocks);
}
// Delegate a "smart" feerate estimate to the policy estimator; the block
// horizon actually answered is written to *answerFoundAtBlocks.
CFeeRate CTxMemPool::estimateSmartFee(int nBlocks,
int *answerFoundAtBlocks) const {
LOCK(cs);
return minerPolicyEstimator->estimateSmartFee(nBlocks, answerFoundAtBlocks,
*this);
}
/**
 * Serialize the fee-estimator state to fileout, preceded by the minimum
 * client version able to read it and the version that wrote it.
 *
 * @return true on success; false (after logging) if serialization threw.
 *         Failure is non-fatal: estimates are a cache, not consensus data.
 */
bool CTxMemPool::WriteFeeEstimates(CAutoFile &fileout) const {
    try {
        LOCK(cs);
        // version required to read: 0.13.99 or later
        fileout << 139900;
        // version that wrote the file
        fileout << CLIENT_VERSION;
        minerPolicyEstimator->Write(fileout);
    } catch (const std::exception &) {
        LogPrintf("CTxMemPool::WriteFeeEstimates(): unable to write policy "
                  "estimator data (non-fatal)\n");
        return false;
    }
    return true;
}
/**
 * Restore fee-estimator state from filein, rejecting files written by a
 * newer client (nVersionRequired > CLIENT_VERSION).
 *
 * @return true on success; false (after logging) on version mismatch or a
 *         deserialization error. Failure is non-fatal: estimates rebuild
 *         over time.
 */
bool CTxMemPool::ReadFeeEstimates(CAutoFile &filein) {
    try {
        int nVersionRequired, nVersionThatWrote;
        filein >> nVersionRequired >> nVersionThatWrote;
        if (nVersionRequired > CLIENT_VERSION) {
            return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee "
                         "estimate file",
                         nVersionRequired);
        }

        LOCK(cs);
        minerPolicyEstimator->Read(filein, nVersionThatWrote);
    } catch (const std::exception &) {
        LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy "
                  "estimator data (non-fatal)\n");
        return false;
    }
    return true;
}
void CTxMemPool::PrioritiseTransaction(const uint256 hash,
const std::string strHash,
double dPriorityDelta,
const Amount nFeeDelta) {
{
LOCK(cs);
std::pair &deltas = mapDeltas[hash];
deltas.first += dPriorityDelta;
deltas.second += nFeeDelta;
txiter it = mapTx.find(hash);
if (it != mapTx.end()) {
mapTx.modify(it, update_fee_delta(deltas.second));
// Now update all ancestors' modified fees with descendants
setEntries setAncestors;
uint64_t nNoLimit = std::numeric_limits::max();
std::string dummy;
CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
nNoLimit, nNoLimit, dummy, false);
for (txiter ancestorIt : setAncestors) {
mapTx.modify(ancestorIt,
update_descendant_state(0, nFeeDelta, 0));
}
// Now update all descendants' modified fees with ancestors
setEntries setDescendants;
CalculateDescendants(it, setDescendants);
setDescendants.erase(it);
for (txiter descendantIt : setDescendants) {
mapTx.modify(descendantIt,
update_ancestor_state(0, nFeeDelta, 0, 0));
}
}
}
LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash,
dPriorityDelta, FormatMoney(nFeeDelta));
}
void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta,
Amount &nFeeDelta) const {
LOCK(cs);
std::map>::const_iterator pos =
mapDeltas.find(hash);
if (pos == mapDeltas.end()) {
return;
}
const std::pair &deltas = pos->second;
dPriorityDelta += deltas.first;
nFeeDelta += deltas.second;
}
/** Drop any recorded priority/fee deltas for hash. No-op if none exist. */
void CTxMemPool::ClearPrioritisation(const uint256 hash) {
    LOCK(cs);
    mapDeltas.erase(hash);
}
/**
 * Check whether none of tx's inputs spend an output of a transaction that is
 * itself in this mempool, i.e. whether every parent is already confirmed.
 *
 * @return true when no input references an in-mempool parent.
 */
bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const {
    for (const CTxIn &txin : tx.vin) {
        // An in-mempool parent means the tx does depend on unconfirmed state.
        if (exists(txin.prevout.GetTxId())) {
            return false;
        }
    }

    return true;
}
/** Construct a coins view that overlays mempoolIn's transactions on top of
 *  the backing view baseIn. */
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn,
                                     const CTxMemPool &mempoolIn)
    : CCoinsViewBacked(baseIn), mempool(mempoolIn) {}
/**
 * Fetch the coin for outpoint, preferring the mempool over the backing view.
 *
 * A mempool entry always wins: it can never conflict with the underlying
 * cache and cannot contain pruned entries (the mempool holds full
 * transactions), whereas checking the base first could surface a pruned
 * entry instead.
 *
 * @return true and set coin when the output exists and is unspent.
 */
bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
    CTransactionRef ptx = mempool.get(outpoint.GetTxId());
    if (!ptx) {
        // Not in the mempool: defer to the backing view.
        return base->GetCoin(outpoint, coin) && !coin.IsSpent();
    }

    if (outpoint.GetN() >= ptx->vout.size()) {
        // The tx is in the mempool but has no such output index.
        return false;
    }

    coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false);
    return true;
}
/** Return true when outpoint exists in the mempool or the backing view. */
bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const {
    return mempool.exists(outpoint) || base->HaveCoin(outpoint);
}
/**
 * Estimate the total heap memory used by the mempool: the multi-index map of
 * entries (approximated, see below), the spent-outputs map, the prioritisation
 * deltas, the parent/child link map, the tx-hash vector, and the cached usage
 * of the link sets themselves (cachedInnerUsage).
 */
size_t CTxMemPool::DynamicMemoryUsage() const {
    LOCK(cs);
    // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no
    // exact formula for boost::multi_index_contained is implemented.
    return memusage::MallocUsage(sizeof(CTxMemPoolEntry) +
                                 15 * sizeof(void *)) *
               mapTx.size() +
           memusage::DynamicUsage(mapNextTx) +
           memusage::DynamicUsage(mapDeltas) +
           memusage::DynamicUsage(mapLinks) +
           memusage::DynamicUsage(vTxHashes) + cachedInnerUsage;
}
/**
 * Remove a pre-computed set of entries from the mempool.
 *
 * Ancestor/descendant state for remaining transactions is fixed up first
 * (UpdateForRemoveFromMempool), then each staged entry is removed without
 * further validation. Caller must hold cs and must have staged a
 * descendant-closed set.
 *
 * @param updateDescendants when true, also update descendants' ancestor
 *                          state (used when removing for a block).
 * @param reason            removal reason forwarded to removeUnchecked.
 */
void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants,
                              MemPoolRemovalReason reason) {
    AssertLockHeld(cs);
    UpdateForRemoveFromMempool(stage, updateDescendants);
    for (const txiter &it : stage) {
        removeUnchecked(it, reason);
    }
}
int CTxMemPool::Expire(int64_t time) {
LOCK(cs);
indexed_transaction_set::index::type::iterator it =
mapTx.get().begin();
setEntries toremove;
while (it != mapTx.get().end() && it->GetTime() < time) {
toremove.insert(mapTx.project<0>(it));
it++;
}
setEntries stage;
for (txiter removeit : toremove) {
CalculateDescendants(removeit, stage);
}
RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY);
return stage.size();
}
/**
 * Convenience overload of addUnchecked that first computes the entry's
 * in-mempool ancestors (with no package limits — the tx has already been
 * accepted) and then delegates to the main overload.
 */
bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry,
                              bool validFeeEstimate) {
    LOCK(cs);
    setEntries setAncestors;
    uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    std::string dummy;
    CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit,
                              nNoLimit, dummy);
    return addUnchecked(hash, entry, setAncestors, validFeeEstimate);
}
/**
 * Add (or remove, when add is false) `child` to `entry`'s child set in
 * mapLinks, adjusting cachedInnerUsage by the incremental cost of one set
 * element. The empty set `s` exists only to size that increment; usage is
 * only adjusted when the insert/erase actually changed the set.
 */
void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) {
    setEntries s;
    if (add && mapLinks[entry].children.insert(child).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].children.erase(child)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}
/**
 * Add (or remove, when add is false) `parent` to `entry`'s parent set in
 * mapLinks, adjusting cachedInnerUsage by the incremental cost of one set
 * element. Mirror of UpdateChild for the parent direction.
 */
void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) {
    setEntries s;
    if (add && mapLinks[entry].parents.insert(parent).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].parents.erase(parent)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}
/**
 * Return the set of in-mempool parents of `entry` from mapLinks.
 * Asserts that the entry is valid and has a link record — every in-pool
 * entry is expected to have one.
 */
const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolParents(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.parents;
}
/**
 * Return the set of in-mempool children of `entry` from mapLinks.
 * Asserts that the entry is valid and has a link record — every in-pool
 * entry is expected to have one.
 */
const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolChildren(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.children;
}
/**
 * Return the rolling minimum feerate a transaction must pay to enter the
 * pool. The rate is bumped by trackPackageRemoved() when the pool is
 * trimmed, and decays exponentially (half-life ROLLING_FEE_HALFLIFE,
 * shortened when the pool is well under sizelimit) once a block has been
 * connected since the last bump.
 *
 * NOTE: this function contained unresolved diff markers; resolved to the
 * post-patch version, consistent with the release-note removal of
 * -incrementalrelayfee (no incremental-relay-fee floor here).
 */
CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const {
    LOCK(cs);
    if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) {
        // No decay until a block confirms transactions post-bump.
        return CFeeRate(Amount(int64_t(rollingMinimumFeeRate)));
    }

    int64_t time = GetTime();
    if (time > lastRollingFeeUpdate + 10) {
        double halflife = ROLLING_FEE_HALFLIFE;
        // Decay faster while memory usage is comfortably below the limit.
        if (DynamicMemoryUsage() < sizelimit / 4) {
            halflife /= 4;
        } else if (DynamicMemoryUsage() < sizelimit / 2) {
            halflife /= 2;
        }

        rollingMinimumFeeRate =
            rollingMinimumFeeRate /
            pow(2.0, (time - lastRollingFeeUpdate) / halflife);
        lastRollingFeeUpdate = time;
    }
    return CFeeRate(Amount(int64_t(rollingMinimumFeeRate)));
}
/**
 * Record that a package was evicted at the given feerate: raise the rolling
 * minimum feerate to it (if higher) and require a new block before the rate
 * may start decaying again. Caller must hold cs.
 */
void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) {
    AssertLockHeld(cs);
    if (rate.GetFeePerK().GetSatoshis() > rollingMinimumFeeRate) {
        rollingMinimumFeeRate = rate.GetFeePerK().GetSatoshis();
        blockSinceLastRollingFeeBump = false;
    }
}
void CTxMemPool::TrimToSize(size_t sizelimit,
std::vector *pvNoSpendsRemaining) {
LOCK(cs);
unsigned nTxnRemoved = 0;
CFeeRate maxFeeRateRemoved(Amount(0));
while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) {
indexed_transaction_set::index::type::iterator it =
mapTx.get().begin();
// We set the new mempool min fee to the feerate of the removed set,
// plus the "minimum reasonable fee rate" (ie some value under which we
// consider txn to have 0 fee). This way, we don't allow txn to enter
// mempool with feerate equal to txn which were removed with no block in
// between.
CFeeRate removed(it->GetModFeesWithDescendants(),
it->GetSizeWithDescendants());
- removed += incrementalRelayFee;
+ removed += MEMPOOL_FULL_FEE_INCREMENT;
+
trackPackageRemoved(removed);
maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed);
setEntries stage;
CalculateDescendants(mapTx.project<0>(it), stage);
nTxnRemoved += stage.size();
std::vector txn;
if (pvNoSpendsRemaining) {
txn.reserve(stage.size());
for (txiter iter : stage) {
txn.push_back(iter->GetTx());
}
}
RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT);
if (pvNoSpendsRemaining) {
for (const CTransaction &tx : txn) {
for (const CTxIn &txin : tx.vin) {
if (exists(txin.prevout.GetTxId())) {
continue;
}
if (!mapNextTx.count(txin.prevout)) {
pvNoSpendsRemaining->push_back(txin.prevout);
}
}
}
}
}
if (maxFeeRateRemoved > CFeeRate(Amount(0))) {
LogPrint(BCLog::MEMPOOL,
"Removed %u txn, rolling minimum fee bumped to %s\n",
nTxnRemoved, maxFeeRateRemoved.ToString());
}
}
/**
 * Check whether txid's in-mempool ancestor and descendant counts are both
 * strictly below chainLimit. A txid not in the pool trivially satisfies the
 * limit and returns true.
 */
bool CTxMemPool::TransactionWithinChainLimit(const uint256 &txid,
                                             size_t chainLimit) const {
    LOCK(cs);
    auto it = mapTx.find(txid);
    return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit &&
                                 it->GetCountWithDescendants() < chainLimit);
}
/** Seed the two 64-bit SipHash keys with fresh randomness so the hash order
 *  of txids differs per process (hardens hash containers against attack). */
SaltedTxidHasher::SaltedTxidHasher()
    : k0(GetRand(std::numeric_limits<uint64_t>::max())),
      k1(GetRand(std::numeric_limits<uint64_t>::max())) {}