Page MenuHomePhabricator

No OneTemporary

diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 7a3d94bf42..0ef3e8b144 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -1,558 +1,558 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Copyright (c) 2018 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "policy/fees.h"
#include "policy/policy.h"
#include "amount.h"
#include "primitives/transaction.h"
#include "random.h"
#include "streams.h"
#include "txmempool.h"
#include "util.h"
void TxConfirmStats::Initialize(std::vector<double> &defaultBuckets,
unsigned int maxConfirms, double _decay) {
decay = _decay;
for (size_t i = 0; i < defaultBuckets.size(); i++) {
buckets.push_back(defaultBuckets[i]);
bucketMap[defaultBuckets[i]] = i;
}
confAvg.resize(maxConfirms);
curBlockConf.resize(maxConfirms);
unconfTxs.resize(maxConfirms);
for (unsigned int i = 0; i < maxConfirms; i++) {
confAvg[i].resize(buckets.size());
curBlockConf[i].resize(buckets.size());
unconfTxs[i].resize(buckets.size());
}
oldUnconfTxs.resize(buckets.size());
curBlockTxCt.resize(buckets.size());
txCtAvg.resize(buckets.size());
curBlockVal.resize(buckets.size());
avg.resize(buckets.size());
}
// Zero out the data for the current block
void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight) {
for (size_t j = 0; j < buckets.size(); j++) {
oldUnconfTxs[j] += unconfTxs[nBlockHeight % unconfTxs.size()][j];
unconfTxs[nBlockHeight % unconfTxs.size()][j] = 0;
for (size_t i = 0; i < curBlockConf.size(); i++) {
curBlockConf[i][j] = 0;
}
curBlockTxCt[j] = 0;
curBlockVal[j] = 0;
}
}
// Record one confirmed transaction in the current-block stats.
// @param blocksToConfirm 1-based number of blocks the tx took to confirm
// @param val the tx's feerate (same units as the bucket boundaries)
void TxConfirmStats::Record(int blocksToConfirm, double val) {
    // blocksToConfirm is 1-based; ignore nonsensical values.
    if (blocksToConfirm < 1) {
        return;
    }
    const unsigned int bucketindex = bucketMap.lower_bound(val)->second;
    // A tx confirmed within Y blocks also counts as confirmed within every
    // Z >= Y, so bump all counters from blocksToConfirm upward.
    for (size_t conf = blocksToConfirm; conf <= curBlockConf.size(); ++conf) {
        curBlockConf[conf - 1][bucketindex]++;
    }
    curBlockTxCt[bucketindex]++;
    curBlockVal[bucketindex] += val;
}
void TxConfirmStats::UpdateMovingAverages() {
for (unsigned int j = 0; j < buckets.size(); j++) {
for (unsigned int i = 0; i < confAvg.size(); i++) {
confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j];
}
avg[j] = avg[j] * decay + curBlockVal[j];
txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j];
}
}
// returns -1 on error conditions
-double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
- double successBreakPoint,
- bool requireGreater,
- unsigned int nBlockHeight) {
+CFeeRate TxConfirmStats::EstimateMedianFeeRate(int confTarget,
+ double sufficientTxVal,
+ double successBreakPoint,
+ bool requireGreater,
+ unsigned int nBlockHeight) {
    // Counters for a bucket (or range of buckets)
    // Number of tx's confirmed within the confTarget
    double nConf = 0;
    // Total number of tx's that were ever confirmed
    double totalNum = 0;
    // Number of tx's still in mempool for confTarget or longer
    int extraNum = 0;
    int maxbucketindex = buckets.size() - 1;
    // requireGreater means we are looking for the lowest feerate such that all
    // higher values pass, so we start at maxbucketindex (highest feerate) and
    // look at successively smaller buckets until we reach failure. Otherwise,
    // we are looking for the highest feerate such that all lower values fail,
    // and we go in the opposite direction.
    unsigned int startbucket = requireGreater ? maxbucketindex : 0;
    int step = requireGreater ? -1 : 1;
    // We'll combine buckets until we have enough samples.
    // The near and far variables will define the range we've combined
    // The best variables are the last range we saw which still had a high
    // enough confirmation rate to count as success.
    // The cur variables are the current range we're counting.
    unsigned int curNearBucket = startbucket;
    unsigned int bestNearBucket = startbucket;
    unsigned int curFarBucket = startbucket;
    unsigned int bestFarBucket = startbucket;
    bool foundAnswer = false;
    // Size of the circular buffer of per-block unconfirmed-tx counts.
    unsigned int bins = unconfTxs.size();
    // Start counting from highest(default) or lowest feerate transactions
    for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex;
         bucket += step) {
        curFarBucket = bucket;
        nConf += confAvg[confTarget - 1][bucket];
        totalNum += txCtAvg[bucket];
        // Mempool txs outstanding for confTarget blocks or more count against
        // the success rate: they have already failed to confirm in time.
        for (unsigned int confct = confTarget; confct < GetMaxConfirms();
             confct++) {
            extraNum += unconfTxs[(nBlockHeight - confct) % bins][bucket];
        }
        extraNum += oldUnconfTxs[bucket];
        // If we have enough transaction data points in this range of buckets,
        // we can test for success (Only count the confirmed data points, so
        // that each confirmation count will be looking at the same amount of
        // data and same bucket breaks)
        // 1/(1-decay) is the steady-state value of the moving average when one
        // tx per block is added, so this scales sufficientTxVal accordingly.
        if (totalNum >= sufficientTxVal / (1 - decay)) {
            double curPct = nConf / (totalNum + extraNum);
            // Check to see if we are no longer getting confirmed at the success
            // rate
            if (requireGreater && curPct < successBreakPoint) {
                break;
            }
            if (!requireGreater && curPct > successBreakPoint) {
                break;
            }
            // Otherwise update the cumulative stats, and the bucket variables
            // and reset the counters
            foundAnswer = true;
            nConf = 0;
            totalNum = 0;
            extraNum = 0;
            bestNearBucket = curNearBucket;
            bestFarBucket = curFarBucket;
            curNearBucket = bucket + step;
        }
    }
    double median = -1;
    double txSum = 0;
    // Calculate the "average" feerate of the best bucket range that met success
    // conditions. Find the bucket with the median transaction and then report
    // the average feerate from that bucket. This is a compromise between
    // finding the median which we can't since we don't save all tx's and
    // reporting the average which is less accurate
    unsigned int minBucket =
        bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
    unsigned int maxBucket =
        bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket;
    for (unsigned int j = minBucket; j <= maxBucket; j++) {
        txSum += txCtAvg[j];
    }
    if (foundAnswer && txSum != 0) {
        // Walk the winning range until we pass half the tx weight: that bucket
        // holds the median transaction.
        txSum = txSum / 2;
        for (unsigned int j = minBucket; j <= maxBucket; j++) {
            if (txCtAvg[j] < txSum) {
                txSum -= txCtAvg[j];
            } else {
                // we're in the right bucket
                median = avg[j] / txCtAvg[j];
                break;
            }
        }
    }
    LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate "
                                 "%s: %12.5g from buckets %8g - %8g Cur "
                                 "Bucket stats %6.2f%% %8.1f/(%.1f+%d "
                                 "mempool)\n",
             confTarget, requireGreater ? ">" : "<", successBreakPoint,
             requireGreater ? ">" : "<", median, buckets[minBucket],
             buckets[maxBucket], 100 * nConf / (totalNum + extraNum), nConf,
             totalNum, extraNum);
- return median;
+ return CFeeRate(int64_t(ceill(median)) * SATOSHI);
}
// Serialize the long-lived moving averages. Current-block tallies and mempool
// tracking state are transient and intentionally not saved.
// Field order must match TxConfirmStats::Read exactly.
void TxConfirmStats::Write(CAutoFile &fileout) {
    fileout << decay;
    fileout << buckets;
    fileout << avg;
    fileout << txCtAvg;
    fileout << confAvg;
}
// Deserialize estimator state previously written by TxConfirmStats::Write.
// All fields are read into temporaries and validated first, so a corrupt file
// (which throws std::runtime_error) leaves this object unmodified.
void TxConfirmStats::Read(CAutoFile &filein) {
    // Read data file into temporary variables and do some very basic sanity
    // checking
    std::vector<double> fileBuckets;
    std::vector<double> fileAvg;
    std::vector<std::vector<double>> fileConfAvg;
    std::vector<double> fileTxCtAvg;
    double fileDecay;
    size_t maxConfirms;
    size_t numBuckets;
    filein >> fileDecay;
    if (fileDecay <= 0 || fileDecay >= 1) {
        throw std::runtime_error("Corrupt estimates file. Decay must be "
                                 "between 0 and 1 (non-inclusive)");
    }
    filein >> fileBuckets;
    numBuckets = fileBuckets.size();
    if (numBuckets <= 1 || numBuckets > 1000) {
        throw std::runtime_error("Corrupt estimates file. Must have between 2 "
                                 "and 1000 feerate buckets");
    }
    filein >> fileAvg;
    if (fileAvg.size() != numBuckets) {
        throw std::runtime_error(
            "Corrupt estimates file. Mismatch in feerate average bucket count");
    }
    filein >> fileTxCtAvg;
    if (fileTxCtAvg.size() != numBuckets) {
        throw std::runtime_error(
            "Corrupt estimates file. Mismatch in tx count bucket count");
    }
    filein >> fileConfAvg;
    maxConfirms = fileConfAvg.size();
    if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) {
        // one week
        throw std::runtime_error("Corrupt estimates file. Must maintain "
                                 "estimates for between 1 and 1008 (one week) "
                                 "confirms");
    }
    // Every confirm row must have one entry per bucket.
    for (unsigned int i = 0; i < maxConfirms; i++) {
        if (fileConfAvg[i].size() != numBuckets) {
            throw std::runtime_error("Corrupt estimates file. Mismatch in "
                                     "feerate conf average bucket count");
        }
    }
    // Now that we've processed the entire feerate estimate data file and not
    // thrown any errors, we can copy it to our data structures
    decay = fileDecay;
    buckets = fileBuckets;
    avg = fileAvg;
    confAvg = fileConfAvg;
    txCtAvg = fileTxCtAvg;
    bucketMap.clear();
    // Resize the current block variables which aren't stored in the data file
    // to match the number of confirms and buckets
    curBlockConf.resize(maxConfirms);
    for (unsigned int i = 0; i < maxConfirms; i++) {
        curBlockConf[i].resize(buckets.size());
    }
    curBlockTxCt.resize(buckets.size());
    curBlockVal.resize(buckets.size());
    unconfTxs.resize(maxConfirms);
    for (unsigned int i = 0; i < maxConfirms; i++) {
        unconfTxs[i].resize(buckets.size());
    }
    oldUnconfTxs.resize(buckets.size());
    // Rebuild the boundary -> index map for the freshly loaded buckets.
    for (size_t i = 0; i < buckets.size(); i++) {
        bucketMap[buckets[i]] = i;
    }
    LogPrint(
        BCLog::ESTIMATEFEE,
        "Reading estimates: %u buckets counting confirms up to %u blocks\n",
        numBuckets, maxConfirms);
}
// Start tracking a new mempool transaction: bucket it by feerate and count it
// as unconfirmed in the current block's slot of the circular buffer.
// Returns the bucket index so the caller can remove it later.
unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) {
    const unsigned int bucketindex = bucketMap.lower_bound(val)->second;
    unconfTxs[nBlockHeight % unconfTxs.size()][bucketindex]++;
    return bucketindex;
}
// Stop tracking a mempool transaction previously registered via NewTx.
// Decrements either the circular-buffer slot it entered at, or the "older than
// the buffer" counter if it has been outstanding longer than we track.
void TxConfirmStats::removeTx(unsigned int entryHeight,
                              unsigned int nBestSeenHeight,
                              unsigned int bucketindex) {
    // nBestSeenHeight is not updated yet for the new block.
    // If the BlockPolicyEstimator hasn't seen any blocks yet, treat the tx as
    // brand new.
    int blocksAgo =
        (nBestSeenHeight == 0) ? 0 : int(nBestSeenHeight - entryHeight);
    if (blocksAgo < 0) {
        // This can't happen because we call this with our best seen height, no
        // entries can have higher
        LogPrint(BCLog::ESTIMATEFEE,
                 "Blockpolicy error, blocks ago is negative for mempool tx\n");
        return;
    }
    if (blocksAgo >= (int)unconfTxs.size()) {
        // Too old for the circular buffer; it was folded into oldUnconfTxs by
        // ClearCurrent.
        if (oldUnconfTxs[bucketindex] > 0) {
            oldUnconfTxs[bucketindex]--;
        } else {
            LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx "
                                         "removed from >25 "
                                         "blocks,bucketIndex=%u already\n",
                     bucketindex);
        }
        return;
    }
    const unsigned int blockIndex = entryHeight % unconfTxs.size();
    if (unconfTxs[blockIndex][bucketindex] > 0) {
        unconfTxs[blockIndex][bucketindex]--;
    } else {
        LogPrint(BCLog::ESTIMATEFEE,
                 "Blockpolicy error, mempool tx removed from "
                 "blockIndex=%u,bucketIndex=%u already\n",
                 blockIndex, bucketindex);
    }
}
// This function is called from CTxMemPool::removeUnchecked to ensure txs
// removed from the mempool for any reason are no longer tracked. Txs that were
// part of a block have already been removed in processBlockTx to ensure they
// are never double tracked, but it is of no harm to try to remove them again.
bool CBlockPolicyEstimator::removeTx(uint256 hash) {
    auto pos = mapMemPoolTxs.find(hash);
    if (pos == mapMemPoolTxs.end()) {
        // Not tracked (or already removed when its block was processed).
        return false;
    }
    feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight,
                      pos->second.bucketIndex);
    mapMemPoolTxs.erase(pos);
    return true;
}
// Build the default feerate buckets and hand them to the fee statistics
// tracker. Buckets are exponentially spaced (each FEE_SPACING times the last)
// from MIN_FEERATE up to MAX_FEERATE, in satoshi-per-kB.
CBlockPolicyEstimator::CBlockPolicyEstimator()
    : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0) {
    static_assert(MIN_FEERATE > Amount::zero(), "Min feerate must be nonzero");
    CFeeRate minFeeRate(MIN_FEERATE);
    std::vector<double> vfeelist;
    for (double bucketBoundary = minFeeRate.GetFeePerK() / SATOSHI;
         bucketBoundary <= double(MAX_FEERATE / SATOSHI);
         bucketBoundary *= FEE_SPACING) {
        vfeelist.push_back(bucketBoundary);
    }
    // Catch-all top bucket so every observed feerate maps into some bucket.
    vfeelist.push_back(double(INF_FEERATE / SATOSHI));
    feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
}
void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry &entry,
bool validFeeEstimate) {
uint32_t txHeight = entry.GetHeight();
uint256 txid = entry.GetTx().GetId();
if (mapMemPoolTxs.count(txid)) {
LogPrint(BCLog::ESTIMATEFEE,
"Blockpolicy error mempool tx %s already being tracked\n",
txid.ToString().c_str());
return;
}
if (txHeight != nBestSeenHeight) {
// Ignore side chains and re-orgs; assuming they are random they don't
// affect the estimate. We'll potentially double count transactions in
// 1-block reorgs. Ignore txs if BlockPolicyEstimator is not in sync
// with chainActive.Tip(). It will be synced next time a block is
// processed.
return;
}
// Only want to be updating estimates when our blockchain is synced,
// otherwise we'll miscalculate how many blocks its taking to get included.
if (!validFeeEstimate) {
untrackedTxs++;
return;
}
trackedTxs++;
// Feerates are stored and reported as BCH-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
mapMemPoolTxs[txid].blockHeight = txHeight;
mapMemPoolTxs[txid].bucketIndex =
feeStats.NewTx(txHeight, double(feeRate.GetFeePerK() / SATOSHI));
}
bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight,
const CTxMemPoolEntry *entry) {
if (!removeTx(entry->GetTx().GetId())) {
// This transaction wasn't being tracked for fee estimation
return false;
}
// How many blocks did it take for miners to include this transaction?
// blocksToConfirm is 1-based, so a transaction included in the earliest
// possible block has confirmation count of 1
int blocksToConfirm = nBlockHeight - entry->GetHeight();
if (blocksToConfirm <= 0) {
// This can't happen because we don't process transactions from a block
// with a height lower than our greatest seen height
LogPrint(
BCLog::ESTIMATEFEE,
"Blockpolicy error Transaction had negative blocksToConfirm\n");
return false;
}
// Feerates are stored and reported as BCH-per-kb:
CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());
feeStats.Record(blocksToConfirm, double(feeRate.GetFeePerK() / SATOSHI));
return true;
}
void CBlockPolicyEstimator::processBlock(
unsigned int nBlockHeight, std::vector<const CTxMemPoolEntry *> &entries) {
if (nBlockHeight <= nBestSeenHeight) {
// Ignore side chains and re-orgs; assuming they are random they don't
// affect the estimate. And if an attacker can re-org the chain at will,
// then you've got much bigger problems than "attacker can influence
// transaction fees."
return;
}
// Must update nBestSeenHeight in sync with ClearCurrent so that calls to
// removeTx (via processBlockTx) correctly calculate age of unconfirmed txs
// to remove from tracking.
nBestSeenHeight = nBlockHeight;
// Clear the current block state and update unconfirmed circular buffer
feeStats.ClearCurrent(nBlockHeight);
unsigned int countedTxs = 0;
// Repopulate the current block states
for (size_t i = 0; i < entries.size(); i++) {
if (processBlockTx(nBlockHeight, entries[i])) {
countedTxs++;
}
}
// Update all exponential averages with the current block state
feeStats.UpdateMovingAverages();
LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u "
"of %u txs in block, since last block %u of "
"%u tracked, new mempool map size %u\n",
countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs,
mapMemPoolTxs.size());
trackedTxs = 0;
untrackedTxs = 0;
}
// Return a feerate estimated to confirm within confTarget blocks, or a zero
// feerate if no estimate is available.
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) {
    // Return failure if trying to analyze a target we're not tracking
    // It's not possible to get reasonable estimates for confTarget of 1
    if (confTarget <= 1 ||
        (unsigned int)confTarget > feeStats.GetMaxConfirms()) {
        return CFeeRate(Amount::zero());
    }
- double median = feeStats.EstimateMedianVal(
+ CFeeRate median = feeStats.EstimateMedianFeeRate(
        confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
    // A negative result signals the estimator found no answer.
- if (median < 0) {
+ if (median < CFeeRate(Amount::zero())) {
        return CFeeRate(Amount::zero());
    }
- return CFeeRate(int64_t(median) * SATOSHI);
+ return median;
}
// Like estimateFee, but if no answer exists at confTarget, keeps raising the
// target until an estimate is found (reporting the target actually used via
// answerFoundAtTarget), and never returns less than the mempool's min fee.
CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget,
                                                 int *answerFoundAtTarget,
                                                 const CTxMemPool &pool) {
    if (answerFoundAtTarget) {
        *answerFoundAtTarget = confTarget;
    }
    // Return failure if trying to analyze a target we're not tracking
    if (confTarget <= 0 ||
        (unsigned int)confTarget > feeStats.GetMaxConfirms()) {
        return CFeeRate(Amount::zero());
    }
    // It's not possible to get reasonable estimates for confTarget of 1
    if (confTarget == 1) {
        confTarget = 2;
    }
    // Try successively larger targets until the estimator yields an answer.
- double median = -1;
- while (median < 0 &&
- (unsigned int)confTarget <= feeStats.GetMaxConfirms()) {
- median =
- feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS,
- MIN_SUCCESS_PCT, true, nBestSeenHeight);
+ CFeeRate median = CFeeRate(-1 * SATOSHI);
+ while (median < CFeeRate(Amount::zero()) &&
+ uint32_t(confTarget) <= feeStats.GetMaxConfirms()) {
+ median = feeStats.EstimateMedianFeeRate(confTarget++, SUFFICIENT_FEETXS,
+ MIN_SUCCESS_PCT, true,
+ nBestSeenHeight);
    }
    if (answerFoundAtTarget) {
        // confTarget was post-incremented on the successful attempt.
        *answerFoundAtTarget = confTarget - 1;
    }
    // If mempool is limiting txs , return at least the min feerate from the
    // mempool
    Amount minPoolFee =
        pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                       1000000)
            .GetFeePerK();
- if (minPoolFee > Amount::zero() &&
- minPoolFee > (int64_t(median) * SATOSHI)) {
+ if (minPoolFee > Amount::zero() && minPoolFee > median.GetFeePerK()) {
        return CFeeRate(minPoolFee);
    }
- if (median < 0) {
+ if (median < CFeeRate(Amount::zero())) {
        return CFeeRate(Amount::zero());
    }
- return CFeeRate(int64_t(median) * SATOSHI);
+ return median;
}
// Serialize estimator state. Order must match CBlockPolicyEstimator::Read.
void CBlockPolicyEstimator::Write(CAutoFile &fileout) {
    fileout << nBestSeenHeight;
    feeStats.Write(fileout);
}
// Deserialize estimator state written by Write. nBestSeenHeight is committed
// only after feeStats.Read succeeds (which throws on a corrupt file).
void CBlockPolicyEstimator::Read(CAutoFile &filein, int nFileVersion) {
    int nFileBestSeenHeight;
    filein >> nFileBestSeenHeight;
    feeStats.Read(filein);
    nBestSeenHeight = nFileBestSeenHeight;
    if (nFileVersion < 139900) {
        // Older files also stored priority statistics: read and discard them
        // to leave the stream positioned correctly.
        TxConfirmStats priStats;
        priStats.Read(filein);
    }
}
// Precompute the set of fee levels round() snaps to: zero, then exponentially
// spaced values (factor FEE_SPACING) from half the minimum incremental fee
// (at least 1 satoshi) up to MAX_FEERATE.
FeeFilterRounder::FeeFilterRounder(const CFeeRate &minIncrementalFee) {
    Amount minFeeLimit = std::max(SATOSHI, minIncrementalFee.GetFeePerK() / 2);
    feeset.insert(Amount::zero());
    for (double bucketBoundary = minFeeLimit / SATOSHI;
         bucketBoundary <= double(MAX_FEERATE / SATOSHI);
         bucketBoundary *= FEE_SPACING) {
        feeset.insert(int64_t(bucketBoundary) * SATOSHI);
    }
}
// Quantize currentMinFee to one of the precomputed levels. Finds the smallest
// level >= currentMinFee, then (2 out of 3 times, randomly) steps down one
// level for privacy; always steps down when past the end of the set.
// NOTE: the short-circuit order matters — rand32() is only consumed when the
// iterator is not at begin().
Amount FeeFilterRounder::round(const Amount currentMinFee) {
    auto it = feeset.lower_bound(currentMinFee);
    if ((it != feeset.begin() && insecure_rand.rand32() % 3 != 0) ||
        it == feeset.end()) {
        it--;
    }
    return *it;
}
diff --git a/src/policy/fees.h b/src/policy/fees.h
index 26b7dc8e7c..d3627ed545 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -1,288 +1,288 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_POLICYESTIMATOR_H
#define BITCOIN_POLICYESTIMATOR_H
#include "amount.h"
#include "random.h"
#include "uint256.h"
#include <map>
#include <string>
#include <vector>
class CAutoFile;
class CFeeRate;
class CTxMemPoolEntry;
class CTxMemPool;
/** \class CBlockPolicyEstimator
* The BlockPolicyEstimator is used for estimating the feerate needed for a
* transaction to be included in a block within a certain number of blocks.
*
* At a high level the algorithm works by grouping transactions into buckets
* based on having similar feerates and then tracking how long it takes
* transactions in the various buckets to be mined. It operates under the
* assumption that in general transactions of higher feerate will be included in
* blocks before transactions of lower feerate. So for example if you wanted to
* know what feerate you should put on a transaction to be included in a block
* within the next 5 blocks, you would start by looking at the bucket with the
* highest feerate transactions and verifying that a sufficiently high
* percentage of them were confirmed within 5 blocks and then you would look at
* the next highest feerate bucket, and so on, stopping at the last bucket to
* pass the test. The average feerate of transactions in this bucket will give
* you an indication of the lowest feerate you can put on a transaction and
* still have a sufficiently high chance of being confirmed within your desired
* 5 blocks.
*
* Here is a brief description of the implementation:
* When a transaction enters the mempool, we track the height of the block chain
* at entry. Whenever a block comes in, we count the number of transactions in
* each bucket and the total amount of feerate paid in each bucket. Then we
* calculate how many blocks Y it took each transaction to be mined and we track
an array of counters in each bucket for how long it took transactions to
* get confirmed from 1 to a max of 25 and we increment all the counters from Y
* up to 25. This is because for any number Z>=Y the transaction was
* successfully mined within Z blocks. We want to save a history of this
* information, so at any time we have a counter of the total number of
* transactions that happened in a given feerate bucket and the total number
* that were confirmed in each number 1-25 blocks or less for any bucket. We
* save this history by keeping an exponentially decaying moving average of each
one of these stats. Furthermore we also keep track of the number of unmined (in
* mempool) transactions in each bucket and for how many blocks they have been
* outstanding and use that to increase the number of transactions we've seen in
* that feerate bucket when calculating an estimate for any number of
* confirmations below the number of blocks they've been outstanding.
*/
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to
* their approximate feerate and then track how long it took for those txs to be
* included in a block.
*
* The tracking of unconfirmed (mempool) transactions is completely independent
* of the historical tracking of transactions that have been confirmed in a
* block.
*/
class TxConfirmStats {
private:
    // Define the buckets we will group transactions into
    // The upper-bound of the range for the bucket (inclusive)
    std::vector<double> buckets;
    // Map of bucket upper-bound to index into all vectors by bucket
    std::map<double, unsigned int> bucketMap;
    // For each bucket X:
    // Count the total # of txs in each bucket
    // Track the historical moving average of this total over blocks
    std::vector<double> txCtAvg;
    // and calculate the total for the current block to update the moving
    // average
    std::vector<int> curBlockTxCt;
    // Count the total # of txs confirmed within Y blocks in each bucket
    // Track the historical moving average of these totals over blocks
    // confAvg[Y][X]
    std::vector<std::vector<double>> confAvg;
    // and calculate the totals for the current block to update the moving
    // averages
    // curBlockConf[Y][X]
    std::vector<std::vector<int>> curBlockConf;
    // Sum the total feerate of all tx's in each bucket
    // Track the historical moving average of this total over blocks
    std::vector<double> avg;
    // and calculate the total for the current block to update the moving
    // average
    std::vector<double> curBlockVal;
    // Combine the conf counts with tx counts to calculate the confirmation %
    // for each Y,X. Combine the total value with the tx counts to calculate the
    // avg feerate per bucket
    double decay;
    // Mempool counts of outstanding transactions
    // For each bucket X, track the number of transactions in the mempool that
    // are unconfirmed for each possible confirmation value Y
    // unconfTxs[Y][X] (circular buffer indexed by block height % size)
    std::vector<std::vector<int>> unconfTxs;
    // transactions still unconfirmed after MAX_CONFIRMS for each bucket
    std::vector<int> oldUnconfTxs;

public:
    /**
     * Initialize the data structures. This is called by BlockPolicyEstimator's
     * constructor with default values.
     * @param defaultBuckets contains the upper limits for the bucket boundaries
     * @param maxConfirms max number of confirms to track
     * @param decay how much to decay the historical moving average per block
     */
    void Initialize(std::vector<double> &defaultBuckets,
                    unsigned int maxConfirms, double decay);
    /**
     * Clear the state of the curBlock variables to start counting for the new
     * block.
     */
    void ClearCurrent(unsigned int nBlockHeight);
    /**
     * Record a new transaction data point in the current block stats
     * @param blocksToConfirm the number of blocks it took this transaction to
     * confirm
     * @param val the feerate of the transaction
     * @warning blocksToConfirm is 1-based and has to be >= 1
     */
    void Record(int blocksToConfirm, double val);
    /** Record a new transaction entering the mempool*/
    unsigned int NewTx(unsigned int nBlockHeight, double val);
    /** Remove a transaction from mempool tracking stats*/
    void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight,
                  unsigned int bucketIndex);
    /**
     * Update our estimates by decaying our historical moving average and
     * updating with the data gathered from the current block.
     */
    void UpdateMovingAverages();
    /**
     * Calculate a feerate estimate. Find the lowest value bucket (or range of
     * buckets to make sure we have enough data points) whose transactions still
     * have sufficient likelihood of being confirmed within the target number of
     * confirmations
     * @param confTarget target number of confirmations
     * @param sufficientTxVal required average number of transactions per block
     * in a bucket range
     * @param minSuccess the success probability we require
     * @param requireGreater return the lowest feerate such that all higher
     * values pass minSuccess OR
     * return the highest feerate such that all lower values fail
     * minSuccess
     * @param nBlockHeight the current block height
     */
- double EstimateMedianVal(int confTarget, double sufficientTxVal,
- double minSuccess, bool requireGreater,
- unsigned int nBlockHeight);
+ CFeeRate EstimateMedianFeeRate(int confTarget, double sufficientTxVal,
+ double minSuccess, bool requireGreater,
+ unsigned int nBlockHeight);
    /** Return the max number of confirms we're tracking */
    unsigned int GetMaxConfirms() { return confAvg.size(); }
    /** Write state of estimation data to a file*/
    void Write(CAutoFile &fileout);
    /**
     * Read saved state of estimation data from a file and replace all internal
     * data structures and variables with this state.
     */
    void Read(CAutoFile &filein);
};
/** Track confirm delays up to 25 blocks, can't estimate beyond that */
static const unsigned int MAX_BLOCK_CONFIRMS = 25;
/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */
static const double DEFAULT_DECAY = .998;
/** Require greater than 95% of X feerate transactions to be confirmed within Y
 * blocks for X to be big enough */
static const double MIN_SUCCESS_PCT = .95;
/** Require an avg of 1 tx in the combined feerate bucket per block to have stat
 * significance */
static const double SUFFICIENT_FEETXS = 1;
// Minimum and Maximum values for tracking feerates
static constexpr Amount MIN_FEERATE(10 * SATOSHI);
static const Amount MAX_FEERATE(int64_t(1e7) * SATOSHI);
/** Upper bound used for the catch-all top feerate bucket */
static const Amount INF_FEERATE(MAX_MONEY);
// NOTE(review): INF_PRIORITY is not referenced in the code visible here —
// presumably retained for compatibility; confirm before removing.
static const Amount INF_PRIORITY(int64_t(1e9) * MAX_MONEY);
// We have to lump transactions into buckets based on feerate, but we want to be
// able to give accurate estimates over a large range of potential feerates.
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets */
static const double FEE_SPACING = 1.1;
/**
 * We want to be able to estimate feerates that are needed on tx's to be
 * included in a certain number of blocks. Every time a block is added to the
 * best chain, this class records stats on the transactions included in that
 * block
 */
class CBlockPolicyEstimator {
public:
    /**
     * Create new BlockPolicyEstimator and initialize stats tracking classes
     * with default values.
     */
    CBlockPolicyEstimator();
    /** Process all the transactions that have been included in a block */
    void processBlock(unsigned int nBlockHeight,
                      std::vector<const CTxMemPoolEntry *> &entries);
    /** Process a transaction confirmed in a block*/
    bool processBlockTx(unsigned int nBlockHeight,
                        const CTxMemPoolEntry *entry);
    /** Process a transaction accepted to the mempool*/
    void processTransaction(const CTxMemPoolEntry &entry,
                            bool validFeeEstimate);
    /** Remove a transaction from the mempool tracking stats*/
    bool removeTx(uint256 hash);
    /** Return a feerate estimate */
    CFeeRate estimateFee(int confTarget);
    /** Estimate feerate needed to get be included in a block within
     * confTarget blocks. If no answer can be given at confTarget, return an
     * estimate at the lowest target where one can be given.
     */
    CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget,
                              const CTxMemPool &pool);
    /** Write estimation data to a file */
    void Write(CAutoFile &fileout);
    /** Read estimation data from a file */
    void Read(CAutoFile &filein, int nFileVersion);

private:
    // Best block height processed so far (0 until the first block is seen;
    // updated by processBlock and restored by Read).
    unsigned int nBestSeenHeight;
    // Tracking info for one mempool transaction: the chain height at which it
    // entered the mempool and the feerate bucket it was placed in.
    struct TxStatsInfo {
        unsigned int blockHeight;
        unsigned int bucketIndex;
        TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
    };
    // map of txids to information about that transaction
    std::map<uint256, TxStatsInfo> mapMemPoolTxs;
    /** Classes to track historical data on transaction confirmations */
    TxConfirmStats feeStats;
    // Counters for the current inter-block period; reset by processBlock.
    unsigned int trackedTxs;
    unsigned int untrackedTxs;
};
class FeeFilterRounder {
public:
    /** Create new FeeFilterRounder */
    FeeFilterRounder(const CFeeRate &minIncrementalFee);
    /** Quantize a minimum fee for privacy purpose before broadcast **/
    Amount round(const Amount currentMinFee);

private:
    // Precomputed fee levels that round() snaps to.
    std::set<Amount> feeset;
    // Randomness used by round() to vary the quantization.
    FastRandomContext insecure_rand;
};
#endif /*BITCOIN_POLICYESTIMATOR_H */
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 9a68c7902b..5fb375585a 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -1,667 +1,667 @@
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "policy/policy.h"
#include "txmempool.h"
#include "util.h"
#include "test/test_bitcoin.h"
#include <boost/test/unit_test.hpp>
#include <list>
#include <vector>
BOOST_FIXTURE_TEST_SUITE(mempool_tests, TestingSetup)
// Exercises CTxMemPool::removeRecursive over a parent / child / grandchild
// dependency chain, checking descendant removal and repeated-removal no-ops.
BOOST_AUTO_TEST_CASE(MempoolRemoveTest) {
    // Test CTxMemPool::remove functionality
    TestMemPoolEntryHelper entry;
    // Parent transaction with three children, and three grand-children:
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (int i = 0; i < 3; i++) {
        txParent.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[i].nValue = 33000 * SATOSHI;
    }
    // Each child spends one parent output.
    CMutableTransaction txChild[3];
    for (int i = 0; i < 3; i++) {
        txChild[i].vin.resize(1);
        txChild[i].vin[0].scriptSig = CScript() << OP_11;
        txChild[i].vin[0].prevout = COutPoint(txParent.GetId(), i);
        txChild[i].vout.resize(1);
        txChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txChild[i].vout[0].nValue = 11000 * SATOSHI;
    }
    // Each grandchild spends the corresponding child's output.
    CMutableTransaction txGrandChild[3];
    for (int i = 0; i < 3; i++) {
        txGrandChild[i].vin.resize(1);
        txGrandChild[i].vin[0].scriptSig = CScript() << OP_11;
        txGrandChild[i].vin[0].prevout = COutPoint(txChild[i].GetId(), 0);
        txGrandChild[i].vout.resize(1);
        txGrandChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txGrandChild[i].vout[0].nValue = 11000 * SATOSHI;
    }
    CTxMemPool testPool;
    // Nothing in pool, remove should do nothing:
    unsigned int poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);
    // Just the parent:
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 1);
    // Parent, children, grandchildren:
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    for (int i = 0; i < 3; i++) {
        testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i]));
        testPool.addUnchecked(txGrandChild[i].GetId(),
                              entry.FromTx(txGrandChild[i]));
    }
    // Remove Child[0], GrandChild[0] should be removed:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 2);
    // ... make sure grandchild and child are gone:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txGrandChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txChild[0]));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize);
    // Remove parent, all children/grandchildren should go:
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 5);
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);
    // Add children and grandchildren, but NOT the parent (simulate the parent
    // being in a block)
    for (int i = 0; i < 3; i++) {
        testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i]));
        testPool.addUnchecked(txGrandChild[i].GetId(),
                              entry.FromTx(txGrandChild[i]));
    }
    // Now remove the parent, as might happen if a block-re-org occurs but the
    // parent cannot be put into the mempool (maybe because it is non-standard):
    poolSize = testPool.size();
    testPool.removeRecursive(CTransaction(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), poolSize - 6);
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);
}
BOOST_AUTO_TEST_CASE(MempoolClearTest) {
    // Exercise CTxMemPool::clear(): it must be a no-op on an empty pool and
    // must empty every internal container once entries exist.
    TestMemPoolEntryHelper entry;

    // Build a single transaction with three identical outputs.
    CMutableTransaction txParent;
    txParent.vin.resize(1);
    txParent.vin[0].scriptSig = CScript() << OP_11;
    txParent.vout.resize(3);
    for (size_t n = 0; n < txParent.vout.size(); ++n) {
        txParent.vout[n].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
        txParent.vout[n].nValue = 33000 * SATOSHI;
    }

    CTxMemPool testPool;

    // Clearing an empty pool must change nothing.
    testPool.clear();
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);

    // Insert the transaction and confirm each index tracks it.
    testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent));
    BOOST_CHECK_EQUAL(testPool.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.mapTx.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 1UL);
    BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 1UL);

    // After clear(), every container must be empty again.
    testPool.clear();
    BOOST_CHECK_EQUAL(testPool.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.mapTx.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 0UL);
    BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 0UL);
}
// Assert that iterating the pool's index tagged `name` yields transactions in
// exactly the order given by `sortedOrder` (txids as hex strings).
// `testcase` labels the failure message so individual checks are traceable.
template <typename name>
void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder,
               std::string &&testcase) {
    // The pool must contain exactly as many entries as we expect to visit.
    BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size());
    int count = 0;
    for (const auto &poolEntry : pool.mapTx.get<name>()) {
        const std::string txid = poolEntry.GetTx().GetId().ToString();
        BOOST_CHECK_MESSAGE(txid == sortedOrder[count],
                            txid << " != " << sortedOrder[count] << " in test "
                                 << testcase << ":" << count);
        ++count;
    }
}
BOOST_AUTO_TEST_CASE(MempoolIndexingTest) {
    // Verifies that the mempool's descendant_score and mining_score indexes
    // keep entries in the expected order as individual transactions, and then
    // chained packages (parent/child/grandchild), are added and removed.
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;

    /* 3rd highest fee */
    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1));

    /* highest fee */
    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx2.vout[0].nValue = 2 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2));

    /* lowest fee */
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx3.vout[0].nValue = 5 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vout.resize(1);
    tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx4.vout[0].nValue = 6 * COIN;
    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vout.resize(1);
    tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx5.vout[0].nValue = 11 * COIN;
    entry.nTime = 1;
    entry.dPriority = 10.0;
    pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5));
    BOOST_CHECK_EQUAL(pool.size(), 5UL);

    // Expected descendant-score order, lowest first (comments give the fee).
    std::vector<std::string> sortedOrder;
    sortedOrder.resize(5);
    sortedOrder[0] = tx3.GetId().ToString(); // 0
    sortedOrder[1] = tx5.GetId().ToString(); // 10000
    sortedOrder[2] = tx1.GetId().ToString(); // 10000
    sortedOrder[3] = tx4.GetId().ToString(); // 15000
    sortedOrder[4] = tx2.GetId().ToString(); // 20000
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest1");

    /* low fee but with high fee child */
    /* tx6 -> tx7 -> tx8, tx9 -> tx10 */
    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vout.resize(1);
    tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx6.vout[0].nValue = 20 * COIN;
    pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6));
    BOOST_CHECK_EQUAL(pool.size(), 6UL);
    // Check that at this point, tx6 is sorted low
    sortedOrder.insert(sortedOrder.begin(), tx6.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest2");

    CTxMemPool::setEntries setAncestors;
    setAncestors.insert(pool.mapTx.find(tx6.GetId()));
    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(1);
    tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_11;
    tx7.vout.resize(2);
    tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    tx7.vout[1].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[1].nValue = 1 * COIN;

    // Ancestor calculation for tx7 should find exactly {tx6}.
    CTxMemPool::setEntries setAncestorsCalculated;
    std::string dummy;
    BOOST_CHECK_EQUAL(
        pool.CalculateMemPoolAncestors(entry.Fee(2000000 * SATOSHI).FromTx(tx7),
                                       setAncestorsCalculated, 100, 1000000,
                                       1000, 1000000, dummy),
        true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx7.GetId(), entry.FromTx(tx7), setAncestors);
    BOOST_CHECK_EQUAL(pool.size(), 7UL);

    // Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ...
    sortedOrder.erase(sortedOrder.begin());
    sortedOrder.push_back(tx6.GetId().ToString());
    sortedOrder.push_back(tx7.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest3");

    /* low fee child of tx7 */
    CMutableTransaction tx8 = CMutableTransaction();
    tx8.vin.resize(1);
    tx8.vin[0].prevout = COutPoint(tx7.GetId(), 0);
    tx8.vin[0].scriptSig = CScript() << OP_11;
    tx8.vout.resize(1);
    tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx8.vout[0].nValue = 10 * COIN;
    setAncestors.insert(pool.mapTx.find(tx7.GetId()));
    pool.addUnchecked(tx8.GetId(),
                      entry.Fee(Amount::zero()).Time(2).FromTx(tx8),
                      setAncestors);

    // Now tx8 should be sorted low, but tx6/tx7 both high
    sortedOrder.insert(sortedOrder.begin(), tx8.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest4");

    /* low fee child of tx7 */
    CMutableTransaction tx9 = CMutableTransaction();
    tx9.vin.resize(1);
    tx9.vin[0].prevout = COutPoint(tx7.GetId(), 1);
    tx9.vin[0].scriptSig = CScript() << OP_11;
    tx9.vout.resize(1);
    tx9.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx9.vout[0].nValue = 1 * COIN;
    pool.addUnchecked(tx9.GetId(),
                      entry.Fee(Amount::zero()).Time(3).FromTx(tx9),
                      setAncestors);

    // tx9 should be sorted low
    BOOST_CHECK_EQUAL(pool.size(), 9UL);
    sortedOrder.insert(sortedOrder.begin(), tx9.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest5");

    // Snapshot the order before tx10 so we can verify removal restores it.
    std::vector<std::string> snapshotOrder = sortedOrder;

    setAncestors.insert(pool.mapTx.find(tx8.GetId()));
    setAncestors.insert(pool.mapTx.find(tx9.GetId()));
    /* tx10 depends on tx8 and tx9 and has a high fee*/
    CMutableTransaction tx10 = CMutableTransaction();
    tx10.vin.resize(2);
    tx10.vin[0].prevout = COutPoint(tx8.GetId(), 0);
    tx10.vin[0].scriptSig = CScript() << OP_11;
    tx10.vin[1].prevout = COutPoint(tx9.GetId(), 0);
    tx10.vin[1].scriptSig = CScript() << OP_11;
    tx10.vout.resize(1);
    tx10.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx10.vout[0].nValue = 10 * COIN;

    setAncestorsCalculated.clear();
    BOOST_CHECK_EQUAL(pool.CalculateMemPoolAncestors(
                          entry.Fee(200000 * SATOSHI).Time(4).FromTx(tx10),
                          setAncestorsCalculated, 100, 1000000, 1000, 1000000,
                          dummy),
                      true);
    BOOST_CHECK(setAncestorsCalculated == setAncestors);

    pool.addUnchecked(tx10.GetId(), entry.FromTx(tx10), setAncestors);

    /**
     * tx8 and tx9 should both now be sorted higher
     * Final order after tx10 is added:
     *
     * tx3 = 0 (1)
     * tx5 = 10000 (1)
     * tx1 = 10000 (1)
     * tx4 = 15000 (1)
     * tx2 = 20000 (1)
     * tx9 = 200k (2 txs)
     * tx8 = 200k (2 txs)
     * tx10 = 200k (1 tx)
     * tx6 = 2.2M (5 txs)
     * tx7 = 2.2M (4 txs)
     */
    // take out tx9, tx8 from the beginning
    sortedOrder.erase(sortedOrder.begin(), sortedOrder.begin() + 2);
    sortedOrder.insert(sortedOrder.begin() + 5, tx9.GetId().ToString());
    sortedOrder.insert(sortedOrder.begin() + 6, tx8.GetId().ToString());
    // tx10 is just before tx6
    sortedOrder.insert(sortedOrder.begin() + 7, tx10.GetId().ToString());
    CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest6");

    // there should be 10 transactions in the mempool
    BOOST_CHECK_EQUAL(pool.size(), 10UL);

    // Now try removing tx10 and verify the sort order returns to normal
    pool.removeRecursive(pool.mapTx.find(tx10.GetId())->GetTx());
    CheckSort<descendant_score>(pool, snapshotOrder, "MempoolIndexingTest7");

    pool.removeRecursive(pool.mapTx.find(tx9.GetId())->GetTx());
    pool.removeRecursive(pool.mapTx.find(tx8.GetId())->GetTx());
    /* Now check the sort on the mining score index.
     * Final order should be:
     *
     * tx7 (2M)
     * tx2 (20k)
     * tx4 (15000)
     * tx1/tx5 (10000)
     * tx3/6 (0)
     * (Ties resolved by hash)
     */
    sortedOrder.clear();
    sortedOrder.push_back(tx7.GetId().ToString());
    sortedOrder.push_back(tx2.GetId().ToString());
    sortedOrder.push_back(tx4.GetId().ToString());
    if (tx1.GetId() < tx5.GetId()) {
        sortedOrder.push_back(tx5.GetId().ToString());
        sortedOrder.push_back(tx1.GetId().ToString());
    } else {
        sortedOrder.push_back(tx1.GetId().ToString());
        sortedOrder.push_back(tx5.GetId().ToString());
    }
    if (tx3.GetId() < tx6.GetId()) {
        sortedOrder.push_back(tx6.GetId().ToString());
        sortedOrder.push_back(tx3.GetId().ToString());
    } else {
        sortedOrder.push_back(tx3.GetId().ToString());
        sortedOrder.push_back(tx6.GetId().ToString());
    }
    CheckSort<mining_score>(pool, sortedOrder, "MempoolIndexingTest8");
}
BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) {
    // Verifies the ancestor_score index ordering: entries sort by fee rate
    // including ancestors, with ties broken by txid (hash), and the order
    // updates when an ancestor is mined out of the pool.
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;

    /* 3rd highest fee */
    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1));

    /* highest fee */
    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx2.vout[0].nValue = 2 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2));
    // tx2's size is used below to derive a fee just under its fee rate.
    uint64_t tx2Size = CTransaction(tx2).GetTotalSize();

    /* lowest fee */
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx3.vout[0].nValue = 5 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vout.resize(1);
    tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx4.vout[0].nValue = 6 * COIN;
    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vout.resize(1);
    tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx5.vout[0].nValue = 11 * COIN;
    pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5));
    BOOST_CHECK_EQUAL(pool.size(), 5UL);

    // Expected ancestor-score order, highest first (comments give the fee).
    std::vector<std::string> sortedOrder;
    sortedOrder.resize(5);
    sortedOrder[0] = tx2.GetId().ToString(); // 20000
    sortedOrder[1] = tx4.GetId().ToString(); // 15000
    // tx1 and tx5 are both 10000
    // Ties are broken by hash, not timestamp, so determine which hash comes
    // first.
    if (tx1.GetId() < tx5.GetId()) {
        sortedOrder[2] = tx1.GetId().ToString();
        sortedOrder[3] = tx5.GetId().ToString();
    } else {
        sortedOrder[2] = tx5.GetId().ToString();
        sortedOrder[3] = tx1.GetId().ToString();
    }
    sortedOrder[4] = tx3.GetId().ToString(); // 0
    CheckSort<ancestor_score>(pool, sortedOrder,
                              "MempoolAncestorIndexingTest1");

    /* low fee parent with high fee child */
    /* tx6 (0) -> tx7 (high) */
    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vout.resize(1);
    tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx6.vout[0].nValue = 20 * COIN;
    uint64_t tx6Size = CTransaction(tx6).GetTotalSize();
    pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6));
    BOOST_CHECK_EQUAL(pool.size(), 6UL);
    // Ties are broken by hash
    if (tx3.GetId() < tx6.GetId()) {
        sortedOrder.push_back(tx6.GetId().ToString());
    } else {
        sortedOrder.insert(sortedOrder.end() - 1, tx6.GetId().ToString());
    }
    CheckSort<ancestor_score>(pool, sortedOrder,
                              "MempoolAncestorIndexingTest2");

    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(1);
    tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_11;
    tx7.vout.resize(1);
    tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    uint64_t tx7Size = CTransaction(tx7).GetTotalSize();

    /* set the fee to just below tx2's feerate when including ancestor */
    Amount fee = int64_t((20000 / tx2Size) * (tx7Size + tx6Size) - 1) * SATOSHI;

    // CTxMemPoolEntry entry7(tx7, fee, 2, 10.0, 1, true);
    pool.addUnchecked(tx7.GetId(), entry.Fee(Amount(fee)).FromTx(tx7));
    BOOST_CHECK_EQUAL(pool.size(), 7UL);
    // tx7's ancestor score (tx6+tx7) places it just below tx2.
    sortedOrder.insert(sortedOrder.begin() + 1, tx7.GetId().ToString());
    CheckSort<ancestor_score>(pool, sortedOrder,
                              "MempoolAncestorIndexingTest3");

    /* after tx6 is mined, tx7 should move up in the sort */
    std::vector<CTransactionRef> vtx;
    vtx.push_back(MakeTransactionRef(tx6));
    pool.removeForBlock(vtx, 1);

    sortedOrder.erase(sortedOrder.begin() + 1);
    // Ties are broken by hash
    if (tx3.GetId() < tx6.GetId()) {
        sortedOrder.pop_back();
    } else {
        sortedOrder.erase(sortedOrder.end() - 2);
    }
    // With its zero-fee parent gone, tx7 now has the top ancestor score.
    sortedOrder.insert(sortedOrder.begin(), tx7.GetId().ToString());
    CheckSort<ancestor_score>(pool, sortedOrder,
                              "MempoolAncestorIndexingTest4");
}
BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) {
    // Exercises CTxMemPool::TrimToSize (eviction by descendant fee rate,
    // CPFP-aware) and GetMinFee (rolling minimum fee that decays by
    // half-lives after evictions).
    // NOTE: the original chunk contained unmerged diff residue ("-"/"+"
    // lines) around the final GetMinFee check; this version keeps the
    // post-image line ("... / 8 + SATOSHI").
    CTxMemPool pool;
    TestMemPoolEntryHelper entry;
    entry.dPriority = 10.0;
    Amount feeIncrement = MEMPOOL_FULL_FEE_INCREMENT.GetFeePerK();

    CMutableTransaction tx1 = CMutableTransaction();
    tx1.vin.resize(1);
    tx1.vin[0].scriptSig = CScript() << OP_1;
    tx1.vout.resize(1);
    tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL;
    tx1.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx1.GetId(),
                      entry.Fee(10000 * SATOSHI).FromTx(tx1, &pool));

    CMutableTransaction tx2 = CMutableTransaction();
    tx2.vin.resize(1);
    tx2.vin[0].scriptSig = CScript() << OP_2;
    tx2.vout.resize(1);
    tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL;
    tx2.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx2.GetId(),
                      entry.Fee(5000 * SATOSHI).FromTx(tx2, &pool));

    // Trimming to the current usage should evict nothing.
    pool.TrimToSize(pool.DynamicMemoryUsage());
    BOOST_CHECK(pool.exists(tx1.GetId()));
    BOOST_CHECK(pool.exists(tx2.GetId()));

    // Trimming below current usage should remove the lower-feerate tx2.
    pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
    BOOST_CHECK(pool.exists(tx1.GetId()));
    BOOST_CHECK(!pool.exists(tx2.GetId()));

    pool.addUnchecked(tx2.GetId(), entry.FromTx(tx2, &pool));
    CMutableTransaction tx3 = CMutableTransaction();
    tx3.vin.resize(1);
    tx3.vin[0].prevout = COutPoint(tx2.GetId(), 0);
    tx3.vin[0].scriptSig = CScript() << OP_2;
    tx3.vout.resize(1);
    tx3.vout[0].scriptPubKey = CScript() << OP_3 << OP_EQUAL;
    tx3.vout[0].nValue = 10 * COIN;
    pool.addUnchecked(tx3.GetId(),
                      entry.Fee(20000 * SATOSHI).FromTx(tx3, &pool));

    // tx3 should pay for tx2 (CPFP), so tx1 is evicted instead.
    pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
    BOOST_CHECK(!pool.exists(tx1.GetId()));
    BOOST_CHECK(pool.exists(tx2.GetId()));
    BOOST_CHECK(pool.exists(tx3.GetId()));

    // mempool is limited to tx1's size in memory usage, so nothing fits
    pool.TrimToSize(CTransaction(tx1).GetTotalSize());
    BOOST_CHECK(!pool.exists(tx1.GetId()));
    BOOST_CHECK(!pool.exists(tx2.GetId()));
    BOOST_CHECK(!pool.exists(tx3.GetId()));

    // The rolling minimum fee tracks the highest evicted package fee rate
    // plus one increment.
    CFeeRate maxFeeRateRemoved(25000 * SATOSHI,
                               CTransaction(tx3).GetTotalSize() +
                                   CTransaction(tx2).GetTotalSize());
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      maxFeeRateRemoved.GetFeePerK() + feeIncrement);

    // Build a diamond package: tx4 is parent of tx5 and tx6, tx7 spends both.
    CMutableTransaction tx4 = CMutableTransaction();
    tx4.vin.resize(2);
    tx4.vin[0].prevout = COutPoint();
    tx4.vin[0].scriptSig = CScript() << OP_4;
    tx4.vin[1].prevout = COutPoint();
    tx4.vin[1].scriptSig = CScript() << OP_4;
    tx4.vout.resize(2);
    tx4.vout[0].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
    tx4.vout[0].nValue = 10 * COIN;
    tx4.vout[1].scriptPubKey = CScript() << OP_4 << OP_EQUAL;
    tx4.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx5 = CMutableTransaction();
    tx5.vin.resize(2);
    tx5.vin[0].prevout = COutPoint(tx4.GetId(), 0);
    tx5.vin[0].scriptSig = CScript() << OP_4;
    tx5.vin[1].prevout = COutPoint();
    tx5.vin[1].scriptSig = CScript() << OP_5;
    tx5.vout.resize(2);
    tx5.vout[0].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
    tx5.vout[0].nValue = 10 * COIN;
    tx5.vout[1].scriptPubKey = CScript() << OP_5 << OP_EQUAL;
    tx5.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx6 = CMutableTransaction();
    tx6.vin.resize(2);
    tx6.vin[0].prevout = COutPoint(tx4.GetId(), 1);
    tx6.vin[0].scriptSig = CScript() << OP_4;
    tx6.vin[1].prevout = COutPoint();
    tx6.vin[1].scriptSig = CScript() << OP_6;
    tx6.vout.resize(2);
    tx6.vout[0].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
    tx6.vout[0].nValue = 10 * COIN;
    tx6.vout[1].scriptPubKey = CScript() << OP_6 << OP_EQUAL;
    tx6.vout[1].nValue = 10 * COIN;

    CMutableTransaction tx7 = CMutableTransaction();
    tx7.vin.resize(2);
    tx7.vin[0].prevout = COutPoint(tx5.GetId(), 0);
    tx7.vin[0].scriptSig = CScript() << OP_5;
    tx7.vin[1].prevout = COutPoint(tx6.GetId(), 0);
    tx7.vin[1].scriptSig = CScript() << OP_6;
    tx7.vout.resize(2);
    tx7.vout[0].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
    tx7.vout[0].nValue = 10 * COIN;
    tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
    tx7.vout[1].nValue = 10 * COIN;

    pool.addUnchecked(tx4.GetId(),
                      entry.Fee(7000 * SATOSHI).FromTx(tx4, &pool));
    pool.addUnchecked(tx5.GetId(),
                      entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    pool.addUnchecked(tx6.GetId(),
                      entry.Fee(1100 * SATOSHI).FromTx(tx6, &pool));
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    // we only require this remove, at max, 2 txn, because its not clear what
    // we're really optimizing for aside from that
    pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
    BOOST_CHECK(pool.exists(tx4.GetId()));
    BOOST_CHECK(pool.exists(tx6.GetId()));
    BOOST_CHECK(!pool.exists(tx7.GetId()));

    if (!pool.exists(tx5.GetId())) {
        pool.addUnchecked(tx5.GetId(),
                          entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    }
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    // should maximize mempool size by only removing 5/7
    pool.TrimToSize(pool.DynamicMemoryUsage() / 2);
    BOOST_CHECK(pool.exists(tx4.GetId()));
    BOOST_CHECK(!pool.exists(tx5.GetId()));
    BOOST_CHECK(pool.exists(tx6.GetId()));
    BOOST_CHECK(!pool.exists(tx7.GetId()));

    pool.addUnchecked(tx5.GetId(),
                      entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool));
    pool.addUnchecked(tx7.GetId(),
                      entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool));

    std::vector<CTransactionRef> vtx;
    SetMockTime(42);
    SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      maxFeeRateRemoved.GetFeePerK() + feeIncrement);
    // ... we should keep the same min fee until we get a block
    pool.removeForBlock(vtx, 1);
    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE);
    BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(),
                      (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 2);
    // ... then feerate should drop 1/2 each halflife

    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 2);
    BOOST_CHECK_EQUAL(
        pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(),
        (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 4);
    // ... with a 1/2 halflife when mempool is < 1/2 its target size

    SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 2 +
                CTxMemPool::ROLLING_FEE_HALFLIFE / 4);
    BOOST_CHECK_EQUAL(
        pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(),
        (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 8 + SATOSHI);
    // ... with a 1/4 halflife when mempool is < 1/4 its target size

    // Reset mock time so later tests are unaffected.
    SetMockTime(0);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 40c0a74933..e54b7d0b04 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -1,1381 +1,1381 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "txmempool.h"
#include "chainparams.h" // for GetConsensus.
#include "clientversion.h"
#include "consensus/consensus.h"
#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "streams.h"
#include "timedata.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utiltime.h"
#include "validation.h"
#include "version.h"
#include <boost/range/adaptor/reversed.hpp>
// Construct a mempool entry for `_tx`, caching its sizes and seeding the
// package (ancestor/descendant) statistics with the entry's own values.
CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee,
                                 int64_t _nTime, double _entryPriority,
                                 unsigned int _entryHeight,
                                 Amount _inChainInputValue,
                                 bool _spendsCoinbase, int64_t _sigOpsCount,
                                 LockPoints lp)
    : tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority),
      entryHeight(_entryHeight), inChainInputValue(_inChainInputValue),
      spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount),
      lockPoints(lp) {
    // Cache serialized size first: nModSize is derived from it via
    // GetTxSize(), so this ordering matters.
    nTxSize = tx->GetTotalSize();
    nModSize = tx->CalculateModifiedSize(GetTxSize());
    nUsageSize = RecursiveDynamicUsage(tx);

    // A fresh entry's descendant package covers only itself.
    nCountWithDescendants = 1;
    nSizeWithDescendants = GetTxSize();
    nModFeesWithDescendants = nFee;
    // The confirmed (in-chain) portion of the inputs can never exceed the
    // total input value (outputs + fee).
    Amount nValueIn = tx->GetValueOut() + nFee;
    assert(inChainInputValue <= nValueIn);

    // No prioritisation delta has been applied yet.
    feeDelta = Amount::zero();

    // Likewise, the ancestor package initially covers only this entry.
    nCountWithAncestors = 1;
    nSizeWithAncestors = GetTxSize();
    nModFeesWithAncestors = nFee;
    nSigOpCountWithAncestors = sigOpCount;
}
// Copy constructor: delegate to the (implicitly generated) copy-assignment
// operator so all members, including cached package statistics, are copied
// in one place.
CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry &other) {
    *this = other;
}
double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const {
double deltaPriority =
double((currentHeight - entryHeight) * (inChainInputValue / SATOSHI)) /
nModSize;
double dResult = entryPriority + deltaPriority;
// This should only happen if it was called with a height below entry height
if (dResult < 0) {
dResult = 0;
}
return dResult;
}
// Replace the prioritisation fee delta, adjusting the cached package fee
// totals by only the change relative to the previously applied delta.
void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) {
    const Amount diff = newFeeDelta - feeDelta;
    nModFeesWithDescendants += diff;
    nModFeesWithAncestors += diff;
    feeDelta = newFeeDelta;
}
// Overwrite the stored lock points with freshly computed values.
// NOTE(review): presumably called when lock points are re-evaluated after a
// chain state change — confirm against callers.
void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) {
    lockPoints = lp;
}
// Update the given tx for any in-mempool descendants.
// Assumes that setMemPoolChildren is correct for the given tx and all
// descendants.
void CTxMemPool::UpdateForDescendants(txiter updateIt,
                                      cacheMap &cachedDescendants,
                                      const std::set<TxId> &setExclude) {
    // Stage 1: walk the descendant graph starting from the direct children
    // of updateIt, collecting every reachable descendant. `stageEntries`
    // holds entries still to be expanded.
    setEntries stageEntries, setAllDescendants;
    stageEntries = GetMemPoolChildren(updateIt);

    while (!stageEntries.empty()) {
        const txiter cit = *stageEntries.begin();
        setAllDescendants.insert(cit);
        stageEntries.erase(cit);
        const setEntries &setChildren = GetMemPoolChildren(cit);
        for (const txiter childEntry : setChildren) {
            cacheMap::iterator cacheIt = cachedDescendants.find(childEntry);
            if (cacheIt != cachedDescendants.end()) {
                // We've already calculated this one, just add the entries for
                // this set but don't traverse again.
                for (const txiter cacheEntry : cacheIt->second) {
                    setAllDescendants.insert(cacheEntry);
                }
            } else if (!setAllDescendants.count(childEntry)) {
                // Schedule for later processing
                stageEntries.insert(childEntry);
            }
        }
    }
    // setAllDescendants now contains all in-mempool descendants of updateIt.
    // Update and add to cached descendant map
    int64_t modifySize = 0;
    Amount modifyFee = Amount::zero();
    int64_t modifyCount = 0;
    for (txiter cit : setAllDescendants) {
        // Descendants in setExclude are already accounted for elsewhere
        // (e.g. they came from the disconnected block).
        if (!setExclude.count(cit->GetTx().GetId())) {
            modifySize += cit->GetTxSize();
            modifyFee += cit->GetModifiedFee();
            modifyCount++;
            cachedDescendants[updateIt].insert(cit);
            // Update ancestor state for each descendant
            mapTx.modify(cit,
                         update_ancestor_state(updateIt->GetTxSize(),
                                               updateIt->GetModifiedFee(), 1,
                                               updateIt->GetSigOpCount()));
        }
    }
    // Finally fold the accumulated totals into updateIt's descendant state.
    mapTx.modify(updateIt,
                 update_descendant_state(modifySize, modifyFee, modifyCount));
}
// txidsToUpdate is the set of transaction hashes from a disconnected block
// which has been re-added to the mempool. For each entry, look for descendants
// that are outside txidsToUpdate, and add fee/size information for such
// descendants to the parent. For each such descendant, also update the ancestor
// state to include the parent.
void CTxMemPool::UpdateTransactionsFromBlock(
    const std::vector<TxId> &txidsToUpdate) {
    LOCK(cs);
    // For each entry in txidsToUpdate, store the set of in-mempool, but not
    // in-txidsToUpdate transactions, so that we don't have to recalculate
    // descendants when we come across a previously seen entry.
    cacheMap mapMemPoolDescendantsToUpdate;

    // Use a set for lookups into txidsToUpdate (these entries are already
    // accounted for in the state of their ancestors)
    std::set<TxId> setAlreadyIncluded(txidsToUpdate.begin(),
                                      txidsToUpdate.end());

    // Iterate in reverse, so that whenever we are looking at at a transaction
    // we are sure that all in-mempool descendants have already been processed.
    // This maximizes the benefit of the descendant cache and guarantees that
    // setMemPoolChildren will be updated, an assumption made in
    // UpdateForDescendants.
    for (const TxId &txid : boost::adaptors::reverse(txidsToUpdate)) {
        // we cache the in-mempool children to avoid duplicate updates
        setEntries setChildren;
        // calculate children from mapNextTx
        txiter it = mapTx.find(txid);
        if (it == mapTx.end()) {
            continue;
        }
        // mapNextTx is keyed by outpoint; lower_bound on (txid, 0) positions
        // us at the first spend of any of this transaction's outputs.
        auto iter = mapNextTx.lower_bound(COutPoint(txid, 0));
        // First calculate the children, and update setMemPoolChildren to
        // include them, and update their setMemPoolParents to include this tx.
        for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid;
             ++iter) {
            const TxId &childTxId = iter->second->GetId();
            txiter childIter = mapTx.find(childTxId);
            assert(childIter != mapTx.end());
            // We can skip updating entries we've encountered before or that are
            // in the block (which are already accounted for).
            if (setChildren.insert(childIter).second &&
                !setAlreadyIncluded.count(childTxId)) {
                UpdateChild(it, childIter, true);
                UpdateParent(childIter, it, true);
            }
        }
        UpdateForDescendants(it, mapMemPoolDescendantsToUpdate,
                             setAlreadyIncluded);
    }
}
// Populate setAncestors with every in-mempool ancestor of `entry`, enforcing
// the given ancestor/descendant count and size limits. Returns false (with
// errString set) as soon as any limit would be exceeded. When
// fSearchForParents is true the parents are discovered by scanning the
// entry's inputs against mapTx; otherwise the entry must already be in the
// mempool and the cached mapLinks parents are used.
bool CTxMemPool::CalculateMemPoolAncestors(
    const CTxMemPoolEntry &entry, setEntries &setAncestors,
    uint64_t limitAncestorCount, uint64_t limitAncestorSize,
    uint64_t limitDescendantCount, uint64_t limitDescendantSize,
    std::string &errString, bool fSearchForParents /* = true */) const {
    LOCK(cs);

    setEntries parentHashes;
    const CTransaction &tx = entry.GetTx();

    if (fSearchForParents) {
        // Get parents of this transaction that are in the mempool
        // GetMemPoolParents() is only valid for entries in the mempool, so we
        // iterate mapTx to find parents.
        for (const CTxIn &in : tx.vin) {
            txiter piter = mapTx.find(in.prevout.GetTxId());
            if (piter == mapTx.end()) {
                continue;
            }
            parentHashes.insert(piter);
            if (parentHashes.size() + 1 > limitAncestorCount) {
                errString =
                    strprintf("too many unconfirmed parents [limit: %u]",
                              limitAncestorCount);
                return false;
            }
        }
    } else {
        // If we're not searching for parents, we require this to be an entry in
        // the mempool already.
        txiter it = mapTx.iterator_to(entry);
        parentHashes = GetMemPoolParents(it);
    }

    size_t totalSizeWithAncestors = entry.GetTxSize();

    // Breadth-style walk: pop an unprocessed parent, account for it, then
    // stage its own parents.
    while (!parentHashes.empty()) {
        txiter stageit = *parentHashes.begin();

        setAncestors.insert(stageit);
        parentHashes.erase(stageit);
        totalSizeWithAncestors += stageit->GetTxSize();

        // Adding `entry` would push this ancestor's descendant package past
        // its size/count limits.
        if (stageit->GetSizeWithDescendants() + entry.GetTxSize() >
            limitDescendantSize) {
            errString = strprintf(
                "exceeds descendant size limit for tx %s [limit: %u]",
                stageit->GetTx().GetId().ToString(), limitDescendantSize);
            return false;
        }

        if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) {
            errString = strprintf("too many descendants for tx %s [limit: %u]",
                                  stageit->GetTx().GetId().ToString(),
                                  limitDescendantCount);
            return false;
        }

        if (totalSizeWithAncestors > limitAncestorSize) {
            errString = strprintf("exceeds ancestor size limit [limit: %u]",
                                  limitAncestorSize);
            return false;
        }

        const setEntries &setMemPoolParents = GetMemPoolParents(stageit);
        for (const txiter &phash : setMemPoolParents) {
            // If this is a new ancestor, add it.
            if (setAncestors.count(phash) == 0) {
                parentHashes.insert(phash);
            }
            if (parentHashes.size() + setAncestors.size() + 1 >
                limitAncestorCount) {
                errString =
                    strprintf("too many unconfirmed ancestors [limit: %u]",
                              limitAncestorCount);
                return false;
            }
        }
    }

    return true;
}
// Register (add == true) or unregister (add == false) `it` with its
// ancestors: fix up the parent->child links, then fold this entry's
// size/fee/count into the descendant statistics of every ancestor.
void CTxMemPool::UpdateAncestorsOf(bool add, txiter it,
                                   setEntries &setAncestors) {
    // Link (or unlink) this entry as a child of each direct parent.
    setEntries parents = GetMemPoolParents(it);
    for (txiter parentIter : parents) {
        UpdateChild(parentIter, it, add);
    }

    // Apply the signed deltas to every ancestor's descendant state.
    const int64_t direction = add ? 1 : -1;
    const int64_t sizeDelta = direction * it->GetTxSize();
    const Amount feeDelta = direction * it->GetModifiedFee();
    for (txiter ancestorIt : setAncestors) {
        mapTx.modify(ancestorIt,
                     update_descendant_state(sizeDelta, feeDelta, direction));
    }
}
// Fold the totals of the full ancestor set into `it`'s ancestor state
// (size, modified fee, count, and sigop count) with a single modify() call.
void CTxMemPool::UpdateEntryForAncestors(txiter it,
                                         const setEntries &setAncestors) {
    int64_t totalSize = 0;
    Amount totalFee = Amount::zero();
    int64_t totalSigOps = 0;
    for (txiter ancestorIt : setAncestors) {
        totalSize += ancestorIt->GetTxSize();
        totalFee += ancestorIt->GetModifiedFee();
        totalSigOps += ancestorIt->GetSigOpCount();
    }
    const int64_t ancestorCount = setAncestors.size();
    mapTx.modify(it, update_ancestor_state(totalSize, totalFee, ancestorCount,
                                           totalSigOps));
}
// Sever the parent link pointing back at `it` from each of its in-mempool
// children, in preparation for removing `it` from the pool.
void CTxMemPool::UpdateChildrenForRemoval(txiter it) {
    const setEntries &children = GetMemPoolChildren(it);
    for (txiter childIter : children) {
        UpdateParent(childIter, it, false);
    }
}
// Update the rest of the mempool to reflect the removal of entriesToRemove:
// subtract each removed entry from its ancestors' descendant state (and,
// when updateDescendants is set, from its descendants' ancestor state), then
// sever the mapLinks edges. Link severing is deferred to the end so the full
// graph stays traversable while statistics are being updated.
void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove,
                                            bool updateDescendants) {
    // For each entry, walk back all ancestors and decrement size associated
    // with this transaction.
    const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    if (updateDescendants) {
        // updateDescendants should be true whenever we're not recursively
        // removing a tx and all its descendants, eg when a transaction is
        // confirmed in a block. Here we only update statistics and not data in
        // mapLinks (which we need to preserve until we're finished with all
        // operations that need to traverse the mempool).
        for (txiter removeIt : entriesToRemove) {
            setEntries setDescendants;
            CalculateDescendants(removeIt, setDescendants);
            setDescendants.erase(removeIt); // don't update state for self
            int64_t modifySize = -((int64_t)removeIt->GetTxSize());
            Amount modifyFee = -1 * removeIt->GetModifiedFee();
            int modifySigOps = -removeIt->GetSigOpCount();

            for (txiter dit : setDescendants) {
                mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee,
                                                        -1, modifySigOps));
            }
        }
    }

    for (txiter removeIt : entriesToRemove) {
        setEntries setAncestors;
        const CTxMemPoolEntry &entry = *removeIt;
        std::string dummy;
        // Since this is a tx that is already in the mempool, we can call CMPA
        // with fSearchForParents = false. If the mempool is in a consistent
        // state, then using true or false should both be correct, though false
        // should be a bit faster.
        // However, if we happen to be in the middle of processing a reorg, then
        // the mempool can be in an inconsistent state. In this case, the set of
        // ancestors reachable via mapLinks will be the same as the set of
        // ancestors whose packages include this transaction, because when we
        // add a new transaction to the mempool in addUnchecked(), we assume it
        // has no children, and in the case of a reorg where that assumption is
        // false, the in-mempool children aren't linked to the in-block tx's
        // until UpdateTransactionsFromBlock() is called. So if we're being
        // called during a reorg, ie before UpdateTransactionsFromBlock() has
        // been called, then mapLinks[] will differ from the set of mempool
        // parents we'd calculate by searching, and it's important that we use
        // the mapLinks[] notion of ancestor transactions as the set of things
        // to update for removal.
        CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit,
                                  nNoLimit, nNoLimit, dummy, false);
        // Note that UpdateAncestorsOf severs the child links that point to
        // removeIt in the entries for the parents of removeIt.
        UpdateAncestorsOf(false, removeIt, setAncestors);
    }
    // After updating all the ancestor sizes, we can now sever the link between
    // each transaction being removed and any mempool children (ie, update
    // setMemPoolParents for each direct child of a transaction being removed).
    for (txiter removeIt : entriesToRemove) {
        UpdateChildrenForRemoval(removeIt);
    }
}
// Apply signed deltas to this entry's cached with-descendants totals.
// The asserts enforce the invariant that an entry always counts at least
// itself (size and count must stay strictly positive).
void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize,
                                            Amount modifyFee,
                                            int64_t modifyCount) {
    nSizeWithDescendants += modifySize;
    assert(int64_t(nSizeWithDescendants) > 0);
    nModFeesWithDescendants += modifyFee;
    nCountWithDescendants += modifyCount;
    assert(int64_t(nCountWithDescendants) > 0);
}
// Apply signed deltas to this entry's cached with-ancestors totals.
// Size/count must stay strictly positive (the entry counts itself);
// the sigop total may legitimately reach zero.
void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee,
                                          int64_t modifyCount,
                                          int modifySigOps) {
    nSizeWithAncestors += modifySize;
    assert(int64_t(nSizeWithAncestors) > 0);
    nModFeesWithAncestors += modifyFee;
    nCountWithAncestors += modifyCount;
    assert(int64_t(nCountWithAncestors) > 0);
    nSigOpCountWithAncestors += modifySigOps;
    assert(int(nSigOpCountWithAncestors) >= 0);
}
// Construct an empty mempool with sanity checking disabled and a fresh
// fee estimator (owned; released in the destructor).
CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) {
    // lock free clear
    _clear();
    // Sanity checks off by default for performance, because otherwise accepting
    // transactions becomes O(N^2) where N is the number of transactions in the
    // pool
    nCheckFrequency = 0;
    minerPolicyEstimator = new CBlockPolicyEstimator();
}
// Release the fee estimator allocated in the constructor.
CTxMemPool::~CTxMemPool() {
    delete minerPolicyEstimator;
}
// An outpoint is "spent" from the mempool's perspective exactly when some
// in-pool transaction lists it as an input, i.e. it keys mapNextTx.
bool CTxMemPool::isSpent(const COutPoint &outpoint) {
    LOCK(cs);
    return mapNextTx.find(outpoint) != mapNextTx.end();
}
// Returns the running count of mempool mutations (used by callers to detect
// that the pool changed since they last looked).
unsigned int CTxMemPool::GetTransactionsUpdated() const {
    LOCK(cs);
    return nTransactionsUpdated;
}
// Bump the mutation counter by n (e.g. after external events that should
// invalidate cached views of the pool).
void CTxMemPool::AddTransactionsUpdated(unsigned int n) {
    LOCK(cs);
    nTransactionsUpdated += n;
}
// Insert |entry| into the pool without validation, wiring up mapLinks,
// mapNextTx, ancestor statistics, fee-delta application and the fee
// estimator. |setAncestors| must be the entry's in-mempool ancestors.
// The ordering of the bookkeeping steps below matters.
bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry,
                              setEntries &setAncestors, bool validFeeEstimate) {
    NotifyEntryAdded(entry.GetSharedTx());
    // Add to memory pool without checking anything.
    // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks.
    LOCK(cs);
    indexed_transaction_set::iterator newit = mapTx.insert(entry).first;
    mapLinks.insert(make_pair(newit, TxLinks()));
    // Update transaction for any feeDelta created by PrioritiseTransaction
    // TODO: refactor so that the fee delta is calculated before inserting into
    // mapTx.
    std::map<uint256, TXModifier>::const_iterator pos = mapDeltas.find(hash);
    if (pos != mapDeltas.end()) {
        const TXModifier &deltas = pos->second;
        if (deltas.second != Amount::zero()) {
            mapTx.modify(newit, update_fee_delta(deltas.second));
        }
    }
    // Update cachedInnerUsage to include contained transaction's usage.
    // (When we update the entry for in-mempool parents, memory usage will be
    // further updated.)
    cachedInnerUsage += entry.DynamicMemoryUsage();
    const CTransaction &tx = newit->GetTx();
    std::set<uint256> setParentTransactions;
    // Register every input in mapNextTx and collect the distinct parent txids.
    for (const CTxIn &in : tx.vin) {
        mapNextTx.insert(std::make_pair(&in.prevout, &tx));
        setParentTransactions.insert(in.prevout.GetTxId());
    }
    // Don't bother worrying about child transactions of this one. Normal case
    // of a new transaction arriving is that there can't be any children,
    // because such children would be orphans. An exception to that is if a
    // transaction enters that used to be in a block. In that case, our
    // disconnect block logic will call UpdateTransactionsFromBlock to clean up
    // the mess we're leaving here.
    // Update ancestors with information about this tx
    for (const uint256 &phash : setParentTransactions) {
        txiter pit = mapTx.find(phash);
        if (pit != mapTx.end()) {
            UpdateParent(newit, pit, true);
        }
    }
    UpdateAncestorsOf(true, newit, setAncestors);
    UpdateEntryForAncestors(newit, setAncestors);
    nTransactionsUpdated++;
    totalTxSize += entry.GetTxSize();
    minerPolicyEstimator->processTransaction(entry, validFeeEstimate);
    // Track the tx hash for fast relay lookups; the entry remembers its slot
    // so removeUnchecked() can do a swap-and-pop delete.
    vTxHashes.emplace_back(tx.GetHash(), newit);
    newit->vTxHashesIdx = vTxHashes.size() - 1;
    return true;
}
// Erase a single entry from all pool data structures without touching
// ancestor/descendant statistics (callers must have fixed those up first via
// UpdateForRemoveFromMempool / RemoveStaged).
void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) {
    NotifyEntryRemoved(it->GetSharedTx(), reason);
    const uint256 txid = it->GetTx().GetId();
    for (const CTxIn &txin : it->GetTx().vin) {
        mapNextTx.erase(txin.prevout);
    }
    if (vTxHashes.size() > 1) {
        // O(1) delete: move the last element into the vacated slot and patch
        // its back-reference, then pop the tail.
        vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back());
        vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx;
        vTxHashes.pop_back();
        // Reclaim memory once the vector is less than half full.
        if (vTxHashes.size() * 2 < vTxHashes.capacity()) {
            vTxHashes.shrink_to_fit();
        }
    } else {
        vTxHashes.clear();
    }
    // Keep the cached size/usage totals in sync with the erase below.
    totalTxSize -= it->GetTxSize();
    cachedInnerUsage -= it->DynamicMemoryUsage();
    cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) +
                        memusage::DynamicUsage(mapLinks[it].children);
    mapLinks.erase(it);
    mapTx.erase(it);
    nTransactionsUpdated++;
    minerPolicyEstimator->removeTx(txid);
}
// Calculates descendants of entry that are not already in setDescendants, and
// adds to setDescendants. Assumes entryit is already a tx in the mempool and
// setMemPoolChildren is correct for tx and all descendants. Also assumes that
// if an entry is in setDescendants already, then all in-mempool descendants of
// it are already in setDescendants as well, so that we can save time by not
// iterating over those entries.
void CTxMemPool::CalculateDescendants(txiter entryit,
                                      setEntries &setDescendants) {
    // Worklist of discovered-but-not-yet-expanded entries.
    setEntries work;
    if (!setDescendants.count(entryit)) {
        work.insert(entryit);
    }
    // Expand one entry at a time, pushing any child not already accounted
    // for in setDescendants (those were, or will be, walked separately).
    while (!work.empty()) {
        const txiter cur = *work.begin();
        work.erase(work.begin());
        setDescendants.insert(cur);
        for (const txiter &child : GetMemPoolChildren(cur)) {
            if (setDescendants.count(child) == 0) {
                work.insert(child);
            }
        }
    }
}
// Remove |origTx| (if present) and every in-mempool descendant of it.
// If origTx itself is absent, still sweep any in-pool children spending its
// outputs (possible mid-reorg).
void CTxMemPool::removeRecursive(const CTransaction &origTx,
                                 MemPoolRemovalReason reason) {
    // Remove transaction from memory pool.
    LOCK(cs);
    setEntries txToRemove;
    txiter origit = mapTx.find(origTx.GetId());
    if (origit != mapTx.end()) {
        txToRemove.insert(origit);
    } else {
        // When recursively removing but origTx isn't in the mempool be sure to
        // remove any children that are in the pool. This can happen during
        // chain re-orgs if origTx isn't re-accepted into the mempool for any
        // reason.
        for (size_t i = 0; i < origTx.vout.size(); i++) {
            auto it = mapNextTx.find(COutPoint(origTx.GetId(), i));
            if (it == mapNextTx.end()) {
                continue;
            }
            txiter nextit = mapTx.find(it->second->GetId());
            assert(nextit != mapTx.end());
            txToRemove.insert(nextit);
        }
    }
    // Expand each root to its full descendant set, then remove in one batch.
    setEntries setAllRemoves;
    for (txiter it : txToRemove) {
        CalculateDescendants(it, setAllRemoves);
    }
    RemoveStaged(setAllRemoves, false, reason);
}
// After a reorg: evict transactions that are no longer valid at the new tip —
// non-final txs, txs failing sequence locks, and spenders of coinbases that
// are now immature — together with their descendants.
void CTxMemPool::removeForReorg(const Config &config,
                                const CCoinsViewCache *pcoins,
                                unsigned int nMemPoolHeight, int flags) {
    // Remove transactions spending a coinbase which are now immature and
    // no-longer-final transactions.
    LOCK(cs);
    setEntries txToRemove;
    for (indexed_transaction_set::const_iterator it = mapTx.begin();
         it != mapTx.end(); it++) {
        const CTransaction &tx = it->GetTx();
        LockPoints lp = it->GetLockPoints();
        bool validLP = TestLockPointValidity(&lp);
        CValidationState state;
        if (!ContextualCheckTransactionForCurrentBlock(config, tx, state,
                                                       flags) ||
            !CheckSequenceLocks(tx, flags, &lp, validLP)) {
            // Note if CheckSequenceLocks fails the LockPoints may still be
            // invalid. So it's critical that we remove the tx and not depend on
            // the LockPoints.
            txToRemove.insert(it);
        } else if (it->GetSpendsCoinbase()) {
            // Check only inputs that come from the UTXO set (in-mempool
            // parents cannot be coinbases).
            for (const CTxIn &txin : tx.vin) {
                indexed_transaction_set::const_iterator it2 =
                    mapTx.find(txin.prevout.GetTxId());
                if (it2 != mapTx.end()) {
                    continue;
                }
                const Coin &coin = pcoins->AccessCoin(txin.prevout);
                if (nCheckFrequency != 0) {
                    assert(!coin.IsSpent());
                }
                // Evict if the input vanished or its coinbase source is no
                // longer mature at the post-reorg height.
                if (coin.IsSpent() ||
                    (coin.IsCoinBase() &&
                     int64_t(nMemPoolHeight) - coin.GetHeight() <
                         COINBASE_MATURITY)) {
                    txToRemove.insert(it);
                    break;
                }
            }
        }
        // Refresh stale lock points for entries we keep.
        if (!validLP) {
            mapTx.modify(it, update_lock_points(lp));
        }
    }
    setEntries setAllRemoves;
    for (txiter it : txToRemove) {
        CalculateDescendants(it, setAllRemoves);
    }
    RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG);
}
// Remove (recursively) every in-mempool transaction that double-spends one of
// tx's inputs — i.e. conflicts with tx.
void CTxMemPool::removeConflicts(const CTransaction &tx) {
    LOCK(cs);
    for (const CTxIn &txin : tx.vin) {
        auto spender = mapNextTx.find(txin.prevout);
        if (spender == mapNextTx.end()) {
            continue;
        }
        const CTransaction &txConflict = *spender->second;
        if (txConflict != tx) {
            ClearPrioritisation(txConflict.GetId());
            removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT);
        }
    }
}
/**
 * Called when a block is connected. Removes from mempool and updates the miner
 * fee estimator.
 */
void CTxMemPool::removeForBlock(const std::vector<CTransactionRef> &vtx,
                                unsigned int nBlockHeight) {
    LOCK(cs);
    // Reuse DisconnectedBlockTransactions to obtain the block's transactions
    // in topological order (reverse insertion order = parents first).
    DisconnectedBlockTransactions disconnectpool;
    disconnectpool.addForBlock(vtx);
    std::vector<const CTxMemPoolEntry *> entries;
    for (const CTransactionRef &tx : boost::adaptors::reverse(
             disconnectpool.GetQueuedTx().get<insertion_order>())) {
        uint256 txid = tx->GetId();
        indexed_transaction_set::iterator i = mapTx.find(txid);
        if (i != mapTx.end()) {
            entries.push_back(&*i);
        }
    }
    // Before the txs in the new block have been removed from the mempool,
    // update policy estimates
    minerPolicyEstimator->processBlock(nBlockHeight, entries);
    for (const CTransactionRef &tx : boost::adaptors::reverse(
             disconnectpool.GetQueuedTx().get<insertion_order>())) {
        txiter it = mapTx.find(tx->GetId());
        if (it != mapTx.end()) {
            setEntries stage;
            stage.insert(it);
            RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK);
        }
        // Any remaining spender of these inputs conflicts with the block.
        removeConflicts(*tx);
        ClearPrioritisation(tx->GetId());
    }
    disconnectpool.clear();
    lastRollingFeeUpdate = GetTime();
    blockSinceLastRollingFeeBump = true;
}
// Reset every container and cached statistic to the empty-pool state.
// Caller must hold cs (or be in a context where locking is unnecessary,
// e.g. the constructor).
void CTxMemPool::_clear() {
    mapLinks.clear();
    mapTx.clear();
    mapNextTx.clear();
    vTxHashes.clear();
    totalTxSize = 0;
    cachedInnerUsage = 0;
    lastRollingFeeUpdate = GetTime();
    blockSinceLastRollingFeeBump = false;
    rollingMinimumFeeRate = 0;
    // Signal observers that the pool contents changed.
    ++nTransactionsUpdated;
}
// Thread-safe wrapper around _clear().
void CTxMemPool::clear() {
    LOCK(cs);
    _clear();
}
/**
 * Full consistency check of the mempool: validates cached totals
 * (totalTxSize, cachedInnerUsage), mapLinks parent/child sets, the cached
 * ancestor/descendant statistics of every entry, mapNextTx integrity, and
 * that all inputs are spendable against |pcoins| plus the pool itself.
 * Expensive; only runs a nCheckFrequency-weighted fraction of calls.
 *
 * Fix vs. original: in the final mapNextTx sweep the iterator returned by
 * mapTx.find() was dereferenced *before* asserting it != mapTx.end(), which
 * is undefined behavior when the lookup fails; the assert now comes first.
 * Also drops an unused loop counter.
 */
void CTxMemPool::check(const CCoinsViewCache *pcoins) const {
    if (nCheckFrequency == 0) {
        return;
    }
    // Probabilistic throttle: skip most calls.
    if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency) {
        return;
    }
    LogPrint(BCLog::MEMPOOL,
             "Checking mempool with %u transactions and %u inputs\n",
             (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
    uint64_t checkTotal = 0;
    uint64_t innerUsage = 0;
    // Scratch view layered over pcoins so mempool txs can be applied in
    // dependency order without mutating the real cache.
    CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache *>(pcoins));
    const int64_t nSpendHeight = GetSpendHeight(mempoolDuplicate);
    LOCK(cs);
    // Entries whose inputs depend on other mempool txs; re-checked after the
    // first pass once their parents have been applied to mempoolDuplicate.
    std::list<const CTxMemPoolEntry *> waitingOnDependants;
    for (indexed_transaction_set::const_iterator it = mapTx.begin();
         it != mapTx.end(); it++) {
        checkTotal += it->GetTxSize();
        innerUsage += it->DynamicMemoryUsage();
        const CTransaction &tx = it->GetTx();
        txlinksMap::const_iterator linksiter = mapLinks.find(it);
        assert(linksiter != mapLinks.end());
        const TxLinks &links = linksiter->second;
        innerUsage += memusage::DynamicUsage(links.parents) +
                      memusage::DynamicUsage(links.children);
        bool fDependsWait = false;
        setEntries setParentCheck;
        // NOTE(review): parentSizes/parentSigOpCount are accumulated but
        // never asserted against anything in this version.
        int64_t parentSizes = 0;
        int64_t parentSigOpCount = 0;
        for (const CTxIn &txin : tx.vin) {
            // Check that every mempool transaction's inputs refer to available
            // coins, or other mempool tx's.
            indexed_transaction_set::const_iterator it2 =
                mapTx.find(txin.prevout.GetTxId());
            if (it2 != mapTx.end()) {
                const CTransaction &tx2 = it2->GetTx();
                assert(tx2.vout.size() > txin.prevout.GetN() &&
                       !tx2.vout[txin.prevout.GetN()].IsNull());
                fDependsWait = true;
                if (setParentCheck.insert(it2).second) {
                    parentSizes += it2->GetTxSize();
                    parentSigOpCount += it2->GetSigOpCount();
                }
            } else {
                assert(pcoins->HaveCoin(txin.prevout));
            }
            // Check whether its inputs are marked in mapNextTx.
            auto it3 = mapNextTx.find(txin.prevout);
            assert(it3 != mapNextTx.end());
            assert(it3->first == &txin.prevout);
            assert(it3->second == &tx);
        }
        assert(setParentCheck == GetMemPoolParents(it));
        // Verify ancestor state is correct by recomputing it from scratch.
        setEntries setAncestors;
        uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
        std::string dummy;
        CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
                                  nNoLimit, nNoLimit, dummy);
        uint64_t nCountCheck = setAncestors.size() + 1;
        uint64_t nSizeCheck = it->GetTxSize();
        Amount nFeesCheck = it->GetModifiedFee();
        int64_t nSigOpCheck = it->GetSigOpCount();
        for (txiter ancestorIt : setAncestors) {
            nSizeCheck += ancestorIt->GetTxSize();
            nFeesCheck += ancestorIt->GetModifiedFee();
            nSigOpCheck += ancestorIt->GetSigOpCount();
        }
        assert(it->GetCountWithAncestors() == nCountCheck);
        assert(it->GetSizeWithAncestors() == nSizeCheck);
        assert(it->GetSigOpCountWithAncestors() == nSigOpCheck);
        assert(it->GetModFeesWithAncestors() == nFeesCheck);
        // Check children against mapNextTx (all spends of this tx's outputs).
        CTxMemPool::setEntries setChildrenCheck;
        auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0));
        int64_t childSizes = 0;
        for (; iter != mapNextTx.end() &&
               iter->first->GetTxId() == it->GetTx().GetId();
             ++iter) {
            txiter childit = mapTx.find(iter->second->GetId());
            // mapNextTx points to in-mempool transactions
            assert(childit != mapTx.end());
            if (setChildrenCheck.insert(childit).second) {
                childSizes += childit->GetTxSize();
            }
        }
        assert(setChildrenCheck == GetMemPoolChildren(it));
        // Also check to make sure size is greater than sum with immediate
        // children. Just a sanity check, not definitive that this calc is
        // correct...
        assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
        if (fDependsWait) {
            waitingOnDependants.push_back(&(*it));
        } else {
            CValidationState state;
            bool fCheckResult = tx.IsCoinBase() ||
                                Consensus::CheckTxInputs(
                                    tx, state, mempoolDuplicate, nSpendHeight);
            assert(fCheckResult);
            UpdateCoins(mempoolDuplicate, tx, 1000000);
        }
    }
    // Second pass: keep cycling deferred entries until each one's inputs are
    // available; the counter asserts we make progress (no dependency cycle).
    unsigned int stepsSinceLastRemove = 0;
    while (!waitingOnDependants.empty()) {
        const CTxMemPoolEntry *entry = waitingOnDependants.front();
        waitingOnDependants.pop_front();
        CValidationState state;
        if (!mempoolDuplicate.HaveInputs(entry->GetTx())) {
            waitingOnDependants.push_back(entry);
            stepsSinceLastRemove++;
            assert(stepsSinceLastRemove < waitingOnDependants.size());
        } else {
            bool fCheckResult =
                entry->GetTx().IsCoinBase() ||
                Consensus::CheckTxInputs(entry->GetTx(), state,
                                         mempoolDuplicate, nSpendHeight);
            assert(fCheckResult);
            UpdateCoins(mempoolDuplicate, entry->GetTx(), 1000000);
            stepsSinceLastRemove = 0;
        }
    }
    // Every mapNextTx value must point at a live mapTx entry's transaction.
    for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) {
        uint256 txid = it->second->GetId();
        indexed_transaction_set::const_iterator it2 = mapTx.find(txid);
        // Assert existence BEFORE dereferencing the iterator (bug fix).
        assert(it2 != mapTx.end());
        const CTransaction &tx = it2->GetTx();
        assert(&tx == it->second);
    }
    assert(totalTxSize == checkTotal);
    assert(innerUsage == cachedInnerUsage);
}
bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha,
const uint256 &hashb) {
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hasha);
if (i == mapTx.end()) {
return false;
}
indexed_transaction_set::const_iterator j = mapTx.find(hashb);
if (j == mapTx.end()) {
return true;
}
uint64_t counta = i->GetCountWithAncestors();
uint64_t countb = j->GetCountWithAncestors();
if (counta == countb) {
return CompareTxMemPoolEntryByScore()(*i, *j);
}
return counta < countb;
}
namespace {
// Strict weak ordering over mempool iterators: ancestor count ascending,
// ties broken by CompareTxMemPoolEntryByScore. Mirrors CompareDepthAndScore.
class DepthAndScoreComparator {
public:
    bool
    operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a,
               const CTxMemPool::indexed_transaction_set::const_iterator &b) {
        const uint64_t depthA = a->GetCountWithAncestors();
        const uint64_t depthB = b->GetCountWithAncestors();
        if (depthA != depthB) {
            return depthA < depthB;
        }
        return CompareTxMemPoolEntryByScore()(*a, *b);
    }
};
} // namespace
std::vector<CTxMemPool::indexed_transaction_set::const_iterator>
CTxMemPool::GetSortedDepthAndScore() const {
std::vector<indexed_transaction_set::const_iterator> iters;
AssertLockHeld(cs);
iters.reserve(mapTx.size());
for (indexed_transaction_set::iterator mi = mapTx.begin();
mi != mapTx.end(); ++mi) {
iters.push_back(mi);
}
std::sort(iters.begin(), iters.end(), DepthAndScoreComparator());
return iters;
}
// Fill |vtxid| with every mempool txid, ordered by depth-and-score.
void CTxMemPool::queryHashes(std::vector<uint256> &vtxid) {
    LOCK(cs);
    const auto sorted = GetSortedDepthAndScore();
    vtxid.clear();
    vtxid.reserve(mapTx.size());
    for (const auto &entryIt : sorted) {
        vtxid.push_back(entryIt->GetTx().GetId());
    }
}
// Snapshot a mempool entry: shared tx, acceptance time, base feerate, and
// the PrioritiseTransaction delta (modified fee minus base fee).
static TxMempoolInfo
GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) {
    const Amount baseFee = it->GetFee();
    return TxMempoolInfo{it->GetSharedTx(), it->GetTime(),
                         CFeeRate(baseFee, it->GetTxSize()),
                         it->GetModifiedFee() - baseFee};
}
std::vector<TxMempoolInfo> CTxMemPool::infoAll() const {
LOCK(cs);
auto iters = GetSortedDepthAndScore();
std::vector<TxMempoolInfo> ret;
ret.reserve(mapTx.size());
for (auto it : iters) {
ret.push_back(GetInfo(it));
}
return ret;
}
// Look up a transaction by txid; nullptr when not in the pool.
CTransactionRef CTxMemPool::get(const uint256 &txid) const {
    LOCK(cs);
    auto entry = mapTx.find(txid);
    return entry != mapTx.end() ? entry->GetSharedTx() : nullptr;
}
// Snapshot a single entry by txid; default-constructed TxMempoolInfo when
// the txid is not in the pool.
TxMempoolInfo CTxMemPool::info(const uint256 &txid) const {
    LOCK(cs);
    auto entry = mapTx.find(txid);
    if (entry != mapTx.end()) {
        return GetInfo(entry);
    }
    return TxMempoolInfo();
}
// Delegate a confirm-within-nBlocks feerate estimate to the policy estimator.
CFeeRate CTxMemPool::estimateFee(int nBlocks) const {
    LOCK(cs);
    return minerPolicyEstimator->estimateFee(nBlocks);
}
// Smart estimate: the estimator may answer for a different horizon than
// requested; the one actually used is reported via answerFoundAtBlocks.
CFeeRate CTxMemPool::estimateSmartFee(int nBlocks,
                                      int *answerFoundAtBlocks) const {
    LOCK(cs);
    return minerPolicyEstimator->estimateSmartFee(nBlocks, answerFoundAtBlocks,
                                                  *this);
}
// Persist the fee estimator state to |fileout|, prefixed with a minimum
// reader version and the writer's version. Failures are logged and reported
// via the return value; they are non-fatal.
bool CTxMemPool::WriteFeeEstimates(CAutoFile &fileout) const {
    try {
        LOCK(cs);
        // version required to read: 0.13.99 or later
        fileout << 139900;
        // version that wrote the file
        fileout << CLIENT_VERSION;
        minerPolicyEstimator->Write(fileout);
    } catch (const std::exception &) {
        LogPrintf("CTxMemPool::WriteFeeEstimates(): unable to write policy "
                  "estimator data (non-fatal)\n");
        return false;
    }
    return true;
}
// Restore fee estimator state written by WriteFeeEstimates. Rejects files
// that require a newer client. Failures are logged and non-fatal.
bool CTxMemPool::ReadFeeEstimates(CAutoFile &filein) {
    try {
        int nVersionRequired, nVersionThatWrote;
        filein >> nVersionRequired >> nVersionThatWrote;
        if (nVersionRequired > CLIENT_VERSION) {
            return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee "
                         "estimate file",
                         nVersionRequired);
        }
        LOCK(cs);
        minerPolicyEstimator->Read(filein, nVersionThatWrote);
    } catch (const std::exception &) {
        LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy "
                  "estimator data (non-fatal)\n");
        return false;
    }
    return true;
}
// Record a priority/fee delta for |hash| (applied to mining selection). If the
// tx is already in the pool, immediately propagate the fee delta into its
// entry and into every ancestor's descendant-stats and every descendant's
// ancestor-stats, so cached package feerates stay consistent.
void CTxMemPool::PrioritiseTransaction(const uint256 hash,
                                       const std::string strHash,
                                       double dPriorityDelta,
                                       const Amount nFeeDelta) {
    {
        LOCK(cs);
        // operator[] creates a zero delta on first use; deltas accumulate.
        TXModifier &deltas = mapDeltas[hash];
        deltas.first += dPriorityDelta;
        deltas.second += nFeeDelta;
        txiter it = mapTx.find(hash);
        if (it != mapTx.end()) {
            mapTx.modify(it, update_fee_delta(deltas.second));
            // Now update all ancestors' modified fees with descendants
            setEntries setAncestors;
            uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
            std::string dummy;
            CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
                                      nNoLimit, nNoLimit, dummy, false);
            for (txiter ancestorIt : setAncestors) {
                mapTx.modify(ancestorIt,
                             update_descendant_state(0, nFeeDelta, 0));
            }
            // Now update all descendants' modified fees with ancestors
            setEntries setDescendants;
            CalculateDescendants(it, setDescendants);
            setDescendants.erase(it);
            for (txiter descendantIt : setDescendants) {
                mapTx.modify(descendantIt,
                             update_ancestor_state(0, nFeeDelta, 0, 0));
            }
        }
    }
    // Logged outside the lock.
    LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash,
              dPriorityDelta, FormatMoney(nFeeDelta));
}
// Add any recorded PrioritiseTransaction deltas for |hash| onto the caller's
// accumulators; leaves them untouched when no delta is recorded.
void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta,
                             Amount &nFeeDelta) const {
    LOCK(cs);
    auto pos = mapDeltas.find(hash);
    if (pos != mapDeltas.end()) {
        dPriorityDelta += pos->second.first;
        nFeeDelta += pos->second.second;
    }
}
// Drop any recorded priority/fee delta for |hash|.
void CTxMemPool::ClearPrioritisation(const uint256 hash) {
    LOCK(cs);
    mapDeltas.erase(hash);
}
// True iff none of tx's inputs spend an output of a transaction currently in
// this pool (i.e. tx has no in-mempool parents).
bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const {
    bool spendsMempoolOutput = false;
    for (const CTxIn &in : tx.vin) {
        if (exists(in.prevout.GetTxId())) {
            spendsMempoolOutput = true;
            break;
        }
    }
    return !spendsMempoolOutput;
}
// Coin view that overlays unconfirmed mempool outputs on top of |baseIn|.
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn,
                                     const CTxMemPool &mempoolIn)
    : CCoinsViewBacked(baseIn), mempool(mempoolIn) {}
// Fetch a coin, preferring the mempool over the backing view. Mempool coins
// are reported at MEMPOOL_HEIGHT and never as coinbase.
bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
    // If an entry in the mempool exists, always return that one, as it's
    // guaranteed to never conflict with the underlying cache, and it cannot
    // have pruned entries (as it contains full transactions). First checking
    // the underlying cache risks returning a pruned entry instead.
    CTransactionRef ptx = mempool.get(outpoint.GetTxId());
    if (ptx) {
        // The tx is in the mempool: succeed only for a valid output index.
        if (outpoint.GetN() < ptx->vout.size()) {
            coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false);
            return true;
        }
        return false;
    }
    return base->GetCoin(outpoint, coin) && !coin.IsSpent();
}
// A coin is available if the mempool created it or the backing view has it.
bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const {
    return mempool.exists(outpoint) || base->HaveCoin(outpoint);
}
// Approximate total heap usage of the pool: an estimated per-entry overhead
// for the multi_index container plus measured usage of the side maps and the
// cached inner usage (entries' own dynamic allocations and mapLinks sets).
size_t CTxMemPool::DynamicMemoryUsage() const {
    LOCK(cs);
    // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no
    // exact formula for boost::multi_index_contained is implemented.
    return memusage::MallocUsage(sizeof(CTxMemPoolEntry) +
                                 15 * sizeof(void *)) *
               mapTx.size() +
           memusage::DynamicUsage(mapNextTx) +
           memusage::DynamicUsage(mapDeltas) +
           memusage::DynamicUsage(mapLinks) +
           memusage::DynamicUsage(vTxHashes) + cachedInnerUsage;
}
// Remove a pre-computed set of entries: first fix up the package statistics
// of surviving ancestors (and descendants when updateDescendants), then erase
// each entry. Caller must hold cs and must have included all descendants in
// |stage| unless updateDescendants is set.
void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants,
                              MemPoolRemovalReason reason) {
    AssertLockHeld(cs);
    UpdateForRemoveFromMempool(stage, updateDescendants);
    for (const txiter &it : stage) {
        removeUnchecked(it, reason);
    }
}
int CTxMemPool::Expire(int64_t time) {
LOCK(cs);
indexed_transaction_set::index<entry_time>::type::iterator it =
mapTx.get<entry_time>().begin();
setEntries toremove;
while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) {
toremove.insert(mapTx.project<0>(it));
it++;
}
setEntries stage;
for (txiter removeit : toremove) {
CalculateDescendants(removeit, stage);
}
RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY);
return stage.size();
}
// Enforce the mempool's size budget: expire entries older than |age| seconds,
// then trim to |limit| bytes, uncaching UTXO entries that are no longer
// referenced by anything in the pool.
void CTxMemPool::LimitSize(size_t limit, unsigned long age) {
    int expired = Expire(GetTime() - age);
    if (expired != 0) {
        LogPrint(BCLog::MEMPOOL,
                 "Expired %i transactions from the memory pool\n", expired);
    }
    std::vector<COutPoint> vNoSpendsRemaining;
    TrimToSize(limit, &vNoSpendsRemaining);
    for (const COutPoint &removed : vNoSpendsRemaining) {
        pcoinsTip->Uncache(removed);
    }
}
// Convenience overload: compute the entry's in-mempool ancestors (searching
// for parents, with no limits) and defer to the main addUnchecked.
bool CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry,
                              bool validFeeEstimate) {
    LOCK(cs);
    setEntries setAncestors;
    uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    std::string dummy;
    CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit,
                              nNoLimit, dummy);
    return addUnchecked(hash, entry, setAncestors, validFeeEstimate);
}
// Add or remove |child| from |entry|'s child-link set, keeping
// cachedInnerUsage in sync. The empty set |s| is only used to query the
// per-element incremental cost of a setEntries node.
void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) {
    setEntries s;
    if (add && mapLinks[entry].children.insert(child).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].children.erase(child)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}
// Add or remove |parent| from |entry|'s parent-link set, keeping
// cachedInnerUsage in sync (see UpdateChild for the |s| trick).
void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) {
    setEntries s;
    if (add && mapLinks[entry].parents.insert(parent).second) {
        cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
    } else if (!add && mapLinks[entry].parents.erase(parent)) {
        cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
    }
}
// Direct in-mempool parents of |entry|, as recorded in mapLinks.
// |entry| must be a live mempool entry.
const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolParents(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.parents;
}
// Direct in-mempool children of |entry|, as recorded in mapLinks.
// |entry| must be a live mempool entry.
const CTxMemPool::setEntries &
CTxMemPool::GetMemPoolChildren(txiter entry) const {
    assert(entry != mapTx.end());
    txlinksMap::const_iterator it = mapLinks.find(entry);
    assert(it != mapLinks.end());
    return it->second.children;
}
/**
 * The minimum feerate currently required to enter the pool. The rate decays
 * exponentially (halflife ROLLING_FEE_HALFLIFE, faster when the pool is well
 * under |sizelimit|) since the last trackPackageRemoved() bump, but only
 * starts decaying once a block has been mined after the bump.
 *
 * Fix: this span contained raw diff residue ('-'/'+' marker lines around the
 * two return statements), which does not compile. Resolved to the patched
 * form, rounding the floating-point rate UP with ceill() so a small nonzero
 * rolling rate does not truncate to a zero feerate.
 */
CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const {
    LOCK(cs);
    if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) {
        // No decay before the next block (or nothing to decay).
        return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI);
    }
    int64_t time = GetTime();
    // Apply decay at most once per 10 seconds.
    if (time > lastRollingFeeUpdate + 10) {
        double halflife = ROLLING_FEE_HALFLIFE;
        // Decay faster when usage is far below the limit.
        if (DynamicMemoryUsage() < sizelimit / 4) {
            halflife /= 4;
        } else if (DynamicMemoryUsage() < sizelimit / 2) {
            halflife /= 2;
        }
        rollingMinimumFeeRate =
            rollingMinimumFeeRate /
            pow(2.0, (time - lastRollingFeeUpdate) / halflife);
        lastRollingFeeUpdate = time;
    }
    return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI);
}
// Ratchet the rolling minimum feerate up to |rate| (per-kB, in satoshis) when
// a package is evicted for size, and freeze decay until the next block.
// Caller must hold cs.
void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) {
    AssertLockHeld(cs);
    if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) {
        rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI;
        blockSinceLastRollingFeeBump = false;
    }
}
// Evict lowest descendant-feerate packages until memory usage fits
// |sizelimit|. Optionally report outpoints that no longer have any spender
// in the pool (so callers can uncache them).
void CTxMemPool::TrimToSize(size_t sizelimit,
                            std::vector<COutPoint> *pvNoSpendsRemaining) {
    LOCK(cs);
    unsigned nTxnRemoved = 0;
    CFeeRate maxFeeRateRemoved(Amount::zero());
    while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) {
        // Cheapest package first, per the descendant_score index.
        indexed_transaction_set::index<descendant_score>::type::iterator it =
            mapTx.get<descendant_score>().begin();
        // We set the new mempool min fee to the feerate of the removed set,
        // plus the "minimum reasonable fee rate" (ie some value under which we
        // consider txn to have 0 fee). This way, we don't allow txn to enter
        // mempool with feerate equal to txn which were removed with no block in
        // between.
        CFeeRate removed(it->GetModFeesWithDescendants(),
                         it->GetSizeWithDescendants());
        removed += MEMPOOL_FULL_FEE_INCREMENT;
        trackPackageRemoved(removed);
        maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed);
        setEntries stage;
        CalculateDescendants(mapTx.project<0>(it), stage);
        nTxnRemoved += stage.size();
        // Copy the txs before removal so their inputs can be inspected after
        // the entries are gone.
        std::vector<CTransaction> txn;
        if (pvNoSpendsRemaining) {
            txn.reserve(stage.size());
            for (txiter iter : stage) {
                txn.push_back(iter->GetTx());
            }
        }
        RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT);
        if (pvNoSpendsRemaining) {
            for (const CTransaction &tx : txn) {
                for (const CTxIn &txin : tx.vin) {
                    // Skip inputs whose creating tx is still pooled, or that
                    // some other pooled tx still spends.
                    if (exists(txin.prevout.GetTxId())) {
                        continue;
                    }
                    if (!mapNextTx.count(txin.prevout)) {
                        pvNoSpendsRemaining->push_back(txin.prevout);
                    }
                }
            }
        }
    }
    if (maxFeeRateRemoved > CFeeRate(Amount::zero())) {
        LogPrint(BCLog::MEMPOOL,
                 "Removed %u txn, rolling minimum fee bumped to %s\n",
                 nTxnRemoved, maxFeeRateRemoved.ToString());
    }
}
// True when |txid| is absent from the pool, or present with both its ancestor
// and descendant chain counts strictly below |chainLimit|.
bool CTxMemPool::TransactionWithinChainLimit(const uint256 &txid,
                                             size_t chainLimit) const {
    LOCK(cs);
    auto entry = mapTx.find(txid);
    if (entry == mapTx.end()) {
        // Unknown transactions are trivially within limits.
        return true;
    }
    return entry->GetCountWithAncestors() < chainLimit &&
           entry->GetCountWithDescendants() < chainLimit;
}
// Seed the txid hasher with two random 64-bit keys so hash ordering differs
// per process (hardens unordered containers against collision attacks).
SaltedTxidHasher::SaltedTxidHasher()
    : k0(GetRand(std::numeric_limits<uint64_t>::max())),
      k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
/** Maximum bytes for transactions to store for processing during reorg */
static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE;
// Queue a disconnected block's transactions, maintaining topological order in
// the pool (parents after children in insertion order, since blocks are
// disconnected tip-first). Oldest entries are dropped — and recursively
// removed from the mempool — when the pool exceeds its size budget.
void DisconnectedBlockTransactions::addForBlock(
    const std::vector<CTransactionRef> &vtx) {
    for (const auto &tx : vtx) {
        // If we already added it, just skip.
        auto it = queuedTx.find(tx->GetId());
        if (it != queuedTx.end()) {
            continue;
        }
        // Insert the transaction into the pool.
        addTransaction(tx);
        // Fill in the set of parents.
        std::unordered_set<TxId, SaltedTxidHasher> parents;
        for (const CTxIn &in : tx->vin) {
            parents.insert(in.prevout.GetTxId());
        }
        // In order to make sure we keep things in topological order, we check
        // if we already know of the parent of the current transaction. If so,
        // we remove them from the set and then add them back.
        while (parents.size() > 0) {
            // Drain the current frontier; re-queuing a parent may surface
            // further (grand)parents, collected back into |parents|.
            std::unordered_set<TxId, SaltedTxidHasher> worklist(
                std::move(parents));
            for (const TxId &txid : worklist) {
                // If we do not have that txid in the set, nothing needs to be
                // done.
                auto pit = queuedTx.find(txid);
                if (pit == queuedTx.end()) {
                    continue;
                }
                // We have parent in our set, we reinsert them at the right
                // position.
                const CTransactionRef ptx = *pit;
                queuedTx.erase(pit);
                queuedTx.insert(ptx);
                // And we make sure ancestors are covered.
                for (const CTxIn &in : ptx->vin) {
                    parents.insert(in.prevout.GetTxId());
                }
            }
        }
    }
    // Keep the size under control.
    while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) {
        // Drop the earliest entry, and remove its children from the
        // mempool.
        auto it = queuedTx.get<insertion_order>().begin();
        mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
        removeEntry(it);
    }
}
// After a reorg: try to return the queued disconnected transactions to the
// mempool (oldest-block-first), clean up descendants of anything that failed,
// repair ancestor/descendant links, evict now-invalid entries, and re-apply
// the mempool size limit. Requires cs_main.
void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config,
                                                          bool fAddToMempool) {
    AssertLockHeld(cs_main);
    std::vector<TxId> txidsUpdate;
    // disconnectpool's insertion_order index sorts the entries from oldest to
    // newest, but the oldest entry will be the last tx from the latest mined
    // block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions back to
    // the mempool starting with the earliest transaction that had been
    // previously seen in a block.
    for (const CTransactionRef &tx :
         boost::adaptors::reverse(queuedTx.get<insertion_order>())) {
        // ignore validation errors in resurrected transactions
        CValidationState stateDummy;
        if (!fAddToMempool || tx->IsCoinBase() ||
            !AcceptToMemoryPool(config, mempool, stateDummy, tx, false, nullptr,
                                true)) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG);
        } else if (mempool.exists(tx->GetId())) {
            txidsUpdate.push_back(tx->GetId());
        }
    }
    queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in the
    // disconnectpool that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(txidsUpdate);
    // We also need to remove any now-immature transactions
    mempool.removeForReorg(config, pcoinsTip, chainActive.Tip()->nHeight + 1,
                           STANDARD_LOCKTIME_VERIFY_FLAGS);
    // Re-limit mempool size, in case we added any transactions
    mempool.LimitSize(
        gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
        gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}

File Metadata

Mime Type
text/x-diff
Expires
Sun, Mar 2, 08:53 (1 d, 58 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5187135
Default Alt Text
(114 KB)

Event Timeline