diff --git a/src/amount.h b/src/amount.h
index 6ac698999..158e45eb4 100644
--- a/src/amount.h
+++ b/src/amount.h
@@ -1,177 +1,181 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_AMOUNT_H
 #define BITCOIN_AMOUNT_H
 
 #include "serialize.h"
 
 #include <cstdlib>
 #include <ostream>
 #include <string>
 #include <type_traits>
 
 struct Amount {
 private:
     int64_t amount;
 
 public:
-    Amount() : amount(0) {}
+    constexpr Amount() : amount(0) {}
-    template <typename T> Amount(T _camount) : amount(_camount) {
+    template <typename T> constexpr Amount(T _camount) : amount(_camount) {
         static_assert(std::is_integral<T>(),
                       "Only integer types can be used as amounts");
     }
-    Amount(const Amount &_camount) : amount(_camount.amount) {}
+    constexpr Amount(const Amount &_camount) : amount(_camount.amount) {}
 
     // Allow access to underlying value for non-monetary operations
     int64_t GetSatoshis() const { return amount; }
 
     /*
      * Implement standard operators
      */
     Amount &operator+=(const Amount a) {
         amount += a.amount;
         return *this;
     }
     Amount &operator-=(const Amount a) {
         amount -= a.amount;
         return *this;
     }
 
-    friend bool operator<(const Amount a, const Amount b) {
+    friend constexpr bool operator<(const Amount a, const Amount b) {
         return a.amount < b.amount;
     }
-    friend bool operator==(const Amount a, const Amount b) {
+    friend constexpr bool operator==(const Amount a, const Amount b) {
         return a.amount == b.amount;
     }
-    friend bool operator>(const Amount a, const Amount b) {
+    friend constexpr bool operator>(const Amount a, const Amount b) {
         return b.amount < a.amount;
     }
-    friend bool operator!=(const Amount a, const Amount b) {
+    friend constexpr bool operator!=(const Amount a, const Amount b) {
         return !(a.amount == b.amount);
     }
-    friend bool operator<=(const Amount a, const Amount b) {
+    friend constexpr bool operator<=(const Amount a, const Amount b) {
         return !(a.amount > b.amount);
     }
-    friend bool operator>=(const Amount a, const Amount b) {
+    friend constexpr bool operator>=(const Amount a, const Amount b) {
         return !(a.amount < b.amount);
     }
-    friend Amount operator+(const Amount a, const Amount b) {
+    friend constexpr Amount operator+(const Amount a, const Amount b) {
         return Amount(a.amount + b.amount);
     }
-    friend Amount operator-(const Amount a, const Amount b) {
+    friend constexpr Amount operator-(const Amount a, const Amount b) {
         return Amount(a.amount - b.amount);
     }
 
     // Implemented for allowing COIN as a base unit.
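A minimal sketch, not part of this diff, of what the constexpr change above buys: Amount values can now participate in constant expressions. The snippet is hypothetical, uses made-up values, and assumes it is compiled inside the tree so that amount.h and its dependencies resolve.

    // Hypothetical compile-time checks enabled by the constexpr constructors
    // and operators above; nothing here exists in the patch itself.
    #include "amount.h"

    static_assert(Amount(2) + Amount(3) == Amount(5),
                  "Amount addition should fold at compile time");
    static_assert(Amount(7) - Amount(2) >= Amount(5),
                  "Amount subtraction and comparison should fold at compile time");

    int main() {
        return 0;
    }

The scalar-multiplication and division overloads that the comment above refers to continue below and receive the same constexpr treatment.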
- friend Amount operator*(const int64_t a, const Amount b) { + friend constexpr Amount operator*(const int64_t a, const Amount b) { return Amount(a * b.amount); } - friend Amount operator*(const int a, const Amount b) { + friend constexpr Amount operator*(const int a, const Amount b) { return Amount(a * b.amount); } // DO NOT IMPLEMENT - friend Amount operator*(const double a, const Amount b) = delete; - int64_t operator/(const Amount b) const { return amount / b.amount; } - Amount operator/(const int64_t b) const { return Amount(amount / b); } - Amount operator/(const int b) const { return Amount(amount / b); } + friend constexpr Amount operator*(const double a, const Amount b) = delete; + constexpr int64_t operator/(const Amount b) const { + return amount / b.amount; + } + constexpr Amount operator/(const int64_t b) const { + return Amount(amount / b); + } + constexpr Amount operator/(const int b) const { return Amount(amount / b); } // DO NOT IMPLEMENT - Amount operator/(const double b) const = delete; + constexpr Amount operator/(const double b) const = delete; // ostream support friend std::ostream &operator<<(std::ostream &stream, const Amount &ca) { return stream << ca.amount; } std::string ToString() const; // serialization support ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(amount); } }; /** Amount in satoshis (Can be negative) */ typedef int64_t CAmount; static const Amount COIN = 100000000; static const Amount CENT = 1000000; extern const std::string CURRENCY_UNIT; /** * No amount larger than this (in satoshi) is valid. * * Note that this constant is *not* the total money supply, which in Bitcoin * currently happens to be less than 21,000,000 BCC for various reasons, but * rather a sanity check. As this sanity check is used by consensus-critical * validation code, the exact value of the MAX_MONEY constant is consensus * critical; in unusual circumstances like a(nother) overflow bug that allowed * for the creation of coins out of thin air modification could lead to a fork. */ static const Amount MAX_MONEY = 21000000 * COIN; inline bool MoneyRange(const Amount nValue) { return (nValue >= 0 && nValue <= MAX_MONEY); } /** * Fee rate in satoshis per kilobyte: Amount / kB */ class CFeeRate { private: // unit is satoshis-per-1,000-bytes Amount nSatoshisPerK; public: /** Fee rate of 0 satoshis per kB */ CFeeRate() : nSatoshisPerK(0) {} explicit CFeeRate(const Amount _nSatoshisPerK) : nSatoshisPerK(_nSatoshisPerK) {} /** * Constructor for a fee rate in satoshis per kB. The size in bytes must not * exceed (2^63 - 1) */ CFeeRate(const Amount nFeePaid, size_t nBytes); CFeeRate(const CFeeRate &other) { nSatoshisPerK = other.nSatoshisPerK; } /** * Return the fee in satoshis for the given size in bytes. 
*/ Amount GetFee(size_t nBytes) const; /** * Return the fee in satoshis for a size of 1000 bytes */ Amount GetFeePerK() const { return GetFee(1000); } friend bool operator<(const CFeeRate &a, const CFeeRate &b) { return a.nSatoshisPerK < b.nSatoshisPerK; } friend bool operator>(const CFeeRate &a, const CFeeRate &b) { return a.nSatoshisPerK > b.nSatoshisPerK; } friend bool operator==(const CFeeRate &a, const CFeeRate &b) { return a.nSatoshisPerK == b.nSatoshisPerK; } friend bool operator<=(const CFeeRate &a, const CFeeRate &b) { return a.nSatoshisPerK <= b.nSatoshisPerK; } friend bool operator>=(const CFeeRate &a, const CFeeRate &b) { return a.nSatoshisPerK >= b.nSatoshisPerK; } CFeeRate &operator+=(const CFeeRate &a) { nSatoshisPerK += a.nSatoshisPerK; return *this; } std::string ToString() const; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(nSatoshisPerK); } }; #endif // BITCOIN_AMOUNT_H diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index 060971169..75a47e830 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -1,578 +1,580 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "policy/fees.h" #include "policy/policy.h" #include "amount.h" #include "primitives/transaction.h" #include "random.h" #include "streams.h" #include "txmempool.h" #include "util.h" void TxConfirmStats::Initialize(std::vector &defaultBuckets, unsigned int maxConfirms, double _decay) { decay = _decay; for (size_t i = 0; i < defaultBuckets.size(); i++) { buckets.push_back(defaultBuckets[i]); bucketMap[defaultBuckets[i]] = i; } confAvg.resize(maxConfirms); curBlockConf.resize(maxConfirms); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { confAvg[i].resize(buckets.size()); curBlockConf[i].resize(buckets.size()); unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); curBlockTxCt.resize(buckets.size()); txCtAvg.resize(buckets.size()); curBlockVal.resize(buckets.size()); avg.resize(buckets.size()); } // Zero out the data for the current block void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight) { for (size_t j = 0; j < buckets.size(); j++) { oldUnconfTxs[j] += unconfTxs[nBlockHeight % unconfTxs.size()][j]; unconfTxs[nBlockHeight % unconfTxs.size()][j] = 0; for (size_t i = 0; i < curBlockConf.size(); i++) { curBlockConf[i][j] = 0; } curBlockTxCt[j] = 0; curBlockVal[j] = 0; } } void TxConfirmStats::Record(int blocksToConfirm, double val) { // blocksToConfirm is 1-based if (blocksToConfirm < 1) { return; } unsigned int bucketindex = bucketMap.lower_bound(val)->second; for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) { curBlockConf[i - 1][bucketindex]++; } curBlockTxCt[bucketindex]++; curBlockVal[bucketindex] += val; } void TxConfirmStats::UpdateMovingAverages() { for (unsigned int j = 0; j < buckets.size(); j++) { for (unsigned int i = 0; i < confAvg.size(); i++) { confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j]; } avg[j] = avg[j] * decay + curBlockVal[j]; txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j]; } } // returns -1 on error conditions double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, double successBreakPoint, bool requireGreater, unsigned int nBlockHeight) { // Counters for a bucket (or range of buckets) // Number of tx's confirmed 
within the confTarget double nConf = 0; // Total number of tx's that were ever confirmed double totalNum = 0; // Number of tx's still in mempool for confTarget or longer int extraNum = 0; int maxbucketindex = buckets.size() - 1; // requireGreater means we are looking for the lowest feerate such that all // higher values pass, so we start at maxbucketindex (highest feerate) and // look at successively smaller buckets until we reach failure. Otherwise, // we are looking for the highest feerate such that all lower values fail, // and we go in the opposite direction. unsigned int startbucket = requireGreater ? maxbucketindex : 0; int step = requireGreater ? -1 : 1; // We'll combine buckets until we have enough samples. // The near and far variables will define the range we've combined // The best variables are the last range we saw which still had a high // enough confirmation rate to count as success. // The cur variables are the current range we're counting. unsigned int curNearBucket = startbucket; unsigned int bestNearBucket = startbucket; unsigned int curFarBucket = startbucket; unsigned int bestFarBucket = startbucket; bool foundAnswer = false; unsigned int bins = unconfTxs.size(); // Start counting from highest(default) or lowest feerate transactions for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) { curFarBucket = bucket; nConf += confAvg[confTarget - 1][bucket]; totalNum += txCtAvg[bucket]; for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++) { extraNum += unconfTxs[(nBlockHeight - confct) % bins][bucket]; } extraNum += oldUnconfTxs[bucket]; // If we have enough transaction data points in this range of buckets, // we can test for success (Only count the confirmed data points, so // that each confirmation count will be looking at the same amount of // data and same bucket breaks) if (totalNum >= sufficientTxVal / (1 - decay)) { double curPct = nConf / (totalNum + extraNum); // Check to see if we are no longer getting confirmed at the success // rate if (requireGreater && curPct < successBreakPoint) { break; } if (!requireGreater && curPct > successBreakPoint) { break; } // Otherwise update the cumulative stats, and the bucket variables // and reset the counters foundAnswer = true; nConf = 0; totalNum = 0; extraNum = 0; bestNearBucket = curNearBucket; bestFarBucket = curFarBucket; curNearBucket = bucket + step; } } double median = -1; double txSum = 0; // Calculate the "average" feerate of the best bucket range that met success // conditions. Find the bucket with the median transaction and then report // the average feerate from that bucket. This is a compromise between // finding the median which we can't since we don't save all tx's and // reporting the average which is less accurate unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket; unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket; for (unsigned int j = minBucket; j <= maxBucket; j++) { txSum += txCtAvg[j]; } if (foundAnswer && txSum != 0) { txSum = txSum / 2; for (unsigned int j = minBucket; j <= maxBucket; j++) { if (txCtAvg[j] < txSum) { txSum -= txCtAvg[j]; } else { // we're in the right bucket median = avg[j] / txCtAvg[j]; break; } } } LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: " "%12.5g from buckets %8g - %8g Cur Bucket stats " "%6.2f%% %8.1f/(%.1f+%d mempool)\n", confTarget, requireGreater ? ">" : "<", successBreakPoint, requireGreater ? 
">" : "<", median, buckets[minBucket], buckets[maxBucket], 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum); return median; } void TxConfirmStats::Write(CAutoFile &fileout) { fileout << decay; fileout << buckets; fileout << avg; fileout << txCtAvg; fileout << confAvg; } void TxConfirmStats::Read(CAutoFile &filein) { // Read data file into temporary variables and do some very basic sanity // checking std::vector fileBuckets; std::vector fileAvg; std::vector> fileConfAvg; std::vector fileTxCtAvg; double fileDecay; size_t maxConfirms; size_t numBuckets; filein >> fileDecay; if (fileDecay <= 0 || fileDecay >= 1) { throw std::runtime_error("Corrupt estimates file. Decay must be " "between 0 and 1 (non-inclusive)"); } filein >> fileBuckets; numBuckets = fileBuckets.size(); if (numBuckets <= 1 || numBuckets > 1000) { throw std::runtime_error("Corrupt estimates file. Must have between 2 " "and 1000 feerate buckets"); } filein >> fileAvg; if (fileAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in feerate average bucket count"); } filein >> fileTxCtAvg; if (fileTxCtAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in tx count bucket count"); } filein >> fileConfAvg; maxConfirms = fileConfAvg.size(); if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week throw std::runtime_error("Corrupt estimates file. Must maintain " "estimates for between 1 and 1008 (one week) " "confirms"); } for (unsigned int i = 0; i < maxConfirms; i++) { if (fileConfAvg[i].size() != numBuckets) { throw std::runtime_error("Corrupt estimates file. Mismatch in " "feerate conf average bucket count"); } } // Now that we've processed the entire feerate estimate data file and not // thrown any errors, we can copy it to our data structures decay = fileDecay; buckets = fileBuckets; avg = fileAvg; confAvg = fileConfAvg; txCtAvg = fileTxCtAvg; bucketMap.clear(); // Resize the current block variables which aren't stored in the data file // to match the number of confirms and buckets curBlockConf.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { curBlockConf[i].resize(buckets.size()); } curBlockTxCt.resize(buckets.size()); curBlockVal.resize(buckets.size()); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); for (size_t i = 0; i < buckets.size(); i++) { bucketMap[buckets[i]] = i; } LogPrint( "estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n", numBuckets, maxConfirms); } unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) { unsigned int bucketindex = bucketMap.lower_bound(val)->second; unsigned int blockIndex = nBlockHeight % unconfTxs.size(); unconfTxs[blockIndex][bucketindex]++; return bucketindex; } void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex) { // nBestSeenHeight is not updated yet for the new block int blocksAgo = nBestSeenHeight - entryHeight; if (nBestSeenHeight == 0) { // the BlockPolicyEstimator hasn't seen any blocks yet blocksAgo = 0; } if (blocksAgo < 0) { // This can't happen because we call this with our best seen height, no // entries can have higher LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n"); return; } if (blocksAgo >= (int)unconfTxs.size()) { if (oldUnconfTxs[bucketindex] > 0) { oldUnconfTxs[bucketindex]--; } else { LogPrint("estimatefee", 
"Blockpolicy error, mempool tx removed " "from >25 blocks,bucketIndex=%u already\n", bucketindex); } } else { unsigned int blockIndex = entryHeight % unconfTxs.size(); if (unconfTxs[blockIndex][bucketindex] > 0) { unconfTxs[blockIndex][bucketindex]--; } else { LogPrint("estimatefee", "Blockpolicy error, mempool tx removed " "from blockIndex=%u,bucketIndex=%u " "already\n", blockIndex, bucketindex); } } } // This function is called from CTxMemPool::removeUnchecked to ensure txs // removed from the mempool for any reason are no longer tracked. Txs that were // part of a block have already been removed in processBlockTx to ensure they // are never double tracked, but it is of no harm to try to remove them again. bool CBlockPolicyEstimator::removeTx(uint256 hash) { std::map::iterator pos = mapMemPoolTxs.find(hash); - if (pos != mapMemPoolTxs.end()) { - feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, - pos->second.bucketIndex); - mapMemPoolTxs.erase(hash); - return true; - } else { + if (pos == mapMemPoolTxs.end()) { return false; } + + feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, + pos->second.bucketIndex); + mapMemPoolTxs.erase(hash); + return true; } CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate &_minRelayFee) : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0) { - static_assert(MIN_FEERATE > 0, "Min feerate must be nonzero"); - minTrackedFee = _minRelayFee < CFeeRate(Amount(int64_t(MIN_FEERATE))) - ? CFeeRate(Amount(int64_t(MIN_FEERATE))) - : _minRelayFee; + static_assert(MIN_FEERATE > Amount(0), "Min feerate must be nonzero"); + CFeeRate minFeeRate(MIN_FEERATE); + minTrackedFee = _minRelayFee < minFeeRate ? minFeeRate : _minRelayFee; std::vector vfeelist; - for (Amount bucketBoundary = minTrackedFee.GetFeePerK(); - bucketBoundary <= MAX_FEERATE; - bucketBoundary += bucketBoundary / FEE_SPACING_FRACTION) { - vfeelist.push_back(double(bucketBoundary.GetSatoshis())); + for (double bucketBoundary = minTrackedFee.GetFeePerK().GetSatoshis(); + bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); + bucketBoundary *= FEE_SPACING) { + vfeelist.push_back(bucketBoundary); } + vfeelist.push_back(double(INF_FEERATE.GetSatoshis())); feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY); } void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry &entry, bool validFeeEstimate) { uint32_t txHeight = entry.GetHeight(); uint256 txid = entry.GetTx().GetId(); if (mapMemPoolTxs.count(txid)) { LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n", txid.ToString().c_str()); return; } if (txHeight != nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. We'll potentially double count transactions in // 1-block reorgs. Ignore txs if BlockPolicyEstimator is not in sync // with chainActive.Tip(). It will be synced next time a block is // processed. return; } // Only want to be updating estimates when our blockchain is synced, // otherwise we'll miscalculate how many blocks its taking to get included. 
if (!validFeeEstimate) { untrackedTxs++; return; } trackedTxs++; // Feerates are stored and reported as BCC-per-kb: CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); mapMemPoolTxs[txid].blockHeight = txHeight; mapMemPoolTxs[txid].bucketIndex = feeStats.NewTx(txHeight, double(feeRate.GetFeePerK().GetSatoshis())); } bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry *entry) { if (!removeTx(entry->GetTx().GetId())) { // This transaction wasn't being tracked for fee estimation return false; } // How many blocks did it take for miners to include this transaction? // blocksToConfirm is 1-based, so a transaction included in the earliest // possible block has confirmation count of 1 int blocksToConfirm = nBlockHeight - entry->GetHeight(); if (blocksToConfirm <= 0) { // This can't happen because we don't process transactions from a block // with a height lower than our greatest seen height LogPrint( "estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n"); return false; } // Feerates are stored and reported as BCC-per-kb: CFeeRate feeRate(entry->GetFee(), entry->GetTxSize()); feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK().GetSatoshis()); return true; } void CBlockPolicyEstimator::processBlock( unsigned int nBlockHeight, std::vector &entries) { if (nBlockHeight <= nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. And if an attacker can re-org the chain at will, // then you've got much bigger problems than "attacker can influence // transaction fees." return; } // Must update nBestSeenHeight in sync with ClearCurrent so that calls to // removeTx (via processBlockTx) correctly calculate age of unconfirmed txs // to remove from tracking. 
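For reference, and not part of the diff, the per-kB figure that processTransaction and processBlockTx above hand to feeStats is simply the paid fee scaled to 1000 bytes. The FeePerK helper below is hypothetical; it mirrors the arithmetic CFeeRate(nFeePaid, nBytes).GetFeePerK() performs for a non-zero size, ignoring its overflow guards.

    // fee_per_k.cpp -- sketch of the satoshis-per-kB value fed to
    // feeStats.NewTx() and feeStats.Record().
    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper; not the real CFeeRate implementation.
    int64_t FeePerK(int64_t feePaidSatoshis, int64_t txBytes) {
        return feePaidSatoshis * 1000 / txBytes;
    }

    int main() {
        // Illustrative numbers: a 226-byte tx paying 452 satoshis is tracked
        // around the 2000 sat/kB bucket.
        std::printf("%lld sat/kB\n",
                    static_cast<long long>(FeePerK(452, 226)));
        return 0;
    }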
nBestSeenHeight = nBlockHeight; // Clear the current block state and update unconfirmed circular buffer feeStats.ClearCurrent(nBlockHeight); unsigned int countedTxs = 0; // Repopulate the current block states for (size_t i = 0; i < entries.size(); i++) { if (processBlockTx(nBlockHeight, entries[i])) { countedTxs++; } } // Update all exponential averages with the current block state feeStats.UpdateMovingAverages(); LogPrint("estimatefee", "Blockpolicy after updating estimates for %u of %u " "txs in block, since last block %u of %u tracked, " "new mempool map size %u\n", countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size()); trackedTxs = 0; untrackedTxs = 0; } CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) { // Return failure if trying to analyze a target we're not tracking // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { return CFeeRate(0); } double median = feeStats.EstimateMedianVal( confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); if (median < 0) { return CFeeRate(0); } return CFeeRate(Amount(int64_t(median))); } CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // Return failure if trying to analyze a target we're not tracking if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { return CFeeRate(0); } // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget == 1) { confTarget = 2; } double median = -1; while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) { median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); } if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget - 1; } // If mempool is limiting txs , return at least the min feerate from the // mempool Amount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); if (minPoolFee > 0 && minPoolFee > int64_t(median)) { return CFeeRate(minPoolFee); } if (median < 0) { return CFeeRate(0); } return CFeeRate(Amount(int64_t(median))); } double CBlockPolicyEstimator::estimatePriority(int confTarget) { return -1; } double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // If mempool is limiting txs, no priority txs are allowed Amount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); if (minPoolFee > 0) { return double(INF_PRIORITY.GetSatoshis()); } return -1; } void CBlockPolicyEstimator::Write(CAutoFile &fileout) { fileout << nBestSeenHeight; feeStats.Write(fileout); } void CBlockPolicyEstimator::Read(CAutoFile &filein, int nFileVersion) { int nFileBestSeenHeight; filein >> nFileBestSeenHeight; feeStats.Read(filein); nBestSeenHeight = nFileBestSeenHeight; if (nFileVersion < 139900) { TxConfirmStats priStats; priStats.Read(filein); } } FeeFilterRounder::FeeFilterRounder(const CFeeRate &minIncrementalFee) { Amount minFeeLimit = std::max(Amount(1), minIncrementalFee.GetFeePerK() / 2); feeset.insert(Amount(0)); - for (Amount bucketBoundary = minFeeLimit; bucketBoundary <= MAX_FEERATE; - bucketBoundary += bucketBoundary / FEE_SPACING_FRACTION) { - feeset.insert(bucketBoundary); + for (double 
bucketBoundary = minFeeLimit.GetSatoshis(); + bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); + bucketBoundary *= FEE_SPACING) { + feeset.insert(Amount(int64_t(bucketBoundary))); } } Amount FeeFilterRounder::round(const Amount currentMinFee) { - std::set::iterator it = feeset.lower_bound(currentMinFee); + auto it = feeset.lower_bound(currentMinFee); if ((it != feeset.begin() && insecure_rand.rand32() % 3 != 0) || it == feeset.end()) { it--; } + return *it; } diff --git a/src/policy/fees.h b/src/policy/fees.h index 8c624fb96..4eb982069 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -1,306 +1,306 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_POLICYESTIMATOR_H #define BITCOIN_POLICYESTIMATOR_H #include "amount.h" #include "random.h" #include "uint256.h" #include #include #include class CAutoFile; class CFeeRate; class CTxMemPoolEntry; class CTxMemPool; /** \class CBlockPolicyEstimator * The BlockPolicyEstimator is used for estimating the feerate needed for a * transaction to be included in a block within a certain number of blocks. * * At a high level the algorithm works by grouping transactions into buckets * based on having similar feerates and then tracking how long it takes * transactions in the various buckets to be mined. It operates under the * assumption that in general transactions of higher feerate will be included in * blocks before transactions of lower feerate. So for example if you wanted to * know what feerate you should put on a transaction to be included in a block * within the next 5 blocks, you would start by looking at the bucket with the * highest feerate transactions and verifying that a sufficiently high * percentage of them were confirmed within 5 blocks and then you would look at * the next highest feerate bucket, and so on, stopping at the last bucket to * pass the test. The average feerate of transactions in this bucket will give * you an indication of the lowest feerate you can put on a transaction and * still have a sufficiently high chance of being confirmed within your desired * 5 blocks. * * Here is a brief description of the implementation: * When a transaction enters the mempool, we track the height of the block chain * at entry. Whenever a block comes in, we count the number of transactions in * each bucket and the total amount of feerate paid in each bucket. Then we * calculate how many blocks Y it took each transaction to be mined and we track * an array of counters in each bucket for how long it to took transactions to * get confirmed from 1 to a max of 25 and we increment all the counters from Y * up to 25. This is because for any number Z>=Y the transaction was * successfully mined within Z blocks. We want to save a history of this * information, so at any time we have a counter of the total number of * transactions that happened in a given feerate bucket and the total number * that were confirmed in each number 1-25 blocks or less for any bucket. We * save this history by keeping an exponentially decaying moving average of each * one of these stats. 
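A toy illustration, not part of the diff, of the two bookkeeping steps the paragraph above describes, using MAX_BLOCK_CONFIRMS = 25 and DEFAULT_DECAY = .998 from policy/fees.h; the five-transactions-per-block figure is invented.

    // decay_illustration.cpp -- (a) one confirmed tx is credited to every
    // confirmation target >= its own; (b) the per-block decay bounds the
    // weighted totals that EstimateMedianVal compares against
    // sufficientTxVal / (1 - decay).
    #include <cstdio>
    #include <vector>

    int main() {
        const unsigned kMaxConfirms = 25; // MAX_BLOCK_CONFIRMS
        const double kDecay = 0.998;      // DEFAULT_DECAY, half-life ~346 blocks

        // (a) A tx confirmed in 3 blocks counts for targets 3 through 25.
        std::vector<double> confirmedWithin(kMaxConfirms, 0.0); // one bucket
        const unsigned blocksToConfirm = 3;
        for (unsigned t = blocksToConfirm; t <= kMaxConfirms; ++t) {
            confirmedWithin[t - 1] += 1.0;
        }
        std::printf("target 2: %.0f, target 3: %.0f, target 25: %.0f\n",
                    confirmedWithin[1], confirmedWithin[2], confirmedWithin[24]);

        // (b) With 5 matching txs every block, the decayed count converges to
        // 5 / (1 - 0.998) = 2500.
        double txCtAvg = 0.0;
        for (int block = 0; block < 5000; ++block) {
            txCtAvg = txCtAvg * kDecay + 5.0;
        }
        std::printf("steady-state weighted tx count: %.0f\n", txCtAvg);
        return 0;
    }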
Furthermore we also keep track of the number unmined (in * mempool) transactions in each bucket and for how many blocks they have been * outstanding and use that to increase the number of transactions we've seen in * that feerate bucket when calculating an estimate for any number of * confirmations below the number of blocks they've been outstanding. */ /** * We will instantiate an instance of this class to track transactions that were * included in a block. We will lump transactions into a bucket according to * their approximate feerate and then track how long it took for those txs to be * included in a block. * * The tracking of unconfirmed (mempool) transactions is completely independent * of the historical tracking of transactions that have been confirmed in a * block. */ class TxConfirmStats { private: // Define the buckets we will group transactions into // The upper-bound of the range for the bucket (inclusive) std::vector buckets; // Map of bucket upper-bound to index into all vectors by bucket std::map bucketMap; // For each bucket X: // Count the total # of txs in each bucket // Track the historical moving average of this total over blocks std::vector txCtAvg; // and calculate the total for the current block to update the moving // average std::vector curBlockTxCt; // Count the total # of txs confirmed within Y blocks in each bucket // Track the historical moving average of theses totals over blocks // confAvg[Y][X] std::vector> confAvg; // and calculate the totals for the current block to update the moving // averages // curBlockConf[Y][X] std::vector> curBlockConf; // Sum the total feerate of all tx's in each bucket // Track the historical moving average of this total over blocks std::vector avg; // and calculate the total for the current block to update the moving // average std::vector curBlockVal; // Combine the conf counts with tx counts to calculate the confirmation % // for each Y,X. Combine the total value with the tx counts to calculate the // avg feerate per bucket double decay; // Mempool counts of outstanding transactions // For each bucket X, track the number of transactions in the mempool that // are unconfirmed for each possible confirmation value Y // unconfTxs[Y][X] std::vector> unconfTxs; // transactions still unconfirmed after MAX_CONFIRMS for each bucket std::vector oldUnconfTxs; public: /** * Initialize the data structures. This is called by BlockPolicyEstimator's * constructor with default values. * @param defaultBuckets contains the upper limits for the bucket boundaries * @param maxConfirms max number of confirms to track * @param decay how much to decay the historical moving average per block */ void Initialize(std::vector &defaultBuckets, unsigned int maxConfirms, double decay); /** * Clear the state of the curBlock variables to start counting for the new * block. 
*/ void ClearCurrent(unsigned int nBlockHeight); /** * Record a new transaction data point in the current block stats * @param blocksToConfirm the number of blocks it took this transaction to * confirm * @param val the feerate of the transaction * @warning blocksToConfirm is 1-based and has to be >= 1 */ void Record(int blocksToConfirm, double val); /** Record a new transaction entering the mempool*/ unsigned int NewTx(unsigned int nBlockHeight, double val); /** Remove a transaction from mempool tracking stats*/ void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketIndex); /** * Update our estimates by decaying our historical moving average and * updating with the data gathered from the current block. */ void UpdateMovingAverages(); /** * Calculate a feerate estimate. Find the lowest value bucket (or range of * buckets to make sure we have enough data points) whose transactions still * have sufficient likelihood of being confirmed within the target number of * confirmations * @param confTarget target number of confirmations * @param sufficientTxVal required average number of transactions per block * in a bucket range * @param minSuccess the success probability we require * @param requireGreater return the lowest feerate such that all higher * values pass minSuccess OR * return the highest feerate such that all lower values fail * minSuccess * @param nBlockHeight the current block height */ double EstimateMedianVal(int confTarget, double sufficientTxVal, double minSuccess, bool requireGreater, unsigned int nBlockHeight); /** Return the max number of confirms we're tracking */ unsigned int GetMaxConfirms() { return confAvg.size(); } /** Write state of estimation data to a file*/ void Write(CAutoFile &fileout); /** * Read saved state of estimation data from a file and replace all internal * data structures and variables with this state. */ void Read(CAutoFile &filein); }; /** Track confirm delays up to 25 blocks, can't estimate beyond that */ static const unsigned int MAX_BLOCK_CONFIRMS = 25; /** Decay of .998 is a half-life of 346 blocks or about 2.4 days */ static const double DEFAULT_DECAY = .998; /** Require greater than 95% of X feerate transactions to be confirmed within Y * blocks for X to be big enough */ static const double MIN_SUCCESS_PCT = .95; /** Require an avg of 1 tx in the combined feerate bucket per block to have stat * significance */ static const double SUFFICIENT_FEETXS = 1; // Minimum and Maximum values for tracking feerates -static constexpr double MIN_FEERATE = 10; +static constexpr Amount MIN_FEERATE(10); static const Amount MAX_FEERATE(int64_t(1e7)); static const Amount INF_FEERATE(MAX_MONEY); static const Amount INF_PRIORITY(int64_t(1e9) * MAX_MONEY); // We have to lump transactions into buckets based on feerate, but we want to be // able to give accurate estimates over a large range of potential feerates. // Therefore it makes sense to exponentially space the buckets /** Spacing of FeeRate buckets */ -static const int64_t FEE_SPACING_FRACTION = 10; +static const double FEE_SPACING = 1.1; /** * We want to be able to estimate feerates that are needed on tx's to be * included in a certain number of blocks. Every time a block is added to the * best chain, this class records stats on the transactions included in that * block */ class CBlockPolicyEstimator { public: /** * Create new BlockPolicyEstimator and initialize stats tracking classes * with default values. 
*/ CBlockPolicyEstimator(const CFeeRate &minRelayFee); /** Process all the transactions that have been included in a block */ void processBlock(unsigned int nBlockHeight, std::vector &entries); /** Process a transaction confirmed in a block*/ bool processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry *entry); /** Process a transaction accepted to the mempool*/ void processTransaction(const CTxMemPoolEntry &entry, bool validFeeEstimate); /** Remove a transaction from the mempool tracking stats*/ bool removeTx(uint256 hash); /** Return a feerate estimate */ CFeeRate estimateFee(int confTarget); /** Estimate feerate needed to get be included in a block within * confTarget blocks. If no answer can be given at confTarget, return an * estimate at the lowest target where one can be given. */ CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool); /** * Return a priority estimate. * DEPRECATED * Returns -1 */ double estimatePriority(int confTarget); /** * Estimate priority needed to get be included in a block within confTarget * blocks. * DEPRECATED * Returns -1 unless mempool is currently limited then returns INF_PRIORITY * answerFoundAtTarget is set to confTarget */ double estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool); /** Write estimation data to a file */ void Write(CAutoFile &fileout); /** Read estimation data from a file */ void Read(CAutoFile &filein, int nFileVersion); private: //!< Passed to constructor to avoid dependency on main CFeeRate minTrackedFee; unsigned int nBestSeenHeight; struct TxStatsInfo { unsigned int blockHeight; unsigned int bucketIndex; TxStatsInfo() : blockHeight(0), bucketIndex(0) {} }; // map of txids to information about that transaction std::map mapMemPoolTxs; /** Classes to track historical data on transaction confirmations */ TxConfirmStats feeStats; unsigned int trackedTxs; unsigned int untrackedTxs; }; class FeeFilterRounder { public: /** Create new FeeFilterRounder */ FeeFilterRounder(const CFeeRate &minIncrementalFee); /** Quantize a minimum fee for privacy purpose before broadcast **/ Amount round(const Amount currentMinFee); private: std::set feeset; FastRandomContext insecure_rand; }; #endif /*BITCOIN_POLICYESTIMATOR_H */
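A standalone sketch, not part of the diff, of the quantize-then-randomly-round-down behaviour FeeFilterRounder::round implements above: look up the first bucket boundary at or above the fee, then usually step down one bucket so the broadcast filter value leaks less about the exact mempool minimum. The set contents, the RoundFee name and the std::mt19937 source are stand-ins for feeset and insecure_rand.

    // fee_filter_round.cpp -- toy version of FeeFilterRounder::round().
    #include <cstdint>
    #include <cstdio>
    #include <random>
    #include <set>

    int64_t RoundFee(const std::set<int64_t> &feeset, int64_t currentMinFee,
                     std::mt19937 &rng) {
        auto it = feeset.lower_bound(currentMinFee);
        // Step down one bucket two times out of three, and always when the fee
        // exceeds every boundary, mirroring the logic in the diff.
        if ((it != feeset.begin() && rng() % 3 != 0) || it == feeset.end()) {
            --it;
        }
        return *it;
    }

    int main() {
        std::set<int64_t> feeset = {0, 1000, 1100, 1210, 1331}; // toy boundaries
        std::mt19937 rng(42);
        for (int i = 0; i < 5; ++i) {
            std::printf("round(1200) -> %lld\n",
                        static_cast<long long>(RoundFee(feeset, 1200, rng)));
        }
        return 0;
    }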
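Finally, a toy model, not part of the diff, of the fallback estimateSmartFee performs: when no estimate exists at the requested target, it keeps raising the target until one is found and reports where the answer came from. FindEstimate is a hypothetical stand-in for feeStats.EstimateMedianVal, and the 6-block cutoff and 12345 sat/kB figure are invented.

    // smart_fee_fallback.cpp -- target escalation as in estimateSmartFee().
    #include <cstdio>

    // Hypothetical stand-in: pretend data only supports targets of 6 or more.
    double FindEstimate(int confTarget) {
        return confTarget >= 6 ? 12345.0 : -1.0;
    }

    int main() {
        int confTarget = 2;         // the caller asked for 2 blocks
        const int maxConfirms = 25; // MAX_BLOCK_CONFIRMS
        double median = -1.0;
        while (median < 0 && confTarget <= maxConfirms) {
            median = FindEstimate(confTarget++);
        }
        const int answerFoundAtTarget = confTarget - 1;
        std::printf("estimate %.0f sat/kB found at target %d\n", median,
                    answerFoundAtTarget);
        return 0;
    }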