diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index f7052aeca..80b248587 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -1,580 +1,580 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "policy/fees.h" #include "policy/policy.h" #include "amount.h" #include "primitives/transaction.h" #include "random.h" #include "streams.h" #include "txmempool.h" #include "util.h" void TxConfirmStats::Initialize(std::vector &defaultBuckets, unsigned int maxConfirms, double _decay) { decay = _decay; for (size_t i = 0; i < defaultBuckets.size(); i++) { buckets.push_back(defaultBuckets[i]); bucketMap[defaultBuckets[i]] = i; } confAvg.resize(maxConfirms); curBlockConf.resize(maxConfirms); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { confAvg[i].resize(buckets.size()); curBlockConf[i].resize(buckets.size()); unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); curBlockTxCt.resize(buckets.size()); txCtAvg.resize(buckets.size()); curBlockVal.resize(buckets.size()); avg.resize(buckets.size()); } // Zero out the data for the current block void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight) { for (size_t j = 0; j < buckets.size(); j++) { oldUnconfTxs[j] += unconfTxs[nBlockHeight % unconfTxs.size()][j]; unconfTxs[nBlockHeight % unconfTxs.size()][j] = 0; for (size_t i = 0; i < curBlockConf.size(); i++) { curBlockConf[i][j] = 0; } curBlockTxCt[j] = 0; curBlockVal[j] = 0; } } void TxConfirmStats::Record(int blocksToConfirm, double val) { // blocksToConfirm is 1-based if (blocksToConfirm < 1) { return; } unsigned int bucketindex = bucketMap.lower_bound(val)->second; for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) { curBlockConf[i - 1][bucketindex]++; } curBlockTxCt[bucketindex]++; curBlockVal[bucketindex] += val; } void TxConfirmStats::UpdateMovingAverages() { for (unsigned int j = 0; j < buckets.size(); j++) { for (unsigned int i = 0; i < confAvg.size(); i++) { confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j]; } avg[j] = avg[j] * decay + curBlockVal[j]; txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j]; } } // returns -1 on error conditions double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, double successBreakPoint, bool requireGreater, unsigned int nBlockHeight) { // Counters for a bucket (or range of buckets) // Number of tx's confirmed within the confTarget double nConf = 0; // Total number of tx's that were ever confirmed double totalNum = 0; // Number of tx's still in mempool for confTarget or longer int extraNum = 0; int maxbucketindex = buckets.size() - 1; // requireGreater means we are looking for the lowest feerate such that all // higher values pass, so we start at maxbucketindex (highest feerate) and // look at successively smaller buckets until we reach failure. Otherwise, // we are looking for the highest feerate such that all lower values fail, // and we go in the opposite direction. unsigned int startbucket = requireGreater ? maxbucketindex : 0; int step = requireGreater ? -1 : 1; // We'll combine buckets until we have enough samples. // The near and far variables will define the range we've combined // The best variables are the last range we saw which still had a high // enough confirmation rate to count as success. 
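// ---------------------------------------------------------------------------
// Illustration (not part of the patch): a minimal, self-contained sketch of
// two mechanisms TxConfirmStats relies on above -- looking up a feerate's
// bucket with std::map::lower_bound, and folding each block's per-bucket
// totals into exponentially decayed running sums (x = x * decay + current).
// Every name below (ToyBucketStats etc.) is hypothetical; the real class also
// keeps the per-confirmation-count matrix confAvg updated the same way.
// ---------------------------------------------------------------------------
#include <map>
#include <vector>

struct ToyBucketStats {
    std::vector<double> buckets;               // bucket boundaries (feerates)
    std::map<double, unsigned int> bucketMap;  // boundary -> bucket index
    std::vector<double> avg;                   // decayed feerate mass
    std::vector<double> txCtAvg;               // decayed tx count
    std::vector<double> curVal, curCt;         // current-block accumulators
    double decay;

    ToyBucketStats(const std::vector<double> &bounds, double d)
        : buckets(bounds), avg(bounds.size(), 0), txCtAvg(bounds.size(), 0),
          curVal(bounds.size(), 0), curCt(bounds.size(), 0), decay(d) {
        for (size_t i = 0; i < buckets.size(); i++) {
            bucketMap[buckets[i]] = i;
        }
    }

    // Record one confirmed tx at feerate `val` into the current block.  Assumes
    // the last boundary is a sentinel above any real feerate (the real code
    // appends an "infinite" bucket), so lower_bound() never returns end().
    void Record(double val) {
        unsigned int b = bucketMap.lower_bound(val)->second;
        curCt[b] += 1;
        curVal[b] += val;
    }

    // Called once per block: decay the history and add the new block's data.
    void UpdateMovingAverages() {
        for (size_t j = 0; j < buckets.size(); j++) {
            avg[j] = avg[j] * decay + curVal[j];
            txCtAvg[j] = txCtAvg[j] * decay + curCt[j];
            curVal[j] = curCt[j] = 0;
        }
    }
};

// With the default decay of .998 a data point loses about half its weight
// after roughly 346 blocks (0.998^346 ~ 0.5), and the steady-state mass of
// one tx per block is 1 / (1 - decay) -- which is why EstimateMedianVal
// compares totalNum against sufficientTxVal / (1 - decay) before testing a
// bucket range for success.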
// The cur variables are the current range we're counting. unsigned int curNearBucket = startbucket; unsigned int bestNearBucket = startbucket; unsigned int curFarBucket = startbucket; unsigned int bestFarBucket = startbucket; bool foundAnswer = false; unsigned int bins = unconfTxs.size(); // Start counting from highest(default) or lowest feerate transactions for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) { curFarBucket = bucket; nConf += confAvg[confTarget - 1][bucket]; totalNum += txCtAvg[bucket]; for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++) { extraNum += unconfTxs[(nBlockHeight - confct) % bins][bucket]; } extraNum += oldUnconfTxs[bucket]; // If we have enough transaction data points in this range of buckets, // we can test for success (Only count the confirmed data points, so // that each confirmation count will be looking at the same amount of // data and same bucket breaks) if (totalNum >= sufficientTxVal / (1 - decay)) { double curPct = nConf / (totalNum + extraNum); // Check to see if we are no longer getting confirmed at the success // rate if (requireGreater && curPct < successBreakPoint) { break; } if (!requireGreater && curPct > successBreakPoint) { break; } // Otherwise update the cumulative stats, and the bucket variables // and reset the counters foundAnswer = true; nConf = 0; totalNum = 0; extraNum = 0; bestNearBucket = curNearBucket; bestFarBucket = curFarBucket; curNearBucket = bucket + step; } } double median = -1; double txSum = 0; // Calculate the "average" feerate of the best bucket range that met success // conditions. Find the bucket with the median transaction and then report // the average feerate from that bucket. This is a compromise between // finding the median which we can't since we don't save all tx's and // reporting the average which is less accurate unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket; unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket; for (unsigned int j = minBucket; j <= maxBucket; j++) { txSum += txCtAvg[j]; } if (foundAnswer && txSum != 0) { txSum = txSum / 2; for (unsigned int j = minBucket; j <= maxBucket; j++) { if (txCtAvg[j] < txSum) { txSum -= txCtAvg[j]; } else { // we're in the right bucket median = avg[j] / txCtAvg[j]; break; } } } LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: " "%12.5g from buckets %8g - %8g Cur Bucket stats " "%6.2f%% %8.1f/(%.1f+%d mempool)\n", confTarget, requireGreater ? ">" : "<", successBreakPoint, requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket], 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum); return median; } void TxConfirmStats::Write(CAutoFile &fileout) { fileout << decay; fileout << buckets; fileout << avg; fileout << txCtAvg; fileout << confAvg; } void TxConfirmStats::Read(CAutoFile &filein) { // Read data file into temporary variables and do some very basic sanity // checking std::vector fileBuckets; std::vector fileAvg; std::vector> fileConfAvg; std::vector fileTxCtAvg; double fileDecay; size_t maxConfirms; size_t numBuckets; filein >> fileDecay; if (fileDecay <= 0 || fileDecay >= 1) { throw std::runtime_error("Corrupt estimates file. Decay must be " "between 0 and 1 (non-inclusive)"); } filein >> fileBuckets; numBuckets = fileBuckets.size(); if (numBuckets <= 1 || numBuckets > 1000) { throw std::runtime_error("Corrupt estimates file. 
Must have between 2 " "and 1000 feerate buckets"); } filein >> fileAvg; if (fileAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in feerate average bucket count"); } filein >> fileTxCtAvg; if (fileTxCtAvg.size() != numBuckets) { throw std::runtime_error( "Corrupt estimates file. Mismatch in tx count bucket count"); } filein >> fileConfAvg; maxConfirms = fileConfAvg.size(); if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week throw std::runtime_error("Corrupt estimates file. Must maintain " "estimates for between 1 and 1008 (one week) " "confirms"); } for (unsigned int i = 0; i < maxConfirms; i++) { if (fileConfAvg[i].size() != numBuckets) { throw std::runtime_error("Corrupt estimates file. Mismatch in " "feerate conf average bucket count"); } } // Now that we've processed the entire feerate estimate data file and not // thrown any errors, we can copy it to our data structures decay = fileDecay; buckets = fileBuckets; avg = fileAvg; confAvg = fileConfAvg; txCtAvg = fileTxCtAvg; bucketMap.clear(); // Resize the current block variables which aren't stored in the data file // to match the number of confirms and buckets curBlockConf.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { curBlockConf[i].resize(buckets.size()); } curBlockTxCt.resize(buckets.size()); curBlockVal.resize(buckets.size()); unconfTxs.resize(maxConfirms); for (unsigned int i = 0; i < maxConfirms; i++) { unconfTxs[i].resize(buckets.size()); } oldUnconfTxs.resize(buckets.size()); for (size_t i = 0; i < buckets.size(); i++) { bucketMap[buckets[i]] = i; } LogPrint( "estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n", numBuckets, maxConfirms); } unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) { unsigned int bucketindex = bucketMap.lower_bound(val)->second; unsigned int blockIndex = nBlockHeight % unconfTxs.size(); unconfTxs[blockIndex][bucketindex]++; return bucketindex; } void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex) { // nBestSeenHeight is not updated yet for the new block int blocksAgo = nBestSeenHeight - entryHeight; if (nBestSeenHeight == 0) { // the BlockPolicyEstimator hasn't seen any blocks yet blocksAgo = 0; } if (blocksAgo < 0) { // This can't happen because we call this with our best seen height, no // entries can have higher LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n"); return; } if (blocksAgo >= (int)unconfTxs.size()) { if (oldUnconfTxs[bucketindex] > 0) { oldUnconfTxs[bucketindex]--; } else { LogPrint("estimatefee", "Blockpolicy error, mempool tx removed " "from >25 blocks,bucketIndex=%u already\n", bucketindex); } } else { unsigned int blockIndex = entryHeight % unconfTxs.size(); if (unconfTxs[blockIndex][bucketindex] > 0) { unconfTxs[blockIndex][bucketindex]--; } else { LogPrint("estimatefee", "Blockpolicy error, mempool tx removed " "from blockIndex=%u,bucketIndex=%u " "already\n", blockIndex, bucketindex); } } } // This function is called from CTxMemPool::removeUnchecked to ensure txs // removed from the mempool for any reason are no longer tracked. Txs that were // part of a block have already been removed in processBlockTx to ensure they // are never double tracked, but it is of no harm to try to remove them again. 
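// ---------------------------------------------------------------------------
// Illustration (not part of the patch): NewTx()/removeTx() above track
// unconfirmed transactions in a circular buffer indexed by
// (blockHeight % maxConfirms), spilling anything older than the buffer into a
// single oldUnconfTxs overflow counter.  A hypothetical stripped-down version
// (one counter per slot, the per-feerate-bucket dimension omitted):
// ---------------------------------------------------------------------------
#include <cstddef>
#include <vector>

struct ToyUnconfirmed {
    std::vector<int> byBlock;  // byBlock[h % size()] = txs that entered at height h
    int old = 0;               // txs older than the buffer covers

    explicit ToyUnconfirmed(size_t maxConfirms) : byBlock(maxConfirms, 0) {}

    void Add(unsigned int entryHeight) {
        byBlock[entryHeight % byBlock.size()]++;
    }

    // On a new block, fold the slot about to be reused into `old`
    // (this mirrors what ClearCurrent() does per bucket).
    void NewBlock(unsigned int newHeight) {
        int &slot = byBlock[newHeight % byBlock.size()];
        old += slot;
        slot = 0;
    }

    void Remove(unsigned int entryHeight, unsigned int bestHeight) {
        // Mirrors removeTx(): entries older than the buffer decrement the
        // overflow counter, everything else decrements its original slot.
        if (bestHeight - entryHeight >= byBlock.size()) {
            if (old > 0) old--;
        } else {
            int &slot = byBlock[entryHeight % byBlock.size()];
            if (slot > 0) slot--;
        }
    }
};

// The real removeTx() additionally guards the cases where no block has been
// seen yet (nBestSeenHeight == 0) and where the computed age would be
// negative, logging an error instead of touching the counters.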
bool CBlockPolicyEstimator::removeTx(uint256 hash) { std::map::iterator pos = mapMemPoolTxs.find(hash); if (pos == mapMemPoolTxs.end()) { return false; } feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex); mapMemPoolTxs.erase(hash); return true; } CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate &_minRelayFee) : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0) { static_assert(MIN_FEERATE > Amount(0), "Min feerate must be nonzero"); CFeeRate minFeeRate(MIN_FEERATE); minTrackedFee = _minRelayFee < minFeeRate ? minFeeRate : _minRelayFee; std::vector vfeelist; for (double bucketBoundary = minTrackedFee.GetFeePerK().GetSatoshis(); bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); bucketBoundary *= FEE_SPACING) { vfeelist.push_back(bucketBoundary); } vfeelist.push_back(double(INF_FEERATE.GetSatoshis())); feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY); } void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry &entry, bool validFeeEstimate) { uint32_t txHeight = entry.GetHeight(); uint256 txid = entry.GetTx().GetId(); if (mapMemPoolTxs.count(txid)) { LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n", txid.ToString().c_str()); return; } if (txHeight != nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. We'll potentially double count transactions in // 1-block reorgs. Ignore txs if BlockPolicyEstimator is not in sync // with chainActive.Tip(). It will be synced next time a block is // processed. return; } // Only want to be updating estimates when our blockchain is synced, // otherwise we'll miscalculate how many blocks its taking to get included. if (!validFeeEstimate) { untrackedTxs++; return; } trackedTxs++; // Feerates are stored and reported as BCH-per-kb: CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); mapMemPoolTxs[txid].blockHeight = txHeight; mapMemPoolTxs[txid].bucketIndex = feeStats.NewTx(txHeight, double(feeRate.GetFeePerK().GetSatoshis())); } bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry *entry) { if (!removeTx(entry->GetTx().GetId())) { // This transaction wasn't being tracked for fee estimation return false; } // How many blocks did it take for miners to include this transaction? // blocksToConfirm is 1-based, so a transaction included in the earliest // possible block has confirmation count of 1 int blocksToConfirm = nBlockHeight - entry->GetHeight(); if (blocksToConfirm <= 0) { // This can't happen because we don't process transactions from a block // with a height lower than our greatest seen height LogPrint( "estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n"); return false; } // Feerates are stored and reported as BCH-per-kb: CFeeRate feeRate(entry->GetFee(), entry->GetTxSize()); feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK().GetSatoshis()); return true; } void CBlockPolicyEstimator::processBlock( unsigned int nBlockHeight, std::vector &entries) { if (nBlockHeight <= nBestSeenHeight) { // Ignore side chains and re-orgs; assuming they are random they don't // affect the estimate. And if an attacker can re-org the chain at will, // then you've got much bigger problems than "attacker can influence // transaction fees." return; } // Must update nBestSeenHeight in sync with ClearCurrent so that calls to // removeTx (via processBlockTx) correctly calculate age of unconfirmed txs // to remove from tracking. 
nBestSeenHeight = nBlockHeight; // Clear the current block state and update unconfirmed circular buffer feeStats.ClearCurrent(nBlockHeight); unsigned int countedTxs = 0; // Repopulate the current block states for (size_t i = 0; i < entries.size(); i++) { if (processBlockTx(nBlockHeight, entries[i])) { countedTxs++; } } // Update all exponential averages with the current block state feeStats.UpdateMovingAverages(); LogPrint("estimatefee", "Blockpolicy after updating estimates for %u of %u " "txs in block, since last block %u of %u tracked, " "new mempool map size %u\n", countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size()); trackedTxs = 0; untrackedTxs = 0; } CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) { // Return failure if trying to analyze a target we're not tracking // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { - return CFeeRate(0); + return CFeeRate(Amount(0)); } double median = feeStats.EstimateMedianVal( confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); if (median < 0) { - return CFeeRate(0); + return CFeeRate(Amount(0)); } return CFeeRate(Amount(int64_t(median))); } CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // Return failure if trying to analyze a target we're not tracking if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) { - return CFeeRate(0); + return CFeeRate(Amount(0)); } // It's not possible to get reasonable estimates for confTarget of 1 if (confTarget == 1) { confTarget = 2; } double median = -1; while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) { median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); } if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget - 1; } // If mempool is limiting txs , return at least the min feerate from the // mempool Amount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); - if (minPoolFee > 0 && minPoolFee > int64_t(median)) { + if (minPoolFee > Amount(0) && minPoolFee > Amount(int64_t(median))) { return CFeeRate(minPoolFee); } if (median < 0) { - return CFeeRate(0); + return CFeeRate(Amount(0)); } return CFeeRate(Amount(int64_t(median))); } double CBlockPolicyEstimator::estimatePriority(int confTarget) { return -1; } double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool) { if (answerFoundAtTarget) { *answerFoundAtTarget = confTarget; } // If mempool is limiting txs, no priority txs are allowed Amount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); - if (minPoolFee > 0) { + if (minPoolFee > Amount(0)) { return double(INF_PRIORITY.GetSatoshis()); } return -1; } void CBlockPolicyEstimator::Write(CAutoFile &fileout) { fileout << nBestSeenHeight; feeStats.Write(fileout); } void CBlockPolicyEstimator::Read(CAutoFile &filein, int nFileVersion) { int nFileBestSeenHeight; filein >> nFileBestSeenHeight; feeStats.Read(filein); nBestSeenHeight = nFileBestSeenHeight; if (nFileVersion < 139900) { TxConfirmStats priStats; priStats.Read(filein); } } FeeFilterRounder::FeeFilterRounder(const CFeeRate &minIncrementalFee) { Amount minFeeLimit = std::max(Amount(1), 
minIncrementalFee.GetFeePerK() / 2); feeset.insert(Amount(0)); for (double bucketBoundary = minFeeLimit.GetSatoshis(); bucketBoundary <= double(MAX_FEERATE.GetSatoshis()); bucketBoundary *= FEE_SPACING) { feeset.insert(Amount(int64_t(bucketBoundary))); } } Amount FeeFilterRounder::round(const Amount currentMinFee) { auto it = feeset.lower_bound(currentMinFee); if ((it != feeset.begin() && insecure_rand.rand32() % 3 != 0) || it == feeset.end()) { it--; } return *it; } diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp index b80481000..4b447b2cf 100644 --- a/src/primitives/transaction.cpp +++ b/src/primitives/transaction.cpp @@ -1,143 +1,143 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "primitives/transaction.h" #include "hash.h" #include "tinyformat.h" #include "utilstrencodings.h" std::string COutPoint::ToString() const { return strprintf("COutPoint(%s, %u)", hash.ToString().substr(0, 10), n); } CTxIn::CTxIn(COutPoint prevoutIn, CScript scriptSigIn, uint32_t nSequenceIn) { prevout = prevoutIn; scriptSig = scriptSigIn; nSequence = nSequenceIn; } CTxIn::CTxIn(uint256 hashPrevTx, uint32_t nOut, CScript scriptSigIn, uint32_t nSequenceIn) { prevout = COutPoint(hashPrevTx, nOut); scriptSig = scriptSigIn; nSequence = nSequenceIn; } std::string CTxIn::ToString() const { std::string str; str += "CTxIn("; str += prevout.ToString(); if (prevout.IsNull()) str += strprintf(", coinbase %s", HexStr(scriptSig)); else str += strprintf(", scriptSig=%s", HexStr(scriptSig).substr(0, 24)); if (nSequence != SEQUENCE_FINAL) str += strprintf(", nSequence=%u", nSequence); str += ")"; return str; } CTxOut::CTxOut(const Amount &nValueIn, CScript scriptPubKeyIn) { nValue = nValueIn; scriptPubKey = scriptPubKeyIn; } std::string CTxOut::ToString() const { return strprintf("CTxOut(nValue=%d.%08d, scriptPubKey=%s)", nValue.GetSatoshis() / COIN.GetSatoshis(), nValue.GetSatoshis() % COIN.GetSatoshis(), HexStr(scriptPubKey).substr(0, 30)); } CMutableTransaction::CMutableTransaction() : nVersion(CTransaction::CURRENT_VERSION), nLockTime(0) {} CMutableTransaction::CMutableTransaction(const CTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime) {} uint256 CMutableTransaction::GetId() const { return SerializeHash(*this, SER_GETHASH, 0); } uint256 CTransaction::ComputeHash() const { return SerializeHash(*this, SER_GETHASH, 0); } uint256 CTransaction::GetHash() const { return GetId(); } /** * For backward compatibility, the hash is initialized to 0. * TODO: remove the need for this default constructor entirely. 
*/ CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0), hash() {} CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime), hash(ComputeHash()) {} CTransaction::CTransaction(CMutableTransaction &&tx) : nVersion(tx.nVersion), vin(std::move(tx.vin)), vout(std::move(tx.vout)), nLockTime(tx.nLockTime), hash(ComputeHash()) {} Amount CTransaction::GetValueOut() const { - Amount nValueOut = 0; + Amount nValueOut(0); for (std::vector::const_iterator it(vout.begin()); it != vout.end(); ++it) { nValueOut += it->nValue; if (!MoneyRange(it->nValue) || !MoneyRange(nValueOut)) throw std::runtime_error(std::string(__func__) + ": value out of range"); } return nValueOut; } double CTransaction::ComputePriority(double dPriorityInputs, unsigned int nTxSize) const { nTxSize = CalculateModifiedSize(nTxSize); if (nTxSize == 0) return 0.0; return dPriorityInputs / nTxSize; } unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const { // In order to avoid disincentivizing cleaning up the UTXO set we don't // count the constant overhead for each txin and up to 110 bytes of // scriptSig (which is enough to cover a compressed pubkey p2sh redemption) // for priority. Providing any more cleanup incentive than making additional // inputs free would risk encouraging people to create junk outputs to // redeem later. if (nTxSize == 0) nTxSize = GetTransactionSize(*this); for (std::vector::const_iterator it(vin.begin()); it != vin.end(); ++it) { unsigned int offset = 41U + std::min(110U, (unsigned int)it->scriptSig.size()); if (nTxSize > offset) nTxSize -= offset; } return nTxSize; } unsigned int CTransaction::GetTotalSize() const { return ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION); } std::string CTransaction::ToString() const { std::string str; str += strprintf("CTransaction(txid=%s, ver=%d, vin.size=%u, vout.size=%u, " "nLockTime=%u)\n", GetId().ToString().substr(0, 10), nVersion, vin.size(), vout.size(), nLockTime); for (unsigned int i = 0; i < vin.size(); i++) str += " " + vin[i].ToString() + "\n"; for (unsigned int i = 0; i < vout.size(); i++) str += " " + vout[i].ToString() + "\n"; return str; } int64_t GetTransactionSize(const CTransaction &tx) { return ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); } diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index 91a72a69b..3b205c1f5 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -1,365 +1,365 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
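// ---------------------------------------------------------------------------
// Illustration (not part of the patch): GetValueOut() above validates both
// each output value and the running total with MoneyRange() so a crafted
// transaction cannot push the 64-bit satoshi sum out of range.  A hypothetical
// standalone equivalent over plain int64_t values, using the same
// 21-million-coin bound and the same check order as the loop above:
// ---------------------------------------------------------------------------
#include <cstdint>
#include <stdexcept>
#include <vector>

static const int64_t TOY_MAX_MONEY = int64_t(21000000) * 100000000; // satoshis

static bool ToyMoneyRange(int64_t v) {
    return v >= 0 && v <= TOY_MAX_MONEY;
}

int64_t ToyValueOut(const std::vector<int64_t> &outputs) {
    int64_t total = 0;
    for (int64_t v : outputs) {
        total += v;
        // Check both the individual term and the partial sum, as above.
        if (!ToyMoneyRange(v) || !ToyMoneyRange(total)) {
            throw std::runtime_error("value out of range");
        }
    }
    return total;
}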
#ifndef BITCOIN_PRIMITIVES_TRANSACTION_H #define BITCOIN_PRIMITIVES_TRANSACTION_H #include "amount.h" #include "script/script.h" #include "serialize.h" #include "uint256.h" static const int SERIALIZE_TRANSACTION = 0x00; /** An outpoint - a combination of a transaction hash and an index n into its * vout */ class COutPoint { public: uint256 hash; uint32_t n; COutPoint() { SetNull(); } COutPoint(uint256 hashIn, uint32_t nIn) { hash = hashIn; n = nIn; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(hash); READWRITE(n); } void SetNull() { hash.SetNull(); n = (uint32_t)-1; } bool IsNull() const { return (hash.IsNull() && n == (uint32_t)-1); } friend bool operator<(const COutPoint &a, const COutPoint &b) { int cmp = a.hash.Compare(b.hash); return cmp < 0 || (cmp == 0 && a.n < b.n); } friend bool operator==(const COutPoint &a, const COutPoint &b) { return (a.hash == b.hash && a.n == b.n); } friend bool operator!=(const COutPoint &a, const COutPoint &b) { return !(a == b); } std::string ToString() const; }; /** An input of a transaction. It contains the location of the previous * transaction's output that it claims and a signature that matches the * output's public key. */ class CTxIn { public: COutPoint prevout; CScript scriptSig; uint32_t nSequence; /* Setting nSequence to this value for every input in a transaction * disables nLockTime. */ static const uint32_t SEQUENCE_FINAL = 0xffffffff; /* Below flags apply in the context of BIP 68*/ /* If this flag set, CTxIn::nSequence is NOT interpreted as a * relative lock-time. */ static const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31); /* If CTxIn::nSequence encodes a relative lock-time and this flag * is set, the relative lock-time has units of 512 seconds, * otherwise it specifies blocks with a granularity of 1. */ static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22); /* If CTxIn::nSequence encodes a relative lock-time, this mask is * applied to extract that lock-time from the sequence field. */ static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff; /* In order to use the same number of bits to encode roughly the * same wall-clock duration, and because blocks are naturally * limited to occur every 600s on average, the minimum granularity * for time-based relative lock-time is fixed at 512 seconds. * Converting from CTxIn::nSequence to seconds is performed by * multiplying by 512 = 2^9, or equivalently shifting up by * 9 bits. */ static const int SEQUENCE_LOCKTIME_GRANULARITY = 9; CTxIn() { nSequence = SEQUENCE_FINAL; } explicit CTxIn(COutPoint prevoutIn, CScript scriptSigIn = CScript(), uint32_t nSequenceIn = SEQUENCE_FINAL); CTxIn(uint256 hashPrevTx, uint32_t nOut, CScript scriptSigIn = CScript(), uint32_t nSequenceIn = SEQUENCE_FINAL); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(prevout); READWRITE(*(CScriptBase *)(&scriptSig)); READWRITE(nSequence); } friend bool operator==(const CTxIn &a, const CTxIn &b) { return (a.prevout == b.prevout && a.scriptSig == b.scriptSig && a.nSequence == b.nSequence); } friend bool operator!=(const CTxIn &a, const CTxIn &b) { return !(a == b); } std::string ToString() const; }; /** * An output of a transaction. It contains the public key that the next input * must be able to sign with to claim it. 
*/ class CTxOut { public: Amount nValue; CScript scriptPubKey; CTxOut() { SetNull(); } CTxOut(const Amount &nValueIn, CScript scriptPubKeyIn); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(nValue); READWRITE(*(CScriptBase *)(&scriptPubKey)); } void SetNull() { - nValue = -1; + nValue = Amount(-1); scriptPubKey.clear(); } - bool IsNull() const { return (nValue == -1); } + bool IsNull() const { return (nValue == Amount(-1)); } Amount GetDustThreshold(const CFeeRate &minRelayTxFee) const { // "Dust" is defined in terms of CTransaction::minRelayTxFee, which has // units satoshis-per-kilobyte. If you'd pay more than 1/3 in fees to // spend something, then we consider it dust. A typical spendable // non-segwit txout is 34 bytes big, and will need a CTxIn of at least // 148 bytes to spend: so dust is a spendable txout less than // 546*minRelayTxFee/1000 (in satoshis). A typical spendable segwit // txout is 31 bytes big, and will need a CTxIn of at least 67 bytes to // spend: so dust is a spendable txout less than 294*minRelayTxFee/1000 // (in satoshis). - if (scriptPubKey.IsUnspendable()) return 0; + if (scriptPubKey.IsUnspendable()) return Amount(0); size_t nSize = GetSerializeSize(*this, SER_DISK, 0); // the 148 mentioned above nSize += (32 + 4 + 1 + 107 + 4); return 3 * minRelayTxFee.GetFee(nSize); } bool IsDust(const CFeeRate &minRelayTxFee) const { return (nValue < GetDustThreshold(minRelayTxFee)); } friend bool operator==(const CTxOut &a, const CTxOut &b) { return (a.nValue == b.nValue && a.scriptPubKey == b.scriptPubKey); } friend bool operator!=(const CTxOut &a, const CTxOut &b) { return !(a == b); } std::string ToString() const; }; struct CMutableTransaction; /** * Basic transaction serialization format: * - int32_t nVersion * - std::vector vin * - std::vector vout * - uint32_t nLockTime */ template inline void UnserializeTransaction(TxType &tx, Stream &s) { s >> tx.nVersion; tx.vin.clear(); tx.vout.clear(); /* Try to read the vin. In case the dummy is there, this will be read as an * empty vector. */ s >> tx.vin; /* We read a non-empty vin. Assume a normal vout follows. */ s >> tx.vout; s >> tx.nLockTime; } template inline void SerializeTransaction(const TxType &tx, Stream &s) { s << tx.nVersion; s << tx.vin; s << tx.vout; s << tx.nLockTime; } /** The basic transaction that is broadcasted on the network and contained in * blocks. A transaction can contain multiple inputs and outputs. */ class CTransaction { public: // Default transaction version. static const int32_t CURRENT_VERSION = 2; // Changing the default transaction version requires a two step process: // first adapting relay policy by bumping MAX_STANDARD_VERSION, and then // later date bumping the default CURRENT_VERSION at which point both // CURRENT_VERSION and MAX_STANDARD_VERSION will be equal. static const int32_t MAX_STANDARD_VERSION = 2; // The local variables are made const to prevent unintended modification // without updating the cached hash value. However, CTransaction is not // actually immutable; deserialization and assignment are implemented, // and bypass the constness. This is safe, as they update the entire // structure, including the hash. const int32_t nVersion; const std::vector vin; const std::vector vout; const uint32_t nLockTime; private: /** Memory only. */ const uint256 hash; uint256 ComputeHash() const; public: /** Construct a CTransaction that qualifies as IsNull() */ CTransaction(); /** Convert a CMutableTransaction into a CTransaction. 
*/ CTransaction(const CMutableTransaction &tx); CTransaction(CMutableTransaction &&tx); template inline void Serialize(Stream &s) const { SerializeTransaction(*this, s); } /** This deserializing constructor is provided instead of an Unserialize * method. Unserialize is not possible, since it would require overwriting * const fields. */ template CTransaction(deserialize_type, Stream &s) : CTransaction(CMutableTransaction(deserialize, s)) {} bool IsNull() const { return vin.empty() && vout.empty(); } const uint256 &GetId() const { return hash; } // Compute a hash that includes both transaction and witness data uint256 GetHash() const; // Return sum of txouts. Amount GetValueOut() const; // GetValueIn() is a method on CCoinsViewCache, because // inputs must be known to compute value in. // Compute priority, given priority of inputs and (optionally) tx size double ComputePriority(double dPriorityInputs, unsigned int nTxSize = 0) const; // Compute modified tx size for priority calculation (optionally given tx // size) unsigned int CalculateModifiedSize(unsigned int nTxSize = 0) const; /** * Get the total transaction size in bytes. * @return Total transaction size in bytes */ unsigned int GetTotalSize() const; bool IsCoinBase() const { return (vin.size() == 1 && vin[0].prevout.IsNull()); } friend bool operator==(const CTransaction &a, const CTransaction &b) { return a.hash == b.hash; } friend bool operator!=(const CTransaction &a, const CTransaction &b) { return a.hash != b.hash; } std::string ToString() const; }; /** A mutable version of CTransaction. */ struct CMutableTransaction { int32_t nVersion; std::vector vin; std::vector vout; uint32_t nLockTime; CMutableTransaction(); CMutableTransaction(const CTransaction &tx); template inline void Serialize(Stream &s) const { SerializeTransaction(*this, s); } template inline void Unserialize(Stream &s) { UnserializeTransaction(*this, s); } template CMutableTransaction(deserialize_type, Stream &s) { Unserialize(s); } /** Compute the hash of this CMutableTransaction. This is computed on the * fly, as opposed to GetId() in CTransaction, which uses a cached result. */ uint256 GetId() const; friend bool operator==(const CMutableTransaction &a, const CMutableTransaction &b) { return a.GetId() == b.GetId(); } }; typedef std::shared_ptr CTransactionRef; static inline CTransactionRef MakeTransactionRef() { return std::make_shared(); } template static inline CTransactionRef MakeTransactionRef(Tx &&txIn) { return std::make_shared(std::forward(txIn)); } /** Compute the size of a transaction */ int64_t GetTransactionSize(const CTransaction &tx); /** Precompute sighash midstate to avoid quadratic hashing */ struct PrecomputedTransactionData { uint256 hashPrevouts, hashSequence, hashOutputs; PrecomputedTransactionData() : hashPrevouts(), hashSequence(), hashOutputs() {} PrecomputedTransactionData(const PrecomputedTransactionData &txdata) : hashPrevouts(txdata.hashPrevouts), hashSequence(txdata.hashSequence), hashOutputs(txdata.hashOutputs) {} PrecomputedTransactionData(const CTransaction &tx); }; #endif // BITCOIN_PRIMITIVES_TRANSACTION_H diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 2bf6e56a7..dc443d768 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -1,388 +1,388 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
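// ---------------------------------------------------------------------------
// Illustration (not part of the patch): GetDustThreshold() above prices an
// output by the bytes it will later cost to spend, and IsDust() flags anything
// that would pay more than 1/3 of its value in fees.  A hypothetical
// standalone version over plain satoshi integers, using the same 148-byte
// spend-input estimate from the comment above:
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>

// Fee for nBytes at a feerate given in satoshis per 1000 bytes.
// (The real CFeeRate::GetFee also never truncates a nonzero rate to a zero fee.)
int64_t ToyFeeForSize(int64_t satoshisPerK, size_t nBytes) {
    return satoshisPerK * int64_t(nBytes) / 1000;
}

int64_t ToyDustThreshold(size_t serializedTxOutSize, int64_t minRelaySatPerK) {
    // The real code returns Amount(0) first for unspendable scriptPubKeys.
    size_t nSize = serializedTxOutSize + 148; // txout + typical CTxIn to spend it
    return 3 * ToyFeeForSize(minRelaySatPerK, nSize);
}

// Worked example: a 34-byte P2PKH txout with minRelayTxFee = 1000 sat/kB gives
// nSize = 34 + 148 = 182, fee(182) = 182 sat, dust threshold = 546 satoshis,
// matching the "546*minRelayTxFee/1000" figure in the comment above.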
#include "script/sign.h" #include "key.h" #include "keystore.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "script/standard.h" #include "uint256.h" typedef std::vector valtype; TransactionSignatureCreator::TransactionSignatureCreator( const CKeyStore *keystoreIn, const CTransaction *txToIn, unsigned int nInIn, const Amount amountIn, uint32_t nHashTypeIn) : BaseSignatureCreator(keystoreIn), txTo(txToIn), nIn(nInIn), amount(amountIn), nHashType(nHashTypeIn), checker(txTo, nIn, amountIn) {} bool TransactionSignatureCreator::CreateSig(std::vector &vchSig, const CKeyID &address, const CScript &scriptCode) const { CKey key; if (!keystore->GetKey(address, key)) { return false; } uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount); if (!key.Sign(hash, vchSig)) { return false; } vchSig.push_back(uint8_t(nHashType)); return true; } static bool Sign1(const CKeyID &address, const BaseSignatureCreator &creator, const CScript &scriptCode, std::vector &ret) { std::vector vchSig; if (!creator.CreateSig(vchSig, address, scriptCode)) { return false; } ret.push_back(vchSig); return true; } static bool SignN(const std::vector &multisigdata, const BaseSignatureCreator &creator, const CScript &scriptCode, std::vector &ret) { int nSigned = 0; int nRequired = multisigdata.front()[0]; for (size_t i = 1; i < multisigdata.size() - 1 && nSigned < nRequired; i++) { const valtype &pubkey = multisigdata[i]; CKeyID keyID = CPubKey(pubkey).GetID(); if (Sign1(keyID, creator, scriptCode, ret)) { ++nSigned; } } return nSigned == nRequired; } /** * Sign scriptPubKey using signature made with creator. * Signatures are returned in scriptSigRet (or returns false if scriptPubKey * can't be signed), unless whichTypeRet is TX_SCRIPTHASH, in which case * scriptSigRet is the redemption script. * Returns false if scriptPubKey could not be completely satisfied. 
*/ static bool SignStep(const BaseSignatureCreator &creator, const CScript &scriptPubKey, std::vector &ret, txnouttype &whichTypeRet) { CScript scriptRet; uint160 h160; ret.clear(); std::vector vSolutions; if (!Solver(scriptPubKey, whichTypeRet, vSolutions)) { return false; } CKeyID keyID; switch (whichTypeRet) { case TX_NONSTANDARD: case TX_NULL_DATA: return false; case TX_PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); return Sign1(keyID, creator, scriptPubKey, ret); case TX_PUBKEYHASH: { keyID = CKeyID(uint160(vSolutions[0])); if (!Sign1(keyID, creator, scriptPubKey, ret)) { return false; } CPubKey vch; creator.KeyStore().GetPubKey(keyID, vch); ret.push_back(ToByteVector(vch)); return true; } case TX_SCRIPTHASH: if (creator.KeyStore().GetCScript(uint160(vSolutions[0]), scriptRet)) { ret.push_back( std::vector(scriptRet.begin(), scriptRet.end())); return true; } return false; case TX_MULTISIG: // workaround CHECKMULTISIG bug ret.push_back(valtype()); return (SignN(vSolutions, creator, scriptPubKey, ret)); default: return false; } } static CScript PushAll(const std::vector &values) { CScript result; for (const valtype &v : values) { if (v.size() == 0) { result << OP_0; } else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) { result << CScript::EncodeOP_N(v[0]); } else { result << v; } } return result; } bool ProduceSignature(const BaseSignatureCreator &creator, const CScript &fromPubKey, SignatureData &sigdata) { CScript script = fromPubKey; bool solved = true; std::vector result; txnouttype whichType; solved = SignStep(creator, script, result, whichType); CScript subscript; if (solved && whichType == TX_SCRIPTHASH) { // Solver returns the subscript that needs to be evaluated; the final // scriptSig is the signatures from that and then the serialized // subscript: script = subscript = CScript(result[0].begin(), result[0].end()); solved = solved && SignStep(creator, script, result, whichType) && whichType != TX_SCRIPTHASH; result.push_back( std::vector(subscript.begin(), subscript.end())); } sigdata.scriptSig = PushAll(result); // Test solution return solved && VerifyScript(sigdata.scriptSig, fromPubKey, STANDARD_SCRIPT_VERIFY_FLAGS, creator.Checker()); } SignatureData DataFromTransaction(const CMutableTransaction &tx, unsigned int nIn) { SignatureData data; assert(tx.vin.size() > nIn); data.scriptSig = tx.vin[nIn].scriptSig; return data; } void UpdateTransaction(CMutableTransaction &tx, unsigned int nIn, const SignatureData &data) { assert(tx.vin.size() > nIn); tx.vin[nIn].scriptSig = data.scriptSig; } bool SignSignature(const CKeyStore &keystore, const CScript &fromPubKey, CMutableTransaction &txTo, unsigned int nIn, const Amount amount, uint32_t nHashType) { assert(nIn < txTo.vin.size()); CTransaction txToConst(txTo); TransactionSignatureCreator creator(&keystore, &txToConst, nIn, amount, nHashType); SignatureData sigdata; bool ret = ProduceSignature(creator, fromPubKey, sigdata); UpdateTransaction(txTo, nIn, sigdata); return ret; } bool SignSignature(const CKeyStore &keystore, const CTransaction &txFrom, CMutableTransaction &txTo, unsigned int nIn, uint32_t nHashType) { assert(nIn < txTo.vin.size()); CTxIn &txin = txTo.vin[nIn]; assert(txin.prevout.n < txFrom.vout.size()); const CTxOut &txout = txFrom.vout[txin.prevout.n]; - return SignSignature(keystore, txout.scriptPubKey, txTo, nIn, - txout.nValue.GetSatoshis(), nHashType); + return SignSignature(keystore, txout.scriptPubKey, txTo, nIn, txout.nValue, + nHashType); } static std::vector CombineMultisig( const CScript &scriptPubKey, 
const BaseSignatureChecker &checker, const std::vector &vSolutions, const std::vector &sigs1, const std::vector &sigs2) { // Combine all the signatures we've got: std::set allsigs; for (const valtype &v : sigs1) { if (!v.empty()) { allsigs.insert(v); } } for (const valtype &v : sigs2) { if (!v.empty()) { allsigs.insert(v); } } // Build a map of pubkey -> signature by matching sigs to pubkeys: assert(vSolutions.size() > 1); unsigned int nSigsRequired = vSolutions.front()[0]; unsigned int nPubKeys = vSolutions.size() - 2; std::map sigs; for (const valtype &sig : allsigs) { for (unsigned int i = 0; i < nPubKeys; i++) { const valtype &pubkey = vSolutions[i + 1]; // Already got a sig for this pubkey if (sigs.count(pubkey)) { continue; } if (checker.CheckSig(sig, pubkey, scriptPubKey, STANDARD_SCRIPT_VERIFY_FLAGS)) { sigs[pubkey] = sig; break; } } } // Now build a merged CScript: unsigned int nSigsHave = 0; // pop-one-too-many workaround std::vector result; result.push_back(valtype()); for (unsigned int i = 0; i < nPubKeys && nSigsHave < nSigsRequired; i++) { if (sigs.count(vSolutions[i + 1])) { result.push_back(sigs[vSolutions[i + 1]]); ++nSigsHave; } } // Fill any missing with OP_0: for (unsigned int i = nSigsHave; i < nSigsRequired; i++) { result.push_back(valtype()); } return result; } namespace { struct Stacks { std::vector script; Stacks() {} explicit Stacks(const std::vector &scriptSigStack_) : script(scriptSigStack_) {} explicit Stacks(const SignatureData &data) { EvalScript(script, data.scriptSig, MANDATORY_SCRIPT_VERIFY_FLAGS, BaseSignatureChecker()); } SignatureData Output() const { SignatureData result; result.scriptSig = PushAll(script); return result; } }; } // namespace static Stacks CombineSignatures(const CScript &scriptPubKey, const BaseSignatureChecker &checker, const txnouttype txType, const std::vector &vSolutions, Stacks sigs1, Stacks sigs2) { switch (txType) { case TX_NONSTANDARD: case TX_NULL_DATA: // Don't know anything about this, assume bigger one is correct: if (sigs1.script.size() >= sigs2.script.size()) { return sigs1; } return sigs2; case TX_PUBKEY: case TX_PUBKEYHASH: // Signatures are bigger than placeholders or empty scripts: if (sigs1.script.empty() || sigs1.script[0].empty()) { return sigs2; } return sigs1; case TX_SCRIPTHASH: { if (sigs1.script.empty() || sigs1.script.back().empty()) { return sigs2; } if (sigs2.script.empty() || sigs2.script.back().empty()) { return sigs1; } // Recur to combine: valtype spk = sigs1.script.back(); CScript pubKey2(spk.begin(), spk.end()); txnouttype txType2; std::vector> vSolutions2; Solver(pubKey2, txType2, vSolutions2); sigs1.script.pop_back(); sigs2.script.pop_back(); Stacks result = CombineSignatures(pubKey2, checker, txType2, vSolutions2, sigs1, sigs2); result.script.push_back(spk); return result; } case TX_MULTISIG: return Stacks(CombineMultisig(scriptPubKey, checker, vSolutions, sigs1.script, sigs2.script)); default: return Stacks(); } } SignatureData CombineSignatures(const CScript &scriptPubKey, const BaseSignatureChecker &checker, const SignatureData &scriptSig1, const SignatureData &scriptSig2) { txnouttype txType; std::vector> vSolutions; Solver(scriptPubKey, txType, vSolutions); return CombineSignatures(scriptPubKey, checker, txType, vSolutions, Stacks(scriptSig1), Stacks(scriptSig2)) .Output(); } namespace { /** Dummy signature checker which accepts all signatures. 
 */
class DummySignatureChecker : public BaseSignatureChecker {
public:
    DummySignatureChecker() {}

    bool CheckSig(const std::vector<uint8_t> &scriptSig,
                  const std::vector<uint8_t> &vchPubKey,
                  const CScript &scriptCode, uint32_t flags) const {
        return true;
    }
};
const DummySignatureChecker dummyChecker;
} // namespace

const BaseSignatureChecker &DummySignatureCreator::Checker() const {
    return dummyChecker;
}

bool DummySignatureCreator::CreateSig(std::vector<uint8_t> &vchSig,
                                      const CKeyID &keyid,
                                      const CScript &scriptCode) const {
    // Create a dummy signature that is a valid DER-encoding
    vchSig.assign(72, '\000');
    vchSig[0] = 0x30;
    vchSig[1] = 69;
    vchSig[2] = 0x02;
    vchSig[3] = 33;
    vchSig[4] = 0x01;
    vchSig[4 + 33] = 0x02;
    vchSig[5 + 33] = 32;
    vchSig[6 + 33] = 0x01;
    vchSig[6 + 33 + 32] = SIGHASH_ALL | SIGHASH_FORKID;
    return true;
}
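// ---------------------------------------------------------------------------
// Illustration (not part of the patch): the 72 bytes assembled by
// DummySignatureCreator::CreateSig above form a worst-case-sized DER ECDSA
// signature (33-byte r, 32-byte s) plus one trailing sighash byte, the kind of
// placeholder typically used when estimating the size of a fully signed
// transaction.  A hypothetical check for that exact layout:
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

bool ToyLooksLikeDummyDerSig(const std::vector<uint8_t> &sig) {
    // 0x30 <len=69> | 0x02 <len=33> <33 r bytes> | 0x02 <len=32> <32 s bytes>
    // | 1 sighash byte  ==>  2 + 35 + 34 + 1 = 72 bytes total.
    return sig.size() == 72 &&
           sig[0] == 0x30 && sig[1] == 69 &&  // DER SEQUENCE of 69 bytes
           sig[2] == 0x02 && sig[3] == 33 &&  // INTEGER r, 33 bytes
           sig[2 + 2 + 33] == 0x02 &&         // INTEGER s ...
           sig[3 + 2 + 33] == 32;             // ... 32 bytes
}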