diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp index c6c35c330..5a17937cc 100644 --- a/src/bench/mempool_eviction.cpp +++ b/src/bench/mempool_eviction.cpp @@ -1,124 +1,124 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <policy/policy.h> #include <txmempool.h> #include <list> #include <vector> static void AddTx(const CTransactionRef &tx, const Amount &nFee, - CTxMemPool &pool) { + CTxMemPool &pool) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) { int64_t nTime = 0; double dPriority = 10.0; unsigned int nHeight = 1; bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; pool.addUnchecked(tx->GetId(), CTxMemPoolEntry(tx, nFee, nTime, dPriority, nHeight, tx->GetValueOut(), spendsCoinbase, sigOpCost, lp)); } // Right now this is only testing eviction performance in an extremely small // mempool. Code needs to be written to generate a much wider variety of // unique transactions for a more meaningful performance measurement. static void MempoolEviction(benchmark::State &state) { CMutableTransaction tx1 = CMutableTransaction(); tx1.vin.resize(1); tx1.vin[0].scriptSig = CScript() << OP_1; tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; CMutableTransaction tx2 = CMutableTransaction(); tx2.vin.resize(1); tx2.vin[0].scriptSig = CScript() << OP_2; tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL; tx2.vout[0].nValue = 10 * COIN; CMutableTransaction tx3 = CMutableTransaction(); tx3.vin.resize(1); tx3.vin[0].prevout = COutPoint(tx2.GetId(), 0); tx3.vin[0].scriptSig = CScript() << OP_2; tx3.vout.resize(1); tx3.vout[0].scriptPubKey = CScript() << OP_3 << OP_EQUAL; tx3.vout[0].nValue = 10 * COIN; CMutableTransaction tx4 = CMutableTransaction(); tx4.vin.resize(2); tx4.vin[0].prevout = COutPoint(); tx4.vin[0].scriptSig = CScript() << OP_4; tx4.vin[1].prevout = COutPoint(); tx4.vin[1].scriptSig = CScript() << OP_4; tx4.vout.resize(2); tx4.vout[0].scriptPubKey = CScript() << OP_4 << OP_EQUAL; tx4.vout[0].nValue = 10 * COIN; tx4.vout[1].scriptPubKey = CScript() << OP_4 << OP_EQUAL; tx4.vout[1].nValue = 10 * COIN; CMutableTransaction tx5 = CMutableTransaction(); tx5.vin.resize(2); tx5.vin[0].prevout = COutPoint(tx4.GetId(), 0); tx5.vin[0].scriptSig = CScript() << OP_4; tx5.vin[1].prevout = COutPoint(); tx5.vin[1].scriptSig = CScript() << OP_5; tx5.vout.resize(2); tx5.vout[0].scriptPubKey = CScript() << OP_5 << OP_EQUAL; tx5.vout[0].nValue = 10 * COIN; tx5.vout[1].scriptPubKey = CScript() << OP_5 << OP_EQUAL; tx5.vout[1].nValue = 10 * COIN; CMutableTransaction tx6 = CMutableTransaction(); tx6.vin.resize(2); tx6.vin[0].prevout = COutPoint(tx4.GetId(), 1); tx6.vin[0].scriptSig = CScript() << OP_4; tx6.vin[1].prevout = COutPoint(); tx6.vin[1].scriptSig = CScript() << OP_6; tx6.vout.resize(2); tx6.vout[0].scriptPubKey = CScript() << OP_6 << OP_EQUAL; tx6.vout[0].nValue = 10 * COIN; tx6.vout[1].scriptPubKey = CScript() << OP_6 << OP_EQUAL; tx6.vout[1].nValue = 10 * COIN; CMutableTransaction tx7 = CMutableTransaction(); tx7.vin.resize(2); tx7.vin[0].prevout = COutPoint(tx5.GetId(), 0); tx7.vin[0].scriptSig = CScript() << OP_5; tx7.vin[1].prevout = COutPoint(tx6.GetId(), 0); tx7.vin[1].scriptSig = CScript() << OP_6; tx7.vout.resize(2); tx7.vout[0].scriptPubKey = CScript() << OP_7 << OP_EQUAL; tx7.vout[0].nValue = 10 * 
COIN; tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL; tx7.vout[1].nValue = 10 * COIN; CTxMemPool pool; - + LOCK(pool.cs); // Create transaction references outside the "hot loop" const CTransactionRef tx1_r{MakeTransactionRef(tx1)}; const CTransactionRef tx2_r{MakeTransactionRef(tx2)}; const CTransactionRef tx3_r{MakeTransactionRef(tx3)}; const CTransactionRef tx4_r{MakeTransactionRef(tx4)}; const CTransactionRef tx5_r{MakeTransactionRef(tx5)}; const CTransactionRef tx6_r{MakeTransactionRef(tx6)}; const CTransactionRef tx7_r{MakeTransactionRef(tx7)}; while (state.KeepRunning()) { AddTx(tx1_r, 10000 * SATOSHI, pool); AddTx(tx2_r, 5000 * SATOSHI, pool); AddTx(tx3_r, 20000 * SATOSHI, pool); AddTx(tx4_r, 7000 * SATOSHI, pool); AddTx(tx5_r, 1000 * SATOSHI, pool); AddTx(tx6_r, 1100 * SATOSHI, pool); AddTx(tx7_r, 9000 * SATOSHI, pool); pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); pool.TrimToSize(GetVirtualTransactionSize(*tx1_r)); } } BENCHMARK(MempoolEviction, 41000); diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 03911650f..f3b17ee3a 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -1,415 +1,415 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <blockencodings.h> #include <chainparams.h> #include <config.h> #include <consensus/merkle.h> #include <pow.h> #include <random.h> #include <streams.h> #include <test/test_bitcoin.h> #include <boost/test/unit_test.hpp> std::vector<std::pair<uint256, CTransactionRef>> extra_txn; struct RegtestingSetup : public TestingSetup { RegtestingSetup() : TestingSetup(CBaseChainParams::REGTEST) {} }; BOOST_FIXTURE_TEST_SUITE(blockencodings_tests, RegtestingSetup) static COutPoint InsecureRandOutPoint() { return COutPoint(TxId(InsecureRand256()), 0); } static CBlock BuildBlockTestCase() { CBlock block; CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig.resize(10); tx.vout.resize(1); tx.vout[0].nValue = 42 * SATOSHI; block.vtx.resize(3); block.vtx[0] = MakeTransactionRef(tx); block.nVersion = 42; block.hashPrevBlock = InsecureRand256(); block.nBits = 0x207fffff; tx.vin[0].prevout = InsecureRandOutPoint(); block.vtx[1] = MakeTransactionRef(tx); tx.vin.resize(10); for (size_t i = 0; i < tx.vin.size(); i++) { tx.vin[i].prevout = InsecureRandOutPoint(); } block.vtx[2] = MakeTransactionRef(tx); bool mutated; block.hashMerkleRoot = BlockMerkleRoot(block, &mutated); assert(!mutated); GlobalConfig config; const Consensus::Params ¶ms = config.GetChainParams().GetConsensus(); while (!CheckProofOfWork(block.GetHash(), block.nBits, params)) { ++block.nNonce; } return block; } // Number of shared use_counts we expect for a tx we haven't touched // (block + mempool + our copy from the GetSharedTx call) constexpr long SHARED_TX_OFFSET{3}; BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(block.vtx[2])); LOCK(pool.cs); + pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(block.vtx[2])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); // Do a simple ShortTxIDs RT { CBlockHeaderAndShortTxIDs shortIDs(block); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> 
shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); BOOST_CHECK(!partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); size_t poolSize = pool.size(); pool.removeRecursive(*block.vtx[2]); BOOST_CHECK_EQUAL(pool.size(), poolSize - 1); CBlock block2; { // No transactions. PartiallyDownloadedBlock tmp = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); partialBlock = tmp; } // Wrong transaction { // Current implementation doesn't check txn here, but don't require // that. PartiallyDownloadedBlock tmp = partialBlock; partialBlock.FillBlock(block2, {block.vtx[2]}); partialBlock = tmp; } bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); } } class TestHeaderAndShortIDs { // Utility to encode custom CBlockHeaderAndShortTxIDs public: CBlockHeader header; uint64_t nonce; std::vector<uint64_t> shorttxids; std::vector<PrefilledTransaction> prefilledtxn; explicit TestHeaderAndShortIDs(const CBlockHeaderAndShortTxIDs &orig) { CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << orig; stream >> *this; } explicit TestHeaderAndShortIDs(const CBlock &block) : TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs(block)) {} uint64_t GetShortID(const uint256 &txhash) const { CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << *this; CBlockHeaderAndShortTxIDs base; stream >> base; return base.GetShortID(txhash); } ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(header); READWRITE(nonce); size_t shorttxids_size = shorttxids.size(); READWRITE(VARINT(shorttxids_size)); shorttxids.resize(shorttxids_size); for (size_t i = 0; i < shorttxids.size(); i++) { uint32_t lsb = shorttxids[i] & 0xffffffff; uint16_t msb = (shorttxids[i] >> 32) & 0xffff; READWRITE(lsb); READWRITE(msb); shorttxids[i] = (uint64_t(msb) << 32) | uint64_t(lsb); } READWRITE(prefilledtxn); } }; BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(block.vtx[2])); LOCK(pool.cs); + pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(block.vtx[2])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; // Test with pre-forwarding tx 1, but not coinbase { TestHeaderAndShortIDs shortIDs(block); shortIDs.prefilledtxn.resize(1); shortIDs.prefilledtxn[0] = {1, block.vtx[1]}; shortIDs.shorttxids.resize(2); shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0]->GetId()); shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2]->GetId()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); 
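The SHARED_TX_OFFSET constant above, and the "+1" / "-1" comments that follow, are plain std::shared_ptr reference counting: the block, the mempool entry, and the copy returned by GetSharedTx() each hold one reference, and every PartiallyDownloadedBlock or extra CBlock that keeps the transaction alive adds one more. A toy model of that bookkeeping, independent of the patch and using no Bitcoin types:

#include <cassert>
#include <memory>

struct Tx { int id; };
using TxRef = std::shared_ptr<const Tx>;

int main() {
    TxRef block_copy = std::make_shared<const Tx>(Tx{2}); // held by the "block"
    TxRef pool_copy = block_copy;                         // held by the mempool entry
    TxRef our_copy = pool_copy;                           // the GetSharedTx() return value
    assert(our_copy.use_count() == 3);                    // SHARED_TX_OFFSET

    TxRef partial_copy = pool_copy;                       // a PartiallyDownloadedBlock keeps one
    assert(our_copy.use_count() == 3 + 1);                // SHARED_TX_OFFSET + 1

    block_copy.reset();                                   // block.vtx.clear()
    assert(our_copy.use_count() == 3 + 1 - 1);            // "+1 because of partialBlock; -1 because of block"
    return 0;
}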
BOOST_CHECK(!partialBlock.IsTxAvailable(0)); BOOST_CHECK(partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); // +1 because of partialBlock BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; { // No transactions. PartiallyDownloadedBlock tmp = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); partialBlock = tmp; } // Wrong transaction { // Current implementation doesn't check txn here, but don't require // that. PartiallyDownloadedBlock tmp = partialBlock; partialBlock.FillBlock(block2, {block.vtx[1]}); partialBlock = tmp; } // +2 because of partialBlock and block2 BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 2); bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); // +3 because of partialBlock and block2 and block3 BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 3); txhash = block.vtx[2]->GetId(); block.vtx.clear(); block2.vtx.clear(); block3.vtx.clear(); // + 1 because of partialBlock; -1 because of block. BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1 - 1); } // -1 because of block BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET - 1); } BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); - pool.addUnchecked(block.vtx[1]->GetId(), entry.FromTx(block.vtx[1])); LOCK(pool.cs); + pool.addUnchecked(block.vtx[1]->GetId(), entry.FromTx(block.vtx[1])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[1]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; // Test with pre-forwarding coinbase + tx 2 with tx 1 in mempool { TestHeaderAndShortIDs shortIDs(block); shortIDs.prefilledtxn.resize(2); shortIDs.prefilledtxn[0] = {0, block.vtx[0]}; // id == 1 as it is 1 after index 1 shortIDs.prefilledtxn[1] = {1, block.vtx[2]}; shortIDs.shorttxids.resize(1); shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1]->GetId()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); BOOST_CHECK(partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[1]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); txhash = block.vtx[1]->GetId(); block.vtx.clear(); block2.vtx.clear(); // + 1 because of partialBlock; -1 because of block. 
BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1 - 1); } // -1 because of block BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET - 1); } BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) { CTxMemPool pool; CMutableTransaction coinbase; coinbase.vin.resize(1); coinbase.vin[0].scriptSig.resize(10); coinbase.vout.resize(1); coinbase.vout[0].nValue = 42 * SATOSHI; CBlock block; block.vtx.resize(1); block.vtx[0] = MakeTransactionRef(std::move(coinbase)); block.nVersion = 42; block.hashPrevBlock = InsecureRand256(); block.nBits = 0x207fffff; bool mutated; block.hashMerkleRoot = BlockMerkleRoot(block, &mutated); assert(!mutated); GlobalConfig config; const Consensus::Params ¶ms = config.GetChainParams().GetConsensus(); while (!CheckProofOfWork(block.GetHash(), block.nBits, params)) { ++block.nNonce; } // Test simple header round-trip with only coinbase { CBlockHeaderAndShortTxIDs shortIDs(block); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); CBlock block2; std::vector<CTransactionRef> vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); } } BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) { BlockTransactionsRequest req1; req1.blockhash = InsecureRand256(); req1.indices.resize(4); req1.indices[0] = 0; req1.indices[1] = 1; req1.indices[2] = 3; req1.indices[3] = 4; CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << req1; BlockTransactionsRequest req2; stream >> req2; BOOST_CHECK_EQUAL(req1.blockhash.ToString(), req2.blockhash.ToString()); BOOST_CHECK_EQUAL(req1.indices.size(), req2.indices.size()); BOOST_CHECK_EQUAL(req1.indices[0], req2.indices[0]); BOOST_CHECK_EQUAL(req1.indices[1], req2.indices[1]); BOOST_CHECK_EQUAL(req1.indices[2], req2.indices[2]); BOOST_CHECK_EQUAL(req1.indices[3], req2.indices[3]); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp index 958f7d464..3f47a071c 100644 --- a/src/test/mempool_tests.cpp +++ b/src/test/mempool_tests.cpp @@ -1,1054 +1,1057 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
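Every hunk in this patch applies the same recipe: CTxMemPool::addUnchecked is treated as requiring pool.cs, so callers either take LOCK(pool.cs) once up front (as the mempool tests below do) or propagate the requirement with EXCLUSIVE_LOCKS_REQUIRED(pool.cs), as the benchmark's AddTx does. A minimal standalone sketch of the Clang thread-safety machinery this relies on, using simplified stand-ins for the macros from threadsafety.h rather than the real definitions; compile with clang++ -Wthread-safety to see the analysis fire:

// Simplified stand-ins for the annotations in threadsafety.h (illustrative only).
#define LOCKABLE __attribute__((lockable))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))

class LOCKABLE ToyMutex {
public:
    void lock() EXCLUSIVE_LOCK_FUNCTION() {}
    void unlock() UNLOCK_FUNCTION() {}
};

struct ToyPool {
    ToyMutex cs;
    int nTx GUARDED_BY(cs) = 0;

    // Callers must already hold cs, mirroring what the tests assume about addUnchecked().
    void addUnchecked() EXCLUSIVE_LOCKS_REQUIRED(cs) { ++nTx; }
};

// Annotated the way AddTx() in the benchmark is: the lock requirement is pushed
// onto our own caller instead of being taken here.
static void AddOne(ToyPool &pool) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) {
    pool.addUnchecked();
}

void Caller(ToyPool &pool) {
    pool.cs.lock();   // dropping this line makes clang -Wthread-safety report:
    AddOne(pool);     //   "calling function 'AddOne' requires holding mutex 'pool.cs'"
    pool.cs.unlock();
}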
#include <txmempool.h> #include <policy/policy.h> #include <reverse_iterator.h> #include <util/system.h> #include <test/test_bitcoin.h> #include <boost/test/unit_test.hpp> #include <algorithm> #include <list> #include <vector> BOOST_FIXTURE_TEST_SUITE(mempool_tests, TestingSetup) BOOST_AUTO_TEST_CASE(TestPackageAccounting) { CTxMemPool testPool; LOCK(testPool.cs); TestMemPoolEntryHelper entry; CMutableTransaction parentOfAll; std::vector<CTxIn> outpoints; const size_t maxOutputs = 3; // Construct a parent for the rest of the chain parentOfAll.vin.resize(1); parentOfAll.vin[0].scriptSig = CScript(); // Give us a couple outpoints so we can spend them for (size_t i = 0; i < maxOutputs; i++) { parentOfAll.vout.emplace_back(10 * SATOSHI, CScript() << OP_TRUE); } TxId parentOfAllId = parentOfAll.GetId(); testPool.addUnchecked(parentOfAllId, entry.FromTx(parentOfAll)); // Add some outpoints to the tracking vector for (size_t i = 0; i < maxOutputs; i++) { outpoints.emplace_back(COutPoint(parentOfAllId, i)); } Amount totalFee = Amount::zero(); size_t totalSize = CTransaction(parentOfAll).GetTotalSize(); // Generate 100 transactions for (size_t totalTransactions = 0; totalTransactions < 100; totalTransactions++) { CMutableTransaction tx; uint64_t minAncestors = std::numeric_limits<size_t>::max(); uint64_t maxAncestors = 0; Amount minFees = MAX_MONEY; Amount maxFees = Amount::zero(); uint64_t minSize = std::numeric_limits<size_t>::max(); uint64_t maxSize = 0; // Consume random inputs, but make sure we don't consume more than // available for (size_t input = std::min(InsecureRandRange(maxOutputs) + 1, uint64_t(outpoints.size())); input > 0; input--) { std::swap(outpoints[InsecureRandRange(outpoints.size())], outpoints.back()); tx.vin.emplace_back(outpoints.back()); outpoints.pop_back(); // We don't know exactly how many ancestors this transaction has // due to possible duplicates. Calculate a valid range based on // parents. CTxMemPoolEntry parent = *testPool.mapTx.find(tx.vin.back().prevout.GetTxId()); minAncestors = std::min(minAncestors, parent.GetCountWithAncestors()); maxAncestors += parent.GetCountWithAncestors(); minFees = std::min(minFees, parent.GetModFeesWithAncestors()); maxFees += parent.GetModFeesWithAncestors(); minSize = std::min(minSize, parent.GetSizeWithAncestors()); maxSize += parent.GetSizeWithAncestors(); } // Produce random number of outputs for (size_t output = InsecureRandRange(maxOutputs) + 1; output > 0; output--) { tx.vout.emplace_back(10 * SATOSHI, CScript() << OP_TRUE); } TxId curId = tx.GetId(); // Record the outputs for (size_t output = tx.vout.size(); output > 0; output--) { outpoints.emplace_back(COutPoint(curId, output)); } Amount randFee = int64_t(InsecureRandRange(300)) * SATOSHI; testPool.addUnchecked(curId, entry.Fee(randFee).FromTx(tx)); // Add this transaction to the totals. 
minAncestors += 1; maxAncestors += 1; minFees += randFee; maxFees += randFee; minSize += CTransaction(tx).GetTotalSize(); maxSize += CTransaction(tx).GetTotalSize(); // Calculate overall values totalFee += randFee; totalSize += CTransaction(tx).GetTotalSize(); CTxMemPoolEntry parentEntry = *testPool.mapTx.find(parentOfAllId); CTxMemPoolEntry latestEntry = *testPool.mapTx.find(curId); // Ensure values are within the expected ranges BOOST_CHECK(latestEntry.GetCountWithAncestors() >= minAncestors); BOOST_CHECK(latestEntry.GetCountWithAncestors() <= maxAncestors); BOOST_CHECK(latestEntry.GetSizeWithAncestors() >= minSize); BOOST_CHECK(latestEntry.GetSizeWithAncestors() <= maxSize); BOOST_CHECK(latestEntry.GetModFeesWithAncestors() >= minFees); BOOST_CHECK(latestEntry.GetModFeesWithAncestors() <= maxFees); BOOST_CHECK_EQUAL(parentEntry.GetCountWithDescendants(), testPool.mapTx.size()); BOOST_CHECK_EQUAL(parentEntry.GetSizeWithDescendants(), totalSize); BOOST_CHECK_EQUAL(parentEntry.GetModFeesWithDescendants(), totalFee); } } BOOST_AUTO_TEST_CASE(MempoolRemoveTest) { // Test CTxMemPool::remove functionality TestMemPoolEntryHelper entry; // Parent transaction with three children, and three grand-children: CMutableTransaction txParent; txParent.vin.resize(1); txParent.vin[0].scriptSig = CScript() << OP_11; txParent.vout.resize(3); for (int i = 0; i < 3; i++) { txParent.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL; txParent.vout[i].nValue = 33000 * SATOSHI; } CMutableTransaction txChild[3]; for (int i = 0; i < 3; i++) { txChild[i].vin.resize(1); txChild[i].vin[0].scriptSig = CScript() << OP_11; txChild[i].vin[0].prevout = COutPoint(txParent.GetId(), i); txChild[i].vout.resize(1); txChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; txChild[i].vout[0].nValue = 11000 * SATOSHI; } CMutableTransaction txGrandChild[3]; for (int i = 0; i < 3; i++) { txGrandChild[i].vin.resize(1); txGrandChild[i].vin[0].scriptSig = CScript() << OP_11; txGrandChild[i].vin[0].prevout = COutPoint(txChild[i].GetId(), 0); txGrandChild[i].vout.resize(1); txGrandChild[i].vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; txGrandChild[i].vout[0].nValue = 11000 * SATOSHI; } CTxMemPool testPool; + LOCK(testPool.cs); // Nothing in pool, remove should do nothing: unsigned int poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txParent)); BOOST_CHECK_EQUAL(testPool.size(), poolSize); // Just the parent: testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent)); poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txParent)); BOOST_CHECK_EQUAL(testPool.size(), poolSize - 1); // Parent, children, grandchildren: testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent)); for (int i = 0; i < 3; i++) { testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i])); testPool.addUnchecked(txGrandChild[i].GetId(), entry.FromTx(txGrandChild[i])); } // Remove Child[0], GrandChild[0] should be removed: poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txChild[0])); BOOST_CHECK_EQUAL(testPool.size(), poolSize - 2); // ... 
make sure grandchild and child are gone: poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txGrandChild[0])); BOOST_CHECK_EQUAL(testPool.size(), poolSize); poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txChild[0])); BOOST_CHECK_EQUAL(testPool.size(), poolSize); // Remove parent, all children/grandchildren should go: poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txParent)); BOOST_CHECK_EQUAL(testPool.size(), poolSize - 5); BOOST_CHECK_EQUAL(testPool.size(), 0UL); // Add children and grandchildren, but NOT the parent (simulate the parent // being in a block) for (int i = 0; i < 3; i++) { testPool.addUnchecked(txChild[i].GetId(), entry.FromTx(txChild[i])); testPool.addUnchecked(txGrandChild[i].GetId(), entry.FromTx(txGrandChild[i])); } // Now remove the parent, as might happen if a block-re-org occurs but the // parent cannot be put into the mempool (maybe because it is non-standard): poolSize = testPool.size(); testPool.removeRecursive(CTransaction(txParent)); BOOST_CHECK_EQUAL(testPool.size(), poolSize - 6); BOOST_CHECK_EQUAL(testPool.size(), 0UL); } BOOST_AUTO_TEST_CASE(MempoolClearTest) { // Test CTxMemPool::clear functionality TestMemPoolEntryHelper entry; // Create a transaction CMutableTransaction txParent; txParent.vin.resize(1); txParent.vin[0].scriptSig = CScript() << OP_11; txParent.vout.resize(3); for (int i = 0; i < 3; i++) { txParent.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL; txParent.vout[i].nValue = 33000 * SATOSHI; } CTxMemPool testPool; LOCK(testPool.cs); // Nothing in pool, clear should do nothing: testPool.clear(); BOOST_CHECK_EQUAL(testPool.size(), 0UL); // Add the transaction testPool.addUnchecked(txParent.GetId(), entry.FromTx(txParent)); BOOST_CHECK_EQUAL(testPool.size(), 1UL); BOOST_CHECK_EQUAL(testPool.mapTx.size(), 1UL); BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 1UL); BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 1UL); // CTxMemPool's members should be empty after a clear testPool.clear(); BOOST_CHECK_EQUAL(testPool.size(), 0UL); BOOST_CHECK_EQUAL(testPool.mapTx.size(), 0UL); BOOST_CHECK_EQUAL(testPool.mapNextTx.size(), 0UL); BOOST_CHECK_EQUAL(testPool.vTxHashes.size(), 0UL); } template <typename name> static void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder, const std::string &testcase) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) { BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size()); typename CTxMemPool::indexed_transaction_set::index<name>::type::iterator it = pool.mapTx.get<name>().begin(); int count = 0; for (; it != pool.mapTx.get<name>().end(); ++it, ++count) { BOOST_CHECK_MESSAGE(it->GetTx().GetId().ToString() == sortedOrder[count], it->GetTx().GetId().ToString() << " != " << sortedOrder[count] << " in test " << testcase << ":" << count); } } BOOST_AUTO_TEST_CASE(MempoolIndexingTest) { CTxMemPool pool; + LOCK(pool.cs); TestMemPoolEntryHelper entry; /* 3rd highest fee */ CMutableTransaction tx1 = CMutableTransaction(); tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx1.GetId(), entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1)); /* highest fee */ CMutableTransaction tx2 = CMutableTransaction(); tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx2.vout[0].nValue = 2 * COIN; pool.addUnchecked(tx2.GetId(), entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2)); /* lowest fee */ CMutableTransaction tx3 = CMutableTransaction(); tx3.vout.resize(1); tx3.vout[0].scriptPubKey = 
CScript() << OP_11 << OP_EQUAL; tx3.vout[0].nValue = 5 * COIN; pool.addUnchecked(tx3.GetId(), entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3)); /* 2nd highest fee */ CMutableTransaction tx4 = CMutableTransaction(); tx4.vout.resize(1); tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx4.vout[0].nValue = 6 * COIN; pool.addUnchecked(tx4.GetId(), entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4)); /* equal fee rate to tx1, but newer */ CMutableTransaction tx5 = CMutableTransaction(); tx5.vout.resize(1); tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx5.vout[0].nValue = 11 * COIN; entry.nTime = 1; entry.dPriority = 10.0; pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5)); BOOST_CHECK_EQUAL(pool.size(), 5UL); std::vector<std::string> sortedOrder; sortedOrder.resize(5); sortedOrder[0] = tx3.GetId().ToString(); // 0 sortedOrder[1] = tx5.GetId().ToString(); // 10000 sortedOrder[2] = tx1.GetId().ToString(); // 10000 sortedOrder[3] = tx4.GetId().ToString(); // 15000 sortedOrder[4] = tx2.GetId().ToString(); // 20000 - LOCK(pool.cs); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest1"); /* low fee but with high fee child */ /* tx6 -> tx7 -> tx8, tx9 -> tx10 */ CMutableTransaction tx6 = CMutableTransaction(); tx6.vout.resize(1); tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx6.vout[0].nValue = 20 * COIN; pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6)); BOOST_CHECK_EQUAL(pool.size(), 6UL); // Check that at this point, tx6 is sorted low sortedOrder.insert(sortedOrder.begin(), tx6.GetId().ToString()); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest2"); CTxMemPool::setEntries setAncestors; setAncestors.insert(pool.mapTx.find(tx6.GetId())); CMutableTransaction tx7 = CMutableTransaction(); tx7.vin.resize(1); tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0); tx7.vin[0].scriptSig = CScript() << OP_11; tx7.vout.resize(2); tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx7.vout[0].nValue = 10 * COIN; tx7.vout[1].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx7.vout[1].nValue = 1 * COIN; CTxMemPool::setEntries setAncestorsCalculated; std::string dummy; BOOST_CHECK_EQUAL( pool.CalculateMemPoolAncestors(entry.Fee(2000000 * SATOSHI).FromTx(tx7), setAncestorsCalculated, 100, 1000000, 1000, 1000000, dummy), true); BOOST_CHECK(setAncestorsCalculated == setAncestors); pool.addUnchecked(tx7.GetId(), entry.FromTx(tx7), setAncestors); BOOST_CHECK_EQUAL(pool.size(), 7UL); // Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ... 
sortedOrder.erase(sortedOrder.begin()); sortedOrder.push_back(tx6.GetId().ToString()); sortedOrder.push_back(tx7.GetId().ToString()); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest3"); /* low fee child of tx7 */ CMutableTransaction tx8 = CMutableTransaction(); tx8.vin.resize(1); tx8.vin[0].prevout = COutPoint(tx7.GetId(), 0); tx8.vin[0].scriptSig = CScript() << OP_11; tx8.vout.resize(1); tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx8.vout[0].nValue = 10 * COIN; setAncestors.insert(pool.mapTx.find(tx7.GetId())); pool.addUnchecked(tx8.GetId(), entry.Fee(Amount::zero()).Time(2).FromTx(tx8), setAncestors); // Now tx8 should be sorted low, but tx6/tx both high sortedOrder.insert(sortedOrder.begin(), tx8.GetId().ToString()); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest4"); /* low fee child of tx7 */ CMutableTransaction tx9 = CMutableTransaction(); tx9.vin.resize(1); tx9.vin[0].prevout = COutPoint(tx7.GetId(), 1); tx9.vin[0].scriptSig = CScript() << OP_11; tx9.vout.resize(1); tx9.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx9.vout[0].nValue = 1 * COIN; pool.addUnchecked(tx9.GetId(), entry.Fee(Amount::zero()).Time(3).FromTx(tx9), setAncestors); // tx9 should be sorted low BOOST_CHECK_EQUAL(pool.size(), 9UL); sortedOrder.insert(sortedOrder.begin(), tx9.GetId().ToString()); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest5"); std::vector<std::string> snapshotOrder = sortedOrder; setAncestors.insert(pool.mapTx.find(tx8.GetId())); setAncestors.insert(pool.mapTx.find(tx9.GetId())); /* tx10 depends on tx8 and tx9 and has a high fee*/ CMutableTransaction tx10 = CMutableTransaction(); tx10.vin.resize(2); tx10.vin[0].prevout = COutPoint(tx8.GetId(), 0); tx10.vin[0].scriptSig = CScript() << OP_11; tx10.vin[1].prevout = COutPoint(tx9.GetId(), 0); tx10.vin[1].scriptSig = CScript() << OP_11; tx10.vout.resize(1); tx10.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx10.vout[0].nValue = 10 * COIN; setAncestorsCalculated.clear(); BOOST_CHECK_EQUAL(pool.CalculateMemPoolAncestors( entry.Fee(200000 * SATOSHI).Time(4).FromTx(tx10), setAncestorsCalculated, 100, 1000000, 1000, 1000000, dummy), true); BOOST_CHECK(setAncestorsCalculated == setAncestors); pool.addUnchecked(tx10.GetId(), entry.FromTx(tx10), setAncestors); /** * tx8 and tx9 should both now be sorted higher * Final order after tx10 is added: * * tx3 = 0 (1) * tx5 = 10000 (1) * tx1 = 10000 (1) * tx4 = 15000 (1) * tx2 = 20000 (1) * tx9 = 200k (2 txs) * tx8 = 200k (2 txs) * tx10 = 200k (1 tx) * tx6 = 2.2M (5 txs) * tx7 = 2.2M (4 txs) */ // take out tx9, tx8 from the beginning sortedOrder.erase(sortedOrder.begin(), sortedOrder.begin() + 2); sortedOrder.insert(sortedOrder.begin() + 5, tx9.GetId().ToString()); sortedOrder.insert(sortedOrder.begin() + 6, tx8.GetId().ToString()); // tx10 is just before tx6 sortedOrder.insert(sortedOrder.begin() + 7, tx10.GetId().ToString()); CheckSort<descendant_score>(pool, sortedOrder, "MempoolIndexingTest6"); // there should be 10 transactions in the mempool BOOST_CHECK_EQUAL(pool.size(), 10UL); // Now try removing tx10 and verify the sort order returns to normal pool.removeRecursive(pool.mapTx.find(tx10.GetId())->GetTx()); CheckSort<descendant_score>(pool, snapshotOrder, "MempoolIndexingTest7"); pool.removeRecursive(pool.mapTx.find(tx9.GetId())->GetTx()); pool.removeRecursive(pool.mapTx.find(tx8.GetId())->GetTx()); } BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) { CTxMemPool pool; + LOCK(pool.cs); TestMemPoolEntryHelper entry; 
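The descendant_score ordering exercised above illustrates one rule: a transaction is ranked by the better of its own feerate and its feerate including in-mempool descendants, which is why zero-fee tx6 jumps up the ordering as soon as high-fee tx7 spends it. A simplified model of that ordering (assumption: this ignores the modified-fee handling and tie-breaking of the real comparator in txmempool.h):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct ToyEntry {
    std::string name;
    double fee, size;         // the entry itself
    double descFee, descSize; // including in-mempool descendants
    double Score() const { return std::max(fee / size, descFee / descSize); }
};

int main() {
    // A zero-fee parent with a 2M-satoshi child, mirroring MempoolIndexingTest3:
    // tx6/tx7 end up above every single transaction.
    std::vector<ToyEntry> entries{
        {"tx2", 20000, 100, 20000, 100},
        {"tx3", 0, 100, 0, 100},
        {"tx6", 0, 100, 2000000, 200}, // zero fee, high-fee descendant
        {"tx7", 2000000, 100, 2000000, 100},
    };
    std::sort(entries.begin(), entries.end(),
              [](const ToyEntry &a, const ToyEntry &b) { return a.Score() < b.Score(); });
    for (const auto &e : entries) {
        std::printf("%s %.0f\n", e.name.c_str(), e.Score()); // tx3, tx2, tx6, tx7
    }
    return 0;
}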
/* 3rd highest fee */ CMutableTransaction tx1 = CMutableTransaction(); tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx1.GetId(), entry.Fee(10000 * SATOSHI).Priority(10.0).FromTx(tx1)); /* highest fee */ CMutableTransaction tx2 = CMutableTransaction(); tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx2.vout[0].nValue = 2 * COIN; pool.addUnchecked(tx2.GetId(), entry.Fee(20000 * SATOSHI).Priority(9.0).FromTx(tx2)); uint64_t tx2Size = CTransaction(tx2).GetTotalSize(); /* lowest fee */ CMutableTransaction tx3 = CMutableTransaction(); tx3.vout.resize(1); tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx3.vout[0].nValue = 5 * COIN; pool.addUnchecked(tx3.GetId(), entry.Fee(Amount::zero()).Priority(100.0).FromTx(tx3)); /* 2nd highest fee */ CMutableTransaction tx4 = CMutableTransaction(); tx4.vout.resize(1); tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx4.vout[0].nValue = 6 * COIN; pool.addUnchecked(tx4.GetId(), entry.Fee(15000 * SATOSHI).Priority(1.0).FromTx(tx4)); /* equal fee rate to tx1, but newer */ CMutableTransaction tx5 = CMutableTransaction(); tx5.vout.resize(1); tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx5.vout[0].nValue = 11 * COIN; pool.addUnchecked(tx5.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx5)); BOOST_CHECK_EQUAL(pool.size(), 5UL); std::vector<std::string> sortedOrder; sortedOrder.resize(5); sortedOrder[0] = tx2.GetId().ToString(); // 20000 sortedOrder[1] = tx4.GetId().ToString(); // 15000 // tx1 and tx5 are both 10000 // Ties are broken by hash, not timestamp, so determine which hash comes // first. if (tx1.GetId() < tx5.GetId()) { sortedOrder[2] = tx1.GetId().ToString(); sortedOrder[3] = tx5.GetId().ToString(); } else { sortedOrder[2] = tx5.GetId().ToString(); sortedOrder[3] = tx1.GetId().ToString(); } sortedOrder[4] = tx3.GetId().ToString(); // 0 - LOCK(pool.cs); CheckSort<ancestor_score>(pool, sortedOrder, "MempoolAncestorIndexingTest1"); /* low fee parent with high fee child */ /* tx6 (0) -> tx7 (high) */ CMutableTransaction tx6 = CMutableTransaction(); tx6.vout.resize(1); tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx6.vout[0].nValue = 20 * COIN; uint64_t tx6Size = CTransaction(tx6).GetTotalSize(); pool.addUnchecked(tx6.GetId(), entry.Fee(Amount::zero()).FromTx(tx6)); BOOST_CHECK_EQUAL(pool.size(), 6UL); // Ties are broken by hash if (tx3.GetId() < tx6.GetId()) { sortedOrder.push_back(tx6.GetId().ToString()); } else { sortedOrder.insert(sortedOrder.end() - 1, tx6.GetId().ToString()); } CheckSort<ancestor_score>(pool, sortedOrder, "MempoolAncestorIndexingTest2"); CMutableTransaction tx7 = CMutableTransaction(); tx7.vin.resize(1); tx7.vin[0].prevout = COutPoint(tx6.GetId(), 0); tx7.vin[0].scriptSig = CScript() << OP_11; tx7.vout.resize(1); tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx7.vout[0].nValue = 10 * COIN; uint64_t tx7Size = CTransaction(tx7).GetTotalSize(); /* set the fee to just below tx2's feerate when including ancestor */ Amount fee = int64_t((20000 / tx2Size) * (tx7Size + tx6Size) - 1) * SATOSHI; // CTxMemPoolEntry entry7(tx7, fee, 2, 10.0, 1, true); pool.addUnchecked(tx7.GetId(), entry.Fee(Amount(fee)).FromTx(tx7)); BOOST_CHECK_EQUAL(pool.size(), 7UL); sortedOrder.insert(sortedOrder.begin() + 1, tx7.GetId().ToString()); CheckSort<ancestor_score>(pool, sortedOrder, "MempoolAncestorIndexingTest3"); /* after tx6 is mined, tx7 should move up in the sort */ 
std::vector<CTransactionRef> vtx; vtx.push_back(MakeTransactionRef(tx6)); pool.removeForBlock(vtx, 1); sortedOrder.erase(sortedOrder.begin() + 1); // Ties are broken by hash if (tx3.GetId() < tx6.GetId()) { sortedOrder.pop_back(); } else { sortedOrder.erase(sortedOrder.end() - 2); } sortedOrder.insert(sortedOrder.begin(), tx7.GetId().ToString()); CheckSort<ancestor_score>(pool, sortedOrder, "MempoolAncestorIndexingTest4"); // High-fee parent, low-fee child // tx7 -> tx8 CMutableTransaction tx8 = CMutableTransaction(); tx8.vin.resize(1); tx8.vin[0].prevout = COutPoint(tx7.GetId(), 0); tx8.vin[0].scriptSig = CScript() << OP_11; tx8.vout.resize(1); tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx8.vout[0].nValue = 10 * COIN; // Check that we sort by min(feerate, ancestor_feerate): // set the fee so that the ancestor feerate is above tx1/5, // but the transaction's own feerate is lower pool.addUnchecked(tx8.GetId(), entry.Fee(Amount(5000 * SATOSHI)).FromTx(tx8)); sortedOrder.insert(sortedOrder.end() - 1, tx8.GetId().ToString()); CheckSort<ancestor_score>(pool, sortedOrder, "MempoolAncestorIndexingTest5"); } BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) { CTxMemPool pool; + LOCK(pool.cs); TestMemPoolEntryHelper entry; entry.dPriority = 10.0; Amount feeIncrement = MEMPOOL_FULL_FEE_INCREMENT.GetFeePerK(); CMutableTransaction tx1 = CMutableTransaction(); tx1.vin.resize(1); tx1.vin[0].scriptSig = CScript() << OP_1; tx1.vout.resize(1); tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL; tx1.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx1.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx1, &pool)); CMutableTransaction tx2 = CMutableTransaction(); tx2.vin.resize(1); tx2.vin[0].scriptSig = CScript() << OP_2; tx2.vout.resize(1); tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL; tx2.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx2.GetId(), entry.Fee(5000 * SATOSHI).FromTx(tx2, &pool)); // should do nothing pool.TrimToSize(pool.DynamicMemoryUsage()); BOOST_CHECK(pool.exists(tx1.GetId())); BOOST_CHECK(pool.exists(tx2.GetId())); // should remove the lower-feerate transaction pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); BOOST_CHECK(pool.exists(tx1.GetId())); BOOST_CHECK(!pool.exists(tx2.GetId())); pool.addUnchecked(tx2.GetId(), entry.FromTx(tx2, &pool)); CMutableTransaction tx3 = CMutableTransaction(); tx3.vin.resize(1); tx3.vin[0].prevout = COutPoint(tx2.GetId(), 0); tx3.vin[0].scriptSig = CScript() << OP_2; tx3.vout.resize(1); tx3.vout[0].scriptPubKey = CScript() << OP_3 << OP_EQUAL; tx3.vout[0].nValue = 10 * COIN; pool.addUnchecked(tx3.GetId(), entry.Fee(20000 * SATOSHI).FromTx(tx3, &pool)); // tx3 should pay for tx2 (CPFP) pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); BOOST_CHECK(!pool.exists(tx1.GetId())); BOOST_CHECK(pool.exists(tx2.GetId())); BOOST_CHECK(pool.exists(tx3.GetId())); // mempool is limited to tx1's size in memory usage, so nothing fits pool.TrimToSize(CTransaction(tx1).GetTotalSize()); BOOST_CHECK(!pool.exists(tx1.GetId())); BOOST_CHECK(!pool.exists(tx2.GetId())); BOOST_CHECK(!pool.exists(tx3.GetId())); CFeeRate maxFeeRateRemoved(25000 * SATOSHI, CTransaction(tx3).GetTotalSize() + CTransaction(tx2).GetTotalSize()); BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + feeIncrement); CMutableTransaction tx4 = CMutableTransaction(); tx4.vin.resize(2); tx4.vin[0].prevout = COutPoint(); tx4.vin[0].scriptSig = CScript() << OP_4; tx4.vin[1].prevout = COutPoint(); tx4.vin[1].scriptSig = CScript() << OP_4; tx4.vout.resize(2); 
tx4.vout[0].scriptPubKey = CScript() << OP_4 << OP_EQUAL; tx4.vout[0].nValue = 10 * COIN; tx4.vout[1].scriptPubKey = CScript() << OP_4 << OP_EQUAL; tx4.vout[1].nValue = 10 * COIN; CMutableTransaction tx5 = CMutableTransaction(); tx5.vin.resize(2); tx5.vin[0].prevout = COutPoint(tx4.GetId(), 0); tx5.vin[0].scriptSig = CScript() << OP_4; tx5.vin[1].prevout = COutPoint(); tx5.vin[1].scriptSig = CScript() << OP_5; tx5.vout.resize(2); tx5.vout[0].scriptPubKey = CScript() << OP_5 << OP_EQUAL; tx5.vout[0].nValue = 10 * COIN; tx5.vout[1].scriptPubKey = CScript() << OP_5 << OP_EQUAL; tx5.vout[1].nValue = 10 * COIN; CMutableTransaction tx6 = CMutableTransaction(); tx6.vin.resize(2); tx6.vin[0].prevout = COutPoint(tx4.GetId(), 1); tx6.vin[0].scriptSig = CScript() << OP_4; tx6.vin[1].prevout = COutPoint(); tx6.vin[1].scriptSig = CScript() << OP_6; tx6.vout.resize(2); tx6.vout[0].scriptPubKey = CScript() << OP_6 << OP_EQUAL; tx6.vout[0].nValue = 10 * COIN; tx6.vout[1].scriptPubKey = CScript() << OP_6 << OP_EQUAL; tx6.vout[1].nValue = 10 * COIN; CMutableTransaction tx7 = CMutableTransaction(); tx7.vin.resize(2); tx7.vin[0].prevout = COutPoint(tx5.GetId(), 0); tx7.vin[0].scriptSig = CScript() << OP_5; tx7.vin[1].prevout = COutPoint(tx6.GetId(), 0); tx7.vin[1].scriptSig = CScript() << OP_6; tx7.vout.resize(2); tx7.vout[0].scriptPubKey = CScript() << OP_7 << OP_EQUAL; tx7.vout[0].nValue = 10 * COIN; tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL; tx7.vout[1].nValue = 10 * COIN; pool.addUnchecked(tx4.GetId(), entry.Fee(7000 * SATOSHI).FromTx(tx4, &pool)); pool.addUnchecked(tx5.GetId(), entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool)); pool.addUnchecked(tx6.GetId(), entry.Fee(1100 * SATOSHI).FromTx(tx6, &pool)); pool.addUnchecked(tx7.GetId(), entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool)); // we only require this to remove, at max, 2 txn, because it's not clear // what we're really optimizing for aside from that pool.TrimToSize(pool.DynamicMemoryUsage() - 1); BOOST_CHECK(pool.exists(tx4.GetId())); BOOST_CHECK(pool.exists(tx6.GetId())); BOOST_CHECK(!pool.exists(tx7.GetId())); if (!pool.exists(tx5.GetId())) pool.addUnchecked(tx5.GetId(), entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool)); pool.addUnchecked(tx7.GetId(), entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool)); // should maximize mempool size by only removing 5/7 pool.TrimToSize(pool.DynamicMemoryUsage() / 2); BOOST_CHECK(pool.exists(tx4.GetId())); BOOST_CHECK(!pool.exists(tx5.GetId())); BOOST_CHECK(pool.exists(tx6.GetId())); BOOST_CHECK(!pool.exists(tx7.GetId())); pool.addUnchecked(tx5.GetId(), entry.Fee(1000 * SATOSHI).FromTx(tx5, &pool)); pool.addUnchecked(tx7.GetId(), entry.Fee(9000 * SATOSHI).FromTx(tx7, &pool)); std::vector<CTransactionRef> vtx; SetMockTime(42); SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE); BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + feeIncrement); // ... we should keep the same min fee until we get a block pool.removeForBlock(vtx, 1); SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE); BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 2); // ... then feerate should drop 1/2 each halflife SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE / 2); BOOST_CHECK_EQUAL( pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(), (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 4); // ... 
with a 1/2 halflife when mempool is < 1/2 its target size SetMockTime(42 + 2 * CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE / 2 + CTxMemPool::ROLLING_FEE_HALFLIFE / 4); BOOST_CHECK_EQUAL( pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(), (maxFeeRateRemoved.GetFeePerK() + feeIncrement) / 8 + SATOSHI); // ... with a 1/4 halflife when mempool is < 1/4 its target size SetMockTime(0); } // expectedSize can be smaller than correctlyOrderedIds.size(), since we // might be testing intermediary states. Just avoiding some slice operations, void CheckDisconnectPoolOrder(DisconnectedBlockTransactions &disconnectPool, std::vector<TxId> correctlyOrderedIds, unsigned int expectedSize) { int i = 0; BOOST_CHECK_EQUAL(disconnectPool.GetQueuedTx().size(), expectedSize); // Txns in queuedTx's insertion_order index are sorted from children to // parent txn for (const CTransactionRef &tx : reverse_iterate(disconnectPool.GetQueuedTx().get<insertion_order>())) { BOOST_CHECK(tx->GetId() == correctlyOrderedIds[i]); i++; } } typedef std::vector<CMutableTransaction *> vecptx; BOOST_AUTO_TEST_CASE(TestImportMempool) { CMutableTransaction chainedTxn[5]; std::vector<TxId> correctlyOrderedIds; COutPoint lastOutpoint; // Construct a chain of 5 transactions for (int i = 0; i < 5; i++) { chainedTxn[i].vin.emplace_back(lastOutpoint); chainedTxn[i].vout.emplace_back(10 * SATOSHI, CScript() << OP_TRUE); correctlyOrderedIds.push_back(chainedTxn[i].GetId()); lastOutpoint = COutPoint(correctlyOrderedIds[i], 0); } // The first 3 txns simulate once confirmed transactions that have been // disconnected. We test 3 different orders: in order, one case of mixed // order and inverted order. vecptx disconnectedTxnsInOrder = {&chainedTxn[0], &chainedTxn[1], &chainedTxn[2]}; vecptx disconnectedTxnsMixedOrder = {&chainedTxn[1], &chainedTxn[2], &chainedTxn[0]}; vecptx disconnectedTxnsInvertedOrder = {&chainedTxn[2], &chainedTxn[1], &chainedTxn[0]}; // The last 2 txns simulate a chain of unconfirmed transactions in the // mempool. We test 2 different orders: in and out of order. vecptx unconfTxnsInOrder = {&chainedTxn[3], &chainedTxn[4]}; vecptx unconfTxnsOutOfOrder = {&chainedTxn[4], &chainedTxn[3]}; // Now we test all combinations of the previously defined orders for // disconnected and unconfirmed txns. The expected outcome is to have these // transactions in the correct order in queuedTx, as defined in // correctlyOrderedIds. for (auto &disconnectedTxns : {disconnectedTxnsInOrder, disconnectedTxnsMixedOrder, disconnectedTxnsInvertedOrder}) { for (auto &unconfTxns : {unconfTxnsInOrder, unconfTxnsOutOfOrder}) { // addForBlock inserts disconnectTxns in disconnectPool. 
They // simulate transactions that were once confirmed in a block std::vector<CTransactionRef> vtx; for (auto tx : disconnectedTxns) { vtx.push_back(MakeTransactionRef(*tx)); } DisconnectedBlockTransactions disconnectPool; disconnectPool.addForBlock(vtx); CheckDisconnectPoolOrder(disconnectPool, correctlyOrderedIds, disconnectedTxns.size()); // If the mempool is empty, importMempool doesn't change // disconnectPool CTxMemPool testPool; disconnectPool.importMempool(testPool); CheckDisconnectPoolOrder(disconnectPool, correctlyOrderedIds, disconnectedTxns.size()); // Add all unconfirmed transactions in testPool for (auto tx : unconfTxns) { TestMemPoolEntryHelper entry; testPool.addUnchecked(tx->GetId(), entry.FromTx(*tx)); } // Now we test importMempool with a non empty mempool disconnectPool.importMempool(testPool); CheckDisconnectPoolOrder(disconnectPool, correctlyOrderedIds, disconnectedTxns.size() + unconfTxns.size()); // We must clear disconnectPool to not trigger the assert in its // destructor disconnectPool.clear(); } } } inline CTransactionRef make_tx(std::vector<Amount> &&output_values, std::vector<CTransactionRef> &&inputs = std::vector<CTransactionRef>(), std::vector<uint32_t> &&input_indices = std::vector<uint32_t>()) { CMutableTransaction tx = CMutableTransaction(); tx.vin.resize(inputs.size()); tx.vout.resize(output_values.size()); for (size_t i = 0; i < inputs.size(); ++i) { tx.vin[i].prevout = COutPoint(inputs[i]->GetId(), input_indices.size() > i ? input_indices[i] : 0); } for (size_t i = 0; i < output_values.size(); ++i) { tx.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL; tx.vout[i].nValue = output_values[i]; } return MakeTransactionRef(tx); } #define MK_OUTPUTS(amounts...) \ std::vector<Amount> { amounts } #define MK_INPUTS(txs...) \ std::vector<CTransactionRef> { txs } #define MK_INPUT_IDX(idxes...) 
\ std::vector<uint32_t> { idxes } BOOST_AUTO_TEST_CASE(MempoolAncestryTests) { size_t ancestors, descendants; CTxMemPool pool; + LOCK(pool.cs); TestMemPoolEntryHelper entry; /* Base transaction */ // // [tx1] // CTransactionRef tx1 = make_tx(MK_OUTPUTS(10 * COIN)); pool.addUnchecked(tx1->GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx1)); // Ancestors / descendants should be 1 / 1 (itself / itself) pool.GetTransactionAncestry(tx1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 1ULL); /* Child transaction */ // // [tx1].0 <- [tx2] // CTransactionRef tx2 = make_tx(MK_OUTPUTS(495 * CENT, 5 * COIN), MK_INPUTS(tx1)); pool.addUnchecked(tx2->GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx2)); // Ancestors / descendants should be: // transaction ancestors descendants // ============ =========== =========== // tx1 1 (tx1) 2 (tx1,2) // tx2 2 (tx1,2) 2 (tx1,2) pool.GetTransactionAncestry(tx1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 2ULL); pool.GetTransactionAncestry(tx2->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 2ULL); BOOST_CHECK_EQUAL(descendants, 2ULL); /* Grand-child 1 */ // // [tx1].0 <- [tx2].0 <- [tx3] // CTransactionRef tx3 = make_tx(MK_OUTPUTS(290 * CENT, 200 * CENT), MK_INPUTS(tx2)); pool.addUnchecked(tx3->GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx3)); // Ancestors / descendants should be: // transaction ancestors descendants // ============ =========== =========== // tx1 1 (tx1) 3 (tx1,2,3) // tx2 2 (tx1,2) 3 (tx1,2,3) // tx3 3 (tx1,2,3) 3 (tx1,2,3) pool.GetTransactionAncestry(tx1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 3ULL); pool.GetTransactionAncestry(tx2->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 2ULL); BOOST_CHECK_EQUAL(descendants, 3ULL); pool.GetTransactionAncestry(tx3->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 3ULL); /* Grand-child 2 */ // // [tx1].0 <- [tx2].0 <- [tx3] // | // \---1 <- [tx4] // CTransactionRef tx4 = make_tx(MK_OUTPUTS(290 * CENT, 250 * CENT), MK_INPUTS(tx2), MK_INPUT_IDX(1)); pool.addUnchecked(tx4->GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx4)); // Ancestors / descendants should be: // transaction ancestors descendants // ============ =========== =========== // tx1 1 (tx1) 4 (tx1,2,3,4) // tx2 2 (tx1,2) 4 (tx1,2,3,4) // tx3 3 (tx1,2,3) 4 (tx1,2,3,4) // tx4 3 (tx1,2,4) 4 (tx1,2,3,4) pool.GetTransactionAncestry(tx1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 4ULL); pool.GetTransactionAncestry(tx2->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 2ULL); BOOST_CHECK_EQUAL(descendants, 4ULL); pool.GetTransactionAncestry(tx3->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 4ULL); pool.GetTransactionAncestry(tx4->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 4ULL); /* Make an alternate branch that is longer and connect it to tx3 */ // // [ty1].0 <- [ty2].0 <- [ty3].0 <- [ty4].0 <- [ty5].0 // | // [tx1].0 <- [tx2].0 <- [tx3].0 <- [ty6] --->--/ // | // \---1 <- [tx4] // CTransactionRef ty1, ty2, ty3, ty4, ty5; CTransactionRef *ty[5] = {&ty1, &ty2, &ty3, &ty4, &ty5}; Amount v = 5 * COIN; for (uint64_t i = 0; i < 5; i++) { CTransactionRef &tyi = *ty[i]; tyi = make_tx(MK_OUTPUTS(v), i > 0 ? 
MK_INPUTS(*ty[i - 1]) : std::vector<CTransactionRef>()); v -= 50 * CENT; pool.addUnchecked(tyi->GetId(), entry.Fee(10000 * SATOSHI).FromTx(tyi)); pool.GetTransactionAncestry(tyi->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, i + 1); BOOST_CHECK_EQUAL(descendants, i + 1); } CTransactionRef ty6 = make_tx(MK_OUTPUTS(5 * COIN), MK_INPUTS(tx3, ty5)); pool.addUnchecked(ty6->GetId(), entry.Fee(10000 * SATOSHI).FromTx(ty6)); // Ancestors / descendants should be: // transaction ancestors descendants // ============ =================== =========== // tx1 1 (tx1) 5 (tx1,2,3,4, ty6) // tx2 2 (tx1,2) 5 (tx1,2,3,4, ty6) // tx3 3 (tx1,2,3) 5 (tx1,2,3,4, ty6) // tx4 3 (tx1,2,4) 5 (tx1,2,3,4, ty6) // ty1 1 (ty1) 6 (ty1,2,3,4,5,6) // ty2 2 (ty1,2) 6 (ty1,2,3,4,5,6) // ty3 3 (ty1,2,3) 6 (ty1,2,3,4,5,6) // ty4 4 (y1234) 6 (ty1,2,3,4,5,6) // ty5 5 (y12345) 6 (ty1,2,3,4,5,6) // ty6 9 (tx123, ty123456) 6 (ty1,2,3,4,5,6) pool.GetTransactionAncestry(tx1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 5ULL); pool.GetTransactionAncestry(tx2->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 2ULL); BOOST_CHECK_EQUAL(descendants, 5ULL); pool.GetTransactionAncestry(tx3->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 5ULL); pool.GetTransactionAncestry(tx4->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 5ULL); pool.GetTransactionAncestry(ty1->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 1ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); pool.GetTransactionAncestry(ty2->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 2ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); pool.GetTransactionAncestry(ty3->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 3ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); pool.GetTransactionAncestry(ty4->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 4ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); pool.GetTransactionAncestry(ty5->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 5ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); pool.GetTransactionAncestry(ty6->GetId(), ancestors, descendants); BOOST_CHECK_EQUAL(ancestors, 9ULL); BOOST_CHECK_EQUAL(descendants, 6ULL); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index 818f4f17e..7acdc37b5 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -1,788 +1,790 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
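The ancestor counts asserted in MempoolAncestryTests are set unions, not path lengths: ty6 reports 9 ancestors because the tx1..tx3 branch and the ty1..ty5 branch are merged with duplicates removed and ty6 itself is included. A toy reconstruction of just that counting, built on the same graph as the test's diagram:

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>; // tx -> its parents

static std::set<std::string> AncestorsOf(const Graph &parents, const std::string &tx) {
    std::set<std::string> result{tx}; // the count includes the transaction itself
    for (const auto &parent : parents.at(tx)) {
        auto up = AncestorsOf(parents, parent);
        result.insert(up.begin(), up.end());
    }
    return result;
}

int main() {
    // tx1 <- tx2 <- {tx3, tx4};  ty1 <- ty2 <- ... <- ty5;  ty6 spends tx3 and ty5.
    Graph parents{
        {"tx1", {}}, {"tx2", {"tx1"}}, {"tx3", {"tx2"}}, {"tx4", {"tx2"}},
        {"ty1", {}}, {"ty2", {"ty1"}}, {"ty3", {"ty2"}}, {"ty4", {"ty3"}},
        {"ty5", {"ty4"}}, {"ty6", {"tx3", "ty5"}},
    };
    assert(AncestorsOf(parents, "tx4").size() == 3); // tx1, tx2, tx4
    assert(AncestorsOf(parents, "ty6").size() == 9); // tx1..3 + ty1..5 + ty6
    return 0;
}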
#include <miner.h> #include <chain.h> #include <chainparams.h> #include <coins.h> #include <config.h> #include <consensus/consensus.h> #include <consensus/merkle.h> #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <policy/policy.h> #include <pubkey.h> #include <script/standard.h> #include <txmempool.h> #include <uint256.h> #include <util/strencodings.h> #include <util/system.h> #include <validation.h> #include <test/test_bitcoin.h> #include <boost/test/unit_test.hpp> #include <memory> BOOST_FIXTURE_TEST_SUITE(miner_tests, TestingSetup) // BOOST_CHECK_EXCEPTION predicates to check the specific validation error class HasReason { public: HasReason(const std::string &reason) : m_reason(reason) {} bool operator()(const std::runtime_error &e) const { return std::string(e.what()).find(m_reason) != std::string::npos; }; private: const std::string m_reason; }; static CFeeRate blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB); static BlockAssembler AssemblerForTest(const CChainParams ¶ms, const CTxMemPool &mempool) { BlockAssembler::Options options; options.blockMinFeeRate = blockMinFeeRate; options.nBlockPriorityPercentage = 0; return BlockAssembler(params, mempool, options); } static struct { uint8_t extranonce; uint32_t nonce; } blockinfo[] = { {4, 0xa4a3e223}, {2, 0x15c32f9e}, {1, 0x0375b547}, {1, 0x7004a8a5}, {2, 0xce440296}, {2, 0x52cfe198}, {1, 0x77a72cd0}, {2, 0xbb5d6f84}, {2, 0x83f30c2c}, {1, 0x48a73d5b}, {1, 0xef7dcd01}, {2, 0x6809c6c4}, {2, 0x0883ab3c}, {1, 0x087bbbe2}, {2, 0x2104a814}, {2, 0xdffb6daa}, {1, 0xee8a0a08}, {2, 0xba4237c1}, {1, 0xa70349dc}, {1, 0x344722bb}, {3, 0xd6294733}, {2, 0xec9f5c94}, {2, 0xca2fbc28}, {1, 0x6ba4f406}, {2, 0x015d4532}, {1, 0x6e119b7c}, {2, 0x43e8f314}, {2, 0x27962f38}, {2, 0xb571b51b}, {2, 0xb36bee23}, {2, 0xd17924a8}, {2, 0x6bc212d9}, {1, 0x630d4948}, {2, 0x9a4c4ebb}, {2, 0x554be537}, {1, 0xd63ddfc7}, {2, 0xa10acc11}, {1, 0x759a8363}, {2, 0xfb73090d}, {1, 0xe82c6a34}, {1, 0xe33e92d7}, {3, 0x658ef5cb}, {2, 0xba32ff22}, {5, 0x0227a10c}, {1, 0xa9a70155}, {5, 0xd096d809}, {1, 0x37176174}, {1, 0x830b8d0f}, {1, 0xc6e3910e}, {2, 0x823f3ca8}, {1, 0x99850849}, {1, 0x7521fb81}, {1, 0xaacaabab}, {1, 0xd645a2eb}, {5, 0x7aea1781}, {5, 0x9d6e4b78}, {1, 0x4ce90fd8}, {1, 0xabdc832d}, {6, 0x4a34f32a}, {2, 0xf2524c1c}, {2, 0x1bbeb08a}, {1, 0xad47f480}, {1, 0x9f026aeb}, {1, 0x15a95049}, {2, 0xd1cb95b2}, {2, 0xf84bbda5}, {1, 0x0fa62cd1}, {1, 0xe05f9169}, {1, 0x78d194a9}, {5, 0x3e38147b}, {5, 0x737ba0d4}, {1, 0x63378e10}, {1, 0x6d5f91cf}, {2, 0x88612eb8}, {2, 0xe9639484}, {1, 0xb7fabc9d}, {2, 0x19b01592}, {1, 0x5a90dd31}, {2, 0x5bd7e028}, {2, 0x94d00323}, {1, 0xa9b9c01a}, {1, 0x3a40de61}, {1, 0x56e7eec7}, {5, 0x859f7ef6}, {1, 0xfd8e5630}, {1, 0x2b0c9f7f}, {1, 0xba700e26}, {1, 0x7170a408}, {1, 0x70de86a8}, {1, 0x74d64cd5}, {1, 0x49e738a1}, {2, 0x6910b602}, {0, 0x643c565f}, {1, 0x54264b3f}, {2, 0x97ea6396}, {2, 0x55174459}, {2, 0x03e8779a}, {1, 0x98f34d8f}, {1, 0xc07b2b07}, {1, 0xdfe29668}, {1, 0x3141c7c1}, {1, 0xb3b595f4}, {1, 0x735abf08}, {5, 0x623bfbce}, {2, 0xd351e722}, {1, 0xf4ca48c9}, {1, 0x5b19c670}, {1, 0xa164bf0e}, {2, 0xbbbeb305}, {2, 0xfe1c810a}, }; static CBlockIndex CreateBlockIndex(int nHeight) { CBlockIndex index; index.nHeight = nHeight; index.pprev = chainActive.Tip(); return index; } static bool TestSequenceLocks(const CTransaction &tx, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { LOCK(::g_mempool.cs); return CheckSequenceLocks(::g_mempool, tx, flags); } // Test suite for ancestor feerate transaction selection. 
// Implemented as an additional function, rather than a separate test case, to // allow reusing the blockchain created in CreateNewBlock_validity. // Note that this test assumes blockprioritypercentage is 0. static void TestPackageSelection(const CChainParams &chainparams, - CScript scriptPubKey, - std::vector<CTransactionRef> &txFirst) { + const CScript &scriptPubKey, + const std::vector<CTransactionRef> &txFirst) + EXCLUSIVE_LOCKS_REQUIRED(::g_mempool.cs) { // Test the ancestor feerate transaction selection. TestMemPoolEntryHelper entry; // Test that a medium fee transaction will be selected after a higher fee // rate package with a low fee rate parent. CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout.resize(1); tx.vout[0].nValue = int64_t(5000000000LL - 1000) * SATOSHI; // This tx has a low fee: 1000 satoshis. // Save this txid for later use. TxId parentTxId = tx.GetId(); g_mempool.addUnchecked(parentTxId, entry.Fee(1000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a medium fee: 10000 satoshis. tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); tx.vout[0].nValue = int64_t(5000000000LL - 10000) * SATOSHI; TxId mediumFeeTxId = tx.GetId(); g_mempool.addUnchecked(mediumFeeTxId, entry.Fee(10000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(true) .FromTx(tx)); // This tx has a high fee, but depends on the first transaction. tx.vin[0].prevout = COutPoint(parentTxId, 0); // 50k satoshi fee. tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI; TxId highFeeTxId = tx.GetId(); g_mempool.addUnchecked(highFeeTxId, entry.Fee(50000 * SATOSHI) .Time(GetTime()) .SpendsCoinbase(false) .FromTx(tx)); std::unique_ptr<CBlockTemplate> pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[1]->GetId() == parentTxId); BOOST_CHECK(pblocktemplate->block.vtx[2]->GetId() == highFeeTxId); BOOST_CHECK(pblocktemplate->block.vtx[3]->GetId() == mediumFeeTxId); // Test that a package below the block min tx fee doesn't get included tx.vin[0].prevout = COutPoint(highFeeTxId, 0); // 0 fee. tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI; TxId freeTxId = tx.GetId(); g_mempool.addUnchecked(freeTxId, entry.Fee(Amount::zero()).FromTx(tx)); size_t freeTxSize = GetVirtualTransactionSize(CTransaction(tx)); // Calculate a fee on child transaction that will put the package just // below the block min tx fee (assuming 1 child tx of the same size). Amount feeToUse = blockMinFeeRate.GetFee(2 * freeTxSize) - SATOSHI; tx.vin[0].prevout = COutPoint(freeTxId, 0); tx.vout[0].nValue = int64_t(5000000000LL - 1000 - 50000) * SATOSHI - feeToUse; TxId lowFeeTxId = tx.GetId(); g_mempool.addUnchecked(lowFeeTxId, entry.Fee(feeToUse).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); // Verify that the free tx and the low fee tx didn't get selected. for (const auto &txn : pblocktemplate->block.vtx) { BOOST_CHECK(txn->GetId() != freeTxId); BOOST_CHECK(txn->GetId() != lowFeeTxId); } // Test that packages above the min relay fee do get included, even if one // of the transactions is below the min relay fee. Remove the low fee // transaction and replace with a higher fee transaction g_mempool.removeRecursive(CTransaction(tx)); // Now we should be just over the min relay fee. 
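// Illustrative sketch (not from this patch): the selection order checked in
// TestPackageSelection follows from ancestor-package feerates, i.e.
// (sum of fees) / (sum of sizes) over a tx plus its unselected in-mempool
// ancestors. The sizes below are made-up placeholders; only the comparisons
// matter, including why a package paying GetFee(2*size) - 1 sat stays out and
// a 2-sat bump brings it in.
#include <cassert>
#include <cstdint>

// Satoshis owed for a given size at a satoshis-per-1000-bytes rate.
static int64_t FeeForSize(int64_t satsPerKB, int64_t sizeBytes) {
    return satsPerKB * sizeBytes / 1000;
}

int main() {
    const int64_t txSize = 150;      // assumed per-tx size, in bytes
    const int64_t parentFee = 1000;  // low-fee parent
    const int64_t childFee = 50000;  // high-fee child
    const int64_t mediumFee = 10000; // independent medium-fee tx

    // The {parent, child} package outbids the medium-fee tx on its own,
    // so parent and child are mined first, as the test asserts.
    const double packageRate = double(parentFee + childFee) / (2 * txSize);
    const double mediumRate = double(mediumFee) / txSize;
    assert(packageRate > mediumRate);

    // A zero-fee tx plus a child paying one satoshi under the block min fee
    // for the combined size is excluded; two more satoshis tip it just over.
    const int64_t blockMinPerKB = 1000; // placeholder threshold
    const int64_t threshold = FeeForSize(blockMinPerKB, 2 * txSize);
    assert(0 + (threshold - 1) < threshold);     // package excluded
    assert(0 + (threshold - 1 + 2) > threshold); // package included
    return 0;
}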
tx.vout[0].nValue -= 2 * SATOSHI; lowFeeTxId = tx.GetId(); g_mempool.addUnchecked(lowFeeTxId, entry.Fee(feeToUse + 2 * SATOSHI).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[4]->GetId() == freeTxId); BOOST_CHECK(pblocktemplate->block.vtx[5]->GetId() == lowFeeTxId); // Test that transaction selection properly updates ancestor fee // calculations as ancestor transactions get included in a block. Add a // 0-fee transaction that has 2 outputs. tx.vin[0].prevout = COutPoint(txFirst[2]->GetId(), 0); tx.vout.resize(2); tx.vout[0].nValue = int64_t(5000000000LL - 100000000) * SATOSHI; // 1BCC output. tx.vout[1].nValue = 100000000 * SATOSHI; TxId freeTxId2 = tx.GetId(); g_mempool.addUnchecked( freeTxId2, entry.Fee(Amount::zero()).SpendsCoinbase(true).FromTx(tx)); // This tx can't be mined by itself. tx.vin[0].prevout = COutPoint(freeTxId2, 0); tx.vout.resize(1); feeToUse = blockMinFeeRate.GetFee(freeTxSize); tx.vout[0].nValue = int64_t(5000000000LL - 100000000) * SATOSHI - feeToUse; TxId lowFeeTxId2 = tx.GetId(); g_mempool.addUnchecked( lowFeeTxId2, entry.Fee(feeToUse).SpendsCoinbase(false).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); // Verify that this tx isn't selected. for (const auto &txn : pblocktemplate->block.vtx) { BOOST_CHECK(txn->GetId() != freeTxId2); BOOST_CHECK(txn->GetId() != lowFeeTxId2); } // This tx will be mineable, and should cause lowFeeTxId2 to be selected as // well. tx.vin[0].prevout = COutPoint(freeTxId2, 1); // 10k satoshi fee. tx.vout[0].nValue = (100000000 - 10000) * SATOSHI; g_mempool.addUnchecked(tx.GetId(), entry.Fee(10000 * SATOSHI).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate->block.vtx[8]->GetId() == lowFeeTxId2); } void TestCoinbaseMessageEB(uint64_t eb, std::string cbmsg) { GlobalConfig config; config.SetMaxBlockSize(eb); CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112" "de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(config, g_mempool).CreateNewBlock(scriptPubKey); CBlock *pblock = &pblocktemplate->block; // IncrementExtraNonce creates a valid coinbase and merkleRoot unsigned int extraNonce = 0; IncrementExtraNonce(pblock, chainActive.Tip(), config.GetMaxBlockSize(), extraNonce); unsigned int nHeight = chainActive.Tip()->nHeight + 1; std::vector<uint8_t> vec(cbmsg.begin(), cbmsg.end()); BOOST_CHECK(pblock->vtx[0]->vin[0].scriptSig == ((CScript() << nHeight << CScriptNum(extraNonce) << vec) + COINBASE_FLAGS)); } // Coinbase scriptSig has to contains the correct EB value // converted to MB, rounded down to the first decimal BOOST_AUTO_TEST_CASE(CheckCoinbase_EB) { TestCoinbaseMessageEB(1000001, "/EB1.0/"); TestCoinbaseMessageEB(2000000, "/EB2.0/"); TestCoinbaseMessageEB(8000000, "/EB8.0/"); TestCoinbaseMessageEB(8320000, "/EB8.3/"); } // NOTE: These tests rely on CreateNewBlock doing its own self-validation! BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) { // Note that by default, these tests run with size accounting enabled. 
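// Illustrative sketch (not from this patch): the /EBn.n/ strings expected by
// CheckCoinbase_EB above are the excessive block size expressed in megabytes
// and truncated (not rounded) to one decimal. FormatEBMessage is an invented
// helper name for that rule.
#include <cassert>
#include <cstdint>
#include <string>

static std::string FormatEBMessage(uint64_t ebBytes) {
    // Work in tenths of a megabyte so truncation falls out of integer division.
    uint64_t tenthsOfMB = ebBytes / 100000;
    return "/EB" + std::to_string(tenthsOfMB / 10) + "." +
           std::to_string(tenthsOfMB % 10) + "/";
}

int main() {
    assert(FormatEBMessage(1000001) == "/EB1.0/");
    assert(FormatEBMessage(8000000) == "/EB8.0/");
    assert(FormatEBMessage(8320000) == "/EB8.3/");
    return 0;
}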
GlobalConfig config; const CChainParams &chainparams = config.GetChainParams(); CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909" "a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112" "de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; std::unique_ptr<CBlockTemplate> pblocktemplate; CMutableTransaction tx; CScript script; TestMemPoolEntryHelper entry; entry.nFee = 11 * SATOSHI; entry.dPriority = 111.0; entry.nHeight = 11; fCheckpointsEnabled = false; // Simple block creation, nothing special yet: BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); // We can't make transactions until we have inputs. // Therefore, load 100 blocks :) int baseheight = 0; std::vector<CTransactionRef> txFirst; for (size_t i = 0; i < sizeof(blockinfo) / sizeof(*blockinfo); ++i) { // pointer for convenience. CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); pblock->nVersion = 1; pblock->nTime = chainActive.Tip()->GetMedianTimePast() + 1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); txCoinbase.vin[0].scriptSig.push_back(chainActive.Height()); txCoinbase.vout.resize(1); txCoinbase.vout[0].scriptPubKey = CScript(); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); if (txFirst.size() == 0) { baseheight = chainActive.Height(); } if (txFirst.size() < 4) { txFirst.push_back(pblock->vtx[0]); } pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); pblock->nNonce = blockinfo[i].nonce; } std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); BOOST_CHECK(ProcessNewBlock(config, shared_pblock, true, nullptr)); pblock->hashPrevBlock = pblock->GetHash(); } LOCK(cs_main); + LOCK(::g_mempool.cs); // Just to make sure we can still make simple blocks. BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); const Amount BLOCKSUBSIDY = 50 * COIN; const Amount LOWFEE = CENT; const Amount HIGHFEE = COIN; const Amount HIGHERFEE = 4 * COIN; // block sigops > limit: 1000 CHECKMULTISIG + 1 tx.vin.resize(1); // NOTE: OP_NOP is used to force 20 SigOps for the CHECKMULTISIG tx.vin[0].scriptSig = CScript() << OP_0 << OP_0 << OP_0 << OP_NOP << OP_CHECKMULTISIG << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; const TxId txid = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = i == 0; // If we don't set the # of sig ops in the CTxMemPoolEntry, template // creation fails. g_mempool.addUnchecked(txid, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout = COutPoint(txid, 0); } BOOST_CHECK_EXCEPTION( AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey), std::runtime_error, HasReason("bad-blk-sigops")); g_mempool.clear(); tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 1001; ++i) { tx.vout[0].nValue -= LOWFEE; const TxId txid = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = i == 0; // If we do set the # of sig ops in the CTxMemPoolEntry, template // creation passes. 
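// Illustrative arithmetic (not from this patch): when CHECKMULTISIG is not
// preceded by a small-number opcode (the OP_NOP above), legacy accounting
// charges it the worst case of 20 sigops, so 1001 such spends add up past the
// block sigop ceiling (assumed here to be the historical 20000-per-MB limit).
// That is why the first loop above must fail with "bad-blk-sigops"; when the
// mempool entries carry an explicit sigop cost, the assembler can stop adding
// transactions before crossing the limit instead of emitting an invalid
// template.
#include <cassert>

int main() {
    const int sigOpsPerCheckMultisig = 20; // worst-case legacy accounting
    const int txCount = 1001;
    const int assumedBlockSigOpLimit = 20000; // assumption, see note above
    assert(txCount * sigOpsPerCheckMultisig > assumedBlockSigOpLimit); // 20020
    return 0;
}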
g_mempool.addUnchecked(txid, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .SigOpsCost(80) .FromTx(tx)); tx.vin[0].prevout = COutPoint(txid, 0); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); g_mempool.clear(); // block size > limit tx.vin[0].scriptSig = CScript(); // 18 * (520char + DROP) + OP_1 = 9433 bytes std::vector<uint8_t> vchData(520); for (unsigned int i = 0; i < 18; ++i) { tx.vin[0].scriptSig << vchData << OP_DROP; } tx.vin[0].scriptSig << OP_1; tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY; for (unsigned int i = 0; i < 128; ++i) { tx.vout[0].nValue -= LOWFEE; const TxId txid = tx.GetId(); // Only first tx spends coinbase. bool spendsCoinbase = i == 0; g_mempool.addUnchecked(txid, entry.Fee(LOWFEE) .Time(GetTime()) .SpendsCoinbase(spendsCoinbase) .FromTx(tx)); tx.vin[0].prevout = COutPoint(txid, 0); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); g_mempool.clear(); // Orphan in mempool, template creation fails. TxId txid = tx.GetId(); g_mempool.addUnchecked(txid, entry.Fee(LOWFEE).Time(GetTime()).FromTx(tx)); BOOST_CHECK_EXCEPTION( AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey), std::runtime_error, HasReason("bad-txns-inputs-missingorspent")); g_mempool.clear(); // Child with higher priority than parent. tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout = COutPoint(txid, 0); tx.vin.resize(2); tx.vin[1].scriptSig = CScript() << OP_1; tx.vin[1].prevout = COutPoint(txFirst[0]->GetId(), 0); // First txn output + fresh coinbase - new txn fee. tx.vout[0].nValue = tx.vout[0].nValue + BLOCKSUBSIDY - HIGHERFEE; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(HIGHERFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); g_mempool.clear(); // Coinbase in mempool, template creation fails. tx.vin.resize(1); tx.vin[0].prevout = COutPoint(); tx.vin[0].scriptSig = CScript() << OP_0 << OP_1; tx.vout[0].nValue = Amount::zero(); txid = tx.GetId(); // Give it a fee so it'll get mined. g_mempool.addUnchecked( txid, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); // Should throw bad-tx-coinbase BOOST_CHECK_EXCEPTION( AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey), std::runtime_error, HasReason("bad-tx-coinbase")); g_mempool.clear(); // Double spend txn pair in mempool, template creation fails. tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vout[0].scriptPubKey = CScript() << OP_2; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); BOOST_CHECK_EXCEPTION( AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey), std::runtime_error, HasReason("bad-txns-inputs-missingorspent")); g_mempool.clear(); // Subsidy changing. 
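// Illustrative sketch (not from this patch): the dummy chains built below
// straddle the first halving boundary. The subsidy schedule halves the
// original 50-coin reward every 210000 blocks (the mainnet halving interval),
// so height 209999 still pays 50 coins and height 210000 pays 25. Whole coins
// are used here to keep the example free of the Amount type; the real code
// shifts the satoshi-denominated amount, which preserves fractional coins at
// later halvings.
#include <cassert>
#include <cstdint>

static int64_t SubsidyInCoins(int height, int halvingInterval = 210000) {
    int halvings = height / halvingInterval;
    if (halvings >= 64) return 0; // shifted away entirely
    return int64_t(50) >> halvings;
}

int main() {
    assert(SubsidyInCoins(209999) == 50); // last block before the halving
    assert(SubsidyInCoins(210000) == 25); // first block after it
    return 0;
}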
int nHeight = chainActive.Height(); // Create an actual 209999-long block chain (without valid blocks). while (chainActive.Tip()->nHeight < 209999) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); // Extend to a 210000-long block chain. while (chainActive.Tip()->nHeight < 210000) { CBlockIndex *prev = chainActive.Tip(); CBlockIndex *next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); pcoinsTip->SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); chainActive.SetTip(next); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); // Invalid p2sh txn in mempool, template creation fails tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; tx.vout[0].nValue = BLOCKSUBSIDY - LOWFEE; script = CScript() << OP_0; tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script)); txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); tx.vin[0].prevout = COutPoint(txid, 0); tx.vin[0].scriptSig = CScript() << std::vector<uint8_t>(script.begin(), script.end()); tx.vout[0].nValue -= LOWFEE; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); // Should throw blk-bad-inputs BOOST_CHECK_EXCEPTION( AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey), std::runtime_error, HasReason("blk-bad-inputs")); g_mempool.clear(); // Delete the dummy blocks again. while (chainActive.Tip()->nHeight > nHeight) { CBlockIndex *del = chainActive.Tip(); chainActive.SetTip(del->pprev); pcoinsTip->SetBestBlock(del->pprev->GetBlockHash()); delete del->phashBlock; delete del; } // non-final txs in mempool SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); uint32_t flags = LOCKTIME_VERIFY_SEQUENCE | LOCKTIME_MEDIAN_TIME_PAST; // height map std::vector<int> prevheights; // Relative height locked. tx.nVersion = 2; tx.vin.resize(1); prevheights.resize(1); // Only 1 transaction. tx.vin[0].prevout = COutPoint(txFirst[0]->GetId(), 0); tx.vin[0].scriptSig = CScript() << OP_1; // txFirst[0] is the 2nd block tx.vin[0].nSequence = chainActive.Tip()->nHeight + 1; prevheights[0] = baseheight + 1; tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY - HIGHFEE; tx.vout[0].scriptPubKey = CScript() << OP_1; tx.nLockTime = 0; txid = tx.GetId(); g_mempool.addUnchecked( txid, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); const Consensus::Params ¶ms = chainparams.GetConsensus(); { // Locktime passes. CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock( params, CTransaction(tx), state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass on 2nd block. BOOST_CHECK( SequenceLocks(CTransaction(tx), flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 2))); // Relative time locked. tx.vin[0].prevout = COutPoint(txFirst[1]->GetId(), 0); // txFirst[1] is the 3rd block. 
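// Illustrative sketch (not from this patch): the nSequence value built just
// below encodes a BIP68 relative time lock. Bit 31 disables the lock, bit 22
// selects time-based (rather than height-based) locking, and the low 16 bits
// hold the lock value in units of 2^9 = 512 seconds, which is why the test
// later nudges block times forward by 512 seconds to satisfy it.
#include <cassert>
#include <cstdint>

static const uint32_t SEQ_DISABLE_FLAG = 1u << 31;
static const uint32_t SEQ_TYPE_FLAG = 1u << 22; // time-based lock
static const uint32_t SEQ_GRANULARITY = 9;      // 512-second units
static const uint32_t SEQ_VALUE_MASK = 0x0000ffff;

// Convert a wait expressed in seconds into an nSequence relative time lock.
static uint32_t EncodeRelativeTimeLock(uint32_t seconds) {
    return SEQ_TYPE_FLAG | ((seconds >> SEQ_GRANULARITY) & SEQ_VALUE_MASK);
}

// Recover the (granularity-rounded) wait in seconds from an nSequence value.
static uint32_t DecodeRelativeTimeLockSeconds(uint32_t nSequence) {
    assert(!(nSequence & SEQ_DISABLE_FLAG) && (nSequence & SEQ_TYPE_FLAG));
    return (nSequence & SEQ_VALUE_MASK) << SEQ_GRANULARITY;
}

int main() {
    uint32_t seq = EncodeRelativeTimeLock(1024); // two 512-second units
    assert(DecodeRelativeTimeLockSeconds(seq) == 1024);
    assert(DecodeRelativeTimeLockSeconds(EncodeRelativeTimeLock(700)) == 512);
    return 0;
}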
tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | (((chainActive.Tip()->GetMedianTimePast() + 1 - chainActive[1]->GetMedianTimePast()) >> CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) + 1); prevheights[0] = baseheight + 2; txid = tx.GetId(); g_mempool.addUnchecked(txid, entry.Time(GetTime()).FromTx(tx)); { // Locktime passes. CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock( params, CTransaction(tx), state, flags)); } // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } // Sequence locks pass 512 seconds later. BOOST_CHECK( SequenceLocks(CTransaction(tx), flags, &prevheights, CreateBlockIndex(chainActive.Tip()->nHeight + 1))); for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Undo tricked MTP. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime -= 512; } // Absolute height locked. tx.vin[0].prevout = COutPoint(txFirst[2]->GetId(), 0); tx.vin[0].nSequence = CTxIn::SEQUENCE_FINAL - 1; prevheights[0] = baseheight + 3; tx.nLockTime = chainActive.Tip()->nHeight + 1; txid = tx.GetId(); g_mempool.addUnchecked(txid, entry.Time(GetTime()).FromTx(tx)); { // Locktime fails. CValidationState state; BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock( params, CTransaction(tx), state, flags)); BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal"); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); { // Locktime passes on 2nd block. CValidationState state; int64_t nMedianTimePast = chainActive.Tip()->GetMedianTimePast(); BOOST_CHECK(ContextualCheckTransaction( params, CTransaction(tx), state, chainActive.Tip()->nHeight + 2, nMedianTimePast, nMedianTimePast)); } // Absolute time locked. tx.vin[0].prevout = COutPoint(txFirst[3]->GetId(), 0); tx.nLockTime = chainActive.Tip()->GetMedianTimePast(); prevheights.resize(1); prevheights[0] = baseheight + 4; txid = tx.GetId(); g_mempool.addUnchecked(txid, entry.Time(GetTime()).FromTx(tx)); { // Locktime fails. CValidationState state; BOOST_CHECK(!ContextualCheckTransactionForCurrentBlock( params, CTransaction(tx), state, flags)); BOOST_CHECK_EQUAL(state.GetRejectReason(), "bad-txns-nonfinal"); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); { // Locktime passes 1 second later. CValidationState state; int64_t nMedianTimePast = chainActive.Tip()->GetMedianTimePast() + 1; BOOST_CHECK(ContextualCheckTransaction( params, CTransaction(tx), state, chainActive.Tip()->nHeight + 1, nMedianTimePast, nMedianTimePast)); } // mempool-dependent transactions (not added) tx.vin[0].prevout = COutPoint(txid, 0); prevheights[0] = chainActive.Tip()->nHeight + 1; tx.nLockTime = 0; tx.vin[0].nSequence = 0; { // Locktime passes. CValidationState state; BOOST_CHECK(ContextualCheckTransactionForCurrentBlock( params, CTransaction(tx), state, flags)); } // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); tx.vin[0].nSequence = 1; // Sequence locks fail. BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG; // Sequence locks pass. BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1; // Sequence locks fail. 
BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); pblocktemplate = AssemblerForTest(chainparams, g_mempool).CreateNewBlock(scriptPubKey); BOOST_CHECK(pblocktemplate); // None of the of the absolute height/time locked tx should have made it // into the template because we still check IsFinalTx in CreateNewBlock, but // relative locked txs will if inconsistently added to g_mempool. For now // these will still generate a valid template until BIP68 soft fork. BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 3UL); // However if we advance height by 1 and time by 512, all of them should be // mined. for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) { // Trick the MedianTimePast. chainActive.Tip()->GetAncestor(chainActive.Tip()->nHeight - i)->nTime += 512; } chainActive.Tip()->nHeight++; SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1); BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams, g_mempool) .CreateNewBlock(scriptPubKey)); BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5UL); chainActive.Tip()->nHeight--; SetMockTime(0); g_mempool.clear(); TestPackageSelection(chainparams, scriptPubKey, txFirst); fCheckpointsEnabled = true; } void CheckBlockMaxSize(const Config &config, uint64_t size, uint64_t expected) { gArgs.ForceSetArg("-blockmaxsize", std::to_string(size)); BlockAssembler ba(config, g_mempool); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), expected); } BOOST_AUTO_TEST_CASE(BlockAssembler_construction) { GlobalConfig config; // We are working on a fake chain and need to protect ourselves. LOCK(cs_main); // Test around historical 1MB (plus one byte because that's mandatory) config.SetMaxBlockSize(ONE_MEGABYTE + 1); CheckBlockMaxSize(config, 0, 1000); CheckBlockMaxSize(config, 1000, 1000); CheckBlockMaxSize(config, 1001, 1001); CheckBlockMaxSize(config, 12345, 12345); CheckBlockMaxSize(config, ONE_MEGABYTE - 1001, ONE_MEGABYTE - 1001); CheckBlockMaxSize(config, ONE_MEGABYTE - 1000, ONE_MEGABYTE - 1000); CheckBlockMaxSize(config, ONE_MEGABYTE - 999, ONE_MEGABYTE - 999); CheckBlockMaxSize(config, ONE_MEGABYTE, ONE_MEGABYTE - 999); // Test around default cap config.SetMaxBlockSize(DEFAULT_MAX_BLOCK_SIZE); // Now we can use the default max block size. 
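// Illustrative sketch (not from this patch): the expectations checked by
// CheckBlockMaxSize above and below are consistent with clamping the
// -blockmaxsize argument into [1000, configured max block size - 1000], the
// 1000-byte margin leaving room for the coinbase. ClampGeneratedSize is an
// invented name for this inferred rule, not the actual implementation.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint64_t ClampGeneratedSize(uint64_t requested, uint64_t maxBlockSize) {
    return std::max<uint64_t>(1000,
                              std::min(requested, maxBlockSize - 1000));
}

int main() {
    const uint64_t ONE_MEGABYTE = 1000000;
    // Mirrors the checks against a 1MB + 1 byte configured maximum.
    assert(ClampGeneratedSize(0, ONE_MEGABYTE + 1) == 1000);
    assert(ClampGeneratedSize(1001, ONE_MEGABYTE + 1) == 1001);
    assert(ClampGeneratedSize(ONE_MEGABYTE, ONE_MEGABYTE + 1) ==
           ONE_MEGABYTE - 999);
    return 0;
}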
CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 1001, DEFAULT_MAX_BLOCK_SIZE - 1001); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 1000, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE - 999, DEFAULT_MAX_BLOCK_SIZE - 1000); CheckBlockMaxSize(config, DEFAULT_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE - 1000); // If the parameter is not specified, we use // DEFAULT_MAX_GENERATED_BLOCK_SIZE { gArgs.ClearArg("-blockmaxsize"); BlockAssembler ba(config, g_mempool); BOOST_CHECK_EQUAL(ba.GetMaxGeneratedBlockSize(), DEFAULT_MAX_GENERATED_BLOCK_SIZE); } } BOOST_AUTO_TEST_CASE(TestCBlockTemplateEntry) { const CTransaction tx; CTransactionRef txRef = MakeTransactionRef(tx); CBlockTemplateEntry txEntry(txRef, 1 * SATOSHI, 200, 10); BOOST_CHECK_MESSAGE(txEntry.tx == txRef, "Transactions did not match"); BOOST_CHECK_EQUAL(txEntry.txFee, 1 * SATOSHI); BOOST_CHECK_EQUAL(txEntry.txSize, 200); BOOST_CHECK_EQUAL(txEntry.txSigOps, 10); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp index 8785559c0..303ceb8c8 100644 --- a/src/test/policyestimator_tests.cpp +++ b/src/test/policyestimator_tests.cpp @@ -1,102 +1,103 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <policy/fees.h> #include <policy/policy.h> #include <txmempool.h> #include <uint256.h> #include <util/system.h> #include <test/test_bitcoin.h> #include <boost/test/unit_test.hpp> BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(MempoolMinimumFeeEstimate) { CTxMemPool mpool; + LOCK(mpool.cs); TestMemPoolEntryHelper entry; // Create a transaction template CScript garbage; for (unsigned int i = 0; i < 128; i++) { garbage.push_back('X'); } CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig = garbage; tx.vout.resize(1); tx.vout[0].nValue = Amount::zero(); // Create a fake block std::vector<CTransactionRef> block; int blocknum = 0; // Loop through 200 blocks adding transactions so we have a estimateFee // that is calculable. while (blocknum < 200) { for (int64_t j = 0; j < 100; j++) { // make transaction unique tx.vin[0].nSequence = 10000 * blocknum + j; TxId txid = tx.GetId(); mpool.addUnchecked( txid, entry.Fee((j + 1) * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB) .Time(GetTime()) .Priority(0) .Height(blocknum) .FromTx(tx, &mpool)); CTransactionRef ptx = mpool.get(txid); block.push_back(ptx); } mpool.removeForBlock(block, ++blocknum); block.clear(); } // Check that the estimate is above the rolling minimum fee. This should be // true since we have not trimmed the mempool. BOOST_CHECK(mpool.GetMinFee(1) <= mpool.estimateFee()); // Check that estimateFee returns the minimum rolling fee even when the // mempool grows very quickly and no blocks have been mined. // Add a bunch of low fee transactions which are not in the mempool // And have zero fees. CMutableTransaction mtx; tx.vin.resize(1); tx.vin[0].scriptSig = garbage; tx.vout.resize(1); block.clear(); // Add tons of transactions to the mempool, // but don't mine them. 
for (int64_t i = 0; i < 10000; i++) { // Mutate the hash tx.vin[0].nSequence = 10000 * blocknum + i; // Add new transaction to the mempool with a increasing fee // The average should end up as 1/2 * 100 * // DEFAULT_BLOCK_MIN_TX_FEE_PER_KB mpool.addUnchecked(tx.GetId(), entry.Fee((i + 1) * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB) .Time(GetTime()) .Priority(0) .Height(blocknum) .FromTx(tx, &mpool)); } // Trim to size. GetMinFee should be more than 10000 * // DEFAULT_BLOCK_MIN_TX_FEE_PER_KB But the estimateFee should be // unchanged. mpool.TrimToSize(1); BOOST_CHECK(mpool.GetMinFee(1) >= CFeeRate(10000 * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB, CTransaction(tx).GetTotalSize())); BOOST_CHECK_MESSAGE(mpool.estimateFee() == mpool.GetMinFee(1), "Confirm blocks has failed"); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 99d7bc1a7..ceb8599d5 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1,1395 +1,1392 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <txmempool.h> #include <chain.h> #include <chainparams.h> // for GetConsensus. #include <clientversion.h> #include <config.h> #include <consensus/consensus.h> #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <policy/fees.h> #include <policy/policy.h> #include <reverse_iterator.h> #include <streams.h> #include <timedata.h> #include <util/moneystr.h> #include <util/system.h> #include <util/time.h> #include <validation.h> #include <version.h> #include <algorithm> CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool _spendsCoinbase, int64_t _sigOpsCount, LockPoints lp) : tx(_tx), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight), inChainInputValue(_inChainInputValue), spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOpsCount), lockPoints(lp) { nTxSize = tx->GetTotalSize(); nModSize = tx->CalculateModifiedSize(GetTxSize()); nUsageSize = RecursiveDynamicUsage(tx); nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); nModFeesWithDescendants = nFee; Amount nValueIn = tx->GetValueOut() + nFee; assert(inChainInputValue <= nValueIn); feeDelta = Amount::zero(); nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); nModFeesWithAncestors = nFee; nSigOpCountWithAncestors = sigOpCount; } double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const { double deltaPriority = double((currentHeight - entryHeight) * (inChainInputValue / SATOSHI)) / nModSize; double dResult = entryPriority + deltaPriority; // This should only happen if it was called with a height below entry height if (dResult < 0) { dResult = 0; } return dResult; } void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) { lockPoints = lp; } // Update the given tx for any in-mempool descendants. // Assumes that setMemPoolChildren is correct for the given tx and all // descendants. 
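// Illustrative sketch (not from this patch): the traversal implemented below
// collects every in-mempool descendant of a transaction, reusing a cache of
// already-computed descendant sets so a shared subtree is only walked once
// when many disconnected-block transactions are re-added. Plain strings stand
// in for txiters here; CollectDescendants is an invented name.
#include <map>
#include <set>
#include <string>

using TxSet = std::set<std::string>;
using ChildMap = std::map<std::string, TxSet>;
using DescendantCache = std::map<std::string, TxSet>;

static TxSet CollectDescendants(const std::string &tx, const ChildMap &children,
                                DescendantCache &cache) {
    TxSet result;
    TxSet stage = children.count(tx) ? children.at(tx) : TxSet{};
    while (!stage.empty()) {
        std::string cur = *stage.begin();
        stage.erase(stage.begin());
        if (!result.insert(cur).second) continue; // already visited
        auto cached = cache.find(cur);
        if (cached != cache.end()) {
            // Reuse the previously computed set instead of re-traversing.
            result.insert(cached->second.begin(), cached->second.end());
            continue;
        }
        if (children.count(cur)) {
            stage.insert(children.at(cur).begin(), children.at(cur).end());
        }
    }
    cache[tx] = result;
    return result;
}

int main() {
    // a -> b, and b has two children c and d.
    ChildMap children{{"a", {"b"}}, {"b", {"c", "d"}}};
    DescendantCache cache;
    TxSet descB = CollectDescendants("b", children, cache); // {c, d}
    TxSet descA = CollectDescendants("a", children, cache); // {b, c, d} via cache
    return (descA.size() == 3 && descB.size() == 2) ? 0 : 1;
}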
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<TxId> &setExclude) { setEntries stageEntries, setAllDescendants; stageEntries = GetMemPoolChildren(updateIt); while (!stageEntries.empty()) { const txiter cit = *stageEntries.begin(); setAllDescendants.insert(cit); stageEntries.erase(cit); const setEntries &setChildren = GetMemPoolChildren(cit); for (txiter childEntry : setChildren) { cacheMap::iterator cacheIt = cachedDescendants.find(childEntry); if (cacheIt != cachedDescendants.end()) { // We've already calculated this one, just add the entries for // this set but don't traverse again. for (txiter cacheEntry : cacheIt->second) { setAllDescendants.insert(cacheEntry); } } else if (!setAllDescendants.count(childEntry)) { // Schedule for later processing stageEntries.insert(childEntry); } } } // setAllDescendants now contains all in-mempool descendants of updateIt. // Update and add to cached descendant map int64_t modifySize = 0; int64_t modifyCount = 0; Amount modifyFee = Amount::zero(); for (txiter cit : setAllDescendants) { if (!setExclude.count(cit->GetTx().GetId())) { modifySize += cit->GetTxSize(); modifyFee += cit->GetModifiedFee(); modifyCount++; cachedDescendants[updateIt].insert(cit); // Update ancestor state for each descendant mapTx.modify(cit, update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCount())); } } mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount)); } // txidsToUpdate is the set of transaction hashes from a disconnected block // which has been re-added to the mempool. For each entry, look for descendants // that are outside txidsToUpdate, and add fee/size information for such // descendants to the parent. For each such descendant, also update the ancestor // state to include the parent. void CTxMemPool::UpdateTransactionsFromBlock( const std::vector<TxId> &txidsToUpdate) { LOCK(cs); // For each entry in txidsToUpdate, store the set of in-mempool, but not // in-txidsToUpdate transactions, so that we don't have to recalculate // descendants when we come across a previously seen entry. cacheMap mapMemPoolDescendantsToUpdate; // Use a set for lookups into txidsToUpdate (these entries are already // accounted for in the state of their ancestors) std::set<TxId> setAlreadyIncluded(txidsToUpdate.begin(), txidsToUpdate.end()); // Iterate in reverse, so that whenever we are looking at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const TxId &txid : reverse_iterate(txidsToUpdate)) { // we cache the in-mempool children to avoid duplicate updates setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(txid); if (it == mapTx.end()) { continue; } auto iter = mapNextTx.lower_bound(COutPoint(txid, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx. for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid; ++iter) { const TxId &childTxId = iter->second->GetId(); txiter childIter = mapTx.find(childTxId); assert(childIter != mapTx.end()); // We can skip updating entries we've encountered before or that are // in the block (which are already accounted for). 
if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childTxId)) { UpdateChild(it, childIter, true); UpdateParent(childIter, it, true); } } UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } bool CTxMemPool::CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const { - LOCK(cs); setEntries parentHashes; const CTransaction &tx = entry.GetTx(); if (fSearchForParents) { // Get parents of this transaction that are in the mempool // GetMemPoolParents() is only valid for entries in the mempool, so we // iterate mapTx to find parents. for (const CTxIn &in : tx.vin) { txiter piter = mapTx.find(in.prevout.GetTxId()); if (piter == mapTx.end()) { continue; } parentHashes.insert(piter); if (parentHashes.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); return false; } } } else { // If we're not searching for parents, we require this to be an entry in // the mempool already. txiter it = mapTx.iterator_to(entry); parentHashes = GetMemPoolParents(it); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!parentHashes.empty()) { txiter stageit = *parentHashes.begin(); setAncestors.insert(stageit); parentHashes.erase(stageit); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf( "exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantSize); return false; } if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetId().ToString(), limitDescendantCount); return false; } if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const setEntries &setMemPoolParents = GetMemPoolParents(stageit); for (const txiter &phash : setMemPoolParents) { // If this is a new ancestor, add it. if (setAncestors.count(phash) == 0) { parentHashes.insert(phash); } if (parentHashes.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { setEntries parentIters = GetMemPoolParents(it); // add or remove this tx as a child of each parent for (txiter piter : parentIters) { UpdateChild(piter, it, add); } const int64_t updateCount = (add ? 
1 : -1); const int64_t updateSize = updateCount * it->GetTxSize(); const Amount updateFee = updateCount * it->GetModifiedFee(); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, updateCount)); } } void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) { int64_t updateCount = setAncestors.size(); int64_t updateSize = 0; int64_t updateSigOpsCount = 0; Amount updateFee = Amount::zero(); for (txiter ancestorIt : setAncestors) { updateSize += ancestorIt->GetTxSize(); updateFee += ancestorIt->GetModifiedFee(); updateSigOpsCount += ancestorIt->GetSigOpCount(); } mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, updateSigOpsCount)); } void CTxMemPool::UpdateChildrenForRemoval(txiter it) { const setEntries &setMemPoolChildren = GetMemPoolChildren(it); for (txiter updateIt : setMemPoolChildren) { UpdateParent(updateIt, it, false); } } void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) { // For each entry, walk back all ancestors and decrement size associated // with this transaction. const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); if (updateDescendants) { // updateDescendants should be true whenever we're not recursively // removing a tx and all its descendants, eg when a transaction is // confirmed in a block. Here we only update statistics and not data in // mapLinks (which we need to preserve until we're finished with all // operations that need to traverse the mempool). for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self int64_t modifySize = -int64_t(removeIt->GetTxSize()); Amount modifyFee = -1 * removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCount(); for (txiter dit : setDescendants) { mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. In this case, the set of // ancestors reachable via mapLinks will be the same as the set of // ancestors whose packages include this transaction, because when we // add a new transaction to the mempool in addUnchecked(), we assume it // has no children, and in the case of a reorg where that assumption is // false, the in-mempool children aren't linked to the in-block tx's // until UpdateTransactionsFromBlock() is called. So if we're being // called during a reorg, ie before UpdateTransactionsFromBlock() has // been called, then mapLinks[] will differ from the set of mempool // parents we'd calculate by searching, and it's important that we use // the mapLinks[] notion of ancestor transactions as the set of things // to update for removal. CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. 
UpdateAncestorsOf(false, removeIt, setAncestors); } // After updating all the ancestor sizes, we can now sever the link between // each transaction being removed and any mempool children (ie, update // setMemPoolParents for each direct child of a transaction being removed). for (txiter removeIt : entriesToRemove) { UpdateChildrenForRemoval(removeIt); } } void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount) { nSizeWithDescendants += modifySize; assert(int64_t(nSizeWithDescendants) > 0); nModFeesWithDescendants += modifyFee; nCountWithDescendants += modifyCount; assert(int64_t(nCountWithDescendants) > 0); } void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps) { nSizeWithAncestors += modifySize; assert(int64_t(nSizeWithAncestors) > 0); nModFeesWithAncestors += modifyFee; nCountWithAncestors += modifyCount; assert(int64_t(nCountWithAncestors) > 0); nSigOpCountWithAncestors += modifySigOps; assert(int(nSigOpCountWithAncestors) >= 0); } CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) { // lock free clear _clear(); // Sanity checks off by default for performance, because otherwise accepting // transactions becomes O(N^2) where N is the number of transactions in the // pool nCheckFrequency = 0; } CTxMemPool::~CTxMemPool() {} bool CTxMemPool::isSpent(const COutPoint &outpoint) const { LOCK(cs); return mapNextTx.count(outpoint); } unsigned int CTxMemPool::GetTransactionsUpdated() const { LOCK(cs); return nTransactionsUpdated; } void CTxMemPool::AddTransactionsUpdated(unsigned int n) { LOCK(cs); nTransactionsUpdated += n; } void CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors) { NotifyEntryAdded(entry.GetSharedTx()); // Add to memory pool without checking anything. // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks. - LOCK(cs); indexed_transaction_set::iterator newit = mapTx.insert(entry).first; mapLinks.insert(make_pair(newit, TxLinks())); // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting into // mapTx. std::map<uint256, TXModifier>::const_iterator pos = mapDeltas.find(hash); if (pos != mapDeltas.end()) { const TXModifier &deltas = pos->second; if (deltas.second != Amount::zero()) { mapTx.modify(newit, update_fee_delta(deltas.second)); } } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction &tx = newit->GetTx(); std::set<uint256> setParentTransactions; for (const CTxIn &in : tx.vin) { mapNextTx.insert(std::make_pair(&in.prevout, &tx)); setParentTransactions.insert(in.prevout.GetTxId()); } // Don't bother worrying about child transactions of this one. Normal case // of a new transaction arriving is that there can't be any children, // because such children would be orphans. An exception to that is if a // transaction enters that used to be in a block. In that case, our // disconnect block logic will call UpdateTransactionsFromBlock to clean up // the mess we're leaving here. 
// Update ancestors with information about this tx for (const uint256 &phash : setParentTransactions) { txiter pit = mapTx.find(phash); if (pit != mapTx.end()) { UpdateParent(newit, pit, true); } } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); vTxHashes.emplace_back(tx.GetHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { NotifyEntryRemoved(it->GetSharedTx(), reason); for (const CTxIn &txin : it->GetTx().vin) { mapNextTx.erase(txin.prevout); } if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) { vTxHashes.shrink_to_fit(); } } else { vTxHashes.clear(); } totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) + memusage::DynamicUsage(mapLinks[it].children); mapLinks.erase(it); mapTx.erase(it); nTransactionsUpdated++; } // Calculates descendants of entry that are not already in setDescendants, and // adds to setDescendants. Assumes entryit is already a tx in the mempool and // setMemPoolChildren is correct for tx and all descendants. Also assumes that // if an entry is in setDescendants already, then all in-mempool descendants of // it are already in setDescendants as well, so that we can save time by not // iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants) const { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have // either already been walked, or will be walked in this iteration). while (!stage.empty()) { txiter it = *stage.begin(); setDescendants.insert(it); stage.erase(it); const setEntries &setChildren = GetMemPoolChildren(it); for (const txiter &childiter : setChildren) { if (!setDescendants.count(childiter)) { stage.insert(childiter); } } } } void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason) { // Remove transaction from memory pool. LOCK(cs); setEntries txToRemove; txiter origit = mapTx.find(origTx.GetId()); if (origit != mapTx.end()) { txToRemove.insert(origit); } else { // When recursively removing but origTx isn't in the mempool be sure to // remove any children that are in the pool. This can happen during // chain re-orgs if origTx isn't re-accepted into the mempool for any // reason. for (size_t i = 0; i < origTx.vout.size(); i++) { auto it = mapNextTx.find(COutPoint(origTx.GetId(), i)); if (it == mapNextTx.end()) { continue; } txiter nextit = mapTx.find(it->second->GetId()); assert(nextit != mapTx.end()); txToRemove.insert(nextit); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, reason); } void CTxMemPool::removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) { // Remove transactions spending a coinbase which are now immature and // no-longer-final transactions. 
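// Illustrative arithmetic (not from this patch): the immaturity test applied
// below treats a coinbase created at height h as spendable only once the
// spending height is at least h plus the maturity window, assumed here to be
// the usual COINBASE_MATURITY of 100 blocks. After a reorg shortens the
// chain, spends that used to satisfy this can become immature again, which is
// why they are evicted along with their descendants.
#include <cassert>

static bool IsImmatureCoinbaseSpend(int spendingHeight, int coinbaseHeight,
                                    int maturity = 100) {
    return spendingHeight - coinbaseHeight < maturity;
}

int main() {
    assert(!IsImmatureCoinbaseSpend(200, 100)); // exactly 100 deep: spendable
    assert(IsImmatureCoinbaseSpend(199, 100));  // 99 deep: still immature
    return 0;
}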
LOCK(cs); setEntries txToRemove; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction &tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); bool validLP = TestLockPointValidity(&lp); CValidationState state; if (!ContextualCheckTransactionForCurrentBlock( config.GetChainParams().GetConsensus(), tx, state, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be // invalid. So it's critical that we remove the tx and not depend on // the LockPoints. txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn &txin : tx.vin) { indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { continue; } const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) { assert(!coin.IsSpent()); } if (coin.IsSpent() || (coin.IsCoinBase() && int64_t(nMemPoolHeight) - coin.GetHeight() < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively AssertLockHeld(cs); for (const CTxIn &txin : tx.vin) { auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetId()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner * fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector<CTransactionRef> &vtx, unsigned int nBlockHeight) { LOCK(cs); DisconnectedBlockTransactions disconnectpool; disconnectpool.addForBlock(vtx); std::vector<const CTxMemPoolEntry *> entries; for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get<insertion_order>())) { uint256 txid = tx->GetId(); indexed_transaction_set::iterator i = mapTx.find(txid); if (i != mapTx.end()) { entries.push_back(&*i); } } for (const CTransactionRef &tx : reverse_iterate(disconnectpool.GetQueuedTx().get<insertion_order>())) { txiter it = mapTx.find(tx->GetId()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetId()); } disconnectpool.clear(); lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapLinks.clear(); mapTx.clear(); mapNextTx.clear(); vTxHashes.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } static void CheckInputsAndUpdateCoins(const CTransaction &tx, CCoinsViewCache &mempoolDuplicate, const int64_t spendheight) { CValidationState state; Amount txfee = Amount::zero(); bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate, spendheight, txfee); assert(fCheckResult); UpdateCoins(mempoolDuplicate, tx, 1000000); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { LOCK(cs); if (nCheckFrequency == 0) { return; } if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency) { return; } LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache *>(pcoins)); const int64_t spendheight = GetSpendHeight(mempoolDuplicate); std::list<const CTxMemPoolEntry *> waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction &tx = it->GetTx(); txlinksMap::const_iterator linksiter = mapLinks.find(it); assert(linksiter != mapLinks.end()); const TxLinks &links = linksiter->second; innerUsage += memusage::DynamicUsage(links.parents) + memusage::DynamicUsage(links.children); bool fDependsWait = false; setEntries setParentCheck; int64_t parentSizes = 0; int64_t parentSigOpCount = 0; for (const CTxIn &txin : tx.vin) { // Check that every mempool transaction's inputs refer to available // coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.GetTxId()); if (it2 != mapTx.end()) { const CTransaction &tx2 = it2->GetTx(); assert(tx2.vout.size() > txin.prevout.GetN() && !tx2.vout[txin.prevout.GetN()].IsNull()); fDependsWait = true; if (setParentCheck.insert(it2).second) { parentSizes += it2->GetTxSize(); parentSigOpCount += it2->GetSigOpCount(); } } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. 
auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } assert(setParentCheck == GetMemPoolParents(it)); // Verify ancestor state is correct. setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); Amount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCount(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCount(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCountWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPool::setEntries setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0)); uint64_t child_sizes = 0; for (; iter != mapNextTx.end() && iter->first->GetTxId() == it->GetTx().GetId(); ++iter) { txiter childit = mapTx.find(iter->second->GetId()); // mapNextTx points to in-mempool transactions assert(childit != mapTx.end()); if (setChildrenCheck.insert(childit).second) { child_sizes += childit->GetTxSize(); } } assert(setChildrenCheck == GetMemPoolChildren(it)); // Also check to make sure size is greater than sum with immediate // children. Just a sanity check, not definitive that this calc is // correct... assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize()); if (fDependsWait) { waitingOnDependants.push_back(&(*it)); } else { CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry *entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { uint256 txid = it->second->GetId(); indexed_transaction_set::const_iterator it2 = mapTx.find(txid); const CTransaction &tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb) { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hasha); if (i == mapTx.end()) { return false; } indexed_transaction_set::const_iterator j = mapTx.find(hashb); if (j == mapTx.end()) { return true; } uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a, const CTxMemPool::indexed_transaction_set::const_iterator &b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // 
namespace std::vector<CTxMemPool::indexed_transaction_set::const_iterator> CTxMemPool::GetSortedDepthAndScore() const { std::vector<indexed_transaction_set::const_iterator> iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } void CTxMemPool::queryHashes(std::vector<uint256> &vtxid) { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetId()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize()), it->GetModifiedFee() - it->GetFee()}; } std::vector<TxMempoolInfo> CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector<TxMempoolInfo> ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return nullptr; } return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const uint256 &txid) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(txid); if (i == mapTx.end()) { return TxMempoolInfo(); } return GetInfo(i); } CFeeRate CTxMemPool::estimateFee() const { LOCK(cs); uint64_t maxMempoolSize = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; // minerPolicy uses recent blocks to figure out a reasonable fee. This // may disagree with the rollingMinimumFeerate under certain scenarios // where the mempool increases rapidly, or blocks are being mined which // do not contain propagated transactions. 
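// Illustrative sketch (not from this patch): GetMinFee (further down in this
// file) decays the rolling minimum feerate exponentially once the pool stops
// being trimmed, halving it every ROLLING_FEE_HALFLIFE seconds, and the
// half-life is shortened when usage is far below the size limit so the floor
// relaxes faster. The numbers below are placeholders.
#include <cassert>
#include <cmath>
#include <cstdint>

static double DecayedMinFeeRate(double lastRate, int64_t secondsElapsed,
                                double halflife, double usage, double limit) {
    if (usage < limit / 4) {
        halflife /= 4; // nearly empty pool: relax the floor quickly
    } else if (usage < limit / 2) {
        halflife /= 2;
    }
    return lastRate / std::pow(2.0, double(secondsElapsed) / halflife);
}

int main() {
    const double halflife = 60.0 * 60 * 12; // placeholder 12-hour half-life
    // One full half-life with a busy pool halves the floor.
    double r = DecayedMinFeeRate(1000.0, 60 * 60 * 12, halflife, 0.9, 1.0);
    assert(r > 499.0 && r < 501.0);
    // The same elapsed time with a nearly empty pool decays 2^4 times as much.
    double r2 = DecayedMinFeeRate(1000.0, 60 * 60 * 12, halflife, 0.1, 1.0);
    assert(r2 > 62.0 && r2 < 63.0);
    return 0;
}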
return std::max(::minRelayTxFee, GetMinFee(maxMempoolSize)); } void CTxMemPool::PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, const Amount nFeeDelta) { { LOCK(cs); TXModifier &deltas = mapDeltas[hash]; deltas.first += dPriorityDelta; deltas.second += nFeeDelta; txiter it = mapTx.find(hash); if (it != mapTx.end()) { mapTx.modify(it, update_fee_delta(deltas.second)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, update_ancestor_state(0, nFeeDelta, 0, 0)); } ++nTransactionsUpdated; } } LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", hash.ToString(), dPriorityDelta, FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const { LOCK(cs); std::map<uint256, TXModifier>::const_iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) { return; } const TXModifier &deltas = pos->second; dPriorityDelta += deltas.first; nFeeDelta += deltas.second; } void CTxMemPool::ClearPrioritisation(const uint256 hash) { LOCK(cs); mapDeltas.erase(hash); } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (const CTxIn &in : tx.vin) { if (exists(in.prevout.GetTxId())) { return false; } } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an entry in the mempool exists, always return that one, as it's // guaranteed to never conflict with the underlying cache, and it cannot // have pruned entries (as it contains full) transactions. First checking // the underlying cache risks returning a pruned entry instead. CTransactionRef ptx = mempool.get(outpoint.GetTxId()); if (ptx) { if (outpoint.GetN() < ptx->vout.size()) { coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false); return true; } return false; } return base->GetCoin(outpoint, coin); } size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no // exact formula for boost::multi_index_contained is implemented. 
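// Illustrative arithmetic (not from this patch): the estimate returned below
// charges each mapTx entry for its own size plus 12 pointers of multi-index
// bookkeeping, rounded up to an assumed allocator granularity. The numbers
// here are placeholders just to show the shape of the calculation.
#include <cstddef>
#include <cstdio>

static std::size_t RoundUpAllocation(std::size_t bytes,
                                     std::size_t granularity = 16) {
    return (bytes + granularity - 1) / granularity * granularity;
}

int main() {
    const std::size_t entrySize = 208; // placeholder sizeof(CTxMemPoolEntry)
    const std::size_t indexOverhead = 12 * sizeof(void *);
    const std::size_t perEntry = RoundUpAllocation(entrySize + indexOverhead);
    const std::size_t txCount = 50000;
    std::printf("approx mapTx usage: %zu bytes\n", perEntry * txCount);
    return 0;
}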
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 12 * sizeof(void *)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); for (const txiter &it : stage) { removeUnchecked(it, reason); } } int CTxMemPool::Expire(int64_t time) { LOCK(cs); indexed_transaction_set::index<entry_time>::type::iterator it = mapTx.get<entry_time>().begin(); setEntries toremove; while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) { toremove.insert(mapTx.project<0>(it)); it++; } setEntries stage; for (txiter removeit : toremove) { CalculateDescendants(removeit, stage); } RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY); return stage.size(); } void CTxMemPool::LimitSize(size_t limit, unsigned long age) { int expired = Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector<COutPoint> vNoSpendsRemaining; TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint &removed : vNoSpendsRemaining) { pcoinsTip->Uncache(removed); } } void CTxMemPool::addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry) { - LOCK(cs); setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); return addUnchecked(hash, entry, setAncestors); } void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) { setEntries s; if (add && mapLinks[entry].children.insert(child).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].children.erase(child)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) { setEntries s; if (add && mapLinks[entry].parents.insert(parent).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && mapLinks[entry].parents.erase(parent)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolParents(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.parents; } const CTxMemPool::setEntries & CTxMemPool::GetMemPoolChildren(txiter entry) const { assert(entry != mapTx.end()); txlinksMap::const_iterator it = mapLinks.find(entry); assert(it != mapLinks.end()); return it->second.children; } CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { LOCK(cs); if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) { return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) { halflife /= 4; } else if (DynamicMemoryUsage() < sizelimit / 2) { halflife /= 2; } rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; } return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI); } void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) { AssertLockHeld(cs); if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) 
{ rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI; blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint> *pvNoSpendsRemaining) { LOCK(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(Amount::zero()); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index<descendant_score>::type::iterator it = mapTx.get<descendant_score>().begin(); // We set the new mempool min fee to the feerate of the removed set, // plus the "minimum reasonable fee rate" (ie some value under which we // consider txn to have 0 fee). This way, we don't allow txn to enter // mempool with feerate equal to txn which were removed with no block in // between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += MEMPOOL_FULL_FEE_INCREMENT; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector<CTransaction> txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) { txn.push_back(iter->GetTx()); } } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction &tx : txn) { for (const CTxIn &txin : tx.vin) { if (exists(txin.prevout.GetTxId())) { continue; } pvNoSpendsRemaining->push_back(txin.prevout); } } } } if (maxFeeRateRemoved > CFeeRate(Amount::zero())) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const { // find parent with highest descendant count std::vector<txiter> candidates; setEntries counted; candidates.push_back(entry); uint64_t maximum = 0; while (candidates.size()) { txiter candidate = candidates.back(); candidates.pop_back(); if (!counted.insert(candidate).second) { continue; } const setEntries &parents = GetMemPoolParents(candidate); if (parents.size() == 0) { maximum = std::max(maximum, candidate->GetCountWithDescendants()); } else { for (txiter i : parents) { candidates.push_back(i); } } } return maximum; } void CTxMemPool::GetTransactionAncestry(const uint256 &txid, size_t &ancestors, size_t &descendants) const { LOCK(cs); auto it = mapTx.find(txid); ancestors = descendants = 0; if (it != mapTx.end()) { ancestors = it->GetCountWithAncestors(); descendants = CalculateDescendantMaximum(it); } } SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {} /** Maximum bytes for transactions to store for processing during reorg */ static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE; void DisconnectedBlockTransactions::addForBlock( const std::vector<CTransactionRef> &vtx) { for (const auto &tx : reverse_iterate(vtx)) { // If we already added it, just skip. auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { continue; } // Insert the transaction into the pool. addTransaction(tx); // Fill in the set of parents. std::unordered_set<TxId, SaltedTxidHasher> parents; for (const CTxIn &in : tx->vin) { parents.insert(in.prevout.GetTxId()); } // In order to make sure we keep things in topological order, we check // if we already know of the parent of the current transaction. If so, // we remove them from the set and then add them back. 
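// (Erasing a queued parent and re-inserting it appends it to the back of the
// sequenced insertion_order index, i.e. after the child that was just added;
// updateMempoolForReorg later walks that index in reverse, so parents get
// re-accepted before their children. The worklist below keeps draining
// `parents` because each moved parent may itself have queued ancestors.)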
while (parents.size() > 0) { std::unordered_set<TxId, SaltedTxidHasher> worklist( std::move(parents)); parents.clear(); for (const TxId &txid : worklist) { // If we do not have that txid in the set, nothing needs to be // done. auto pit = queuedTx.find(txid); if (pit == queuedTx.end()) { continue; } // We have parent in our set, we reinsert them at the right // position. const CTransactionRef ptx = *pit; queuedTx.erase(pit); queuedTx.insert(ptx); // And we make sure ancestors are covered. for (const CTxIn &in : ptx->vin) { parents.insert(in.prevout.GetTxId()); } } } } // Keep the size under control. while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry, and remove its children from the // mempool. auto it = queuedTx.get<insertion_order>().begin(); g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); removeEntry(it); } } void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) { // addForBlock's algorithm sorts a vector of transactions back into // topological order. We use it in a separate object to create a valid // ordering of all mempool transactions, which we then splice in front of // the current queuedTx. This results in a valid sequence of transactions to // be reprocessed in updateMempoolForReorg. // We create vtx in order of the entry_time index to facilitate for // addForBlocks (which iterates in reverse order), as vtx probably end in // the correct ordering for queuedTx. std::vector<CTransactionRef> vtx; { LOCK(pool.cs); vtx.reserve(pool.mapTx.size()); for (const CTxMemPoolEntry &e : pool.mapTx.get<entry_time>()) { vtx.push_back(e.GetSharedTx()); } pool.clear(); } // Use addForBlocks to sort the transactions and then splice them in front // of queuedTx DisconnectedBlockTransactions orderedTxnPool; orderedTxnPool.addForBlock(vtx); cachedInnerUsage += orderedTxnPool.cachedInnerUsage; queuedTx.get<insertion_order>().splice( queuedTx.get<insertion_order>().begin(), orderedTxnPool.queuedTx.get<insertion_order>()); // We limit memory usage because we can't know if more blocks will be // disconnected while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { // Drop the earliest entry which, by definition, has no children removeEntry(queuedTx.get<insertion_order>().begin()); } } void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); std::vector<TxId> txidsUpdate; // disconnectpool's insertion_order index sorts the entries from oldest to // newest, but the oldest entry will be the last tx from the latest mined // block that was disconnected. // Iterate disconnectpool in reverse, so that we add transactions back to // the mempool starting with the earliest transaction that had been // previously seen in a block. for (const CTransactionRef &tx : reverse_iterate(queuedTx.get<insertion_order>())) { // ignore validation errors in resurrected transactions CValidationState stateDummy; if (!fAddToMempool || tx->IsCoinBase() || !AcceptToMemoryPool(config, g_mempool, stateDummy, tx, false, nullptr, true)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). 
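// (This branch covers three cases: the caller asked for erase-only behaviour
// via fAddToMempool == false, the transaction is a coinbase from the
// disconnected block and so can never be valid in the mempool, or
// AcceptToMemoryPool rejected it, e.g. on fee or policy grounds. In all of
// them the in-mempool descendants are purged recursively as well.)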
g_mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG); } else if (g_mempool.exists(tx->GetId())) { txidsUpdate.push_back(tx->GetId()); } } queuedTx.clear(); // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have // no in-mempool children, which is generally not true when adding // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in the // disconnectpool that were added back and cleans up the mempool state. g_mempool.UpdateTransactionsFromBlock(txidsUpdate); // We also need to remove any now-immature transactions g_mempool.removeForReorg(config, pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions g_mempool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } diff --git a/src/txmempool.h b/src/txmempool.h index 4e9c1067b..d51c8d64d 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,948 +1,950 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include <amount.h> #include <coins.h> #include <crypto/siphash.h> #include <indirectmap.h> #include <primitives/transaction.h> #include <random.h> #include <sync.h> #include <boost/multi_index/hashed_index.hpp> #include <boost/multi_index/ordered_index.hpp> #include <boost/multi_index/sequenced_index.hpp> #include <boost/multi_index_container.hpp> #include <boost/signals2/signal.hpp> #include <map> #include <memory> #include <set> #include <string> #include <utility> #include <vector> class CBlockIndex; class Config; extern CCriticalSection cs_main; inline double AllowFreeThreshold() { return (144 * COIN) / (250 * SATOSHI); } inline bool AllowFree(double dPriority) { // Large (in bytes) low-priority (new, small-coin) transactions need a fee. return dPriority > AllowFreeThreshold(); } /** * Fake height value used in Coins to signify they are only in the memory * pool(since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). * * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; //!< ... and modified size for priority size_t nModSize; //!< ... 
and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Priority when entering the mempool double entryPriority; //!< Chain height when entering the mempool unsigned int entryHeight; //!< Sum of all txin values that are already in blockchain Amount inChainInputValue; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; //!< ... and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update from entry * priority. Only inputs that were originally in-chain will age. */ double GetPriority(unsigned int currentHeight) const; const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state. void UpdateDescendantState(int64_t modifySize, Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state void UpdateAncestorState(int64_t modifySize, Amount modifyFee, int64_t modifyCount, int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. 
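// The functors defined just below are applied through multi_index's modify(),
// which rewrites the stored entry in place while keeping every index of mapTx
// consistent. Minimal illustrative call site (helper name is a placeholder;
// assumes the caller already holds pool.cs):
//
//     void SetFeeDelta(CTxMemPool &pool, CTxMemPool::txiter it,
//                      const Amount &delta) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) {
//         // Sets the prioritisation delta and re-sorts the entry in the
//         // fee-based indices if its mining score moved.
//         pool.mapTx.modify(it, update_fee_delta(delta));
//     }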
struct update_descendant_state { update_descendant_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { e.UpdateDescendantState(modifySize, modifyFee, modifyCount); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { update_ancestor_state(int64_t _modifySize, Amount _modifyFee, int64_t _modifyCount, int64_t _modifySigOpsCost) : modifySize(_modifySize), modifyFee(_modifyFee), modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { e.UpdateAncestorState(modifySize, modifyFee, modifyCount, modifySigOpsCost); } private: int64_t modifySize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { explicit update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { explicit update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; // extracts a transaction hash from CTxMempoolEntry or CTransactionRef struct mempoolentry_txid { typedef uint256 result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). */ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Return the fee/size we're using for sorting this entry. void GetModFeeAndSize(const CTxMemPoolEntry &a, double &mod_fee, double &size) const { // Compare feerate with descendants to feerate of the transaction, and // return the fee/size for the max. double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); if (f2 > f1) { mod_fee = a.GetModFeesWithDescendants() / SATOSHI; size = a.GetSizeWithDescendants(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; /** \class CompareTxMemPoolEntryByScore * * Sort by feerate of entry (fee/size) in descending order * This is only used for transaction relay, so we use GetFee() * instead of GetModifiedFee() to avoid leaking prioritization * information via the sort order. 
*/ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; /** \class CompareTxMemPoolEntryByAncestorScore * * Sort an entry by min(score/size of entry's tx, score/size with all * ancestors). */ class CompareTxMemPoolEntryByAncestorFee { public: template <typename T> bool operator()(const T &a, const T &b) const { double a_mod_fee, a_size, b_mod_fee, b_size; GetModFeeAndSize(a, a_mod_fee, a_size); GetModFeeAndSize(b, b_mod_fee, b_size); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = a_mod_fee * b_size; double f2 = a_size * b_mod_fee; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } // Return the fee/size we're using for sorting this entry. template <typename T> void GetModFeeAndSize(const T &a, double &mod_fee, double &size) const { // Compare feerate with ancestors to feerate of the transaction, and // return the fee/size for the min. double f1 = a.GetSizeWithAncestors() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithAncestors() / SATOSHI); if (f1 > f2) { mod_fee = a.GetModFeesWithAncestors() / SATOSHI; size = a.GetSizeWithAncestors(); } else { mod_fee = a.GetModifiedFee() / SATOSHI; size = a.GetTxSize(); } } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct ancestor_score {}; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //!< Manually removed or unknown reason UNKNOWN = 0, //!< Expired from mempool EXPIRY, //!< Removed in size limiting SIZELIMIT, //!< Removed for reorganization REORG, //!< Removed for block BLOCK, //!< Removed for conflict with in-block transaction CONFLICT, //!< Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const uint256 &txid) const { return SipHashUint256(k0, k1, txid); } }; typedef std::pair<double, Amount> TXModifier; /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. 
* * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - descendant feerate [we use max(feerate of tx, feerate of tx with all * descendants)] * - time in mempool * - ancestor feerate [we use min(feerate of tx, feerate of tx with all * unconfirmed ancestors)] * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. * CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency GUARDED_BY(cs); //!< Used by getblocktemplate to trigger CreateNewBlock() invocation unsigned int nTransactionsUpdated; //!< sum of all mempool tx's virtual sizes. 
uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate) EXCLUSIVE_LOCKS_REQUIRED(cs); public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag<descendant_score>, boost::multi_index::identity<CTxMemPoolEntry>, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag<entry_time>, boost::multi_index::identity<CTxMemPoolEntry>, CompareTxMemPoolEntryByEntryTime>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag<ancestor_score>, boost::multi_index::identity<CTxMemPoolEntry>, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; mutable CCriticalSection cs; indexed_transaction_set mapTx GUARDED_BY(cs); typedef indexed_transaction_set::nth_index<0>::type::iterator txiter; //!< All tx hashes/entries in mapTx, in random order std::vector<std::pair<uint256, txiter>> vTxHashes; struct CompareIteratorByHash { bool operator()(const txiter &a, const txiter &b) const { return a->GetTx().GetId() < b->GetTx().GetId(); } }; typedef std::set<txiter, CompareIteratorByHash> setEntries; const setEntries &GetMemPoolParents(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); const setEntries &GetMemPoolChildren(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs); private: typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap; struct TxLinks { setEntries parents; setEntries children; }; typedef std::map<txiter, TxLinks, CompareIteratorByHash> txlinksMap; txlinksMap mapLinks; void UpdateParent(txiter entry, txiter parent, bool add); void UpdateChild(txiter entry, txiter child, bool add); std::vector<indexed_transaction_set::const_iterator> GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs); public: indirectmap<COutPoint, const CTransaction *> mapNextTx GUARDED_BY(cs); std::map<uint256, TXModifier> mapDeltas; /** * Create a new CTxMemPool. */ CTxMemPool(); ~CTxMemPool(); /** * If sanity-checking is turned on, check makes sure the pool is consistent * (does not contain two transactions that spend the same inputs, all inputs * are in the mapNextTx array). If sanity-checking is turned off, check does * nothing. */ void check(const CCoinsViewCache *pcoins) const; void setSanityCheck(double dFrequency = 1.0) { LOCK(cs); nCheckFrequency = static_cast<uint32_t>(dFrequency * 4294967295.0); } // addUnchecked must updated state for all ancestors of a given transaction, // to track size/count of descendant transactions. First version of // addUnchecked can be used to have it call CalculateMemPoolAncestors(), and // then invoke the second version. // Note that addUnchecked is ONLY called from ATMP outside of tests // and any other callers may break wallet's in-mempool tracking (due to // lack of CValidationInterface::TransactionAddedToMempool callbacks). 
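// With the annotations added below, callers must hold cs themselves rather
// than relying on addUnchecked taking the lock internally. Illustrative
// call-site shape (placeholder names):
//
//     LOCK(pool.cs);
//     pool.addUnchecked(entry.GetTx().GetId(), entry);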
- void addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry); + void addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry) + EXCLUSIVE_LOCKS_REQUIRED(cs); void addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, - setEntries &setAncestors); + setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeRecursive( const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs_main); void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeForBlock(const std::vector<CTransactionRef> &vtx, unsigned int nBlockHeight); void clear(); // lock free void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs); bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb); void queryHashes(std::vector<uint256> &vtxid); bool isSpent(const COutPoint &outpoint) const; unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** * Check that none of this transactions inputs are in the mempool, and thus * the tx is not dependent on other mempool transactions to be included in a * block. */ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ void PrioritiseTransaction(const uint256 &hash, double dPriorityDelta, const Amount nFeeDelta); void ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const; void ClearPrioritisation(const uint256 hash); public: /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector<TxId> &txidsToUpdate); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. 
Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, - std::string &errString, bool fSearchForParents = true) const; + std::string &errString, bool fSearchForParents = true) const + EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. */ void CalculateDescendants(txiter it, setEntries &setDescendants) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector<COutPoint> *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. */ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Calculate the ancestor and descendant count for the given transaction. * The counts include the transaction itself. */ void GetTransactionAncestry(const uint256 &txid, size_t &ancestors, size_t &descendants) const; unsigned long size() { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() const { LOCK(cs); return totalTxSize; } bool exists(uint256 hash) const { LOCK(cs); return mapTx.count(hash) != 0; } CTransactionRef get(const uint256 &hash) const; TxMempoolInfo info(const uint256 &hash) const; std::vector<TxMempoolInfo> infoAll() const; CFeeRate estimateFee() const; size_t DynamicMemoryUsage() const; boost::signals2::signal<void(CTransactionRef)> NotifyEntryAdded; boost::signals2::signal<void(CTransactionRef, MemPoolRemovalReason)> NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. */ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<TxId> &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Update ancestors of hash to add/remove it as a descendant transaction. 
*/ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * For each transaction being removed, update ancestors and any direct * children. If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry) EXCLUSIVE_LOCKS_REQUIRED(cs); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs); }; /** * CCoinsView that brings transactions from a mempool into view. * It does not check for spendings by memory pool transactions. * Instead, it provides access to all Coins which are either unspent in the * base CCoinsView, or are outputs from any mempool transaction! * This allows transaction replacement to work as expected, as you want to * have all inputs "available" to check signatures, and any cycles in the * dependency graph are checked directly in AcceptToMemoryPool. * It also allows you to sign a double-spend directly in * signrawtransactionwithkey and signrawtransactionwithwallet, as long as the * conflicting transaction is not yet confirmed. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; }; // We want to sort transactions by coin age priority typedef std::pair<double, CTxMemPool::txiter> TxCoinAgePriority; struct TxCoinAgePriorityCompare { bool operator()(const TxCoinAgePriority &a, const TxCoinAgePriority &b) { if (a.first == b.first) { // Reverse order to make sort less than return CompareTxMemPoolEntryByScore()(*(b.second), *(a.second)); } return a.first < b.first; } }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. 
* * It also enables efficient reprocessing of current mempool entries, useful * when (de)activating forks that result in in-mempool transactions becoming * invalid */ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag<txid_index>, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag<insertion_order>>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. // This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_contained is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Import mempool entries in topological order into queuedTx and clear the // mempool. Caller should call updateMempoolForReorg to reprocess these // transactions void importMempool(CTxMemPool &pool); // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector<CTransactionRef> &vtx); // Remove entries based on txid_index, and update memory usage. void removeForBlock(const std::vector<CTransactionRef> &vtx) { // Short-circuit in the common case of a block being added to the tip if (queuedTx.empty()) { return; } for (auto const &tx : vtx) { auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { cachedInnerUsage -= RecursiveDynamicUsage(*it); queuedTx.erase(it); } } } // Remove an entry by insertion_order index, and update memory usage. void removeEntry(indexed_disconnected_transactions::index< insertion_order>::type::iterator entry) { cachedInnerUsage -= RecursiveDynamicUsage(*entry); queuedTx.get<insertion_order>().erase(entry); } bool isEmpty() const { return queuedTx.empty(); } void clear() { cachedInnerUsage = 0; queuedTx.clear(); } /** * Make mempool consistent after a reorg, by re-adding or recursively * erasing disconnected block transactions from the mempool, and also * removing any other transactions from the mempool that are no longer valid * given the new tip/height. * * Note: we assume that disconnectpool only contains transactions that are * NOT confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). 
* * Passing fAddToMempool=false will skip trying to add the transactions * back, and instead just erase from the mempool as needed. */ void updateMempoolForReorg(const Config &config, bool fAddToMempool); }; #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index a85f2bb5e..b67e8729a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1,4756 +1,4757 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <wallet/wallet.h> #include <chain.h> #include <checkpoints.h> #include <config.h> #include <consensus/consensus.h> #include <consensus/validation.h> #include <fs.h> #include <init.h> #include <key.h> #include <key_io.h> #include <keystore.h> #include <net.h> #include <policy/policy.h> #include <primitives/block.h> #include <primitives/transaction.h> #include <random.h> #include <script/script.h> #include <script/sighashtype.h> #include <script/sign.h> #include <timedata.h> #include <txmempool.h> #include <ui_interface.h> #include <util/moneystr.h> #include <util/system.h> #include <validation.h> #include <wallet/coincontrol.h> #include <wallet/coinselection.h> #include <wallet/fees.h> #include <wallet/finaltx.h> #include <wallet/walletutil.h> #include <boost/algorithm/string/replace.hpp> #include <algorithm> #include <cassert> #include <future> static CCriticalSection cs_wallets; static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets); bool AddWallet(const std::shared_ptr<CWallet> &wallet) { LOCK(cs_wallets); assert(wallet); std::vector<std::shared_ptr<CWallet>>::const_iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet); if (i != vpwallets.end()) { return false; } vpwallets.push_back(wallet); return true; } bool RemoveWallet(const std::shared_ptr<CWallet> &wallet) { LOCK(cs_wallets); assert(wallet); std::vector<std::shared_ptr<CWallet>>::iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet); if (i == vpwallets.end()) { return false; } vpwallets.erase(i); return true; } bool HasWallets() { LOCK(cs_wallets); return !vpwallets.empty(); } std::vector<std::shared_ptr<CWallet>> GetWallets() { LOCK(cs_wallets); return vpwallets; } std::shared_ptr<CWallet> GetWallet(const std::string &name) { LOCK(cs_wallets); for (const std::shared_ptr<CWallet> &wallet : vpwallets) { if (wallet->GetName() == name) { return wallet; } } return nullptr; } static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10; const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000; const uint256 CMerkleTx::ABANDON_HASH(uint256S( "0000000000000000000000000000000000000000000000000000000000000001")); /** @defgroup mapWallet * * @{ */ std::string COutput::ToString() const { return strprintf("COutput(%s, %d, %d) [%s]", tx->GetId().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue)); } class CAffectedKeysVisitor : public boost::static_visitor<void> { private: const CKeyStore &keystore; std::vector<CKeyID> &vKeys; public: CAffectedKeysVisitor(const CKeyStore &keystoreIn, std::vector<CKeyID> &vKeysIn) : keystore(keystoreIn), vKeys(vKeysIn) {} void Process(const CScript &script) { txnouttype type; std::vector<CTxDestination> vDest; int nRequired; if (ExtractDestinations(script, type, vDest, nRequired)) { for (const CTxDestination &dest : vDest) { boost::apply_visitor(*this, dest); } } } void operator()(const CKeyID &keyId) { if 
(keystore.HaveKey(keyId)) { vKeys.push_back(keyId); } } void operator()(const CScriptID &scriptId) { CScript script; if (keystore.GetCScript(scriptId, script)) { Process(script); } } void operator()(const CNoDestination &none) {} }; const CWalletTx *CWallet::GetWalletTx(const TxId &txid) const { LOCK(cs_wallet); std::map<TxId, CWalletTx>::const_iterator it = mapWallet.find(txid); if (it == mapWallet.end()) { return nullptr; } return &(it->second); } CPubKey CWallet::GenerateNewKey(WalletBatch &batch, bool internal) { // mapKeyMetadata AssertLockHeld(cs_wallet); // default to compressed public keys if we want 0.6.0 wallets bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); CKey secret; // Create new metadata int64_t nCreationTime = GetTime(); CKeyMetadata metadata(nCreationTime); // use HD key derivation if HD was enabled during wallet creation if (IsHDEnabled()) { DeriveNewChildKey( batch, metadata, secret, (CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false)); } else { secret.MakeNewKey(fCompressed); } // Compressed public keys were introduced in version 0.6.0 if (fCompressed) { SetMinVersion(FEATURE_COMPRPUBKEY); } CPubKey pubkey = secret.GetPubKey(); assert(secret.VerifyPubKey(pubkey)); mapKeyMetadata[pubkey.GetID()] = metadata; UpdateTimeFirstKey(nCreationTime); if (!AddKeyPubKeyWithDB(batch, secret, pubkey)) { throw std::runtime_error(std::string(__func__) + ": AddKey failed"); } return pubkey; } void CWallet::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata &metadata, CKey &secret, bool internal) { // for now we use a fixed keypath scheme of m/0'/0'/k // master key seed (256bit) CKey key; // hd master key CExtKey masterKey; // key at m/0' CExtKey accountKey; // key at m/0'/0' (external) or m/0'/1' (internal) CExtKey chainChildKey; // key at m/0'/0'/<n>' CExtKey childKey; // try to get the master key if (!GetKey(hdChain.masterKeyID, key)) { throw std::runtime_error(std::string(__func__) + ": Master key not found"); } masterKey.SetMaster(key.begin(), key.size()); // derive m/0' // use hardened derivation (child keys >= 0x80000000 are hardened after // bip32) masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT); // derive m/0'/0' (external chain) OR m/0'/1' (internal chain) assert(internal ? CanSupportFeature(FEATURE_HD_SPLIT) : true); accountKey.Derive(chainChildKey, BIP32_HARDENED_KEY_LIMIT + (internal ? 
1 : 0)); // derive child key at next index, skip keys already known to the wallet do { // always derive hardened keys // childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened // child-index-range // example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649 if (internal) { chainChildKey.Derive(childKey, hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT); metadata.hdKeypath = "m/0'/1'/" + std::to_string(hdChain.nInternalChainCounter) + "'"; hdChain.nInternalChainCounter++; } else { chainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT); metadata.hdKeypath = "m/0'/0'/" + std::to_string(hdChain.nExternalChainCounter) + "'"; hdChain.nExternalChainCounter++; } } while (HaveKey(childKey.key.GetPubKey().GetID())); secret = childKey.key; metadata.hdMasterKeyID = hdChain.masterKeyID; // update the chain model in the database if (!batch.WriteHDChain(hdChain)) { throw std::runtime_error(std::string(__func__) + ": Writing HD chain model failed"); } } bool CWallet::AddKeyPubKeyWithDB(WalletBatch &batch, const CKey &secret, const CPubKey &pubkey) { // mapKeyMetadata AssertLockHeld(cs_wallet); // CCryptoKeyStore has no concept of wallet databases, but calls // AddCryptedKey // which is overridden below. To avoid flushes, the database handle is // tunneled through to it. bool needsDB = !encrypted_batch; if (needsDB) { encrypted_batch = &batch; } if (!CCryptoKeyStore::AddKeyPubKey(secret, pubkey)) { if (needsDB) { encrypted_batch = nullptr; } return false; } if (needsDB) { encrypted_batch = nullptr; } // Check if we need to remove from watch-only. CScript script; script = GetScriptForDestination(pubkey.GetID()); if (HaveWatchOnly(script)) { RemoveWatchOnly(script); } script = GetScriptForRawPubKey(pubkey); if (HaveWatchOnly(script)) { RemoveWatchOnly(script); } if (IsCrypted()) { return true; } return batch.WriteKey(pubkey, secret.GetPrivKey(), mapKeyMetadata[pubkey.GetID()]); } bool CWallet::AddKeyPubKey(const CKey &secret, const CPubKey &pubkey) { WalletBatch batch(*database); return CWallet::AddKeyPubKeyWithDB(batch, secret, pubkey); } bool CWallet::AddCryptedKey(const CPubKey &vchPubKey, const std::vector<uint8_t> &vchCryptedSecret) { if (!CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret)) { return false; } LOCK(cs_wallet); if (encrypted_batch) { return encrypted_batch->WriteCryptedKey( vchPubKey, vchCryptedSecret, mapKeyMetadata[vchPubKey.GetID()]); } return WalletBatch(*database).WriteCryptedKey( vchPubKey, vchCryptedSecret, mapKeyMetadata[vchPubKey.GetID()]); } void CWallet::LoadKeyMetadata(const CKeyID &keyID, const CKeyMetadata &meta) { // mapKeyMetadata AssertLockHeld(cs_wallet); UpdateTimeFirstKey(meta.nCreateTime); mapKeyMetadata[keyID] = meta; } void CWallet::LoadScriptMetadata(const CScriptID &script_id, const CKeyMetadata &meta) { // m_script_metadata AssertLockHeld(cs_wallet); UpdateTimeFirstKey(meta.nCreateTime); m_script_metadata[script_id] = meta; } bool CWallet::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<uint8_t> &vchCryptedSecret) { return CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret); } /** * Update wallet first key creation time. This should be called whenever keys * are added to the wallet, with the oldest key creation time. */ void CWallet::UpdateTimeFirstKey(int64_t nCreateTime) { AssertLockHeld(cs_wallet); if (nCreateTime <= 1) { // Cannot determine birthday information, so set the wallet birthday to // the beginning of time. 
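// (A birthday of 1 makes any subsequent rescan start from the genesis block,
// which is the safe choice when the key's creation time is unknown.)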
nTimeFirstKey = 1; } else if (!nTimeFirstKey || nCreateTime < nTimeFirstKey) { nTimeFirstKey = nCreateTime; } } bool CWallet::AddCScript(const CScript &redeemScript) { if (!CCryptoKeyStore::AddCScript(redeemScript)) { return false; } return WalletBatch(*database).WriteCScript(Hash160(redeemScript), redeemScript); } bool CWallet::LoadCScript(const CScript &redeemScript) { /** * A sanity check was added in pull #3843 to avoid adding redeemScripts that * never can be redeemed. However, old wallets may still contain these. Do * not add them to the wallet and warn. */ if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) { std::string strAddr = EncodeDestination(CScriptID(redeemScript), GetConfig()); LogPrintf("%s: Warning: This wallet contains a redeemScript of size %i " "which exceeds maximum size %i thus can never be redeemed. " "Do not use address %s.\n", __func__, redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE, strAddr); return true; } return CCryptoKeyStore::AddCScript(redeemScript); } bool CWallet::AddWatchOnly(const CScript &dest) { if (!CCryptoKeyStore::AddWatchOnly(dest)) { return false; } const CKeyMetadata &meta = m_script_metadata[CScriptID(dest)]; UpdateTimeFirstKey(meta.nCreateTime); NotifyWatchonlyChanged(true); return WalletBatch(*database).WriteWatchOnly(dest, meta); } bool CWallet::AddWatchOnly(const CScript &dest, int64_t nCreateTime) { m_script_metadata[CScriptID(dest)].nCreateTime = nCreateTime; return AddWatchOnly(dest); } bool CWallet::RemoveWatchOnly(const CScript &dest) { AssertLockHeld(cs_wallet); if (!CCryptoKeyStore::RemoveWatchOnly(dest)) { return false; } if (!HaveWatchOnly()) { NotifyWatchonlyChanged(false); } return WalletBatch(*database).EraseWatchOnly(dest); } bool CWallet::LoadWatchOnly(const CScript &dest) { return CCryptoKeyStore::AddWatchOnly(dest); } bool CWallet::Unlock(const SecureString &strWalletPassphrase) { CCrypter crypter; CKeyingMaterial _vMasterKey; LOCK(cs_wallet); for (const MasterKeyMap::value_type &pMasterKey : mapMasterKeys) { if (!crypter.SetKeyFromPassphrase( strWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod)) { return false; } if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, _vMasterKey)) { // try another master key continue; } if (CCryptoKeyStore::Unlock(_vMasterKey)) { return true; } } return false; } bool CWallet::ChangeWalletPassphrase( const SecureString &strOldWalletPassphrase, const SecureString &strNewWalletPassphrase) { bool fWasLocked = IsLocked(); LOCK(cs_wallet); Lock(); CCrypter crypter; CKeyingMaterial _vMasterKey; for (MasterKeyMap::value_type &pMasterKey : mapMasterKeys) { if (!crypter.SetKeyFromPassphrase( strOldWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod)) { return false; } if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, _vMasterKey)) { return false; } if (CCryptoKeyStore::Unlock(_vMasterKey)) { int64_t nStartTime = GetTimeMillis(); crypter.SetKeyFromPassphrase(strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod); pMasterKey.second.nDeriveIterations = static_cast<unsigned int>( pMasterKey.second.nDeriveIterations * (100 / ((double)(GetTimeMillis() - nStartTime)))); nStartTime = GetTimeMillis(); crypter.SetKeyFromPassphrase(strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod); pMasterKey.second.nDeriveIterations = 
(pMasterKey.second.nDeriveIterations + static_cast<unsigned int>( pMasterKey.second.nDeriveIterations * 100 / double(GetTimeMillis() - nStartTime))) / 2; if (pMasterKey.second.nDeriveIterations < 25000) { pMasterKey.second.nDeriveIterations = 25000; } LogPrintf( "Wallet passphrase changed to an nDeriveIterations of %i\n", pMasterKey.second.nDeriveIterations); if (!crypter.SetKeyFromPassphrase( strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod)) { return false; } if (!crypter.Encrypt(_vMasterKey, pMasterKey.second.vchCryptedKey)) { return false; } WalletBatch(*database).WriteMasterKey(pMasterKey.first, pMasterKey.second); if (fWasLocked) { Lock(); } return true; } } return false; } void CWallet::ChainStateFlushed(const CBlockLocator &loc) { WalletBatch batch(*database); batch.WriteBestBlock(loc); } void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch *batch_in, bool fExplicit) { // nWalletVersion LOCK(cs_wallet); if (nWalletVersion >= nVersion) { return; } // When doing an explicit upgrade, if we pass the max version permitted, // upgrade all the way. if (fExplicit && nVersion > nWalletMaxVersion) { nVersion = FEATURE_LATEST; } nWalletVersion = nVersion; if (nVersion > nWalletMaxVersion) { nWalletMaxVersion = nVersion; } WalletBatch *batch = batch_in ? batch_in : new WalletBatch(*database); if (nWalletVersion > 40000) { batch->WriteMinVersion(nWalletVersion); } if (!batch_in) { delete batch; } } bool CWallet::SetMaxVersion(int nVersion) { // nWalletVersion, nWalletMaxVersion LOCK(cs_wallet); // Cannot downgrade below current version if (nWalletVersion > nVersion) { return false; } nWalletMaxVersion = nVersion; return true; } std::set<TxId> CWallet::GetConflicts(const TxId &txid) const { std::set<TxId> result; AssertLockHeld(cs_wallet); std::map<TxId, CWalletTx>::const_iterator it = mapWallet.find(txid); if (it == mapWallet.end()) { return result; } const CWalletTx &wtx = it->second; std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range; for (const CTxIn &txin : wtx.tx->vin) { if (mapTxSpends.count(txin.prevout) <= 1) { // No conflict if zero or one spends. continue; } range = mapTxSpends.equal_range(txin.prevout); for (TxSpends::const_iterator _it = range.first; _it != range.second; ++_it) { result.insert(_it->second); } } return result; } bool CWallet::HasWalletSpend(const TxId &txid) const { AssertLockHeld(cs_wallet); auto iter = mapTxSpends.lower_bound(COutPoint(txid, 0)); return (iter != mapTxSpends.end() && iter->first.GetTxId() == txid); } void CWallet::Flush(bool shutdown) { database->Flush(shutdown); } void CWallet::SyncMetaData( std::pair<TxSpends::iterator, TxSpends::iterator> range) { // We want all the wallet transactions in range to have the same metadata as // the oldest (smallest nOrderPos). 
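// (The range comes from mapTxSpends.equal_range(): every wallet transaction
// spending the same outpoint, i.e. a set of mutually conflicting versions of
// one payment. Copying from the oldest keeps the user-visible metadata that
// is copied below, such as mapValue and vOrderForm, stable across conflicts.)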
// So: find smallest nOrderPos: int nMinOrderPos = std::numeric_limits<int>::max(); const CWalletTx *copyFrom = nullptr; for (TxSpends::iterator it = range.first; it != range.second; ++it) { const CWalletTx *wtx = &mapWallet.at(it->second); if (wtx->nOrderPos < nMinOrderPos) { nMinOrderPos = wtx->nOrderPos; copyFrom = wtx; } } // Now copy data from copyFrom to rest: for (TxSpends::iterator it = range.first; it != range.second; ++it) { const TxId &txid = it->second; CWalletTx *copyTo = &mapWallet.at(txid); if (copyFrom == copyTo) { continue; } assert( copyFrom && "Oldest wallet transaction in range assumed to have been found."); if (!copyFrom->IsEquivalentTo(*copyTo)) { continue; } copyTo->mapValue = copyFrom->mapValue; copyTo->vOrderForm = copyFrom->vOrderForm; // fTimeReceivedIsTxTime not copied on purpose nTimeReceived not copied // on purpose. copyTo->nTimeSmart = copyFrom->nTimeSmart; copyTo->fFromMe = copyFrom->fFromMe; copyTo->strFromAccount = copyFrom->strFromAccount; // nOrderPos not copied on purpose cached members not copied on purpose. } } /** * Outpoint is spent if any non-conflicted transaction, spends it: */ bool CWallet::IsSpent(const COutPoint &outpoint) const { std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(outpoint); for (TxSpends::const_iterator it = range.first; it != range.second; ++it) { const TxId &wtxid = it->second; std::map<TxId, CWalletTx>::const_iterator mit = mapWallet.find(wtxid); if (mit != mapWallet.end()) { int depth = mit->second.GetDepthInMainChain(); if (depth > 0 || (depth == 0 && !mit->second.isAbandoned())) { // Spent return true; } } } return false; } void CWallet::AddToSpends(const COutPoint &outpoint, const TxId &wtxid) { mapTxSpends.insert(std::make_pair(outpoint, wtxid)); std::pair<TxSpends::iterator, TxSpends::iterator> range; range = mapTxSpends.equal_range(outpoint); SyncMetaData(range); } void CWallet::AddToSpends(const TxId &wtxid) { auto it = mapWallet.find(wtxid); assert(it != mapWallet.end()); CWalletTx &thisTx = it->second; // Coinbases don't spend anything! 
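// Sketch of the mapTxSpends bookkeeping that AddToSpends, IsSpent and
// GetConflicts above rely on: a multimap from spent outpoint to the wallet
// txid that spends it. OutPointKey/TxIdStr below are simplified stand-ins for
// COutPoint/TxId, used for illustration only.
#include <map>
#include <set>
#include <string>
#include <utility>

using TxIdStr = std::string;
using OutPointKey = std::pair<TxIdStr, unsigned int>; // (funding txid, vout)

static void AddToSpendsSketch(std::multimap<OutPointKey, TxIdStr> &spends,
                              const OutPointKey &outpoint,
                              const TxIdStr &spender) {
    spends.emplace(outpoint, spender);
}

// Two or more registered spenders of the same outpoint conflict with each
// other; exactly one simply means the outpoint is spent.
static std::set<TxIdStr>
GetConflictsSketch(const std::multimap<OutPointKey, TxIdStr> &spends,
                   const OutPointKey &outpoint) {
    std::set<TxIdStr> result;
    if (spends.count(outpoint) <= 1) {
        return result; // zero or one spend: no conflict
    }
    auto range = spends.equal_range(outpoint);
    for (auto it = range.first; it != range.second; ++it) {
        result.insert(it->second);
    }
    return result;
}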
if (thisTx.IsCoinBase()) { return; } for (const CTxIn &txin : thisTx.tx->vin) { AddToSpends(txin.prevout, wtxid); } } bool CWallet::EncryptWallet(const SecureString &strWalletPassphrase) { if (IsCrypted()) { return false; } CKeyingMaterial _vMasterKey; _vMasterKey.resize(WALLET_CRYPTO_KEY_SIZE); GetStrongRandBytes(&_vMasterKey[0], WALLET_CRYPTO_KEY_SIZE); CMasterKey kMasterKey; kMasterKey.vchSalt.resize(WALLET_CRYPTO_SALT_SIZE); GetStrongRandBytes(&kMasterKey.vchSalt[0], WALLET_CRYPTO_SALT_SIZE); CCrypter crypter; int64_t nStartTime = GetTimeMillis(); crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, 25000, kMasterKey.nDerivationMethod); kMasterKey.nDeriveIterations = static_cast<unsigned int>( 2500000 / double(GetTimeMillis() - nStartTime)); nStartTime = GetTimeMillis(); crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, kMasterKey.nDeriveIterations, kMasterKey.nDerivationMethod); kMasterKey.nDeriveIterations = (kMasterKey.nDeriveIterations + static_cast<unsigned int>(kMasterKey.nDeriveIterations * 100 / double(GetTimeMillis() - nStartTime))) / 2; if (kMasterKey.nDeriveIterations < 25000) { kMasterKey.nDeriveIterations = 25000; } LogPrintf("Encrypting Wallet with an nDeriveIterations of %i\n", kMasterKey.nDeriveIterations); if (!crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, kMasterKey.nDeriveIterations, kMasterKey.nDerivationMethod)) { return false; } if (!crypter.Encrypt(_vMasterKey, kMasterKey.vchCryptedKey)) { return false; } { LOCK(cs_wallet); mapMasterKeys[++nMasterKeyMaxID] = kMasterKey; assert(!encrypted_batch); encrypted_batch = new WalletBatch(*database); if (!encrypted_batch->TxnBegin()) { delete encrypted_batch; encrypted_batch = nullptr; return false; } encrypted_batch->WriteMasterKey(nMasterKeyMaxID, kMasterKey); if (!EncryptKeys(_vMasterKey)) { encrypted_batch->TxnAbort(); delete encrypted_batch; // We now probably have half of our keys encrypted in memory, and // half not... die and let the user reload the unencrypted wallet. assert(false); } // Encryption was introduced in version 0.4.0 SetMinVersion(FEATURE_WALLETCRYPT, encrypted_batch, true); if (!encrypted_batch->TxnCommit()) { delete encrypted_batch; // We now have keys encrypted in memory, but not on disk... // die to avoid confusion and let the user reload the unencrypted // wallet. assert(false); } delete encrypted_batch; encrypted_batch = nullptr; Lock(); Unlock(strWalletPassphrase); // If we are using HD, replace the HD master key (seed) with a new one. if (IsHDEnabled()) { SetHDMasterKey(GenerateNewHDMasterKey()); } NewKeyPool(); Lock(); // Need to completely rewrite the wallet file; if we don't, bdb might // keep bits of the unencrypted private key in slack space in the // database file. database->Rewrite(); } NotifyStatusChanged(this); return true; } DBErrors CWallet::ReorderTransactions() { LOCK(cs_wallet); WalletBatch batch(*database); // Old wallets didn't have any defined order for transactions. Probably a // bad idea to change the output of this. // First: get all CWalletTx and CAccountingEntry into a sorted-by-time // multimap. 
TxItems txByTime; for (auto &entry : mapWallet) { CWalletTx *wtx = &entry.second; txByTime.insert( std::make_pair(wtx->nTimeReceived, TxPair(wtx, nullptr))); } std::list<CAccountingEntry> acentries; batch.ListAccountCreditDebit("", acentries); for (CAccountingEntry &entry : acentries) { txByTime.insert(std::make_pair(entry.nTime, TxPair(nullptr, &entry))); } nOrderPosNext = 0; std::vector<int64_t> nOrderPosOffsets; for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it) { CWalletTx *const pwtx = (*it).second.first; CAccountingEntry *const pacentry = (*it).second.second; int64_t &nOrderPos = (pwtx != nullptr) ? pwtx->nOrderPos : pacentry->nOrderPos; if (nOrderPos == -1) { nOrderPos = nOrderPosNext++; nOrderPosOffsets.push_back(nOrderPos); if (pwtx) { if (!batch.WriteTx(*pwtx)) { return DBErrors::LOAD_FAIL; } } else if (!batch.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) { return DBErrors::LOAD_FAIL; } } else { int64_t nOrderPosOff = 0; for (const int64_t &nOffsetStart : nOrderPosOffsets) { if (nOrderPos >= nOffsetStart) { ++nOrderPosOff; } } nOrderPos += nOrderPosOff; nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1); if (!nOrderPosOff) { continue; } // Since we're changing the order, write it back. if (pwtx) { if (!batch.WriteTx(*pwtx)) { return DBErrors::LOAD_FAIL; } } else if (!batch.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) { return DBErrors::LOAD_FAIL; } } } batch.WriteOrderPosNext(nOrderPosNext); return DBErrors::LOAD_OK; } int64_t CWallet::IncOrderPosNext(WalletBatch *batch) { // nOrderPosNext AssertLockHeld(cs_wallet); int64_t nRet = nOrderPosNext++; if (batch) { batch->WriteOrderPosNext(nOrderPosNext); } else { WalletBatch(*database).WriteOrderPosNext(nOrderPosNext); } return nRet; } bool CWallet::AccountMove(std::string strFrom, std::string strTo, const Amount nAmount, std::string strComment) { WalletBatch batch(*database); if (!batch.TxnBegin()) { return false; } int64_t nNow = GetAdjustedTime(); // Debit CAccountingEntry debit; debit.nOrderPos = IncOrderPosNext(&batch); debit.strAccount = strFrom; debit.nCreditDebit = -nAmount; debit.nTime = nNow; debit.strOtherAccount = strTo; debit.strComment = strComment; AddAccountingEntry(debit, &batch); // Credit CAccountingEntry credit; credit.nOrderPos = IncOrderPosNext(&batch); credit.strAccount = strTo; credit.nCreditDebit = nAmount; credit.nTime = nNow; credit.strOtherAccount = strFrom; credit.strComment = strComment; AddAccountingEntry(credit, &batch); return batch.TxnCommit(); } bool CWallet::GetLabelDestination(CTxDestination &dest, const std::string &label, bool bForceNew) { WalletBatch batch(*database); CAccount account; batch.ReadAccount(label, account); if (!bForceNew) { if (!account.vchPubKey.IsValid()) { bForceNew = true; } else { // Check if the current key has been used (TODO: check other // addresses with the same key) CScript scriptPubKey = GetScriptForDestination(GetDestinationForKey( account.vchPubKey, m_default_address_type)); for (std::map<TxId, CWalletTx>::iterator it = mapWallet.begin(); it != mapWallet.end() && account.vchPubKey.IsValid(); ++it) { for (const CTxOut &txout : (*it).second.tx->vout) { if (txout.scriptPubKey == scriptPubKey) { bForceNew = true; break; } } } } } // Generate a new key if (bForceNew) { if (!GetKeyFromPool(account.vchPubKey, false)) { return false; } LearnRelatedScripts(account.vchPubKey, m_default_address_type); dest = GetDestinationForKey(account.vchPubKey, m_default_address_type); SetAddressBook(dest, label, "receive"); batch.WriteAccount(label, 
account); } else { dest = GetDestinationForKey(account.vchPubKey, m_default_address_type); } return true; } void CWallet::MarkDirty() { LOCK(cs_wallet); for (std::pair<const TxId, CWalletTx> &item : mapWallet) { item.second.MarkDirty(); } } bool CWallet::AddToWallet(const CWalletTx &wtxIn, bool fFlushOnClose) { LOCK(cs_wallet); WalletBatch batch(*database, "r+", fFlushOnClose); const TxId &txid = wtxIn.GetId(); // Inserts only if not already there, returns tx inserted or tx found. std::pair<std::map<TxId, CWalletTx>::iterator, bool> ret = mapWallet.insert(std::make_pair(txid, wtxIn)); CWalletTx &wtx = (*ret.first).second; wtx.BindWallet(this); bool fInsertedNew = ret.second; if (fInsertedNew) { wtx.nTimeReceived = GetAdjustedTime(); wtx.nOrderPos = IncOrderPosNext(&batch); wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr))); wtx.nTimeSmart = ComputeTimeSmart(wtx); AddToSpends(txid); } bool fUpdated = false; if (!fInsertedNew) { // Merge if (!wtxIn.hashUnset() && wtxIn.hashBlock != wtx.hashBlock) { wtx.hashBlock = wtxIn.hashBlock; fUpdated = true; } // If no longer abandoned, update if (wtxIn.hashBlock.IsNull() && wtx.isAbandoned()) { wtx.hashBlock = wtxIn.hashBlock; fUpdated = true; } if (wtxIn.nIndex != -1 && (wtxIn.nIndex != wtx.nIndex)) { wtx.nIndex = wtxIn.nIndex; fUpdated = true; } if (wtxIn.fFromMe && wtxIn.fFromMe != wtx.fFromMe) { wtx.fFromMe = wtxIn.fFromMe; fUpdated = true; } } //// debug print LogPrintf("AddToWallet %s %s%s\n", wtxIn.GetId().ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : "")); // Write to disk if ((fInsertedNew || fUpdated) && !batch.WriteTx(wtx)) { return false; } // Break debit/credit balance caches: wtx.MarkDirty(); // Notify UI of new or updated transaction. NotifyTransactionChanged(this, txid, fInsertedNew ? CT_NEW : CT_UPDATED); // Notify an external script when a wallet transaction comes in or is // updated. std::string strCmd = gArgs.GetArg("-walletnotify", ""); if (!strCmd.empty()) { boost::replace_all(strCmd, "%s", wtxIn.GetId().GetHex()); std::thread t(runCommand, strCmd); // Thread runs free. t.detach(); } return true; } void CWallet::LoadToWallet(const CWalletTx &wtxIn) { const TxId &txid = wtxIn.GetId(); CWalletTx &wtx = mapWallet.emplace(txid, wtxIn).first->second; wtx.BindWallet(this); wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr))); AddToSpends(txid); for (const CTxIn &txin : wtx.tx->vin) { auto it = mapWallet.find(txin.prevout.GetTxId()); if (it != mapWallet.end()) { CWalletTx &prevtx = it->second; if (prevtx.nIndex == -1 && !prevtx.hashUnset()) { MarkConflicted(prevtx.hashBlock, wtx.GetId()); } } } } /** * Add a transaction to the wallet, or update it. pIndex and posInBlock should * be set when the transaction was known to be included in a block. When pIndex * == nullptr, then wallet state is not updated in AddToWallet, but * notifications happen and cached balances are marked dirty. * * If fUpdate is true, existing transactions will be updated. * TODO: One exception to this is that the abandoned state is cleared under the * assumption that any further notification of a transaction that was considered * abandoned is an indication that it is not safe to be considered abandoned. * Abandoned state should probably be more carefully tracked via different * posInBlock signals or by checking mempool presence when necessary. 
*/ bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef &ptx, const CBlockIndex *pIndex, int posInBlock, bool fUpdate) { const CTransaction &tx = *ptx; AssertLockHeld(cs_wallet); if (pIndex != nullptr) { for (const CTxIn &txin : tx.vin) { std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(txin.prevout); while (range.first != range.second) { if (range.first->second != tx.GetId()) { LogPrintf("Transaction %s (in block %s) conflicts with " "wallet transaction %s (both spend %s:%i)\n", tx.GetId().ToString(), pIndex->GetBlockHash().ToString(), range.first->second.ToString(), range.first->first.GetTxId().ToString(), range.first->first.GetN()); MarkConflicted(pIndex->GetBlockHash(), range.first->second); } range.first++; } } } bool fExisted = mapWallet.count(tx.GetId()) != 0; if (fExisted && !fUpdate) { return false; } if (fExisted || IsMine(tx) || IsFromMe(tx)) { /** * Check if any keys in the wallet keypool that were supposed to be * unused have appeared in a new transaction. If so, remove those keys * from the keypool. This can happen when restoring an old wallet backup * that does not contain the mostly recently created transactions from * newer versions of the wallet. */ // loop though all outputs for (const CTxOut &txout : tx.vout) { // extract addresses and check if they match with an unused keypool // key std::vector<CKeyID> vAffected; CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey); for (const CKeyID &keyid : vAffected) { std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid); if (mi != m_pool_key_to_index.end()) { LogPrintf("%s: Detected a used keypool key, mark all " "keypool key up to this key as used\n", __func__); MarkReserveKeysAsUsed(mi->second); if (!TopUpKeyPool()) { LogPrintf( "%s: Topping up keypool failed (locked wallet)\n", __func__); } } } } CWalletTx wtx(this, ptx); // Get merkle branch if transaction was found in a block if (pIndex != nullptr) { wtx.SetMerkleBranch(pIndex, posInBlock); } return AddToWallet(wtx, false); } return false; } bool CWallet::TransactionCanBeAbandoned(const TxId &txid) const { LOCK2(cs_main, cs_wallet); const CWalletTx *wtx = GetWalletTx(txid); return wtx && !wtx->isAbandoned() && wtx->GetDepthInMainChain() == 0 && !wtx->InMempool(); } bool CWallet::AbandonTransaction(const TxId &txid) { LOCK2(cs_main, cs_wallet); WalletBatch batch(*database, "r+"); std::set<TxId> todo; std::set<TxId> done; // Can't mark abandoned if confirmed or in mempool auto it = mapWallet.find(txid); assert(it != mapWallet.end()); CWalletTx &origtx = it->second; if (origtx.GetDepthInMainChain() != 0 || origtx.InMempool()) { return false; } todo.insert(txid); while (!todo.empty()) { const TxId now = *todo.begin(); todo.erase(now); done.insert(now); it = mapWallet.find(now); assert(it != mapWallet.end()); CWalletTx &wtx = it->second; int currentconfirm = wtx.GetDepthInMainChain(); // If the orig tx was not in block, none of its spends can be. assert(currentconfirm <= 0); // If (currentconfirm < 0) {Tx and spends are already conflicted, no // need to abandon} if (currentconfirm == 0 && !wtx.isAbandoned()) { // If the orig tx was not in block/mempool, none of its spends can // be in mempool. assert(!wtx.InMempool()); wtx.nIndex = -1; wtx.setAbandoned(); wtx.MarkDirty(); batch.WriteTx(wtx); NotifyTransactionChanged(this, wtx.GetId(), CT_UPDATED); // Iterate over all its outputs, and mark transactions in the wallet // that spend them abandoned too. 
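// The descendant walk that follows (and MarkConflicted further down) uses a
// todo/done worklist over mapTxSpends so each transaction is visited once. A
// compact sketch of that traversal pattern under the simplified types from
// the previous sketch; `mark` is a hypothetical callback applied per visit.
#include <map>
#include <set>
#include <string>
#include <utility>

// Visit `start` and every wallet transaction that transitively spends one of
// its outputs. `spends` maps each spent outpoint to the spender's txid,
// mirroring mapTxSpends, whose keys sort by txid and then output index.
template <typename MarkFn>
static void WalkDescendantsSketch(
    const TxIdStr &start,
    const std::multimap<OutPointKey, TxIdStr> &spends, MarkFn mark) {
    std::set<TxIdStr> todo{start};
    std::set<TxIdStr> done;
    while (!todo.empty()) {
        const TxIdStr now = *todo.begin();
        todo.erase(todo.begin());
        done.insert(now);
        mark(now);
        // All outpoints of `now` sort directly after (now, 0) in the multimap.
        for (auto it = spends.lower_bound({now, 0});
             it != spends.end() && it->first.first == now; ++it) {
            if (!done.count(it->second)) {
                todo.insert(it->second);
            }
        }
    }
}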
TxSpends::const_iterator iter = mapTxSpends.lower_bound(COutPoint(now, 0)); while (iter != mapTxSpends.end() && iter->first.GetTxId() == now) { if (!done.count(iter->second)) { todo.insert(iter->second); } iter++; } // If a transaction changes 'conflicted' state, that changes the // balance available of the outputs it spends. So force those to be // recomputed. for (const CTxIn &txin : wtx.tx->vin) { auto it2 = mapWallet.find(txin.prevout.GetTxId()); if (it2 != mapWallet.end()) { it2->second.MarkDirty(); } } } } return true; } void CWallet::MarkConflicted(const uint256 &hashBlock, const TxId &txid) { LOCK2(cs_main, cs_wallet); int conflictconfirms = 0; CBlockIndex *pindex = LookupBlockIndex(hashBlock); if (pindex && chainActive.Contains(pindex)) { conflictconfirms = -(chainActive.Height() - pindex->nHeight + 1); } // If number of conflict confirms cannot be determined, this means that the // block is still unknown or not yet part of the main chain, for example // when loading the wallet during a reindex. Do nothing in that case. if (conflictconfirms >= 0) { return; } // Do not flush the wallet here for performance reasons. WalletBatch batch(*database, "r+", false); std::set<TxId> todo; std::set<TxId> done; todo.insert(txid); while (!todo.empty()) { const TxId now = *todo.begin(); todo.erase(now); done.insert(now); auto it = mapWallet.find(now); assert(it != mapWallet.end()); CWalletTx &wtx = it->second; int currentconfirm = wtx.GetDepthInMainChain(); if (conflictconfirms < currentconfirm) { // Block is 'more conflicted' than current confirm; update. // Mark transaction as conflicted with this block. wtx.nIndex = -1; wtx.hashBlock = hashBlock; wtx.MarkDirty(); batch.WriteTx(wtx); // Iterate over all its outputs, and mark transactions in the wallet // that spend them conflicted too. TxSpends::const_iterator iter = mapTxSpends.lower_bound(COutPoint(now, 0)); while (iter != mapTxSpends.end() && iter->first.GetTxId() == now) { if (!done.count(iter->second)) { todo.insert(iter->second); } iter++; } // If a transaction changes 'conflicted' state, that changes the // balance available of the outputs it spends. So force those to be // recomputed. for (const CTxIn &txin : wtx.tx->vin) { auto it2 = mapWallet.find(txin.prevout.GetTxId()); if (it2 != mapWallet.end()) { it2->second.MarkDirty(); } } } } } void CWallet::SyncTransaction(const CTransactionRef &ptx, const CBlockIndex *pindex, int posInBlock) { const CTransaction &tx = *ptx; if (!AddToWalletIfInvolvingMe(ptx, pindex, posInBlock, true)) { // Not one of ours return; } // If a transaction changes 'conflicted' state, that changes the balance // available of the outputs it spends. So force those to be // recomputed, also: for (const CTxIn &txin : tx.vin) { auto it = mapWallet.find(txin.prevout.GetTxId()); if (it != mapWallet.end()) { it->second.MarkDirty(); } } } void CWallet::TransactionAddedToMempool(const CTransactionRef &ptx) { LOCK2(cs_main, cs_wallet); SyncTransaction(ptx); auto it = mapWallet.find(ptx->GetId()); if (it != mapWallet.end()) { it->second.fInMempool = true; } } void CWallet::TransactionRemovedFromMempool(const CTransactionRef &ptx) { LOCK(cs_wallet); auto it = mapWallet.find(ptx->GetId()); if (it != mapWallet.end()) { it->second.fInMempool = false; } } void CWallet::BlockConnected( const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex, const std::vector<CTransactionRef> &vtxConflicted) { LOCK2(cs_main, cs_wallet); // TODO: Temporarily ensure that mempool removals are notified before // connected transactions. 
This shouldn't matter, but the abandoned state of // transactions in our wallet is currently cleared when we receive another // notification and there is a race condition where notification of a // connected conflict might cause an outside process to abandon a // transaction and then have it inadvertently cleared by the notification // that the conflicted transaction was evicted. for (const CTransactionRef &ptx : vtxConflicted) { SyncTransaction(ptx); TransactionRemovedFromMempool(ptx); } for (size_t i = 0; i < pblock->vtx.size(); i++) { SyncTransaction(pblock->vtx[i], pindex, i); TransactionRemovedFromMempool(pblock->vtx[i]); } m_last_block_processed = pindex; } void CWallet::BlockDisconnected(const std::shared_ptr<const CBlock> &pblock) { LOCK2(cs_main, cs_wallet); for (const CTransactionRef &ptx : pblock->vtx) { SyncTransaction(ptx); } } void CWallet::BlockUntilSyncedToCurrentChain() { AssertLockNotHeld(cs_main); AssertLockNotHeld(cs_wallet); { // Skip the queue-draining stuff if we know we're caught up with // chainActive.Tip()... // We could also take cs_wallet here, and call m_last_block_processed // protected by cs_wallet instead of cs_main, but as long as we need // cs_main here anyway, it's easier to just call it cs_main-protected. LOCK(cs_main); const CBlockIndex *initialChainTip = chainActive.Tip(); if (m_last_block_processed->GetAncestor(initialChainTip->nHeight) == initialChainTip) { return; } } // ...otherwise put a callback in the validation interface queue and wait // for the queue to drain enough to execute it (indicating we are caught up // at least with the time we entered this function). SyncWithValidationInterfaceQueue(); } isminetype CWallet::IsMine(const CTxIn &txin) const { LOCK(cs_wallet); std::map<TxId, CWalletTx>::const_iterator mi = mapWallet.find(txin.prevout.GetTxId()); if (mi != mapWallet.end()) { const CWalletTx &prev = (*mi).second; if (txin.prevout.GetN() < prev.tx->vout.size()) { return IsMine(prev.tx->vout[txin.prevout.GetN()]); } } return ISMINE_NO; } // Note that this function doesn't distinguish between a 0-valued input, and a // not-"is mine" (according to the filter) input. Amount CWallet::GetDebit(const CTxIn &txin, const isminefilter &filter) const { LOCK(cs_wallet); std::map<TxId, CWalletTx>::const_iterator mi = mapWallet.find(txin.prevout.GetTxId()); if (mi != mapWallet.end()) { const CWalletTx &prev = (*mi).second; if (txin.prevout.GetN() < prev.tx->vout.size()) { if (IsMine(prev.tx->vout[txin.prevout.GetN()]) & filter) { return prev.tx->vout[txin.prevout.GetN()].nValue; } } } return Amount::zero(); } isminetype CWallet::IsMine(const CTxOut &txout) const { return ::IsMine(*this, txout.scriptPubKey); } Amount CWallet::GetCredit(const CTxOut &txout, const isminefilter &filter) const { if (!MoneyRange(txout.nValue)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } return (IsMine(txout) & filter) ? txout.nValue : Amount::zero(); } bool CWallet::IsChange(const CTxOut &txout) const { // TODO: fix handling of 'change' outputs. The assumption is that any // payment to a script that is ours, but is not in the address book is // change. That assumption is likely to break when we implement // multisignature wallets that return change back into a // multi-signature-protected address; a better way of identifying which // outputs are 'the send' and which are 'the change' will need to be // implemented (maybe extend CWalletTx to remember which output, if any, was // change). 
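// As the TODO above notes, the current heuristic treats an output as change
// when it pays a script we control but has no address-book entry (or no
// extractable destination). A sketch of just that decision, with the wallet
// lookups abstracted into boolean inputs; the parameter names are
// placeholders for ::IsMine, ExtractDestination and the mapAddressBook check.
static bool IsChangeSketch(bool is_mine, bool extract_dest_ok,
                           bool in_address_book) {
    if (!is_mine) {
        return false; // not ours at all, so not our change
    }
    if (!extract_dest_ok) {
        return true; // ours but no usable destination: treat as change
    }
    return !in_address_book; // ours and unlabeled: assumed to be change
}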
if (::IsMine(*this, txout.scriptPubKey)) { CTxDestination address; if (!ExtractDestination(txout.scriptPubKey, address)) { return true; } LOCK(cs_wallet); if (!mapAddressBook.count(address)) { return true; } } return false; } Amount CWallet::GetChange(const CTxOut &txout) const { if (!MoneyRange(txout.nValue)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } return (IsChange(txout) ? txout.nValue : Amount::zero()); } bool CWallet::IsMine(const CTransaction &tx) const { for (const CTxOut &txout : tx.vout) { if (IsMine(txout)) { return true; } } return false; } bool CWallet::IsFromMe(const CTransaction &tx) const { return GetDebit(tx, ISMINE_ALL) > Amount::zero(); } Amount CWallet::GetDebit(const CTransaction &tx, const isminefilter &filter) const { Amount nDebit = Amount::zero(); for (const CTxIn &txin : tx.vin) { nDebit += GetDebit(txin, filter); if (!MoneyRange(nDebit)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } } return nDebit; } bool CWallet::IsAllFromMe(const CTransaction &tx, const isminefilter &filter) const { LOCK(cs_wallet); for (const CTxIn &txin : tx.vin) { auto mi = mapWallet.find(txin.prevout.GetTxId()); if (mi == mapWallet.end()) { // Any unknown inputs can't be from us. return false; } const CWalletTx &prev = (*mi).second; if (txin.prevout.GetN() >= prev.tx->vout.size()) { // Invalid input! return false; } if (!(IsMine(prev.tx->vout[txin.prevout.GetN()]) & filter)) { return false; } } return true; } Amount CWallet::GetCredit(const CTransaction &tx, const isminefilter &filter) const { Amount nCredit = Amount::zero(); for (const CTxOut &txout : tx.vout) { nCredit += GetCredit(txout, filter); if (!MoneyRange(nCredit)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } } return nCredit; } Amount CWallet::GetChange(const CTransaction &tx) const { Amount nChange = Amount::zero(); for (const CTxOut &txout : tx.vout) { nChange += GetChange(txout); if (!MoneyRange(nChange)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } } return nChange; } CPubKey CWallet::GenerateNewHDMasterKey() { CKey key; key.MakeNewKey(true); int64_t nCreationTime = GetTime(); CKeyMetadata metadata(nCreationTime); // Calculate the pubkey. CPubKey pubkey = key.GetPubKey(); assert(key.VerifyPubKey(pubkey)); // Set the hd keypath to "m" -> Master, refers the masterkeyid to itself. metadata.hdKeypath = "m"; metadata.hdMasterKeyID = pubkey.GetID(); LOCK(cs_wallet); // mem store the metadata mapKeyMetadata[pubkey.GetID()] = metadata; // Write the key&metadata to the database. if (!AddKeyPubKey(key, pubkey)) { throw std::runtime_error(std::string(__func__) + ": AddKeyPubKey failed"); } return pubkey; } void CWallet::SetHDMasterKey(const CPubKey &pubkey) { LOCK(cs_wallet); // Store the keyid (hash160) together with the child index counter in the // database as a hdchain object. CHDChain newHdChain; newHdChain.nVersion = CanSupportFeature(FEATURE_HD_SPLIT) ? CHDChain::VERSION_HD_CHAIN_SPLIT : CHDChain::VERSION_HD_BASE; newHdChain.masterKeyID = pubkey.GetID(); SetHDChain(newHdChain, false); } void CWallet::SetHDChain(const CHDChain &chain, bool memonly) { LOCK(cs_wallet); if (!memonly && !WalletBatch(*database).WriteHDChain(chain)) { throw std::runtime_error(std::string(__func__) + ": writing chain failed"); } hdChain = chain; } bool CWallet::IsHDEnabled() const { return !hdChain.masterKeyID.IsNull(); } int64_t CWalletTx::GetTxTime() const { int64_t n = nTimeSmart; return n ? 
n : nTimeReceived; } // Helper for producing a max-sized low-S signature (eg 72 bytes) bool CWallet::DummySignInput(CTxIn &tx_in, const CTxOut &txout) const { // Fill in dummy signatures for fee calculation. const CScript &scriptPubKey = txout.scriptPubKey; SignatureData sigdata; if (!ProduceSignature(*this, DUMMY_SIGNATURE_CREATOR, scriptPubKey, sigdata)) { return false; } UpdateInput(tx_in, sigdata); return true; } // Helper for producing a bunch of max-sized low-S signatures (eg 72 bytes) bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut> &txouts) const { // Fill in dummy signatures for fee calculation. int nIn = 0; for (const auto &txout : txouts) { if (!DummySignInput(txNew.vin[nIn], txout)) { return false; } nIn++; } return true; } int64_t CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *wallet) { std::vector<CTxOut> txouts; // Look up the inputs. We should have already checked that this transaction // IsAllFromMe(ISMINE_SPENDABLE), so every input should already be in our // wallet, with a valid index into the vout array, and the ability to sign. for (auto &input : tx.vin) { const auto mi = wallet->mapWallet.find(input.prevout.GetTxId()); if (mi == wallet->mapWallet.end()) { return -1; } assert(input.prevout.GetN() < mi->second.tx->vout.size()); txouts.emplace_back(mi->second.tx->vout[input.prevout.GetN()]); } return CalculateMaximumSignedTxSize(tx, wallet, txouts); } // txouts needs to be in the order of tx.vin int64_t CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *wallet, const std::vector<CTxOut> &txouts) { CMutableTransaction txNew(tx); if (!wallet->DummySignTx(txNew, txouts)) { // This should never happen, because IsAllFromMe(ISMINE_SPENDABLE) // implies that we can sign for every input. return -1; } return GetVirtualTransactionSize(CTransaction(txNew)); } int CalculateMaximumSignedInputSize(const CTxOut &txout, const CWallet *wallet) { CMutableTransaction txn; txn.vin.push_back(CTxIn(COutPoint())); if (!wallet->DummySignInput(txn.vin[0], txout)) { // This should never happen, because IsAllFromMe(ISMINE_SPENDABLE) // implies that we can sign for every input. return -1; } return GetVirtualTransactionInputSize(txn.vin[0]); } void CWalletTx::GetAmounts(std::list<COutputEntry> &listReceived, std::list<COutputEntry> &listSent, Amount &nFee, std::string &strSentAccount, const isminefilter &filter) const { nFee = Amount::zero(); listReceived.clear(); listSent.clear(); strSentAccount = strFromAccount; // Compute fee: Amount nDebit = GetDebit(filter); // debit>0 means we signed/sent this transaction. if (nDebit > Amount::zero()) { Amount nValueOut = tx->GetValueOut(); nFee = (nDebit - nValueOut); } // Sent/received. for (unsigned int i = 0; i < tx->vout.size(); ++i) { const CTxOut &txout = tx->vout[i]; isminetype fIsMine = pwallet->IsMine(txout); // Only need to handle txouts if AT LEAST one of these is true: // 1) they debit from us (sent) // 2) the output is to us (received) if (nDebit > Amount::zero()) { // Don't report 'change' txouts if (pwallet->IsChange(txout)) { continue; } } else if (!(fIsMine & filter)) { continue; } // In either case, we need to get the destination address. 
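// GetAmounts above derives the fee from the debit: when the wallet funded the
// transaction (debit > 0), the implied fee is what went in minus what came
// out. A sketch of that arithmetic, with plain satoshi integers standing in
// for the Amount type.
#include <cstdint>
#include <vector>

static int64_t ImpliedFeeSketch(int64_t debit_from_wallet,
                                const std::vector<int64_t> &output_values) {
    if (debit_from_wallet <= 0) {
        return 0; // we did not fund this transaction; no fee attributable
    }
    int64_t value_out = 0;
    for (int64_t v : output_values) {
        value_out += v;
    }
    return debit_from_wallet - value_out;
}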
CTxDestination address; if (!ExtractDestination(txout.scriptPubKey, address) && !txout.scriptPubKey.IsUnspendable()) { LogPrintf("CWalletTx::GetAmounts: Unknown transaction type found, " "txid %s\n", this->GetId().ToString()); address = CNoDestination(); } COutputEntry output = {address, txout.nValue, (int)i}; // If we are debited by the transaction, add the output as a "sent" // entry. if (nDebit > Amount::zero()) { listSent.push_back(output); } // If we are receiving the output, add it as a "received" entry. if (fIsMine & filter) { listReceived.push_back(output); } } } /** * Scan active chain for relevant transactions after importing keys. This should * be called whenever new keys are added to the wallet, with the oldest key * creation time. * * @return Earliest timestamp that could be successfully scanned from. Timestamp * returned will be higher than startTime if relevant blocks could not be read. */ int64_t CWallet::RescanFromTime(int64_t startTime, const WalletRescanReserver &reserver, bool update) { // Find starting block. May be null if nCreateTime is greater than the // highest blockchain timestamp, in which case there is nothing that needs // to be scanned. CBlockIndex *startBlock = nullptr; { LOCK(cs_main); startBlock = chainActive.FindEarliestAtLeast(startTime - TIMESTAMP_WINDOW); LogPrintf("%s: Rescanning last %i blocks\n", __func__, startBlock ? chainActive.Height() - startBlock->nHeight + 1 : 0); } if (startBlock) { const CBlockIndex *const failedBlock = ScanForWalletTransactions(startBlock, nullptr, reserver, update); if (failedBlock) { return failedBlock->GetBlockTimeMax() + TIMESTAMP_WINDOW + 1; } } return startTime; } /** * Scan the block chain (starting in pindexStart) for transactions from or to * us. If fUpdate is true, found transactions that already exist in the wallet * will be updated. * * Returns null if scan was successful. Otherwise, if a complete rescan was not * possible (due to pruning or corruption), returns pointer to the most recent * block that could not be scanned. * * If pindexStop is not a nullptr, the scan will stop at the block-index * defined by pindexStop * * Caller needs to make sure pindexStop (and the optional pindexStart) are on * the main chain after to the addition of any new keys you want to detect * transactions for. */ CBlockIndex *CWallet::ScanForWalletTransactions( CBlockIndex *pindexStart, CBlockIndex *pindexStop, const WalletRescanReserver &reserver, bool fUpdate) { int64_t nNow = GetTime(); assert(reserver.isReserved()); if (pindexStop) { assert(pindexStop->nHeight >= pindexStart->nHeight); } CBlockIndex *pindex = pindexStart; CBlockIndex *ret = nullptr; if (pindex) { LogPrintf("Rescan started from block %d...\n", pindex->nHeight); } { fAbortRescan = false; // Show rescan progress in GUI as dialog or on splashscreen, if -rescan // on startup. 
ShowProgress(_("Rescanning..."), 0); CBlockIndex *tip = nullptr; double progress_begin; double progress_end; { LOCK(cs_main); progress_begin = GuessVerificationProgress(chainParams.TxData(), pindex); if (pindexStop == nullptr) { tip = chainActive.Tip(); progress_end = GuessVerificationProgress(chainParams.TxData(), tip); } else { progress_end = GuessVerificationProgress(chainParams.TxData(), pindexStop); } } double progress_current = progress_begin; while (pindex && !fAbortRescan && !ShutdownRequested()) { if (pindex->nHeight % 100 == 0 && progress_end - progress_begin > 0.0) { ShowProgress( _("Rescanning..."), std::max( 1, std::min(99, (int)((progress_current - progress_begin) / (progress_end - progress_begin) * 100)))); } if (GetTime() >= nNow + 60) { nNow = GetTime(); LogPrintf("Still rescanning. At block %d. Progress=%f\n", pindex->nHeight, progress_current); } CBlock block; if (ReadBlockFromDisk(block, pindex, chainParams.GetConsensus())) { LOCK2(cs_main, cs_wallet); if (pindex && !chainActive.Contains(pindex)) { // Abort scan if current block is no longer active, to // prevent marking transactions as coming from the wrong // block. ret = pindex; break; } for (size_t posInBlock = 0; posInBlock < block.vtx.size(); ++posInBlock) { AddToWalletIfInvolvingMe(block.vtx[posInBlock], pindex, posInBlock, fUpdate); } } else { ret = pindex; } if (pindex == pindexStop) { break; } { LOCK(cs_main); pindex = chainActive.Next(pindex); progress_current = GuessVerificationProgress(chainParams.TxData(), pindex); if (pindexStop == nullptr && tip != chainActive.Tip()) { tip = chainActive.Tip(); // in case the tip has changed, update progress max progress_end = GuessVerificationProgress(chainParams.TxData(), tip); } } } if (pindex && fAbortRescan) { LogPrintf("Rescan aborted at block %d. Progress=%f\n", pindex->nHeight, progress_current); } else if (pindex && ShutdownRequested()) { LogPrintf("Rescan interrupted by shutdown request at block %d. " "Progress=%f\n", pindex->nHeight, progress_current); } // Hide progress dialog in GUI. ShowProgress(_("Rescanning..."), 100); } return ret; } void CWallet::ReacceptWalletTransactions() { // If transactions aren't being broadcasted, don't let them into local // mempool either. if (!fBroadcastTransactions) { return; } LOCK2(cs_main, cs_wallet); std::map<int64_t, CWalletTx *> mapSorted; // Sort pending wallet transactions based on their initial wallet insertion // order. for (std::pair<const TxId, CWalletTx> &item : mapWallet) { const TxId &wtxid = item.first; CWalletTx &wtx = item.second; assert(wtx.GetId() == wtxid); int nDepth = wtx.GetDepthInMainChain(); if (!wtx.IsCoinBase() && (nDepth == 0 && !wtx.isAbandoned())) { mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx)); } } // Try to add wallet transactions to memory pool. for (std::pair<const int64_t, CWalletTx *> &item : mapSorted) { CWalletTx &wtx = *(item.second); CValidationState state; wtx.AcceptToMemoryPool(maxTxFee, state); } } bool CWalletTx::RelayWalletTransaction(CConnman *connman) { assert(pwallet->GetBroadcastTransactions()); if (IsCoinBase() || isAbandoned() || GetDepthInMainChain() != 0) { return false; } CValidationState state; // GetDepthInMainChain already catches known conflicts. 
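// The rescan loop in ScanForWalletTransactions above reports progress as a
// linear interpolation between the verification-progress estimates at the
// start and end points, clamped to [1, 99] so the dialog never shows 0% or
// 100% prematurely. A sketch of that calculation (name hypothetical).
#include <algorithm>

static int RescanProgressPercentSketch(double begin, double current,
                                       double end) {
    if (end - begin <= 0.0) {
        return 1; // degenerate range; the loop above simply skips reporting
    }
    int pct = static_cast<int>((current - begin) / (end - begin) * 100);
    return std::max(1, std::min(99, pct));
}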
if (InMempool() || AcceptToMemoryPool(maxTxFee, state)) { LogPrintf("Relaying wtx %s\n", GetId().ToString()); if (connman) { CInv inv(MSG_TX, GetId()); connman->ForEachNode( [&inv](CNode *pnode) { pnode->PushInventory(inv); }); return true; } } return false; } std::set<TxId> CWalletTx::GetConflicts() const { std::set<TxId> result; if (pwallet != nullptr) { const TxId &txid = GetId(); result = pwallet->GetConflicts(txid); result.erase(txid); } return result; } Amount CWalletTx::GetDebit(const isminefilter &filter) const { if (tx->vin.empty()) { return Amount::zero(); } Amount debit = Amount::zero(); if (filter & ISMINE_SPENDABLE) { if (fDebitCached) { debit += nDebitCached; } else { nDebitCached = pwallet->GetDebit(*tx, ISMINE_SPENDABLE); fDebitCached = true; debit += nDebitCached; } } if (filter & ISMINE_WATCH_ONLY) { if (fWatchDebitCached) { debit += nWatchDebitCached; } else { nWatchDebitCached = pwallet->GetDebit(*tx, ISMINE_WATCH_ONLY); fWatchDebitCached = true; debit += Amount(nWatchDebitCached); } } return debit; } Amount CWalletTx::GetCredit(const isminefilter &filter) const { // Must wait until coinbase is safely deep enough in the chain before // valuing it. if (IsImmatureCoinBase()) { return Amount::zero(); } Amount credit = Amount::zero(); if (filter & ISMINE_SPENDABLE) { // GetBalance can assume transactions in mapWallet won't change. if (fCreditCached) { credit += nCreditCached; } else { nCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE); fCreditCached = true; credit += nCreditCached; } } if (filter & ISMINE_WATCH_ONLY) { if (fWatchCreditCached) { credit += nWatchCreditCached; } else { nWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY); fWatchCreditCached = true; credit += nWatchCreditCached; } } return credit; } Amount CWalletTx::GetImmatureCredit(bool fUseCache) const { if (IsImmatureCoinBase() && IsInMainChain()) { if (fUseCache && fImmatureCreditCached) { return nImmatureCreditCached; } nImmatureCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE); fImmatureCreditCached = true; return nImmatureCreditCached; } return Amount::zero(); } Amount CWalletTx::GetAvailableCredit(bool fUseCache) const { if (pwallet == nullptr) { return Amount::zero(); } // Must wait until coinbase is safely deep enough in the chain before // valuing it. if (IsImmatureCoinBase()) { return Amount::zero(); } if (fUseCache && fAvailableCreditCached) { return nAvailableCreditCached; } Amount nCredit = Amount::zero(); const TxId &txid = GetId(); for (uint32_t i = 0; i < tx->vout.size(); i++) { if (!pwallet->IsSpent(COutPoint(txid, i))) { const CTxOut &txout = tx->vout[i]; nCredit += pwallet->GetCredit(txout, ISMINE_SPENDABLE); if (!MoneyRange(nCredit)) { throw std::runtime_error(std::string(__func__) + " : value out of range"); } } } nAvailableCreditCached = nCredit; fAvailableCreditCached = true; return nCredit; } Amount CWalletTx::GetImmatureWatchOnlyCredit(const bool fUseCache) const { if (IsImmatureCoinBase() && IsInMainChain()) { if (fUseCache && fImmatureWatchCreditCached) { return nImmatureWatchCreditCached; } nImmatureWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY); fImmatureWatchCreditCached = true; return nImmatureWatchCreditCached; } return Amount::zero(); } Amount CWalletTx::GetAvailableWatchOnlyCredit(const bool fUseCache) const { if (pwallet == nullptr) { return Amount::zero(); } // Must wait until coinbase is safely deep enough in the chain before // valuing it. 
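// The credit/debit getters above all share one lazy-caching pattern: a flag
// plus a stored Amount per flavour, invalidated by MarkDirty(). A minimal
// sketch of that memoisation with a callable supplying the recomputed value;
// the struct is illustrative, not a wallet type.
#include <cstdint>
#include <functional>

struct CachedAmountSketch {
    bool cached = false;
    int64_t value = 0;

    int64_t Get(const std::function<int64_t()> &compute) {
        if (!cached) {
            value = compute(); // e.g. pwallet->GetDebit(...) in the real code
            cached = true;
        }
        return value;
    }

    void MarkDirty() { cached = false; } // mirrors CWalletTx::MarkDirty()
};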
if (IsCoinBase() && GetBlocksToMaturity() > 0) { return Amount::zero(); } if (fUseCache && fAvailableWatchCreditCached) { return nAvailableWatchCreditCached; } Amount nCredit = Amount::zero(); const TxId &txid = GetId(); for (uint32_t i = 0; i < tx->vout.size(); i++) { if (!pwallet->IsSpent(COutPoint(txid, i))) { const CTxOut &txout = tx->vout[i]; nCredit += pwallet->GetCredit(txout, ISMINE_WATCH_ONLY); if (!MoneyRange(nCredit)) { throw std::runtime_error(std::string(__func__) + ": value out of range"); } } } nAvailableWatchCreditCached = nCredit; fAvailableWatchCreditCached = true; return nCredit; } Amount CWalletTx::GetChange() const { if (fChangeCached) { return nChangeCached; } nChangeCached = pwallet->GetChange(*tx); fChangeCached = true; return nChangeCached; } bool CWalletTx::InMempool() const { return fInMempool; } bool CWalletTx::IsTrusted() const { // Quick answer in most cases if (!CheckFinalTx(*tx)) { return false; } int nDepth = GetDepthInMainChain(); if (nDepth >= 1) { return true; } if (nDepth < 0) { return false; } // using wtx's cached debit if (!pwallet->m_spend_zero_conf_change || !IsFromMe(ISMINE_ALL)) { return false; } // Don't trust unconfirmed transactions from us unless they are in the // mempool. if (!InMempool()) { return false; } // Trusted if all inputs are from us and are in the mempool: for (const CTxIn &txin : tx->vin) { // Transactions not sent by us: not trusted const CWalletTx *parent = pwallet->GetWalletTx(txin.prevout.GetTxId()); if (parent == nullptr) { return false; } const CTxOut &parentOut = parent->tx->vout[txin.prevout.GetN()]; if (pwallet->IsMine(parentOut) != ISMINE_SPENDABLE) { return false; } } return true; } bool CWalletTx::IsEquivalentTo(const CWalletTx &_tx) const { CMutableTransaction tx1{*this->tx}; CMutableTransaction tx2{*_tx.tx}; for (auto &txin : tx1.vin) { txin.scriptSig = CScript(); } for (auto &txin : tx2.vin) { txin.scriptSig = CScript(); } return CTransaction(tx1) == CTransaction(tx2); } std::vector<uint256> CWallet::ResendWalletTransactionsBefore(int64_t nTime, CConnman *connman) { std::vector<uint256> result; LOCK(cs_wallet); // Sort them in chronological order std::multimap<unsigned int, CWalletTx *> mapSorted; for (std::pair<const TxId, CWalletTx> &item : mapWallet) { CWalletTx &wtx = item.second; // Don't rebroadcast if newer than nTime: if (wtx.nTimeReceived > nTime) { continue; } mapSorted.insert(std::make_pair(wtx.nTimeReceived, &wtx)); } for (std::pair<const unsigned int, CWalletTx *> &item : mapSorted) { CWalletTx &wtx = *item.second; if (wtx.RelayWalletTransaction(connman)) { result.push_back(wtx.GetId()); } } return result; } void CWallet::ResendWalletTransactions(int64_t nBestBlockTime, CConnman *connman) { // Do this infrequently and randomly to avoid giving away that these are our // transactions. 
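// IsTrusted above boils down to a short decision ladder: confirmed
// transactions are trusted, conflicted ones are not, and unconfirmed ones are
// trusted only if they are ours, sit in our mempool, and every input is one
// of our own spendable outputs. A sketch with the chain/wallet queries
// replaced by boolean inputs (hypothetical parameters, illustration only).
#include <vector>

static bool IsTrustedSketch(bool is_final, int depth, bool spend_zero_conf,
                            bool is_from_me, bool in_mempool,
                            const std::vector<bool> &inputs_spendable_by_us) {
    if (!is_final) {
        return false;
    }
    if (depth >= 1) {
        return true; // at least one confirmation
    }
    if (depth < 0) {
        return false; // conflicted
    }
    if (!spend_zero_conf || !is_from_me || !in_mempool) {
        return false;
    }
    for (bool spendable : inputs_spendable_by_us) {
        if (!spendable) {
            return false;
        }
    }
    return true;
}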
if (GetTime() < nNextResend || !fBroadcastTransactions) { return; } bool fFirst = (nNextResend == 0); nNextResend = GetTime() + GetRand(30 * 60); if (fFirst) { return; } // Only do it if there's been a new block since last time if (nBestBlockTime < nLastResend) { return; } nLastResend = GetTime(); // Rebroadcast unconfirmed txes older than 5 minutes before the last block // was found: std::vector<uint256> relayed = ResendWalletTransactionsBefore(nBestBlockTime - 5 * 60, connman); if (!relayed.empty()) { LogPrintf("%s: rebroadcast %u unconfirmed transactions\n", __func__, relayed.size()); } } /** @} */ // end of mapWallet /** * @defgroup Actions * * @{ */ Amount CWallet::GetBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; if (pcoin->IsTrusted()) { nTotal += pcoin->GetAvailableCredit(); } } return nTotal; } Amount CWallet::GetUnconfirmedBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 && pcoin->InMempool()) { nTotal += pcoin->GetAvailableCredit(); } } return nTotal; } Amount CWallet::GetImmatureBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; nTotal += pcoin->GetImmatureCredit(); } return nTotal; } Amount CWallet::GetWatchOnlyBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; if (pcoin->IsTrusted()) { nTotal += pcoin->GetAvailableWatchOnlyCredit(); } } return nTotal; } Amount CWallet::GetUnconfirmedWatchOnlyBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 && pcoin->InMempool()) { nTotal += pcoin->GetAvailableWatchOnlyCredit(); } } return nTotal; } Amount CWallet::GetImmatureWatchOnlyBalance() const { LOCK2(cs_main, cs_wallet); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx *pcoin = &entry.second; nTotal += pcoin->GetImmatureWatchOnlyCredit(); } return nTotal; } // Calculate total balance in a different way from GetBalance. The biggest // difference is that GetBalance sums up all unspent TxOuts paying to the // wallet, while this sums up both spent and unspent TxOuts paying to the // wallet, and then subtracts the values of TxIns spending from the wallet. This // also has fewer restrictions on which unconfirmed transactions are considered // trusted. Amount CWallet::GetLegacyBalance(const isminefilter &filter, int minDepth, const std::string *account) const { LOCK2(cs_main, cs_wallet); Amount balance = Amount::zero(); for (const auto &entry : mapWallet) { const CWalletTx &wtx = entry.second; const int depth = wtx.GetDepthInMainChain(); if (depth < 0 || !CheckFinalTx(*wtx.tx) || wtx.IsImmatureCoinBase()) { continue; } // Loop through tx outputs and add incoming payments. For outgoing txs, // treat change outputs specially, as part of the amount debited. 
Amount debit = wtx.GetDebit(filter); const bool outgoing = debit > Amount::zero(); for (const CTxOut &out : wtx.tx->vout) { if (outgoing && IsChange(out)) { debit -= out.nValue; } else if (IsMine(out) & filter && depth >= minDepth && (!account || *account == GetLabelName(out.scriptPubKey))) { balance += out.nValue; } } // For outgoing txs, subtract amount debited. if (outgoing && (!account || *account == wtx.strFromAccount)) { balance -= debit; } } if (account) { balance += WalletBatch(*database).GetAccountCreditDebit(*account); } return balance; } Amount CWallet::GetAvailableBalance(const CCoinControl *coinControl) const { LOCK2(cs_main, cs_wallet); Amount balance = Amount::zero(); std::vector<COutput> vCoins; AvailableCoins(vCoins, true, coinControl); for (const COutput &out : vCoins) { if (out.fSpendable) { balance += out.tx->tx->vout[out.i].nValue; } } return balance; } void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const CCoinControl *coinControl, const Amount nMinimumAmount, const Amount nMaximumAmount, const Amount nMinimumSumAmount, const uint64_t nMaximumCount, const int nMinDepth, const int nMaxDepth) const { AssertLockHeld(cs_main); AssertLockHeld(cs_wallet); vCoins.clear(); Amount nTotal = Amount::zero(); for (const auto &entry : mapWallet) { const TxId &wtxid = entry.first; const CWalletTx *pcoin = &entry.second; if (!CheckFinalTx(*pcoin->tx)) { continue; } if (pcoin->IsImmatureCoinBase()) { continue; } int nDepth = pcoin->GetDepthInMainChain(); if (nDepth < 0) { continue; } // We should not consider coins which aren't at least in our mempool. // It's possible for these to be conflicted via ancestors which we may // never be able to detect. if (nDepth == 0 && !pcoin->InMempool()) { continue; } bool safeTx = pcoin->IsTrusted(); // Bitcoin-ABC: Removed check that prevents consideration of coins from // transactions that are replacing other transactions. This check based // on pcoin->mapValue.count("replaces_txid") which was not being set // anywhere. // Similarly, we should not consider coins from transactions that have // been replaced. In the example above, we would want to prevent // creation of a transaction A' spending an output of A, because if // transaction B were initially confirmed, conflicting with A and A', we // wouldn't want to the user to create a transaction D intending to // replace A', but potentially resulting in a scenario where A, A', and // D could all be accepted (instead of just B and D, or just A and A' // like the user would want). // Bitcoin-ABC: retained this check as 'replaced_by_txid' is still set // in the wallet code. 
if (nDepth == 0 && pcoin->mapValue.count("replaced_by_txid")) { safeTx = false; } if (fOnlySafe && !safeTx) { continue; } if (nDepth < nMinDepth || nDepth > nMaxDepth) { continue; } for (uint32_t i = 0; i < pcoin->tx->vout.size(); i++) { if (pcoin->tx->vout[i].nValue < nMinimumAmount || pcoin->tx->vout[i].nValue > nMaximumAmount) { continue; } const COutPoint outpoint(wtxid, i); if (coinControl && coinControl->HasSelected() && !coinControl->fAllowOtherInputs && !coinControl->IsSelected(outpoint)) { continue; } if (IsLockedCoin(outpoint)) { continue; } if (IsSpent(outpoint)) { continue; } isminetype mine = IsMine(pcoin->tx->vout[i]); if (mine == ISMINE_NO) { continue; } bool fSpendableIn = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (coinControl && coinControl->fAllowWatchOnly && (mine & ISMINE_WATCH_SOLVABLE) != ISMINE_NO); bool fSolvableIn = (mine & (ISMINE_SPENDABLE | ISMINE_WATCH_SOLVABLE)) != ISMINE_NO; vCoins.push_back( COutput(pcoin, i, nDepth, fSpendableIn, fSolvableIn, safeTx)); // Checks the sum amount of all UTXO's. if (nMinimumSumAmount != MAX_MONEY) { nTotal += pcoin->tx->vout[i].nValue; if (nTotal >= nMinimumSumAmount) { return; } } // Checks the maximum number of UTXO's. if (nMaximumCount > 0 && vCoins.size() >= nMaximumCount) { return; } } } } std::map<CTxDestination, std::vector<COutput>> CWallet::ListCoins() const { AssertLockHeld(cs_main); AssertLockHeld(cs_wallet); std::map<CTxDestination, std::vector<COutput>> result; std::vector<COutput> availableCoins; AvailableCoins(availableCoins); for (auto &coin : availableCoins) { CTxDestination address; if (coin.fSpendable && ExtractDestination( FindNonChangeParentOutput(*coin.tx->tx, coin.i).scriptPubKey, address)) { result[address].emplace_back(std::move(coin)); } } std::vector<COutPoint> lockedCoins; ListLockedCoins(lockedCoins); for (const auto &output : lockedCoins) { auto it = mapWallet.find(output.GetTxId()); if (it != mapWallet.end()) { int depth = it->second.GetDepthInMainChain(); if (depth >= 0 && output.GetN() < it->second.tx->vout.size() && IsMine(it->second.tx->vout[output.GetN()]) == ISMINE_SPENDABLE) { CTxDestination address; if (ExtractDestination( FindNonChangeParentOutput(*it->second.tx, output.GetN()) .scriptPubKey, address)) { result[address].emplace_back( &it->second, output.GetN(), depth, true /* spendable */, true /* solvable */, false /* safe */); } } } } return result; } const CTxOut &CWallet::FindNonChangeParentOutput(const CTransaction &tx, int output) const { const CTransaction *ptx = &tx; int n = output; while (IsChange(ptx->vout[n]) && ptx->vin.size() > 0) { const COutPoint &prevout = ptx->vin[0].prevout; auto it = mapWallet.find(prevout.GetTxId()); if (it == mapWallet.end() || it->second.tx->vout.size() <= prevout.GetN() || !IsMine(it->second.tx->vout[prevout.GetN()])) { break; } ptx = it->second.tx.get(); n = prevout.GetN(); } return ptx->vout[n]; } bool CWallet::SelectCoinsMinConf( const Amount nTargetValue, const CoinEligibilityFilter &eligibility_filter, std::vector<OutputGroup> groups, std::set<CInputCoin> &setCoinsRet, Amount &nValueRet, const CoinSelectionParams &coin_selection_params, bool &bnb_used) const { setCoinsRet.clear(); nValueRet = Amount::zero(); std::vector<OutputGroup> utxo_pool; if (coin_selection_params.use_bnb) { // Get long term estimate CCoinControl temp; temp.m_confirm_target = 1008; CFeeRate long_term_feerate = GetMinimumFeeRate(*this, temp, g_mempool); // Calculate cost of change Amount cost_of_change = dustRelayFee.GetFee(coin_selection_params.change_spend_size) + 
coin_selection_params.effective_fee.GetFee( coin_selection_params.change_output_size); // Filter by the min conf specs and add to utxo_pool and calculate // effective value for (OutputGroup &group : groups) { if (!group.EligibleForSpending(eligibility_filter)) { continue; } group.fee = Amount::zero(); group.long_term_fee = Amount::zero(); group.effective_value = Amount::zero(); for (auto it = group.m_outputs.begin(); it != group.m_outputs.end();) { const CInputCoin &coin = *it; Amount effective_value = coin.txout.nValue - (coin.m_input_bytes < 0 ? Amount::zero() : coin_selection_params.effective_fee.GetFee( coin.m_input_bytes)); // Only include outputs that are positive effective value (i.e. // not dust) if (effective_value > Amount::zero()) { group.fee += coin.m_input_bytes < 0 ? Amount::zero() : coin_selection_params.effective_fee.GetFee( coin.m_input_bytes); group.long_term_fee += coin.m_input_bytes < 0 ? Amount::zero() : long_term_feerate.GetFee(coin.m_input_bytes); group.effective_value += effective_value; ++it; } else { it = group.Discard(coin); } } if (group.effective_value > Amount::zero()) { utxo_pool.push_back(group); } } // Calculate the fees for things that aren't inputs Amount not_input_fees = coin_selection_params.effective_fee.GetFee( coin_selection_params.tx_noinputs_size); bnb_used = true; return SelectCoinsBnB(utxo_pool, nTargetValue, cost_of_change, setCoinsRet, nValueRet, not_input_fees); } else { // Filter by the min conf specs and add to utxo_pool for (const OutputGroup &group : groups) { if (!group.EligibleForSpending(eligibility_filter)) { continue; } utxo_pool.push_back(group); } bnb_used = false; return KnapsackSolver(nTargetValue, utxo_pool, setCoinsRet, nValueRet); } } bool CWallet::SelectCoins(const std::vector<COutput> &vAvailableCoins, const Amount nTargetValue, std::set<CInputCoin> &setCoinsRet, Amount &nValueRet, const CCoinControl &coin_control, CoinSelectionParams &coin_selection_params, bool &bnb_used) const { std::vector<COutput> vCoins(vAvailableCoins); // coin control -> return all selected outputs (we want all selected to go // into the transaction for sure) if (coin_control.HasSelected() && !coin_control.fAllowOtherInputs) { // We didn't use BnB here, so set it to false. bnb_used = false; for (const COutput &out : vCoins) { if (!out.fSpendable) { continue; } nValueRet += out.tx->tx->vout[out.i].nValue; setCoinsRet.insert(out.GetInputCoin()); } return (nValueRet >= nTargetValue); } // Calculate value from preset inputs and store them. std::set<CInputCoin> setPresetCoins; Amount nValueFromPresetInputs = Amount::zero(); std::vector<COutPoint> vPresetInputs; coin_control.ListSelected(vPresetInputs); for (const COutPoint &outpoint : vPresetInputs) { // For now, don't use BnB if preset inputs are selected. TODO: Enable // this later bnb_used = false; coin_selection_params.use_bnb = false; std::map<TxId, CWalletTx>::const_iterator it = mapWallet.find(outpoint.GetTxId()); if (it == mapWallet.end()) { // TODO: Allow non-wallet inputs return false; } const CWalletTx *pcoin = &it->second; // Clearly invalid input, fail. 
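// The branch-and-bound preparation at the top of SelectCoinsMinConf keeps an
// output only if its value still exceeds the fee needed to spend it at the
// effective feerate ("effective value"). A simplified sketch of that filter;
// the fee math is approximated here as sat/kB * bytes / 1000, and the name
// and signature are illustrative.
#include <cstdint>

static bool EffectiveValueSketch(int64_t value_sat, int input_bytes,
                                 int64_t feerate_sat_per_kb,
                                 int64_t &effective_value_out) {
    // An unknown input size (< 0) is treated as costing nothing, matching the
    // Amount::zero() fallback above.
    int64_t spend_fee =
        input_bytes < 0 ? 0 : feerate_sat_per_kb * input_bytes / 1000;
    effective_value_out = value_sat - spend_fee;
    return effective_value_out > 0; // keep only positive effective value
}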
if (pcoin->tx->vout.size() <= outpoint.GetN()) { return false; } // Just to calculate the marginal byte size nValueFromPresetInputs += pcoin->tx->vout[outpoint.GetN()].nValue; setPresetCoins.insert(CInputCoin(pcoin->tx, outpoint.GetN())); } // Remove preset inputs from vCoins for (std::vector<COutput>::iterator it = vCoins.begin(); it != vCoins.end() && coin_control.HasSelected();) { if (setPresetCoins.count(it->GetInputCoin())) { it = vCoins.erase(it); } else { ++it; } } // form groups from remaining coins; note that preset coins will not // automatically have their associated (same address) coins included if (coin_control.m_avoid_partial_spends && vCoins.size() > OUTPUT_GROUP_MAX_ENTRIES) { // Cases where we have 11+ outputs all pointing to the same destination // may result in privacy leaks as they will potentially be // deterministically sorted. We solve that by explicitly shuffling the // outputs before processing Shuffle(vCoins.begin(), vCoins.end(), FastRandomContext()); } std::vector<OutputGroup> groups = GroupOutputs(vCoins, !coin_control.m_avoid_partial_spends); size_t max_ancestors = std::max<size_t>( 1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)); size_t max_descendants = std::max<size_t>( 1, gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)); bool fRejectLongChains = gArgs.GetBoolArg( "-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS); bool res = nTargetValue <= nValueFromPresetInputs || SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 6, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 1, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, 2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || (m_spend_zero_conf_change && SelectCoinsMinConf( nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min<size_t>(4, max_ancestors / 3), std::min<size_t>(4, max_descendants / 3)), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors / 2, max_descendants / 2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors - 1, max_descendants - 1), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || (m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf( nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // Because SelectCoinsMinConf clears the setCoinsRet, we now add the // possible inputs to the coinset. util::insert(setCoinsRet, setPresetCoins); // Add preset inputs to the total value selected. 
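// SelectCoins above works through progressively weaker eligibility filters
// (6-conf coins from others, then 1-conf, then unconfirmed change under
// tightening ancestor limits) and stops at the first one that can fund the
// target. A schematic of that fallback loop with a hypothetical filter type
// and a selector callback standing in for SelectCoinsMinConf.
#include <functional>
#include <vector>

struct EligibilityFilterSketch {
    int conf_mine;               // min confirmations for our own change
    int conf_theirs;             // min confirmations for coins from others
    unsigned int max_ancestors;  // unconfirmed-chain limit
};

static bool SelectWithFallbacksSketch(
    const std::vector<EligibilityFilterSketch> &ladder,
    const std::function<bool(const EligibilityFilterSketch &)> &try_select) {
    for (const EligibilityFilterSketch &filter : ladder) {
        if (try_select(filter)) {
            return true; // first filter that can fund the target wins
        }
    }
    return false; // even the loosest filter could not reach the target
}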
nValueRet += nValueFromPresetInputs; return res; } bool CWallet::SignTransaction(CMutableTransaction &tx) { // sign the new tx CTransaction txNewConst(tx); int nIn = 0; for (const auto &input : tx.vin) { auto mi = mapWallet.find(input.prevout.GetTxId()); if (mi == mapWallet.end() || input.prevout.GetN() >= mi->second.tx->vout.size()) { return false; } const CScript &scriptPubKey = mi->second.tx->vout[input.prevout.GetN()].scriptPubKey; const Amount amount = mi->second.tx->vout[input.prevout.GetN()].nValue; SignatureData sigdata; SigHashType sigHashType = SigHashType().withForkId(); if (!ProduceSignature(*this, TransactionSignatureCreator(&txNewConst, nIn, amount, sigHashType), scriptPubKey, sigdata)) { return false; } UpdateTransaction(tx, nIn, sigdata); nIn++; } return true; } bool CWallet::FundTransaction(CMutableTransaction &tx, Amount &nFeeRet, int &nChangePosInOut, std::string &strFailReason, bool lockUnspents, const std::set<int> &setSubtractFeeFromOutputs, CCoinControl coinControl) { std::vector<CRecipient> vecSend; // Turn the txout set into a CRecipient vector. for (size_t idx = 0; idx < tx.vout.size(); idx++) { const CTxOut &txOut = tx.vout[idx]; CRecipient recipient = {txOut.scriptPubKey, txOut.nValue, setSubtractFeeFromOutputs.count(idx) == 1}; vecSend.push_back(recipient); } coinControl.fAllowOtherInputs = true; for (const CTxIn &txin : tx.vin) { coinControl.Select(txin.prevout); } // Acquire the locks to prevent races to the new locked unspents between the // CreateTransaction call and LockCoin calls (when lockUnspents is true). LOCK2(cs_main, cs_wallet); CReserveKey reservekey(this); CTransactionRef tx_new; if (!CreateTransaction(vecSend, tx_new, reservekey, nFeeRet, nChangePosInOut, strFailReason, coinControl, false)) { return false; } if (nChangePosInOut != -1) { tx.vout.insert(tx.vout.begin() + nChangePosInOut, tx_new->vout[nChangePosInOut]); // We don't have the normal Create/Commit cycle, and don't want to // risk reusing change, so just remove the key from the keypool // here. reservekey.KeepKey(); } // Copy output sizes from new transaction; they may have had the fee // subtracted from them. for (size_t idx = 0; idx < tx.vout.size(); idx++) { tx.vout[idx].nValue = tx_new->vout[idx].nValue; } // Add new txins (keeping original txin scriptSig/order) for (const CTxIn &txin : tx_new->vin) { if (!coinControl.IsSelected(txin.prevout)) { tx.vin.push_back(txin); if (lockUnspents) { LockCoin(txin.prevout); } } } return true; } OutputType CWallet::TransactionChangeType(OutputType change_type, const std::vector<CRecipient> &vecSend) { // If -changetype is specified, always use that change type. if (change_type != OutputType::NONE) { return change_type; } // if m_default_address_type is legacy, use legacy address as change. 
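    // Editor's aside (not part of this change): a hedged usage sketch for
    // CWallet::FundTransaction above. 'pwallet' and 'dest' are assumed to be a
    // loaded CWallet pointer and a destination; error handling is elided and
    // the block is guarded with #if 0.
#if 0
    CMutableTransaction tx;
    tx.vout.emplace_back(1 * COIN, GetScriptForDestination(dest));
    Amount fee = Amount::zero();
    int change_pos = -1; // let the wallet insert change at a random position
    std::string fail_reason;
    CCoinControl coin_control;
    if (!pwallet->FundTransaction(tx, fee, change_pos, fail_reason,
                                  false /* lockUnspents */,
                                  {} /* setSubtractFeeFromOutputs */,
                                  coin_control)) {
        // fail_reason explains the failure, e.g. "Insufficient funds".
    }
#endif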
if (m_default_address_type == OutputType::LEGACY) { return OutputType::LEGACY; } // else use m_default_address_type for change return m_default_address_type; } bool CWallet::CreateTransaction(const std::vector<CRecipient> &vecSend, CTransactionRef &tx, CReserveKey &reservekey, Amount &nFeeRet, int &nChangePosInOut, std::string &strFailReason, const CCoinControl &coinControl, bool sign) { Amount nValue = Amount::zero(); int nChangePosRequest = nChangePosInOut; unsigned int nSubtractFeeFromAmount = 0; for (const auto &recipient : vecSend) { if (nValue < Amount::zero() || recipient.nAmount < Amount::zero()) { strFailReason = _("Transaction amounts must not be negative"); return false; } nValue += recipient.nAmount; if (recipient.fSubtractFeeFromAmount) { nSubtractFeeFromAmount++; } } if (vecSend.empty()) { strFailReason = _("Transaction must have at least one recipient"); return false; } CMutableTransaction txNew; // Discourage fee sniping. // // For a large miner the value of the transactions in the best block and the // mempool can exceed the cost of deliberately attempting to mine two blocks // to orphan the current best block. By setting nLockTime such that only the // next block can include the transaction, we discourage this practice as // the height restricted and limited blocksize gives miners considering fee // sniping fewer options for pulling off this attack. // // A simple way to think about this is from the wallet's point of view we // always want the blockchain to move forward. By setting nLockTime this way // we're basically making the statement that we only want this transaction // to appear in the next block; we don't want to potentially encourage // reorgs by allowing transactions to appear at lower heights than the next // block in forks of the best chain. // // Of course, the subsidy is high enough, and transaction volume low enough, // that fee sniping isn't a problem yet, but by implementing a fix now we // ensure code won't be written that makes assumptions about nLockTime that // preclude a fix later. txNew.nLockTime = chainActive.Height(); // Secondly occasionally randomly pick a nLockTime even further back, so // that transactions that are delayed after signing for whatever reason, // e.g. high-latency mix networks and some CoinJoin implementations, have // better privacy. if (GetRandInt(10) == 0) { txNew.nLockTime = std::max(0, (int)txNew.nLockTime - GetRandInt(100)); } assert(txNew.nLockTime <= (unsigned int)chainActive.Height()); assert(txNew.nLockTime < LOCKTIME_THRESHOLD); { std::set<CInputCoin> setCoins; LOCK2(cs_main, cs_wallet); std::vector<COutput> vAvailableCoins; AvailableCoins(vAvailableCoins, true, &coinControl); // Parameters for coin selection, init with dummy CoinSelectionParams coin_selection_params; // Create change script that will be used if we need change // TODO: pass in scriptChange instead of reservekey so // change transaction isn't always pay-to-bitcoin-address CScript scriptChange; // coin control: send change to custom address if (!boost::get<CNoDestination>(&coinControl.destChange)) { scriptChange = GetScriptForDestination(coinControl.destChange); // no coin control: send change to newly generated address } else { // Note: We use a new key here to keep it from being obvious // which side is the change. // The drawback is that by not reusing a previous key, the // change may be lost if a backup is restored, if the backup // doesn't have the new private key for the change. 
If we // reused the old key, it would be possible to add code to look // for and rediscover unknown transactions that were written // with keys of ours to recover post-backup change. // Reserve a new key pair from key pool CPubKey vchPubKey; bool ret; ret = reservekey.GetReservedKey(vchPubKey, true); if (!ret) { strFailReason = _("Keypool ran out, please call keypoolrefill first"); return false; } const OutputType change_type = TransactionChangeType( coinControl.m_change_type ? *coinControl.m_change_type : m_default_change_type, vecSend); LearnRelatedScripts(vchPubKey, change_type); scriptChange = GetScriptForDestination( GetDestinationForKey(vchPubKey, change_type)); } CTxOut change_prototype_txout(Amount::zero(), scriptChange); coin_selection_params.change_output_size = GetSerializeSize(change_prototype_txout, SER_DISK, 0); // Get the fee rate to use effective values in coin selection CFeeRate nFeeRateNeeded = GetMinimumFeeRate(*this, coinControl, g_mempool); nFeeRet = Amount::zero(); bool pick_new_inputs = true; Amount nValueIn = Amount::zero(); // BnB selector is the only selector used when this is true. // That should only happen on the first pass through the loop. // If we are doing subtract fee from recipient, then don't use BnB coin_selection_params.use_bnb = nSubtractFeeFromAmount == 0; // Start with no fee and loop until there is enough fee while (true) { nChangePosInOut = nChangePosRequest; txNew.vin.clear(); txNew.vout.clear(); bool fFirst = true; Amount nValueToSelect = nValue; if (nSubtractFeeFromAmount == 0) { nValueToSelect += nFeeRet; } // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 // input count, 1 output count coin_selection_params.tx_noinputs_size = 10; // vouts to the payees for (const auto &recipient : vecSend) { CTxOut txout(recipient.nAmount, recipient.scriptPubKey); if (recipient.fSubtractFeeFromAmount) { assert(nSubtractFeeFromAmount != 0); // Subtract fee equally from each selected recipient. txout.nValue -= nFeeRet / int(nSubtractFeeFromAmount); // First receiver pays the remainder not divisible by output // count. if (fFirst) { fFirst = false; txout.nValue -= nFeeRet % int(nSubtractFeeFromAmount); } } // Include the fee cost for outputs. Note this is only used for // BnB right now coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout, SER_NETWORK, PROTOCOL_VERSION); if (IsDust(txout, dustRelayFee)) { if (recipient.fSubtractFeeFromAmount && nFeeRet > Amount::zero()) { if (txout.nValue < Amount::zero()) { strFailReason = _("The transaction amount is " "too small to pay the fee"); } else { strFailReason = _("The transaction amount is too small to " "send after the fee has been deducted"); } } else { strFailReason = _("Transaction amount too small"); } return false; } txNew.vout.push_back(txout); } // Choose coins to use bool bnb_used; if (pick_new_inputs) { nValueIn = Amount::zero(); setCoins.clear(); coin_selection_params.change_spend_size = CalculateMaximumSignedInputSize(change_prototype_txout, this); coin_selection_params.effective_fee = nFeeRateNeeded; if (!SelectCoins(vAvailableCoins, nValueToSelect, setCoins, nValueIn, coinControl, coin_selection_params, bnb_used)) { // If BnB was used, it was the first pass. No longer the // first pass and continue loop with knapsack. if (bnb_used) { coin_selection_params.use_bnb = false; continue; } else { strFailReason = _("Insufficient funds"); return false; } } } const Amount nChange = nValueIn - nValueToSelect; if (nChange > Amount::zero()) { // Fill a vout to ourself. 
CTxOut newTxOut(nChange, scriptChange); // Never create dust outputs; if we would, just add the dust to // the fee. // The nChange when BnB is used is always going to go to fees. if (IsDust(newTxOut, dustRelayFee) || bnb_used) { nChangePosInOut = -1; nFeeRet += nChange; } else { if (nChangePosInOut == -1) { // Insert change txn at random position: nChangePosInOut = GetRandInt(txNew.vout.size() + 1); } else if ((unsigned int)nChangePosInOut > txNew.vout.size()) { strFailReason = _("Change index out of range"); return false; } std::vector<CTxOut>::iterator position = txNew.vout.begin() + nChangePosInOut; txNew.vout.insert(position, newTxOut); } } else { nChangePosInOut = -1; } // Dummy fill vin for maximum size estimation // for (const auto &coin : setCoins) { txNew.vin.push_back(CTxIn(coin.outpoint, CScript())); } CTransaction txNewConst(txNew); int nBytes = CalculateMaximumSignedTxSize(txNewConst, this); if (nBytes < 0) { strFailReason = _("Signing transaction failed"); return false; } Amount nFeeNeeded = GetMinimumFee(*this, nBytes, coinControl, g_mempool); // If we made it here and we aren't even able to meet the relay fee // on the next pass, give up because we must be at the maximum // allowed fee. if (nFeeNeeded < ::minRelayTxFee.GetFee(nBytes)) { strFailReason = _("Transaction too large for fee policy"); return false; } if (nFeeRet >= nFeeNeeded) { // Reduce fee to only the needed amount if possible. This // prevents potential overpayment in fees if the coins selected // to meet nFeeNeeded result in a transaction that requires less // fee than the prior iteration. // If we have no change and a big enough excess fee, then try to // construct transaction again only without picking new inputs. // We now know we only need the smaller fee (because of reduced // tx size) and so we should add a change output. Only try this // once. if (nChangePosInOut == -1 && nSubtractFeeFromAmount == 0 && pick_new_inputs) { // Add 2 as a buffer in case increasing # of outputs changes // compact size unsigned int tx_size_with_change = nBytes + coin_selection_params.change_output_size + 2; Amount fee_needed_with_change = GetMinimumFee( *this, tx_size_with_change, coinControl, g_mempool); Amount minimum_value_for_change = GetDustThreshold(change_prototype_txout, dustRelayFee); if (nFeeRet >= fee_needed_with_change + minimum_value_for_change) { pick_new_inputs = false; nFeeRet = fee_needed_with_change; continue; } } // If we have change output already, just increase it if (nFeeRet > nFeeNeeded && nChangePosInOut != -1 && nSubtractFeeFromAmount == 0) { Amount extraFeePaid = nFeeRet - nFeeNeeded; std::vector<CTxOut>::iterator change_position = txNew.vout.begin() + nChangePosInOut; change_position->nValue += extraFeePaid; nFeeRet -= extraFeePaid; } // Done, enough fee included. break; } else if (!pick_new_inputs) { // This shouldn't happen, we should have had enough excess fee // to pay for the new output and still meet nFeeNeeded. // Or we should have just subtracted fee from recipients and // nFeeNeeded should not have changed. strFailReason = _("Transaction fee and change calculation failed"); return false; } // Try to reduce change to include necessary fee. if (nChangePosInOut != -1 && nSubtractFeeFromAmount == 0) { Amount additionalFeeNeeded = nFeeNeeded - nFeeRet; std::vector<CTxOut>::iterator change_position = txNew.vout.begin() + nChangePosInOut; // Only reduce change if remaining amount is still a large // enough output. 
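    // Editor's aside (not part of this change): a hedged arithmetic sketch of
    // the subtract-fee-from-recipients split in the payee loop above. The fee
    // is divided evenly and the first such recipient also pays the remainder.
    // Guarded with #if 0.
#if 0
    Amount fee_to_spread = 1001 * SATOSHI; // assumed total fee
    int num_recipients = 2; // outputs flagged fSubtractFeeFromAmount
    Amount per_recipient = fee_to_spread / num_recipients; // 500 sat each
    Amount remainder = fee_to_spread % num_recipients;     // 1 sat extra
    // The first flagged output pays per_recipient + remainder (501 sat), the
    // second pays per_recipient (500 sat).
#endif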
if (change_position->nValue >= MIN_FINAL_CHANGE + additionalFeeNeeded) { change_position->nValue -= additionalFeeNeeded; nFeeRet += additionalFeeNeeded; // Done, able to increase fee from change. break; } } // If subtracting fee from recipients, we now know what fee we // need to subtract, we have no reason to reselect inputs. if (nSubtractFeeFromAmount > 0) { pick_new_inputs = false; } // Include more fee and try again. nFeeRet = nFeeNeeded; coin_selection_params.use_bnb = false; continue; } if (nChangePosInOut == -1) { // Return any reserved key if we don't have change reservekey.ReturnKey(); } // Shuffle selected coins and fill in final vin txNew.vin.clear(); std::vector<CInputCoin> selected_coins(setCoins.begin(), setCoins.end()); Shuffle(selected_coins.begin(), selected_coins.end(), FastRandomContext()); // Note how the sequence number is set to non-maxint so that // the nLockTime set above actually works. for (const auto &coin : selected_coins) { txNew.vin.push_back( CTxIn(coin.outpoint, CScript(), std::numeric_limits<uint32_t>::max() - 1)); } if (sign) { SigHashType sigHashType = SigHashType().withForkId(); CTransaction txNewConst(txNew); int nIn = 0; for (const auto &coin : selected_coins) { const CScript &scriptPubKey = coin.txout.scriptPubKey; SignatureData sigdata; if (!ProduceSignature( *this, TransactionSignatureCreator( &txNewConst, nIn, coin.txout.nValue, sigHashType), scriptPubKey, sigdata)) { strFailReason = _("Signing transaction failed"); return false; } UpdateTransaction(txNew, nIn, sigdata); nIn++; } } // Return the constructed transaction data. tx = MakeTransactionRef(std::move(txNew)); // Limit size. if (tx->GetTotalSize() >= MAX_STANDARD_TX_SIZE) { strFailReason = _("Transaction too large"); return false; } } if (gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS)) { // Lastly, ensure this tx will pass the mempool's chain limits. LockPoints lp; CTxMemPoolEntry entry(tx, Amount::zero(), 0, 0, 0, Amount::zero(), false, 0, lp); CTxMemPool::setEntries setAncestors; size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000; size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000; std::string errString; + LOCK(::g_mempool.cs); if (!g_mempool.CalculateMemPoolAncestors( entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { strFailReason = _("Transaction has too long of a mempool chain"); return false; } } return true; } /** * Call after CreateTransaction unless you want to abort */ bool CWallet::CommitTransaction( CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, std::string fromAccount, CReserveKey &reservekey, CConnman *connman, CValidationState &state) { LOCK2(cs_main, cs_wallet); CWalletTx wtxNew(this, std::move(tx)); wtxNew.mapValue = std::move(mapValue); wtxNew.vOrderForm = std::move(orderForm); wtxNew.strFromAccount = std::move(fromAccount); wtxNew.fTimeReceivedIsTxTime = true; wtxNew.fFromMe = true; LogPrintfToBeContinued("CommitTransaction:\n%s", wtxNew.tx->ToString()); // Take key pair from key pool so it won't be used again. reservekey.KeepKey(); // Add tx to wallet, because if it has change it's also ours, otherwise just // for transaction history. 
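    // Editor's aside (not part of this change): a hedged sketch of the locking
    // pattern used by the chain-limit check above. Walking mempool ancestry
    // requires the mempool lock, so callers take it for the duration of the
    // query. Names mirror the surrounding code; guarded with #if 0.
#if 0
    {
        LOCK(::g_mempool.cs);
        CTxMemPool::setEntries ancestors;
        std::string err;
        g_mempool.CalculateMemPoolAncestors(entry, ancestors, nLimitAncestors,
                                            nLimitAncestorSize,
                                            nLimitDescendants,
                                            nLimitDescendantSize, err);
    } // the lock is released at the end of the scope
#endif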
AddToWallet(wtxNew); // Notify that old coins are spent. for (const CTxIn &txin : wtxNew.tx->vin) { CWalletTx &coin = mapWallet.at(txin.prevout.GetTxId()); coin.BindWallet(this); NotifyTransactionChanged(this, coin.GetId(), CT_UPDATED); } // Get the inserted-CWalletTx from mapWallet so that the // fInMempool flag is cached properly CWalletTx &wtx = mapWallet.at(wtxNew.GetId()); if (fBroadcastTransactions) { // Broadcast if (!wtx.AcceptToMemoryPool(maxTxFee, state)) { LogPrintf("CommitTransaction(): Transaction cannot be broadcast " "immediately, %s\n", FormatStateMessage(state)); // TODO: if we expect the failure to be long term or permanent, // instead delete wtx from the wallet and return failure. } else { wtx.RelayWalletTransaction(connman); } } return true; } void CWallet::ListAccountCreditDebit(const std::string &strAccount, std::list<CAccountingEntry> &entries) { WalletBatch batch(*database); return batch.ListAccountCreditDebit(strAccount, entries); } bool CWallet::AddAccountingEntry(const CAccountingEntry &acentry) { WalletBatch batch(*database); return AddAccountingEntry(acentry, &batch); } bool CWallet::AddAccountingEntry(const CAccountingEntry &acentry, WalletBatch *batch) { if (!batch->WriteAccountingEntry(++nAccountingEntryNumber, acentry)) { return false; } laccentries.push_back(acentry); CAccountingEntry &entry = laccentries.back(); wtxOrdered.insert(std::make_pair(entry.nOrderPos, TxPair(nullptr, &entry))); return true; } DBErrors CWallet::LoadWallet(bool &fFirstRunRet) { LOCK2(cs_main, cs_wallet); fFirstRunRet = false; DBErrors nLoadWalletRet = WalletBatch(*database, "cr+").LoadWallet(this); if (nLoadWalletRet == DBErrors::NEED_REWRITE) { if (database->Rewrite("\x04pool")) { setInternalKeyPool.clear(); setExternalKeyPool.clear(); m_pool_key_to_index.clear(); // Note: can't top-up keypool here, because wallet is locked. // User will be prompted to unlock wallet the next operation // that requires a new key. } } // This wallet is in its first run if all of these are empty fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty(); if (nLoadWalletRet != DBErrors::LOAD_OK) { return nLoadWalletRet; } return DBErrors::LOAD_OK; } DBErrors CWallet::ZapSelectTx(std::vector<TxId> &txIdsIn, std::vector<TxId> &txIdsOut) { // mapWallet AssertLockHeld(cs_wallet); DBErrors nZapSelectTxRet = WalletBatch(*database, "cr+").ZapSelectTx(txIdsIn, txIdsOut); for (const TxId &txid : txIdsOut) { mapWallet.erase(txid); } if (nZapSelectTxRet == DBErrors::NEED_REWRITE) { if (database->Rewrite("\x04pool")) { setInternalKeyPool.clear(); setExternalKeyPool.clear(); m_pool_key_to_index.clear(); // Note: can't top-up keypool here, because wallet is locked. // User will be prompted to unlock wallet the next operation // that requires a new key. } } if (nZapSelectTxRet != DBErrors::LOAD_OK) { return nZapSelectTxRet; } MarkDirty(); return DBErrors::LOAD_OK; } DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx> &vWtx) { DBErrors nZapWalletTxRet = WalletBatch(*database, "cr+").ZapWalletTx(vWtx); if (nZapWalletTxRet == DBErrors::NEED_REWRITE) { if (database->Rewrite("\x04pool")) { LOCK(cs_wallet); setInternalKeyPool.clear(); setExternalKeyPool.clear(); m_pool_key_to_index.clear(); // Note: can't top-up keypool here, because wallet is locked. // User will be prompted to unlock wallet the next operation // that requires a new key. 
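    // Editor's aside (not part of this change): a hedged usage sketch for the
    // CreateTransaction/CommitTransaction pair above. 'pwallet', 'dest' and
    // 'g_connman' are assumed to be a loaded wallet, a destination and the
    // global connection manager; guarded with #if 0.
#if 0
    std::vector<CRecipient> vecSend{{GetScriptForDestination(dest), 1 * COIN,
                                     false /* fSubtractFeeFromAmount */}};
    CReserveKey reservekey(pwallet);
    CTransactionRef tx;
    Amount fee = Amount::zero();
    int change_pos = -1;
    std::string fail_reason;
    CCoinControl coin_control;
    if (pwallet->CreateTransaction(vecSend, tx, reservekey, fee, change_pos,
                                   fail_reason, coin_control, true /* sign */)) {
        CValidationState state;
        pwallet->CommitTransaction(tx, {} /* mapValue */, {} /* orderForm */,
                                   "" /* fromAccount */, reservekey,
                                   g_connman.get(), state);
    }
#endif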
} } if (nZapWalletTxRet != DBErrors::LOAD_OK) { return nZapWalletTxRet; } return DBErrors::LOAD_OK; } bool CWallet::SetAddressBook(const CTxDestination &address, const std::string &strName, const std::string &strPurpose) { bool fUpdated = false; { // mapAddressBook LOCK(cs_wallet); std::map<CTxDestination, CAddressBookData>::iterator mi = mapAddressBook.find(address); fUpdated = mi != mapAddressBook.end(); mapAddressBook[address].name = strName; // Update purpose only if requested. if (!strPurpose.empty()) { mapAddressBook[address].purpose = strPurpose; } } NotifyAddressBookChanged(this, address, strName, ::IsMine(*this, address) != ISMINE_NO, strPurpose, (fUpdated ? CT_UPDATED : CT_NEW)); if (!strPurpose.empty() && !WalletBatch(*database).WritePurpose(address, strPurpose)) { return false; } return WalletBatch(*database).WriteName(address, strName); } bool CWallet::DelAddressBook(const CTxDestination &address) { { // mapAddressBook LOCK(cs_wallet); // Delete destdata tuples associated with address. for (const std::pair<const std::string, std::string> &item : mapAddressBook[address].destdata) { WalletBatch(*database).EraseDestData(address, item.first); } mapAddressBook.erase(address); } NotifyAddressBookChanged(this, address, "", ::IsMine(*this, address) != ISMINE_NO, "", CT_DELETED); WalletBatch(*database).ErasePurpose(address); return WalletBatch(*database).EraseName(address); } const std::string &CWallet::GetLabelName(const CScript &scriptPubKey) const { CTxDestination address; if (ExtractDestination(scriptPubKey, address) && !scriptPubKey.IsUnspendable()) { auto mi = mapAddressBook.find(address); if (mi != mapAddressBook.end()) { return mi->second.name; } } // A scriptPubKey that doesn't have an entry in the address book is // associated with the default label (""). const static std::string DEFAULT_LABEL_NAME; return DEFAULT_LABEL_NAME; } /** * Mark old keypool keys as used, and generate all new keys. */ bool CWallet::NewKeyPool() { LOCK(cs_wallet); WalletBatch batch(*database); for (int64_t nIndex : setInternalKeyPool) { batch.ErasePool(nIndex); } setInternalKeyPool.clear(); for (int64_t nIndex : setExternalKeyPool) { batch.ErasePool(nIndex); } setExternalKeyPool.clear(); m_pool_key_to_index.clear(); if (!TopUpKeyPool()) { return false; } LogPrintf("CWallet::NewKeyPool rewrote keypool\n"); return true; } size_t CWallet::KeypoolCountExternalKeys() { // setExternalKeyPool AssertLockHeld(cs_wallet); return setExternalKeyPool.size(); } void CWallet::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) { AssertLockHeld(cs_wallet); if (keypool.fInternal) { setInternalKeyPool.insert(nIndex); } else { setExternalKeyPool.insert(nIndex); } m_max_keypool_index = std::max(m_max_keypool_index, nIndex); m_pool_key_to_index[keypool.vchPubKey.GetID()] = nIndex; // If no metadata exists yet, create a default with the pool key's // creation time. Note that this may be overwritten by actually // stored metadata for that key later, which is fine. 
CKeyID keyid = keypool.vchPubKey.GetID(); if (mapKeyMetadata.count(keyid) == 0) { mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime); } } bool CWallet::TopUpKeyPool(unsigned int kpSize) { LOCK(cs_wallet); if (IsLocked()) { return false; } // Top up key pool unsigned int nTargetSize; if (kpSize > 0) { nTargetSize = kpSize; } else { nTargetSize = std::max<int64_t>( gArgs.GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), 0); } // count amount of available keys (internal, external) // make sure the keypool of external and internal keys fits the user // selected target (-keypool) int64_t missingExternal = std::max<int64_t>( std::max<int64_t>(nTargetSize, 1) - setExternalKeyPool.size(), 0); int64_t missingInternal = std::max<int64_t>( std::max<int64_t>(nTargetSize, 1) - setInternalKeyPool.size(), 0); if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT)) { // don't create extra internal keys missingInternal = 0; } bool internal = false; WalletBatch batch(*database); for (int64_t i = missingInternal + missingExternal; i--;) { if (i < missingInternal) { internal = true; } // How in the hell did you use so many keys? assert(m_max_keypool_index < std::numeric_limits<int64_t>::max()); int64_t index = ++m_max_keypool_index; CPubKey pubkey(GenerateNewKey(batch, internal)); if (!batch.WritePool(index, CKeyPool(pubkey, internal))) { throw std::runtime_error(std::string(__func__) + ": writing generated key failed"); } if (internal) { setInternalKeyPool.insert(index); } else { setExternalKeyPool.insert(index); } m_pool_key_to_index[pubkey.GetID()] = index; } if (missingInternal + missingExternal > 0) { LogPrintf( "keypool added %d keys (%d internal), size=%u (%u internal)\n", missingInternal + missingExternal, missingInternal, setInternalKeyPool.size() + setExternalKeyPool.size(), setInternalKeyPool.size()); } return true; } void CWallet::ReserveKeyFromKeyPool(int64_t &nIndex, CKeyPool &keypool, bool fRequestedInternal) { nIndex = -1; keypool.vchPubKey = CPubKey(); LOCK(cs_wallet); if (!IsLocked()) { TopUpKeyPool(); } bool fReturningInternal = IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT) && fRequestedInternal; std::set<int64_t> &setKeyPool = fReturningInternal ? setInternalKeyPool : setExternalKeyPool; // Get the oldest key if (setKeyPool.empty()) { return; } WalletBatch batch(*database); auto it = setKeyPool.begin(); nIndex = *it; setKeyPool.erase(it); if (!batch.ReadPool(nIndex, keypool)) { throw std::runtime_error(std::string(__func__) + ": read failed"); } if (!HaveKey(keypool.vchPubKey.GetID())) { throw std::runtime_error(std::string(__func__) + ": unknown key in key pool"); } if (keypool.fInternal != fReturningInternal) { throw std::runtime_error(std::string(__func__) + ": keypool entry misclassified"); } assert(keypool.vchPubKey.IsValid()); m_pool_key_to_index.erase(keypool.vchPubKey.GetID()); LogPrintf("keypool reserve %d\n", nIndex); } void CWallet::KeepKey(int64_t nIndex) { // Remove from key pool. 
WalletBatch batch(*database); batch.ErasePool(nIndex); LogPrintf("keypool keep %d\n", nIndex); } void CWallet::ReturnKey(int64_t nIndex, bool fInternal, const CPubKey &pubkey) { // Return to key pool { LOCK(cs_wallet); if (fInternal) { setInternalKeyPool.insert(nIndex); } else { setExternalKeyPool.insert(nIndex); } m_pool_key_to_index[pubkey.GetID()] = nIndex; } LogPrintf("keypool return %d\n", nIndex); } bool CWallet::GetKeyFromPool(CPubKey &result, bool internal) { CKeyPool keypool; LOCK(cs_wallet); int64_t nIndex = 0; ReserveKeyFromKeyPool(nIndex, keypool, internal); if (nIndex == -1) { if (IsLocked()) { return false; } WalletBatch batch(*database); result = GenerateNewKey(batch, internal); return true; } KeepKey(nIndex); result = keypool.vchPubKey; return true; } static int64_t GetOldestKeyTimeInPool(const std::set<int64_t> &setKeyPool, WalletBatch &batch) { if (setKeyPool.empty()) { return GetTime(); } CKeyPool keypool; int64_t nIndex = *(setKeyPool.begin()); if (!batch.ReadPool(nIndex, keypool)) { throw std::runtime_error(std::string(__func__) + ": read oldest key in keypool failed"); } assert(keypool.vchPubKey.IsValid()); return keypool.nTime; } int64_t CWallet::GetOldestKeyPoolTime() { LOCK(cs_wallet); WalletBatch batch(*database); // load oldest key from keypool, get time and return int64_t oldestKey = GetOldestKeyTimeInPool(setExternalKeyPool, batch); if (IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT)) { oldestKey = std::max(GetOldestKeyTimeInPool(setInternalKeyPool, batch), oldestKey); } return oldestKey; } std::map<CTxDestination, Amount> CWallet::GetAddressBalances() { std::map<CTxDestination, Amount> balances; LOCK(cs_wallet); for (const auto &walletEntry : mapWallet) { const CWalletTx *pcoin = &walletEntry.second; if (!pcoin->IsTrusted()) { continue; } if (pcoin->IsImmatureCoinBase()) { continue; } int nDepth = pcoin->GetDepthInMainChain(); if (nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? 0 : 1)) { continue; } for (uint32_t i = 0; i < pcoin->tx->vout.size(); i++) { CTxDestination addr; if (!IsMine(pcoin->tx->vout[i])) { continue; } if (!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, addr)) { continue; } Amount n = IsSpent(COutPoint(walletEntry.first, i)) ? Amount::zero() : pcoin->tx->vout[i].nValue; if (!balances.count(addr)) { balances[addr] = Amount::zero(); } balances[addr] += n; } } return balances; } std::set<std::set<CTxDestination>> CWallet::GetAddressGroupings() { // mapWallet AssertLockHeld(cs_wallet); std::set<std::set<CTxDestination>> groupings; std::set<CTxDestination> grouping; for (const auto &walletEntry : mapWallet) { const CWalletTx *pcoin = &walletEntry.second; if (pcoin->tx->vin.size() > 0) { bool any_mine = false; // Group all input addresses with each other. for (const auto &txin : pcoin->tx->vin) { CTxDestination address; // If this input isn't mine, ignore it. if (!IsMine(txin)) { continue; } if (!ExtractDestination(mapWallet.at(txin.prevout.GetTxId()) .tx->vout[txin.prevout.GetN()] .scriptPubKey, address)) { continue; } grouping.insert(address); any_mine = true; } // Group change with input addresses. if (any_mine) { for (const auto &txout : pcoin->tx->vout) { if (IsChange(txout)) { CTxDestination txoutAddr; if (!ExtractDestination(txout.scriptPubKey, txoutAddr)) { continue; } grouping.insert(txoutAddr); } } } if (grouping.size() > 0) { groupings.insert(grouping); grouping.clear(); } } // Group lone addrs by themselves. 
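    // Editor's aside (not part of this change): a hedged sketch of how the
    // address groupings collected above are merged further below. 'destA',
    // 'destB' and 'destC' are assumed destinations; guarded with #if 0.
#if 0
    // Two raw groupings that share destB...
    std::set<CTxDestination> g1{destA, destB}; // inputs spent together
    std::set<CTxDestination> g2{destB, destC}; // destB reused with change destC
    // ...end up in a single cluster {destA, destB, destC}: every grouping that
    // hits an existing cluster is unioned with it and the old clusters dropped.
#endif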
for (const auto &txout : pcoin->tx->vout) { if (IsMine(txout)) { CTxDestination address; if (!ExtractDestination(txout.scriptPubKey, address)) { continue; } grouping.insert(address); groupings.insert(grouping); grouping.clear(); } } } // A set of pointers to groups of addresses. std::set<std::set<CTxDestination> *> uniqueGroupings; // Map addresses to the unique group containing it. std::map<CTxDestination, std::set<CTxDestination> *> setmap; for (std::set<CTxDestination> _grouping : groupings) { // Make a set of all the groups hit by this new group. std::set<std::set<CTxDestination> *> hits; std::map<CTxDestination, std::set<CTxDestination> *>::iterator it; for (CTxDestination address : _grouping) { if ((it = setmap.find(address)) != setmap.end()) { hits.insert((*it).second); } } // Merge all hit groups into a new single group and delete old groups. std::set<CTxDestination> *merged = new std::set<CTxDestination>(_grouping); for (std::set<CTxDestination> *hit : hits) { merged->insert(hit->begin(), hit->end()); uniqueGroupings.erase(hit); delete hit; } uniqueGroupings.insert(merged); // Update setmap. for (CTxDestination element : *merged) { setmap[element] = merged; } } std::set<std::set<CTxDestination>> ret; for (std::set<CTxDestination> *uniqueGrouping : uniqueGroupings) { ret.insert(*uniqueGrouping); delete uniqueGrouping; } return ret; } std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string &label) const { LOCK(cs_wallet); std::set<CTxDestination> result; for (const std::pair<const CTxDestination, CAddressBookData> &item : mapAddressBook) { const CTxDestination &address = item.first; const std::string &strName = item.second.name; if (strName == label) { result.insert(address); } } return result; } bool CReserveKey::GetReservedKey(CPubKey &pubkey, bool internal) { if (nIndex == -1) { CKeyPool keypool; pwallet->ReserveKeyFromKeyPool(nIndex, keypool, internal); if (nIndex == -1) { return false; } vchPubKey = keypool.vchPubKey; fInternal = keypool.fInternal; } assert(vchPubKey.IsValid()); pubkey = vchPubKey; return true; } void CReserveKey::KeepKey() { if (nIndex != -1) { pwallet->KeepKey(nIndex); } nIndex = -1; vchPubKey = CPubKey(); } void CReserveKey::ReturnKey() { if (nIndex != -1) { pwallet->ReturnKey(nIndex, fInternal, vchPubKey); } nIndex = -1; vchPubKey = CPubKey(); } void CWallet::MarkReserveKeysAsUsed(int64_t keypool_id) { AssertLockHeld(cs_wallet); bool internal = setInternalKeyPool.count(keypool_id); if (!internal) { assert(setExternalKeyPool.count(keypool_id)); } std::set<int64_t> *setKeyPool = internal ? 
&setInternalKeyPool : &setExternalKeyPool; auto it = setKeyPool->begin(); WalletBatch batch(*database); while (it != std::end(*setKeyPool)) { const int64_t &index = *(it); if (index > keypool_id) { // set*KeyPool is ordered break; } CKeyPool keypool; if (batch.ReadPool(index, keypool)) { // TODO: This should be unnecessary m_pool_key_to_index.erase(keypool.vchPubKey.GetID()); } LearnAllRelatedScripts(keypool.vchPubKey); batch.ErasePool(index); LogPrintf("keypool index %d removed\n", index); it = setKeyPool->erase(it); } } void CWallet::GetScriptForMining(std::shared_ptr<CReserveScript> &script) { std::shared_ptr<CReserveKey> rKey = std::make_shared<CReserveKey>(this); CPubKey pubkey; if (!rKey->GetReservedKey(pubkey)) { return; } script = rKey; script->reserveScript = CScript() << ToByteVector(pubkey) << OP_CHECKSIG; } void CWallet::LockCoin(const COutPoint &output) { // setLockedCoins AssertLockHeld(cs_wallet); setLockedCoins.insert(output); } void CWallet::UnlockCoin(const COutPoint &output) { // setLockedCoins AssertLockHeld(cs_wallet); setLockedCoins.erase(output); } void CWallet::UnlockAllCoins() { // setLockedCoins AssertLockHeld(cs_wallet); setLockedCoins.clear(); } bool CWallet::IsLockedCoin(const COutPoint &outpoint) const { // setLockedCoins AssertLockHeld(cs_wallet); return setLockedCoins.count(outpoint) > 0; } void CWallet::ListLockedCoins(std::vector<COutPoint> &vOutpts) const { // setLockedCoins AssertLockHeld(cs_wallet); for (COutPoint outpoint : setLockedCoins) { vOutpts.push_back(outpoint); } } /** @} */ // end of Actions void CWallet::GetKeyBirthTimes( std::map<CTxDestination, int64_t> &mapKeyBirth) const { // mapKeyMetadata AssertLockHeld(cs_wallet); mapKeyBirth.clear(); // Get birth times for keys with metadata. for (const auto &entry : mapKeyMetadata) { if (entry.second.nCreateTime) { mapKeyBirth[entry.first] = entry.second.nCreateTime; } } // Map in which we'll infer heights of other keys the tip can be // reorganized; use a 144-block safety margin. CBlockIndex *pindexMax = chainActive[std::max(0, chainActive.Height() - 144)]; std::map<CKeyID, CBlockIndex *> mapKeyFirstBlock; for (const CKeyID &keyid : GetKeys()) { if (mapKeyBirth.count(keyid) == 0) { mapKeyFirstBlock[keyid] = pindexMax; } } // If there are no such keys, we're done. if (mapKeyFirstBlock.empty()) { return; } // Find first block that affects those keys, if there are any left. std::vector<CKeyID> vAffected; for (const auto &entry : mapWallet) { // iterate over all wallet transactions... const CWalletTx &wtx = entry.second; CBlockIndex *pindex = LookupBlockIndex(wtx.hashBlock); if (pindex && chainActive.Contains(pindex)) { // ... which are already in a block int nHeight = pindex->nHeight; for (const CTxOut &txout : wtx.tx->vout) { // Iterate over all their outputs... CAffectedKeysVisitor(*this, vAffected) .Process(txout.scriptPubKey); for (const CKeyID &keyid : vAffected) { // ... and all their affected keys. std::map<CKeyID, CBlockIndex *>::iterator rit = mapKeyFirstBlock.find(keyid); if (rit != mapKeyFirstBlock.end() && nHeight < rit->second->nHeight) { rit->second = pindex; } } vAffected.clear(); } } } // Extract block timestamps for those keys. for (const auto &entry : mapKeyFirstBlock) { // block times can be 2h off mapKeyBirth[entry.first] = entry.second->GetBlockTime() - TIMESTAMP_WINDOW; } } /** * Compute smart timestamp for a transaction being added to the wallet. * * Logic: * - If sending a transaction, assign its timestamp to the current time. 
* - If receiving a transaction outside a block, assign its timestamp to the * current time. * - If receiving a block with a future timestamp, assign all its (not already * known) transactions' timestamps to the current time. * - If receiving a block with a past timestamp, before the most recent known * transaction (that we care about), assign all its (not already known) * transactions' timestamps to the same timestamp as that most-recent-known * transaction. * - If receiving a block with a past timestamp, but after the most recent known * transaction, assign all its (not already known) transactions' timestamps to * the block time. * * For more information see CWalletTx::nTimeSmart, * https://bitcointalk.org/?topic=54527, or * https://github.com/bitcoin/bitcoin/pull/1393. */ unsigned int CWallet::ComputeTimeSmart(const CWalletTx &wtx) const { unsigned int nTimeSmart = wtx.nTimeReceived; if (!wtx.hashUnset()) { if (const CBlockIndex *pindex = LookupBlockIndex(wtx.hashBlock)) { int64_t latestNow = wtx.nTimeReceived; int64_t latestEntry = 0; // Tolerate times up to the last timestamp in the wallet not more // than 5 minutes into the future int64_t latestTolerated = latestNow + 300; const TxItems &txOrdered = wtxOrdered; for (auto it = txOrdered.rbegin(); it != txOrdered.rend(); ++it) { CWalletTx *const pwtx = it->second.first; if (pwtx == &wtx) { continue; } CAccountingEntry *const pacentry = it->second.second; int64_t nSmartTime; if (pwtx) { nSmartTime = pwtx->nTimeSmart; if (!nSmartTime) { nSmartTime = pwtx->nTimeReceived; } } else { nSmartTime = pacentry->nTime; } if (nSmartTime <= latestTolerated) { latestEntry = nSmartTime; if (nSmartTime > latestNow) { latestNow = nSmartTime; } break; } } int64_t blocktime = pindex->GetBlockTime(); nTimeSmart = std::max(latestEntry, std::min(blocktime, latestNow)); } else { LogPrintf("%s: found %s in block %s not in index\n", __func__, wtx.GetId().ToString(), wtx.hashBlock.ToString()); } } return nTimeSmart; } bool CWallet::AddDestData(const CTxDestination &dest, const std::string &key, const std::string &value) { if (boost::get<CNoDestination>(&dest)) { return false; } mapAddressBook[dest].destdata.insert(std::make_pair(key, value)); return WalletBatch(*database).WriteDestData(dest, key, value); } bool CWallet::EraseDestData(const CTxDestination &dest, const std::string &key) { if (!mapAddressBook[dest].destdata.erase(key)) { return false; } return WalletBatch(*database).EraseDestData(dest, key); } void CWallet::LoadDestData(const CTxDestination &dest, const std::string &key, const std::string &value) { mapAddressBook[dest].destdata.insert(std::make_pair(key, value)); } bool CWallet::GetDestData(const CTxDestination &dest, const std::string &key, std::string *value) const { std::map<CTxDestination, CAddressBookData>::const_iterator i = mapAddressBook.find(dest); if (i != mapAddressBook.end()) { CAddressBookData::StringMap::const_iterator j = i->second.destdata.find(key); if (j != i->second.destdata.end()) { if (value) { *value = j->second; } return true; } } return false; } std::vector<std::string> CWallet::GetDestValues(const std::string &prefix) const { LOCK(cs_wallet); std::vector<std::string> values; for (const auto &address : mapAddressBook) { for (const auto &data : address.second.destdata) { if (!data.first.compare(0, prefix.size(), prefix)) { values.emplace_back(data.second); } } } return values; } bool CWallet::Verify(const CChainParams &chainParams, std::string wallet_file, bool salvage_wallet, std::string &error_string, std::string 
&warning_string) { // Do some checking on wallet path. It should be either a: // // 1. Path where a directory can be created. // 2. Path to an existing directory. // 3. Path to a symlink to a directory. // 4. For backwards compatibility, the name of a data file in -walletdir. LOCK(cs_wallets); fs::path wallet_path = fs::absolute(wallet_file, GetWalletDir()); fs::file_type path_type = fs::symlink_status(wallet_path).type(); if (!(path_type == fs::file_not_found || path_type == fs::directory_file || (path_type == fs::symlink_file && fs::is_directory(wallet_path)) || (path_type == fs::regular_file && fs::path(wallet_file).filename() == wallet_file))) { error_string = strprintf("Invalid -wallet path '%s'. -wallet path should point to " "a directory where wallet.dat and " "database/log.?????????? files can be stored, a location " "where such a directory could be created, " "or (for backwards compatibility) the name of an " "existing data file in -walletdir (%s)", wallet_file, GetWalletDir()); return false; } // Make sure that the wallet path doesn't clash with an existing wallet path for (auto wallet : GetWallets()) { if (fs::absolute(wallet->GetName(), GetWalletDir()) == wallet_path) { error_string = strprintf("Error loading wallet %s. Duplicate " "-wallet filename specified.", wallet_file); return false; } } try { if (!WalletBatch::VerifyEnvironment(wallet_path, error_string)) { return false; } } catch (const fs::filesystem_error &e) { error_string = strprintf("Error loading wallet %s. %s", wallet_file, e.what()); return false; } if (salvage_wallet) { // Recover readable keypairs: CWallet dummyWallet(chainParams, "dummy", WalletDatabase::CreateDummy()); std::string backup_filename; if (!WalletBatch::Recover( wallet_path, static_cast<void *>(&dummyWallet), WalletBatch::RecoverKeysOnlyFilter, backup_filename)) { return false; } } return WalletBatch::VerifyDatabaseFile(wallet_path, warning_string, error_string); } std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(const CChainParams &chainParams, const std::string &name, const fs::path &path) { const std::string &walletFile = name; // Needed to restore wallet transaction meta data after -zapwallettxes std::vector<CWalletTx> vWtx; if (gArgs.GetBoolArg("-zapwallettxes", false)) { uiInterface.InitMessage(_("Zapping all transactions from wallet...")); std::unique_ptr<CWallet> tempWallet = std::make_unique<CWallet>( chainParams, name, WalletDatabase::Create(path)); DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx); if (nZapWalletRet != DBErrors::LOAD_OK) { InitError( strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } } uiInterface.InitMessage(_("Loading wallet...")); int64_t nStart = GetTimeMillis(); bool fFirstRun = true; std::shared_ptr<CWallet> walletInstance = std::make_shared<CWallet>( chainParams, name, WalletDatabase::Create(path)); DBErrors nLoadWalletRet = walletInstance->LoadWallet(fFirstRun); if (nLoadWalletRet != DBErrors::LOAD_OK) { if (nLoadWalletRet == DBErrors::CORRUPT) { InitError( strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } if (nLoadWalletRet == DBErrors::NONCRITICAL_ERROR) { InitWarning(strprintf( _("Error reading %s! 
All keys read correctly, but transaction " "data" " or address book entries might be missing or incorrect."), walletFile)); } else if (nLoadWalletRet == DBErrors::TOO_NEW) { InitError(strprintf( _("Error loading %s: Wallet requires newer version of %s"), walletFile, _(PACKAGE_NAME))); return nullptr; } else if (nLoadWalletRet == DBErrors::NEED_REWRITE) { InitError(strprintf( _("Wallet needed to be rewritten: restart %s to complete"), _(PACKAGE_NAME))); return nullptr; } else { InitError(strprintf(_("Error loading %s"), walletFile)); return nullptr; } } uiInterface.LoadWallet(walletInstance); if (gArgs.GetBoolArg("-upgradewallet", fFirstRun)) { int nMaxVersion = gArgs.GetArg("-upgradewallet", 0); // The -upgradewallet without argument case if (nMaxVersion == 0) { LogPrintf("Performing wallet upgrade to %i\n", FEATURE_LATEST); nMaxVersion = CLIENT_VERSION; // permanently upgrade the wallet immediately walletInstance->SetMinVersion(FEATURE_LATEST); } else { LogPrintf("Allowing wallet upgrade up to %i\n", nMaxVersion); } if (nMaxVersion < walletInstance->GetVersion()) { InitError(_("Cannot downgrade wallet")); return nullptr; } walletInstance->SetMaxVersion(nMaxVersion); } if (fFirstRun) { // Ensure this wallet.dat can only be opened by clients supporting // HD with chain split and expects no default key. if (!gArgs.GetBoolArg("-usehd", true)) { InitError(strprintf(_("Error creating %s: You can't create non-HD " "wallets with this version."), walletFile)); return nullptr; } walletInstance->SetMinVersion(FEATURE_NO_DEFAULT_KEY); // Generate a new master key. CPubKey masterPubKey = walletInstance->GenerateNewHDMasterKey(); walletInstance->SetHDMasterKey(masterPubKey); // Top up the keypool if (!walletInstance->TopUpKeyPool()) { InitError(_("Unable to generate initial keys") += "\n"); return nullptr; } walletInstance->ChainStateFlushed(chainActive.GetLocator()); } else if (gArgs.IsArgSet("-usehd")) { bool useHD = gArgs.GetBoolArg("-usehd", true); if (walletInstance->IsHDEnabled() && !useHD) { InitError( strprintf(_("Error loading %s: You can't disable HD on an " "already existing HD wallet"), walletFile)); return nullptr; } if (!walletInstance->IsHDEnabled() && useHD) { InitError(strprintf(_("Error loading %s: You can't enable HD on an " "already existing non-HD wallet"), walletFile)); return nullptr; } } if (gArgs.IsArgSet("-mintxfee")) { Amount n = Amount::zero(); if (!ParseMoney(gArgs.GetArg("-mintxfee", ""), n) || n == Amount::zero()) { InitError(AmountErrMsg("mintxfee", gArgs.GetArg("-mintxfee", ""))); return nullptr; } if (n > HIGH_TX_FEE_PER_KB) { InitWarning(AmountHighWarn("-mintxfee") + " " + _("This is the minimum transaction fee you pay on " "every transaction.")); } walletInstance->m_min_fee = CFeeRate(n); } if (gArgs.IsArgSet("-fallbackfee")) { Amount nFeePerK = Amount::zero(); if (!ParseMoney(gArgs.GetArg("-fallbackfee", ""), nFeePerK)) { InitError( strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"), gArgs.GetArg("-fallbackfee", ""))); return nullptr; } if (nFeePerK > HIGH_TX_FEE_PER_KB) { InitWarning(AmountHighWarn("-fallbackfee") + " " + _("This is the transaction fee you may pay when fee " "estimates are not available.")); } walletInstance->m_fallback_fee = CFeeRate(nFeePerK); // disable fallback fee in case value was set to 0, enable if non-null // value walletInstance->m_allow_fallback_fee = (nFeePerK != Amount::zero()); } if (gArgs.IsArgSet("-paytxfee")) { Amount nFeePerK = Amount::zero(); if (!ParseMoney(gArgs.GetArg("-paytxfee", ""), nFeePerK)) { 
InitError(AmountErrMsg("paytxfee", gArgs.GetArg("-paytxfee", ""))); return nullptr; } if (nFeePerK > HIGH_TX_FEE_PER_KB) { InitWarning(AmountHighWarn("-paytxfee") + " " + _("This is the transaction fee you will pay if you " "send a transaction.")); } walletInstance->m_pay_tx_fee = CFeeRate(nFeePerK, 1000); if (walletInstance->m_pay_tx_fee < ::minRelayTxFee) { InitError(strprintf(_("Invalid amount for -paytxfee=<amount>: '%s' " "(must be at least %s)"), gArgs.GetArg("-paytxfee", ""), ::minRelayTxFee.ToString())); return nullptr; } } walletInstance->m_spend_zero_conf_change = gArgs.GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE); walletInstance->m_default_address_type = DEFAULT_ADDRESS_TYPE; walletInstance->m_default_change_type = OutputType::NONE; LogPrintf(" wallet %15dms\n", GetTimeMillis() - nStart); // Try to top up keypool. No-op if the wallet is locked. walletInstance->TopUpKeyPool(); LOCK(cs_main); CBlockIndex *pindexRescan = chainActive.Genesis(); if (!gArgs.GetBoolArg("-rescan", false)) { WalletBatch batch(*walletInstance->database); CBlockLocator locator; if (batch.ReadBestBlock(locator)) { pindexRescan = FindForkInGlobalIndex(chainActive, locator); } } walletInstance->m_last_block_processed = chainActive.Tip(); if (chainActive.Tip() && chainActive.Tip() != pindexRescan) { // We can't rescan beyond non-pruned blocks, stop and throw an error. // This might happen if a user uses an old wallet within a pruned node // or if he ran -disablewallet for a longer time, then decided to // re-enable. if (fPruneMode) { CBlockIndex *block = chainActive.Tip(); while (block && block->pprev && block->pprev->nStatus.hasData() && block->pprev->nTx > 0 && pindexRescan != block) { block = block->pprev; } if (pindexRescan != block) { InitError(_("Prune: last wallet synchronisation goes beyond " "pruned data. 
You need to -reindex (download the " "whole blockchain again in case of pruned node)")); return nullptr; } } uiInterface.InitMessage(_("Rescanning...")); LogPrintf("Rescanning last %i blocks (from block %i)...\n", chainActive.Height() - pindexRescan->nHeight, pindexRescan->nHeight); // No need to read and scan block if block was created before our wallet // birthday (as adjusted for block time variability) while (pindexRescan && walletInstance->nTimeFirstKey && (pindexRescan->GetBlockTime() < (walletInstance->nTimeFirstKey - TIMESTAMP_WINDOW))) { pindexRescan = chainActive.Next(pindexRescan); } nStart = GetTimeMillis(); { WalletRescanReserver reserver(walletInstance.get()); if (!reserver.reserve()) { InitError( _("Failed to rescan the wallet during initialization")); return nullptr; } walletInstance->ScanForWalletTransactions(pindexRescan, nullptr, reserver, true); } LogPrintf(" rescan %15dms\n", GetTimeMillis() - nStart); walletInstance->ChainStateFlushed(chainActive.GetLocator()); walletInstance->database->IncrementUpdateCounter(); // Restore wallet transaction metadata after -zapwallettxes=1 if (gArgs.GetBoolArg("-zapwallettxes", false) && gArgs.GetArg("-zapwallettxes", "1") != "2") { WalletBatch batch(*walletInstance->database); for (const CWalletTx &wtxOld : vWtx) { const TxId txid = wtxOld.GetId(); std::map<TxId, CWalletTx>::iterator mi = walletInstance->mapWallet.find(txid); if (mi != walletInstance->mapWallet.end()) { const CWalletTx *copyFrom = &wtxOld; CWalletTx *copyTo = &mi->second; copyTo->mapValue = copyFrom->mapValue; copyTo->vOrderForm = copyFrom->vOrderForm; copyTo->nTimeReceived = copyFrom->nTimeReceived; copyTo->nTimeSmart = copyFrom->nTimeSmart; copyTo->fFromMe = copyFrom->fFromMe; copyTo->strFromAccount = copyFrom->strFromAccount; copyTo->nOrderPos = copyFrom->nOrderPos; batch.WriteTx(*copyTo); } } } } // Register with the validation interface. It's ok to do this after rescan // since we're still holding cs_main. RegisterValidationInterface(walletInstance.get()); walletInstance->SetBroadcastTransactions( gArgs.GetBoolArg("-walletbroadcast", DEFAULT_WALLETBROADCAST)); LOCK(walletInstance->cs_wallet); LogPrintf("setKeyPool.size() = %u\n", walletInstance->GetKeyPoolSize()); LogPrintf("mapWallet.size() = %u\n", walletInstance->mapWallet.size()); LogPrintf("mapAddressBook.size() = %u\n", walletInstance->mapAddressBook.size()); return walletInstance; } void CWallet::postInitProcess() { // Add wallet transactions that aren't already in a block to mempool. // Do this here as mempool requires genesis block to be loaded. ReacceptWalletTransactions(); } bool CWallet::BackupWallet(const std::string &strDest) { return database->Backup(strDest); } CKeyPool::CKeyPool() { nTime = GetTime(); fInternal = false; } CKeyPool::CKeyPool(const CPubKey &vchPubKeyIn, bool internalIn) { nTime = GetTime(); vchPubKey = vchPubKeyIn; fInternal = internalIn; } CWalletKey::CWalletKey(int64_t nExpires) { nTimeCreated = (nExpires ? GetTime() : 0); nTimeExpires = nExpires; } void CMerkleTx::SetMerkleBranch(const CBlockIndex *pindex, int posInBlock) { // Update the tx's hashBlock hashBlock = pindex->GetBlockHash(); // Set the position of the transaction in the block. nIndex = posInBlock; } int CMerkleTx::GetDepthInMainChain() const { if (hashUnset()) { return 0; } AssertLockHeld(cs_main); // Find the block it claims to be in. CBlockIndex *pindex = LookupBlockIndex(hashBlock); if (!pindex || !chainActive.Contains(pindex)) { return 0; } return ((nIndex == -1) ? 
(-1) : 1) * (chainActive.Height() - pindex->nHeight + 1); } int CMerkleTx::GetBlocksToMaturity() const { if (!IsCoinBase()) { return 0; } return std::max(0, (COINBASE_MATURITY + 1) - GetDepthInMainChain()); } bool CMerkleTx::IsImmatureCoinBase() const { // note GetBlocksToMaturity is 0 for non-coinbase tx return GetBlocksToMaturity() > 0; } bool CWalletTx::AcceptToMemoryPool(const Amount nAbsurdFee, CValidationState &state) { // Quick check to avoid re-setting fInMempool to false if (g_mempool.exists(tx->GetId())) { return false; } // We must set fInMempool here - while it will be re-set to true by the // entered-mempool callback, if we did not there would be a race where a // user could call sendmoney in a loop and hit spurious out of funds errors // because we think that this newly generated transaction's change is // unavailable as we're not yet aware that it is in the mempool. bool ret = ::AcceptToMemoryPool( GetConfig(), g_mempool, state, tx, true /* fLimitFree */, nullptr /* pfMissingInputs */, false /* fOverrideMempoolLimit */, nAbsurdFee); fInMempool = ret; return ret; } static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy"; OutputType ParseOutputType(const std::string &type, OutputType default_type) { if (type.empty()) { return default_type; } else if (type == OUTPUT_TYPE_STRING_LEGACY) { return OutputType::LEGACY; } else { return OutputType::NONE; } } const std::string &FormatOutputType(OutputType type) { switch (type) { case OutputType::LEGACY: return OUTPUT_TYPE_STRING_LEGACY; default: assert(false); } } void CWallet::LearnRelatedScripts(const CPubKey &key, OutputType type) { // Nothing to do... } void CWallet::LearnAllRelatedScripts(const CPubKey &key) { // Nothing to do... } CTxDestination GetDestinationForKey(const CPubKey &key, OutputType type) { switch (type) { case OutputType::LEGACY: return key.GetID(); default: assert(false); } } std::vector<CTxDestination> GetAllDestinationsForKey(const CPubKey &key) { return std::vector<CTxDestination>{key.GetID()}; } CTxDestination CWallet::AddAndGetDestinationForScript(const CScript &script, OutputType type) { // Note that scripts over 520 bytes are not yet supported. switch (type) { case OutputType::LEGACY: return CScriptID(script); default: assert(false); } } std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput> &outputs, bool single_coin) const { std::vector<OutputGroup> groups; std::map<CTxDestination, OutputGroup> gmap; CTxDestination dst; for (const auto &output : outputs) { if (output.fSpendable) { CInputCoin input_coin = output.GetInputCoin(); size_t ancestors, descendants; g_mempool.GetTransactionAncestry(output.tx->GetId(), ancestors, descendants); if (!single_coin && ExtractDestination(output.tx->tx->vout[output.i].scriptPubKey, dst)) { // Limit output groups to no more than 10 entries, to protect // against inadvertently creating a too-large transaction // when using -avoidpartialspends if (gmap[dst].m_outputs.size() >= OUTPUT_GROUP_MAX_ENTRIES) { groups.push_back(gmap[dst]); gmap.erase(dst); } gmap[dst].Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants); } else { groups.emplace_back(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants); } } } if (!single_coin) { for (const auto &it : gmap) { groups.push_back(it.second); } } return groups; }
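// Editor's aside (not part of this change): a hedged usage sketch for
// CWallet::GroupOutputs above. 'pwallet' is assumed to be a loaded wallet and
// 'coins' the result of AvailableCoins(); guarded with #if 0.
#if 0
std::vector<COutput> coins;
pwallet->AvailableCoins(coins);
// single_coin == false: spendable outputs paying the same destination are
// grouped so -avoidpartialspends sweeps them together, capped at
// OUTPUT_GROUP_MAX_ENTRIES (10) per group to keep transactions from growing
// too large.
std::vector<OutputGroup> grouped =
    pwallet->GroupOutputs(coins, /* single_coin */ false);
// single_coin == true: every spendable output becomes its own group.
std::vector<OutputGroup> singles =
    pwallet->GroupOutputs(coins, /* single_coin */ true);
#endif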