diff --git a/src/addrdb.h b/src/addrdb.h index 7b30fcc1b9..77f0559198 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -1,97 +1,97 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_ADDRDB_H #define BITCOIN_ADDRDB_H #include "fs.h" #include "serialize.h" #include #include class CSubNet; class CAddrMan; class CDataStream; class CChainParams; typedef enum BanReason { BanReasonUnknown = 0, BanReasonNodeMisbehaving = 1, BanReasonManuallyAdded = 2 } BanReason; class CBanEntry { public: static const int CURRENT_VERSION = 1; int nVersion; int64_t nCreateTime; int64_t nBanUntil; uint8_t banReason; CBanEntry() { SetNull(); } - CBanEntry(int64_t nCreateTimeIn) { + explicit CBanEntry(int64_t nCreateTimeIn) { SetNull(); nCreateTime = nCreateTimeIn; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(nCreateTime); READWRITE(nBanUntil); READWRITE(banReason); } void SetNull() { nVersion = CBanEntry::CURRENT_VERSION; nCreateTime = 0; nBanUntil = 0; banReason = BanReasonUnknown; } std::string banReasonToString() { switch (banReason) { case BanReasonNodeMisbehaving: return "node misbehaving"; case BanReasonManuallyAdded: return "manually added"; default: return "unknown"; } } }; typedef std::map banmap_t; /** Access to the (IP) address database (peers.dat) */ class CAddrDB { private: fs::path pathAddr; const CChainParams &chainParams; public: CAddrDB(const CChainParams &chainParams); bool Write(const CAddrMan &addr); bool Read(CAddrMan &addr); bool Read(CAddrMan &addr, CDataStream &ssPeers); }; /** Access to the banlist database (banlist.dat) */ class CBanDB { private: fs::path pathBanlist; const CChainParams &chainParams; public: CBanDB(const CChainParams &chainParams); bool Write(const banmap_t &banSet); bool Read(banmap_t &banSet); }; #endif // BITCOIN_ADDRDB_H diff --git a/src/base58.cpp b/src/base58.cpp index a14a2b3dd8..9c2c1bb46b 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -1,301 +1,302 @@ // Copyright (c) 2014-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "base58.h" #include "hash.h" #include "script/script.h" #include "uint256.h" #include #include #include #include #include #include #include /** All alphanumeric characters except for "0", "I", "O", and "l" */ static const char *pszBase58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"; bool DecodeBase58(const char *psz, std::vector &vch) { // Skip leading spaces. while (*psz && isspace(*psz)) { psz++; } // Skip and count leading '1's. int zeroes = 0; int length = 0; while (*psz == '1') { zeroes++; psz++; } // Allocate enough space in big-endian base256 representation. // log(58) / log(256), rounded up. int size = strlen(psz) * 733 / 1000 + 1; std::vector b256(size); // Process the characters. while (*psz && !isspace(*psz)) { // Decode base58 character const char *ch = strchr(pszBase58, *psz); if (ch == nullptr) { return false; } // Apply "b256 = b256 * 58 + ch". 
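// The loop below is the big-integer step named in the comment above: multiply
// the whole base-256 number by 58, then add the decoded digit. A minimal
// standalone sketch of that single step, assuming a caller-sized buffer
// (MulAdd58 is a hypothetical helper, not part of this codebase):
//
//     void MulAdd58(std::vector<uint8_t> &b256, int digit) {
//         int carry = digit;
//         for (auto it = b256.rbegin(); it != b256.rend(); ++it) {
//             carry += 58 * (*it);
//             *it = carry % 256;
//             carry /= 256;
//         }
//         assert(carry == 0); // holds when the buffer was sized generously
//     }
//
// Decoding "2g" (alphabet indices 1 and 39) this way gives 1 * 58 + 39 = 97,
// i.e. the single byte 0x61.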
int carry = ch - pszBase58; int i = 0; for (std::vector::reverse_iterator it = b256.rbegin(); (carry != 0 || i < length) && (it != b256.rend()); ++it, ++i) { carry += 58 * (*it); *it = carry % 256; carry /= 256; } assert(carry == 0); length = i; psz++; } // Skip trailing spaces. while (isspace(*psz)) { psz++; } if (*psz != 0) { return false; } // Skip leading zeroes in b256. std::vector::iterator it = b256.begin() + (size - length); while (it != b256.end() && *it == 0) it++; // Copy result into output vector. vch.reserve(zeroes + (b256.end() - it)); vch.assign(zeroes, 0x00); while (it != b256.end()) { vch.push_back(*(it++)); } return true; } std::string EncodeBase58(const uint8_t *pbegin, const uint8_t *pend) { // Skip & count leading zeroes. int zeroes = 0; int length = 0; while (pbegin != pend && *pbegin == 0) { pbegin++; zeroes++; } // Allocate enough space in big-endian base58 representation. // log(256) / log(58), rounded up. int size = (pend - pbegin) * 138 / 100 + 1; std::vector b58(size); // Process the bytes. while (pbegin != pend) { int carry = *pbegin; int i = 0; // Apply "b58 = b58 * 256 + ch". for (std::vector::reverse_iterator it = b58.rbegin(); (carry != 0 || i < length) && (it != b58.rend()); it++, i++) { carry += 256 * (*it); *it = carry % 58; carry /= 58; } assert(carry == 0); length = i; pbegin++; } // Skip leading zeroes in base58 result. std::vector::iterator it = b58.begin() + (size - length); while (it != b58.end() && *it == 0) { it++; } // Translate the result into a string. std::string str; str.reserve(zeroes + (b58.end() - it)); str.assign(zeroes, '1'); while (it != b58.end()) { str += pszBase58[*(it++)]; } return str; } std::string EncodeBase58(const std::vector &vch) { return EncodeBase58(&vch[0], &vch[0] + vch.size()); } bool DecodeBase58(const std::string &str, std::vector &vchRet) { return DecodeBase58(str.c_str(), vchRet); } std::string EncodeBase58Check(const std::vector &vchIn) { // add 4-byte hash check to the end std::vector vch(vchIn); uint256 hash = Hash(vch.begin(), vch.end()); vch.insert(vch.end(), (uint8_t *)&hash, (uint8_t *)&hash + 4); return EncodeBase58(vch); } bool DecodeBase58Check(const char *psz, std::vector &vchRet) { if (!DecodeBase58(psz, vchRet) || (vchRet.size() < 4)) { vchRet.clear(); return false; } // re-calculate the checksum, insure it matches the included 4-byte checksum uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4); if (memcmp(&hash, &vchRet.end()[-4], 4) != 0) { vchRet.clear(); return false; } vchRet.resize(vchRet.size() - 4); return true; } bool DecodeBase58Check(const std::string &str, std::vector &vchRet) { return DecodeBase58Check(str.c_str(), vchRet); } CBase58Data::CBase58Data() { vchVersion.clear(); vchData.clear(); } void CBase58Data::SetData(const std::vector &vchVersionIn, const void *pdata, size_t nSize) { vchVersion = vchVersionIn; vchData.resize(nSize); if (!vchData.empty()) { memcpy(&vchData[0], pdata, nSize); } } void CBase58Data::SetData(const std::vector &vchVersionIn, const uint8_t *pbegin, const uint8_t *pend) { SetData(vchVersionIn, (void *)pbegin, pend - pbegin); } bool CBase58Data::SetString(const char *psz, unsigned int nVersionBytes) { std::vector vchTemp; bool rc58 = DecodeBase58Check(psz, vchTemp); if ((!rc58) || (vchTemp.size() < nVersionBytes)) { vchData.clear(); vchVersion.clear(); return false; } vchVersion.assign(vchTemp.begin(), vchTemp.begin() + nVersionBytes); vchData.resize(vchTemp.size() - nVersionBytes); if (!vchData.empty()) { memcpy(&vchData[0], &vchTemp[nVersionBytes], 
vchData.size()); } memory_cleanse(&vchTemp[0], vchTemp.size()); return true; } bool CBase58Data::SetString(const std::string &str) { return SetString(str.c_str()); } std::string CBase58Data::ToString() const { std::vector vch = vchVersion; vch.insert(vch.end(), vchData.begin(), vchData.end()); return EncodeBase58Check(vch); } int CBase58Data::CompareTo(const CBase58Data &b58) const { if (vchVersion < b58.vchVersion) return -1; if (vchVersion > b58.vchVersion) return 1; if (vchData < b58.vchData) return -1; if (vchData > b58.vchData) return 1; return 0; } namespace { class DestinationEncoder : public boost::static_visitor { private: const CChainParams &m_params; public: - DestinationEncoder(const CChainParams ¶ms) : m_params(params) {} + explicit DestinationEncoder(const CChainParams ¶ms) + : m_params(params) {} std::string operator()(const CKeyID &id) const { std::vector data = m_params.Base58Prefix(CChainParams::PUBKEY_ADDRESS); data.insert(data.end(), id.begin(), id.end()); return EncodeBase58Check(data); } std::string operator()(const CScriptID &id) const { std::vector data = m_params.Base58Prefix(CChainParams::SCRIPT_ADDRESS); data.insert(data.end(), id.begin(), id.end()); return EncodeBase58Check(data); } std::string operator()(const CNoDestination &no) const { return ""; } }; CTxDestination DecodeDestination(const std::string &str, const CChainParams ¶ms) { std::vector data; uint160 hash; if (!DecodeBase58Check(str, data)) { return CNoDestination(); } // Base58Check decoding const std::vector &pubkey_prefix = params.Base58Prefix(CChainParams::PUBKEY_ADDRESS); if (data.size() == 20 + pubkey_prefix.size() && std::equal(pubkey_prefix.begin(), pubkey_prefix.end(), data.begin())) { memcpy(hash.begin(), &data[pubkey_prefix.size()], 20); return CKeyID(hash); } const std::vector &script_prefix = params.Base58Prefix(CChainParams::SCRIPT_ADDRESS); if (data.size() == 20 + script_prefix.size() && std::equal(script_prefix.begin(), script_prefix.end(), data.begin())) { memcpy(hash.begin(), &data[script_prefix.size()], 20); return CScriptID(hash); } return CNoDestination(); } } // namespace void CBitcoinSecret::SetKey(const CKey &vchSecret) { assert(vchSecret.IsValid()); SetData(Params().Base58Prefix(CChainParams::SECRET_KEY), vchSecret.begin(), vchSecret.size()); if (vchSecret.IsCompressed()) vchData.push_back(1); } CKey CBitcoinSecret::GetKey() { CKey ret; assert(vchData.size() >= 32); ret.Set(vchData.begin(), vchData.begin() + 32, vchData.size() > 32 && vchData[32] == 1); return ret; } bool CBitcoinSecret::IsValid() const { bool fExpectedFormat = vchData.size() == 32 || (vchData.size() == 33 && vchData[32] == 1); bool fCorrectVersion = vchVersion == Params().Base58Prefix(CChainParams::SECRET_KEY); return fExpectedFormat && fCorrectVersion; } bool CBitcoinSecret::SetString(const char *pszSecret) { return CBase58Data::SetString(pszSecret) && IsValid(); } bool CBitcoinSecret::SetString(const std::string &strSecret) { return SetString(strSecret.c_str()); } std::string EncodeLegacyAddr(const CTxDestination &dest, const CChainParams ¶ms) { return boost::apply_visitor(DestinationEncoder(params), dest); } CTxDestination DecodeLegacyAddr(const std::string &str, const CChainParams ¶ms) { return DecodeDestination(str, params); } diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp index 4533c622e4..2dcbf34896 100644 --- a/src/bench/checkqueue.cpp +++ b/src/bench/checkqueue.cpp @@ -1,58 +1,58 @@ // Copyright (c) 2015 The Bitcoin Core developers // Distributed under the MIT software license, see 
the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "checkqueue.h" #include "bench.h" #include "prevector.h" #include "random.h" #include "util.h" #include "validation.h" #include #include static const int MIN_CORES = 2; static const size_t BATCHES = 101; static const size_t BATCH_SIZE = 30; static const int PREVECTOR_SIZE = 28; static const size_t QUEUE_BATCH_SIZE = 128; // This Benchmark tests the CheckQueue with a slightly realistic workload, where // checks all contain a prevector that is indirect 50% of the time and there is // a little bit of work done between calls to Add. static void CCheckQueueSpeedPrevectorJob(benchmark::State &state) { struct PrevectorJob { prevector p; PrevectorJob() {} - PrevectorJob(FastRandomContext &insecure_rand) { + explicit PrevectorJob(FastRandomContext &insecure_rand) { p.resize(insecure_rand.randrange(PREVECTOR_SIZE * 2)); } bool operator()() { return true; } void swap(PrevectorJob &x) { p.swap(x.p); }; }; CCheckQueue queue{QUEUE_BATCH_SIZE}; boost::thread_group tg; for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) { tg.create_thread([&] { queue.Thread(); }); } while (state.KeepRunning()) { // Make insecure_rand here so that each iteration is identical. FastRandomContext insecure_rand(true); CCheckQueueControl control(&queue); std::vector> vBatches(BATCHES); for (auto &vChecks : vBatches) { vChecks.reserve(BATCH_SIZE); for (size_t x = 0; x < BATCH_SIZE; ++x) vChecks.emplace_back(insecure_rand); control.Add(vChecks); } // control waits for completion by RAII, but it is done explicitly here // for clarity control.Wait(); } tg.interrupt_all(); tg.join_all(); } BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400); diff --git a/src/blockencodings.h b/src/blockencodings.h index 373fdb46f7..6a55a09eab 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -1,239 +1,239 @@ // Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
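// The recurring change throughout this patch adds `explicit` to
// single-argument constructors, which disables their use as implicit
// conversions. A minimal sketch of the effect (Widget and Consume are
// hypothetical names, not from this codebase):
//
//     struct Widget {
//         explicit Widget(int64_t n) : n(n) {}
//         int64_t n;
//     };
//     void Consume(const Widget &w);
//
//     Consume(42);         // accepted without `explicit`, rejected with it
//     Consume(Widget(42)); // the intended conversion must now be spelled out
//
// Call sites such as vChecks.emplace_back(insecure_rand) above keep compiling
// because emplace_back constructs the element directly rather than converting.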
#ifndef BITCOIN_BLOCK_ENCODINGS_H #define BITCOIN_BLOCK_ENCODINGS_H #include "primitives/block.h" #include class Config; class CTxMemPool; // Dumb helper to handle CTransaction compression at serialize-time struct TransactionCompressor { private: CTransactionRef &tx; public: - TransactionCompressor(CTransactionRef &txIn) : tx(txIn) {} + explicit TransactionCompressor(CTransactionRef &txIn) : tx(txIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { // TODO: Compress tx encoding READWRITE(tx); } }; class BlockTransactionsRequest { public: // A BlockTransactionsRequest message uint256 blockhash; std::vector indices; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(blockhash); uint64_t indices_size = uint64_t(indices.size()); READWRITE(COMPACTSIZE(indices_size)); if (ser_action.ForRead()) { size_t i = 0; while (indices.size() < indices_size) { indices.resize( std::min(uint64_t(1000 + indices.size()), indices_size)); for (; i < indices.size(); i++) { uint64_t n = 0; READWRITE(COMPACTSIZE(n)); if (n > std::numeric_limits::max()) { throw std::ios_base::failure( "index overflowed 32 bits"); } indices[i] = n; } } uint32_t offset = 0; for (auto &index : indices) { if (uint64_t(index) + uint64_t(offset) > std::numeric_limits::max()) { throw std::ios_base::failure("indices overflowed 32 bits"); } index = index + offset; offset = index + 1; } } else { for (size_t i = 0; i < indices.size(); i++) { uint64_t index = indices[i] - (i == 0 ? 0 : (indices[i - 1] + 1)); READWRITE(COMPACTSIZE(index)); } } } }; class BlockTransactions { public: // A BlockTransactions message uint256 blockhash; std::vector txn; BlockTransactions() {} - BlockTransactions(const BlockTransactionsRequest &req) + explicit BlockTransactions(const BlockTransactionsRequest &req) : blockhash(req.blockhash), txn(req.indices.size()) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(blockhash); uint64_t txn_size = (uint64_t)txn.size(); READWRITE(COMPACTSIZE(txn_size)); if (ser_action.ForRead()) { size_t i = 0; while (txn.size() < txn_size) { txn.resize(std::min(uint64_t(1000 + txn.size()), txn_size)); for (; i < txn.size(); i++) { READWRITE(REF(TransactionCompressor(txn[i]))); } } } else { for (size_t i = 0; i < txn.size(); i++) { READWRITE(REF(TransactionCompressor(txn[i]))); } } } }; // Dumb serialization/storage-helper for CBlockHeaderAndShortTxIDs and // PartiallyDownloadedBlock struct PrefilledTransaction { // Used as an offset since last prefilled tx in CBlockHeaderAndShortTxIDs, // as a proper transaction-in-block-index in PartiallyDownloadedBlock uint32_t index; CTransactionRef tx; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { uint64_t n = index; READWRITE(COMPACTSIZE(n)); if (n > std::numeric_limits::max()) { throw std::ios_base::failure("index overflowed 32-bits"); } index = n; READWRITE(REF(TransactionCompressor(tx))); } }; typedef enum ReadStatus_t { READ_STATUS_OK, // Invalid object, peer is sending bogus crap. // FIXME: differenciate bogus crap from crap that do not fit our policy. READ_STATUS_INVALID, // Failed to process object. READ_STATUS_FAILED, // Used only by FillBlock to indicate a failure in CheckBlock. 
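// BlockTransactionsRequest above serializes its sorted indices as gaps: the
// first index verbatim, then each later index as the count of skipped
// positions since the previous one (abs[i] - abs[i-1] - 1). A minimal
// round-trip sketch (hypothetical free functions, assuming strictly
// increasing input and omitting the overflow checks the real code performs):
//
//     std::vector<uint32_t> EncodeGaps(const std::vector<uint32_t> &abs) {
//         std::vector<uint32_t> gaps(abs.size());
//         for (size_t i = 0; i < abs.size(); i++)
//             gaps[i] = abs[i] - (i == 0 ? 0 : abs[i - 1] + 1);
//         return gaps;
//     }
//
//     std::vector<uint32_t> DecodeGaps(const std::vector<uint32_t> &gaps) {
//         std::vector<uint32_t> abs(gaps.size());
//         uint32_t offset = 0;
//         for (size_t i = 0; i < gaps.size(); i++) {
//             abs[i] = gaps[i] + offset;
//             offset = abs[i] + 1;
//         }
//         return abs;
//     }
//
// Indices {2, 5, 6, 10} encode as {2, 2, 0, 3} and decode back unchanged;
// keeping the gaps small lets most CompactSize values fit in a single byte.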
READ_STATUS_CHECKBLOCK_FAILED, } ReadStatus; class CBlockHeaderAndShortTxIDs { private: mutable uint64_t shorttxidk0, shorttxidk1; uint64_t nonce; void FillShortTxIDSelector() const; friend class PartiallyDownloadedBlock; static const int SHORTTXIDS_LENGTH = 6; protected: std::vector shorttxids; std::vector prefilledtxn; public: CBlockHeader header; // Dummy for deserialization CBlockHeaderAndShortTxIDs() {} CBlockHeaderAndShortTxIDs(const CBlock &block); uint64_t GetShortID(const uint256 &txhash) const; size_t BlockTxCount() const { return shorttxids.size() + prefilledtxn.size(); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(header); READWRITE(nonce); uint64_t shorttxids_size = (uint64_t)shorttxids.size(); READWRITE(COMPACTSIZE(shorttxids_size)); if (ser_action.ForRead()) { size_t i = 0; while (shorttxids.size() < shorttxids_size) { shorttxids.resize(std::min(uint64_t(1000 + shorttxids.size()), shorttxids_size)); for (; i < shorttxids.size(); i++) { uint32_t lsb = 0; uint16_t msb = 0; READWRITE(lsb); READWRITE(msb); shorttxids[i] = (uint64_t(msb) << 32) | uint64_t(lsb); static_assert( SHORTTXIDS_LENGTH == 6, "shorttxids serialization assumes 6-byte shorttxids"); } } } else { for (uint64_t shortid : shorttxids) { uint32_t lsb = shortid & 0xffffffff; uint16_t msb = (shortid >> 32) & 0xffff; READWRITE(lsb); READWRITE(msb); } } READWRITE(prefilledtxn); if (ser_action.ForRead()) { FillShortTxIDSelector(); } } }; class PartiallyDownloadedBlock { protected: std::vector txns_available; size_t prefilled_count = 0, mempool_count = 0, extra_count = 0; CTxMemPool *pool; const Config *config; public: CBlockHeader header; PartiallyDownloadedBlock(const Config &configIn, CTxMemPool *poolIn) : pool(poolIn), config(&configIn) {} // extra_txn is a list of extra transactions to look at, in form. ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector> &extra_txn); bool IsTxAvailable(size_t index) const; ReadStatus FillBlock(CBlock &block, const std::vector &vtx_missing); }; #endif diff --git a/src/chain.h b/src/chain.h index af65387d4a..91f6c407ea 100644 --- a/src/chain.h +++ b/src/chain.h @@ -1,411 +1,411 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_CHAIN_H #define BITCOIN_CHAIN_H #include "arith_uint256.h" #include "blockstatus.h" #include "blockvalidity.h" #include "consensus/params.h" #include "diskblockpos.h" #include "pow.h" #include "primitives/block.h" #include "tinyformat.h" #include "uint256.h" #include #include /** * Maximum amount of time that a block timestamp is allowed to exceed the * current network-adjusted time before the block will be accepted. */ static const int64_t MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60; /** * Timestamp window used as a grace period by code that compares external * timestamps (such as timestamps passed to RPCs, or wallet key creation times) * to block timestamps. This should be set at least as high as * MAX_FUTURE_BLOCK_TIME. */ static const int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME; /** * The block chain is a tree shaped structure starting with the genesis block at * the root, with each block potentially having multiple candidates to be the * next block. 
A blockindex may have multiple pprev pointing to it, but at most * one of them can be part of the currently active branch. */ class CBlockIndex { public: //! pointer to the hash of the block, if any. Memory is owned by this //! CBlockIndex const uint256 *phashBlock; //! pointer to the index of the predecessor of this block CBlockIndex *pprev; //! pointer to the index of some further predecessor of this block CBlockIndex *pskip; //! height of the entry in the chain. The genesis block has height 0 int nHeight; //! Which # file this block is stored in (blk?????.dat) int nFile; //! Byte offset within blk?????.dat where this block's data is stored unsigned int nDataPos; //! Byte offset within rev?????.dat where this block's undo data is stored unsigned int nUndoPos; //! (memory only) Total amount of work (expected number of hashes) in the //! chain up to and including this block arith_uint256 nChainWork; //! Number of transactions in this block. //! Note: in a potential headers-first mode, this number cannot be relied //! upon unsigned int nTx; //! (memory only) Number of transactions in the chain up to and including //! this block. //! This value will be non-zero only if and only if transactions for this //! block and all its parents are available. Change to 64-bit type when //! necessary; won't happen before 2030 unsigned int nChainTx; //! Verification status of this block. See enum BlockStatus BlockStatus nStatus; //! block header int32_t nVersion; uint256 hashMerkleRoot; uint32_t nTime; uint32_t nBits; uint32_t nNonce; //! (memory only) Sequential id assigned to distinguish order in which //! blocks are received. int32_t nSequenceId; //! (memory only) block header metadata uint64_t nTimeReceived; //! (memory only) Maximum nTime in the chain upto and including this block. 
unsigned int nTimeMax; void SetNull() { phashBlock = nullptr; pprev = nullptr; pskip = nullptr; nHeight = 0; nFile = 0; nDataPos = 0; nUndoPos = 0; nChainWork = arith_uint256(); nTx = 0; nChainTx = 0; nStatus = BlockStatus(); nSequenceId = 0; nTimeMax = 0; nVersion = 0; hashMerkleRoot = uint256(); nTime = 0; nTimeReceived = 0; nBits = 0; nNonce = 0; } CBlockIndex() { SetNull(); } - CBlockIndex(const CBlockHeader &block) { + explicit CBlockIndex(const CBlockHeader &block) { SetNull(); nVersion = block.nVersion; hashMerkleRoot = block.hashMerkleRoot; nTime = block.nTime; nTimeReceived = 0; nBits = block.nBits; nNonce = block.nNonce; } CDiskBlockPos GetBlockPos() const { CDiskBlockPos ret; if (nStatus.hasData()) { ret.nFile = nFile; ret.nPos = nDataPos; } return ret; } CDiskBlockPos GetUndoPos() const { CDiskBlockPos ret; if (nStatus.hasUndo()) { ret.nFile = nFile; ret.nPos = nUndoPos; } return ret; } CBlockHeader GetBlockHeader() const { CBlockHeader block; block.nVersion = nVersion; if (pprev) { block.hashPrevBlock = pprev->GetBlockHash(); } block.hashMerkleRoot = hashMerkleRoot; block.nTime = nTime; block.nBits = nBits; block.nNonce = nNonce; return block; } uint256 GetBlockHash() const { return *phashBlock; } int64_t GetBlockTime() const { return int64_t(nTime); } int64_t GetBlockTimeMax() const { return int64_t(nTimeMax); } int64_t GetHeaderReceivedTime() const { return nTimeReceived; } int64_t GetReceivedTimeDiff() const { return GetHeaderReceivedTime() - GetBlockTime(); } enum { nMedianTimeSpan = 11 }; int64_t GetMedianTimePast() const { int64_t pmedian[nMedianTimeSpan]; int64_t *pbegin = &pmedian[nMedianTimeSpan]; int64_t *pend = &pmedian[nMedianTimeSpan]; const CBlockIndex *pindex = this; for (int i = 0; i < nMedianTimeSpan && pindex; i++, pindex = pindex->pprev) { *(--pbegin) = pindex->GetBlockTime(); } std::sort(pbegin, pend); return pbegin[(pend - pbegin) / 2]; } std::string ToString() const { return strprintf( "CBlockIndex(pprev=%p, nHeight=%d, merkle=%s, hashBlock=%s)", pprev, nHeight, hashMerkleRoot.ToString(), GetBlockHash().ToString()); } //! Check whether this block index entry is valid up to the passed validity //! level. bool IsValid(enum BlockValidity nUpTo = BlockValidity::TRANSACTIONS) const { return nStatus.isValid(nUpTo); } //! Raise the validity level of this block index entry. //! Returns true if the validity was changed. bool RaiseValidity(enum BlockValidity nUpTo) { // Only validity flags allowed. if (nStatus.isInvalid()) { return false; } if (nStatus.getValidity() >= nUpTo) { return false; } nStatus = nStatus.withValidity(nUpTo); return true; } //! Build the skiplist pointer for this entry. void BuildSkip(); //! Efficiently find an ancestor of this block. CBlockIndex *GetAncestor(int height); const CBlockIndex *GetAncestor(int height) const; }; /** * Maintain a map of CBlockIndex for all known headers. */ struct BlockHasher { size_t operator()(const uint256 &hash) const { return hash.GetCheapHash(); } }; typedef std::unordered_map BlockMap; extern BlockMap mapBlockIndex; arith_uint256 GetBlockProof(const CBlockIndex &block); /** * Return the time it would take to redo the work difference between from and * to, assuming the current hashrate corresponds to the difficulty at tip, in * seconds. */ int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &); /** * Find the forking point between two chain tips. 
*/ const CBlockIndex *LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb); /** * Check if two block index are on the same fork. */ bool AreOnTheSameFork(const CBlockIndex *pa, const CBlockIndex *pb); /** Used to marshal pointers into hashes for db storage. */ class CDiskBlockIndex : public CBlockIndex { public: uint256 hashPrev; CDiskBlockIndex() { hashPrev = uint256(); } explicit CDiskBlockIndex(const CBlockIndex *pindex) : CBlockIndex(*pindex) { hashPrev = (pprev ? pprev->GetBlockHash() : uint256()); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int _nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) { READWRITE(VARINT(_nVersion)); } READWRITE(VARINT(nHeight)); READWRITE(nStatus); READWRITE(VARINT(nTx)); if (nStatus.hasData() || nStatus.hasUndo()) { READWRITE(VARINT(nFile)); } if (nStatus.hasData()) { READWRITE(VARINT(nDataPos)); } if (nStatus.hasUndo()) { READWRITE(VARINT(nUndoPos)); } // block header READWRITE(this->nVersion); READWRITE(hashPrev); READWRITE(hashMerkleRoot); READWRITE(nTime); READWRITE(nBits); READWRITE(nNonce); } uint256 GetBlockHash() const { CBlockHeader block; block.nVersion = nVersion; block.hashPrevBlock = hashPrev; block.hashMerkleRoot = hashMerkleRoot; block.nTime = nTime; block.nBits = nBits; block.nNonce = nNonce; return block.GetHash(); } std::string ToString() const { std::string str = "CDiskBlockIndex("; str += CBlockIndex::ToString(); str += strprintf("\n hashBlock=%s, hashPrev=%s)", GetBlockHash().ToString(), hashPrev.ToString()); return str; } }; /** * An in-memory indexed chain of blocks. */ class CChain { private: std::vector vChain; public: /** * Returns the index entry for the genesis block of this chain, or nullptr * if none. */ CBlockIndex *Genesis() const { return vChain.size() > 0 ? vChain[0] : nullptr; } /** * Returns the index entry for the tip of this chain, or nullptr if none. */ CBlockIndex *Tip() const { return vChain.size() > 0 ? vChain[vChain.size() - 1] : nullptr; } /** * Returns the index entry at a particular height in this chain, or nullptr * if no such height exists. */ CBlockIndex *operator[](int nHeight) const { if (nHeight < 0 || nHeight >= (int)vChain.size()) { return nullptr; } return vChain[nHeight]; } /** Compare two chains efficiently. */ friend bool operator==(const CChain &a, const CChain &b) { return a.vChain.size() == b.vChain.size() && a.vChain[a.vChain.size() - 1] == b.vChain[b.vChain.size() - 1]; } /** Efficiently check whether a block is present in this chain. */ bool Contains(const CBlockIndex *pindex) const { return (*this)[pindex->nHeight] == pindex; } /** * Find the successor of a block in this chain, or nullptr if the given * index is not found or is the tip. */ CBlockIndex *Next(const CBlockIndex *pindex) const { if (!Contains(pindex)) { return nullptr; } return (*this)[pindex->nHeight + 1]; } /** * Return the maximal height in the chain. Is equal to chain.Tip() ? * chain.Tip()->nHeight : -1. */ int Height() const { return vChain.size() - 1; } /** Set/initialize a chain with a given tip. */ void SetTip(CBlockIndex *pindex); /** * Return a CBlockLocator that refers to a block in this chain (by default * the tip). */ CBlockLocator GetLocator(const CBlockIndex *pindex = nullptr) const; /** * Find the last common block between this chain and a block index entry. */ const CBlockIndex *FindFork(const CBlockIndex *pindex) const; /** * Find the earliest block with timestamp equal or greater than the given. 
*/ CBlockIndex *FindEarliestAtLeast(int64_t nTime) const; }; #endif // BITCOIN_CHAIN_H diff --git a/src/compressor.h b/src/compressor.h index e3531c6f35..80378ff960 100644 --- a/src/compressor.h +++ b/src/compressor.h @@ -1,120 +1,120 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_COMPRESSOR_H #define BITCOIN_COMPRESSOR_H #include "primitives/transaction.h" #include "script/script.h" #include "serialize.h" class CKeyID; class CPubKey; class CScriptID; /** * Compact serializer for scripts. * * It detects common cases and encodes them much more efficiently. * 3 special cases are defined: * * Pay to pubkey hash (encoded as 21 bytes) * * Pay to script hash (encoded as 21 bytes) * * Pay to pubkey starting with 0x02, 0x03 or 0x04 (encoded as 33 bytes) * * Other scripts up to 121 bytes require 1 byte + script length. Above that, * scripts up to 16505 bytes require 2 bytes + script length. */ class CScriptCompressor { private: /** * make this static for now (there are only 6 special scripts defined) this * can potentially be extended together with a new nVersion for * transactions, in which case this value becomes dependent on nVersion and * nHeight of the enclosing transaction. */ static const unsigned int nSpecialScripts = 6; CScript &script; protected: /** * These check for scripts for which a special case with a shorter encoding * is defined. They are implemented separately from the CScript test, as * these test for exact byte sequence correspondences, and are more strict. * For example, IsToPubKey also verifies whether the public key is valid (as * invalid ones cannot be represented in compressed form). 
*/ bool IsToKeyID(CKeyID &hash) const; bool IsToScriptID(CScriptID &hash) const; bool IsToPubKey(CPubKey &pubkey) const; bool Compress(std::vector &out) const; unsigned int GetSpecialSize(unsigned int nSize) const; bool Decompress(unsigned int nSize, const std::vector &out); public: - CScriptCompressor(CScript &scriptIn) : script(scriptIn) {} + explicit CScriptCompressor(CScript &scriptIn) : script(scriptIn) {} template void Serialize(Stream &s) const { std::vector compr; if (Compress(compr)) { s << CFlatData(compr); return; } unsigned int nSize = script.size() + nSpecialScripts; s << VARINT(nSize); s << CFlatData(script); } template void Unserialize(Stream &s) { unsigned int nSize = 0; s >> VARINT(nSize); if (nSize < nSpecialScripts) { std::vector vch(GetSpecialSize(nSize), 0x00); s >> REF(CFlatData(vch)); Decompress(nSize, vch); return; } nSize -= nSpecialScripts; if (nSize > MAX_SCRIPT_SIZE) { // Overly long script, replace with a short invalid one script << OP_RETURN; s.ignore(nSize); } else { script.resize(nSize); s >> REF(CFlatData(script)); } } }; /** wrapper for CTxOut that provides a more compact serialization */ class CTxOutCompressor { private: CTxOut &txout; public: static uint64_t CompressAmount(Amount nAmount); static Amount DecompressAmount(uint64_t nAmount); - CTxOutCompressor(CTxOut &txoutIn) : txout(txoutIn) {} + explicit CTxOutCompressor(CTxOut &txoutIn) : txout(txoutIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { if (!ser_action.ForRead()) { uint64_t nVal = CompressAmount(txout.nValue); READWRITE(VARINT(nVal)); } else { uint64_t nVal = 0; READWRITE(VARINT(nVal)); txout.nValue = DecompressAmount(nVal); } CScriptCompressor cscript(REF(txout.scriptPubKey)); READWRITE(cscript); } }; #endif // BITCOIN_COMPRESSOR_H diff --git a/src/cuckoocache.h b/src/cuckoocache.h index 8106caa088..e538ca24ad 100644 --- a/src/cuckoocache.h +++ b/src/cuckoocache.h @@ -1,461 +1,461 @@ // Copyright (c) 2016 Jeremy Rubin // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef _BITCOIN_CUCKOOCACHE_H_ #define _BITCOIN_CUCKOOCACHE_H_ #include #include #include #include #include #include #include /** namespace CuckooCache provides high performance cache primitives * * Summary: * * 1) bit_packed_atomic_flags is bit-packed atomic flags for garbage collection * * 2) cache is a cache which is performant in memory usage and lookup speed. It * is lockfree for erase operations. Elements are lazily erased on the next * insert. */ namespace CuckooCache { /** * bit_packed_atomic_flags implements a container for garbage collection flags * that is only thread unsafe on calls to setup. This class bit-packs collection * flags for memory efficiency. * * All operations are std::memory_order_relaxed so external mechanisms must * ensure that writes and reads are properly synchronized. * * On setup(n), all bits up to n are marked as collected. * * Under the hood, because it is an 8-bit type, it makes sense to use a multiple * of 8 for setup, but it will be safe if that is not the case as well. */ class bit_packed_atomic_flags { std::unique_ptr[]> mem; public: /** No default constructor as there must be some size */ bit_packed_atomic_flags() = delete; /** * bit_packed_atomic_flags constructor creates memory to sufficiently keep * track of garbage collection information for size entries. 
* * @param size the number of elements to allocate space for * * @post bit_set, bit_unset, and bit_is_set function properly forall x. x < * size * @post All calls to bit_is_set (without subsequent bit_unset) will return * true. */ - bit_packed_atomic_flags(uint32_t size) { + explicit bit_packed_atomic_flags(uint32_t size) { // pad out the size if needed size = (size + 7) / 8; mem.reset(new std::atomic[size]); for (uint32_t i = 0; i < size; ++i) mem[i].store(0xFF); }; /** * setup marks all entries and ensures that bit_packed_atomic_flags can * store at least size entries * * @param b the number of elements to allocate space for * @post bit_set, bit_unset, and bit_is_set function properly forall x. x < * b * @post All calls to bit_is_set (without subsequent bit_unset) will return * true. */ inline void setup(uint32_t b) { bit_packed_atomic_flags d(b); std::swap(mem, d.mem); } /** * bit_set sets an entry as discardable. * * @param s the index of the entry to bit_set. * @post immediately subsequent call (assuming proper external memory * ordering) to bit_is_set(s) == true. */ inline void bit_set(uint32_t s) { mem[s >> 3].fetch_or(1 << (s & 7), std::memory_order_relaxed); } /** * bit_unset marks an entry as something that should not be overwritten * * @param s the index of the entry to bit_unset. * @post immediately subsequent call (assuming proper external memory * ordering) to bit_is_set(s) == false. */ inline void bit_unset(uint32_t s) { mem[s >> 3].fetch_and(~(1 << (s & 7)), std::memory_order_relaxed); } /** * bit_is_set queries the table for discardability at s * * @param s the index of the entry to read. * @returns if the bit at index s was set. * */ inline bool bit_is_set(uint32_t s) const { return (1 << (s & 7)) & mem[s >> 3].load(std::memory_order_relaxed); } }; /** * cache implements a cache with properties similar to a cuckoo-set * * The cache is able to hold up to (~(uint32_t)0) - 1 elements. * * Read Operations: * - contains(*, false) * * Read+Erase Operations: * - contains(*, true) * * Erase Operations: * - allow_erase() * * Write Operations: * - setup() * - setup_bytes() * - insert() * - please_keep() * * Synchronization Free Operations: * - invalid() * - compute_hashes() * * User Must Guarantee: * * 1) Write Requires synchronized access (e.g., a lock) * 2) Read Requires no concurrent Write, synchronized with the last insert. * 3) Erase requires no concurrent Write, synchronized with last insert. * 4) An Erase caller must release all memory before allowing a new Writer. * * * Note on function names: * - The name "allow_erase" is used because the real discard happens later. * - The name "please_keep" is used because elements may be erased anyways on * insert. * * @tparam Element should be a movable and copyable type * @tparam Hash should be a function/callable which takes a template parameter * hash_select and an Element and extracts a hash from it. Should return * high-entropy hashes for `Hash h; h<0>(e) ... h<7>(e)`. */ template class cache { private: /** table stores all the elements */ std::vector table; /** size stores the total available slots in the hash table */ uint32_t size; /** * The bit_packed_atomic_flags array is marked mutable because we want * garbage collection to be allowed to occur from const methods. */ mutable bit_packed_atomic_flags collection_flags; /** * epoch_flags tracks how recently an element was inserted into the cache. * true denotes recent, false denotes not-recent. See insert() method for * full semantics. 
*/ mutable std::vector epoch_flags; /** * epoch_heuristic_counter is used to determine when a epoch might be aged & * an expensive scan should be done. epoch_heuristic_counter is decremented * on insert and reset to the new number of inserts which would cause the * epoch to reach epoch_size when it reaches zero. */ uint32_t epoch_heuristic_counter; /** * epoch_size is set to be the number of elements supposed to be in a epoch. * When the number of non-erased elements in a epoch exceeds epoch_size, a * new epoch should be started and all current entries demoted. epoch_size * is set to be 45% of size because we want to keep load around 90%, and we * support 3 epochs at once -- one "dead" which has been erased, one "dying" * which has been marked to be erased next, and one "living" which new * inserts add to. */ uint32_t epoch_size; /** * hash_mask should be set to appropriately mask out a hash such that every * masked hash is [0,size), eg, if floor(log2(size)) == 20, then hash_mask * should be (1<<20)-1 */ uint32_t hash_mask; /** * depth_limit determines how many elements insert should try to replace. * Should be set to log2(n) */ uint8_t depth_limit; /** * hash_function is a const instance of the hash function. It cannot be * static or initialized at call time as it may have internal state (such as * a nonce). */ const Hash hash_function; /** * compute_hashes is convenience for not having to write out this expression * everywhere we use the hash values of an Element. * * @param e the element whose hashes will be returned * @returns std::array of deterministic hashes derived from e */ inline std::array compute_hashes(const Element &e) const { return {{hash_function.template operator()<0>(e) & hash_mask, hash_function.template operator()<1>(e) & hash_mask, hash_function.template operator()<2>(e) & hash_mask, hash_function.template operator()<3>(e) & hash_mask, hash_function.template operator()<4>(e) & hash_mask, hash_function.template operator()<5>(e) & hash_mask, hash_function.template operator()<6>(e) & hash_mask, hash_function.template operator()<7>(e) & hash_mask}}; } /* end * @returns a constexpr index that can never be inserted to */ constexpr uint32_t invalid() const { return ~(uint32_t)0; } /** * allow_erase marks the element at index n as discardable. * Threadsafe without any concurrent insert. * @param n the index to allow erasure of */ inline void allow_erase(uint32_t n) const { collection_flags.bit_set(n); } /** * please_keep marks the element at index n as an entry that should be kept. * Threadsafe without any concurrent insert. * @param n the index to prioritize keeping */ inline void please_keep(uint32_t n) const { collection_flags.bit_unset(n); } /** * epoch_check handles the changing of epochs for elements stored in the * cache. epoch_check should be run before every insert. * * First, epoch_check decrements and checks the cheap heuristic, and then * does a more expensive scan if the cheap heuristic runs out. If the * expensive scan succeeds, the epochs are aged and old elements are * allow_erased. The cheap heuristic is reset to retrigger after the worst * case growth of the current epoch's elements would exceed the epoch_size. */ void epoch_check() { if (epoch_heuristic_counter != 0) { --epoch_heuristic_counter; return; } // count the number of elements from the latest epoch which have not // been erased. 
uint32_t epoch_unused_count = 0; for (uint32_t i = 0; i < size; ++i) epoch_unused_count += epoch_flags[i] && !collection_flags.bit_is_set(i); // If there are more non-deleted entries in the current epoch than the // epoch size, then allow_erase on all elements in the old epoch (marked // false) and move all elements in the current epoch to the old epoch // but do not call allow_erase on their indices. if (epoch_unused_count >= epoch_size) { for (uint32_t i = 0; i < size; ++i) if (epoch_flags[i]) epoch_flags[i] = false; else allow_erase(i); epoch_heuristic_counter = epoch_size; } else { // reset the epoch_heuristic_counter to next do a scan when worst // case behavior (no intermittent erases) would exceed epoch size, // with a reasonable minimum scan size. Ordinarily, we would have to // sanity check std::min(epoch_size, epoch_unused_count), but we // already know that `epoch_unused_count < epoch_size` in this // branch epoch_heuristic_counter = std::max( 1u, std::max(epoch_size / 16, epoch_size - epoch_unused_count)); } } public: /** * You must always construct a cache with some elements via a subsequent * call to setup or setup_bytes, otherwise operations may segfault. */ cache() : table(), size(), collection_flags(0), epoch_flags(), epoch_heuristic_counter(), epoch_size(), depth_limit(0), hash_function() {} /** * setup initializes the container to store no more than new_size elements. * setup rounds down to a power of two size. * * setup should only be called once. * * @param new_size the desired number of elements to store * @returns the maximum number of elements storable */ uint32_t setup(uint32_t new_size) { // depth_limit must be at least one otherwise errors can occur. depth_limit = static_cast( std::log2(static_cast(std::max((uint32_t)2, new_size)))); size = 1 << depth_limit; hash_mask = size - 1; table.resize(size); collection_flags.setup(size); epoch_flags.resize(size); // Set to 45% as described above epoch_size = std::max((uint32_t)1, (45 * size) / 100); // Initially set to wait for a whole epoch epoch_heuristic_counter = epoch_size; return size; } /** * setup_bytes is a convenience function which accounts for internal memory * usage when deciding how many elements to store. It isn't perfect because * it doesn't account for any overhead (struct size, MallocUsage, collection * and epoch flags). This was done to simplify selecting a power of two * size. In the expected use case, an extra two bits per entry should be * negligible compared to the size of the elements. * * @param bytes the approximate number of bytes to use for this data * structure. * @returns the maximum number of elements storable (see setup() * documentation for more detail) */ uint32_t setup_bytes(size_t bytes) { return setup(bytes / sizeof(Element)); } /** * insert loops at most depth_limit times trying to insert a hash at various * locations in the table via a variant of the Cuckoo Algorithm with eight * hash locations. * * It drops the last tried element if it runs out of depth before * encountering an open slot. * * Thus * * insert(x); * return contains(x, false); * * is not guaranteed to return true. * * @param e the element to insert * @post one of the following: All previously inserted elements and e are * now in the table, one previously inserted element is evicted from the * table, the entry attempted to be inserted is evicted. 
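// A minimal usage sketch of this cache. EightWayHasher and the byte budget
// below are made-up examples; real callers supply their own salted hasher
// that returns eight independent 32-bit hashes per element:
//
//     struct EightWayHasher {
//         template <uint8_t hash_select>
//         uint32_t operator()(const uint256 &key) const {
//             // Slice the 256-bit key into eight 32-bit words. Adequate only
//             // for keys that are already uniformly distributed, e.g. txids.
//             return key.GetUint64(hash_select / 2) >> (32 * (hash_select % 2));
//         }
//     };
//
//     CuckooCache::cache<uint256, EightWayHasher> cc;
//     cc.setup_bytes(32 << 20);        // budget ~32 MiB of Element storage
//     cc.insert(txid);                 // txid: some previously computed uint256
//     bool hit = cc.contains(txid, /* erase= */ true);
//
// setup() rounds the element count down to a power of two so that hash_mask
// can be size - 1; for example setup(3000000) keeps 1 << 21 = 2097152 slots.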
*/ inline void insert(Element e) { epoch_check(); uint32_t last_loc = invalid(); bool last_epoch = true; std::array locs = compute_hashes(e); // Make sure we have not already inserted this element. // If we have, make sure that it does not get deleted. for (uint32_t loc : locs) if (table[loc] == e) { please_keep(loc); epoch_flags[loc] = last_epoch; return; } for (uint8_t depth = 0; depth < depth_limit; ++depth) { // First try to insert to an empty slot, if one exists for (uint32_t loc : locs) { if (!collection_flags.bit_is_set(loc)) continue; table[loc] = std::move(e); please_keep(loc); epoch_flags[loc] = last_epoch; return; } /** * Swap with the element at the location that was not the last one * looked at. Example: * * 1) On first iteration, last_loc == invalid(), find returns last, * so last_loc defaults to locs[0]. * 2) On further iterations, where last_loc == locs[k], last_loc * will go to locs[k+1 % 8], i.e., next of the 8 indices wrapping * around to 0 if needed. * * This prevents moving the element we just put in. * * The swap is not a move -- we must switch onto the evicted element * for the next iteration. */ last_loc = locs[(1 + (std::find(locs.begin(), locs.end(), last_loc) - locs.begin())) & 7]; std::swap(table[last_loc], e); // Can't std::swap a std::vector::reference and a bool&. bool epoch = last_epoch; last_epoch = epoch_flags[last_loc]; epoch_flags[last_loc] = epoch; // Recompute the locs -- unfortunately happens one too many times! locs = compute_hashes(e); } } /** * contains iterates through the hash locations for a given element and * checks to see if it is present. * * contains does not check garbage collected state (in other words, garbage * is only collected when the space is needed), so: * * insert(x); * if (contains(x, true)) * return contains(x, false); * else * return true; * * executed on a single thread will always return true! * * This is a great property for re-org performance for example. * * contains returns a bool set true if the element was found. * * @param e the element to check * @param erase * * @post if erase is true and the element is found, then the garbage collect * flag is set * @returns true if the element is found, false otherwise */ inline bool contains(const Element &e, const bool erase) const { std::array locs = compute_hashes(e); for (uint32_t loc : locs) { if (table[loc] == e) { if (erase) { allow_erase(loc); } return true; } } return false; } }; } // namespace CuckooCache #endif diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 0a05735e56..848d6bcb9e 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -1,330 +1,331 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_DBWRAPPER_H #define BITCOIN_DBWRAPPER_H #include "clientversion.h" #include "fs.h" #include "serialize.h" #include "streams.h" #include "util.h" #include "utilstrencodings.h" #include "version.h" #include #include static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64; static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024; class dbwrapper_error : public std::runtime_error { public: - dbwrapper_error(const std::string &msg) : std::runtime_error(msg) {} + explicit dbwrapper_error(const std::string &msg) + : std::runtime_error(msg) {} }; class CDBWrapper; /** * These should be considered an implementation detail of the specific database. 
*/ namespace dbwrapper_private { /** * Handle database error by throwing dbwrapper_error exception. */ void HandleError(const leveldb::Status &status); /** * Work around circular dependency, as well as for testing in dbwrapper_tests. * Database obfuscation should be considered an implementation detail of the * specific database. */ const std::vector &GetObfuscateKey(const CDBWrapper &w); }; // namespace dbwrapper_private /** Batch of changes queued to be written to a CDBWrapper */ class CDBBatch { friend class CDBWrapper; private: const CDBWrapper &parent; leveldb::WriteBatch batch; CDataStream ssKey; CDataStream ssValue; size_t size_estimate; public: /** * @param[in] _parent CDBWrapper that this batch is to be submitted to */ - CDBBatch(const CDBWrapper &_parent) + explicit CDBBatch(const CDBWrapper &_parent) : parent(_parent), ssKey(SER_DISK, CLIENT_VERSION), ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0){}; void Clear() { batch.Clear(); size_estimate = 0; } template void Write(const K &key, const V &value) { ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(ssKey.data(), ssKey.size()); ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE); ssValue << value; ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); leveldb::Slice slValue(ssValue.data(), ssValue.size()); batch.Put(slKey, slValue); // LevelDB serializes writes as: // - byte: header // - varint: key length (1 byte up to 127B, 2 bytes up to 16383B, ...) // - byte[]: key // - varint: value length // - byte[]: value // The formula below assumes the key and value are both less than 16k. size_estimate += 3 + (slKey.size() > 127) + slKey.size() + (slValue.size() > 127) + slValue.size(); ssKey.clear(); ssValue.clear(); } template void Erase(const K &key) { ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(ssKey.data(), ssKey.size()); batch.Delete(slKey); // LevelDB serializes erases as: // - byte: header // - varint: key length // - byte[]: key // The formula below assumes the key is less than 16kB. size_estimate += 2 + (slKey.size() > 127) + slKey.size(); ssKey.clear(); } size_t SizeEstimate() const { return size_estimate; } }; class CDBIterator { private: const CDBWrapper &parent; leveldb::Iterator *piter; public: /** * @param[in] _parent Parent CDBWrapper instance. * @param[in] _piter The original leveldb iterator. */ CDBIterator(const CDBWrapper &_parent, leveldb::Iterator *_piter) : parent(_parent), piter(_piter){}; ~CDBIterator(); bool Valid(); void SeekToFirst(); template void Seek(const K &key) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(ssKey.data(), ssKey.size()); piter->Seek(slKey); } void Next(); template bool GetKey(K &key) { leveldb::Slice slKey = piter->key(); try { CDataStream ssKey(slKey.data(), slKey.data() + slKey.size(), SER_DISK, CLIENT_VERSION); ssKey >> key; } catch (const std::exception &) { return false; } return true; } template bool GetValue(V &value) { leveldb::Slice slValue = piter->value(); try { CDataStream ssValue(slValue.data(), slValue.data() + slValue.size(), SER_DISK, CLIENT_VERSION); ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); ssValue >> value; } catch (const std::exception &) { return false; } return true; } unsigned int GetValueSize() { return piter->value().size(); } }; class CDBWrapper { friend const std::vector & dbwrapper_private::GetObfuscateKey(const CDBWrapper &w); private: //! 
custom environment this database is using (may be nullptr in case of //! default environment) leveldb::Env *penv; //! database options used leveldb::Options options; //! options used when reading from the database leveldb::ReadOptions readoptions; //! options used when iterating over values of the database leveldb::ReadOptions iteroptions; //! options used when writing to the database leveldb::WriteOptions writeoptions; //! options used when sync writing to the database leveldb::WriteOptions syncoptions; //! the database itself leveldb::DB *pdb; //! a key used for optional XOR-obfuscation of the database std::vector obfuscate_key; //! the key under which the obfuscation key is stored static const std::string OBFUSCATE_KEY_KEY; //! the length of the obfuscate key in number of bytes static const unsigned int OBFUSCATE_KEY_NUM_BYTES; std::vector CreateObfuscateKey() const; public: /** * @param[in] path Location in the filesystem where leveldb data will * be stored. * @param[in] nCacheSize Configures various leveldb cache settings. * @param[in] fMemory If true, use leveldb's memory environment. * @param[in] fWipe If true, remove all existing data. * @param[in] obfuscate If true, store data obfuscated via simple XOR. If * false, XOR * with a zero'd byte array. */ CDBWrapper(const fs::path &path, size_t nCacheSize, bool fMemory = false, bool fWipe = false, bool obfuscate = false); ~CDBWrapper(); template bool Read(const K &key, V &value) const { CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(ssKey.data(), ssKey.size()); std::string strValue; leveldb::Status status = pdb->Get(readoptions, slKey, &strValue); if (!status.ok()) { if (status.IsNotFound()) return false; LogPrintf("LevelDB read failure: %s\n", status.ToString()); dbwrapper_private::HandleError(status); } try { CDataStream ssValue(strValue.data(), strValue.data() + strValue.size(), SER_DISK, CLIENT_VERSION); ssValue.Xor(obfuscate_key); ssValue >> value; } catch (const std::exception &) { return false; } return true; } template bool Write(const K &key, const V &value, bool fSync = false) { CDBBatch batch(*this); batch.Write(key, value); return WriteBatch(batch, fSync); } template bool Exists(const K &key) const { CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; leveldb::Slice slKey(ssKey.data(), ssKey.size()); std::string strValue; leveldb::Status status = pdb->Get(readoptions, slKey, &strValue); if (!status.ok()) { if (status.IsNotFound()) return false; LogPrintf("LevelDB read failure: %s\n", status.ToString()); dbwrapper_private::HandleError(status); } return true; } template bool Erase(const K &key, bool fSync = false) { CDBBatch batch(*this); batch.Erase(key); return WriteBatch(batch, fSync); } bool WriteBatch(CDBBatch &batch, bool fSync = false); // not available for LevelDB; provide for compatibility with BDB bool Flush() { return true; } bool Sync() { CDBBatch batch(*this); return WriteBatch(batch, true); } CDBIterator *NewIterator() { return new CDBIterator(*this, pdb->NewIterator(iteroptions)); } /** * Return true if the database managed by this class contains no entries. 
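// The CDBBatch::Write accounting above mirrors LevelDB's record layout: one
// header byte, a length varint per string (1 byte for payloads up to 127
// bytes, 2 bytes up to 16383), then the raw bytes. A worked example, assuming
// a 20-byte key and a 300-byte value:
//
//     size_t key_len = 20, value_len = 300;
//     size_t estimate = 3                 // header byte + two 1-byte varints
//                     + (key_len > 127)   // 0: the key varint stays 1 byte
//                     + key_len
//                     + (value_len > 127) // 1: the value varint needs 2 bytes
//                     + value_len;        // estimate == 324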
*/ bool IsEmpty(); template size_t EstimateSize(const K &key_begin, const K &key_end) const { CDataStream ssKey1(SER_DISK, CLIENT_VERSION), ssKey2(SER_DISK, CLIENT_VERSION); ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey1 << key_begin; ssKey2 << key_end; leveldb::Slice slKey1(ssKey1.data(), ssKey1.size()); leveldb::Slice slKey2(ssKey2.data(), ssKey2.size()); uint64_t size = 0; leveldb::Range range(slKey1, slKey2); pdb->GetApproximateSizes(&range, 1, &size); return size; } /** * Compact a certain range of keys in the database. */ template void CompactRange(const K &key_begin, const K &key_end) const { CDataStream ssKey1(SER_DISK, CLIENT_VERSION), ssKey2(SER_DISK, CLIENT_VERSION); ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey1 << key_begin; ssKey2 << key_end; leveldb::Slice slKey1(ssKey1.data(), ssKey1.size()); leveldb::Slice slKey2(ssKey2.data(), ssKey2.size()); pdb->CompactRange(&slKey1, &slKey2); } }; #endif // BITCOIN_DBWRAPPER_H diff --git a/src/hash.h b/src/hash.h index 6adec90b82..5f0af10c71 100644 --- a/src/hash.h +++ b/src/hash.h @@ -1,254 +1,254 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_HASH_H #define BITCOIN_HASH_H #include "crypto/ripemd160.h" #include "crypto/sha256.h" #include "prevector.h" #include "serialize.h" #include "uint256.h" #include "version.h" #include typedef uint256 ChainCode; /** A hasher class for Bitcoin's 256-bit hash (double SHA-256). */ class CHash256 { private: CSHA256 sha; public: static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE; void Finalize(uint8_t hash[OUTPUT_SIZE]) { uint8_t buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); } CHash256 &Write(const uint8_t *data, size_t len) { sha.Write(data, len); return *this; } CHash256 &Reset() { sha.Reset(); return *this; } }; /** A hasher class for Bitcoin's 160-bit hash (SHA-256 + RIPEMD-160). */ class CHash160 { private: CSHA256 sha; public: static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE; void Finalize(uint8_t hash[OUTPUT_SIZE]) { uint8_t buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); } CHash160 &Write(const uint8_t *data, size_t len) { sha.Write(data, len); return *this; } CHash160 &Reset() { sha.Reset(); return *this; } }; /** Compute the 256-bit hash of an object. */ template inline uint256 Hash(const T1 pbegin, const T1 pend) { static const uint8_t pblank[1] = {}; uint256 result; CHash256() .Write(pbegin == pend ? pblank : (const uint8_t *)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) .Finalize((uint8_t *)&result); return result; } /** Compute the 256-bit hash of the concatenation of two objects. */ template inline uint256 Hash(const T1 p1begin, const T1 p1end, const T2 p2begin, const T2 p2end) { static const uint8_t pblank[1] = {}; uint256 result; CHash256() .Write(p1begin == p1end ? pblank : (const uint8_t *)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0])) .Write(p2begin == p2end ? pblank : (const uint8_t *)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0])) .Finalize((uint8_t *)&result); return result; } /** Compute the 256-bit hash of the concatenation of three objects. 
*/ template inline uint256 Hash(const T1 p1begin, const T1 p1end, const T2 p2begin, const T2 p2end, const T3 p3begin, const T3 p3end) { static const uint8_t pblank[1] = {}; uint256 result; CHash256() .Write(p1begin == p1end ? pblank : (const uint8_t *)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0])) .Write(p2begin == p2end ? pblank : (const uint8_t *)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0])) .Write(p3begin == p3end ? pblank : (const uint8_t *)&p3begin[0], (p3end - p3begin) * sizeof(p3begin[0])) .Finalize((uint8_t *)&result); return result; } /** Compute the 160-bit hash an object. */ template inline uint160 Hash160(const T1 pbegin, const T1 pend) { static uint8_t pblank[1] = {}; uint160 result; CHash160() .Write(pbegin == pend ? pblank : (const uint8_t *)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) .Finalize((uint8_t *)&result); return result; } /** Compute the 160-bit hash of a vector. */ inline uint160 Hash160(const std::vector &vch) { return Hash160(vch.begin(), vch.end()); } /** Compute the 160-bit hash of a vector. */ template inline uint160 Hash160(const prevector &vch) { return Hash160(vch.begin(), vch.end()); } /** A writer stream (for serialization) that computes a 256-bit hash. */ class CHashWriter { private: CHash256 ctx; const int nType; const int nVersion; public: CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {} int GetType() const { return nType; } int GetVersion() const { return nVersion; } void write(const char *pch, size_t size) { ctx.Write((const uint8_t *)pch, size); } // invalidates the object uint256 GetHash() { uint256 result; ctx.Finalize((uint8_t *)&result); return result; } template CHashWriter &operator<<(const T &obj) { // Serialize to this stream ::Serialize(*this, obj); return (*this); } }; /** * Reads data from an underlying stream, while hashing the read data. */ template class CHashVerifier : public CHashWriter { private: Source *source; public: - CHashVerifier(Source *source_) + explicit CHashVerifier(Source *source_) : CHashWriter(source_->GetType(), source_->GetVersion()), source(source_) {} void read(char *pch, size_t nSize) { source->read(pch, nSize); this->write(pch, nSize); } void ignore(size_t nSize) { char data[1024]; while (nSize > 0) { size_t now = std::min(nSize, 1024); read(data, now); nSize -= now; } } template CHashVerifier &operator>>(T &obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } }; /** Compute the 256-bit hash of an object's serialization. */ template uint256 SerializeHash(const T &obj, int nType = SER_GETHASH, int nVersion = PROTOCOL_VERSION) { CHashWriter ss(nType, nVersion); ss << obj; return ss.GetHash(); } uint32_t MurmurHash3(uint32_t nHashSeed, const std::vector &vDataToHash); void BIP32Hash(const ChainCode &chainCode, uint32_t nChild, uint8_t header, const uint8_t data[32], uint8_t output[64]); /** SipHash-2-4 */ class CSipHasher { private: uint64_t v[4]; uint64_t tmp; int count; public: /** Construct a SipHash calculator initialized with 128-bit key (k0, k1) */ CSipHasher(uint64_t k0, uint64_t k1); /** * Hash a 64-bit integer worth of data. * It is treated as if this was the little-endian interpretation of 8 bytes. * This function can only be used when a multiple of 8 bytes have been * written so far. */ CSipHasher &Write(uint64_t data); /** Hash arbitrary bytes. */ CSipHasher &Write(const uint8_t *data, size_t size); /** Compute the 64-bit SipHash-2-4 of the data written so far. The object * remains untouched. 
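// [Editor's illustrative sketch, not part of the diff] Typical use of the
// hashing helpers declared above; HashExample is a hypothetical function.
// Hash(begin, end) double-SHA256s a raw byte range, while CHashWriter hashes
// an object's serialization (for a vector this includes a compact-size length
// prefix, so the two digests intentionally differ).
#include "hash.h"
#include <cstdint>
#include <vector>

static uint256 HashExample() {
    const std::vector<uint8_t> payload{0xde, 0xad, 0xbe, 0xef};

    // Double SHA-256 of the raw bytes.
    uint256 h1 = Hash(payload.begin(), payload.end());

    // Stream form, equivalent to SerializeHash(payload).
    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << payload;
    uint256 h2 = ss.GetHash();

    (void)h1; // kept only to show both forms side by side
    return h2;
}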
*/ uint64_t Finalize() const; }; /** Optimized SipHash-2-4 implementation for uint256. * * It is identical to: * SipHasher(k0, k1) * .Write(val.GetUint64(0)) * .Write(val.GetUint64(1)) * .Write(val.GetUint64(2)) * .Write(val.GetUint64(3)) * .Finalize() */ uint64_t SipHashUint256(uint64_t k0, uint64_t k1, const uint256 &val); uint64_t SipHashUint256Extra(uint64_t k0, uint64_t k1, const uint256 &val, uint32_t extra); #endif // BITCOIN_HASH_H diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 6be1713c1e..86ebd23bcb 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -1,413 +1,413 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "httprpc.h" #include "base58.h" #include "chainparams.h" #include "config.h" #include "crypto/hmac_sha256.h" #include "httpserver.h" #include "random.h" #include "rpc/protocol.h" #include "rpc/server.h" #include "sync.h" #include "ui_interface.h" #include "util.h" #include "utilstrencodings.h" #include // boost::trim #include /** WWW-Authenticate to present with 401 Unauthorized response */ static const char *WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\""; /** RPC auth failure delay to make brute-forcing expensive */ static const int64_t RPC_AUTH_BRUTE_FORCE_DELAY = 250; /** * Simple one-shot callback timer to be used by the RPC mechanism to e.g. * re-lock the wallet. */ class HTTPRPCTimer : public RPCTimerBase { public: HTTPRPCTimer(struct event_base *eventBase, std::function &func, int64_t millis) : ev(eventBase, false, func) { struct timeval tv; tv.tv_sec = millis / 1000; tv.tv_usec = (millis % 1000) * 1000; ev.trigger(&tv); } private: HTTPEvent ev; }; class HTTPRPCTimerInterface : public RPCTimerInterface { public: - HTTPRPCTimerInterface(struct event_base *_base) : base(_base) {} + explicit HTTPRPCTimerInterface(struct event_base *_base) : base(_base) {} const char *Name() override { return "HTTP"; } RPCTimerBase *NewTimer(std::function &func, int64_t millis) override { return new HTTPRPCTimer(base, func, millis); } private: struct event_base *base; }; /* Stored RPC timer interface (for unregistration) */ static HTTPRPCTimerInterface *httpRPCTimerInterface = nullptr; static void JSONErrorReply(HTTPRequest *req, const UniValue &objError, const UniValue &id) { // Send error reply from json-rpc error object. int nStatus = HTTP_INTERNAL_SERVER_ERROR; int code = find_value(objError, "code").get_int(); if (code == RPC_INVALID_REQUEST) nStatus = HTTP_BAD_REQUEST; else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND; std::string strReply = JSONRPCReply(NullUniValue, objError, id); req->WriteHeader("Content-Type", "application/json"); req->WriteReply(nStatus, strReply); } /* * This function checks username and password against -rpcauth entries from * config file. 
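// [Editor's illustrative sketch, not part of the diff] A -rpcauth config entry
// has the form rpcauth=<user>:<salt>$<hash>, and multiUserAuthorized() below
// accepts a password when hex(HMAC-SHA256(key = salt, message = password))
// matches the stored <hash>. RpcAuthHash is a hypothetical helper showing
// that derivation in isolation.
#include "crypto/hmac_sha256.h"
#include "utilstrencodings.h"
#include <cstdint>
#include <string>
#include <vector>

static std::string RpcAuthHash(const std::string &salt,
                               const std::string &password) {
    static const unsigned int OUT_SIZE = 32; // HMAC-SHA256 output length
    uint8_t out[OUT_SIZE];
    CHMAC_SHA256(reinterpret_cast<const uint8_t *>(salt.data()), salt.size())
        .Write(reinterpret_cast<const uint8_t *>(password.data()),
               password.size())
        .Finalize(out);
    return HexStr(std::vector<uint8_t>(out, out + OUT_SIZE));
}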
*/ static bool multiUserAuthorized(std::string strUserPass) { if (strUserPass.find(":") == std::string::npos) { return false; } std::string strUser = strUserPass.substr(0, strUserPass.find(":")); std::string strPass = strUserPass.substr(strUserPass.find(":") + 1); for (const std::string &strRPCAuth : gArgs.GetArgs("-rpcauth")) { // Search for multi-user login/pass "rpcauth" from config std::vector vFields; boost::split(vFields, strRPCAuth, boost::is_any_of(":$")); if (vFields.size() != 3) { // Incorrect formatting in config file continue; } std::string strName = vFields[0]; if (!TimingResistantEqual(strName, strUser)) { continue; } std::string strSalt = vFields[1]; std::string strHash = vFields[2]; static const unsigned int KEY_SIZE = 32; uint8_t out[KEY_SIZE]; CHMAC_SHA256(reinterpret_cast(strSalt.c_str()), strSalt.size()) .Write(reinterpret_cast(strPass.c_str()), strPass.size()) .Finalize(out); std::vector hexvec(out, out + KEY_SIZE); std::string strHashFromPass = HexStr(hexvec); if (TimingResistantEqual(strHashFromPass, strHash)) { return true; } } return false; } static bool RPCAuthorized(Config &config, const std::string &strAuth, std::string &strAuthUsernameOut) { // Belt-and-suspenders measure if InitRPCAuthentication was not called. if (config.GetRPCUserAndPassword().empty()) { return false; } if (strAuth.substr(0, 6) != "Basic ") { return false; } std::string strUserPass64 = strAuth.substr(6); boost::trim(strUserPass64); std::string strUserPass = DecodeBase64(strUserPass64); if (strUserPass.find(":") != std::string::npos) { strAuthUsernameOut = strUserPass.substr(0, strUserPass.find(":")); } // Check if authorized under single-user field if (TimingResistantEqual(strUserPass, config.GetRPCUserAndPassword())) { return true; } return multiUserAuthorized(strUserPass); } static bool checkCORS(Config &config, HTTPRequest *req) { // https://www.w3.org/TR/cors/#resource-requests // 1. If the Origin header is not present terminate this set of steps. // The request is outside the scope of this specification. std::pair origin = req->GetHeader("origin"); if (!origin.first) { return false; } // 2. If the value of the Origin header is not a case-sensitive match for // any of the values in list of origins do not set any additional headers // and terminate this set of steps. // Note: Always matching is acceptable since the list of origins can be // unbounded. if (origin.second != config.GetRPCCORSDomain()) { return false; } if (req->GetRequestMethod() == HTTPRequest::OPTIONS) { // 6.2 Preflight Request // In response to a preflight request the resource indicates which // methods and headers (other than simple methods and simple // headers) it is willing to handle and whether it supports // credentials. // Resources must use the following set of steps to determine which // additional headers to use in the response: // 3. Let method be the value as result of parsing the // Access-Control-Request-Method header. // If there is no Access-Control-Request-Method header or if parsing // failed, do not set any additional headers and terminate this set // of steps. The request is outside the scope of this specification. std::pair method = req->GetHeader("access-control-request-method"); if (!method.first) { return false; } // 4. Let header field-names be the values as result of parsing // the Access-Control-Request-Headers headers. // If there are no Access-Control-Request-Headers headers let header // field-names be the empty list. 
// If parsing failed do not set any additional headers and terminate // this set of steps. The request is outside the scope of this // specification. std::pair header_field_names = req->GetHeader("access-control-request-headers"); // 5. If method is not a case-sensitive match for any of the // values in list of methods do not set any additional headers // and terminate this set of steps. // Note: Always matching is acceptable since the list of methods // can be unbounded. if (method.second != "POST") { return false; } // 6. If any of the header field-names is not a ASCII case- // insensitive match for any of the values in list of headers do not // set any additional headers and terminate this set of steps. // Note: Always matching is acceptable since the list of headers can // be unbounded. const std::string &list_of_headers = "authorization,content-type"; // 7. If the resource supports credentials add a single // Access-Control-Allow-Origin header, with the value of the Origin // header as value, and add a single // Access-Control-Allow-Credentials header with the case-sensitive // string "true" as value. req->WriteHeader("Access-Control-Allow-Origin", origin.second); req->WriteHeader("Access-Control-Allow-Credentials", "true"); // 8. Optionally add a single Access-Control-Max-Age header with as // value the amount of seconds the user agent is allowed to cache // the result of the request. // 9. If method is a simple method this step may be skipped. // Add one or more Access-Control-Allow-Methods headers consisting // of (a subset of) the list of methods. // If a method is a simple method it does not need to be listed, but // this is not prohibited. // Note: Since the list of methods can be unbounded, simply // returning the method indicated by // Access-Control-Request-Method (if supported) can be enough. req->WriteHeader("Access-Control-Allow-Methods", method.second); // 10. If each of the header field-names is a simple header and none // is Content-Type, this step may be skipped. // Add one or more Access-Control-Allow-Headers headers consisting // of (a subset of) the list of headers. req->WriteHeader("Access-Control-Allow-Headers", header_field_names.first ? header_field_names.second : list_of_headers); req->WriteReply(HTTP_OK); return true; } // 6.1 Simple Cross-Origin Request, Actual Request, and Redirects // In response to a simple cross-origin request or actual request the // resource indicates whether or not to share the response. // If the resource has been relocated, it indicates whether to share its // new URL. // Resources must use the following set of steps to determine which // additional headers to use in the response: // 3. If the resource supports credentials add a single // Access-Control-Allow-Origin header, with the value of the Origin // header as value, and add a single Access-Control-Allow-Credentials // header with the case-sensitive string "true" as value. req->WriteHeader("Access-Control-Allow-Origin", origin.second); req->WriteHeader("Access-Control-Allow-Credentials", "true"); // 4. If the list of exposed headers is not empty add one or more // Access-Control-Expose-Headers headers, with as values the header // field names given in the list of exposed headers. 
req->WriteHeader("Access-Control-Expose-Headers", "WWW-Authenticate"); return false; } bool HTTPRPCRequestProcessor::ProcessHTTPRequest(HTTPRequest *req) { // First, check and/or set CORS headers if (checkCORS(config, req)) { return true; } // JSONRPC handles only POST if (req->GetRequestMethod() != HTTPRequest::POST) { req->WriteReply(HTTP_BAD_METHOD, "JSONRPC server handles only POST requests"); return false; } // Check authorization std::pair authHeader = req->GetHeader("authorization"); if (!authHeader.first) { req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA); req->WriteReply(HTTP_UNAUTHORIZED); return false; } JSONRPCRequest jreq; if (!RPCAuthorized(config, authHeader.second, jreq.authUser)) { LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", req->GetPeer().ToString()); /** * Deter brute-forcing. * If this results in a DoS the user really shouldn't have their RPC * port exposed. */ MilliSleep(RPC_AUTH_BRUTE_FORCE_DELAY); req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA); req->WriteReply(HTTP_UNAUTHORIZED); return false; } try { // Parse request UniValue valRequest; if (!valRequest.read(req->ReadBody())) throw JSONRPCError(RPC_PARSE_ERROR, "Parse error"); // Set the URI jreq.URI = req->GetURI(); std::string strReply; // singleton request if (valRequest.isObject()) { jreq.parse(valRequest); UniValue result = rpcServer.ExecuteCommand(config, jreq); // Send reply strReply = JSONRPCReply(result, NullUniValue, jreq.id); } else if (valRequest.isArray()) { // array of requests strReply = JSONRPCExecBatch(config, rpcServer, jreq, valRequest.get_array()); } else { throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error"); } req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strReply); } catch (const UniValue &objError) { JSONErrorReply(req, objError, jreq.id); return false; } catch (const std::exception &e) { JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); return false; } return true; } static bool InitRPCAuthentication(Config &config) { if (gArgs.GetArg("-rpcpassword", "") == "") { LogPrintf("No rpcpassword set - using random cookie authentication\n"); std::string generatedUserAndPassword; if (!GenerateAuthCookie(&generatedUserAndPassword)) { // Same message as AbortNode. uiInterface.ThreadSafeMessageBox( _("Error: A fatal internal error occurred, see debug.log for " "details"), "", CClientUIInterface::MSG_ERROR); return false; } config.SetRPCUserAndPassword(generatedUserAndPassword); } else { LogPrintf("Config options rpcuser and rpcpassword will soon be " "deprecated. Locally-run instances may remove rpcuser to use " "cookie-based auth, or may be replaced with rpcauth. 
Please " "see share/rpcuser for rpcauth auth generation.\n"); config.SetRPCUserAndPassword(gArgs.GetArg("-rpcuser", "") + ":" + gArgs.GetArg("-rpcpassword", "")); } config.SetRPCCORSDomain(gArgs.GetArg("-rpccorsdomain", "")); return true; } bool StartHTTPRPC(Config &config, HTTPRPCRequestProcessor &httpRPCRequestProcessor) { LogPrint(BCLog::RPC, "Starting HTTP RPC server\n"); if (!InitRPCAuthentication(config)) { return false; } const std::function &rpcFunction = std::bind(&HTTPRPCRequestProcessor::DelegateHTTPRequest, &httpRPCRequestProcessor, std::placeholders::_2); RegisterHTTPHandler("/", true, rpcFunction); #ifdef ENABLE_WALLET // ifdef can be removed once we switch to better endpoint support and API // versioning RegisterHTTPHandler("/wallet/", false, rpcFunction); #endif assert(EventBase()); httpRPCTimerInterface = new HTTPRPCTimerInterface(EventBase()); RPCSetTimerInterface(httpRPCTimerInterface); return true; } void InterruptHTTPRPC() { LogPrint(BCLog::RPC, "Interrupting HTTP RPC server\n"); } void StopHTTPRPC() { LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n"); UnregisterHTTPHandler("/", true); if (httpRPCTimerInterface) { RPCUnsetTimerInterface(httpRPCTimerInterface); delete httpRPCTimerInterface; httpRPCTimerInterface = nullptr; } } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 412fadbd7d..7f09da48b2 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -1,669 +1,669 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Copyright (c) 2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "httpserver.h" #include "chainparamsbase.h" #include "compat.h" #include "config.h" #include "netbase.h" #include "rpc/protocol.h" // For HTTP status codes #include "sync.h" #include "ui_interface.h" #include "util.h" #include "utilstrencodings.h" #include #include #include #include #include #include #include #include #include #ifdef EVENT__HAVE_NETINET_IN_H #include #ifdef _XOPEN_SOURCE_EXTENDED #include #endif #endif #include #include #include #include /** Maximum size of http request (request line + headers) */ static const size_t MAX_HEADERS_SIZE = 8192; /** * Maximum HTTP post body size. Twice the maximum block size is added to this * value in practice. */ static const size_t MIN_SUPPORTED_BODY_SIZE = 0x02000000; /** HTTP request work item */ class HTTPWorkItem final : public HTTPClosure { public: HTTPWorkItem(Config &_config, std::unique_ptr _req, const std::string &_path, const HTTPRequestHandler &_func) : req(std::move(_req)), path(_path), func(_func), config(&_config) {} void operator()() override { func(*config, req.get(), path); } std::unique_ptr req; private: std::string path; HTTPRequestHandler func; Config *config; }; /** * Simple work queue for distributing work over multiple threads. * Work items are simply callable objects. 
*/ template class WorkQueue { private: /** Mutex protects entire object */ CWaitableCriticalSection cs; std::condition_variable cond; std::deque> queue; bool running; size_t maxDepth; int numThreads; /** RAII object to keep track of number of running worker threads */ class ThreadCounter { public: WorkQueue &wq; - ThreadCounter(WorkQueue &w) : wq(w) { + explicit ThreadCounter(WorkQueue &w) : wq(w) { std::lock_guard lock(wq.cs); wq.numThreads += 1; } ~ThreadCounter() { std::lock_guard lock(wq.cs); wq.numThreads -= 1; wq.cond.notify_all(); } }; public: - WorkQueue(size_t _maxDepth) + explicit WorkQueue(size_t _maxDepth) : running(true), maxDepth(_maxDepth), numThreads(0) {} /** Precondition: worker threads have all stopped (call WaitExit) */ ~WorkQueue() {} /** Enqueue a work item */ bool Enqueue(WorkItem *item) { LOCK(cs); if (queue.size() >= maxDepth) { return false; } queue.emplace_back(std::unique_ptr(item)); cond.notify_one(); return true; } /** Thread function */ void Run() { ThreadCounter count(*this); while (true) { std::unique_ptr i; { WAIT_LOCK(cs, lock); while (running && queue.empty()) cond.wait(lock); if (!running) break; i = std::move(queue.front()); queue.pop_front(); } (*i)(); } } /** Interrupt and exit loops */ void Interrupt() { LOCK(cs); running = false; cond.notify_all(); } /** Wait for worker threads to exit */ void WaitExit() { std::unique_lock lock(cs); while (numThreads > 0) cond.wait(lock); } }; struct HTTPPathHandler { HTTPPathHandler() {} HTTPPathHandler(std::string _prefix, bool _exactMatch, HTTPRequestHandler _handler) : prefix(_prefix), exactMatch(_exactMatch), handler(_handler) {} std::string prefix; bool exactMatch; HTTPRequestHandler handler; }; /** HTTP module state */ //! libevent event loop static struct event_base *eventBase = nullptr; //! HTTP server struct evhttp *eventHTTP = nullptr; //! List of subnets to allow RPC connections from static std::vector rpc_allow_subnets; //! Work queue for handling longer requests off the event loop thread static WorkQueue *workQueue = nullptr; //! Handlers for (sub)paths std::vector pathHandlers; //! Bound listening sockets std::vector boundSockets; /** Check if a network address is allowed to access the HTTP server */ static bool ClientAllowed(const CNetAddr &netaddr) { if (!netaddr.IsValid()) return false; for (const CSubNet &subnet : rpc_allow_subnets) if (subnet.Match(netaddr)) return true; return false; } /** Initialize ACL list for HTTP server */ static bool InitHTTPAllowList() { rpc_allow_subnets.clear(); CNetAddr localv4; CNetAddr localv6; LookupHost("127.0.0.1", localv4, false); LookupHost("::1", localv6, false); // always allow IPv4 local subnet. rpc_allow_subnets.push_back(CSubNet(localv4, 8)); // always allow IPv6 localhost. rpc_allow_subnets.push_back(CSubNet(localv6)); for (const std::string &strAllow : gArgs.GetArgs("-rpcallowip")) { CSubNet subnet; LookupSubNet(strAllow.c_str(), subnet); if (!subnet.IsValid()) { uiInterface.ThreadSafeMessageBox( strprintf("Invalid -rpcallowip subnet specification: %s. " "Valid are a single IP (e.g. 1.2.3.4), a " "network/netmask (e.g. 1.2.3.4/255.255.255.0) or a " "network/CIDR (e.g. 
1.2.3.4/24).", strAllow), "", CClientUIInterface::MSG_ERROR); return false; } rpc_allow_subnets.push_back(subnet); } std::string strAllowed; for (const CSubNet &subnet : rpc_allow_subnets) { strAllowed += subnet.ToString() + " "; } LogPrint(BCLog::HTTP, "Allowing HTTP connections from: %s\n", strAllowed); return true; } /** HTTP request method as string - use for logging only */ static std::string RequestMethodString(HTTPRequest::RequestMethod m) { switch (m) { case HTTPRequest::GET: return "GET"; case HTTPRequest::POST: return "POST"; case HTTPRequest::HEAD: return "HEAD"; case HTTPRequest::PUT: return "PUT"; case HTTPRequest::OPTIONS: return "OPTIONS"; default: return "unknown"; } } /** HTTP request callback */ static void http_request_cb(struct evhttp_request *req, void *arg) { Config &config = *reinterpret_cast(arg); std::unique_ptr hreq(new HTTPRequest(req)); LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n", RequestMethodString(hreq->GetRequestMethod()), hreq->GetURI(), hreq->GetPeer().ToString()); // Early address-based allow check if (!ClientAllowed(hreq->GetPeer())) { hreq->WriteReply(HTTP_FORBIDDEN); return; } // Early reject unknown HTTP methods if (hreq->GetRequestMethod() == HTTPRequest::UNKNOWN) { hreq->WriteReply(HTTP_BADMETHOD); return; } // Find registered handler for prefix std::string strURI = hreq->GetURI(); std::string path; std::vector::const_iterator i = pathHandlers.begin(); std::vector::const_iterator iend = pathHandlers.end(); for (; i != iend; ++i) { bool match = false; if (i->exactMatch) { match = (strURI == i->prefix); } else { match = (strURI.substr(0, i->prefix.size()) == i->prefix); } if (match) { path = strURI.substr(i->prefix.size()); break; } } // Dispatch to worker thread. if (i != iend) { std::unique_ptr item( new HTTPWorkItem(config, std::move(hreq), path, i->handler)); assert(workQueue); if (workQueue->Enqueue(item.get())) { /* if true, queue took ownership */ item.release(); } else { LogPrintf("WARNING: request rejected because http work queue depth " "exceeded, it can be increased with the -rpcworkqueue= " "setting\n"); item->req->WriteReply(HTTP_INTERNAL, "Work queue depth exceeded"); } } else { hreq->WriteReply(HTTP_NOTFOUND); } } /** Callback to reject HTTP requests after shutdown. */ static void http_reject_request_cb(struct evhttp_request *req, void *) { LogPrint(BCLog::HTTP, "Rejecting request while shutting down\n"); evhttp_send_error(req, HTTP_SERVUNAVAIL, nullptr); } /** Event dispatcher thread */ static bool ThreadHTTP(struct event_base *base, struct evhttp *http) { RenameThread("bitcoin-http"); LogPrint(BCLog::HTTP, "Entering http event loop\n"); event_base_dispatch(base); // Event loop will be interrupted by InterruptHTTPServer() LogPrint(BCLog::HTTP, "Exited http event loop\n"); return event_base_got_break(base) == 0; } /** Bind HTTP server to specified addresses */ static bool HTTPBindAddresses(struct evhttp *http) { int defaultPort = gArgs.GetArg("-rpcport", BaseParams().RPCPort()); std::vector> endpoints; // Determine what addresses to bind to if (!gArgs.IsArgSet("-rpcallowip")) { // Default to loopback if not allowing external IPs. endpoints.push_back(std::make_pair("::1", defaultPort)); endpoints.push_back(std::make_pair("127.0.0.1", defaultPort)); if (gArgs.IsArgSet("-rpcbind")) { LogPrintf("WARNING: option -rpcbind was ignored because " "-rpcallowip was not specified, refusing to allow " "everyone to connect\n"); } } else if (gArgs.IsArgSet("-rpcbind")) { // Specific bind address. 
for (const std::string &strRPCBind : gArgs.GetArgs("-rpcbind")) { int port = defaultPort; std::string host; SplitHostPort(strRPCBind, port, host); endpoints.push_back(std::make_pair(host, port)); } } else { // No specific bind address specified, bind to any. endpoints.push_back(std::make_pair("::", defaultPort)); endpoints.push_back(std::make_pair("0.0.0.0", defaultPort)); } // Bind addresses for (std::vector>::iterator i = endpoints.begin(); i != endpoints.end(); ++i) { LogPrint(BCLog::HTTP, "Binding RPC on address %s port %i\n", i->first, i->second); evhttp_bound_socket *bind_handle = evhttp_bind_socket_with_handle( http, i->first.empty() ? nullptr : i->first.c_str(), i->second); if (bind_handle) { boundSockets.push_back(bind_handle); } else { LogPrintf("Binding RPC on address %s port %i failed.\n", i->first, i->second); } } return !boundSockets.empty(); } /** Simple wrapper to set thread name and run work queue */ static void HTTPWorkQueueRun(WorkQueue *queue) { RenameThread("bitcoin-httpworker"); queue->Run(); } /** libevent event log callback */ static void libevent_log_cb(int severity, const char *msg) { #ifndef EVENT_LOG_WARN // EVENT_LOG_WARN was added in 2.0.19; but before then _EVENT_LOG_WARN existed. #define EVENT_LOG_WARN _EVENT_LOG_WARN #endif // Log warn messages and higher without debug category. if (severity >= EVENT_LOG_WARN) { LogPrintf("libevent: %s\n", msg); } else { LogPrint(BCLog::LIBEVENT, "libevent: %s\n", msg); } } bool InitHTTPServer(Config &config) { struct evhttp *http = 0; struct event_base *base = 0; if (!InitHTTPAllowList()) return false; if (gArgs.GetBoolArg("-rpcssl", false)) { uiInterface.ThreadSafeMessageBox( "SSL mode for RPC (-rpcssl) is no longer supported.", "", CClientUIInterface::MSG_ERROR); return false; } // Redirect libevent's logging to our own log event_set_log_callback(&libevent_log_cb); #if LIBEVENT_VERSION_NUMBER >= 0x02010100 // If -debug=libevent, set full libevent debugging. // Otherwise, disable all libevent debugging. if (LogAcceptCategory(BCLog::LIBEVENT)) { event_enable_debug_logging(EVENT_DBG_ALL); } else { event_enable_debug_logging(EVENT_DBG_NONE); } #endif #ifdef WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif // XXX RAII: Create a new event_base for Libevent use base = event_base_new(); if (!base) { LogPrintf("Couldn't create an event_base: exiting\n"); return false; } // XXX RAII: Create a new evhttp object to handle requests http = evhttp_new(base); if (!http) { LogPrintf("couldn't create evhttp. 
Exiting.\n"); event_base_free(base); return false; } evhttp_set_timeout( http, gArgs.GetArg("-rpcservertimeout", DEFAULT_HTTP_SERVER_TIMEOUT)); evhttp_set_max_headers_size(http, MAX_HEADERS_SIZE); evhttp_set_max_body_size( http, MIN_SUPPORTED_BODY_SIZE + 2 * config.GetMaxBlockSize()); evhttp_set_gencb(http, http_request_cb, &config); // Only POST and OPTIONS are supported, but we return HTTP 405 for the // others evhttp_set_allowed_methods(http, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD | EVHTTP_REQ_PUT | EVHTTP_REQ_DELETE | EVHTTP_REQ_OPTIONS); if (!HTTPBindAddresses(http)) { LogPrintf("Unable to bind any endpoint for RPC server\n"); evhttp_free(http); event_base_free(base); return false; } LogPrint(BCLog::HTTP, "Initialized HTTP server\n"); int workQueueDepth = std::max( (long)gArgs.GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth); workQueue = new WorkQueue(workQueueDepth); eventBase = base; eventHTTP = http; return true; } std::thread threadHTTP; std::future threadResult; bool StartHTTPServer() { LogPrint(BCLog::HTTP, "Starting HTTP server\n"); int rpcThreads = std::max((long)gArgs.GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); LogPrintf("HTTP: starting %d worker threads\n", rpcThreads); std::packaged_task task(ThreadHTTP); threadResult = task.get_future(); threadHTTP = std::thread(std::move(task), eventBase, eventHTTP); for (int i = 0; i < rpcThreads; i++) { std::thread rpc_worker(HTTPWorkQueueRun, workQueue); rpc_worker.detach(); } return true; } void InterruptHTTPServer() { LogPrint(BCLog::HTTP, "Interrupting HTTP server\n"); if (eventHTTP) { // Unlisten sockets for (evhttp_bound_socket *socket : boundSockets) { evhttp_del_accept_socket(eventHTTP, socket); } // Reject requests on current connections evhttp_set_gencb(eventHTTP, http_reject_request_cb, nullptr); } if (workQueue) workQueue->Interrupt(); } void StopHTTPServer() { LogPrint(BCLog::HTTP, "Stopping HTTP server\n"); if (workQueue) { LogPrint(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n"); workQueue->WaitExit(); delete workQueue; } if (eventBase) { LogPrint(BCLog::HTTP, "Waiting for HTTP event thread to exit\n"); // Give event loop a few seconds to exit (to send back last RPC // responses), then break it. Before this was solved with // event_base_loopexit, but that didn't work as expected in at least // libevent 2.0.21 and always introduced a delay. In libevent master // that appears to be solved, so in the future that solution could be // used again (if desirable). 
// (see discussion in https://github.com/bitcoin/bitcoin/pull/6990) if (threadResult.valid() && threadResult.wait_for(std::chrono::milliseconds(2000)) == std::future_status::timeout) { LogPrintf("HTTP event loop did not exit within allotted time, " "sending loopbreak\n"); event_base_loopbreak(eventBase); } threadHTTP.join(); } if (eventHTTP) { evhttp_free(eventHTTP); eventHTTP = nullptr; } if (eventBase) { event_base_free(eventBase); eventBase = nullptr; } LogPrint(BCLog::HTTP, "Stopped HTTP server\n"); } struct event_base *EventBase() { return eventBase; } static void httpevent_callback_fn(evutil_socket_t, short, void *data) { // Static handler: simply call inner handler HTTPEvent *self = ((HTTPEvent *)data); self->handler(); if (self->deleteWhenTriggered) delete self; } HTTPEvent::HTTPEvent(struct event_base *base, bool _deleteWhenTriggered, const std::function &_handler) : deleteWhenTriggered(_deleteWhenTriggered), handler(_handler) { ev = event_new(base, -1, 0, httpevent_callback_fn, this); assert(ev); } HTTPEvent::~HTTPEvent() { event_free(ev); } void HTTPEvent::trigger(struct timeval *tv) { if (tv == nullptr) { // Immediately trigger event in main thread. event_active(ev, 0, 0); } else { // Trigger after timeval passed. evtimer_add(ev, tv); } } HTTPRequest::HTTPRequest(struct evhttp_request *_req) : req(_req), replySent(false) {} HTTPRequest::~HTTPRequest() { if (!replySent) { // Keep track of whether reply was sent to avoid request leaks LogPrintf("%s: Unhandled request\n", __func__); WriteReply(HTTP_INTERNAL, "Unhandled request"); } // evhttpd cleans up the request, as long as a reply was sent. } std::pair HTTPRequest::GetHeader(const std::string &hdr) { const struct evkeyvalq *headers = evhttp_request_get_input_headers(req); assert(headers); const char *val = evhttp_find_header(headers, hdr.c_str()); if (val) return std::make_pair(true, val); else return std::make_pair(false, ""); } std::string HTTPRequest::ReadBody() { struct evbuffer *buf = evhttp_request_get_input_buffer(req); if (!buf) return ""; size_t size = evbuffer_get_length(buf); /** * Trivial implementation: if this is ever a performance bottleneck, * internal copying can be avoided in multi-segment buffers by using * evbuffer_peek and an awkward loop. Though in that case, it'd be even * better to not copy into an intermediate string but use a stream * abstraction to consume the evbuffer on the fly in the parsing algorithm. */ const char *data = (const char *)evbuffer_pullup(buf, size); // returns nullptr in case of empty buffer. if (!data) { return ""; } std::string rv(data, size); evbuffer_drain(buf, size); return rv; } void HTTPRequest::WriteHeader(const std::string &hdr, const std::string &value) { struct evkeyvalq *headers = evhttp_request_get_output_headers(req); assert(headers); evhttp_add_header(headers, hdr.c_str(), value.c_str()); } /** * Closure sent to main thread to request a reply to be sent to a HTTP request. * Replies must be sent in the main loop in the main http thread, this cannot be * done from worker threads. 
*/ void HTTPRequest::WriteReply(int nStatus, const std::string &strReply) { assert(!replySent && req); // Send event to main http thread to send reply message struct evbuffer *evb = evhttp_request_get_output_buffer(req); assert(evb); evbuffer_add(evb, strReply.data(), strReply.size()); HTTPEvent *ev = new HTTPEvent(eventBase, true, std::bind(evhttp_send_reply, req, nStatus, (const char *)nullptr, (struct evbuffer *)nullptr)); ev->trigger(nullptr); replySent = true; // transferred back to main thread. req = nullptr; } CService HTTPRequest::GetPeer() { evhttp_connection *con = evhttp_request_get_connection(req); CService peer; if (con) { // evhttp retains ownership over returned address string const char *address = ""; uint16_t port = 0; evhttp_connection_get_peer(con, (char **)&address, &port); peer = LookupNumeric(address, port); } return peer; } std::string HTTPRequest::GetURI() { return evhttp_request_get_uri(req); } HTTPRequest::RequestMethod HTTPRequest::GetRequestMethod() { switch (evhttp_request_get_command(req)) { case EVHTTP_REQ_GET: return GET; case EVHTTP_REQ_POST: return POST; case EVHTTP_REQ_HEAD: return HEAD; case EVHTTP_REQ_PUT: return PUT; case EVHTTP_REQ_OPTIONS: return OPTIONS; default: return UNKNOWN; } } void RegisterHTTPHandler(const std::string &prefix, bool exactMatch, const HTTPRequestHandler &handler) { LogPrint(BCLog::HTTP, "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch); pathHandlers.push_back(HTTPPathHandler(prefix, exactMatch, handler)); } void UnregisterHTTPHandler(const std::string &prefix, bool exactMatch) { std::vector::iterator i = pathHandlers.begin(); std::vector::iterator iend = pathHandlers.end(); for (; i != iend; ++i) if (i->prefix == prefix && i->exactMatch == exactMatch) break; if (i != iend) { LogPrint(BCLog::HTTP, "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch); pathHandlers.erase(i); } } diff --git a/src/httpserver.h b/src/httpserver.h index a03c21ad27..9d37ac12bd 100644 --- a/src/httpserver.h +++ b/src/httpserver.h @@ -1,159 +1,159 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Copyright (c) 2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_HTTPSERVER_H #define BITCOIN_HTTPSERVER_H #include #include #include static const int DEFAULT_HTTP_THREADS = 4; static const int DEFAULT_HTTP_WORKQUEUE = 16; static const int DEFAULT_HTTP_SERVER_TIMEOUT = 30; struct evhttp_request; struct event_base; class Config; class CService; class HTTPRequest; /** * Initialize HTTP server. * Call this before RegisterHTTPHandler or EventBase(). */ bool InitHTTPServer(Config &config); /** * Start HTTP server. * This is separate from InitHTTPServer to give users race-condition-free time * to register their handlers between InitHTTPServer and StartHTTPServer. */ bool StartHTTPServer(); /** Interrupt HTTP server threads */ void InterruptHTTPServer(); /** Stop HTTP server */ void StopHTTPServer(); /** Handler for requests to a certain HTTP path */ typedef std::function HTTPRequestHandler; /** * Register handler for prefix. * If multiple handlers match a prefix, the first-registered one will * be invoked. */ void RegisterHTTPHandler(const std::string &prefix, bool exactMatch, const HTTPRequestHandler &handler); /** Unregister handler for prefix */ void UnregisterHTTPHandler(const std::string &prefix, bool exactMatch); /** * Return evhttp event base. 
This can be used by submodules to * queue timers or custom events. */ struct event_base *EventBase(); /** * In-flight HTTP request. * Thin C++ wrapper around evhttp_request. */ class HTTPRequest { private: struct evhttp_request *req; bool replySent; public: - HTTPRequest(struct evhttp_request *req); + explicit HTTPRequest(struct evhttp_request *req); ~HTTPRequest(); enum RequestMethod { UNKNOWN, GET, POST, HEAD, PUT, OPTIONS }; /** Get requested URI */ std::string GetURI(); /** Get CService (address:ip) for the origin of the http request */ CService GetPeer(); /** Get request method */ RequestMethod GetRequestMethod(); /** * Get the request header specified by hdr, or an empty string. * Return an pair (isPresent,string). */ std::pair GetHeader(const std::string &hdr); /** * Read request body. * * @note As this consumes the underlying buffer, call this only once. * Repeated calls will return an empty string. */ std::string ReadBody(); /** * Write output header. * * @note call this before calling WriteErrorReply or Reply. */ void WriteHeader(const std::string &hdr, const std::string &value); /** * Write HTTP reply. * nStatus is the HTTP status code to send. * strReply is the body of the reply. Keep it empty to send a standard * message. * * @note Can be called only once. As this will give the request back to the * main thread, do not call any other HTTPRequest methods after calling * this. */ void WriteReply(int nStatus, const std::string &strReply = ""); }; /** Event handler closure */ class HTTPClosure { public: virtual void operator()() = 0; virtual ~HTTPClosure() {} }; /** * Event class. This can be used either as an cross-thread trigger or as a * timer. */ class HTTPEvent { public: /** * Create a new event. * deleteWhenTriggered deletes this event object after the event is * triggered (and the handler called) * handler is the handler to call when the event is triggered. */ HTTPEvent(struct event_base *base, bool deleteWhenTriggered, const std::function &handler); ~HTTPEvent(); /** * Trigger the event. If tv is 0, trigger it immediately. Otherwise trigger * it after the given time has elapsed. */ void trigger(struct timeval *tv); bool deleteWhenTriggered; std::function handler; private: struct event *ev; }; #endif // BITCOIN_HTTPSERVER_H diff --git a/src/init.cpp b/src/init.cpp index 2b2179cbb9..2e06c456ac 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1,2332 +1,2333 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
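// [Editor's illustrative sketch, not part of the diff] Minimal use of the
// handler API declared in httpserver.h above; ExampleHandler and the
// "/example/" prefix are hypothetical. Per the header comment, handlers are
// registered after InitHTTPServer() and before StartHTTPServer().
#include "config.h"
#include "httpserver.h"
#include "rpc/protocol.h" // HTTP_OK, HTTP_BAD_METHOD
#include <string>

static bool ExampleHandler(Config &, HTTPRequest *req,
                           const std::string &path) {
    if (req->GetRequestMethod() != HTTPRequest::GET) {
        req->WriteReply(HTTP_BAD_METHOD, "GET only");
        return false;
    }
    req->WriteHeader("Content-Type", "text/plain");
    req->WriteReply(HTTP_OK, "hello from " + path);
    return true;
}

// After InitHTTPServer(config):
//     RegisterHTTPHandler("/example/", false, ExampleHandler);
// ...then StartHTTPServer().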
#if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "init.h" #include "addrman.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "compat/sanity.h" #include "config.h" #include "consensus/validation.h" #include "diskblockpos.h" #include "fs.h" #include "httprpc.h" #include "httpserver.h" #include "key.h" #include "miner.h" #include "net.h" #include "net_processing.h" #include "netbase.h" #include "policy/policy.h" #include "rpc/register.h" #include "rpc/safemode.h" #include "rpc/server.h" #include "scheduler.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "torcontrol.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "validation.h" #include "validationinterface.h" #ifdef ENABLE_WALLET #include "wallet/rpcdump.h" #endif #include "walletinitinterface.h" #include "warnings.h" #include #include #include #ifndef WIN32 #include #endif #include #include #include #include #include #include #if ENABLE_ZMQ #include "zmq/zmqnotificationinterface.h" #endif bool fFeeEstimatesInitialized = false; static const bool DEFAULT_PROXYRANDOMIZE = true; static const bool DEFAULT_REST_ENABLE = false; static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr g_connman; std::unique_ptr peerLogic; std::unique_ptr g_wallet_init_interface; #if ENABLE_ZMQ static CZMQNotificationInterface *pzmqNotificationInterface = nullptr; #endif #ifdef WIN32 // Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing // block files don't count towards the fd_set size limit anyway. #define MIN_CORE_FILEDESCRIPTORS 0 #else #define MIN_CORE_FILEDESCRIPTORS 150 #endif static const char *FEE_ESTIMATES_FILENAME = "fee_estimates.dat"; ////////////////////////////////////////////////////////////////////////////// // // Shutdown // // // Thread management and startup/shutdown: // // The network-processing threads are all part of a thread group created by // AppInit() or the Qt main() function. // // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets // fRequestShutdown, which triggers the DetectShutdownThread(), which interrupts // the main thread group. DetectShutdownThread() then exits, which causes // AppInit() to continue (it .joins the shutdown thread). Shutdown() is then // called to clean up database connections, and stop other threads that should // only be stopped after the main network-processing threads have exited. // // Note that if running -daemon the parent process returns from AppInit2 before // adding any threads to the threadGroup, so .join_all() returns immediately and // the parent exits from main(). // // Shutdown for Qt is very similar, only it uses a QTimer to detect // fRequestShutdown getting set, and then does the normal Qt shutdown thing. // std::atomic fRequestShutdown(false); std::atomic fDumpMempoolLater(false); void StartShutdown() { fRequestShutdown = true; } bool ShutdownRequested() { return fRequestShutdown; } /** * This is a minimally invasive approach to shutdown on LevelDB read errors from * the chainstate, while keeping user interface out of the common library, which * is shared between bitcoind, and bitcoin-qt and non-server tools. 
*/ class CCoinsViewErrorCatcher final : public CCoinsViewBacked { public: - CCoinsViewErrorCatcher(CCoinsView *view) : CCoinsViewBacked(view) {} + explicit CCoinsViewErrorCatcher(CCoinsView *view) + : CCoinsViewBacked(view) {} bool GetCoin(const COutPoint &outpoint, Coin &coin) const override { try { return CCoinsViewBacked::GetCoin(outpoint, coin); } catch (const std::runtime_error &e) { uiInterface.ThreadSafeMessageBox( _("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR); LogPrintf("Error reading from database: %s\n", e.what()); // Starting the shutdown sequence and returning false to the caller // would be interpreted as 'entry not found' (as opposed to unable // to read data), and could lead to invalid interpretation. Just // exit immediately, as we can't continue anyway, and all writes // should be atomic. abort(); } } // Writes do not need similar protection, as failure to write is handled by // the caller. }; static std::unique_ptr pcoinscatcher; static std::unique_ptr globalVerifyHandle; void Interrupt(boost::thread_group &threadGroup) { InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); InterruptREST(); InterruptTorControl(); InterruptMapPort(); if (g_connman) { g_connman->Interrupt(); } threadGroup.interrupt_all(); } void Shutdown() { LogPrintf("%s: In progress...\n", __func__); static CCriticalSection cs_Shutdown; TRY_LOCK(cs_Shutdown, lockShutdown); if (!lockShutdown) { return; } /// Note: Shutdown() must be able to handle cases in which AppInit2() failed /// part of the way, for example if the data directory was found to be /// locked. Be sure that anything that writes files or flushes caches only /// does this if the respective module was initialized. RenameThread("bitcoin-shutoff"); g_mempool.AddTransactionsUpdated(1); StopHTTPRPC(); StopREST(); StopRPC(); StopHTTPServer(); g_wallet_init_interface->Flush(); StopMapPort(); // Because these depend on each-other, we make sure that neither can be // using the other before destroying them. UnregisterValidationInterface(peerLogic.get()); g_connman->Stop(); peerLogic.reset(); g_connman.reset(); StopTorControl(); if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { DumpMempool(); } if (fFeeEstimatesInitialized) { fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION); if (!est_fileout.IsNull()) { g_mempool.WriteFeeEstimates(est_fileout); } else { LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string()); } fFeeEstimatesInitialized = false; } // FlushStateToDisk generates a SetBestChain callback, which we should avoid // missing if (pcoinsTip != nullptr) { FlushStateToDisk(); } // After there are no more peers/RPC left to give us new data which may // generate CValidationInterface callbacks, flush them... GetMainSignals().FlushBackgroundCallbacks(); // Any future callbacks will be dropped. This should absolutely be safe - if // missing a callback results in an unrecoverable situation, unclean // shutdown would too. The only reason to do the above flushes is to let the // wallet catch up with our current chain to avoid any strange pruning edge // cases and make next startup faster by avoiding rescan. 
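// [Editor's illustrative sketch, not part of the diff] The recurring change in
// this patch is marking single-argument constructors explicit (e.g.
// CCoinsViewErrorCatcher above, CHashVerifier, HTTPRequest, WorkQueue). The
// hypothetical Depth type below shows what that prevents: with an implicit
// constructor an integer silently converts to the class type, while explicit
// turns that into a compile error and forces intentional construction.
#include <cstddef>

struct Depth {
    explicit Depth(size_t n) : value(n) {}
    size_t value;
};

static void Configure(const Depth &) {}

static void Caller() {
    // Configure(16);        // would compile without `explicit`; now rejected
    Configure(Depth(16));    // intent is spelled out at the call site
}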
{ LOCK(cs_main); if (pcoinsTip != nullptr) { FlushStateToDisk(); } pcoinsTip.reset(); pcoinscatcher.reset(); pcoinsdbview.reset(); pblocktree.reset(); } g_wallet_init_interface->Stop(); #if ENABLE_ZMQ if (pzmqNotificationInterface) { UnregisterValidationInterface(pzmqNotificationInterface); delete pzmqNotificationInterface; pzmqNotificationInterface = nullptr; } #endif #ifndef WIN32 try { fs::remove(GetPidFile()); } catch (const fs::filesystem_error &e) { LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what()); } #endif UnregisterAllValidationInterfaces(); GetMainSignals().UnregisterBackgroundSignalScheduler(); g_wallet_init_interface->Close(); g_wallet_init_interface.reset(); globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); } /** * Signal handlers are very limited in what they are allowed to do, so: */ void HandleSIGTERM(int) { fRequestShutdown = true; } void HandleSIGHUP(int) { GetLogger().m_reopen_file = true; } void OnRPCStarted() { uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange); } void OnRPCStopped() { uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange); RPCNotifyBlockChange(false, nullptr); g_best_block_cv.notify_all(); LogPrint(BCLog::RPC, "RPC stopped.\n"); } std::string HelpMessage(HelpMessageMode mode) { const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN); const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET); const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN); const auto testnetChainParams = CreateChainParams(CBaseChainParams::TESTNET); const bool showDebug = gArgs.GetBoolArg("-help-debug", false); // When adding new options to the categories, please keep and ensure // alphabetical ordering. Do not translate _(...) -help-debug options, Many // technical terms, and only a very small audience, so is unnecessary stress // to translators. 
std::string strUsage = HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("Print this help message and exit")); strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt( "-alertnotify=", _("Execute command when a relevant alert is received or we see a " "really long fork (%s in cmd is replaced by message)")); strUsage += HelpMessageOpt("-blocknotify=", _("Execute command when the best block changes " "(%s in cmd is replaced by block hash)")); if (showDebug) { strUsage += HelpMessageOpt( "-blocksonly", strprintf( _("Whether to operate in a blocks only mode (default: %d)"), DEFAULT_BLOCKSONLY)); } strUsage += HelpMessageOpt( "-assumevalid=", strprintf( _("If this block is in the chain assume that it and its ancestors " "are valid and potentially skip their script verification (0 to " "verify all, default: %s, testnet: %s)"), defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex())); strUsage += HelpMessageOpt( "-conf=", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); if (mode == HMM_BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt( "-daemon", _("Run in the background as a daemon and accept commands")); #endif } strUsage += HelpMessageOpt("-datadir=", _("Specify data directory")); if (showDebug) { strUsage += HelpMessageOpt( "-dbbatchsize", strprintf( "Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize)); } strUsage += HelpMessageOpt( "-dbcache=", strprintf( _("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache)); if (showDebug) { strUsage += HelpMessageOpt( "-feefilter", strprintf("Tell other nodes to filter invs to us by " "our mempool min fee (default: %d)", DEFAULT_FEEFILTER)); } strUsage += HelpMessageOpt( "-loadblock=", _("Imports blocks from external blk000??.dat file on startup")); strUsage += HelpMessageOpt( "-maxorphantx=", strprintf(_("Keep at most unconnectable " "transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS)); strUsage += HelpMessageOpt("-maxmempool=", strprintf(_("Keep the transaction memory pool " "below megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE)); strUsage += HelpMessageOpt("-mempoolexpiry=", strprintf(_("Do not keep transactions in the mempool " "longer than hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY)); if (showDebug) { strUsage += HelpMessageOpt( "-minimumchainwork=", strprintf( "Minimum work assumed to exist on a valid chain in hex " "(default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex())); } strUsage += HelpMessageOpt("-persistmempool", strprintf(_("Whether to save the mempool on shutdown " "and load on restart (default: %u)"), DEFAULT_PERSIST_MEMPOOL)); strUsage += HelpMessageOpt( "-blockreconstructionextratxn=", strprintf(_("Extra transactions to keep in memory for compact block " "reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); strUsage += HelpMessageOpt( "-par=", strprintf(_("Set the number of script verification threads (%u to %d, " "0 = auto, <0 = leave that many cores free, default: %d)"), -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS)); #ifndef WIN32 strUsage += HelpMessageOpt( "-pid=", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME)); #endif strUsage += HelpMessageOpt( "-prune=", strprintf( _("Reduce 
storage requirements by enabling pruning (deleting) of " "old blocks. This allows the pruneblockchain RPC to be called to " "delete specific blocks, and enables automatic pruning of old " "blocks if a target size in MiB is provided. This mode is " "incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the " "entire blockchain. " "(default: 0 = disable pruning blocks, 1 = allow manual pruning " "via RPC, >%u = automatically prune block files to stay under " "the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); strUsage += HelpMessageOpt( "-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks")); strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from " "the blk*.dat files on disk")); #ifndef WIN32 strUsage += HelpMessageOpt( "-sysperms", _("Create new files with system default permissions, instead of umask " "077 (only effective with disabled wallet functionality)")); #endif strUsage += HelpMessageOpt( "-txindex", strprintf(_("Maintain a full transaction index, used by " "the getrawtransaction rpc call (default: %d)"), DEFAULT_TXINDEX)); strUsage += HelpMessageOpt( "-usecashaddr", _("Use Cash Address for destination encoding instead " "of base58 (activate by default on Jan, 14)")); strUsage += HelpMessageGroup(_("Connection options:")); strUsage += HelpMessageOpt( "-addnode=", _("Add a node to connect to and attempt to keep the connection open " "(see the `addnode` RPC command help for more info)")); strUsage += HelpMessageOpt( "-banscore=", strprintf( _("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD)); strUsage += HelpMessageOpt( "-bantime=", strprintf(_("Number of seconds to keep misbehaving " "peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME)); strUsage += HelpMessageOpt("-bind=", _("Bind to given address and always listen on " "it. 
Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt( "-connect=", _("Connect only to the specified node(s); -connect=0 " "disables automatic connections (the rules for this " "peer are the same as for -addnode)")); strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when " "listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt( "-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP)); strUsage += HelpMessageOpt( "-dnsseed", _("Query for peer addresses via DNS lookup, if low on " "addresses (default: 1 unless -connect/-noconnect)")); strUsage += HelpMessageOpt("-externalip=", _("Specify your own public address")); strUsage += HelpMessageOpt( "-forcednsseed", strprintf( _("Always query for peer addresses via DNS lookup (default: %d)"), DEFAULT_FORCEDNSSEED)); strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: " "1 if no -proxy or -connect/-noconnect)")); strUsage += HelpMessageOpt( "-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION)); strUsage += HelpMessageOpt( "-maxconnections=", strprintf(_("Maintain at most connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS)); strUsage += HelpMessageOpt("-maxreceivebuffer=", strprintf(_("Maximum per-connection receive buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER)); strUsage += HelpMessageOpt( "-maxsendbuffer=", strprintf(_("Maximum per-connection send buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER)); strUsage += HelpMessageOpt( "-maxtimeadjustment", strprintf(_("Maximum allowed median peer time offset adjustment. Local " "perspective of time may be influenced by peers forward or " "backward by this amount. (default: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT)); strUsage += HelpMessageOpt("-onion=", strprintf(_("Use separate SOCKS5 proxy to reach peers " "via Tor hidden services (default: %s)"), "-proxy")); strUsage += HelpMessageOpt( "-onlynet=", _("Only connect to nodes in network (ipv4, ipv6 or onion)")); strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %d)"), DEFAULT_PERMIT_BAREMULTISIG)); strUsage += HelpMessageOpt( "-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom " "filters (default: %d)"), DEFAULT_PEERBLOOMFILTERS)); strUsage += HelpMessageOpt( "-port=", strprintf( _("Listen for connections on (default: %u or testnet: %u)"), defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort())); strUsage += HelpMessageOpt("-proxy=", _("Connect through SOCKS5 proxy")); strUsage += HelpMessageOpt( "-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. 
This " "enables Tor stream isolation (default: %d)"), DEFAULT_PROXYRANDOMIZE)); strUsage += HelpMessageOpt( "-seednode=", _("Connect to a node to retrieve peer addresses, and disconnect")); strUsage += HelpMessageOpt( "-timeout=", strprintf(_("Specify connection timeout in " "milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT)); strUsage += HelpMessageOpt("-torcontrol=:", strprintf(_("Tor control port to use if onion " "listening enabled (default: %s)"), DEFAULT_TOR_CONTROL)); strUsage += HelpMessageOpt("-torpassword=", _("Tor control port password (default: empty)")); #ifdef USE_UPNP #if USE_UPNP strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port " "(default: 1 when listening and no -proxy)")); #else strUsage += HelpMessageOpt( "-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0)); #endif #endif strUsage += HelpMessageOpt("-whitebind=", _("Bind to given address and whitelist peers connecting " "to it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt( "-whitelist=", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) " "or CIDR notated network (e.g. 1.2.3.0/24). Can be specified " "multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their " "transactions are always relayed, even if they are already " "in the mempool, useful e.g. for a gateway")); strUsage += HelpMessageOpt( "-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted " "peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageOpt( "-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even " "if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY)); strUsage += HelpMessageOpt( "-maxuploadtarget=", strprintf(_("Tries to keep outbound traffic under the given target (in " "MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET)); strUsage += g_wallet_init_interface->GetHelpString(showDebug); #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); strUsage += HelpMessageOpt("-zmqpubhashblock=
", _("Enable publish hash block in
")); strUsage += HelpMessageOpt("-zmqpubhashtx=
", _("Enable publish hash transaction in
")); strUsage += HelpMessageOpt("-zmqpubrawblock=
", _("Enable publish raw block in
")); strUsage += HelpMessageOpt("-zmqpubrawtx=
", _("Enable publish raw transaction in
")); #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); strUsage += HelpMessageOpt("-uacomment=", _("Append comment to the user agent string")); if (showDebug) { strUsage += HelpMessageOpt( "-checkblocks=", strprintf( _("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=", strprintf(_("How thorough the block verification of " "-checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); strUsage += HelpMessageOpt( "-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, " "setBlockIndexCandidates, chainActive and " "mapBlocksUnlinked occasionally. Also sets -checkmempool " "(default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkmempool=", strprintf("Run checks every transactions (default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkpoints", strprintf("Only accept block chain matching " "built-in checkpoints (default: %d)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt( "-disablesafemode", strprintf("Disable safemode, override a real " "safe mode event (default: %d)", DEFAULT_DISABLE_SAFEMODE)); strUsage += HelpMessageOpt("-deprecatedrpc=", "Allows deprecated RPC method(s) to be used"); strUsage += HelpMessageOpt( "-testsafemode", strprintf("Force safe mode (default: %d)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=", "Randomly drop 1 of every network messages"); strUsage += HelpMessageOpt("-fuzzmessagestest=", "Randomly fuzz 1 of every network messages"); strUsage += HelpMessageOpt( "-stopafterblockimport", strprintf( "Stop running after importing blocks from disk (default: %d)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt( "-stopatheight", strprintf("Stop running after reaching the given " "height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT)); strUsage += HelpMessageOpt( "-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool " "ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT)); strUsage += HelpMessageOpt("-limitancestorsize=", strprintf("Do not accept transactions whose size " "with all in-mempool ancestors exceeds " " kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have " " or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantsize=", strprintf("Do not accept transactions if any ancestor would have " "more than kilobytes of in-mempool descendants " "(default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT)); } strUsage += HelpMessageOpt( "-debug=", strprintf(_("Output debugging information (default: %u, supplying " " is optional)"), 0) + ". " + _("If is not supplied or if = 1, " "output all debugging information.") + _(" can be:") + " " + ListLogCategories() + "."); strUsage += HelpMessageOpt( "-debugexclude=", strprintf(_("Exclude debugging information for a category. 
Can be used " "in conjunction with -debug=1 to output debug logs for all " "categories except one or more specified categories."))); if (showDebug) { strUsage += HelpMessageOpt( "-nodebug", "Turn off debugging messages, same as -debug=0"); } strUsage += HelpMessageOpt( "-help-debug", _("Show all debugging options (usage: --help -help-debug)")); strUsage += HelpMessageOpt( "-logips", strprintf(_("Include IP addresses in debug output (default: %d)"), DEFAULT_LOGIPS)); strUsage += HelpMessageOpt( "-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %d)"), DEFAULT_LOGTIMESTAMPS)); if (showDebug) { strUsage += HelpMessageOpt( "-logtimemicros", strprintf( "Add microsecond precision to debug timestamps (default: %d)", DEFAULT_LOGTIMEMICROS)); strUsage += HelpMessageOpt( "-mocktime=", "Replace actual time with seconds since epoch (default: 0)"); strUsage += HelpMessageOpt( "-limitfreerelay=", strprintf("Continuously rate-limit free transactions to *1000 " "bytes per minute (default: %u)", DEFAULT_LIMITFREERELAY)); strUsage += HelpMessageOpt("-relaypriority", strprintf("Require high priority for relaying free " "or low-fee transactions (default: %d)", DEFAULT_RELAYPRIORITY)); strUsage += HelpMessageOpt( "-maxsigcachesize=", strprintf("Limit size of signature cache to MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxscriptcachesize=", strprintf("Limit size of script cache to MiB (default: %u)", DEFAULT_MAX_SCRIPT_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxtipage=", strprintf("Maximum tip age in seconds to consider node in initial " "block download (default: %u)", DEFAULT_MAX_TIP_AGE)); } strUsage += HelpMessageOpt( "-excessutxocharge=", strprintf(_("Fees (in %s/kB) to charge per utxo created for" "relaying, and mining (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_UTXO_FEE))); strUsage += HelpMessageOpt( "-minrelaytxfee=", strprintf( _("Fees (in %s/kB) smaller than this are considered zero fee for " "relaying, mining and transaction creation (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE_PER_KB))); strUsage += HelpMessageOpt( "-maxtxfee=", strprintf(_("Maximum total fees (in %s) to use in a single wallet " "transaction or raw transaction; setting this too low may " "abort large transactions (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE))); strUsage += HelpMessageOpt( "-printtoconsole", _("Send trace/debug info to console instead of debug.log file")); if (showDebug) { strUsage += HelpMessageOpt( "-printpriority", strprintf("Log transaction priority and fee per " "kB when mining blocks (default: %d)", DEFAULT_PRINTPRIORITY)); } strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup " "(default: 1 when no -debug)")); AppendParamsHelpMessages(strUsage, showDebug); strUsage += HelpMessageGroup(_("Node relay options:")); if (showDebug) { strUsage += HelpMessageOpt( "-acceptnonstdtxn", strprintf( "Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", defaultChainParams->RequireStandard())); strUsage += HelpMessageOpt("-excessiveblocksize=", strprintf(_("Do not accept blocks larger than this " "limit, in bytes (default: %d)"), DEFAULT_MAX_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-dustrelayfee=", strprintf("Fee rate (in %s/kB) used to defined dust, the value of " "an output such that it will cost about 1/3 of its value " "in fees at this fee rate to spend it. 
(default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE))); } strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions " "for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP)); strUsage += HelpMessageOpt( "-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %d)"), DEFAULT_ACCEPT_DATACARRIER)); strUsage += HelpMessageOpt( "-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we " "relay and mine (default: %u)"), MAX_OP_RETURN_RELAY)); strUsage += HelpMessageGroup(_("Block creation options:")); strUsage += HelpMessageOpt( "-blockmaxsize=", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_MAX_GENERATED_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-blockprioritypercentage=", strprintf(_("Set maximum percentage of a block reserved to " "high-priority/low-fee transactions (default: %d)"), DEFAULT_BLOCK_PRIORITY_PERCENTAGE)); strUsage += HelpMessageOpt( "-blockmintxfee=", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be " "included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB))); if (showDebug) { strUsage += HelpMessageOpt("-blockversion=", "Override block version to test forking scenarios"); } strUsage += HelpMessageGroup(_("RPC server options:")); strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands")); strUsage += HelpMessageOpt( "-rest", strprintf(_("Accept public REST requests (default: %d)"), DEFAULT_REST_ENABLE)); strUsage += HelpMessageOpt( "-rpcbind=", _("Bind to given address to listen for JSON-RPC connections. Use " "[host]:port notation for IPv6. This option can be specified " "multiple times (default: bind to all interfaces)")); strUsage += HelpMessageOpt("-rpccookiefile=", _("Location of the auth cookie (default: data dir)")); strUsage += HelpMessageOpt("-rpcuser=", _("Username for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcpassword=", _("Password for JSON-RPC connections")); strUsage += HelpMessageOpt( "-rpcauth=", _("Username and hashed password for JSON-RPC connections. The field " " comes in the format: :$. A canonical " "python script is included in share/rpcuser. The client then " "connects normally using the " "rpcuser=/rpcpassword= pair of arguments. This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcport=", strprintf(_("Listen for JSON-RPC connections on (default: %u or " "testnet: %u)"), defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort())); strUsage += HelpMessageOpt( "-rpcallowip=", _("Allow JSON-RPC connections from specified source. Valid for " "are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. " "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). 
This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcthreads=", strprintf( _("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS)); strUsage += HelpMessageOpt( "-rpccorsdomain=value", "Domain from which to accept cross origin requests (browser enforced)"); if (showDebug) { strUsage += HelpMessageOpt( "-rpcworkqueue=", strprintf("Set the depth of the work queue to " "service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE)); strUsage += HelpMessageOpt( "-rpcservertimeout=", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT)); } return strUsage; } std::string LicenseInfo() { const std::string URL_SOURCE_CODE = ""; const std::string URL_WEBSITE = ""; return CopyrightHolders( strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" + "\n" + strprintf(_("Please contribute if you find %s useful. " "Visit %s for further information about the software."), PACKAGE_NAME, URL_WEBSITE) + "\n" + strprintf(_("The source code is available from %s."), URL_SOURCE_CODE) + "\n" + "\n" + _("This is experimental software.") + "\n" + strprintf(_("Distributed under the MIT software license, see the " "accompanying file %s or %s"), "COPYING", "") + "\n" + "\n" + strprintf(_("This product includes software developed by the " "OpenSSL Project for use in the OpenSSL Toolkit %s and " "cryptographic software written by Eric Young and UPnP " "software written by Thomas Bernard."), "") + "\n"; } static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex) { if (initialSync || !pBlockIndex) { return; } std::string strCmd = gArgs.GetArg("-blocknotify", ""); if (!strCmd.empty()) { boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex()); std::thread t(runCommand, strCmd); // thread runs free t.detach(); } } static bool fHaveGenesis = false; static CWaitableCriticalSection cs_GenesisWait; static CConditionVariable condvar_GenesisWait; static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) { if (pBlockIndex != nullptr) { { LOCK(cs_GenesisWait); fHaveGenesis = true; } condvar_GenesisWait.notify_all(); } } struct CImportingNow { CImportingNow() { assert(fImporting == false); fImporting = true; } ~CImportingNow() { assert(fImporting == true); fImporting = false; } }; // If we're using -prune with -reindex, then delete block files that will be // ignored by the reindex. Since reindexing works by starting at block file 0 // and looping until a blockfile is missing, do the same here to delete any // later block files after a gap. Also delete all rev files since they'll be // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync // with what's actually on disk by the time we start downloading, so that // pruning works correctly. void CleanupBlockRevFiles() { std::map mapBlockFiles; // Glob all blk?????.dat and rev?????.dat files from the blocks directory. // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. 
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for " "-reindex with -prune\n"); fs::path blocksdir = GetDataDir() / "blocks"; for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { if (is_regular_file(*it) && it->path().filename().string().length() == 12 && it->path().filename().string().substr(8, 4) == ".dat") { if (it->path().filename().string().substr(0, 3) == "blk") { mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path(); } else if (it->path().filename().string().substr(0, 3) == "rev") { remove(it->path()); } } } // Remove all block files that aren't part of a contiguous set starting at // zero by walking the ordered map (keys are block file indices) by keeping // a separate counter. Once we hit a gap (or if 0 doesn't exist) start // removing block files. int nContigCounter = 0; for (const std::pair &item : mapBlockFiles) { if (atoi(item.first) == nContigCounter) { nContigCounter++; continue; } remove(item.second); } } void ThreadImport(const Config &config, std::vector vImportFiles) { RenameThread("bitcoin-loadblk"); { CImportingNow imp; // -reindex if (fReindex) { int nFile = 0; while (true) { CDiskBlockPos pos(nFile, 0); if (!fs::exists(GetBlockPosFilename(pos, "blk"))) { // No block files left to reindex break; } FILE *file = OpenBlockFile(pos, true); if (!file) { // This error is logged in OpenBlockFile break; } LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); LoadExternalBlockFile(config, file, &pos); nFile++; } pblocktree->WriteReindexing(false); fReindex = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try // initializing (no-op if reindexing worked): LoadGenesisBlock(config.GetChainParams()); } // hardcoded $DATADIR/bootstrap.dat fs::path pathBootstrap = GetDataDir() / "bootstrap.dat"; if (fs::exists(pathBootstrap)) { FILE *file = fsbridge::fopen(pathBootstrap, "rb"); if (file) { fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old"; LogPrintf("Importing bootstrap.dat...\n"); LoadExternalBlockFile(config, file); RenameOver(pathBootstrap, pathBootstrapOld); } else { LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string()); } } // -loadblock= for (const fs::path &path : vImportFiles) { FILE *file = fsbridge::fopen(path, "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", path.string()); LoadExternalBlockFile(config, file); } else { LogPrintf("Warning: Could not open blocks file %s\n", path.string()); } } // scan for better chains in the block chain database, that are not yet // connected in the active best chain CValidationState state; if (!ActivateBestChain(config, state)) { LogPrintf("Failed to connect best block"); StartShutdown(); } if (gArgs.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); } } // End scope of CImportingNow if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { LoadMempool(config); fDumpMempoolLater = !fRequestShutdown; } } /** Sanity checks * Ensure that Bitcoin is running in a usable environment with all * necessary library support. */ bool InitSanityCheck(void) { if (!ECC_InitSanityCheck()) { InitError( "Elliptic curve cryptography sanity check failure. Aborting."); return false; } if (!glibc_sanity_test() || !glibcxx_sanity_test()) { return false; } if (!Random_SanityCheck()) { InitError("OS cryptographic RNG sanity check failure. 
Aborting."); return false; } return true; } static bool AppInitServers(Config &config, HTTPRPCRequestProcessor &httpRPCRequestProcessor, boost::thread_group &threadGroup) { RPCServerSignals::OnStarted(&OnRPCStarted); RPCServerSignals::OnStopped(&OnRPCStopped); if (!InitHTTPServer(config)) { return false; } if (!StartRPC()) { return false; } if (!StartHTTPRPC(config, httpRPCRequestProcessor)) { return false; } if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) { return false; } if (!StartHTTPServer()) { return false; } return true; } // Parameter interaction based on rules void InitParameterInteraction() { // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified. if (gArgs.IsArgSet("-bind")) { if (gArgs.SoftSetBoolArg("-listen", true)) { LogPrintf( "%s: parameter interaction: -bind set -> setting -listen=1\n", __func__); } } if (gArgs.IsArgSet("-whitebind")) { if (gArgs.SoftSetBoolArg("-listen", true)) { LogPrintf("%s: parameter interaction: -whitebind set -> setting " "-listen=1\n", __func__); } } if (gArgs.IsArgSet("-connect")) { // when only connecting to trusted nodes, do not seed via DNS, or listen // by default. if (gArgs.SoftSetBoolArg("-dnsseed", false)) { LogPrintf("%s: parameter interaction: -connect set -> setting " "-dnsseed=0\n", __func__); } if (gArgs.SoftSetBoolArg("-listen", false)) { LogPrintf("%s: parameter interaction: -connect set -> setting " "-listen=0\n", __func__); } } if (gArgs.IsArgSet("-proxy")) { // to protect privacy, do not listen by default if a default proxy // server is specified. if (gArgs.SoftSetBoolArg("-listen", false)) { LogPrintf( "%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__); } // to protect privacy, do not use UPNP when a proxy is set. The user may // still specify -listen=1 to listen locally, so don't rely on this // happening through -listen below. if (gArgs.SoftSetBoolArg("-upnp", false)) { LogPrintf( "%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__); } // to protect privacy, do not discover addresses by default if (gArgs.SoftSetBoolArg("-discover", false)) { LogPrintf("%s: parameter interaction: -proxy set -> setting " "-discover=0\n", __func__); } } if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening // (pointless) if (gArgs.SoftSetBoolArg("-upnp", false)) { LogPrintf( "%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__); } if (gArgs.SoftSetBoolArg("-discover", false)) { LogPrintf( "%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__); } if (gArgs.SoftSetBoolArg("-listenonion", false)) { LogPrintf("%s: parameter interaction: -listen=0 -> setting " "-listenonion=0\n", __func__); } } if (gArgs.IsArgSet("-externalip")) { // if an explicit public IP is specified, do not try to find others if (gArgs.SoftSetBoolArg("-discover", false)) { LogPrintf("%s: parameter interaction: -externalip set -> setting " "-discover=0\n", __func__); } } // disable whitelistrelay in blocksonly mode if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { if (gArgs.SoftSetBoolArg("-whitelistrelay", false)) { LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting " "-whitelistrelay=0\n", __func__); } } // Forcing relay from whitelisted hosts implies we will accept relays from // them in the first place. 
if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) { LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> " "setting -whitelistrelay=1\n", __func__); } } } static std::string ResolveErrMsg(const char *const optname, const std::string &strBind) { return strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind); } void InitLogging() { BCLog::Logger &logger = GetLogger(); logger.m_print_to_console = gArgs.GetBoolArg("-printtoconsole", false); logger.m_log_timestamps = gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS); logger.m_log_time_micros = gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS); fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS); LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); LogPrintf("%s version %s\n", CLIENT_NAME, FormatFullVersion()); } namespace { // Variables internal to initialization process only int nMaxConnections; int nUserMaxConnections; int nFD; ServiceFlags nLocalServices = NODE_NETWORK; } // namespace [[noreturn]] static void new_handler_terminate() { // Rather than throwing std::bad-alloc if allocation fails, terminate // immediately to (try to) avoid chain corruption. Since LogPrintf may // itself allocate memory, set the handler directly to terminate first. std::set_new_handler(std::terminate); LogPrintf("Error: Out of memory. Terminating.\n"); // The log was successful, terminate now. std::terminate(); }; bool AppInitBasicSetup() { // Step 1: setup #ifdef _MSC_VER // Turn off Microsoft heap dump noise _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr, OPEN_EXISTING, 0, 0)); #endif #if _MSC_VER >= 1400 // Disable confusing "helpful" text message on abort, Ctrl-C _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #ifdef WIN32 // Enable Data Execution Prevention (DEP) // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008 // A failure is non-critical and needs no further attention! #ifndef PROCESS_DEP_ENABLE // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= // 0x0601 (Windows 7), which is not correct. Can be removed, when GCCs winbase.h // is fixed! 
#define PROCESS_DEP_ENABLE 0x00000001 #endif typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD); PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress( GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy"); if (setProcDEPPol != nullptr) { setProcDEPPol(PROCESS_DEP_ENABLE); } #endif if (!SetupNetworking()) { return InitError("Initializing networking failed"); } #ifndef WIN32 if (!gArgs.GetBoolArg("-sysperms", false)) { umask(077); } // Clean shutdown on SIGTERM struct sigaction sa; sa.sa_handler = HandleSIGTERM; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sigaction(SIGTERM, &sa, nullptr); sigaction(SIGINT, &sa, nullptr); // Reopen debug.log on SIGHUP struct sigaction sa_hup; sa_hup.sa_handler = HandleSIGHUP; sigemptyset(&sa_hup.sa_mask); sa_hup.sa_flags = 0; sigaction(SIGHUP, &sa_hup, nullptr); // Ignore SIGPIPE, otherwise it will bring the daemon down if the client // closes unexpectedly signal(SIGPIPE, SIG_IGN); #endif std::set_new_handler(new_handler_terminate); return true; } bool AppInitParameterInteraction(Config &config, RPCServer &rpcServer) { const CChainParams &chainparams = config.GetChainParams(); // Step 2: parameter interactions // also see: InitParameterInteraction() // if using block pruning, then disallow txindex if (gArgs.GetArg("-prune", 0)) { if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { return InitError(_("Prune mode is incompatible with -txindex.")); } } // if space reserved for high priority transactions is misconfigured // stop program execution and warn the user with a proper error message const int64_t blkprio = gArgs.GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE); if (!config.SetBlockPriorityPercentage(blkprio)) { return InitError(_("Block priority percentage has to belong to the " "[0..100] interval.")); } // -bind and -whitebind can't be set when not listening size_t nUserBind = gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size(); if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { return InitError( "Cannot set -bind or -whitebind together with -listen=0"); } // Make sure enough file descriptors are available int nBind = std::max(nUserBind, size_t(1)); nUserMaxConnections = gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); nMaxConnections = std::max(nUserMaxConnections, 0); // Trim requested connection counts, to fit into system limitations nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS)), 0); nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS); if (nFD < MIN_CORE_FILEDESCRIPTORS) { return InitError(_("Not enough file descriptors available.")); } nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); if (nMaxConnections < nUserMaxConnections) { InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, " "because of system limitations."), nUserMaxConnections, nMaxConnections)); } // Step 3: parameter-to-internal-flags if (gArgs.IsArgSet("-debug")) { // Special-case: if -debug=0/-nodebug is set, turn off debugging // messages const std::vector &categories = gArgs.GetArgs("-debug"); if (find(categories.begin(), categories.end(), std::string("0")) == categories.end()) { for (const auto &cat : categories) { BCLog::LogFlags flag; if (!GetLogCategory(flag, cat)) { InitWarning( strprintf(_("Unsupported logging category %s=%s."), "-debug", cat)); } GetLogger().EnableCategory(flag); } } } // Now remove the 
logging categories which were explicitly excluded for (const std::string &cat : gArgs.GetArgs("-debugexclude")) { BCLog::LogFlags flag; if (!GetLogCategory(flag, cat)) { InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat)); } GetLogger().DisableCategory(flag); } // Check for -debugnet if (gArgs.GetBoolArg("-debugnet", false)) { InitWarning( _("Unsupported argument -debugnet ignored, use -debug=net.")); } // Check for -socks - as this is a privacy risk to continue, exit here if (gArgs.IsArgSet("-socks")) { return InitError( _("Unsupported argument -socks found. Setting SOCKS version isn't " "possible anymore, only SOCKS5 proxies are supported.")); } // Check for -tor - as this is a privacy risk to continue, exit here if (gArgs.GetBoolArg("-tor", false)) { return InitError(_("Unsupported argument -tor found, use -onion.")); } if (gArgs.GetBoolArg("-benchmark", false)) { InitWarning( _("Unsupported argument -benchmark ignored, use -debug=bench.")); } if (gArgs.GetBoolArg("-whitelistalwaysrelay", false)) { InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use " "-whitelistrelay and/or -whitelistforcerelay.")); } if (gArgs.IsArgSet("-blockminsize")) { InitWarning("Unsupported argument -blockminsize ignored."); } // Checkmempool and checkblockindex default to true in regtest mode int ratio = std::min( std::max( gArgs.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000); if (ratio != 0) { g_mempool.setSanityCheck(1.0 / ratio); } fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks()); fCheckpointsEnabled = gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED); hashAssumeValid = uint256S( gArgs.GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex())); if (!hashAssumeValid.IsNull()) { LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex()); } else { LogPrintf("Validating signatures for all blocks.\n"); } if (gArgs.IsArgSet("-minimumchainwork")) { const std::string minChainWorkStr = gArgs.GetArg("-minimumchainwork", ""); if (!IsHexNumber(minChainWorkStr)) { return InitError(strprintf( "Invalid non-hex (%s) minimum chain work value specified", minChainWorkStr)); } nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr)); } else { nMinimumChainWork = UintToArith256(chainparams.GetConsensus().nMinimumChainWork); } LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex()); if (nMinimumChainWork < UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) { LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainparams.GetConsensus().nMinimumChainWork.GetHex()); } // mempool limits int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t nMempoolSizeMin = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40; if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) { return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0))); } // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); if (nScriptCheckThreads <= 0) { nScriptCheckThreads += GetNumCores(); } if (nScriptCheckThreads <= 1) { nScriptCheckThreads = 0; } else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) { nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; } // Configure excessive block size. 
const uint64_t nProposedExcessiveBlockSize = gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE); if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) { return InitError( _("Excessive block size must be > 1,000,000 bytes (1MB)")); } // Check blockmaxsize does not exceed maximum accepted block size. const uint64_t nProposedMaxGeneratedBlockSize = gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) { auto msg = _("Max generated block size (blockmaxsize) cannot exceed " "the excessive block size (excessiveblocksize)"); return InitError(msg); } // block pruning; get the amount of disk space (in MiB) to allot for block & // undo files int64_t nPruneArg = gArgs.GetArg("-prune", 0); if (nPruneArg < 0) { return InitError( _("Prune cannot be configured with a negative value.")); } nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024; if (nPruneArg == 1) { // manual pruning: -prune=1 LogPrintf("Block pruning enabled. Use RPC call " "pruneblockchain(height) to manually prune block and undo " "files.\n"); nPruneTarget = std::numeric_limits::max(); fPruneMode = true; } else if (nPruneTarget) { if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) { return InitError( strprintf(_("Prune configured below the minimum of %d MiB. " "Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); } LogPrintf("Prune configured to target %uMiB on disk for block and undo " "files.\n", nPruneTarget / 1024 / 1024); fPruneMode = true; } RegisterAllRPCCommands(config, rpcServer, tableRPC); g_wallet_init_interface->RegisterRPC(tableRPC); #ifdef ENABLE_WALLET RegisterDumpRPCCommands(tableRPC); #endif nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); if (nConnectTimeout <= 0) { nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; } // Obtain the amount to charge excess UTXO if (gArgs.IsArgSet("-excessutxocharge")) { Amount n = Amount::zero(); auto parsed = ParseMoney(gArgs.GetArg("-excessutxocharge", ""), n); if (!parsed || Amount::zero() > n) { return InitError(AmountErrMsg( "excessutxocharge", gArgs.GetArg("-excessutxocharge", ""))); } config.SetExcessUTXOCharge(n); } else { config.SetExcessUTXOCharge(DEFAULT_UTXO_FEE); } // Fee-per-kilobyte amount considered the same as "free". If you are mining, // be careful setting this: if you set it to zero then a transaction spammer // can cheaply fill blocks using 1-satoshi-fee transactions. It should be // set above the real cost to you of processing a transaction. if (gArgs.IsArgSet("-minrelaytxfee")) { Amount n = Amount::zero(); auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n); if (!parsed || Amount::zero() == n) { return InitError(AmountErrMsg("minrelaytxfee", gArgs.GetArg("-minrelaytxfee", ""))); } // High fee check is done afterward in WalletParameterInteraction() config.SetMinFeePerKB(CFeeRate(n)); } else { config.SetMinFeePerKB(CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB)); } // Sanity check argument for min fee for including tx in block // TODO: Harmonize which arguments need sanity checking and where that // happens. if (gArgs.IsArgSet("-blockmintxfee")) { Amount n = Amount::zero(); if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) { return InitError(AmountErrMsg("blockmintxfee", gArgs.GetArg("-blockmintxfee", ""))); } } // Feerate used to define dust. Shouldn't be changed lightly as old // implementations may inadvertently create non-standard transactions. 
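    // Note that an explicit -dustrelayfee=0 is rejected below: the parse check
    // treats a zero amount the same as a malformed value.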
if (gArgs.IsArgSet("-dustrelayfee")) { Amount n = Amount::zero(); auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n); if (!parsed || Amount::zero() == n) { return InitError(AmountErrMsg("dustrelayfee", gArgs.GetArg("-dustrelayfee", ""))); } dustRelayFee = CFeeRate(n); } fRequireStandard = !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); if (chainparams.RequireStandard() && !fRequireStandard) { return InitError( strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); } nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp); if (!g_wallet_init_interface->ParameterInteraction()) { return false; } fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); // Option to startup with mocktime set (used for regression testing): SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) { nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM); } // Signal Bitcoin Cash support. // TODO: remove some time after the hardfork when no longer needed // to differentiate the network nodes. nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH); nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); return true; } static bool LockDataDirectory(bool probeOnly) { std::string strDataDir = GetDataDir().string(); // Make sure only a single Bitcoin process is using the data directory. fs::path pathLockFile = GetDataDir() / ".lock"; // empty lock file; created if it doesn't exist. FILE *file = fsbridge::fopen(pathLockFile, "a"); if (file) { fclose(file); } try { static boost::interprocess::file_lock lock( pathLockFile.string().c_str()); if (!lock.try_lock()) { return InitError( strprintf(_("Cannot obtain a lock on data directory %s. %s is " "probably already running."), strDataDir, _(PACKAGE_NAME))); } if (probeOnly) { lock.unlock(); } } catch (const boost::interprocess::interprocess_exception &e) { return InitError(strprintf(_("Cannot obtain a lock on data directory " "%s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what())); } return true; } bool AppInitSanityChecks() { // Step 4: sanity checks // Initialize elliptic curve code std::string sha256_algo = SHA256AutoDetect(); LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo); RandomInit(); ECC_Start(); globalVerifyHandle.reset(new ECCVerifyHandle()); // Sanity check if (!InitSanityCheck()) { return InitError(strprintf( _("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); } // Probe the data directory lock to give an early error message, if possible // We cannot hold the data directory lock here, as the forking for daemon() // hasn't yet happened, and a fork will cause weird behavior to it. return LockDataDirectory(true); } bool AppInitLockDataDirectory() { // After daemonization get the data directory lock again and hold on to it // until exit. This creates a slight window for a race condition to happen, // however this condition is harmless: it will at most make us exit without // printing a message to console. 
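    // LockDataDirectory(false) acquires the boost::interprocess file lock on
    // "<datadir>/.lock" and keeps holding it, whereas the earlier call from
    // AppInitSanityChecks() only probed the lock and released it again.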
if (!LockDataDirectory(false)) { // Detailed error printed inside LockDataDirectory return false; } return true; } bool AppInitMain(Config &config, HTTPRPCRequestProcessor &httpRPCRequestProcessor, boost::thread_group &threadGroup, CScheduler &scheduler) { // Step 4a: application initialization const CChainParams &chainparams = config.GetChainParams(); #ifndef WIN32 CreatePidFile(GetPidFile(), getpid()); #endif BCLog::Logger &logger = GetLogger(); bool default_shrinkdebugfile = logger.DefaultShrinkDebugFile(); if (gArgs.GetBoolArg("-shrinkdebugfile", default_shrinkdebugfile)) { // Do this first since it both loads a bunch of debug.log into memory, // and because this needs to happen before any other debug.log printing. logger.ShrinkDebugFile(); } if (logger.m_print_to_file) { logger.OpenDebugLog(); } if (!logger.m_log_timestamps) { LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime())); } LogPrintf("Default data directory %s\n", GetDefaultDataDir().string()); LogPrintf("Using data directory %s\n", GetDataDir().string()); LogPrintf( "Using config file %s\n", GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); LogPrintf("Using at most %i automatic connections (%i file descriptors " "available)\n", nMaxConnections, nFD); InitSignatureCache(); InitScriptExecutionCache(); LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads); if (nScriptCheckThreads) { for (int i = 0; i < nScriptCheckThreads - 1; i++) { threadGroup.create_thread(&ThreadScriptCheck); } } // Start the lightweight task scheduler thread CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler); threadGroup.create_thread(boost::bind(&TraceThread, "scheduler", serviceLoop)); GetMainSignals().RegisterBackgroundSignalScheduler(scheduler); /** * Start the RPC server. It will be started in "warmup" mode and not * process calls yet (but it will verify that the server is there and will * be ready later). Warmup mode will be completed when initialisation is * finished. */ if (gArgs.GetBoolArg("-server", false)) { uiInterface.InitMessage.connect(SetRPCWarmupStatus); if (!AppInitServers(config, httpRPCRequestProcessor, threadGroup)) { return InitError( _("Unable to start HTTP server. See debug log for details.")); } } // Step 5: verify wallet database integrity if (!g_wallet_init_interface->Verify(chainparams)) { return false; } // Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state // is not yet setup and may end up being set up twice if we // need to reindex later. 
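    // (The two GetRand() values passed to the CConnman constructor below seed
    // its internal deterministic randomizer; no sockets are opened until
    // connman.Start() near the end of initialization.)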
assert(!g_connman); g_connman = std::unique_ptr( new CConnman(config, GetRand(std::numeric_limits::max()), GetRand(std::numeric_limits::max()))); CConnman &connman = *g_connman; peerLogic.reset(new PeerLogicValidation(&connman, scheduler)); RegisterValidationInterface(peerLogic.get()); if (gArgs.IsArgSet("-onlynet")) { std::set nets; for (const std::string &snet : gArgs.GetArgs("-onlynet")) { enum Network net = ParseNetwork(snet); if (net == NET_UNROUTABLE) { return InitError(strprintf( _("Unknown network specified in -onlynet: '%s'"), snet)); } nets.insert(net); } for (int n = 0; n < NET_MAX; n++) { enum Network net = (enum Network)n; if (!nets.count(net)) SetLimited(net); } } // Check for host lookup allowed before parsing any network related // parameters fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP); bool proxyRandomize = gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE); // -proxy sets a proxy for all outgoing network traffic // -noproxy (or -proxy=0) as well as the empty string can be used to not set // a proxy, this is the default std::string proxyArg = gArgs.GetArg("-proxy", ""); SetLimited(NET_TOR); if (proxyArg != "" && proxyArg != "0") { CService proxyAddr; if (!Lookup(proxyArg.c_str(), proxyAddr, 9050, fNameLookup)) { return InitError(strprintf( _("Invalid -proxy address or hostname: '%s'"), proxyArg)); } proxyType addrProxy = proxyType(proxyAddr, proxyRandomize); if (!addrProxy.IsValid()) { return InitError(strprintf( _("Invalid -proxy address or hostname: '%s'"), proxyArg)); } SetProxy(NET_IPV4, addrProxy); SetProxy(NET_IPV6, addrProxy); SetProxy(NET_TOR, addrProxy); SetNameProxy(addrProxy); // by default, -proxy sets onion as reachable, unless -noonion later SetLimited(NET_TOR, false); } // -onion can be used to set only a proxy for .onion, or override normal // proxy for .onion addresses. // -noonion (or -onion=0) disables connecting to .onion entirely. 
An empty // string is used to not override the onion proxy (in which case it defaults // to -proxy set above, or none) std::string onionArg = gArgs.GetArg("-onion", ""); if (onionArg != "") { if (onionArg == "0") { // Handle -noonion/-onion=0 SetLimited(NET_TOR); // set onions as unreachable } else { CService onionProxy; if (!Lookup(onionArg.c_str(), onionProxy, 9050, fNameLookup)) { return InitError(strprintf( _("Invalid -onion address or hostname: '%s'"), onionArg)); } proxyType addrOnion = proxyType(onionProxy, proxyRandomize); if (!addrOnion.IsValid()) { return InitError(strprintf( _("Invalid -onion address or hostname: '%s'"), onionArg)); } SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } } // see Step 2: parameter interactions for more information about these fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN); fDiscover = gArgs.GetBoolArg("-discover", true); fRelayTxes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY); for (const std::string &strAddr : gArgs.GetArgs("-externalip")) { CService addrLocal; if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid()) { AddLocal(addrLocal, LOCAL_MANUAL); } else { return InitError(ResolveErrMsg("externalip", strAddr)); } } #if ENABLE_ZMQ pzmqNotificationInterface = CZMQNotificationInterface::Create(); if (pzmqNotificationInterface) { RegisterValidationInterface(pzmqNotificationInterface); } #endif // unlimited unless -maxuploadtarget is set uint64_t nMaxOutboundLimit = 0; uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME; if (gArgs.IsArgSet("-maxuploadtarget")) { nMaxOutboundLimit = gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * 1024; } // Step 7: load block chain fReindex = gArgs.GetBoolArg("-reindex", false); bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false); // cache size calculations int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20); // total cache cannot be less than nMinDbCache nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be greater than nMaxDbcache nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); int64_t nBlockTreeDBCache = nTotalCache / 8; nBlockTreeDBCache = std::min(nBlockTreeDBCache, (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? 
nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20); nTotalCache -= nBlockTreeDBCache; // use 25%-50% of the remainder for disk cache int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // cap total coins db cache nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); nTotalCache -= nCoinDBCache; // the rest goes to in-memory cache nCoinCacheUsage = nTotalCache; int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; LogPrintf("Cache configuration:\n"); LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of " "unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024)); int64_t nStart = 0; bool fLoaded = false; while (!fLoaded && !fRequestShutdown) { bool fReset = fReindex; std::string strLoadError; uiInterface.InitMessage(_("Loading block index...")); nStart = GetTimeMillis(); do { try { UnloadBlockIndex(); pcoinsTip.reset(); pcoinsdbview.reset(); pcoinscatcher.reset(); pblocktree.reset( new CBlockTreeDB(nBlockTreeDBCache, false, fReset)); if (fReindex) { pblocktree->WriteReindexing(true); // If we're reindexing in prune mode, wipe away unusable // block files and all undo data files if (fPruneMode) { CleanupBlockRevFiles(); } } if (fRequestShutdown) { break; } // LoadBlockIndex will load fTxIndex from the db, or set it if // we're reindexing. It will also load fHavePruned if we've // ever removed a block file from disk. if (!LoadBlockIndex(config)) { strLoadError = _("Error loading block database"); break; } // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way // around). if (!mapBlockIndex.empty() && mapBlockIndex.count( chainparams.GetConsensus().hashGenesisBlock) == 0) { return InitError(_("Incorrect or no genesis block found. " "Wrong datadir for network?")); } // Check for changed -txindex state if (fTxIndex != gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { strLoadError = _("You need to rebuild the database using " "-reindex-chainstate to change -txindex"); break; } // Check for changed -prune state. What we are concerned about // is a user who has pruned blocks in the past, but is now // trying to run unpruned. if (fHavePruned && !fPruneMode) { strLoadError = _("You need to rebuild the database using -reindex to " "go back to unpruned mode. This will redownload the " "entire blockchain"); break; } // At this point blocktree args are consistent with what's on // disk. If we're not mid-reindex (based on disk + args), add a // genesis block on disk. This is called again in ThreadImport // if the reindex completes. if (!fReindex && !LoadGenesisBlock(chainparams)) { strLoadError = _("Error initializing block database"); break; } // At this point we're either in reindex or we've loaded a // useful block tree into mapBlockIndex! pcoinsdbview.reset(new CCoinsViewDB( nCoinDBCache, false, fReset || fReindexChainState)); pcoinscatcher.reset( new CCoinsViewErrorCatcher(pcoinsdbview.get())); // If necessary, upgrade from older database format. 
// This is a no-op if we cleared the coinsviewdb with -reindex // or -reindex-chainstate if (!pcoinsdbview->Upgrade()) { strLoadError = _("Error upgrading chainstate database"); break; } // ReplayBlocks is a no-op if we cleared the coinsviewdb with // -reindex or -reindex-chainstate if (!ReplayBlocks(config, pcoinsdbview.get())) { strLoadError = _("Unable to replay blocks. You will need to rebuild " "the database using -reindex-chainstate."); break; } // The on-disk coinsdb is now in a good state, create the cache pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get())); if (!fReindex && !fReindexChainState) { // LoadChainTip sets chainActive based on pcoinsTip's best // block if (!LoadChainTip(config)) { strLoadError = _("Error initializing block database"); break; } assert(chainActive.Tip() != nullptr); } if (!fReindex) { // Note that RewindBlockIndex MUST run even if we're about // to -reindex-chainstate. It both disconnects blocks based // on chainActive, and drops block data in mapBlockIndex // based on lack of available witness data. uiInterface.InitMessage(_("Rewinding blocks...")); if (!RewindBlockIndex(config)) { strLoadError = _("Unable to rewind the database to a " "pre-fork state. You will need to " "redownload the blockchain"); break; } } if (!fReindex && !fReindexChainState) { uiInterface.InitMessage(_("Verifying blocks...")); if (fHavePruned && gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) { LogPrintf("Prune: pruned datadir may not have more " "than %d blocks; only checking available " "blocks", MIN_BLOCKS_TO_KEEP); } { LOCK(cs_main); CBlockIndex *tip = chainActive.Tip(); RPCNotifyBlockChange(true, tip); if (tip && tip->nTime > GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME) { strLoadError = _("The block database contains a block which " "appears to be from the future. This may be " "due to your computer's date and time being " "set incorrectly. Only rebuild the block " "database if you are sure that your " "computer's date and time are correct"); break; } } if (!CVerifyDB().VerifyDB( config, pcoinsdbview.get(), gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL), gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { strLoadError = _("Corrupted block database detected"); break; } } } catch (const std::exception &e) { LogPrintf("%s\n", e.what()); strLoadError = _("Error opening block database"); break; } fLoaded = true; } while (false); if (!fLoaded && !fRequestShutdown) { // first suggest a reindex if (!fReset) { bool fRet = uiInterface.ThreadSafeQuestion( strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"), strLoadError + ".\nPlease restart with -reindex or " "-reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { fReindex = true; fRequestShutdown = false; } else { LogPrintf("Aborted block database rebuild. Exiting.\n"); return false; } } else { return InitError(strLoadError); } } } // As LoadBlockIndex can take several minutes, it's possible the user // requested to kill the GUI during the last operation. If so, exit. // As the program has not fully started yet, Shutdown() is possibly // overkill. if (fRequestShutdown) { LogPrintf("Shutdown requested. Exiting.\n"); return false; } LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart); fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION); // Allowed to fail as this file IS missing on first startup. 
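    // (CAutoFile::IsNull() simply reports whether the underlying fopen() failed,
    // so a missing fee estimates file is skipped silently here.)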
if (!est_filein.IsNull()) { g_mempool.ReadFeeEstimates(est_filein); } fFeeEstimatesInitialized = true; // Encoded addresses using cashaddr instead of base58 // Activates by default on Jan, 14 config.SetCashAddrEncoding( gArgs.GetBoolArg("-usecashaddr", GetAdjustedTime() > 1515900000)); // Step 8: load wallet if (!g_wallet_init_interface->Open(chainparams)) { return false; } // Step 9: data directory maintenance // if pruning, unset the service bit and perform the initial blockstore // prune after any wallet rescanning has taken place. if (fPruneMode) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { uiInterface.InitMessage(_("Pruning blockstore...")); PruneAndFlush(); } } // Step 10: import blocks if (!CheckDiskSpace()) { return false; } // Either install a handler to notify us when genesis activates, or set // fHaveGenesis directly. // No locking, as this happens before any background thread is started. if (chainActive.Tip() == nullptr) { uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait); } else { fHaveGenesis = true; } if (gArgs.IsArgSet("-blocknotify")) { uiInterface.NotifyBlockTip.connect(BlockNotifyCallback); } std::vector vImportFiles; for (const std::string &strFile : gArgs.GetArgs("-loadblock")) { vImportFiles.push_back(strFile); } threadGroup.create_thread( boost::bind(&ThreadImport, std::ref(config), vImportFiles)); // Wait for genesis block to be processed { WAIT_LOCK(cs_GenesisWait, lock); // We previously could hang here if StartShutdown() is called prior to // ThreadImport getting started, so instead we just wait on a timer to // check ShutdownRequested() regularly. while (!fHaveGenesis && !ShutdownRequested()) { condvar_GenesisWait.wait_for(lock, std::chrono::milliseconds(500)); } uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait); } // Step 11: start node //// debug print LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size()); LogPrintf("nBestHeight = %d\n", chainActive.Height()); if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { StartTorControl(); } Discover(); // Map ports with UPnP if (gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)) { StartMapPort(); } CConnman::Options connOptions; connOptions.nLocalServices = nLocalServices; connOptions.nMaxConnections = nMaxConnections; connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections); connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS; connOptions.nMaxFeeler = 1; connOptions.nBestHeight = chainActive.Height(); connOptions.uiInterface = &uiInterface; connOptions.m_msgproc = peerLogic.get(); connOptions.nSendBufferMaxSize = 1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER); connOptions.nReceiveFloodSize = 1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER); connOptions.m_added_nodes = gArgs.GetArgs("-addnode"); connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe; connOptions.nMaxOutboundLimit = nMaxOutboundLimit; for (const std::string &strBind : gArgs.GetArgs("-bind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) { return InitError(ResolveErrMsg("bind", strBind)); } connOptions.vBinds.push_back(addrBind); } for (const std::string &strBind : gArgs.GetArgs("-whitebind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, 0, false)) { return InitError(ResolveErrMsg("whitebind", strBind)); } if (addrBind.GetPort() == 0) { return InitError(strprintf( _("Need to specify a port with -whitebind: '%s'"), 
strBind)); } connOptions.vWhiteBinds.push_back(addrBind); } for (const auto &net : gArgs.GetArgs("-whitelist")) { CSubNet subnet; LookupSubNet(net.c_str(), subnet); if (!subnet.IsValid()) { return InitError(strprintf( _("Invalid netmask specified in -whitelist: '%s'"), net)); } connOptions.vWhitelistedRange.push_back(subnet); } connOptions.vSeedNodes = gArgs.GetArgs("-seednode"); // Initiate outbound connections unless connect=0 connOptions.m_use_addrman_outgoing = !gArgs.IsArgSet("-connect"); if (!connOptions.m_use_addrman_outgoing) { const auto connect = gArgs.GetArgs("-connect"); if (connect.size() != 1 || connect[0] != "0") { connOptions.m_specified_outgoing = connect; } } if (!connman.Start(scheduler, connOptions)) { return false; } // Step 12: finished SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); g_wallet_init_interface->Start(scheduler); return !fRequestShutdown; } diff --git a/src/limitedmap.h b/src/limitedmap.h index da57f6e99a..6e136be1a5 100644 --- a/src/limitedmap.h +++ b/src/limitedmap.h @@ -1,100 +1,100 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_LIMITEDMAP_H #define BITCOIN_LIMITEDMAP_H #include #include /** * STL-like map container that only keeps the N elements with the highest value. */ template class limitedmap { public: typedef K key_type; typedef V mapped_type; typedef std::pair value_type; typedef typename std::map::const_iterator const_iterator; typedef typename std::map::size_type size_type; protected: std::map map; typedef typename std::map::iterator iterator; std::multimap rmap; typedef typename std::multimap::iterator rmap_iterator; size_type nMaxSize; public: - limitedmap(size_type nMaxSizeIn) { + explicit limitedmap(size_type nMaxSizeIn) { assert(nMaxSizeIn > 0); nMaxSize = nMaxSizeIn; } const_iterator begin() const { return map.begin(); } const_iterator end() const { return map.end(); } size_type size() const { return map.size(); } bool empty() const { return map.empty(); } const_iterator find(const key_type &k) const { return map.find(k); } size_type count(const key_type &k) const { return map.count(k); } void insert(const value_type &x) { std::pair ret = map.insert(x); if (ret.second) { if (map.size() > nMaxSize) { map.erase(rmap.begin()->second); rmap.erase(rmap.begin()); } rmap.insert(make_pair(x.second, ret.first)); } } void erase(const key_type &k) { iterator itTarget = map.find(k); if (itTarget == map.end()) { return; } std::pair itPair = rmap.equal_range(itTarget->second); for (rmap_iterator it = itPair.first; it != itPair.second; ++it) { if (it->second == itTarget) { rmap.erase(it); map.erase(itTarget); return; } } // Shouldn't ever get here assert(0); } void update(const_iterator itIn, const mapped_type &v) { // Using map::erase() with empty range instead of map::find() to get a // non-const iterator, since it is a constant time operation in C++11. 
// For more details, see // https://stackoverflow.com/questions/765148/how-to-remove-constness-of-const-iterator iterator itTarget = map.erase(itIn, itIn); if (itTarget == map.end()) return; std::pair itPair = rmap.equal_range(itTarget->second); for (rmap_iterator it = itPair.first; it != itPair.second; ++it) { if (it->second == itTarget) { rmap.erase(it); itTarget->second = v; rmap.insert(make_pair(v, itTarget)); return; } } // Shouldn't ever get here assert(0); } size_type max_size() const { return nMaxSize; } size_type max_size(size_type s) { assert(s > 0); while (map.size() > s) { map.erase(rmap.begin()->second); rmap.erase(rmap.begin()); } nMaxSize = s; return nMaxSize; } }; #endif // BITCOIN_LIMITEDMAP_H diff --git a/src/miner.h b/src/miner.h index 2674f34e8f..fb68f24464 100644 --- a/src/miner.h +++ b/src/miner.h @@ -1,237 +1,237 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_MINER_H #define BITCOIN_MINER_H #include "primitives/block.h" #include "txmempool.h" #include "boost/multi_index/ordered_index.hpp" #include "boost/multi_index_container.hpp" #include #include class CBlockIndex; class CChainParams; class Config; class CReserveKey; class CScript; class CWallet; static const bool DEFAULT_PRINTPRIORITY = false; struct CBlockTemplateEntry { CTransactionRef tx; Amount fees; int64_t sigOpCount; CBlockTemplateEntry(CTransactionRef _tx, Amount _fees, int64_t _sigOpCount) : tx(_tx), fees(_fees), sigOpCount(_sigOpCount){}; }; struct CBlockTemplate { CBlock block; std::vector entries; }; // Container for tracking updates to ancestor feerate as we include (parent) // transactions in a block struct CTxMemPoolModifiedEntry { - CTxMemPoolModifiedEntry(CTxMemPool::txiter entry) { + explicit CTxMemPoolModifiedEntry(CTxMemPool::txiter entry) { iter = entry; nSizeWithAncestors = entry->GetSizeWithAncestors(); nBillableSizeWithAncestors = entry->GetBillableSizeWithAncestors(); nModFeesWithAncestors = entry->GetModFeesWithAncestors(); nSigOpCountWithAncestors = entry->GetSigOpCountWithAncestors(); } CTxMemPool::txiter iter; uint64_t nSizeWithAncestors; uint64_t nBillableSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; }; /** * Comparator for CTxMemPool::txiter objects. * It simply compares the internal memory address of the CTxMemPoolEntry object * pointed to. This means it has no meaning, and is only useful for using them * as key in other indexes. */ struct CompareCTxMemPoolIter { bool operator()(const CTxMemPool::txiter &a, const CTxMemPool::txiter &b) const { return &(*a) < &(*b); } }; struct modifiedentry_iter { typedef CTxMemPool::txiter result_type; result_type operator()(const CTxMemPoolModifiedEntry &entry) const { return entry.iter; } }; // This matches the calculation in CompareTxMemPoolEntryByAncestorFee, // except operating on CTxMemPoolModifiedEntry. // TODO: refactor to avoid duplication of this logic. struct CompareModifiedEntry { bool operator()(const CTxMemPoolModifiedEntry &a, const CTxMemPoolModifiedEntry &b) const { double f1 = b.nSizeWithAncestors * (a.nModFeesWithAncestors / SATOSHI); double f2 = a.nSizeWithAncestors * (b.nModFeesWithAncestors / SATOSHI); if (f1 == f2) { return CTxMemPool::CompareIteratorByHash()(a.iter, b.iter); } return f1 > f2; } }; // A comparator that sorts transactions based on number of ancestors. 
// This is sufficient to sort an ancestor package in an order that is valid // to appear in a block. struct CompareTxIterByAncestorCount { bool operator()(const CTxMemPool::txiter &a, const CTxMemPool::txiter &b) const { if (a->GetCountWithAncestors() != b->GetCountWithAncestors()) { return a->GetCountWithAncestors() < b->GetCountWithAncestors(); } return CTxMemPool::CompareIteratorByHash()(a, b); } }; typedef boost::multi_index_container< CTxMemPoolModifiedEntry, boost::multi_index::indexed_by< boost::multi_index::ordered_unique, // sorted by modified ancestor fee rate boost::multi_index::ordered_non_unique< // Reuse same tag from CTxMemPool's similar index boost::multi_index::tag, boost::multi_index::identity, CompareModifiedEntry>>> indexed_modified_transaction_set; typedef indexed_modified_transaction_set::nth_index<0>::type::iterator modtxiter; typedef indexed_modified_transaction_set::index::type::iterator modtxscoreiter; struct update_for_parent_inclusion { - update_for_parent_inclusion(CTxMemPool::txiter it) : iter(it) {} + explicit update_for_parent_inclusion(CTxMemPool::txiter it) : iter(it) {} void operator()(CTxMemPoolModifiedEntry &e) { e.nModFeesWithAncestors -= iter->GetFee(); e.nSizeWithAncestors -= iter->GetTxSize(); e.nBillableSizeWithAncestors -= iter->GetTxBillableSize(); e.nSigOpCountWithAncestors -= iter->GetSigOpCount(); } CTxMemPool::txiter iter; }; /** Generate a new block, without valid proof-of-work */ class BlockAssembler { private: // The constructed block template std::unique_ptr pblocktemplate; // A convenience pointer that always refers to the CBlock in pblocktemplate CBlock *pblock; // Configuration parameters for the block size uint64_t nMaxGeneratedBlockSize; CFeeRate blockMinFeeRate; // Information on the current status of the block uint64_t nBlockSize; uint64_t nBlockTx; uint64_t nBlockSigOps; Amount nFees; CTxMemPool::setEntries inBlock; // Chain context for the block int nHeight; int64_t nLockTimeCutoff; int64_t nMedianTimePast; const Config *config; const CTxMemPool *mempool; // Variables used for addPriorityTxs int lastFewTxs; public: BlockAssembler(const Config &_config, const CTxMemPool &mempool); /** Construct a new block template with coinbase to scriptPubKeyIn */ std::unique_ptr CreateNewBlock(const CScript &scriptPubKeyIn); uint64_t GetMaxGeneratedBlockSize() const { return nMaxGeneratedBlockSize; } private: // utility functions /** Clear the block's state and prepare for assembling a new block */ void resetBlock(); /** Add a tx to the block */ void AddToBlock(CTxMemPool::txiter iter); // Methods for how to add transactions to a block. /** Add transactions based on tx "priority" */ void addPriorityTxs(); /** Add transactions based on feerate including unconfirmed ancestors * Increments nPackagesSelected / nDescendantsUpdated with corresponding * statistics from the package selection (for logging statistics). 
*/ void addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated); /** Enum for the results from TestForBlock */ enum class TestForBlockResult : uint8_t { TXFits = 0, TXCantFit = 1, BlockFinished = 3, }; // helper function for addPriorityTxs /** Test if tx will still "fit" in the block */ TestForBlockResult TestForBlock(CTxMemPool::txiter iter); /** Test if tx still has unconfirmed parents not yet in block */ bool isStillDependent(CTxMemPool::txiter iter); // helper functions for addPackageTxs() /** Remove confirmed (inBlock) entries from given set */ void onlyUnconfirmed(CTxMemPool::setEntries &testSet); /** Test if a new package would "fit" in the block */ bool TestPackage(uint64_t packageSize, int64_t packageSigOpsCost); /** Perform checks on each transaction in a package: * locktime, serialized size (if necessary) * These checks should always succeed, and they're here * only as an extra check in case of suboptimal node configuration */ bool TestPackageTransactions(const CTxMemPool::setEntries &package); /** Return true if given transaction from mapTx has already been evaluated, * or if the transaction's cached data in mapTx is incorrect. */ bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx); /** Sort the package in an order that is valid to appear in a block */ void SortForBlock(const CTxMemPool::setEntries &package, CTxMemPool::txiter entry, std::vector &sortedEntries); /** Add descendants of given transactions to mapModifiedTx with ancestor * state updated assuming given transactions are inBlock. Returns number * of updated descendants. */ int UpdatePackagesForAdded(const CTxMemPool::setEntries &alreadyAdded, indexed_modified_transaction_set &mapModifiedTx); }; /** Modify the extranonce in a block */ void IncrementExtraNonce(const Config &config, CBlock *pblock, const CBlockIndex *pindexPrev, unsigned int &nExtraNonce); int64_t UpdateTime(CBlockHeader *pblock, const Config &config, const CBlockIndex *pindexPrev); #endif // BITCOIN_MINER_H diff --git a/src/netaddress.h b/src/netaddress.h index cc7ba9be4b..33477dc395 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -1,198 +1,199 @@ // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NETADDRESS_H #define BITCOIN_NETADDRESS_H #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "compat.h" #include "serialize.h" #include #include #include enum Network { NET_UNROUTABLE = 0, NET_IPV4, NET_IPV6, NET_TOR, NET_INTERNAL, NET_MAX, }; /** IP address (IPv6, or IPv4 using mapped IPv6 range (::FFFF:0:0/96)) */ class CNetAddr { protected: // in network byte order uint8_t ip[16]; // for scoped/link-local ipv6 addresses uint32_t scopeId; public: CNetAddr(); - CNetAddr(const struct in_addr &ipv4Addr); + explicit CNetAddr(const struct in_addr &ipv4Addr); void SetIP(const CNetAddr &ip); private: /** * Set raw IPv4 or IPv6 address (in network byte order) * @note Only NET_IPV4 and NET_IPV6 are allowed for network. */ void SetRaw(Network network, const uint8_t *data); public: /** * Transform an arbitrary string into a non-routable ipv6 address. * Useful for mapping resolved addresses back to their source. 
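Editorial aside (not part of the patch): the miner.h comments above note that sorting by GetCountWithAncestors() is sufficient for SortForBlock to produce a block-valid order within an ancestor package, because a parent always has strictly fewer in-package ancestors than any of its descendants. A self-contained sketch with made-up entries:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry {
    const char *name;
    int countWithAncestors; // stand-in for GetCountWithAncestors()
};

int main() {
    std::vector<Entry> package = {{"grandchild", 3}, {"parent", 1}, {"child", 2}};
    std::sort(package.begin(), package.end(),
              [](const Entry &a, const Entry &b) {
                  return a.countWithAncestors < b.countWithAncestors;
              });
    for (const auto &e : package) {
        std::printf("%s\n", e.name); // parent, child, grandchild
    }
    return 0;
}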
*/ bool SetInternal(const std::string &name); // for Tor addresses bool SetSpecial(const std::string &strName); // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) bool IsIPv4() const; // IPv6 address (not mapped IPv4, not Tor) bool IsIPv6() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) bool IsRFC1918() const; // IPv4 inter-network communications (192.18.0.0/15) bool IsRFC2544() const; // IPv4 ISP-level NAT (100.64.0.0/10) bool IsRFC6598() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, // 203.0.113.0/24) bool IsRFC5737() const; // IPv6 documentation address (2001:0DB8::/32) bool IsRFC3849() const; // IPv4 autoconfig (169.254.0.0/16) bool IsRFC3927() const; // IPv6 6to4 tunnelling (2002::/16) bool IsRFC3964() const; // IPv6 unique local (FC00::/7) bool IsRFC4193() const; // IPv6 Teredo tunnelling (2001::/32) bool IsRFC4380() const; // IPv6 ORCHID (2001:10::/28) bool IsRFC4843() const; // IPv6 autoconfig (FE80::/64) bool IsRFC4862() const; // IPv6 well-known prefix (64:FF9B::/96) bool IsRFC6052() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) bool IsRFC6145() const; bool IsTor() const; bool IsLocal() const; bool IsRoutable() const; bool IsInternal() const; bool IsValid() const; enum Network GetNetwork() const; std::string ToString() const; std::string ToStringIP() const; unsigned int GetByte(int n) const; uint64_t GetHash() const; bool GetInAddr(struct in_addr *pipv4Addr) const; std::vector GetGroup() const; int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const; - CNetAddr(const struct in6_addr &pipv6Addr, const uint32_t scope = 0); + explicit CNetAddr(const struct in6_addr &pipv6Addr, + const uint32_t scope = 0); bool GetIn6Addr(struct in6_addr *pipv6Addr) const; friend bool operator==(const CNetAddr &a, const CNetAddr &b); friend bool operator!=(const CNetAddr &a, const CNetAddr &b) { return !(a == b); } friend bool operator<(const CNetAddr &a, const CNetAddr &b); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(FLATDATA(ip)); } friend class CSubNet; }; class CSubNet { protected: /// Network (base) address CNetAddr network; /// Netmask, in network byte order uint8_t netmask[16]; /// Is this value valid? 
(only used to signal parse errors) bool valid; public: CSubNet(); CSubNet(const CNetAddr &addr, int32_t mask); CSubNet(const CNetAddr &addr, const CNetAddr &mask); // constructor for single ip subnet (/32 or /128) explicit CSubNet(const CNetAddr &addr); bool Match(const CNetAddr &addr) const; std::string ToString() const; bool IsValid() const; friend bool operator==(const CSubNet &a, const CSubNet &b); friend bool operator!=(const CSubNet &a, const CSubNet &b) { return !(a == b); } friend bool operator<(const CSubNet &a, const CSubNet &b); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(network); READWRITE(FLATDATA(netmask)); READWRITE(FLATDATA(valid)); } }; /** A combination of a network address (CNetAddr) and a (TCP) port */ class CService : public CNetAddr { protected: // host order unsigned short port; public: CService(); CService(const CNetAddr &ip, unsigned short port); CService(const struct in_addr &ipv4Addr, unsigned short port); - CService(const struct sockaddr_in &addr); + explicit CService(const struct sockaddr_in &addr); unsigned short GetPort() const; bool GetSockAddr(struct sockaddr *paddr, socklen_t *addrlen) const; bool SetSockAddr(const struct sockaddr *paddr); friend bool operator==(const CService &a, const CService &b); friend bool operator!=(const CService &a, const CService &b) { return !(a == b); } friend bool operator<(const CService &a, const CService &b); std::vector GetKey() const; std::string ToString() const; std::string ToStringPort() const; std::string ToStringIPPort() const; CService(const struct in6_addr &ipv6Addr, unsigned short port); - CService(const struct sockaddr_in6 &addr); + explicit CService(const struct sockaddr_in6 &addr); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(FLATDATA(ip)); unsigned short portN = htons(port); READWRITE(FLATDATA(portN)); if (ser_action.ForRead()) port = ntohs(portN); } }; #endif // BITCOIN_NETADDRESS_H diff --git a/src/netmessagemaker.h b/src/netmessagemaker.h index 976cdb1381..ec3082a90d 100644 --- a/src/netmessagemaker.h +++ b/src/netmessagemaker.h @@ -1,35 +1,35 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NETMESSAGEMAKER_H #define BITCOIN_NETMESSAGEMAKER_H #include "net.h" #include "serialize.h" class CNetMsgMaker { public: - CNetMsgMaker(int nVersionIn) : nVersion(nVersionIn) {} + explicit CNetMsgMaker(int nVersionIn) : nVersion(nVersionIn) {} template CSerializedNetMsg Make(int nFlags, std::string sCommand, Args &&... args) const { CSerializedNetMsg msg; msg.command = std::move(sCommand); CVectorWriter{SER_NETWORK, nFlags | nVersion, msg.data, 0, std::forward(args)...}; return msg; } template CSerializedNetMsg Make(std::string sCommand, Args &&... args) const { return Make(0, std::move(sCommand), std::forward(args)...); } private: const int nVersion; }; #endif // BITCOIN_NETMESSAGEMAKER_H diff --git a/src/policy/fees.h b/src/policy/fees.h index ca0b5827c1..5cb09011e0 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -1,287 +1,287 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#ifndef BITCOIN_POLICYESTIMATOR_H #define BITCOIN_POLICYESTIMATOR_H #include "amount.h" #include "random.h" #include "uint256.h" #include #include #include class CAutoFile; class CFeeRate; class CTxMemPoolEntry; class CTxMemPool; /** \class CBlockPolicyEstimator * The BlockPolicyEstimator is used for estimating the feerate needed for a * transaction to be included in a block within a certain number of blocks. * * At a high level the algorithm works by grouping transactions into buckets * based on having similar feerates and then tracking how long it takes * transactions in the various buckets to be mined. It operates under the * assumption that in general transactions of higher feerate will be included in * blocks before transactions of lower feerate. So for example if you wanted to * know what feerate you should put on a transaction to be included in a block * within the next 5 blocks, you would start by looking at the bucket with the * highest feerate transactions and verifying that a sufficiently high * percentage of them were confirmed within 5 blocks and then you would look at * the next highest feerate bucket, and so on, stopping at the last bucket to * pass the test. The average feerate of transactions in this bucket will give * you an indication of the lowest feerate you can put on a transaction and * still have a sufficiently high chance of being confirmed within your desired * 5 blocks. * * Here is a brief description of the implementation: * When a transaction enters the mempool, we track the height of the block chain * at entry. Whenever a block comes in, we count the number of transactions in * each bucket and the total amount of feerate paid in each bucket. Then we * calculate how many blocks Y it took each transaction to be mined and we track * an array of counters in each bucket for how long it to took transactions to * get confirmed from 1 to a max of 25 and we increment all the counters from Y * up to 25. This is because for any number Z>=Y the transaction was * successfully mined within Z blocks. We want to save a history of this * information, so at any time we have a counter of the total number of * transactions that happened in a given feerate bucket and the total number * that were confirmed in each number 1-25 blocks or less for any bucket. We * save this history by keeping an exponentially decaying moving average of each * one of these stats. Furthermore we also keep track of the number unmined (in * mempool) transactions in each bucket and for how many blocks they have been * outstanding and use that to increase the number of transactions we've seen in * that feerate bucket when calculating an estimate for any number of * confirmations below the number of blocks they've been outstanding. */ /** * We will instantiate an instance of this class to track transactions that were * included in a block. We will lump transactions into a bucket according to * their approximate feerate and then track how long it took for those txs to be * included in a block. * * The tracking of unconfirmed (mempool) transactions is completely independent * of the historical tracking of transactions that have been confirmed in a * block. 
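Editorial aside (not part of the patch): a minimal sketch of the bookkeeping described above, showing how an exponentially decaying moving average of per-bucket counts yields a confirmation success ratio. The numbers are invented and this is not the TxConfirmStats API:

#include <cstdio>

int main() {
    const double decay = 0.998; // cf. DEFAULT_DECAY in this file
    double txCtAvg = 0.0;       // decayed count of txs seen in one bucket
    double confAvg = 0.0;       // decayed count confirmed within the target
    // Feed 100 blocks in which this bucket saw 10 txs, 9 confirmed in time.
    for (int block = 0; block < 100; ++block) {
        txCtAvg = txCtAvg * decay + 10;
        confAvg = confAvg * decay + 9;
    }
    // The ratio is the bucket's success probability (0.9 here), which the
    // estimator compares against a threshold such as MIN_SUCCESS_PCT.
    std::printf("success ratio: %.3f\n", confAvg / txCtAvg);
    return 0;
}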
*/ class TxConfirmStats { private: // Define the buckets we will group transactions into // The upper-bound of the range for the bucket (inclusive) std::vector buckets; // Map of bucket upper-bound to index into all vectors by bucket std::map bucketMap; // For each bucket X: // Count the total # of txs in each bucket // Track the historical moving average of this total over blocks std::vector txCtAvg; // and calculate the total for the current block to update the moving // average std::vector curBlockTxCt; // Count the total # of txs confirmed within Y blocks in each bucket // Track the historical moving average of theses totals over blocks // confAvg[Y][X] std::vector> confAvg; // and calculate the totals for the current block to update the moving // averages // curBlockConf[Y][X] std::vector> curBlockConf; // Sum the total feerate of all tx's in each bucket // Track the historical moving average of this total over blocks std::vector avg; // and calculate the total for the current block to update the moving // average std::vector curBlockVal; // Combine the conf counts with tx counts to calculate the confirmation % // for each Y,X. Combine the total value with the tx counts to calculate the // avg feerate per bucket double decay; // Mempool counts of outstanding transactions // For each bucket X, track the number of transactions in the mempool that // are unconfirmed for each possible confirmation value Y // unconfTxs[Y][X] std::vector> unconfTxs; // transactions still unconfirmed after MAX_CONFIRMS for each bucket std::vector oldUnconfTxs; public: /** * Initialize the data structures. This is called by BlockPolicyEstimator's * constructor with default values. * @param defaultBuckets contains the upper limits for the bucket boundaries * @param maxConfirms max number of confirms to track * @param decay how much to decay the historical moving average per block */ void Initialize(std::vector &defaultBuckets, unsigned int maxConfirms, double decay); /** * Clear the state of the curBlock variables to start counting for the new * block. */ void ClearCurrent(unsigned int nBlockHeight); /** * Record a new transaction data point in the current block stats * @param blocksToConfirm the number of blocks it took this transaction to * confirm * @param val the feerate of the transaction * @warning blocksToConfirm is 1-based and has to be >= 1 */ void Record(int blocksToConfirm, double val); /** Record a new transaction entering the mempool*/ unsigned int NewTx(unsigned int nBlockHeight, double val); /** Remove a transaction from mempool tracking stats*/ void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketIndex); /** * Update our estimates by decaying our historical moving average and * updating with the data gathered from the current block. */ void UpdateMovingAverages(); /** * Calculate a feerate estimate. 
Find the lowest value bucket (or range of * buckets to make sure we have enough data points) whose transactions still * have sufficient likelihood of being confirmed within the target number of * confirmations * @param confTarget target number of confirmations * @param sufficientTxVal required average number of transactions per block * in a bucket range * @param minSuccess the success probability we require * @param requireGreater return the lowest feerate such that all higher * values pass minSuccess OR * return the highest feerate such that all lower values fail * minSuccess * @param nBlockHeight the current block height */ CFeeRate EstimateMedianFeeRate(int confTarget, double sufficientTxVal, double minSuccess, bool requireGreater, unsigned int nBlockHeight); /** Return the max number of confirms we're tracking */ unsigned int GetMaxConfirms() { return confAvg.size(); } /** Write state of estimation data to a file*/ void Write(CAutoFile &fileout); /** * Read saved state of estimation data from a file and replace all internal * data structures and variables with this state. */ void Read(CAutoFile &filein); }; /** Track confirm delays up to 25 blocks, can't estimate beyond that */ static const unsigned int MAX_BLOCK_CONFIRMS = 25; /** Decay of .998 is a half-life of 346 blocks or about 2.4 days */ static const double DEFAULT_DECAY = .998; /** Require greater than 95% of X feerate transactions to be confirmed within Y * blocks for X to be big enough */ static const double MIN_SUCCESS_PCT = .95; /** Require an avg of 1 tx in the combined feerate bucket per block to have stat * significance */ static const double SUFFICIENT_FEETXS = 1; // Minimum and Maximum values for tracking feerates static constexpr Amount MIN_FEERATE(10 * SATOSHI); static const Amount MAX_FEERATE(int64_t(1e7) * SATOSHI); static const Amount INF_FEERATE(MAX_MONEY); // We have to lump transactions into buckets based on feerate, but we want to be // able to give accurate estimates over a large range of potential feerates. // Therefore it makes sense to exponentially space the buckets /** Spacing of FeeRate buckets */ static const double FEE_SPACING = 1.1; /** * We want to be able to estimate feerates that are needed on tx's to be * included in a certain number of blocks. Every time a block is added to the * best chain, this class records stats on the transactions included in that * block */ class CBlockPolicyEstimator { public: /** * Create new BlockPolicyEstimator and initialize stats tracking classes * with default values. */ CBlockPolicyEstimator(); /** Process all the transactions that have been included in a block */ void processBlock(unsigned int nBlockHeight, std::vector &entries); /** Process a transaction confirmed in a block*/ bool processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry *entry); /** Process a transaction accepted to the mempool*/ void processTransaction(const CTxMemPoolEntry &entry, bool validFeeEstimate); /** Remove a transaction from the mempool tracking stats*/ bool removeTx(uint256 hash); /** Return a feerate estimate */ CFeeRate estimateFee(int confTarget); /** Estimate feerate needed to get be included in a block within * confTarget blocks. If no answer can be given at confTarget, return an * estimate at the lowest target where one can be given. 
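Editorial aside (not part of the patch): a simplified, standalone illustration of the bucket scan EstimateMedianFeeRate describes above, walking from the highest-feerate bucket downward and keeping the lowest bucket whose success rate still meets minSuccess. The real code also combines buckets until sufficientTxVal is reached and returns a median feerate; the data here is invented:

#include <cstdio>
#include <vector>

struct Bucket {
    double feerate;   // upper bound of the bucket, satoshis per kB
    double confirmed; // decayed count confirmed within the target
    double total;     // decayed count of all txs seen in the bucket
};

int main() {
    // Buckets in ascending feerate order; higher feerates confirm more often.
    std::vector<Bucket> buckets = {
        {1000, 50, 100}, {2000, 70, 100}, {5000, 96, 100}, {10000, 99, 100}};
    const double minSuccess = 0.95; // cf. MIN_SUCCESS_PCT
    double best = -1;
    for (auto it = buckets.rbegin(); it != buckets.rend(); ++it) {
        if (it->confirmed / it->total < minSuccess) {
            break; // lower buckets only get worse in this toy data
        }
        best = it->feerate; // lowest feerate seen so far that still passes
    }
    std::printf("estimate: %.0f satoshis per kB\n", best); // 5000
    return 0;
}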
*/ CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool &pool); /** Write estimation data to a file */ void Write(CAutoFile &fileout); /** Read estimation data from a file */ void Read(CAutoFile &filein, int nFileVersion); private: //!< Passed to constructor to avoid dependency on main unsigned int nBestSeenHeight; struct TxStatsInfo { unsigned int blockHeight; unsigned int bucketIndex; TxStatsInfo() : blockHeight(0), bucketIndex(0) {} }; // map of txids to information about that transaction std::map mapMemPoolTxs; /** Classes to track historical data on transaction confirmations */ TxConfirmStats feeStats; unsigned int trackedTxs; unsigned int untrackedTxs; }; class FeeFilterRounder { public: /** Create new FeeFilterRounder */ - FeeFilterRounder(const CFeeRate &minIncrementalFee); + explicit FeeFilterRounder(const CFeeRate &minIncrementalFee); /** Quantize a minimum fee for privacy purpose before broadcast **/ Amount round(const Amount currentMinFee); private: std::set feeset; FastRandomContext insecure_rand; }; #endif /*BITCOIN_POLICYESTIMATOR_H */ diff --git a/src/primitives/block.h b/src/primitives/block.h index 162553bfac..64e4d73fbb 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -1,130 +1,131 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_PRIMITIVES_BLOCK_H #define BITCOIN_PRIMITIVES_BLOCK_H #include "primitives/transaction.h" #include "serialize.h" #include "uint256.h" /** * Nodes collect new transactions into a block, hash them into a hash tree, and * scan through nonce values to make the block's hash satisfy proof-of-work * requirements. When they solve the proof-of-work, they broadcast the block to * everyone and the block is added to the block chain. The first transaction in * the block is a special one that creates a new coin owned by the creator of * the block. 
*/ class CBlockHeader { public: // header int32_t nVersion; uint256 hashPrevBlock; uint256 hashMerkleRoot; uint32_t nTime; uint32_t nBits; uint32_t nNonce; CBlockHeader() { SetNull(); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(hashPrevBlock); READWRITE(hashMerkleRoot); READWRITE(nTime); READWRITE(nBits); READWRITE(nNonce); } void SetNull() { nVersion = 0; hashPrevBlock.SetNull(); hashMerkleRoot.SetNull(); nTime = 0; nBits = 0; nNonce = 0; } bool IsNull() const { return (nBits == 0); } uint256 GetHash() const; int64_t GetBlockTime() const { return (int64_t)nTime; } }; class CBlock : public CBlockHeader { public: // network and disk std::vector vtx; // memory only mutable bool fChecked; CBlock() { SetNull(); } CBlock(const CBlockHeader &header) { SetNull(); *((CBlockHeader *)this) = header; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(*(CBlockHeader *)this); READWRITE(vtx); } void SetNull() { CBlockHeader::SetNull(); vtx.clear(); fChecked = false; } CBlockHeader GetBlockHeader() const { CBlockHeader block; block.nVersion = nVersion; block.hashPrevBlock = hashPrevBlock; block.hashMerkleRoot = hashMerkleRoot; block.nTime = nTime; block.nBits = nBits; block.nNonce = nNonce; return block; } std::string ToString() const; }; /** * Describes a place in the block chain to another node such that if the other * node doesn't have the same branch, it can find a recent common trunk. The * further back it is, the further before the fork it may be. */ struct CBlockLocator { std::vector vHave; CBlockLocator() {} - CBlockLocator(const std::vector &vHaveIn) : vHave(vHaveIn) {} + explicit CBlockLocator(const std::vector &vHaveIn) + : vHave(vHaveIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion); READWRITE(vHave); } void SetNull() { vHave.clear(); } bool IsNull() const { return vHave.empty(); } }; #endif // BITCOIN_PRIMITIVES_BLOCK_H diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index daa18cbede..ec4a414828 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -1,391 +1,391 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_PRIMITIVES_TRANSACTION_H #define BITCOIN_PRIMITIVES_TRANSACTION_H #include "amount.h" #include "feerate.h" #include "primitives/txid.h" #include "script/script.h" #include "serialize.h" static const int SERIALIZE_TRANSACTION = 0x00; /** * An outpoint - a combination of a transaction hash and an index n into its * vout. 
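Editorial aside (not part of the patch): the CBlockLocator comment above only describes intent. One common construction, shown here purely as a hedged sketch (it is not defined in this header), lists recent heights densely and then steps back exponentially toward genesis:

#include <cstdio>
#include <vector>

int main() {
    int height = 1000; // hypothetical tip height
    std::vector<int> locatorHeights;
    int step = 1;
    while (height > 0) {
        locatorHeights.push_back(height);
        if (locatorHeights.size() > 10) {
            step *= 2; // sparser the further back we go
        }
        height -= step;
    }
    locatorHeights.push_back(0); // always end at the genesis block
    for (int h : locatorHeights) {
        std::printf("%d ", h);
    }
    std::printf("\n");
    return 0;
}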
*/ class COutPoint { private: TxId txid; uint32_t n; public: COutPoint() : txid(), n(-1) {} COutPoint(uint256 txidIn, uint32_t nIn) : txid(TxId(txidIn)), n(nIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(txid); READWRITE(n); } bool IsNull() const { return txid.IsNull() && n == uint32_t(-1); } const TxId &GetTxId() const { return txid; } uint32_t GetN() const { return n; } friend bool operator<(const COutPoint &a, const COutPoint &b) { int cmp = a.txid.Compare(b.txid); return cmp < 0 || (cmp == 0 && a.n < b.n); } friend bool operator==(const COutPoint &a, const COutPoint &b) { return (a.txid == b.txid && a.n == b.n); } friend bool operator!=(const COutPoint &a, const COutPoint &b) { return !(a == b); } std::string ToString() const; }; /** * An input of a transaction. It contains the location of the previous * transaction's output that it claims and a signature that matches the output's * public key. */ class CTxIn { public: COutPoint prevout; CScript scriptSig; uint32_t nSequence; /** * Setting nSequence to this value for every input in a transaction disables * nLockTime. */ static const uint32_t SEQUENCE_FINAL = 0xffffffff; /* Below flags apply in the context of BIP 68*/ /** * If this flag set, CTxIn::nSequence is NOT interpreted as a relative * lock-time. */ static const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31); /** * If CTxIn::nSequence encodes a relative lock-time and this flag is set, * the relative lock-time has units of 512 seconds, otherwise it specifies * blocks with a granularity of 1. */ static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22); /** * If CTxIn::nSequence encodes a relative lock-time, this mask is applied to * extract that lock-time from the sequence field. */ static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff; /** * In order to use the same number of bits to encode roughly the same * wall-clock duration, and because blocks are naturally limited to occur * every 600s on average, the minimum granularity for time-based relative * lock-time is fixed at 512 seconds. Converting from CTxIn::nSequence to * seconds is performed by multiplying by 512 = 2^9, or equivalently * shifting up by 9 bits. */ static const int SEQUENCE_LOCKTIME_GRANULARITY = 9; CTxIn() { nSequence = SEQUENCE_FINAL; } explicit CTxIn(COutPoint prevoutIn, CScript scriptSigIn = CScript(), uint32_t nSequenceIn = SEQUENCE_FINAL) : prevout(prevoutIn), scriptSig(scriptSigIn), nSequence(nSequenceIn) {} CTxIn(TxId prevTxId, uint32_t nOut, CScript scriptSigIn = CScript(), uint32_t nSequenceIn = SEQUENCE_FINAL) : CTxIn(COutPoint(prevTxId, nOut), scriptSigIn, nSequenceIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(prevout); READWRITE(scriptSig); READWRITE(nSequence); } friend bool operator==(const CTxIn &a, const CTxIn &b) { return (a.prevout == b.prevout && a.scriptSig == b.scriptSig && a.nSequence == b.nSequence); } friend bool operator!=(const CTxIn &a, const CTxIn &b) { return !(a == b); } std::string ToString() const; }; /** * An output of a transaction. It contains the public key that the next input * must be able to sign with to claim it. 
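Editorial aside (not part of the patch): a standalone decode of a BIP68 relative lock-time using the CTxIn::nSequence constants documented above (constant values copied from this header; the sample sequence is invented):

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1u << 31);
    const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1u << 22);
    const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff;
    const int SEQUENCE_LOCKTIME_GRANULARITY = 9;

    uint32_t nSequence = SEQUENCE_LOCKTIME_TYPE_FLAG | 8; // 8 * 512 seconds
    if (nSequence & SEQUENCE_LOCKTIME_DISABLE_FLAG) {
        std::printf("relative lock-time disabled\n");
    } else if (nSequence & SEQUENCE_LOCKTIME_TYPE_FLAG) {
        uint32_t seconds = (nSequence & SEQUENCE_LOCKTIME_MASK)
                           << SEQUENCE_LOCKTIME_GRANULARITY;
        std::printf("time-based lock: %u seconds\n", seconds); // 4096
    } else {
        std::printf("height-based lock: %u blocks\n",
                    nSequence & SEQUENCE_LOCKTIME_MASK);
    }
    return 0;
}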
*/ class CTxOut { public: Amount nValue; CScript scriptPubKey; CTxOut() { SetNull(); } CTxOut(Amount nValueIn, CScript scriptPubKeyIn) : nValue(nValueIn), scriptPubKey(scriptPubKeyIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(nValue); READWRITE(scriptPubKey); } void SetNull() { nValue = -SATOSHI; scriptPubKey.clear(); } bool IsNull() const { return nValue == -SATOSHI; } Amount GetDustThreshold(const CFeeRate &minRelayTxFee) const { /** * "Dust" is defined in terms of CTransaction::minRelayTxFee, which has * units satoshis-per-kilobyte. If you'd pay more than 1/3 in fees to * spend something, then we consider it dust. A typical spendable * non-segwit txout is 34 bytes big, and will need a CTxIn of at least * 148 bytes to spend: so dust is a spendable txout less than * 546*minRelayTxFee/1000 (in satoshis). A typical spendable segwit * txout is 31 bytes big, and will need a CTxIn of at least 67 bytes to * spend: so dust is a spendable txout less than 294*minRelayTxFee/1000 * (in satoshis). */ if (scriptPubKey.IsUnspendable()) { return Amount::zero(); } size_t nSize = GetSerializeSize(*this, SER_DISK, 0); // the 148 mentioned above nSize += (32 + 4 + 1 + 107 + 4); return 3 * minRelayTxFee.GetFee(nSize); } bool IsDust(const CFeeRate &minRelayTxFee) const { return (nValue < GetDustThreshold(minRelayTxFee)); } friend bool operator==(const CTxOut &a, const CTxOut &b) { return (a.nValue == b.nValue && a.scriptPubKey == b.scriptPubKey); } friend bool operator!=(const CTxOut &a, const CTxOut &b) { return !(a == b); } std::string ToString() const; }; class CMutableTransaction; /** * Basic transaction serialization format: * - int32_t nVersion * - std::vector vin * - std::vector vout * - uint32_t nLockTime */ template inline void UnserializeTransaction(TxType &tx, Stream &s) { s >> tx.nVersion; tx.vin.clear(); tx.vout.clear(); /* Try to read the vin. In case the dummy is there, this will be read as an * empty vector. */ s >> tx.vin; /* We read a non-empty vin. Assume a normal vout follows. */ s >> tx.vout; s >> tx.nLockTime; } template inline void SerializeTransaction(const TxType &tx, Stream &s) { s << tx.nVersion; s << tx.vin; s << tx.vout; s << tx.nLockTime; } /** * The basic transaction that is broadcasted on the network and contained in * blocks. A transaction can contain multiple inputs and outputs. */ class CTransaction { public: // Default transaction version. static const int32_t CURRENT_VERSION = 2; // Changing the default transaction version requires a two step process: // first adapting relay policy by bumping MAX_STANDARD_VERSION, and then // later date bumping the default CURRENT_VERSION at which point both // CURRENT_VERSION and MAX_STANDARD_VERSION will be equal. static const int32_t MAX_STANDARD_VERSION = 2; // The local variables are made const to prevent unintended modification // without updating the cached hash value. However, CTransaction is not // actually immutable; deserialization and assignment are implemented, // and bypass the constness. This is safe, as they update the entire // structure, including the hash. const int32_t nVersion; const std::vector vin; const std::vector vout; const uint32_t nLockTime; private: /** Memory only. */ const uint256 hash; uint256 ComputeHash() const; public: /** Construct a CTransaction that qualifies as IsNull() */ CTransaction(); /** Convert a CMutableTransaction into a CTransaction. 
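Editorial aside (not part of the patch): the dust arithmetic in GetDustThreshold above, worked through for a typical P2PKH output at an assumed minRelayTxFee of 1000 satoshis per kB:

#include <cstdio>

int main() {
    const int outputSize = 34;                  // typical non-segwit txout
    const int inputSize = 32 + 4 + 1 + 107 + 4; // the "148 bytes" to spend it
    const int minRelayFeePerKB = 1000;          // assumed, satoshis per 1000 bytes
    const int fee = (outputSize + inputSize) * minRelayFeePerKB / 1000; // 182
    std::printf("dust threshold: %d satoshis\n", 3 * fee);              // 546
    return 0;
}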
*/ explicit CTransaction(const CMutableTransaction &tx); explicit CTransaction(CMutableTransaction &&tx); template inline void Serialize(Stream &s) const { SerializeTransaction(*this, s); } /** * This deserializing constructor is provided instead of an Unserialize * method. Unserialize is not possible, since it would require overwriting * const fields. */ template CTransaction(deserialize_type, Stream &s) : CTransaction(CMutableTransaction(deserialize, s)) {} bool IsNull() const { return vin.empty() && vout.empty(); } const TxId GetId() const { return TxId(hash); } const TxHash GetHash() const { return TxHash(hash); } // Return sum of txouts. Amount GetValueOut() const; // GetValueIn() is a method on CCoinsViewCache, because // inputs must be known to compute value in. // Compute priority, given priority of inputs and (optionally) tx size double ComputePriority(double dPriorityInputs, unsigned int nTxSize = 0) const; // Compute modified tx size for priority calculation (optionally given tx // size) unsigned int CalculateModifiedSize(unsigned int nTxSize = 0) const; // Computes an adjusted tx size so that the UTXIs are billed partially // upfront. size_t GetBillableSize() const; /** * Get the total transaction size in bytes. * @return Total transaction size in bytes */ unsigned int GetTotalSize() const; bool IsCoinBase() const { return (vin.size() == 1 && vin[0].prevout.IsNull()); } friend bool operator==(const CTransaction &a, const CTransaction &b) { return a.hash == b.hash; } friend bool operator!=(const CTransaction &a, const CTransaction &b) { return a.hash != b.hash; } std::string ToString() const; }; /** * A mutable version of CTransaction. */ class CMutableTransaction { public: int32_t nVersion; std::vector vin; std::vector vout; uint32_t nLockTime; CMutableTransaction(); CMutableTransaction(const CTransaction &tx); template inline void Serialize(Stream &s) const { SerializeTransaction(*this, s); } template inline void Unserialize(Stream &s) { UnserializeTransaction(*this, s); } template CMutableTransaction(deserialize_type, Stream &s) { Unserialize(s); } /** * Compute the id and hash of this CMutableTransaction. This is computed on * the fly, as opposed to GetId() and GetHash() in CTransaction, which uses * a cached result. 
*/ TxId GetId() const; TxHash GetHash() const; friend bool operator==(const CMutableTransaction &a, const CMutableTransaction &b) { return a.GetId() == b.GetId(); } }; typedef std::shared_ptr CTransactionRef; static inline CTransactionRef MakeTransactionRef() { return std::make_shared(); } template static inline CTransactionRef MakeTransactionRef(Tx &&txIn) { return std::make_shared(std::forward(txIn)); } /** Precompute sighash midstate to avoid quadratic hashing */ struct PrecomputedTransactionData { uint256 hashPrevouts, hashSequence, hashOutputs; PrecomputedTransactionData() : hashPrevouts(), hashSequence(), hashOutputs() {} PrecomputedTransactionData(const PrecomputedTransactionData &txdata) : hashPrevouts(txdata.hashPrevouts), hashSequence(txdata.hashSequence), hashOutputs(txdata.hashOutputs) {} - PrecomputedTransactionData(const CTransaction &tx); + explicit PrecomputedTransactionData(const CTransaction &tx); }; #endif // BITCOIN_PRIMITIVES_TRANSACTION_H diff --git a/src/protocol.h b/src/protocol.h index 456fde4283..897e23d502 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -1,462 +1,462 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef __cplusplus #error This header can only be compiled as C++. #endif #ifndef BITCOIN_PROTOCOL_H #define BITCOIN_PROTOCOL_H #include "netaddress.h" #include "serialize.h" #include "uint256.h" #include "version.h" #include #include #include class Config; /** * Maximum length of incoming protocol messages (Currently 2MB). * NB: Messages propagating block content are not subject to this limit. */ static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024; /** * Message header. * (4) message start. * (12) command. * (4) size. * (4) checksum. */ class CMessageHeader { public: enum { MESSAGE_START_SIZE = 4, COMMAND_SIZE = 12, MESSAGE_SIZE_SIZE = 4, CHECKSUM_SIZE = 4, MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE, CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE, HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE }; typedef std::array MessageMagic; - CMessageHeader(const MessageMagic &pchMessageStartIn); + explicit CMessageHeader(const MessageMagic &pchMessageStartIn); CMessageHeader(const MessageMagic &pchMessageStartIn, const char *pszCommand, unsigned int nMessageSizeIn); std::string GetCommand() const; bool IsValid(const Config &config) const; bool IsValidWithoutConfig(const MessageMagic &magic) const; bool IsOversized(const Config &config) const; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(FLATDATA(pchMessageStart)); READWRITE(FLATDATA(pchCommand)); READWRITE(nMessageSize); READWRITE(FLATDATA(pchChecksum)); } MessageMagic pchMessageStart; std::array pchCommand; uint32_t nMessageSize; uint8_t pchChecksum[CHECKSUM_SIZE]; }; /** * Bitcoin protocol message types. When adding new message types, don't forget * to update allNetMessageTypes in protocol.cpp. */ namespace NetMsgType { /** * The version message provides information about the transmitting node to the * receiving node at the beginning of a connection. 
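Editorial aside (not part of the patch): the CMessageHeader field sizes above restated as plain arithmetic, confirming the offsets and the fixed 24-byte header:

#include <cstdio>

int main() {
    const int MESSAGE_START_SIZE = 4, COMMAND_SIZE = 12, MESSAGE_SIZE_SIZE = 4,
              CHECKSUM_SIZE = 4;
    const int MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE;   // 16
    const int CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE; // 20
    std::printf("size offset %d, checksum offset %d, header %d bytes\n",
                MESSAGE_SIZE_OFFSET, CHECKSUM_OFFSET,
                CHECKSUM_OFFSET + CHECKSUM_SIZE); // 16, 20, 24
    return 0;
}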
* @see https://bitcoin.org/en/developer-reference#version */ extern const char *VERSION; /** * The verack message acknowledges a previously-received version message, * informing the connecting node that it can begin to send other messages. * @see https://bitcoin.org/en/developer-reference#verack */ extern const char *VERACK; /** * The addr (IP address) message relays connection information for peers on the * network. * @see https://bitcoin.org/en/developer-reference#addr */ extern const char *ADDR; /** * The inv message (inventory message) transmits one or more inventories of * objects known to the transmitting peer. * @see https://bitcoin.org/en/developer-reference#inv */ extern const char *INV; /** * The getdata message requests one or more data objects from another node. * @see https://bitcoin.org/en/developer-reference#getdata */ extern const char *GETDATA; /** * The merkleblock message is a reply to a getdata message which requested a * block using the inventory type MSG_MERKLEBLOCK. * @since protocol version 70001 as described by BIP37. * @see https://bitcoin.org/en/developer-reference#merkleblock */ extern const char *MERKLEBLOCK; /** * The getblocks message requests an inv message that provides block header * hashes starting from a particular point in the block chain. * @see https://bitcoin.org/en/developer-reference#getblocks */ extern const char *GETBLOCKS; /** * The getheaders message requests a headers message that provides block * headers starting from a particular point in the block chain. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#getheaders */ extern const char *GETHEADERS; /** * The tx message transmits a single transaction. * @see https://bitcoin.org/en/developer-reference#tx */ extern const char *TX; /** * The headers message sends one or more block headers to a node which * previously requested certain headers with a getheaders message. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#headers */ extern const char *HEADERS; /** * The block message transmits a single serialized block. * @see https://bitcoin.org/en/developer-reference#block */ extern const char *BLOCK; /** * The getaddr message requests an addr message from the receiving node, * preferably one with lots of IP addresses of other receiving nodes. * @see https://bitcoin.org/en/developer-reference#getaddr */ extern const char *GETADDR; /** * The mempool message requests the TXIDs of transactions that the receiving * node has verified as valid but which have not yet appeared in a block. * @since protocol version 60002. * @see https://bitcoin.org/en/developer-reference#mempool */ extern const char *MEMPOOL; /** * The ping message is sent periodically to help confirm that the receiving * peer is still connected. * @see https://bitcoin.org/en/developer-reference#ping */ extern const char *PING; /** * The pong message replies to a ping message, proving to the pinging node that * the ponging node is still alive. * @since protocol version 60001 as described by BIP31. * @see https://bitcoin.org/en/developer-reference#pong */ extern const char *PONG; /** * The notfound message is a reply to a getdata message which requested an * object the receiving node does not have available for relay. * @ince protocol version 70001. 
* @see https://bitcoin.org/en/developer-reference#notfound */ extern const char *NOTFOUND; /** * The filterload message tells the receiving peer to filter all relayed * transactions and requested merkle blocks through the provided filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterload */ extern const char *FILTERLOAD; /** * The filteradd message tells the receiving peer to add a single element to a * previously-set bloom filter, such as a new public key. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filteradd */ extern const char *FILTERADD; /** * The filterclear message tells the receiving peer to remove a previously-set * bloom filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterclear */ extern const char *FILTERCLEAR; /** * The reject message informs the receiving node that one of its previous * messages has been rejected. * @since protocol version 70002 as described by BIP61. * @see https://bitcoin.org/en/developer-reference#reject */ extern const char *REJECT; /** * Indicates that a node prefers to receive new block announcements via a * "headers" message rather than an "inv". * @since protocol version 70012 as described by BIP130. * @see https://bitcoin.org/en/developer-reference#sendheaders */ extern const char *SENDHEADERS; /** * The feefilter message tells the receiving peer not to inv us any txs * which do not meet the specified min fee rate. * @since protocol version 70013 as described by BIP133 */ extern const char *FEEFILTER; /** * Contains a 1-byte bool and 8-byte LE version number. * Indicates that a node is willing to provide blocks via "cmpctblock" messages. * May indicate that a node prefers to receive new block announcements via a * "cmpctblock" message rather than an "inv", depending on message contents. * @since protocol version 70014 as described by BIP 152 */ extern const char *SENDCMPCT; /** * Contains a CBlockHeaderAndShortTxIDs object - providing a header and * list of "short txids". * @since protocol version 70014 as described by BIP 152 */ extern const char *CMPCTBLOCK; /** * Contains a BlockTransactionsRequest * Peer should respond with "blocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *GETBLOCKTXN; /** * Contains a BlockTransactions. * Sent in response to a "getblocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *BLOCKTXN; /** * Contains an AvalanchePoll. * Peer should respond with "avaresponse" message. */ extern const char *AVAPOLL; /** * Contains an AvalancheResponse. * Sent in response to a "avapoll" message. */ extern const char *AVARESPONSE; /** * Indicate if the message is used to transmit the content of a block. * These messages can be significantly larger than usual messages and therefore * may need to be processed differently. */ bool IsBlockLike(const std::string &strCommand); }; // namespace NetMsgType /* Get a vector of all valid message types (see above) */ const std::vector &getAllNetMessageTypes(); /** * nServices flags. 
*/ enum ServiceFlags : uint64_t { // Nothing NODE_NONE = 0, // NODE_NETWORK means that the node is capable of serving the block chain. // It is currently set by all Bitcoin ABC nodes, and is unset by SPV clients // or other peers that just want network services but don't provide them. NODE_NETWORK = (1 << 0), // NODE_GETUTXO means the node is capable of responding to the getutxo // protocol request. Bitcoin ABC does not support this but a patch set // called Bitcoin XT does. See BIP 64 for details on how this is // implemented. NODE_GETUTXO = (1 << 1), // NODE_BLOOM means the node is capable and willing to handle bloom-filtered // connections. Bitcoin ABC nodes used to support this by default, without // advertising this bit, but no longer do as of protocol version 70011 (= // NO_BLOOM_VERSION) NODE_BLOOM = (1 << 2), // NODE_XTHIN means the node supports Xtreme Thinblocks. If this is turned // off then the node will not service nor make xthin requests. NODE_XTHIN = (1 << 4), // NODE_BITCOIN_CASH means the node supports Bitcoin Cash and the // associated consensus rule changes. // This service bit is intended to be used prior until some time after the // UAHF activation when the Bitcoin Cash network has adequately separated. // TODO: remove (free up) the NODE_BITCOIN_CASH service bit once no longer // needed. NODE_BITCOIN_CASH = (1 << 5), // Bits 24-31 are reserved for temporary experiments. Just pick a bit that // isn't getting used, or one not being used much, and notify the // bitcoin-development mailing list. Remember that service bits are just // unauthenticated advertisements, so your code must be robust against // collisions and other cases where nodes may be advertising a service they // do not actually support. Other service bits should be allocated via the // BIP process. // NODE_AVALANCHE means the node supports Bitcoin Cash's avalanche // preconsensus mechanism. NODE_AVALANCHE = (1 << 24), }; /** * Gets the set of service flags which are "desirable" for a given peer. * * These are the flags which are required for a peer to support for them * to be "interesting" to us, ie for us to wish to use one of our few * outbound connection slots for or for us to wish to prioritize keeping * their connection around. * * Relevant service flags may be peer- and state-specific in that the * version of the peer may determine which flags are required (eg in the * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which * case NODE_NETWORK_LIMITED suffices). * * Thus, generally, avoid calling with peerServices == NODE_NONE, unless * state-specific flags must absolutely be avoided. When called with * peerServices == NODE_NONE, the returned desirable service flags are * guaranteed to not change dependant on state - ie they are suitable for * use when describing peers which we know to be desirable, but for which * we do not have a confirmed set of service flags. * * If the NODE_NONE return value is changed, contrib/seeds/makeseeds.py * should be updated appropriately to filter for the same nodes. */ static ServiceFlags GetDesirableServiceFlags(ServiceFlags services) { return ServiceFlags(NODE_NETWORK); } /** * A shortcut for (services & GetDesirableServiceFlags(services)) * == GetDesirableServiceFlags(services), ie determines whether the given * set of service flags are sufficient for a peer to be "relevant". 
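Editorial aside (not part of the patch): HasAllDesirableServiceFlags below implements the masked comparison described above as !(desirable & ~services). A small standalone check of that equivalence over a few invented flag combinations:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t NODE_NETWORK = 1 << 0;
    const uint64_t NODE_BLOOM = 1 << 2;
    const uint64_t desirable = NODE_NETWORK;
    const uint64_t tests[] = {0, NODE_BLOOM, NODE_NETWORK,
                              NODE_NETWORK | NODE_BLOOM};
    for (uint64_t services : tests) {
        bool maskedEq = (services & desirable) == desirable;
        bool negatedForm = !(desirable & ~services);
        assert(maskedEq == negatedForm);
        std::printf("services=%llu relevant=%d\n",
                    (unsigned long long)services, maskedEq);
    }
    return 0;
}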
*/ static inline bool HasAllDesirableServiceFlags(ServiceFlags services) { return !(GetDesirableServiceFlags(services) & (~services)); } /** * Checks if a peer with the given service flags may be capable of having a * robust address-storage DB. Currently an alias for checking NODE_NETWORK. */ static inline bool MayHaveUsefulAddressDB(ServiceFlags services) { return services & NODE_NETWORK; } /** * A CService with information about it as peer. */ class CAddress : public CService { public: CAddress(); explicit CAddress(CService ipIn, ServiceFlags nServicesIn); void Init(); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { if (ser_action.ForRead()) Init(); int nVersion = s.GetVersion(); if (s.GetType() & SER_DISK) READWRITE(nVersion); if ((s.GetType() & SER_DISK) || (nVersion >= CADDR_TIME_VERSION && !(s.GetType() & SER_GETHASH))) READWRITE(nTime); uint64_t nServicesInt = nServices; READWRITE(nServicesInt); nServices = (ServiceFlags)nServicesInt; READWRITE(*(CService *)this); } // TODO: make private (improves encapsulation) public: ServiceFlags nServices; // disk and network only unsigned int nTime; }; /** getdata message type flags */ const uint32_t MSG_TYPE_MASK = 0xffffffff >> 3; /** getdata / inv message types. * These numbers are defined by the protocol. When adding a new value, be sure * to mention it in the respective BIP. */ enum GetDataMsg { UNDEFINED = 0, MSG_TX = 1, MSG_BLOCK = 2, // The following can only occur in getdata. Invs always use TX or BLOCK. //!< Defined in BIP37 MSG_FILTERED_BLOCK = 3, //!< Defined in BIP152 MSG_CMPCT_BLOCK = 4, }; /** * Inv(ventory) message data. * Intended as non-ambiguous identifier of objects (eg. transactions, blocks) * held by peers. */ class CInv { public: // TODO: make private (improves encapsulation) uint32_t type; uint256 hash; public: CInv() : type(0), hash() {} CInv(uint32_t typeIn, const uint256 &hashIn) : type(typeIn), hash(hashIn) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(type); READWRITE(hash); } friend bool operator<(const CInv &a, const CInv &b) { return a.type < b.type || (a.type == b.type && a.hash < b.hash); } std::string GetCommand() const; std::string ToString() const; uint32_t GetKind() const { return type & MSG_TYPE_MASK; } bool IsTx() const { auto k = GetKind(); return k == MSG_TX; } bool IsSomeBlock() const { auto k = GetKind(); return k == MSG_BLOCK || k == MSG_FILTERED_BLOCK || k == MSG_CMPCT_BLOCK; } }; #endif // BITCOIN_PROTOCOL_H diff --git a/src/pubkey.h b/src/pubkey.h index 99b91c523e..2dda620485 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -1,216 +1,218 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
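Editorial aside (not part of the patch), referring back to CInv in protocol.h just above: GetKind() keeps only the low 29 bits of the type field via MSG_TYPE_MASK. A trivial standalone illustration with an invented reserved bit:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t MSG_TYPE_MASK = 0xffffffff >> 3;
    const uint32_t type = (1u << 31) | 2; // a reserved high bit + MSG_BLOCK
    std::printf("kind = %u\n", type & MSG_TYPE_MASK); // prints 2
    return 0;
}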
#ifndef BITCOIN_PUBKEY_H #define BITCOIN_PUBKEY_H #include "hash.h" #include "serialize.h" #include "uint256.h" #include #include #include /** * secp256k1: * const unsigned int PRIVATE_KEY_SIZE = 279; * const unsigned int PUBLIC_KEY_SIZE = 65; * const unsigned int SIGNATURE_SIZE = 72; * * see www.keylength.com * script supports up to 75 for single byte push */ const unsigned int BIP32_EXTKEY_SIZE = 74; /** A reference to a CKey: the Hash160 of its serialized public key */ class CKeyID : public uint160 { public: CKeyID() : uint160() {} - CKeyID(const uint160 &in) : uint160(in) {} + explicit CKeyID(const uint160 &in) : uint160(in) {} }; typedef uint256 ChainCode; /** An encapsulated public key. */ class CPubKey { private: /** * Just store the serialized data. * Its length can very cheaply be computed from the first byte. */ uint8_t vch[65]; //! Compute the length of a pubkey with a given first byte. static unsigned int GetLen(uint8_t chHeader) { if (chHeader == 2 || chHeader == 3) return 33; if (chHeader == 4 || chHeader == 6 || chHeader == 7) return 65; return 0; } //! Set this key data to be invalid void Invalidate() { vch[0] = 0xFF; } public: //! Construct an invalid public key. CPubKey() { Invalidate(); } //! Initialize a public key using begin/end iterators to byte data. template void Set(const T pbegin, const T pend) { int len = pend == pbegin ? 0 : GetLen(pbegin[0]); if (len && len == (pend - pbegin)) memcpy(vch, (uint8_t *)&pbegin[0], len); else Invalidate(); } //! Construct a public key using begin/end iterators to byte data. template CPubKey(const T pbegin, const T pend) { Set(pbegin, pend); } //! Construct a public key from a byte vector. - CPubKey(const std::vector &_vch) { Set(_vch.begin(), _vch.end()); } + explicit CPubKey(const std::vector &_vch) { + Set(_vch.begin(), _vch.end()); + } //! Simple read-only vector-like interface to the pubkey data. unsigned int size() const { return GetLen(vch[0]); } const uint8_t *begin() const { return vch; } const uint8_t *end() const { return vch + size(); } const uint8_t &operator[](unsigned int pos) const { return vch[pos]; } //! Comparator implementation. friend bool operator==(const CPubKey &a, const CPubKey &b) { return a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) == 0; } friend bool operator!=(const CPubKey &a, const CPubKey &b) { return !(a == b); } friend bool operator<(const CPubKey &a, const CPubKey &b) { return a.vch[0] < b.vch[0] || (a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) < 0); } //! Implement serialization, as if this was a byte vector. template void Serialize(Stream &s) const { unsigned int len = size(); ::WriteCompactSize(s, len); s.write((char *)vch, len); } template void Unserialize(Stream &s) { unsigned int len = ::ReadCompactSize(s); if (len <= 65) { s.read((char *)vch, len); } else { // invalid pubkey, skip available data char dummy; while (len--) s.read(&dummy, 1); Invalidate(); } } //! Get the KeyID of this public key (hash of its serialization) CKeyID GetID() const { return CKeyID(Hash160(vch, vch + size())); } //! Get the 256-bit hash of this public key. uint256 GetHash() const { return Hash(vch, vch + size()); } /* * Check syntactic correctness. * * Note that this is consensus critical as CheckSig() calls it! */ bool IsValid() const { return size() > 0; } //! fully validate whether this is a valid public key (more expensive than //! IsValid()) bool IsFullyValid() const; //! Check whether this is a compressed public key. 
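Editorial aside (not part of the patch): CPubKey::GetLen above derives the serialized key length from the first byte alone. Restated as a standalone helper (same rule; the function name here is hypothetical):

#include <cstdint>
#include <cstdio>

static unsigned int PubKeyLen(uint8_t header) {
    if (header == 2 || header == 3) return 33;                // compressed
    if (header == 4 || header == 6 || header == 7) return 65; // uncompressed
    return 0;                                                 // invalid
}

int main() {
    std::printf("%u %u %u\n", PubKeyLen(0x02), PubKeyLen(0x04), PubKeyLen(0xff));
    return 0;
}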
bool IsCompressed() const { return size() == 33; } /** * Verify a DER signature (~72 bytes). * If this public key is not fully valid, the return value will be false. */ bool Verify(const uint256 &hash, const std::vector &vchSig) const; /** * Check whether a signature is normalized (lower-S). */ static bool CheckLowS(const boost::sliced_range> &vchSig); static bool CheckLowS(const std::vector &vchSig) { return CheckLowS(vchSig | boost::adaptors::sliced(0, vchSig.size())); } //! Recover a public key from a compact signature. bool RecoverCompact(const uint256 &hash, const std::vector &vchSig); //! Turn this public key into an uncompressed public key. bool Decompress(); //! Derive BIP32 child pubkey. bool Derive(CPubKey &pubkeyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode &cc) const; }; struct CExtPubKey { uint8_t nDepth; uint8_t vchFingerprint[4]; unsigned int nChild; ChainCode chaincode; CPubKey pubkey; friend bool operator==(const CExtPubKey &a, const CExtPubKey &b) { return a.nDepth == b.nDepth && memcmp(&a.vchFingerprint[0], &b.vchFingerprint[0], sizeof(vchFingerprint)) == 0 && a.nChild == b.nChild && a.chaincode == b.chaincode && a.pubkey == b.pubkey; } void Encode(uint8_t code[BIP32_EXTKEY_SIZE]) const; void Decode(const uint8_t code[BIP32_EXTKEY_SIZE]); bool Derive(CExtPubKey &out, unsigned int nChild) const; void Serialize(CSizeComputer &s) const { // Optimized implementation for ::GetSerializeSize that avoids copying. // add one byte for the size (compact int) s.seek(BIP32_EXTKEY_SIZE + 1); } template void Serialize(Stream &s) const { unsigned int len = BIP32_EXTKEY_SIZE; ::WriteCompactSize(s, len); uint8_t code[BIP32_EXTKEY_SIZE]; Encode(code); s.write((const char *)&code[0], len); } template void Unserialize(Stream &s) { unsigned int len = ::ReadCompactSize(s); uint8_t code[BIP32_EXTKEY_SIZE]; if (len != BIP32_EXTKEY_SIZE) throw std::runtime_error("Invalid extended key size\n"); s.read((char *)&code[0], len); Decode(code); } }; /** * Users of this module must hold an ECCVerifyHandle. The constructor and * destructor of these are not allowed to run in parallel, though. */ class ECCVerifyHandle { static int refcount; public: ECCVerifyHandle(); ~ECCVerifyHandle(); }; #endif // BITCOIN_PUBKEY_H diff --git a/src/qt/coincontroldialog.h b/src/qt/coincontroldialog.h index d87b97af48..91e6b3a1e1 100644 --- a/src/qt/coincontroldialog.h +++ b/src/qt/coincontroldialog.h @@ -1,112 +1,112 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#ifndef BITCOIN_QT_COINCONTROLDIALOG_H #define BITCOIN_QT_COINCONTROLDIALOG_H #include "amount.h" #include #include #include #include #include #include #include #include class PlatformStyle; class WalletModel; class CCoinControl; class CTxMemPool; namespace Ui { class CoinControlDialog; } #define ASYMP_UTF8 "\xE2\x89\x88" class CCoinControlWidgetItem : public QTreeWidgetItem { public: - CCoinControlWidgetItem(QTreeWidget *parent, int type = Type) + explicit CCoinControlWidgetItem(QTreeWidget *parent, int type = Type) : QTreeWidgetItem(parent, type) {} - CCoinControlWidgetItem(int type = Type) : QTreeWidgetItem(type) {} - CCoinControlWidgetItem(QTreeWidgetItem *parent, int type = Type) + explicit CCoinControlWidgetItem(int type = Type) : QTreeWidgetItem(type) {} + explicit CCoinControlWidgetItem(QTreeWidgetItem *parent, int type = Type) : QTreeWidgetItem(parent, type) {} bool operator<(const QTreeWidgetItem &other) const override; }; class CoinControlDialog : public QDialog { Q_OBJECT public: explicit CoinControlDialog(const PlatformStyle *platformStyle, QWidget *parent = 0); ~CoinControlDialog(); void setModel(WalletModel *model); // static because also called from sendcoinsdialog static void updateLabels(WalletModel *, QDialog *); static QList payAmounts; static CCoinControl *coinControl; static bool fSubtractFeeFromAmount; private: Ui::CoinControlDialog *ui; WalletModel *model; int sortColumn; Qt::SortOrder sortOrder; QMenu *contextMenu; QTreeWidgetItem *contextMenuItem; QAction *copyTransactionHashAction; QAction *lockAction; QAction *unlockAction; const PlatformStyle *platformStyle; void sortView(int, Qt::SortOrder); void updateView(); enum { COLUMN_CHECKBOX = 0, COLUMN_AMOUNT, COLUMN_LABEL, COLUMN_ADDRESS, COLUMN_DATE, COLUMN_CONFIRMATIONS, COLUMN_TXHASH, COLUMN_VOUT_INDEX, }; friend class CCoinControlWidgetItem; private Q_SLOTS: void showMenu(const QPoint &); void copyAmount(); void copyLabel(); void copyAddress(); void copyTransactionHash(); void lockCoin(); void unlockCoin(); void clipboardQuantity(); void clipboardAmount(); void clipboardFee(); void clipboardAfterFee(); void clipboardBytes(); void clipboardLowOutput(); void clipboardChange(); void radioTreeMode(bool); void radioListMode(bool); void viewItemChanged(QTreeWidgetItem *, int); void headerSectionClicked(int); void buttonBoxClicked(QAbstractButton *); void buttonSelectAllClicked(); void updateLabelLocked(); }; #endif // BITCOIN_QT_COINCONTROLDIALOG_H diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp index 88c57b4cd5..dc241927f4 100644 --- a/src/qt/intro.cpp +++ b/src/qt/intro.cpp @@ -1,302 +1,302 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "fs.h" #include "guiutil.h" #include "intro.h" #include "ui_intro.h" #include "util.h" #include #include #include #include static const uint64_t GB_BYTES = 1000000000LL; /** * Minimum free space (in GB) needed for data directory. */ static const uint64_t BLOCK_CHAIN_SIZE = 200; /** * Minimum free space (in GB) needed for data directory when pruned; Does not * include prune target. */ static const uint64_t CHAIN_STATE_SIZE = 4; /** * Total required space (in GB) depending on user choice (prune, not prune). */ static uint64_t requiredSpace; /* Check free space asynchronously to prevent hanging the UI thread. 
Up to one request to check a path is in flight to this thread; when the check() function runs, the current path is requested from the associated Intro object. The reply is sent back through a signal. This ensures that no queue of checking requests is built up while the user is still entering the path, and that always the most recently entered path is checked as soon as the thread becomes available. */ class FreespaceChecker : public QObject { Q_OBJECT public: - FreespaceChecker(Intro *intro); + explicit FreespaceChecker(Intro *intro); enum Status { ST_OK, ST_ERROR }; public Q_SLOTS: void check(); Q_SIGNALS: void reply(int status, const QString &message, quint64 available); private: Intro *intro; }; #include "intro.moc" FreespaceChecker::FreespaceChecker(Intro *_intro) { this->intro = _intro; } void FreespaceChecker::check() { QString dataDirStr = intro->getPathToCheck(); fs::path dataDir = GUIUtil::qstringToBoostPath(dataDirStr); uint64_t freeBytesAvailable = 0; int replyStatus = ST_OK; QString replyMessage = tr("A new data directory will be created."); /* Find first parent that exists, so that fs::space does not fail */ fs::path parentDir = dataDir; fs::path parentDirOld = fs::path(); while (parentDir.has_parent_path() && !fs::exists(parentDir)) { parentDir = parentDir.parent_path(); /* Check if we make any progress, break if not to prevent an infinite * loop here */ if (parentDirOld == parentDir) break; parentDirOld = parentDir; } try { freeBytesAvailable = fs::space(parentDir).available; if (fs::exists(dataDir)) { if (fs::is_directory(dataDir)) { QString separator = "" + QDir::toNativeSeparators("/") + tr("name") + ""; replyStatus = ST_OK; replyMessage = tr("Directory already exists. Add %1 if you " "intend to create a new directory here.") .arg(separator); } else { replyStatus = ST_ERROR; replyMessage = tr("Path already exists, and is not a directory."); } } } catch (const fs::filesystem_error &) { /* Parent directory does not exist or is not accessible */ replyStatus = ST_ERROR; replyMessage = tr("Cannot create data directory here."); } Q_EMIT reply(replyStatus, replyMessage, freeBytesAvailable); } Intro::Intro(QWidget *parent) : QDialog(parent), ui(new Ui::Intro), thread(0), signalled(false) { ui->setupUi(this); ui->welcomeLabel->setText(ui->welcomeLabel->text().arg(tr(PACKAGE_NAME))); ui->storageLabel->setText(ui->storageLabel->text().arg(tr(PACKAGE_NAME))); uint64_t pruneTarget = std::max(0, gArgs.GetArg("-prune", 0)); requiredSpace = BLOCK_CHAIN_SIZE; if (pruneTarget) { uint64_t prunedGBs = std::ceil(pruneTarget * 1024 * 1024.0 / GB_BYTES); if (prunedGBs <= requiredSpace) { requiredSpace = prunedGBs; } } requiredSpace += CHAIN_STATE_SIZE; ui->sizeWarningLabel->setText( ui->sizeWarningLabel->text().arg(tr(PACKAGE_NAME)).arg(requiredSpace)); startThread(); } Intro::~Intro() { delete ui; /* Ensure thread is finished before it is deleted */ Q_EMIT stopThread(); thread->wait(); } QString Intro::getDataDirectory() { return ui->dataDirectory->text(); } void Intro::setDataDirectory(const QString &dataDir) { ui->dataDirectory->setText(dataDir); if (dataDir == getDefaultDataDirectory()) { ui->dataDirDefault->setChecked(true); ui->dataDirectory->setEnabled(false); ui->ellipsisButton->setEnabled(false); } else { ui->dataDirCustom->setChecked(true); ui->dataDirectory->setEnabled(true); ui->ellipsisButton->setEnabled(true); } } QString Intro::getDefaultDataDirectory() { return GUIUtil::boostPathToQString(GetDefaultDataDir()); } bool Intro::pickDataDirectory() { QSettings settings; /* If data 
directory provided on command line, no need to look at settings or show a picking dialog */ if (!gArgs.GetArg("-datadir", "").empty()) return true; /* 1) Default data directory for operating system */ QString dataDir = getDefaultDataDirectory(); /* 2) Allow QSettings to override default dir */ dataDir = settings.value("strDataDir", dataDir).toString(); if (!fs::exists(GUIUtil::qstringToBoostPath(dataDir)) || gArgs.GetBoolArg("-choosedatadir", DEFAULT_CHOOSE_DATADIR) || settings.value("fReset", false).toBool() || gArgs.GetBoolArg("-resetguisettings", false)) { /* If current default data directory does not exist, let the user choose * one */ Intro intro; intro.setDataDirectory(dataDir); intro.setWindowIcon(QIcon(":icons/bitcoin")); while (true) { if (!intro.exec()) { /* Cancel clicked */ return false; } dataDir = intro.getDataDirectory(); try { TryCreateDirectories(GUIUtil::qstringToBoostPath(dataDir)); break; } catch (const fs::filesystem_error &) { QMessageBox::critical(0, tr(PACKAGE_NAME), tr("Error: Specified data directory " "\"%1\" cannot be created.") .arg(dataDir)); /* fall through, back to choosing screen */ } } settings.setValue("strDataDir", dataDir); settings.setValue("fReset", false); } /* Only override -datadir if different from the default, to make it possible * to * override -datadir in the bitcoin.conf file in the default data directory * (to be consistent with bitcoind behavior) */ if (dataDir != getDefaultDataDirectory()) { // use OS locale for path setting gArgs.SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); } return true; } void Intro::setStatus(int status, const QString &message, quint64 bytesAvailable) { switch (status) { case FreespaceChecker::ST_OK: ui->errorMessage->setText(message); ui->errorMessage->setStyleSheet(""); break; case FreespaceChecker::ST_ERROR: ui->errorMessage->setText(tr("Error") + ": " + message); ui->errorMessage->setStyleSheet("QLabel { color: #800000 }"); break; } /* Indicate number of bytes available */ if (status == FreespaceChecker::ST_ERROR) { ui->freeSpace->setText(""); } else { QString freeString = tr("%n GB of free space available", "", bytesAvailable / GB_BYTES); if (bytesAvailable < requiredSpace * GB_BYTES) { freeString += " " + tr("(of %n GB needed)", "", requiredSpace); ui->freeSpace->setStyleSheet("QLabel { color: #800000 }"); } else { ui->freeSpace->setStyleSheet(""); } ui->freeSpace->setText(freeString + "."); } /* Don't allow confirm in ERROR state */ ui->buttonBox->button(QDialogButtonBox::Ok) ->setEnabled(status != FreespaceChecker::ST_ERROR); } void Intro::on_dataDirectory_textChanged(const QString &dataDirStr) { /* Disable OK button until check result comes in */ ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(false); checkPath(dataDirStr); } void Intro::on_ellipsisButton_clicked() { QString dir = QDir::toNativeSeparators(QFileDialog::getExistingDirectory( 0, "Choose data directory", ui->dataDirectory->text())); if (!dir.isEmpty()) ui->dataDirectory->setText(dir); } void Intro::on_dataDirDefault_clicked() { setDataDirectory(getDefaultDataDirectory()); } void Intro::on_dataDirCustom_clicked() { ui->dataDirectory->setEnabled(true); ui->ellipsisButton->setEnabled(true); } void Intro::startThread() { thread = new QThread(this); FreespaceChecker *executor = new FreespaceChecker(this); executor->moveToThread(thread); connect(executor, SIGNAL(reply(int, QString, quint64)), this, SLOT(setStatus(int, QString, quint64))); connect(this, SIGNAL(requestCheck()), executor, SLOT(check())); /* make sure 
executor object is deleted in its own thread */ connect(this, SIGNAL(stopThread()), executor, SLOT(deleteLater())); connect(this, SIGNAL(stopThread()), thread, SLOT(quit())); thread->start(); } void Intro::checkPath(const QString &dataDir) { mutex.lock(); pathToCheck = dataDir; if (!signalled) { signalled = true; Q_EMIT requestCheck(); } mutex.unlock(); } QString Intro::getPathToCheck() { QString retval; mutex.lock(); retval = pathToCheck; signalled = false; /* new request can be queued now */ mutex.unlock(); return retval; } diff --git a/src/qt/notificator.cpp b/src/qt/notificator.cpp index a3110ca5d4..e2ff1b18f9 100644 --- a/src/qt/notificator.cpp +++ b/src/qt/notificator.cpp @@ -1,268 +1,268 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "notificator.h" #include #include #include #include #include #include #include #include #include #include #ifdef USE_DBUS #include #include #endif // Include ApplicationServices.h after QtDbus to avoid redefinition of check(). // This affects at least OSX 10.6. See /usr/include/AssertMacros.h for details. // Note: This could also be worked around using: // #define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #ifdef Q_OS_MAC #include "macnotificationhandler.h" #include #endif #ifdef USE_DBUS // https://wiki.ubuntu.com/NotificationDevelopmentGuidelines recommends at least // 128 const int FREEDESKTOP_NOTIFICATION_ICON_SIZE = 128; #endif Notificator::Notificator(const QString &_programName, QSystemTrayIcon *_trayIcon, QWidget *_parent) : QObject(_parent), parent(_parent), programName(_programName), mode(None), trayIcon(_trayIcon) #ifdef USE_DBUS , interface(0) #endif { if (_trayIcon && _trayIcon->supportsMessages()) { mode = QSystemTray; } #ifdef USE_DBUS interface = new QDBusInterface("org.freedesktop.Notifications", "/org/freedesktop/Notifications", "org.freedesktop.Notifications"); if (interface->isValid()) { mode = Freedesktop; } #endif #ifdef Q_OS_MAC // check if users OS has support for NSUserNotification if (MacNotificationHandler::instance() ->hasUserNotificationCenterSupport()) { mode = UserNotificationCenter; } #endif } Notificator::~Notificator() { #ifdef USE_DBUS delete interface; #endif } #ifdef USE_DBUS // Loosely based on http://www.qtcentre.org/archive/index.php/t-25879.html class FreedesktopImage { public: FreedesktopImage() {} - FreedesktopImage(const QImage &img); + explicit FreedesktopImage(const QImage &img); static int metaType(); // Image to variant that can be marshalled over DBus static QVariant toVariant(const QImage &img); private: int width, height, stride; bool hasAlpha; int channels; int bitsPerSample; QByteArray image; friend QDBusArgument &operator<<(QDBusArgument &a, const FreedesktopImage &i); friend const QDBusArgument &operator>>(const QDBusArgument &a, FreedesktopImage &i); }; Q_DECLARE_METATYPE(FreedesktopImage); // Image configuration settings const int CHANNELS = 4; const int BYTES_PER_PIXEL = 4; const int BITS_PER_SAMPLE = 8; FreedesktopImage::FreedesktopImage(const QImage &img) : width(img.width()), height(img.height()), stride(img.width() * BYTES_PER_PIXEL), hasAlpha(true), channels(CHANNELS), bitsPerSample(BITS_PER_SAMPLE) { // Convert 00xAARRGGBB to RGBA bytewise (endian-independent) format QImage tmp = img.convertToFormat(QImage::Format_ARGB32); const uint32_t *data = reinterpret_cast(tmp.bits()); unsigned int num_pixels = width * height; 
image.resize(num_pixels * BYTES_PER_PIXEL); for (unsigned int ptr = 0; ptr < num_pixels; ++ptr) { image[ptr * BYTES_PER_PIXEL + 0] = data[ptr] >> 16; // R image[ptr * BYTES_PER_PIXEL + 1] = data[ptr] >> 8; // G image[ptr * BYTES_PER_PIXEL + 2] = data[ptr]; // B image[ptr * BYTES_PER_PIXEL + 3] = data[ptr] >> 24; // A } } QDBusArgument &operator<<(QDBusArgument &a, const FreedesktopImage &i) { a.beginStructure(); a << i.width << i.height << i.stride << i.hasAlpha << i.bitsPerSample << i.channels << i.image; a.endStructure(); return a; } const QDBusArgument &operator>>(const QDBusArgument &a, FreedesktopImage &i) { a.beginStructure(); a >> i.width >> i.height >> i.stride >> i.hasAlpha >> i.bitsPerSample >> i.channels >> i.image; a.endStructure(); return a; } int FreedesktopImage::metaType() { return qDBusRegisterMetaType(); } QVariant FreedesktopImage::toVariant(const QImage &img) { FreedesktopImage fimg(img); return QVariant(FreedesktopImage::metaType(), &fimg); } void Notificator::notifyDBus(Class cls, const QString &title, const QString &text, const QIcon &icon, int millisTimeout) { Q_UNUSED(cls); // Arguments for DBus call: QList args; // Program Name: args.append(programName); // Unique ID of this notification type: args.append(0U); // Application Icon, empty string args.append(QString()); // Summary args.append(title); // Body args.append(text); // Actions (none, actions are deprecated) QStringList actions; args.append(actions); // Hints QVariantMap hints; // If no icon specified, set icon based on class QIcon tmpicon; if (icon.isNull()) { QStyle::StandardPixmap sicon = QStyle::SP_MessageBoxQuestion; switch (cls) { case Information: sicon = QStyle::SP_MessageBoxInformation; break; case Warning: sicon = QStyle::SP_MessageBoxWarning; break; case Critical: sicon = QStyle::SP_MessageBoxCritical; break; default: break; } tmpicon = QApplication::style()->standardIcon(sicon); } else { tmpicon = icon; } hints["icon_data"] = FreedesktopImage::toVariant( tmpicon.pixmap(FREEDESKTOP_NOTIFICATION_ICON_SIZE).toImage()); args.append(hints); // Timeout (in msec) args.append(millisTimeout); // "Fire and forget" interface->callWithArgumentList(QDBus::NoBlock, "Notify", args); } #endif void Notificator::notifySystray(Class cls, const QString &title, const QString &text, const QIcon &icon, int millisTimeout) { Q_UNUSED(icon); QSystemTrayIcon::MessageIcon sicon = QSystemTrayIcon::NoIcon; switch (cls) // Set icon based on class { case Information: sicon = QSystemTrayIcon::Information; break; case Warning: sicon = QSystemTrayIcon::Warning; break; case Critical: sicon = QSystemTrayIcon::Critical; break; } trayIcon->showMessage(title, text, sicon, millisTimeout); } // Based on Qt's tray icon implementation #ifdef Q_OS_MAC void Notificator::notifyMacUserNotificationCenter(Class cls, const QString &title, const QString &text, const QIcon &icon) { // icon is not supported by the user notification center yet. OSX will use // the app icon. 
MacNotificationHandler::instance()->showNotification(title, text); } #endif void Notificator::notify(Class cls, const QString &title, const QString &text, const QIcon &icon, int millisTimeout) { switch (mode) { #ifdef USE_DBUS case Freedesktop: notifyDBus(cls, title, text, icon, millisTimeout); break; #endif case QSystemTray: notifySystray(cls, title, text, icon, millisTimeout); break; #ifdef Q_OS_MAC case UserNotificationCenter: notifyMacUserNotificationCenter(cls, title, text, icon); break; #endif default: if (cls == Critical) { // Fall back to old fashioned pop-up dialog if critical and no // other notification available QMessageBox::critical(parent, title, text, QMessageBox::Ok, QMessageBox::Ok); } break; } } diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp index 0c07c7901e..4062288912 100644 --- a/src/qt/overviewpage.cpp +++ b/src/qt/overviewpage.cpp @@ -1,299 +1,299 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "overviewpage.h" #include "ui_overviewpage.h" #include "bitcoinunits.h" #include "clientmodel.h" #include "guiconstants.h" #include "guiutil.h" #include "optionsmodel.h" #include "platformstyle.h" #include "transactionfilterproxy.h" #include "transactiontablemodel.h" #include "walletmodel.h" #include #include #define DECORATION_SIZE 54 #define NUM_ITEMS 5 class TxViewDelegate : public QAbstractItemDelegate { Q_OBJECT public: - TxViewDelegate(const PlatformStyle *_platformStyle, - QObject *parent = nullptr) + explicit TxViewDelegate(const PlatformStyle *_platformStyle, + QObject *parent = nullptr) : QAbstractItemDelegate(parent), unit(BitcoinUnits::BCH), platformStyle(_platformStyle) {} inline void paint(QPainter *painter, const QStyleOptionViewItem &option, const QModelIndex &index) const { painter->save(); QIcon icon = qvariant_cast( index.data(TransactionTableModel::RawDecorationRole)); QRect mainRect = option.rect; QRect decorationRect(mainRect.topLeft(), QSize(DECORATION_SIZE, DECORATION_SIZE)); int xspace = DECORATION_SIZE + 8; int ypad = 6; int halfheight = (mainRect.height() - 2 * ypad) / 2; QRect amountRect(mainRect.left() + xspace, mainRect.top() + ypad, mainRect.width() - xspace, halfheight); QRect addressRect(mainRect.left() + xspace, mainRect.top() + ypad + halfheight, mainRect.width() - xspace, halfheight); icon = platformStyle->SingleColorIcon(icon); icon.paint(painter, decorationRect); QDateTime date = index.data(TransactionTableModel::DateRole).toDateTime(); QString address = index.data(Qt::DisplayRole).toString(); Amount amount( int64_t( index.data(TransactionTableModel::AmountRole).toLongLong()) * SATOSHI); bool confirmed = index.data(TransactionTableModel::ConfirmedRole).toBool(); QVariant value = index.data(Qt::ForegroundRole); QColor foreground = option.palette.color(QPalette::Text); if (value.canConvert()) { QBrush brush = qvariant_cast(value); foreground = brush.color(); } painter->setPen(foreground); QRect boundingRect; painter->drawText(addressRect, Qt::AlignLeft | Qt::AlignVCenter, address, &boundingRect); if (index.data(TransactionTableModel::WatchonlyRole).toBool()) { QIcon iconWatchonly = qvariant_cast( index.data(TransactionTableModel::WatchonlyDecorationRole)); QRect watchonlyRect(boundingRect.right() + 5, mainRect.top() + ypad + halfheight, 16, halfheight); iconWatchonly.paint(painter, watchonlyRect); } if (amount < Amount::zero()) { foreground = COLOR_NEGATIVE; } else if 
(!confirmed) { foreground = COLOR_UNCONFIRMED; } else { foreground = option.palette.color(QPalette::Text); } painter->setPen(foreground); QString amountText = BitcoinUnits::formatWithUnit( unit, amount, true, BitcoinUnits::separatorAlways); if (!confirmed) { amountText = QString("[") + amountText + QString("]"); } painter->drawText(amountRect, Qt::AlignRight | Qt::AlignVCenter, amountText); painter->setPen(option.palette.color(QPalette::Text)); painter->drawText(amountRect, Qt::AlignLeft | Qt::AlignVCenter, GUIUtil::dateTimeStr(date)); painter->restore(); } inline QSize sizeHint(const QStyleOptionViewItem &option, const QModelIndex &index) const { return QSize(DECORATION_SIZE, DECORATION_SIZE); } int unit; const PlatformStyle *platformStyle; }; #include "overviewpage.moc" OverviewPage::OverviewPage(const PlatformStyle *platformStyle, QWidget *parent) : QWidget(parent), ui(new Ui::OverviewPage), clientModel(0), walletModel(0), currentBalance(-SATOSHI), currentUnconfirmedBalance(-SATOSHI), currentImmatureBalance(-SATOSHI), currentWatchOnlyBalance(-SATOSHI), currentWatchUnconfBalance(-SATOSHI), currentWatchImmatureBalance(-SATOSHI), txdelegate(new TxViewDelegate(platformStyle, this)) { ui->setupUi(this); // use a SingleColorIcon for the "out of sync warning" icon QIcon icon = platformStyle->SingleColorIcon(":/icons/warning"); // also set the disabled icon because we are using a disabled QPushButton to // work around missing HiDPI support of QLabel // (https://bugreports.qt.io/browse/QTBUG-42503) icon.addPixmap(icon.pixmap(QSize(64, 64), QIcon::Normal), QIcon::Disabled); ui->labelTransactionsStatus->setIcon(icon); ui->labelWalletStatus->setIcon(icon); // Recent transactions ui->listTransactions->setItemDelegate(txdelegate); ui->listTransactions->setIconSize(QSize(DECORATION_SIZE, DECORATION_SIZE)); ui->listTransactions->setMinimumHeight(NUM_ITEMS * (DECORATION_SIZE + 2)); ui->listTransactions->setAttribute(Qt::WA_MacShowFocusRect, false); connect(ui->listTransactions, SIGNAL(clicked(QModelIndex)), this, SLOT(handleTransactionClicked(QModelIndex))); // start with displaying the "out of sync" warnings showOutOfSyncWarning(true); connect(ui->labelWalletStatus, SIGNAL(clicked()), this, SLOT(handleOutOfSyncWarningClicks())); connect(ui->labelTransactionsStatus, SIGNAL(clicked()), this, SLOT(handleOutOfSyncWarningClicks())); } void OverviewPage::handleTransactionClicked(const QModelIndex &index) { if (filter) Q_EMIT transactionClicked(filter->mapToSource(index)); } void OverviewPage::handleOutOfSyncWarningClicks() { Q_EMIT outOfSyncWarningClicked(); } OverviewPage::~OverviewPage() { delete ui; } void OverviewPage::setBalance(const Amount balance, const Amount unconfirmedBalance, const Amount immatureBalance, const Amount watchOnlyBalance, const Amount watchUnconfBalance, const Amount watchImmatureBalance) { int unit = walletModel->getOptionsModel()->getDisplayUnit(); currentBalance = balance; currentUnconfirmedBalance = unconfirmedBalance; currentImmatureBalance = immatureBalance; currentWatchOnlyBalance = watchOnlyBalance; currentWatchUnconfBalance = watchUnconfBalance; currentWatchImmatureBalance = watchImmatureBalance; ui->labelBalance->setText(BitcoinUnits::formatWithUnit( unit, balance, false, BitcoinUnits::separatorAlways)); ui->labelUnconfirmed->setText(BitcoinUnits::formatWithUnit( unit, unconfirmedBalance, false, BitcoinUnits::separatorAlways)); ui->labelImmature->setText(BitcoinUnits::formatWithUnit( unit, immatureBalance, false, BitcoinUnits::separatorAlways)); 
ui->labelTotal->setText(BitcoinUnits::formatWithUnit( unit, balance + unconfirmedBalance + immatureBalance, false, BitcoinUnits::separatorAlways)); ui->labelWatchAvailable->setText(BitcoinUnits::formatWithUnit( unit, watchOnlyBalance, false, BitcoinUnits::separatorAlways)); ui->labelWatchPending->setText(BitcoinUnits::formatWithUnit( unit, watchUnconfBalance, false, BitcoinUnits::separatorAlways)); ui->labelWatchImmature->setText(BitcoinUnits::formatWithUnit( unit, watchImmatureBalance, false, BitcoinUnits::separatorAlways)); ui->labelWatchTotal->setText(BitcoinUnits::formatWithUnit( unit, watchOnlyBalance + watchUnconfBalance + watchImmatureBalance, false, BitcoinUnits::separatorAlways)); // only show immature (newly mined) balance if it's non-zero, so as not to // complicate things // for the non-mining users bool showImmature = immatureBalance != Amount::zero(); bool showWatchOnlyImmature = watchImmatureBalance != Amount::zero(); // for symmetry reasons also show immature label when the watch-only one is // shown ui->labelImmature->setVisible(showImmature || showWatchOnlyImmature); ui->labelImmatureText->setVisible(showImmature || showWatchOnlyImmature); ui->labelWatchImmature->setVisible( showWatchOnlyImmature); // show watch-only immature balance } // show/hide watch-only labels void OverviewPage::updateWatchOnlyLabels(bool showWatchOnly) { // show spendable label (only when watch-only is active) ui->labelSpendable->setVisible(showWatchOnly); // show watch-only label ui->labelWatchonly->setVisible(showWatchOnly); // show watch-only balance separator line ui->lineWatchBalance->setVisible(showWatchOnly); // show watch-only available balance ui->labelWatchAvailable->setVisible(showWatchOnly); // show watch-only pending balance ui->labelWatchPending->setVisible(showWatchOnly); // show watch-only total balance ui->labelWatchTotal->setVisible(showWatchOnly); if (!showWatchOnly) { ui->labelWatchImmature->hide(); } } void OverviewPage::setClientModel(ClientModel *model) { this->clientModel = model; if (model) { // Show warning if this is a prerelease version connect(model, SIGNAL(alertsChanged(QString)), this, SLOT(updateAlerts(QString))); updateAlerts(model->getStatusBarWarnings()); } } void OverviewPage::setWalletModel(WalletModel *model) { this->walletModel = model; if (model && model->getOptionsModel()) { // Set up transaction list filter.reset(new TransactionFilterProxy()); filter->setSourceModel(model->getTransactionTableModel()); filter->setLimit(NUM_ITEMS); filter->setDynamicSortFilter(true); filter->setSortRole(Qt::EditRole); filter->setShowInactive(false); filter->sort(TransactionTableModel::Date, Qt::DescendingOrder); ui->listTransactions->setModel(filter.get()); ui->listTransactions->setModelColumn(TransactionTableModel::ToAddress); // Keep up to date with wallet setBalance(model->getBalance(), model->getUnconfirmedBalance(), model->getImmatureBalance(), model->getWatchBalance(), model->getWatchUnconfirmedBalance(), model->getWatchImmatureBalance()); connect(model, SIGNAL(balanceChanged(Amount, Amount, Amount, Amount, Amount, Amount)), this, SLOT(setBalance(Amount, Amount, Amount, Amount, Amount, Amount))); connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(updateDisplayUnit())); updateWatchOnlyLabels(model->haveWatchOnly()); connect(model, SIGNAL(notifyWatchonlyChanged(bool)), this, SLOT(updateWatchOnlyLabels(bool))); } // update the display unit, to not use the default ("BCH") updateDisplayUnit(); } void OverviewPage::updateDisplayUnit() { if 
(walletModel && walletModel->getOptionsModel()) { if (currentBalance != -SATOSHI) { setBalance(currentBalance, currentUnconfirmedBalance, currentImmatureBalance, currentWatchOnlyBalance, currentWatchUnconfBalance, currentWatchImmatureBalance); } // Update txdelegate->unit with the current unit txdelegate->unit = walletModel->getOptionsModel()->getDisplayUnit(); ui->listTransactions->update(); } } void OverviewPage::updateAlerts(const QString &warnings) { this->ui->labelAlerts->setVisible(!warnings.isEmpty()); this->ui->labelAlerts->setText(warnings); } void OverviewPage::showOutOfSyncWarning(bool fShow) { ui->labelWalletStatus->setVisible(fShow); ui->labelTransactionsStatus->setVisible(fShow); } diff --git a/src/qt/paymentrequestplus.cpp b/src/qt/paymentrequestplus.cpp index e88e793a77..b9f8c5836b 100644 --- a/src/qt/paymentrequestplus.cpp +++ b/src/qt/paymentrequestplus.cpp @@ -1,235 +1,235 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. // // Wraps dumb protocol buffer paymentRequest with some extra methods // #include "paymentrequestplus.h" #include "util.h" #include #include #include #include #include class SSLVerifyError : public std::runtime_error { public: - SSLVerifyError(std::string err) : std::runtime_error(err) {} + explicit SSLVerifyError(std::string err) : std::runtime_error(err) {} }; bool PaymentRequestPlus::parse(const QByteArray &data) { bool parseOK = paymentRequest.ParseFromArray(data.data(), data.size()); if (!parseOK) { qWarning() << "PaymentRequestPlus::parse: Error parsing payment request"; return false; } if (paymentRequest.payment_details_version() > 1) { qWarning() << "PaymentRequestPlus::parse: Received up-version payment " "details, version=" << paymentRequest.payment_details_version(); return false; } parseOK = details.ParseFromString(paymentRequest.serialized_payment_details()); if (!parseOK) { qWarning() << "PaymentRequestPlus::parse: Error parsing payment details"; paymentRequest.Clear(); return false; } return true; } bool PaymentRequestPlus::SerializeToString(std::string *output) const { return paymentRequest.SerializeToString(output); } bool PaymentRequestPlus::IsInitialized() const { return paymentRequest.IsInitialized(); } bool PaymentRequestPlus::getMerchant(X509_STORE *certStore, QString &merchant) const { merchant.clear(); if (!IsInitialized()) return false; // One day we'll support more PKI types, but just x509 for now: const EVP_MD *digestAlgorithm = nullptr; if (paymentRequest.pki_type() == "x509+sha256") { digestAlgorithm = EVP_sha256(); } else if (paymentRequest.pki_type() == "x509+sha1") { digestAlgorithm = EVP_sha1(); } else if (paymentRequest.pki_type() == "none") { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "pki_type == none"; return false; } else { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "unknown pki_type " << QString::fromStdString(paymentRequest.pki_type()); return false; } payments::X509Certificates certChain; if (!certChain.ParseFromString(paymentRequest.pki_data())) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: error " "parsing pki_data"; return false; } std::vector certs; const QDateTime currentTime = QDateTime::currentDateTime(); for (int i = 0; i < certChain.certificate_size(); i++) { QByteArray certData(certChain.certificate(i).data(), certChain.certificate(i).size()); QSslCertificate qCert(certData, QSsl::Der); if 
(currentTime < qCert.effectiveDate() || currentTime > qCert.expiryDate()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "certificate expired or not yet active: " << qCert; return false; } if (qCert.isBlacklisted()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: " "certificate blacklisted: " << qCert; return false; } const uint8_t *data = (const uint8_t *)certChain.certificate(i).data(); X509 *cert = d2i_X509(nullptr, &data, certChain.certificate(i).size()); if (cert) certs.push_back(cert); } if (certs.empty()) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: empty " "certificate chain"; return false; } // The first cert is the signing cert, the rest are untrusted certs that // chain to a valid root authority. OpenSSL needs them separately. STACK_OF(X509) *chain = sk_X509_new_null(); for (int i = certs.size() - 1; i > 0; i--) { sk_X509_push(chain, certs[i]); } X509 *signing_cert = certs[0]; // Now create a "store context", which is a single use object for checking, // load the signing cert into it and verify. X509_STORE_CTX *store_ctx = X509_STORE_CTX_new(); if (!store_ctx) { qWarning() << "PaymentRequestPlus::getMerchant: Payment request: error " "creating X509_STORE_CTX"; return false; } char *website = nullptr; bool fResult = true; try { if (!X509_STORE_CTX_init(store_ctx, certStore, signing_cert, chain)) { int error = X509_STORE_CTX_get_error(store_ctx); throw SSLVerifyError(X509_verify_cert_error_string(error)); } // Now do the verification! int result = X509_verify_cert(store_ctx); if (result != 1) { int error = X509_STORE_CTX_get_error(store_ctx); // For testing payment requests, we allow self signed root certs! // This option is just shown in the UI options, if -help-debug is // enabled. if (!(error == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT && gArgs.GetBoolArg("-allowselfsignedrootcertificates", DEFAULT_SELFSIGNED_ROOTCERTS))) { throw SSLVerifyError(X509_verify_cert_error_string(error)); } else { qDebug() << "PaymentRequestPlus::getMerchant: Allowing self " "signed root certificate, because " "-allowselfsignedrootcertificates is true."; } } X509_NAME *certname = X509_get_subject_name(signing_cert); // Valid cert; check signature: // Copy payments::PaymentRequest rcopy(paymentRequest); rcopy.set_signature(std::string("")); // Everything but the signature std::string data_to_verify; rcopy.SerializeToString(&data_to_verify); #if HAVE_DECL_EVP_MD_CTX_NEW EVP_MD_CTX *ctx = EVP_MD_CTX_new(); if (!ctx) throw SSLVerifyError("Error allocating OpenSSL context."); #else EVP_MD_CTX _ctx; EVP_MD_CTX *ctx; ctx = &_ctx; #endif EVP_PKEY *pubkey = X509_get_pubkey(signing_cert); EVP_MD_CTX_init(ctx); if (!EVP_VerifyInit_ex(ctx, digestAlgorithm, nullptr) || !EVP_VerifyUpdate(ctx, data_to_verify.data(), data_to_verify.size()) || !EVP_VerifyFinal( ctx, (const uint8_t *)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) { throw SSLVerifyError("Bad signature, invalid payment request."); } #if HAVE_DECL_EVP_MD_CTX_NEW EVP_MD_CTX_free(ctx); #endif // OpenSSL API for getting human printable strings from certs is // baroque. 
int textlen = X509_NAME_get_text_by_NID(certname, NID_commonName, nullptr, 0); website = new char[textlen + 1]; if (X509_NAME_get_text_by_NID(certname, NID_commonName, website, textlen + 1) == textlen && textlen > 0) { merchant = website; } else { throw SSLVerifyError("Bad certificate, missing common name."); } // TODO: detect EV certificates and set merchant = business name instead // of unfriendly NID_commonName ? } catch (const SSLVerifyError &err) { fResult = false; qWarning() << "PaymentRequestPlus::getMerchant: SSL error: " << err.what(); } if (website) { delete[] website; } X509_STORE_CTX_free(store_ctx); for (size_t i = 0; i < certs.size(); i++) { X509_free(certs[i]); } return fResult; } QList> PaymentRequestPlus::getPayTo() const { QList> result; for (int i = 0; i < details.outputs_size(); i++) { const uint8_t *scriptStr = (const uint8_t *)details.outputs(i).script().data(); CScript s(scriptStr, scriptStr + details.outputs(i).script().size()); result.append( std::make_pair(s, int64_t(details.outputs(i).amount()) * SATOSHI)); } return result; } diff --git a/src/qt/paymentserver.h b/src/qt/paymentserver.h index e4030b5b05..3050ad5fa0 100644 --- a/src/qt/paymentserver.h +++ b/src/qt/paymentserver.h @@ -1,149 +1,149 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_QT_PAYMENTSERVER_H #define BITCOIN_QT_PAYMENTSERVER_H // This class handles payment requests from clicking on // bitcoincash: URIs // // This is somewhat tricky, because we have to deal with the situation where the // user clicks on a link during startup/initialization, when the splash-screen // is up but the main window (and the Send Coins tab) is not. // // So, the strategy is: // // Create the server, and register the event handler, when the application is // created. Save any URIs received at or during startup in a list. // // When startup is finished and the main window is shown, a signal is sent to // slot uiReady(), which emits a receivedURI() signal for any payment requests // that happened during startup. // // After startup, receivedURI() happens as usual. // // This class has one more feature: a static method that finds URIs passed in // the command line and, if a server is running in another process, sends them // to the server. // #include "paymentrequestplus.h" #include "walletmodel.h" #include #include class OptionsModel; class CWallet; QT_BEGIN_NAMESPACE class QApplication; class QByteArray; class QLocalServer; class QNetworkAccessManager; class QNetworkReply; class QSslError; class QUrl; QT_END_NAMESPACE // BIP70 max payment request size in bytes (DoS protection) static const qint64 BIP70_MAX_PAYMENTREQUEST_SIZE = 50000; class PaymentServer : public QObject { Q_OBJECT public: // Parse URIs on command line // Returns false on error static void ipcParseCommandLine(int argc, char *argv[]); // Returns true if there were URIs on the command line which were // successfully sent to an already-running process. // Note: if a payment request is given, SelectParams(MAIN/TESTNET) will be // called so we startup in the right mode. static bool ipcSendCommandLine(); // parent should be QApplication object - PaymentServer(QObject *parent, bool startLocalServer = true); + explicit PaymentServer(QObject *parent, bool startLocalServer = true); ~PaymentServer(); // Load root certificate authorities. 
Pass nullptr (default) to read from // the file specified in the -rootcertificates setting, or, if that's not // set, to use the system default root certificates. If you pass in a store, // you should not X509_STORE_free it: it will be freed either at exit or // when another set of CAs are loaded. static void LoadRootCAs(X509_STORE *store = nullptr); // Return certificate store static X509_STORE *getCertStore(); // OptionsModel is used for getting proxy settings and display unit void setOptionsModel(OptionsModel *optionsModel); // Verify that the payment request network matches the client network static bool verifyNetwork(const payments::PaymentDetails &requestDetails); // Verify if the payment request is expired static bool verifyExpired(const payments::PaymentDetails &requestDetails); // Verify the payment request size is valid as per BIP70 static bool verifySize(qint64 requestSize); // Verify the payment request amount is valid static bool verifyAmount(const Amount requestAmount); Q_SIGNALS: // Fired when a valid payment request is received void receivedPaymentRequest(SendCoinsRecipient); // Fired when a valid PaymentACK is received void receivedPaymentACK(const QString &paymentACKMsg); // Fired when a message should be reported to the user void message(const QString &title, const QString &message, unsigned int style); public Q_SLOTS: // Signal this when the main window's UI is ready to display payment // requests to the user void uiReady(); // Submit Payment message to a merchant, get back PaymentACK: void fetchPaymentACK(CWallet *wallet, SendCoinsRecipient recipient, QByteArray transaction); // Handle an incoming URI, URI with local file scheme or file void handleURIOrFile(const QString &s); private Q_SLOTS: void handleURIConnection(); void netRequestFinished(QNetworkReply *); void reportSslErrors(QNetworkReply *, const QList &); void handlePaymentACK(const QString &paymentACKMsg); protected: // Constructor registers this on the parent QApplication to receive // QEvent::FileOpen and QEvent:Drop events bool eventFilter(QObject *object, QEvent *event) override; private: static bool readPaymentRequestFromFile(const QString &filename, PaymentRequestPlus &request); bool handleURI(const QString &scheme, const QString &s); bool processPaymentRequest(const PaymentRequestPlus &request, SendCoinsRecipient &recipient); void fetchRequest(const QUrl &url); // Setup networking void initNetManager(); // true during startup bool saveURIs; QLocalServer *uriServer; // Used to fetch payment requests QNetworkAccessManager *netManager; OptionsModel *optionsModel; }; #endif // BITCOIN_QT_PAYMENTSERVER_H diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h index acb31197b5..f1613789f7 100644 --- a/src/qt/utilitydialog.h +++ b/src/qt/utilitydialog.h @@ -1,49 +1,49 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
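PaymentServer's static helpers (verifyNetwork, verifyExpired, verifySize, verifyAmount) centralize the sanity checks applied to incoming BIP70 payment requests; verifySize in particular enforces the 50000-byte BIP70_MAX_PAYMENTREQUEST_SIZE cap declared above as DoS protection. A rough standalone sketch of that kind of bounds check, using a hypothetical helper name rather than the class's actual implementation:

#include <cstdint>

// Hypothetical free-function sketch of a BIP70 size guard;
// PaymentServer::verifySize is the class's actual entry point for this check.
static const int64_t kMaxPaymentRequestSize = 50000; // BIP70 DoS cap (bytes)

static bool RequestSizeOk(int64_t requestSize) {
    // Reject empty/negative sizes and anything above the cap.
    return requestSize > 0 && requestSize <= kMaxPaymentRequestSize;
}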
#ifndef BITCOIN_QT_UTILITYDIALOG_H #define BITCOIN_QT_UTILITYDIALOG_H #include #include class BitcoinGUI; class ClientModel; namespace Ui { class HelpMessageDialog; } /** "Help message" dialog box */ class HelpMessageDialog : public QDialog { Q_OBJECT public: explicit HelpMessageDialog(QWidget *parent, bool about); ~HelpMessageDialog(); void printToConsole(); void showOrPrint(); private: Ui::HelpMessageDialog *ui; QString text; private Q_SLOTS: void on_okButton_accepted(); }; /** "Shutdown" window */ class ShutdownWindow : public QWidget { Q_OBJECT public: - ShutdownWindow(QWidget *parent = 0, Qt::WindowFlags f = 0); + explicit ShutdownWindow(QWidget *parent = nullptr, Qt::WindowFlags f = 0); static QWidget *showShutdownWindow(BitcoinGUI *window); protected: void closeEvent(QCloseEvent *event) override; }; #endif // BITCOIN_QT_UTILITYDIALOG_H diff --git a/src/rest.cpp b/src/rest.cpp index a619eb8100..011676e4c2 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -1,698 +1,699 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "chain.h" #include "chainparams.h" #include "config.h" #include "httpserver.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "rpc/tojson.h" #include "streams.h" #include "sync.h" #include "txmempool.h" #include "utilstrencodings.h" #include "validation.h" #include "version.h" #include #include // Allow a max of 15 outpoints to be queried at once. static const size_t MAX_GETUTXOS_OUTPOINTS = 15; enum RetFormat { RF_UNDEF, RF_BINARY, RF_HEX, RF_JSON, }; static const struct { enum RetFormat rf; const char *name; } rf_names[] = { {RF_UNDEF, ""}, {RF_BINARY, "bin"}, {RF_HEX, "hex"}, {RF_JSON, "json"}, }; struct CCoin { uint32_t nHeight; CTxOut out; CCoin() : nHeight(0) {} - CCoin(Coin in) : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {} + explicit CCoin(Coin in) + : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { uint32_t nTxVerDummy = 0; READWRITE(nTxVerDummy); READWRITE(nHeight); READWRITE(out); } }; extern UniValue mempoolInfoToJSON(); extern UniValue mempoolToJSON(bool fVerbose = false); static bool RESTERR(HTTPRequest *req, enum HTTPStatusCode status, std::string message) { req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(status, message + "\r\n"); return false; } static enum RetFormat ParseDataFormat(std::string ¶m, const std::string &strReq) { const std::string::size_type pos = strReq.rfind('.'); if (pos == std::string::npos) { param = strReq; return rf_names[0].rf; } param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (suff == rf_names[i].name) { return rf_names[i].rf; } } /* If no suffix is found, return original string. 
*/ param = strReq; return rf_names[0].rf; } static std::string AvailableDataFormatsString() { std::string formats = ""; for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (strlen(rf_names[i].name) > 0) { formats.append("."); formats.append(rf_names[i].name); formats.append(", "); } } if (formats.length() > 0) { return formats.substr(0, formats.length() - 2); } return formats; } static bool ParseHashStr(const std::string &strReq, uint256 &v) { if (!IsHex(strReq) || (strReq.size() != 64)) { return false; } v.SetHex(strReq); return true; } static bool CheckWarmup(HTTPRequest *req) { std::string statusmessage; if (RPCIsInWarmup(&statusmessage)) { return RESTERR(req, HTTP_SERVICE_UNAVAILABLE, "Service temporarily unavailable: " + statusmessage); } return true; } static bool rest_headers(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector path; boost::split(path, param, boost::is_any_of("/")); if (path.size() != 2) { return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. Use " "/rest/headers//" ".."); } long count = strtol(path[0].c_str(), nullptr, 10); if (count < 1 || count > 2000) { return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]); } std::string hashStr = path[1]; uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } std::vector headers; headers.reserve(count); { LOCK(cs_main); BlockMap::const_iterator it = mapBlockIndex.find(hash); const CBlockIndex *pindex = (it != mapBlockIndex.end()) ? it->second : nullptr; while (pindex != nullptr && chainActive.Contains(pindex)) { headers.push_back(pindex); if (headers.size() == size_t(count)) { break; } pindex = chainActive.Next(pindex); } } CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { ssHeader << pindex->GetBlockHeader(); } switch (rf) { case RF_BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } case RF_HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RF_JSON: { UniValue jsonHeaders(UniValue::VARR); for (const CBlockIndex *pindex : headers) { jsonHeaders.push_back(blockheaderToJSON(pindex)); } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: .bin, .hex)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_block(const Config &config, HTTPRequest *req, const std::string &strURIPart, bool showTxDetails) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } CBlock block; CBlockIndex *pblockindex = nullptr; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } pblockindex = mapBlockIndex[hash]; if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not 
available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } } CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; switch (rf) { case RF_BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } case RF_HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RF_JSON: { UniValue objBlock = blockToJSON(config, block, pblockindex, showTxDetails); std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_block_extended(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, true); } static bool rest_block_notxdetails(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, false); } static bool rest_chaininfo(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RF_JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); UniValue chainInfoObject = getblockchaininfo(config, jsonRequest); std::string strJSON = chainInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_mempool_info(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RF_JSON: { UniValue mempoolInfoObject = mempoolInfoToJSON(); std::string strJSON = mempoolInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_mempool_contents(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RF_JSON: { UniValue mempoolObject = mempoolToJSON(true); std::string strJSON = mempoolObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_tx(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, 
strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const TxId txid(hash); CTransactionRef tx; uint256 hashBlock = uint256(); if (!GetTransaction(config, txid, tx, hashBlock, true)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssTx << tx; switch (rf) { case RF_BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } case RF_HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RF_JSON: { UniValue objTx(UniValue::VOBJ); TxToJSON(config, *tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_getutxos(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector uriParts; if (param.length() > 1) { std::string strUriParams = param.substr(1); boost::split(uriParts, strUriParams, boost::is_any_of("/")); } // throw exception in case of a empty request std::string strRequestMutable = req->ReadBody(); if (strRequestMutable.length() == 0 && uriParts.size() == 0) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } bool fInputParsed = false; bool fCheckMemPool = false; std::vector vOutPoints; // parse/deserialize input // input-format = output-format, rest/getutxos/bin requires binary input, // gives binary output, ... if (uriParts.size() > 0) { // inputs is sent over URI scheme // (/rest/getutxos/checkmempool/txid1-n/txid2-n/...) if (uriParts.size() > 0 && uriParts[0] == "checkmempool") { fCheckMemPool = true; } for (size_t i = (fCheckMemPool) ? 
1 : 0; i < uriParts.size(); i++) { uint256 txid; int32_t nOutput; std::string strTxid = uriParts[i].substr(0, uriParts[i].find("-")); std::string strOutput = uriParts[i].substr(uriParts[i].find("-") + 1); if (!ParseInt32(strOutput, &nOutput) || !IsHex(strTxid)) { return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } txid.SetHex(strTxid); vOutPoints.push_back(COutPoint(txid, (uint32_t)nOutput)); } if (vOutPoints.size() > 0) { fInputParsed = true; } else { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } } switch (rf) { case RF_HEX: { // convert hex to bin, continue then with bin part std::vector strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } // FALLTHROUGH case RF_BINARY: { try { // deserialize only if user sent a request if (strRequestMutable.size() > 0) { // don't allow sending input over URI and HTTP RAW DATA if (fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and " "raw post data is not allowed"); } CDataStream oss(SER_NETWORK, PROTOCOL_VERSION); oss << strRequestMutable; oss >> fCheckMemPool; oss >> vOutPoints; } } catch (const std::ios_base::failure &e) { // abort in case of unreadable binary data return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } break; } case RF_JSON: { if (!fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } break; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // limit max outpoints if (vOutPoints.size() > MAX_GETUTXOS_OUTPOINTS) { return RESTERR( req, HTTP_BAD_REQUEST, strprintf("Error: max outpoints exceeded (max: %d, tried: %d)", MAX_GETUTXOS_OUTPOINTS, vOutPoints.size())); } // check spentness and form a bitmap (as well as a JSON capable // human-readable string representation) std::vector bitmap; std::vector outs; std::string bitmapStringRepresentation; std::vector hits; bitmap.resize((vOutPoints.size() + 7) / 8); { LOCK2(cs_main, g_mempool.cs); CCoinsView viewDummy; CCoinsViewCache view(&viewDummy); CCoinsViewCache &viewChain = *pcoinsTip; CCoinsViewMemPool viewMempool(&viewChain, g_mempool); if (fCheckMemPool) { // switch cache backend to db+mempool in case user likes to query // mempool. view.SetBackend(viewMempool); } for (size_t i = 0; i < vOutPoints.size(); i++) { Coin coin; bool hit = false; if (view.GetCoin(vOutPoints[i], coin) && !g_mempool.isSpent(vOutPoints[i])) { hit = true; outs.emplace_back(std::move(coin)); } hits.push_back(hit); // form a binary string representation (human-readable for json // output) bitmapStringRepresentation.append(hit ? 
"1" : "0"); bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { case RF_BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string ssGetUTXOResponseString = ssGetUTXOResponse.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, ssGetUTXOResponseString); return true; } case RF_HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RF_JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials // use more or less the same output as mentioned in Bip64 objGetUTXOResponse.pushKV("chainHeight", chainActive.Height()); objGetUTXOResponse.pushKV( "chaintipHash", chainActive.Tip()->GetBlockHash().GetHex()); objGetUTXOResponse.pushKV("bitmap", bitmapStringRepresentation); UniValue utxos(UniValue::VARR); for (const CCoin &coin : outs) { UniValue utxo(UniValue::VOBJ); utxo.pushKV("height", int32_t(coin.nHeight)); utxo.pushKV("value", ValueFromAmount(coin.out.nValue)); // include the script in a json output UniValue o(UniValue::VOBJ); ScriptPubKeyToJSON(config, coin.out.scriptPubKey, o, true); utxo.pushKV("scriptPubKey", o); utxos.push_back(utxo); } objGetUTXOResponse.pushKV("utxos", utxos); // return json string std::string strJSON = objGetUTXOResponse.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static const struct { const char *prefix; bool (*handler)(Config &config, HTTPRequest *req, const std::string &strReq); } uri_prefixes[] = { {"/rest/tx/", rest_tx}, {"/rest/block/notxdetails/", rest_block_notxdetails}, {"/rest/block/", rest_block_extended}, {"/rest/chaininfo", rest_chaininfo}, {"/rest/mempool/info", rest_mempool_info}, {"/rest/mempool/contents", rest_mempool_contents}, {"/rest/headers/", rest_headers}, {"/rest/getutxos", rest_getutxos}, }; bool StartREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { RegisterHTTPHandler(uri_prefixes[i].prefix, false, uri_prefixes[i].handler); } return true; } void InterruptREST() {} void StopREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { UnregisterHTTPHandler(uri_prefixes[i].prefix, false); } } diff --git a/src/reverse_iterator.h b/src/reverse_iterator.h index 556f00bfca..3341cceaf7 100644 --- a/src/reverse_iterator.h +++ b/src/reverse_iterator.h @@ -1,29 +1,29 @@ // Taken from https://gist.github.com/arvidsson/7231973 #ifndef BITCOIN_REVERSE_ITERATOR_HPP #define BITCOIN_REVERSE_ITERATOR_HPP /** * Template used for reverse iteration in C++11 range-based for loops. 
* * std::vector v = {1, 2, 3, 4, 5}; * for (auto x : reverse_iterate(v)) * std::cout << x << " "; */ template class reverse_range { T &x; public: - reverse_range(T &xin) : x(xin) {} + explicit reverse_range(T &xin) : x(xin) {} auto begin() const -> decltype(this->x.rbegin()) { return x.rbegin(); } auto end() const -> decltype(this->x.rend()) { return x.rend(); } }; template reverse_range reverse_iterate(T &x) { return reverse_range(x); } #endif // BITCOIN_REVERSE_ITERATOR_HPP diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 26d4a1e32f..9d7eeef925 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1,834 +1,834 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/mining.h" #include "amount.h" #include "blockvalidity.h" #include "chain.h" #include "chainparams.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/params.h" #include "consensus/validation.h" #include "core_io.h" #include "dstencode.h" #include "init.h" #include "miner.h" #include "net.h" #include "policy/policy.h" #include "pow.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "txmempool.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include "validationinterface.h" #include "warnings.h" #include #include #include /** * Return average network hashes per second based on the last 'lookup' blocks, * or from the last difficulty change if 'lookup' is nonpositive. If 'height' is * nonnegative, compute the estimate at the time when a given block was found. */ static UniValue GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = chainActive.Tip(); if (height >= 0 && height < chainActive.Height()) { pb = chainActive[height]; } if (pb == nullptr || !pb->nHeight) { return 0; } // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) { lookup = pb->nHeight % Params().GetConsensus().DifficultyAdjustmentInterval() + 1; } // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) { lookup = pb->nHeight; } CBlockIndex *pb0 = pb; int64_t minTime = pb0->GetBlockTime(); int64_t maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64_t time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a // divide by zero exception. if (minTime == maxTime) { return 0; } arith_uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64_t timeDiff = maxTime - minTime; return workDiff.getdouble() / timeDiff; } static UniValue getnetworkhashps(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getnetworkhashps ( nblocks height )\n" "\nReturns the estimated network hashes per second based on the " "last n blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last " "difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a " "certain block was found.\n" "\nArguments:\n" "1. nblocks (numeric, optional, default=120) The number of " "blocks, or -1 for blocks since last difficulty change.\n" "2. 
height (numeric, optional, default=-1) To estimate at the " "time of the given height.\n" "\nResult:\n" "x (numeric) Hashes per second estimated\n" "\nExamples:\n" + HelpExampleCli("getnetworkhashps", "") + HelpExampleRpc("getnetworkhashps", "")); } LOCK(cs_main); return GetNetworkHashPS( request.params.size() > 0 ? request.params[0].get_int() : 120, request.params.size() > 1 ? request.params[1].get_int() : -1); } UniValue generateBlocks(const Config &config, std::shared_ptr coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript) { static const int nInnerLoopCount = 0x100000; int nHeightStart = 0; int nHeightEnd = 0; int nHeight = 0; { // Don't keep cs_main locked. LOCK(cs_main); nHeightStart = chainActive.Height(); nHeight = nHeightStart; nHeightEnd = nHeightStart + nGenerate; } unsigned int nExtraNonce = 0; UniValue blockHashes(UniValue::VARR); while (nHeight < nHeightEnd) { std::unique_ptr pblocktemplate( BlockAssembler(config, g_mempool) .CreateNewBlock(coinbaseScript->reserveScript)); if (!pblocktemplate.get()) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); IncrementExtraNonce(config, pblock, chainActive.Tip(), nExtraNonce); } while (nMaxTries > 0 && pblock->nNonce < nInnerLoopCount && !CheckProofOfWork(pblock->GetHash(), pblock->nBits, config)) { ++pblock->nNonce; --nMaxTries; } if (nMaxTries == 0) { break; } if (pblock->nNonce == nInnerLoopCount) { continue; } std::shared_ptr shared_pblock = std::make_shared(*pblock); if (!ProcessNewBlock(config, shared_pblock, true, nullptr)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); } ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); // Mark script as important because it was used at least for one // coinbase output if the script came from the wallet. if (keepScript) { coinbaseScript->KeepScript(); } } return blockHashes; } static UniValue generatetoaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "generatetoaddress nblocks address (maxtries)\n" "\nMine blocks immediately to a specified address (before the RPC " "call returns)\n" "\nArguments:\n" "1. nblocks (numeric, required) How many blocks are generated " "immediately.\n" "2. address (string, required) The address to send the newly " "generated bitcoin to.\n" "3. maxtries (numeric, optional) How many iterations to try " "(default = 1000000).\n" "\nResult:\n" "[ blockhashes ] (array) hashes of blocks generated\n" "\nExamples:\n" "\nGenerate 11 blocks to myaddress\n" + HelpExampleCli("generatetoaddress", "11 \"myaddress\"")); } int nGenerate = request.params[0].get_int(); uint64_t nMaxTries = 1000000; if (request.params.size() > 2) { nMaxTries = request.params[2].get_int(); } CTxDestination destination = DecodeDestination(request.params[1].get_str(), config.GetChainParams()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address"); } std::shared_ptr coinbaseScript = std::make_shared(); coinbaseScript->reserveScript = GetScriptForDestination(destination); return generateBlocks(config, coinbaseScript, nGenerate, nMaxTries, false); } static UniValue getmininginfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmininginfo\n" "\nReturns a json object containing mining-related information." 
"\nResult:\n" "{\n" " \"blocks\": nnn, (numeric) The current block\n" " \"currentblocksize\": nnn, (numeric) The last block size\n" " \"currentblocktx\": nnn, (numeric) The last block " "transaction\n" " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"errors\": \"...\" (string) Current errors\n" " \"networkhashps\": nnn, (numeric) The network hashes per " "second\n" " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmininginfo", "") + HelpExampleRpc("getmininginfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.pushKV("blocks", int(chainActive.Height())); obj.pushKV("currentblocksize", uint64_t(nLastBlockSize)); obj.pushKV("currentblocktx", uint64_t(nLastBlockTx)); obj.pushKV("difficulty", double(GetDifficulty(chainActive.Tip()))); obj.pushKV("blockprioritypercentage", uint8_t(gArgs.GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE))); obj.pushKV("errors", GetWarnings("statusbar")); obj.pushKV("networkhashps", getnetworkhashps(config, request)); obj.pushKV("pooledtx", uint64_t(g_mempool.size())); obj.pushKV("chain", config.GetChainParams().NetworkIDString()); return obj; } // NOTE: Unlike wallet RPC (which use BCH values), mining RPCs follow GBT (BIP // 22) in using satoshi amounts static UniValue prioritisetransaction(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "prioritisetransaction \n" "Accepts the transaction into mined blocks at a higher (or lower) " "priority\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id.\n" "2. priority_delta (numeric, required) The priority to add or " "subtract.\n" " The transaction selection algorithm considers " "the tx as it would have a higher priority.\n" " (priority of a transaction is calculated: " "coinage * value_in_satoshis / txsize) \n" "3. fee_delta (numeric, required) The fee value (in satoshis) " "to add (or subtract, if negative).\n" " The fee is not actually paid, only the " "algorithm for selecting transactions into a block\n" " considers the transaction as it would have paid " "a higher (or lower) fee.\n" "\nResult:\n" "true (boolean) Returns true\n" "\nExamples:\n" + HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000") + HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")); } LOCK(cs_main); uint256 hash = ParseHashStr(request.params[0].get_str(), "txid"); Amount nAmount = request.params[2].get_int64() * SATOSHI; g_mempool.PrioritiseTransaction(hash, request.params[0].get_str(), request.params[1].get_real(), nAmount); return true; } // NOTE: Assumes a conclusive result; if result is inconclusive, it must be // handled by caller static UniValue BIP22ValidationResult(const Config &config, const CValidationState &state) { if (state.IsValid()) { return NullUniValue; } std::string strRejectReason = state.GetRejectReason(); if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, strRejectReason); } if (state.IsInvalid()) { if (strRejectReason.empty()) { return "rejected"; } return strRejectReason; } // Should be impossible. 
return "valid?"; } static UniValue getblocktemplate(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to " "explicitly select between the default 'template' request or a " "'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/" "bip-0009.mediawiki#getblocktemplate_changes\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object " "in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be " "set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', " "'serverlist', 'workid'\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred " "block version\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of " "current highest block\n" " \"transactions\" : [ (array) contents of " "non-coinbase transactions that should be included in the next " "block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction " "data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id " "encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded " "in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers " "\n" " n (numeric) transactions " "before this one (by 1-based index in 'transactions' list) that " "must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in " "value between transaction inputs and outputs (in Satoshis); for " "coinbase transactions, this is a negative Number of the total " "collected block fees (ie, not including the block subsidy); if " "key is not present, fee is unknown and clients MUST NOT assume " "there isn't one\n" " \"sigops\" : n, (numeric) total SigOps " "cost, as counted for purposes of block limits; if key is not " "present, sigop cost is unknown and clients MUST NOT assume it is " "zero\n" " \"required\" : true|false (boolean) if provided and " "true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that " "should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to " "be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable " "input to coinbase transaction, including the generation award and " "transaction fees (in Satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information " "for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum " "timestamp appropriate for next block time in seconds since epoch " "(Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of " "ways the block template may be changed \n" " \"value\" (string) A way the block " "template may be changed, e.g. 
'time', 'transactions', " "'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid " "nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops " "in blocks\n" " \"sizelimit\" : n, (numeric) limit of block " "size\n" " \"curtime\" : ttt, (numeric) current timestamp " "in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed " "target of next block\n" " \"height\" : n (numeric) The height of the " "next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "")); } LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set setClientRules; if (request.params.size() > 0) { const UniValue &oparam = request.params[0].get_obj(); const UniValue &modeval = find_value(oparam, "mode"); if (modeval.isStr()) { strMode = modeval.get_str(); } else if (modeval.isNull()) { /* Do nothing */ } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue &dataval = find_value(oparam, "data"); if (!dataval.isStr()) { throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); } CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } uint256 hash = block.GetHash(); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } return "duplicate-inconclusive"; } CBlockIndex *const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) { return "inconclusive-not-best-prevblk"; } CValidationState state; BlockValidationOptions validationOptions = BlockValidationOptions(false, true); TestBlockValidity(config, state, block, pindexPrev, validationOptions); return BIP22ValidationResult(config, state); } } if (strMode != "template") { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (!g_connman) { throw JSONRPCError( RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); } if (IsInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); } static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has // passed and there are more transactions uint256 hashWatchedChain; std::chrono::steady_clock::time_point checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, // but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = std::chrono::steady_clock::now() + std::chrono::minutes(1); WAIT_LOCK(g_best_block_mutex, lock); while (g_best_block == hashWatchedChain && 
IsRPCRunning()) { if (g_best_block_cv.wait_until(lock, checktxtime) == std::cv_status::timeout) { // Timeout: Check transactions for update if (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) { break; } checktxtime += std::chrono::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); } // TODO: Maybe recheck connections/IBD and (if something wrong) send an // expires-immediately template to stop miners? } // Update block static CBlockIndex *pindexPrev; static int64_t nStart; static std::unique_ptr pblocktemplate; if (pindexPrev != chainActive.Tip() || (g_mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any // failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = g_mempool.GetTransactionsUpdated(); CBlockIndex *pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(config, g_mempool).CreateNewBlock(scriptDummy); if (!pblocktemplate) { throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); } // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } // pointer for convenience CBlock *pblock = &pblocktemplate->block; // Update nTime UpdateTime(pblock, config, pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); std::map setTxIndex; int i = 0; for (const auto &it : pblock->vtx) { const CTransaction &tx = *it; uint256 txId = tx.GetId(); setTxIndex[txId] = i++; if (tx.IsCoinBase()) { continue; } UniValue entry(UniValue::VOBJ); entry.pushKV("data", EncodeHexTx(tx)); entry.pushKV("txid", txId.GetHex()); entry.pushKV("hash", tx.GetHash().GetHex()); UniValue deps(UniValue::VARR); for (const CTxIn &in : tx.vin) { if (setTxIndex.count(in.prevout.GetTxId())) { deps.push_back(setTxIndex[in.prevout.GetTxId()]); } } entry.pushKV("depends", deps); int index_in_template = i - 1; entry.pushKV("fee", pblocktemplate->entries[index_in_template].fees / SATOSHI); int64_t nTxSigOps = pblocktemplate->entries[index_in_template].sigOpCount; entry.pushKV("sigops", nTxSigOps); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.pushKV("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end())); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.pushKV("capabilities", aCaps); result.pushKV("version", pblock->nVersion); result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex()); result.pushKV("transactions", transactions); result.pushKV("coinbaseaux", aux); result.pushKV("coinbasevalue", int64_t(pblock->vtx[0]->vout[0].nValue / SATOSHI)); result.pushKV("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast)); result.pushKV("target", hashTarget.GetHex()); result.pushKV("mintime", int64_t(pindexPrev->GetMedianTimePast()) + 1); result.pushKV("mutable", aMutable); result.pushKV("noncerange", "00000000ffffffff"); // FIXME: Allow for mining block greater than 1M. 
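For reference, the longpollid pushed a few lines below is just the 64-character hex of the tip block hash concatenated with the mempool update counter (GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast)), and the long-poll branch above splits it back apart with substr(0, 64) and atoi64. A minimal standalone round trip of that encoding, using std::to_string/std::stoll in place of the project helpers; MakeLongPollId and SplitLongPollId are invented names for this sketch:

#include <cassert>
#include <cstdint>
#include <string>

static std::string MakeLongPollId(const std::string &tipHashHex,
                                  int64_t nTransactionsUpdated) {
    // 64 hex characters for the block hash, then the decimal counter appended.
    return tipHashHex + std::to_string(nTransactionsUpdated);
}

static void SplitLongPollId(const std::string &lpstr, std::string &hashHex,
                            int64_t &nUpdated) {
    hashHex = lpstr.substr(0, 64);
    nUpdated = std::stoll(lpstr.substr(64));
}

int main() {
    const std::string tip(64, 'a'); // placeholder hash, 64 hex characters
    std::string id = MakeLongPollId(tip, 42);
    std::string hashPart;
    int64_t counter = 0;
    SplitLongPollId(id, hashPart, counter);
    assert(hashPart == tip && counter == 42);
    return 0;
}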
result.pushKV("sigoplimit", GetMaxBlockSigOpsCount(DEFAULT_MAX_BLOCK_SIZE)); result.pushKV("sizelimit", DEFAULT_MAX_BLOCK_SIZE); result.pushKV("curtime", pblock->GetBlockTime()); result.pushKV("bits", strprintf("%08x", pblock->nBits)); result.pushKV("height", int64_t(pindexPrev->nHeight) + 1); return result; } class submitblock_StateCatcher : public CValidationInterface { public: uint256 hash; bool found; CValidationState state; - submitblock_StateCatcher(const uint256 &hashIn) + explicit submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {} protected: void BlockChecked(const CBlock &block, const CValidationState &stateIn) override { if (block.GetHash() != hash) { return; } found = true; state = stateIn; } }; static UniValue submitblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "submitblock \"hexdata\" ( \"jsonparametersobject\" )\n" "\nAttempts to submit new block to network.\n" "The 'jsonparametersobject' parameter is currently ignored.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n" "\nArguments\n" "1. \"hexdata\" (string, required) the hex-encoded block " "data to submit\n" "2. \"parameters\" (string, optional) object of optional " "parameters\n" " {\n" " \"workid\" : \"id\" (string, optional) if the server " "provided a workid, it MUST be included with submissions\n" " }\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("submitblock", "\"mydata\"") + HelpExampleRpc("submitblock", "\"mydata\"")); } std::shared_ptr blockptr = std::make_shared(); CBlock &block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase"); } uint256 hash = block.GetHash(); bool fBlockPresent = false; { LOCK(cs_main); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BlockValidity::SCRIPTS)) { return "duplicate"; } if (pindex->nStatus.isInvalid()) { return "duplicate-invalid"; } // Otherwise, we might only have the header - process the block // before returning fBlockPresent = true; } } submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); bool fAccepted = ProcessNewBlock(config, blockptr, true, nullptr); UnregisterValidationInterface(&sc); if (fBlockPresent) { if (fAccepted && !sc.found) { return "duplicate-inconclusive"; } return "duplicate"; } if (!sc.found) { return "inconclusive"; } return BIP22ValidationResult(config, sc.state); } static UniValue estimatefee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatefee nblocks\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. 
nblocks (numeric, required)\n" "\nResult:\n" "n (numeric) estimated fee-per-kilobyte\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate.\n" "-1 is always returned for nblocks == 1 as it is impossible to " "calculate\n" "a fee that is high enough to get reliably included in the next " "block.\n" "\nExample:\n" + HelpExampleCli("estimatefee", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); if (nBlocks < 1) { nBlocks = 1; } CFeeRate feeRate = g_mempool.estimateFee(nBlocks); if (feeRate == CFeeRate(Amount::zero())) { return -1.0; } return ValueFromAmount(feeRate.GetFeePerK()); } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // ---------- ------------------------ ---------------------- ---------- {"mining", "getnetworkhashps", getnetworkhashps, {"nblocks", "height"}}, {"mining", "getmininginfo", getmininginfo, {}}, {"mining", "prioritisetransaction", prioritisetransaction, {"txid", "priority_delta", "fee_delta"}}, {"mining", "getblocktemplate", getblocktemplate, {"template_request"}}, {"mining", "submitblock", submitblock, {"hexdata", "parameters"}}, {"generating", "generatetoaddress", generatetoaddress, {"nblocks", "address", "maxtries"}}, {"util", "estimatefee", estimatefee, {"nblocks"}}, }; // clang-format on void RegisterMiningRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index c898e4d165..64ceaa2f65 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -1,695 +1,695 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/misc.h" #include "base58.h" #include "clientversion.h" #include "config.h" #include "dstencode.h" #include "init.h" #include "net.h" #include "netbase.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "timedata.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #ifdef ENABLE_WALLET #include "wallet/rpcwallet.h" #include "wallet/wallet.h" #include "wallet/walletdb.h" #endif #include "warnings.h" #include #include #ifdef HAVE_MALLOC_INFO #include #endif /** * @note Do not add or change anything in the information returned by this * method. `getinfo` exists for backwards-compatibility only. It combines * information from wildly different sources in the program, which is a mess, * and is thus planned to be deprecated eventually. * * Based on the source of the information, new information should be added to: * - `getblockchaininfo`, * - `getnetworkinfo` or * - `getwalletinfo` * * Or alternatively, create a specific query method for the information. **/ static UniValue getinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getinfo\n" "\nDEPRECATED. 
Returns an object containing various state info.\n" "\nResult:\n" "{\n" " \"version\": xxxxx, (numeric) the server version\n" " \"protocolversion\": xxxxx, (numeric) the protocol version\n" " \"walletversion\": xxxxx, (numeric) the wallet version\n" " \"balance\": xxxxxxx, (numeric) the total bitcoin " "balance of the wallet\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"timeoffset\": xxxxx, (numeric) the time offset\n" " \"connections\": xxxxx, (numeric) the number of " "connections\n" " \"proxy\": \"host:port\", (string, optional) the proxy used " "by the server\n" " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" " \"testnet\": true|false, (boolean) if the server is using " "testnet or not\n" " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds " "since Unix epoch) of the oldest pre-generated key in the key " "pool\n" " \"keypoolsize\": xxxx, (numeric) how many new keys are " "pre-generated\n" " \"unlocked_until\": ttt, (numeric) the timestamp in " "seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is " "unlocked for transfers, or 0 if the wallet is locked\n" " \"paytxfee\": x.xxxx, (numeric) the transaction fee set " "in " + CURRENCY_UNIT + "/kB\n" " \"relayfee\": x.xxxx, (numeric) minimum " "relay fee for non-free transactions in " + CURRENCY_UNIT + "/kB\n" " \"errors\": \"...\" (string) any error messages\n" "}\n" "\nExamples:\n" + HelpExampleCli("getinfo", "") + HelpExampleRpc("getinfo", "")); } #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); LOCK2(cs_main, pwallet ? &pwallet->cs_wallet : nullptr); #else LOCK(cs_main); #endif proxyType proxy; GetProxy(NET_IPV4, proxy); UniValue obj(UniValue::VOBJ); obj.pushKV("version", CLIENT_VERSION); obj.pushKV("protocolversion", PROTOCOL_VERSION); #ifdef ENABLE_WALLET if (pwallet) { obj.pushKV("walletversion", pwallet->GetVersion()); obj.pushKV("balance", ValueFromAmount(pwallet->GetBalance())); } #endif obj.pushKV("blocks", (int)chainActive.Height()); obj.pushKV("timeoffset", GetTimeOffset()); if (g_connman) { obj.pushKV("connections", (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL)); } obj.pushKV("proxy", (proxy.IsValid() ? 
proxy.proxy.ToStringIPPort() : std::string())); obj.pushKV("difficulty", double(GetDifficulty(chainActive.Tip()))); obj.pushKV("testnet", config.GetChainParams().NetworkIDString() == CBaseChainParams::TESTNET); #ifdef ENABLE_WALLET if (pwallet) { obj.pushKV("keypoololdest", pwallet->GetOldestKeyPoolTime()); obj.pushKV("keypoolsize", (int)pwallet->GetKeyPoolSize()); } if (pwallet && pwallet->IsCrypted()) { obj.pushKV("unlocked_until", pwallet->nRelockTime); } obj.pushKV("paytxfee", ValueFromAmount(payTxFee.GetFeePerK())); #endif obj.pushKV("relayfee", ValueFromAmount(config.GetMinFeePerKB().GetFeePerK())); obj.pushKV("errors", GetWarnings("statusbar")); return obj; } #ifdef ENABLE_WALLET class DescribeAddressVisitor : public boost::static_visitor { public: CWallet *const pwallet; - DescribeAddressVisitor(CWallet *_pwallet) : pwallet(_pwallet) {} + explicit DescribeAddressVisitor(CWallet *_pwallet) : pwallet(_pwallet) {} UniValue operator()(const CNoDestination &dest) const { return UniValue(UniValue::VOBJ); } UniValue operator()(const CKeyID &keyID) const { UniValue obj(UniValue::VOBJ); CPubKey vchPubKey; obj.pushKV("isscript", false); if (pwallet && pwallet->GetPubKey(keyID, vchPubKey)) { obj.pushKV("pubkey", HexStr(vchPubKey)); obj.pushKV("iscompressed", vchPubKey.IsCompressed()); } return obj; } UniValue operator()(const CScriptID &scriptID) const { UniValue obj(UniValue::VOBJ); CScript subscript; obj.pushKV("isscript", true); if (pwallet && pwallet->GetCScript(scriptID, subscript)) { std::vector addresses; txnouttype whichType; int nRequired; ExtractDestinations(subscript, whichType, addresses, nRequired); obj.pushKV("script", GetTxnOutputType(whichType)); obj.pushKV("hex", HexStr(subscript.begin(), subscript.end())); UniValue a(UniValue::VARR); for (const CTxDestination &addr : addresses) { a.push_back(EncodeDestination(addr)); } obj.pushKV("addresses", a); if (whichType == TX_MULTISIG) { obj.pushKV("sigsrequired", nRequired); } } return obj; } }; #endif static UniValue validateaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "validateaddress \"address\"\n" "\nReturn information about the given bitcoin address.\n" "\nArguments:\n" "1. \"address\" (string, required) The bitcoin address to " "validate\n" "\nResult:\n" "{\n" " \"isvalid\" : true|false, (boolean) If the address is " "valid or not. If not, this is the only property returned.\n" " \"address\" : \"address\", (string) The bitcoin address " "validated\n" " \"scriptPubKey\" : \"hex\", (string) The hex encoded " "scriptPubKey generated by the address\n" " \"ismine\" : true|false, (boolean) If the address is " "yours or not\n" " \"iswatchonly\" : true|false, (boolean) If the address is " "watchonly\n" " \"isscript\" : true|false, (boolean) If the key is a " "script\n" " \"pubkey\" : \"publickeyhex\", (string) The hex value of the " "raw public key\n" " \"iscompressed\" : true|false, (boolean) If the address is " "compressed\n" " \"account\" : \"account\" (string) DEPRECATED. 
The " "account associated with the address, \"\" is the default account\n" " \"timestamp\" : timestamp, (number, optional) The " "creation time of the key if available in seconds since epoch (Jan " "1 1970 GMT)\n" " \"hdkeypath\" : \"keypath\" (string, optional) The HD " "keypath if the key is HD and available\n" " \"hdmasterkeyid\" : \"\" (string, optional) The " "Hash160 of the HD master pubkey\n" "}\n" "\nExamples:\n" + HelpExampleCli("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"") + HelpExampleRpc("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")); } #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); LOCK2(cs_main, pwallet ? &pwallet->cs_wallet : nullptr); #else LOCK(cs_main); #endif CTxDestination dest = DecodeDestination(request.params[0].get_str(), config.GetChainParams()); bool isValid = IsValidDestination(dest); UniValue ret(UniValue::VOBJ); ret.pushKV("isvalid", isValid); if (isValid) { std::string currentAddress = EncodeDestination(dest); ret.pushKV("address", currentAddress); CScript scriptPubKey = GetScriptForDestination(dest); ret.pushKV("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end())); #ifdef ENABLE_WALLET isminetype mine = pwallet ? IsMine(*pwallet, dest) : ISMINE_NO; ret.pushKV("ismine", (mine & ISMINE_SPENDABLE) ? true : false); ret.pushKV("iswatchonly", (mine & ISMINE_WATCH_ONLY) ? true : false); UniValue detail = boost::apply_visitor(DescribeAddressVisitor(pwallet), dest); ret.pushKVs(detail); if (pwallet && pwallet->mapAddressBook.count(dest)) { ret.pushKV("account", pwallet->mapAddressBook[dest].name); } if (pwallet) { const auto &meta = pwallet->mapKeyMetadata; const CKeyID *keyID = boost::get(&dest); auto it = keyID ? meta.find(*keyID) : meta.end(); if (it == meta.end()) { it = meta.find(CScriptID(scriptPubKey)); } if (it != meta.end()) { ret.pushKV("timestamp", it->second.nCreateTime); if (!it->second.hdKeypath.empty()) { ret.pushKV("hdkeypath", it->second.hdKeypath); ret.pushKV("hdmasterkeyid", it->second.hdMasterKeyID.GetHex()); } } } #endif } return ret; } // Needed even with !ENABLE_WALLET, to pass (ignored) pointers around class CWallet; /** * Used by addmultisigaddress / createmultisig: */ CScript createmultisig_redeemScript(CWallet *const pwallet, const UniValue ¶ms) { int nRequired = params[0].get_int(); const UniValue &keys = params[1].get_array(); // Gather public keys if (nRequired < 1) { throw std::runtime_error( "a multisignature address must require at least one key to redeem"); } if ((int)keys.size() < nRequired) { throw std::runtime_error( strprintf("not enough keys supplied " "(got %u keys, but need at least %d to redeem)", keys.size(), nRequired)); } if (keys.size() > 16) { throw std::runtime_error( "Number of addresses involved in the " "multisignature address creation > 16\nReduce the " "number"); } std::vector pubkeys; pubkeys.resize(keys.size()); for (size_t i = 0; i < keys.size(); i++) { const std::string &ks = keys[i].get_str(); #ifdef ENABLE_WALLET // Case 1: Bitcoin address and we have full public key: if (pwallet) { CTxDestination dest = DecodeDestination(ks, pwallet->chainParams); if (IsValidDestination(dest)) { const CKeyID *keyID = boost::get(&dest); if (!keyID) { throw std::runtime_error( strprintf("%s does not refer to a key", ks)); } CPubKey vchPubKey; if (!pwallet->GetPubKey(*keyID, vchPubKey)) { throw std::runtime_error( strprintf("no full public key for address %s", ks)); } if (!vchPubKey.IsFullyValid()) { throw std::runtime_error(" Invalid public key: " + 
ks); } pubkeys[i] = vchPubKey; continue; } } #endif // Case 2: hex public key if (IsHex(ks)) { CPubKey vchPubKey(ParseHex(ks)); if (!vchPubKey.IsFullyValid()) { throw std::runtime_error(" Invalid public key: " + ks); } pubkeys[i] = vchPubKey; } else { throw std::runtime_error(" Invalid public key: " + ks); } } CScript result = GetScriptForMultisig(nRequired, pubkeys); if (result.size() > MAX_SCRIPT_ELEMENT_SIZE) { throw std::runtime_error( strprintf("redeemScript exceeds size limit: %d > %d", result.size(), MAX_SCRIPT_ELEMENT_SIZE)); } return result; } static UniValue createmultisig(const Config &config, const JSONRPCRequest &request) { #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); #else CWallet *const pwallet = nullptr; #endif if (request.fHelp || request.params.size() < 2 || request.params.size() > 2) { std::string msg = "createmultisig nrequired [\"key\",...]\n" "\nCreates a multi-signature address with n signature of m keys " "required.\n" "It returns a json object with the address and redeemScript.\n" "\nArguments:\n" "1. nrequired (numeric, required) The number of required " "signatures out of the n keys or addresses.\n" "2. \"keys\" (string, required) A json array of keys which " "are bitcoin addresses or hex-encoded public keys\n" " [\n" " \"key\" (string) bitcoin address or hex-encoded public " "key\n" " ,...\n" " ]\n" "\nResult:\n" "{\n" " \"address\":\"multisigaddress\", (string) The value of the new " "multisig address.\n" " \"redeemScript\":\"script\" (string) The string value of " "the hex-encoded redemption script.\n" "}\n" "\nExamples:\n" "\nCreate a multisig address from 2 addresses\n" + HelpExampleCli("createmultisig", "2 " "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\"," "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"") + "\nAs a json rpc call\n" + HelpExampleRpc("createmultisig", "2, " "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\"," "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\""); throw std::runtime_error(msg); } // Construct using pay-to-script-hash: CScript inner = createmultisig_redeemScript(pwallet, request.params); CScriptID innerID(inner); UniValue result(UniValue::VOBJ); result.pushKV("address", EncodeDestination(innerID)); result.pushKV("redeemScript", HexStr(inner.begin(), inner.end())); return result; } static UniValue verifymessage(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "verifymessage \"address\" \"signature\" \"message\"\n" "\nVerify a signed message\n" "\nArguments:\n" "1. \"address\" (string, required) The bitcoin address to " "use for the signature.\n" "2. \"signature\" (string, required) The signature provided " "by the signer in base 64 encoding (see signmessage).\n" "3. 
\"message\" (string, required) The message that was " "signed.\n" "\nResult:\n" "true|false (boolean) If the signature is verified or not.\n" "\nExamples:\n" "\nUnlock the wallet for 30 seconds\n" + HelpExampleCli("walletpassphrase", "\"mypassphrase\" 30") + "\nCreate the signature\n" + HelpExampleCli( "signmessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"my message\"") + "\nVerify the signature\n" + HelpExampleCli("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\" \"signature\" \"my " "message\"") + "\nAs json rpc\n" + HelpExampleRpc("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\", \"signature\", \"my " "message\"")); } LOCK(cs_main); std::string strAddress = request.params[0].get_str(); std::string strSign = request.params[1].get_str(); std::string strMessage = request.params[2].get_str(); CTxDestination destination = DecodeDestination(strAddress, config.GetChainParams()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address"); } const CKeyID *keyID = boost::get(&destination); if (!keyID) { throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key"); } bool fInvalid = false; std::vector vchSig = DecodeBase64(strSign.c_str(), &fInvalid); if (fInvalid) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding"); } CHashWriter ss(SER_GETHASH, 0); ss << strMessageMagic; ss << strMessage; CPubKey pubkey; if (!pubkey.RecoverCompact(ss.GetHash(), vchSig)) { return false; } return (pubkey.GetID() == *keyID); } static UniValue signmessagewithprivkey(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 2) { throw std::runtime_error( "signmessagewithprivkey \"privkey\" \"message\"\n" "\nSign a message with the private key of an address\n" "\nArguments:\n" "1. \"privkey\" (string, required) The private key to sign " "the message with.\n" "2. \"message\" (string, required) The message to create a " "signature of.\n" "\nResult:\n" "\"signature\" (string) The signature of the message " "encoded in base 64\n" "\nExamples:\n" "\nCreate the signature\n" + HelpExampleCli("signmessagewithprivkey", "\"privkey\" \"my message\"") + "\nVerify the signature\n" + HelpExampleCli("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\" \"signature\" \"my " "message\"") + "\nAs json rpc\n" + HelpExampleRpc("signmessagewithprivkey", "\"privkey\", \"my message\"")); } std::string strPrivkey = request.params[0].get_str(); std::string strMessage = request.params[1].get_str(); CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(strPrivkey); if (!fGood) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key"); } CKey key = vchSecret.GetKey(); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Private key outside allowed range"); } CHashWriter ss(SER_GETHASH, 0); ss << strMessageMagic; ss << strMessage; std::vector vchSig; if (!key.SignCompact(ss.GetHash(), vchSig)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed"); } return EncodeBase64(&vchSig[0], vchSig.size()); } static UniValue setmocktime(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "setmocktime timestamp\n" "\nSet the local time to given timestamp (-regtest only)\n" "\nArguments:\n" "1. 
timestamp (integer, required) Unix seconds-since-epoch " "timestamp\n" " Pass 0 to go back to using the system time."); } if (!config.GetChainParams().MineBlocksOnDemand()) { throw std::runtime_error( "setmocktime for regression testing (-regtest mode) only"); } // For now, don't change mocktime if we're in the middle of validation, as // this could have an effect on mempool time-based eviction, as well as // IsCurrentForFeeEstimation() and IsInitialBlockDownload(). // TODO: figure out the right way to synchronize around mocktime, and // ensure all callsites of GetTime() are accessing this safely. LOCK(cs_main); RPCTypeCheck(request.params, {UniValue::VNUM}); SetMockTime(request.params[0].get_int64()); return NullUniValue; } static UniValue RPCLockedMemoryInfo() { LockedPool::Stats stats = LockedPoolManager::Instance().stats(); UniValue obj(UniValue::VOBJ); obj.pushKV("used", uint64_t(stats.used)); obj.pushKV("free", uint64_t(stats.free)); obj.pushKV("total", uint64_t(stats.total)); obj.pushKV("locked", uint64_t(stats.locked)); obj.pushKV("chunks_used", uint64_t(stats.chunks_used)); obj.pushKV("chunks_free", uint64_t(stats.chunks_free)); return obj; } #ifdef HAVE_MALLOC_INFO static std::string RPCMallocInfo() { char *ptr = nullptr; size_t size = 0; FILE *f = open_memstream(&ptr, &size); if (f) { malloc_info(0, f); fclose(f); if (ptr) { std::string rv(ptr, size); free(ptr); return rv; } } return ""; } #endif static UniValue getmemoryinfo(const Config &config, const JSONRPCRequest &request) { /* Please, avoid using the word "pool" here in the RPC interface or help, * as users will undoubtedly confuse it with the other "memory pool" */ if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getmemoryinfo (\"mode\")\n" "Returns an object containing information about memory usage.\n" "Arguments:\n" "1. \"mode\" determines what kind of information is returned. This " "argument is optional, the default mode is \"stats\".\n" " - \"stats\" returns general statistics about memory usage in " "the daemon.\n" " - \"mallocinfo\" returns an XML string describing low-level " "heap state (only available if compiled with glibc 2.10+).\n" "\nResult (mode \"stats\"):\n" "{\n" " \"locked\": { (json object) Information about " "locked memory manager\n" " \"used\": xxxxx, (numeric) Number of bytes used\n" " \"free\": xxxxx, (numeric) Number of bytes available " "in current arenas\n" " \"total\": xxxxxxx, (numeric) Total number of bytes " "managed\n" " \"locked\": xxxxxx, (numeric) Amount of bytes that " "succeeded locking. If this number is smaller than total, locking " "pages failed at some point and key data could be swapped to " "disk.\n" " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n" " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n" " }\n" "}\n" "\nResult (mode \"mallocinfo\"):\n" "\"...\"\n" "\nExamples:\n" + HelpExampleCli("getmemoryinfo", "") + HelpExampleRpc("getmemoryinfo", "")); } std::string mode = (request.params.size() < 1 || request.params[0].isNull()) ? 
"stats" : request.params[0].get_str(); if (mode == "stats") { UniValue obj(UniValue::VOBJ); obj.pushKV("locked", RPCLockedMemoryInfo()); return obj; } else if (mode == "mallocinfo") { #ifdef HAVE_MALLOC_INFO return RPCMallocInfo(); #else throw JSONRPCError( RPC_INVALID_PARAMETER, "mallocinfo is only available when compiled with glibc 2.10+"); #endif } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "unknown mode " + mode); } } static UniValue echo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp) { throw std::runtime_error( "echo|echojson \"message\" ...\n" "\nSimply echo back the input arguments. This command is for " "testing.\n" "\nThe difference between echo and echojson is that echojson has " "argument conversion enabled in the client-side table in" "bitcoin-cli and the GUI. There is no server-side difference."); } return request.params; } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // ------------------- ------------------------ ---------------------- ---------- { "control", "getinfo", getinfo, {} }, /* uses wallet if enabled */ { "control", "getmemoryinfo", getmemoryinfo, {"mode"} }, { "util", "validateaddress", validateaddress, {"address"} }, /* uses wallet if enabled */ { "util", "createmultisig", createmultisig, {"nrequired","keys"} }, { "util", "verifymessage", verifymessage, {"address","signature","message"} }, { "util", "signmessagewithprivkey", signmessagewithprivkey, {"privkey","message"} }, /* Not shown in help */ { "hidden", "setmocktime", setmocktime, {"timestamp"}}, { "hidden", "echo", echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, { "hidden", "echojson", echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, }; // clang-format on void RegisterMiscRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/rpc/server.h b/src/rpc/server.h index fd03b74b42..21919c32ed 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -1,278 +1,279 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_RPC_SERVER_H #define BITCOIN_RPC_SERVER_H #include "amount.h" #include "rpc/jsonrpcrequest.h" #include "rpc/protocol.h" #include "uint256.h" #include "util.h" #include #include #include #include #include #include #include static const unsigned int DEFAULT_RPC_SERIALIZE_VERSION = 1; class ContextFreeRPCCommand; namespace RPCServerSignals { void OnStarted(std::function slot); void OnStopped(std::function slot); } // namespace RPCServerSignals class CBlockIndex; class Config; class CNetAddr; /** * Wrapper for UniValue::VType, which includes typeAny: used to denote don't * care type. Only used by RPCTypeCheckObj. */ struct UniValueType { - UniValueType(UniValue::VType _type) : typeAny(false), type(_type) {} + explicit UniValueType(UniValue::VType _type) + : typeAny(false), type(_type) {} UniValueType() : typeAny(true) {} bool typeAny; UniValue::VType type; }; /** * Class for registering and managing all RPC calls. */ class RPCServer : public boost::noncopyable { public: RPCServer() {} /** * Attempts to execute an RPC command from the given request. 
* If no RPC command exists that matches the request, an error is returned. */ UniValue ExecuteCommand(Config &config, const JSONRPCRequest &request) const; }; /** * Query whether RPC is running */ bool IsRPCRunning(); /** * Set the RPC warmup status. When this is done, all RPC calls will error out * immediately with RPC_IN_WARMUP. */ void SetRPCWarmupStatus(const std::string &newStatus); /** * Mark warmup as done. RPC calls will be processed from now on. */ void SetRPCWarmupFinished(); /** * Returns the current warmup state */ bool RPCIsInWarmup(std::string *statusOut); /** * Type-check arguments; throws JSONRPCError if wrong type given. Does not check * that the right number of arguments are passed, just that any passed are the * correct type. */ void RPCTypeCheck(const UniValue ¶ms, const std::list &typesExpected, bool fAllowNull = false); /** * Type-check one argument; throws JSONRPCError if wrong type given. */ void RPCTypeCheckArgument(const UniValue &value, UniValue::VType typeExpected); /** * Check for expected keys/value types in an Object. */ void RPCTypeCheckObj(const UniValue &o, const std::map &typesExpected, bool fAllowNull = false, bool fStrict = false); /** * Opaque base class for timers returned by NewTimerFunc. * This provides no methods at the moment, but makes sure that delete cleans up * the whole state. */ class RPCTimerBase { public: virtual ~RPCTimerBase() {} }; /** * RPC timer "driver". */ class RPCTimerInterface { public: virtual ~RPCTimerInterface() {} /** * Implementation name */ virtual const char *Name() = 0; /** * Factory function for timers. * RPC will call the function to create a timer that will call func in * *millis* milliseconds. * @note As the RPC mechanism is backend-neutral, it can use different * implementations of timers. * This is needed to cope with the case in which there is no HTTP server, * but only GUI RPC console, and to break the dependency of pcserver on * httprpc. */ virtual RPCTimerBase *NewTimer(std::function &func, int64_t millis) = 0; }; /** * Set the factory function for timers */ void RPCSetTimerInterface(RPCTimerInterface *iface); /** * Set the factory function for timer, but only, if unset */ void RPCSetTimerInterfaceIfUnset(RPCTimerInterface *iface); /** * Unset factory function for timers */ void RPCUnsetTimerInterface(RPCTimerInterface *iface); /** * Run func nSeconds from now. * Overrides previous timer (if any). */ void RPCRunLater(const std::string &name, std::function func, int64_t nSeconds); typedef UniValue (*rpcfn_type)(Config &config, const JSONRPCRequest &jsonRequest); typedef UniValue (*const_rpcfn_type)(const Config &config, const JSONRPCRequest &jsonRequest); class ContextFreeRPCCommand { public: std::string category; std::string name; private: union { rpcfn_type fn; const_rpcfn_type cfn; } actor; bool useConstConfig; public: std::vector argNames; ContextFreeRPCCommand(std::string _category, std::string _name, rpcfn_type _actor, std::vector _argNames) : category{std::move(_category)}, name{std::move(_name)}, useConstConfig{false}, argNames{std::move(_argNames)} { actor.fn = _actor; } /** * There are 2 constructors depending Config is const or not, so we * can call the command through the proper pointer. Casting constness * on parameters of function is undefined behavior. 
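To make the comment above concrete: the handler pointer is stored in a union and dispatched on useConstConfig, so each signature is always invoked through the pointer type it was registered with, rather than const_cast-ing one signature into the other. A reduced, self-contained sketch of that pattern; Dispatcher, NeedsMutable and ReadsOnly are illustrative names, not code from this patch:

#include <iostream>
#include <string>

struct Config {};

using MutableFn = std::string (*)(Config &);
using ConstFn = std::string (*)(const Config &);

class Dispatcher {
    union {
        MutableFn fn;
        ConstFn cfn;
    } actor;
    bool useConst;

public:
    explicit Dispatcher(MutableFn f) : useConst(false) { actor.fn = f; }
    explicit Dispatcher(ConstFn f) : useConst(true) { actor.cfn = f; }

    std::string call(Config &config) const {
        // Call through the pointer type that was actually stored; casting away
        // the constness of a parameter instead would be undefined behavior.
        return useConst ? (*actor.cfn)(config) : (*actor.fn)(config);
    }
};

static std::string NeedsMutable(Config &) { return "mutable"; }
static std::string ReadsOnly(const Config &) { return "const"; }

int main() {
    Config config;
    std::cout << Dispatcher(NeedsMutable).call(config) << "\n"; // prints "mutable"
    std::cout << Dispatcher(ReadsOnly).call(config) << "\n";    // prints "const"
    return 0;
}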
*/ ContextFreeRPCCommand(std::string _category, std::string _name, const_rpcfn_type _actor, std::vector _argNames) : category{std::move(_category)}, name{std::move(_name)}, useConstConfig{true}, argNames{std::move(_argNames)} { actor.cfn = _actor; } UniValue call(Config &config, const JSONRPCRequest &jsonRequest) const { return useConstConfig ? (*actor.cfn)(config, jsonRequest) : (*actor.fn)(config, jsonRequest); }; }; /** * Bitcoin RPC command dispatcher. */ class CRPCTable { private: std::map mapCommands; public: CRPCTable(); const ContextFreeRPCCommand *operator[](const std::string &name) const; std::string help(Config &config, const std::string &name, const JSONRPCRequest &helpreq) const; /** * Execute a method. * @param request The JSONRPCRequest to execute * @returns Result of the call. * @throws an exception (UniValue) when an error happens. */ UniValue execute(Config &config, const JSONRPCRequest &request) const; /** * Returns a list of registered commands * @returns List of registered commands. */ std::vector listCommands() const; /** * Appends a ContextFreeRPCCommand to the dispatch table. * Returns false if RPC server is already running (dump concurrency * protection). * Commands cannot be overwritten (returns false). */ bool appendCommand(const std::string &name, const ContextFreeRPCCommand *pcmd); }; bool IsDeprecatedRPCEnabled(ArgsManager &args, const std::string &method); extern CRPCTable tableRPC; /** * Utilities: convert hex-encoded values (throws error if not hex). */ extern uint256 ParseHashV(const UniValue &v, std::string strName); extern uint256 ParseHashO(const UniValue &o, std::string strKey); extern std::vector ParseHexV(const UniValue &v, std::string strName); extern std::vector ParseHexO(const UniValue &o, std::string strKey); extern Amount AmountFromValue(const UniValue &value); extern UniValue ValueFromAmount(const Amount amount); extern std::string HelpExampleCli(const std::string &methodname, const std::string &args); extern std::string HelpExampleRpc(const std::string &methodname, const std::string &args); bool StartRPC(); void InterruptRPC(); void StopRPC(); std::string JSONRPCExecBatch(Config &config, RPCServer &rpcServer, const JSONRPCRequest &req, const UniValue &vReq); void RPCNotifyBlockChange(bool ibd, const CBlockIndex *); /** * Retrieves any serialization flags requested in command line argument */ int RPCSerializationFlags(); #endif // BITCOIN_RPC_SERVER_H diff --git a/src/scheduler.cpp b/src/scheduler.cpp index 6d50244633..ec6fbab510 100644 --- a/src/scheduler.cpp +++ b/src/scheduler.cpp @@ -1,217 +1,217 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "scheduler.h" #include "random.h" #include "reverselock.h" #include #include #include CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false) {} CScheduler::~CScheduler() { assert(nThreadsServicingQueue == 0); } #if BOOST_VERSION < 105000 static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point &t) { return boost::posix_time::from_time_t( boost::chrono::system_clock::to_time_t(t)); } #endif void CScheduler::serviceQueue() { boost::unique_lock lock(newTaskMutex); ++nThreadsServicingQueue; // newTaskMutex is locked throughout this loop EXCEPT when the thread is // waiting or when the user's function is called. 
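The loop that follows is the heart of serviceQueue(): wait on the condition variable until either a new task is scheduled or the earliest queued task comes due, then run it without holding the queue lock. Below is a condensed standalone version of that shape using std:: primitives, purely as an illustration; the real code uses boost's mutex/condition_variable plus reverse_lock and the pre/post-1.50 wait variants shown after this sketch:

#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>

int main() {
    using Clock = std::chrono::system_clock;
    std::multimap<Clock::time_point, std::function<void()>> taskQueue;
    std::mutex newTaskMutex;
    std::condition_variable newTaskScheduled;
    bool stop = false;

    std::thread worker([&] {
        std::unique_lock<std::mutex> lock(newTaskMutex);
        while (!stop) {
            // Wait until there is something to do.
            while (!stop && taskQueue.empty()) {
                newTaskScheduled.wait(lock);
            }
            // Then wait until the earliest task is due, waking early if a new
            // (possibly earlier) task arrives or stop is requested.
            while (!stop && !taskQueue.empty() &&
                   newTaskScheduled.wait_until(lock, taskQueue.begin()->first) !=
                       std::cv_status::timeout) {
            }
            if (stop || taskQueue.empty()) {
                continue;
            }
            std::function<void()> f = taskQueue.begin()->second;
            taskQueue.erase(taskQueue.begin());
            lock.unlock(); // run the task without holding the queue lock
            f();
            lock.lock();
        }
    });

    {
        std::lock_guard<std::mutex> g(newTaskMutex);
        taskQueue.emplace(Clock::now() + std::chrono::milliseconds(100),
                          [] { std::cout << "task ran\n"; });
    }
    newTaskScheduled.notify_one();

    std::this_thread::sleep_for(std::chrono::milliseconds(300));
    {
        std::lock_guard<std::mutex> g(newTaskMutex);
        stop = true;
    }
    newTaskScheduled.notify_one();
    worker.join();
    return 0;
}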
while (!shouldStop()) { try { if (!shouldStop() && taskQueue.empty()) { reverse_lock> rlock(lock); // Use this chance to get a tiny bit more entropy RandAddSeedSleep(); } while (!shouldStop() && taskQueue.empty()) { // Wait until there is something to do. newTaskScheduled.wait(lock); } // Wait until either there is a new task, or until the time of the first item on // the queue: // wait_until needs boost 1.50 or later; older versions have timed_wait: #if BOOST_VERSION < 105000 while (!shouldStop() && !taskQueue.empty() && newTaskScheduled.timed_wait( lock, toPosixTime(taskQueue.begin()->first))) { // Keep waiting until timeout } #else // Some boost versions have a conflicting overload of wait_until // that returns void. Explicitly use a template here to avoid // hitting that overload. while (!shouldStop() && !taskQueue.empty()) { boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first; if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout) { // Exit loop after timeout, it means we reached the time of // the event break; } } #endif // If there are multiple threads, the queue can empty while we're // waiting (another thread may service the task we were waiting on). if (shouldStop() || taskQueue.empty()) { continue; } Function f = taskQueue.begin()->second; taskQueue.erase(taskQueue.begin()); { // Unlock before calling f, so it can reschedule itself or // another task without deadlocking: reverse_lock> rlock(lock); f(); } } catch (...) { --nThreadsServicingQueue; throw; } } --nThreadsServicingQueue; newTaskScheduled.notify_one(); } void CScheduler::stop(bool drain) { { boost::unique_lock lock(newTaskMutex); if (drain) { stopWhenEmpty = true; } else { stopRequested = true; } } newTaskScheduled.notify_all(); } void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t) { { boost::unique_lock lock(newTaskMutex); taskQueue.insert(std::make_pair(t, f)); } newTaskScheduled.notify_one(); } void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds) { schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds)); } static void Repeat(CScheduler *s, CScheduler::Predicate p, int64_t deltaMilliSeconds) { if (p()) { s->scheduleFromNow(boost::bind(&Repeat, s, p, deltaMilliSeconds), deltaMilliSeconds); } } void CScheduler::scheduleEvery(CScheduler::Predicate p, int64_t deltaMilliSeconds) { scheduleFromNow(boost::bind(&Repeat, this, p, deltaMilliSeconds), deltaMilliSeconds); } size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first, boost::chrono::system_clock::time_point &last) const { boost::unique_lock lock(newTaskMutex); size_t result = taskQueue.size(); if (!taskQueue.empty()) { first = taskQueue.begin()->first; last = taskQueue.rbegin()->first; } return result; } bool CScheduler::AreThreadsServicingQueue() const { return nThreadsServicingQueue; } void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() { { LOCK(m_cs_callbacks_pending); // Try to avoid scheduling too many copies here, but if we // accidentally have two ProcessQueue's scheduled at once its // not a big deal. 
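scheduleEvery (via the Repeat helper above) gives fixed-delay rather than fixed-rate behaviour: the predicate is the periodic work itself, and it is rescheduled deltaMilliSeconds after it finishes, only for as long as it keeps returning true. A toy, timing-free sketch of that self-rescheduling shape; ToyScheduler is invented for this example and ignores the delay entirely, keeping only the ordering:

#include <cstdint>
#include <functional>
#include <iostream>
#include <queue>

struct ToyScheduler {
    std::queue<std::function<void()>> tasks;
    void scheduleFromNow(std::function<void()> f, int64_t /*deltaMilliSeconds*/) {
        tasks.push(std::move(f));
    }
    void run() {
        while (!tasks.empty()) {
            std::function<void()> f = std::move(tasks.front());
            tasks.pop();
            f();
        }
    }
};

static void Repeat(ToyScheduler *s, std::function<bool()> p, int64_t delta) {
    if (p()) {
        // Rescheduled only after the current run finished, so the spacing is
        // "delta after completion", not a fixed rate.
        s->scheduleFromNow([s, p, delta] { Repeat(s, p, delta); }, delta);
    }
}

int main() {
    ToyScheduler s;
    int runs = 0;
    // The "predicate" is also the periodic work: returning false stops it.
    auto tick = [&runs] {
        std::cout << "tick " << ++runs << "\n";
        return runs < 3;
    };
    s.scheduleFromNow([&s, tick] { Repeat(&s, tick, 1000); }, 1000); // like scheduleEvery
    s.run(); // prints tick 1, tick 2, tick 3 and then stops
    return 0;
}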
if (m_are_callbacks_running) return; if (m_callbacks_pending.empty()) return; } m_pscheduler->schedule( std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this)); } void SingleThreadedSchedulerClient::ProcessQueue() { std::function callback; { LOCK(m_cs_callbacks_pending); if (m_are_callbacks_running) return; if (m_callbacks_pending.empty()) return; m_are_callbacks_running = true; callback = std::move(m_callbacks_pending.front()); m_callbacks_pending.pop_front(); } // RAII the setting of fCallbacksRunning and calling // MaybeScheduleProcessQueue // to ensure both happen safely even if callback() throws. struct RAIICallbacksRunning { SingleThreadedSchedulerClient *instance; - RAIICallbacksRunning(SingleThreadedSchedulerClient *_instance) + explicit RAIICallbacksRunning(SingleThreadedSchedulerClient *_instance) : instance(_instance) {} ~RAIICallbacksRunning() { { LOCK(instance->m_cs_callbacks_pending); instance->m_are_callbacks_running = false; } instance->MaybeScheduleProcessQueue(); } } raiicallbacksrunning(this); callback(); } void SingleThreadedSchedulerClient::AddToProcessQueue( std::function func) { assert(m_pscheduler); { LOCK(m_cs_callbacks_pending); m_callbacks_pending.emplace_back(std::move(func)); } MaybeScheduleProcessQueue(); } void SingleThreadedSchedulerClient::EmptyQueue() { assert(!m_pscheduler->AreThreadsServicingQueue()); bool should_continue = true; while (should_continue) { ProcessQueue(); LOCK(m_cs_callbacks_pending); should_continue = !m_callbacks_pending.empty(); } } diff --git a/src/scheduler.h b/src/scheduler.h index efd4f9851f..b0d6bc9f07 100644 --- a/src/scheduler.h +++ b/src/scheduler.h @@ -1,120 +1,120 @@ // Copyright (c) 2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SCHEDULER_H #define BITCOIN_SCHEDULER_H #include "sync.h" // // NOTE: // boost::thread / boost::chrono should be ported to // std::thread / std::chrono when we support C++11. // #include #include #include // // Simple class for background tasks that should be run periodically or once // "after a while" // // Usage: // // CScheduler* s = new CScheduler(); // s->scheduleFromNow(doSomething, 11); // Assuming a: void doSomething() { } // s->scheduleFromNow(std::bind(Class::func, this, argument), 3); // boost::thread* t = new boost::thread(boost::bind(CScheduler::serviceQueue, // s)); // // ... then at program shutdown, clean up the thread running serviceQueue: // t->interrupt(); // t->join(); // delete t; // delete s; // Must be done after thread is interrupted/joined. // class CScheduler { public: CScheduler(); ~CScheduler(); typedef std::function Function; typedef std::function Predicate; // Call func at/after time t void schedule(Function f, boost::chrono::system_clock::time_point t = boost::chrono::system_clock::now()); // Convenience method: call f once deltaMilliSeconds from now void scheduleFromNow(Function f, int64_t deltaMilliSeconds); // Another convenience method: call f approximately every deltaMilliSeconds // forever, starting deltaMilliSeconds from now. To be more precise: every // time f is finished, it is rescheduled to run deltaMilliSeconds later. If // you need more accurate scheduling, don't use this method. void scheduleEvery(Predicate p, int64_t deltaMilliSeconds); // To keep things as simple as possible, there is no unschedule. // Services the queue 'forever'. 
Should be run in a thread, and interrupted // using boost::interrupt_thread void serviceQueue(); // Tell any threads running serviceQueue to stop as soon as they're done // servicing whatever task they're currently servicing (drain=false) or when // there is no work left to be done (drain=true) void stop(bool drain = false); // Returns number of tasks waiting to be serviced, and first and last task // times size_t getQueueInfo(boost::chrono::system_clock::time_point &first, boost::chrono::system_clock::time_point &last) const; // Returns true if there are threads actively running in serviceQueue() bool AreThreadsServicingQueue() const; private: std::multimap taskQueue; boost::condition_variable newTaskScheduled; mutable boost::mutex newTaskMutex; int nThreadsServicingQueue; bool stopRequested; bool stopWhenEmpty; bool shouldStop() { return stopRequested || (stopWhenEmpty && taskQueue.empty()); } }; /** * Class used by CScheduler clients which may schedule multiple jobs * which are required to be run serially. Does not require such jobs * to be executed on the same thread, but no two jobs will be executed * at the same time. */ class SingleThreadedSchedulerClient { private: CScheduler *m_pscheduler; CCriticalSection m_cs_callbacks_pending; std::list> m_callbacks_pending; bool m_are_callbacks_running = false; void MaybeScheduleProcessQueue(); void ProcessQueue(); public: - SingleThreadedSchedulerClient(CScheduler *pschedulerIn) + explicit SingleThreadedSchedulerClient(CScheduler *pschedulerIn) : m_pscheduler(pschedulerIn) {} void AddToProcessQueue(std::function func); // Processes all remaining queue members on the calling thread, blocking // until queue is empty // Must be called after the CScheduler has no remaining processing threads! void EmptyQueue(); }; #endif diff --git a/src/script/sign.h b/src/script/sign.h index 1c3e8087b8..e43624a16b 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -1,108 +1,109 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SCRIPT_SIGN_H #define BITCOIN_SCRIPT_SIGN_H #include "script/interpreter.h" #include "script/sighashtype.h" class CKeyID; class CKeyStore; class CMutableTransaction; class CScript; class CTransaction; /** Virtual base class for signature creators. */ class BaseSignatureCreator { protected: const CKeyStore *keystore; public: - BaseSignatureCreator(const CKeyStore *keystoreIn) : keystore(keystoreIn) {} + explicit BaseSignatureCreator(const CKeyStore *keystoreIn) + : keystore(keystoreIn) {} const CKeyStore &KeyStore() const { return *keystore; }; virtual ~BaseSignatureCreator() {} virtual const BaseSignatureChecker &Checker() const = 0; /** Create a singular (non-script) signature. */ virtual bool CreateSig(std::vector &vchSig, const CKeyID &keyid, const CScript &scriptCode) const = 0; }; /** A signature creator for transactions. 
*/ class TransactionSignatureCreator : public BaseSignatureCreator { const CTransaction *txTo; unsigned int nIn; Amount amount; SigHashType sigHashType; const TransactionSignatureChecker checker; public: TransactionSignatureCreator(const CKeyStore *keystoreIn, const CTransaction *txToIn, unsigned int nInIn, const Amount amountIn, SigHashType sigHashTypeIn = SigHashType()); const BaseSignatureChecker &Checker() const override { return checker; } bool CreateSig(std::vector &vchSig, const CKeyID &keyid, const CScript &scriptCode) const override; }; class MutableTransactionSignatureCreator : public TransactionSignatureCreator { CTransaction tx; public: MutableTransactionSignatureCreator(const CKeyStore *keystoreIn, const CMutableTransaction *txToIn, unsigned int nInIn, const Amount amountIn, SigHashType sigHashTypeIn) : TransactionSignatureCreator(keystoreIn, &tx, nInIn, amountIn, sigHashTypeIn), tx(*txToIn) {} }; /** A signature creator that just produces 72-byte empty signatures. */ class DummySignatureCreator : public BaseSignatureCreator { public: - DummySignatureCreator(const CKeyStore *keystoreIn) + explicit DummySignatureCreator(const CKeyStore *keystoreIn) : BaseSignatureCreator(keystoreIn) {} const BaseSignatureChecker &Checker() const override; bool CreateSig(std::vector &vchSig, const CKeyID &keyid, const CScript &scriptCode) const override; }; struct SignatureData { CScript scriptSig; SignatureData() {} explicit SignatureData(const CScript &script) : scriptSig(script) {} }; /** Produce a script signature using a generic signature creator. */ bool ProduceSignature(const BaseSignatureCreator &creator, const CScript &scriptPubKey, SignatureData &sigdata); /** Produce a script signature for a transaction. */ bool SignSignature(const CKeyStore &keystore, const CScript &fromPubKey, CMutableTransaction &txTo, unsigned int nIn, const Amount amount, SigHashType sigHashType); bool SignSignature(const CKeyStore &keystore, const CTransaction &txFrom, CMutableTransaction &txTo, unsigned int nIn, SigHashType sigHashType); /** Combine two script signatures using a generic signature checker, * intelligently, possibly with OP_0 placeholders. */ SignatureData CombineSignatures(const CScript &scriptPubKey, const BaseSignatureChecker &checker, const SignatureData &scriptSig1, const SignatureData &scriptSig2); /** Extract signature data from a transaction, and insert it. */ SignatureData DataFromTransaction(const CMutableTransaction &tx, unsigned int nIn); void UpdateTransaction(CMutableTransaction &tx, unsigned int nIn, const SignatureData &data); #endif // BITCOIN_SCRIPT_SIGN_H diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 5303ead9b8..cee9147989 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -1,291 +1,291 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
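The signature-creator classes patched above are normally driven through ProduceSignature(). A minimal sketch of the common size-estimation use of DummySignatureCreator; CKey and CBasicKeyStore come from key.h and keystore.h, which are outside this diff, and the helper name EstimateSignedSize is hypothetical.

#include "key.h"
#include "keystore.h"
#include "script/sign.h"
#include "script/standard.h"

// Sign a P2PKH script with 72-byte placeholder signatures to learn how large
// a real scriptSig would be, e.g. when sizing a transaction for fees.
static size_t EstimateSignedSize(const CKey &key, CBasicKeyStore &keystore) {
    keystore.AddKey(key);
    const CScript scriptPubKey =
        GetScriptForDestination(key.GetPubKey().GetID());

    SignatureData sigdata;
    if (!ProduceSignature(DummySignatureCreator(&keystore), scriptPubKey,
                          sigdata)) {
        // Solver could not handle this script type.
        return 0;
    }
    return sigdata.scriptSig.size();
}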
#include "script/standard.h" #include "pubkey.h" #include "script/script.h" #include "util.h" #include "utilstrencodings.h" typedef std::vector valtype; bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER; CScriptID::CScriptID(const CScript &in) : uint160(Hash160(in.begin(), in.end())) {} const char *GetTxnOutputType(txnouttype t) { switch (t) { case TX_NONSTANDARD: return "nonstandard"; case TX_PUBKEY: return "pubkey"; case TX_PUBKEYHASH: return "pubkeyhash"; case TX_SCRIPTHASH: return "scripthash"; case TX_MULTISIG: return "multisig"; case TX_NULL_DATA: return "nulldata"; } return nullptr; } /** * Return public keys or hashes from scriptPubKey, for 'standard' transaction * types. */ bool Solver(const CScript &scriptPubKey, txnouttype &typeRet, std::vector> &vSolutionsRet) { // Templates static std::multimap mTemplates; if (mTemplates.empty()) { // Standard tx, sender provides pubkey, receiver adds signature mTemplates.insert( std::make_pair(TX_PUBKEY, CScript() << OP_PUBKEY << OP_CHECKSIG)); // Bitcoin address tx, sender provides hash of pubkey, receiver provides // signature and pubkey mTemplates.insert( std::make_pair(TX_PUBKEYHASH, CScript() << OP_DUP << OP_HASH160 << OP_PUBKEYHASH << OP_EQUALVERIFY << OP_CHECKSIG)); // Sender provides N pubkeys, receivers provides M signatures mTemplates.insert( std::make_pair(TX_MULTISIG, CScript() << OP_SMALLINTEGER << OP_PUBKEYS << OP_SMALLINTEGER << OP_CHECKMULTISIG)); } vSolutionsRet.clear(); // Shortcut for pay-to-script-hash, which are more constrained than the // other types: // it is always OP_HASH160 20 [20 byte hash] OP_EQUAL if (scriptPubKey.IsPayToScriptHash()) { typeRet = TX_SCRIPTHASH; std::vector hashBytes(scriptPubKey.begin() + 2, scriptPubKey.begin() + 22); vSolutionsRet.push_back(hashBytes); return true; } // Provably prunable, data-carrying output // // So long as script passes the IsUnspendable() test and all but the first // byte passes the IsPushOnly() test we don't care what exactly is in the // script. 
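A concrete instance of the data-carrying case described above, as a self-contained check; the pushed payload bytes are illustrative only.

#include "script/script.h"
#include "script/standard.h"

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    // OP_RETURN followed only by pushes is classified as TX_NULL_DATA.
    CScript script;
    script << OP_RETURN << std::vector<uint8_t>{0xde, 0xad, 0xbe, 0xef};

    txnouttype type;
    std::vector<std::vector<uint8_t>> solutions;
    const bool matched = Solver(script, type, solutions);
    assert(matched && type == TX_NULL_DATA);
    return 0;
}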
if (scriptPubKey.size() >= 1 && scriptPubKey[0] == OP_RETURN && scriptPubKey.IsPushOnly(scriptPubKey.begin() + 1)) { typeRet = TX_NULL_DATA; return true; } // Scan templates const CScript &script1 = scriptPubKey; for (const std::pair &tplate : mTemplates) { const CScript &script2 = tplate.second; vSolutionsRet.clear(); opcodetype opcode1, opcode2; std::vector vch1, vch2; // Compare CScript::const_iterator pc1 = script1.begin(); CScript::const_iterator pc2 = script2.begin(); while (true) { if (pc1 == script1.end() && pc2 == script2.end()) { // Found a match typeRet = tplate.first; if (typeRet == TX_MULTISIG) { // Additional checks for TX_MULTISIG: uint8_t m = vSolutionsRet.front()[0]; uint8_t n = vSolutionsRet.back()[0]; if (m < 1 || n < 1 || m > n || vSolutionsRet.size() - 2 != n) { return false; } } return true; } if (!script1.GetOp(pc1, opcode1, vch1)) { break; } if (!script2.GetOp(pc2, opcode2, vch2)) { break; } // Template matching opcodes: if (opcode2 == OP_PUBKEYS) { while (vch1.size() >= 33 && vch1.size() <= 65) { vSolutionsRet.push_back(vch1); if (!script1.GetOp(pc1, opcode1, vch1)) { break; } } if (!script2.GetOp(pc2, opcode2, vch2)) { break; } // Normal situation is to fall through to other if/else // statements } if (opcode2 == OP_PUBKEY) { if (vch1.size() < 33 || vch1.size() > 65) { break; } vSolutionsRet.push_back(vch1); } else if (opcode2 == OP_PUBKEYHASH) { if (vch1.size() != sizeof(uint160)) { break; } vSolutionsRet.push_back(vch1); } else if (opcode2 == OP_SMALLINTEGER) { // Single-byte small integer pushed onto vSolutions if (opcode1 == OP_0 || (opcode1 >= OP_1 && opcode1 <= OP_16)) { char n = (char)CScript::DecodeOP_N(opcode1); vSolutionsRet.push_back(valtype(1, n)); } else { break; } } else if (opcode1 != opcode2 || vch1 != vch2) { // Others must match exactly break; } } } vSolutionsRet.clear(); typeRet = TX_NONSTANDARD; return false; } bool ExtractDestination(const CScript &scriptPubKey, CTxDestination &addressRet) { std::vector vSolutions; txnouttype whichType; if (!Solver(scriptPubKey, whichType, vSolutions)) { return false; } if (whichType == TX_PUBKEY) { CPubKey pubKey(vSolutions[0]); if (!pubKey.IsValid()) { return false; } addressRet = pubKey.GetID(); return true; } if (whichType == TX_PUBKEYHASH) { addressRet = CKeyID(uint160(vSolutions[0])); return true; } if (whichType == TX_SCRIPTHASH) { addressRet = CScriptID(uint160(vSolutions[0])); return true; } // Multisig txns have more than one address... 
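For the single-destination output types handled above the extraction round-trips cleanly, while bare multisig goes through ExtractDestinations (defined just below). A minimal sketch with an all-zero key hash, purely for illustration.

#include "script/standard.h"

#include <cassert>

int main() {
    // Build a P2PKH output script for a dummy key hash and recover the
    // destination from it again.
    const CKeyID dummyKeyHash; // all-zero hash, illustrative only
    const CScript scriptPubKey = GetScriptForDestination(dummyKeyHash);

    CTxDestination recovered;
    assert(ExtractDestination(scriptPubKey, recovered));
    assert(boost::get<CKeyID>(recovered) == dummyKeyHash);
    return 0;
}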
return false; } bool ExtractDestinations(const CScript &scriptPubKey, txnouttype &typeRet, std::vector &addressRet, int &nRequiredRet) { addressRet.clear(); typeRet = TX_NONSTANDARD; std::vector vSolutions; if (!Solver(scriptPubKey, typeRet, vSolutions)) { return false; } if (typeRet == TX_NULL_DATA) { // This is data, not addresses return false; } if (typeRet == TX_MULTISIG) { nRequiredRet = vSolutions.front()[0]; for (size_t i = 1; i < vSolutions.size() - 1; i++) { CPubKey pubKey(vSolutions[i]); if (!pubKey.IsValid()) { continue; } CTxDestination address = pubKey.GetID(); addressRet.push_back(address); } if (addressRet.empty()) { return false; } } else { nRequiredRet = 1; CTxDestination address; if (!ExtractDestination(scriptPubKey, address)) { return false; } addressRet.push_back(address); } return true; } namespace { class CScriptVisitor : public boost::static_visitor { private: CScript *script; public: - CScriptVisitor(CScript *scriptin) { script = scriptin; } + explicit CScriptVisitor(CScript *scriptin) { script = scriptin; } bool operator()(const CNoDestination &dest) const { script->clear(); return false; } bool operator()(const CKeyID &keyID) const { script->clear(); *script << OP_DUP << OP_HASH160 << ToByteVector(keyID) << OP_EQUALVERIFY << OP_CHECKSIG; return true; } bool operator()(const CScriptID &scriptID) const { script->clear(); *script << OP_HASH160 << ToByteVector(scriptID) << OP_EQUAL; return true; } }; } // namespace CScript GetScriptForDestination(const CTxDestination &dest) { CScript script; boost::apply_visitor(CScriptVisitor(&script), dest); return script; } CScript GetScriptForRawPubKey(const CPubKey &pubKey) { return CScript() << std::vector(pubKey.begin(), pubKey.end()) << OP_CHECKSIG; } CScript GetScriptForMultisig(int nRequired, const std::vector &keys) { CScript script; script << CScript::EncodeOP_N(nRequired); for (const CPubKey &key : keys) { script << ToByteVector(key); } script << CScript::EncodeOP_N(keys.size()) << OP_CHECKMULTISIG; return script; } bool IsValidDestination(const CTxDestination &dest) { return dest.which() != 0; } diff --git a/src/serialize.h b/src/serialize.h index b1c99983af..1ffcecc5e6 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1,919 +1,919 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SERIALIZE_H #define BITCOIN_SERIALIZE_H #include "compat/endian.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "prevector.h" static const uint64_t MAX_SIZE = 0x02000000; /** * Dummy data type to identify deserializing constructors. * * By convention, a constructor of a type T with signature * * template T::T(deserialize_type, Stream& s) * * is a deserializing constructor, which builds the type by deserializing it * from s. If T contains const fields, this is likely the only way to do so. 
*/ struct deserialize_type {}; constexpr deserialize_type deserialize{}; /** * Used to bypass the rule against non-const reference to temporary where it * makes sense with wrappers such as CFlatData or CTxDB */ template inline T &REF(const T &val) { return const_cast(val); } /** * Used to acquire a non-const pointer "this" to generate bodies of const * serialization operations from a template */ template inline T *NCONST_PTR(const T *val) { return const_cast(val); } /* * Lowest-level serialization and conversion. * @note Sizes of these types are verified in the tests */ template inline void ser_writedata8(Stream &s, uint8_t obj) { s.write((char *)&obj, 1); } template inline void ser_writedata16(Stream &s, uint16_t obj) { obj = htole16(obj); s.write((char *)&obj, 2); } template inline void ser_writedata32(Stream &s, uint32_t obj) { obj = htole32(obj); s.write((char *)&obj, 4); } template inline void ser_writedata64(Stream &s, uint64_t obj) { obj = htole64(obj); s.write((char *)&obj, 8); } template inline uint8_t ser_readdata8(Stream &s) { uint8_t obj; s.read((char *)&obj, 1); return obj; } template inline uint16_t ser_readdata16(Stream &s) { uint16_t obj; s.read((char *)&obj, 2); return le16toh(obj); } template inline uint32_t ser_readdata32(Stream &s) { uint32_t obj; s.read((char *)&obj, 4); return le32toh(obj); } template inline uint64_t ser_readdata64(Stream &s) { uint64_t obj; s.read((char *)&obj, 8); return le64toh(obj); } inline uint64_t ser_double_to_uint64(double x) { union { double x; uint64_t y; } tmp; tmp.x = x; return tmp.y; } inline uint32_t ser_float_to_uint32(float x) { union { float x; uint32_t y; } tmp; tmp.x = x; return tmp.y; } inline double ser_uint64_to_double(uint64_t y) { union { double x; uint64_t y; } tmp; tmp.y = y; return tmp.x; } inline float ser_uint32_to_float(uint32_t y) { union { float x; uint32_t y; } tmp; tmp.y = y; return tmp.x; } ///////////////////////////////////////////////////////////////// // // Templates for serializing to anything that looks like a stream, // i.e. anything that supports .read(char*, size_t) and .write(char*, size_t) // class CSizeComputer; enum { // primary actions SER_NETWORK = (1 << 0), SER_DISK = (1 << 1), SER_GETHASH = (1 << 2), }; #define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action)) #define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) /** * Implement three methods for serializable objects. These are actually wrappers * over "SerializationOp" template, which implements the body of each class' * serialization code. Adding "ADD_SERIALIZE_METHODS" in the body of the class * causes these wrappers to be added as members. 
*/ #define ADD_SERIALIZE_METHODS \ template void Serialize(Stream &s) const { \ NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \ } \ template void Unserialize(Stream &s) { \ SerializationOp(s, CSerActionUnserialize()); \ } template inline void Serialize(Stream &s, char a) { ser_writedata8(s, a); } // TODO Get rid of bare char template inline void Serialize(Stream &s, int8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, uint8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, int16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, uint16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, int32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, uint32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, int64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, uint64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, float a) { ser_writedata32(s, ser_float_to_uint32(a)); } template inline void Serialize(Stream &s, double a) { ser_writedata64(s, ser_double_to_uint64(a)); } // TODO Get rid of bare char template inline void Unserialize(Stream &s, char &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, int8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, uint8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, int16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, uint16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, int32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, uint32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, int64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, uint64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, float &a) { a = ser_uint32_to_float(ser_readdata32(s)); } template inline void Unserialize(Stream &s, double &a) { a = ser_uint64_to_double(ser_readdata64(s)); } template inline void Serialize(Stream &s, bool a) { char f = a; ser_writedata8(s, f); } template inline void Unserialize(Stream &s, bool &a) { char f = ser_readdata8(s); a = f; } /** * Compact Size * size < 253 -- 1 byte * size <= USHRT_MAX -- 3 bytes (253 + 2 bytes) * size <= UINT_MAX -- 5 bytes (254 + 4 bytes) * size > UINT_MAX -- 9 bytes (255 + 8 bytes) */ inline uint32_t GetSizeOfCompactSize(uint64_t nSize) { if (nSize < 253) { return sizeof(uint8_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint16_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint32_t); } return sizeof(uint8_t) + sizeof(uint64_t); } inline void WriteCompactSize(CSizeComputer &os, uint64_t nSize); template void WriteCompactSize(Stream &os, uint64_t nSize) { if (nSize < 253) { ser_writedata8(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 253); ser_writedata16(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 254); ser_writedata32(os, nSize); } else { ser_writedata8(os, 255); ser_writedata64(os, nSize); } return; } template uint64_t ReadCompactSize(Stream &is) { uint8_t chSize = ser_readdata8(is); uint64_t nSizeRet = 0; if (chSize < 253) { nSizeRet = chSize; } else if (chSize == 253) { nSizeRet = ser_readdata16(is); if (nSizeRet < 253) { throw 
std::ios_base::failure("non-canonical ReadCompactSize()"); } } else if (chSize == 254) { nSizeRet = ser_readdata32(is); if (nSizeRet < 0x10000u) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } else { nSizeRet = ser_readdata64(is); if (nSizeRet < 0x100000000ULL) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } if (nSizeRet > MAX_SIZE) { throw std::ios_base::failure("ReadCompactSize(): size too large"); } return nSizeRet; } /** * Variable-length integers: bytes are a MSB base-128 encoding of the number. * The high bit in each byte signifies whether another digit follows. To make * sure the encoding is one-to-one, one is subtracted from all but the last * digit. Thus, the byte sequence a[] with length len, where all but the last * byte has bit 128 set, encodes the number: * * (a[len-1] & 0x7F) + sum(i=1..len-1, 128^i*((a[len-i-1] & 0x7F)+1)) * * Properties: * * Very small (0-127: 1 byte, 128-16511: 2 bytes, 16512-2113663: 3 bytes) * * Every integer has exactly one encoding * * Encoding does not depend on size of original integer type * * No redundancy: every (infinite) byte sequence corresponds to a list * of encoded integers. * * 0: [0x00] 256: [0x81 0x00] * 1: [0x01] 16383: [0xFE 0x7F] * 127: [0x7F] 16384: [0xFF 0x00] * 128: [0x80 0x00] 16511: [0xFF 0x7F] * 255: [0x80 0x7F] 65535: [0x82 0xFE 0x7F] * 2^32: [0x8E 0xFE 0xFE 0xFF 0x00] */ template inline unsigned int GetSizeOfVarInt(I n) { int nRet = 0; while (true) { nRet++; if (n <= 0x7F) { return nRet; } n = (n >> 7) - 1; } } template inline void WriteVarInt(CSizeComputer &os, I n); template void WriteVarInt(Stream &os, I n) { uint8_t tmp[(sizeof(n) * 8 + 6) / 7]; int len = 0; while (true) { tmp[len] = (n & 0x7F) | (len ? 0x80 : 0x00); if (n <= 0x7F) { break; } n = (n >> 7) - 1; len++; } do { ser_writedata8(os, tmp[len]); } while (len--); } template I ReadVarInt(Stream &is) { I n = 0; while (true) { uint8_t chData = ser_readdata8(is); n = (n << 7) | (chData & 0x7F); if ((chData & 0x80) == 0) { return n; } n++; } } #define FLATDATA(obj) \ REF(CFlatData((char *)&(obj), (char *)&(obj) + sizeof(obj))) #define VARINT(obj) REF(WrapVarInt(REF(obj))) #define COMPACTSIZE(obj) REF(CCompactSize(REF(obj))) #define LIMITED_STRING(obj, n) REF(LimitedString(REF(obj))) /** * Wrapper for serializing arrays and POD. 
*/ class CFlatData { protected: char *pbegin; char *pend; public: CFlatData(void *pbeginIn, void *pendIn) : pbegin((char *)pbeginIn), pend((char *)pendIn) {} template explicit CFlatData(std::vector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } template explicit CFlatData(prevector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } char *begin() { return pbegin; } const char *begin() const { return pbegin; } char *end() { return pend; } const char *end() const { return pend; } template void Serialize(Stream &s) const { s.write(pbegin, pend - pbegin); } template void Unserialize(Stream &s) { s.read(pbegin, pend - pbegin); } }; template class CVarInt { protected: I &n; public: - CVarInt(I &nIn) : n(nIn) {} + explicit CVarInt(I &nIn) : n(nIn) {} template void Serialize(Stream &s) const { WriteVarInt(s, n); } template void Unserialize(Stream &s) { n = ReadVarInt(s); } }; class CCompactSize { protected: uint64_t &n; public: - CCompactSize(uint64_t &nIn) : n(nIn) {} + explicit CCompactSize(uint64_t &nIn) : n(nIn) {} template void Serialize(Stream &s) const { WriteCompactSize(s, n); } template void Unserialize(Stream &s) { n = ReadCompactSize(s); } }; template class LimitedString { protected: std::string &string; public: - LimitedString(std::string &_string) : string(_string) {} + explicit LimitedString(std::string &_string) : string(_string) {} template void Unserialize(Stream &s) { size_t size = ReadCompactSize(s); if (size > Limit) { throw std::ios_base::failure("String length limit exceeded"); } string.resize(size); if (size != 0) { s.read((char *)&string[0], size); } } template void Serialize(Stream &s) const { WriteCompactSize(s, string.size()); if (!string.empty()) { s.write((char *)&string[0], string.size()); } } }; template CVarInt WrapVarInt(I &n) { return CVarInt(n); } /** * Forward declarations */ /** * string */ template void Serialize(Stream &os, const std::basic_string &str); template void Unserialize(Stream &is, std::basic_string &str); /** * prevector * prevectors of uint8_t are a special case and are intended to be serialized as * a single opaque blob. */ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &); template void Serialize_impl(Stream &os, const prevector &v, const V &); template inline void Serialize(Stream &os, const prevector &v); template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &); template void Unserialize_impl(Stream &is, prevector &v, const V &); template inline void Unserialize(Stream &is, prevector &v); /** * vector * vectors of uint8_t are a special case and are intended to be serialized as a * single opaque blob. 
*/ template void Serialize_impl(Stream &os, const std::vector &v, const uint8_t &); template void Serialize_impl(Stream &os, const std::vector &v, const V &); template inline void Serialize(Stream &os, const std::vector &v); template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &); template void Unserialize_impl(Stream &is, std::vector &v, const V &); template inline void Unserialize(Stream &is, std::vector &v); /** * pair */ template void Serialize(Stream &os, const std::pair &item); template void Unserialize(Stream &is, std::pair &item); /** * map */ template void Serialize(Stream &os, const std::map &m); template void Unserialize(Stream &is, std::map &m); /** * set */ template void Serialize(Stream &os, const std::set &m); template void Unserialize(Stream &is, std::set &m); /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p); template void Unserialize(Stream &os, std::shared_ptr &p); /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p); template void Unserialize(Stream &os, std::unique_ptr &p); /** * If none of the specialized versions above matched, default to calling member * function. */ template inline void Serialize(Stream &os, const T &a) { a.Serialize(os); } template inline void Unserialize(Stream &is, T &a) { a.Unserialize(is); } /** * string */ template void Serialize(Stream &os, const std::basic_string &str) { WriteCompactSize(os, str.size()); if (!str.empty()) { os.write((char *)&str[0], str.size() * sizeof(str[0])); } } template void Unserialize(Stream &is, std::basic_string &str) { size_t nSize = ReadCompactSize(is); str.resize(nSize); if (nSize != 0) { is.read((char *)&str[0], nSize * sizeof(str[0])); } } /** * prevector */ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)&v[0], v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const prevector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const prevector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, prevector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, prevector &v) { Unserialize_impl(is, v, T()); } /** * vector */ template void Serialize_impl(Stream &os, const std::vector &v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)&v[0], v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const std::vector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const std::vector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); 
size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, std::vector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, std::vector &v) { Unserialize_impl(is, v, T()); } /** * pair */ template void Serialize(Stream &os, const std::pair &item) { Serialize(os, item.first); Serialize(os, item.second); } template void Unserialize(Stream &is, std::pair &item) { Unserialize(is, item.first); Unserialize(is, item.second); } /** * map */ template void Serialize(Stream &os, const std::map &m) { WriteCompactSize(os, m.size()); for (const std::pair &p : m) { Serialize(os, p); } } template void Unserialize(Stream &is, std::map &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::map::iterator mi = m.begin(); for (size_t i = 0; i < nSize; i++) { std::pair item; Unserialize(is, item); mi = m.insert(mi, item); } } /** * set */ template void Serialize(Stream &os, const std::set &m) { WriteCompactSize(os, m.size()); for (const K &i : m) { Serialize(os, i); } } template void Unserialize(Stream &is, std::set &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::set::iterator it = m.begin(); for (size_t i = 0; i < nSize; i++) { K key; Unserialize(is, key); it = m.insert(it, key); } } /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::unique_ptr &p) { p.reset(new T(deserialize, is)); } /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::shared_ptr &p) { p = std::make_shared(deserialize, is); } /** * Support for ADD_SERIALIZE_METHODS and READWRITE macro */ struct CSerActionSerialize { constexpr bool ForRead() const { return false; } }; struct CSerActionUnserialize { constexpr bool ForRead() const { return true; } }; template inline void SerReadWrite(Stream &s, const T &obj, CSerActionSerialize ser_action) { ::Serialize(s, obj); } template inline void SerReadWrite(Stream &s, T &obj, CSerActionUnserialize ser_action) { ::Unserialize(s, obj); } /** * ::GetSerializeSize implementations * * Computing the serialized size of objects is done through a special stream * object of type CSizeComputer, which only records the number of bytes written * to it. * * If your Serialize or SerializationOp method has non-trivial overhead for * serialization, it may be worthwhile to implement a specialized version for * CSizeComputer, which uses the s.seek() method to record bytes that would * be written instead. */ class CSizeComputer { protected: size_t nSize; const int nType; const int nVersion; public: CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {} void write(const char *psz, size_t _nSize) { this->nSize += _nSize; } /** Pretend _nSize bytes are written, without specifying them. 
*/ void seek(size_t _nSize) { this->nSize += _nSize; } template CSizeComputer &operator<<(const T &obj) { ::Serialize(*this, obj); return (*this); } size_t size() const { return nSize; } int GetVersion() const { return nVersion; } int GetType() const { return nType; } }; template void SerializeMany(Stream &s) {} template void SerializeMany(Stream &s, Arg &&arg) { ::Serialize(s, std::forward(arg)); } template void SerializeMany(Stream &s, Arg &&arg, Args &&... args) { ::Serialize(s, std::forward(arg)); ::SerializeMany(s, std::forward(args)...); } template inline void UnserializeMany(Stream &s) {} template inline void UnserializeMany(Stream &s, Arg &arg) { ::Unserialize(s, arg); } template inline void UnserializeMany(Stream &s, Arg &arg, Args &... args) { ::Unserialize(s, arg); ::UnserializeMany(s, args...); } template inline void SerReadWriteMany(Stream &s, CSerActionSerialize ser_action, Args &&... args) { ::SerializeMany(s, std::forward(args)...); } template inline void SerReadWriteMany(Stream &s, CSerActionUnserialize ser_action, Args &... args) { ::UnserializeMany(s, args...); } template inline void WriteVarInt(CSizeComputer &s, I n) { s.seek(GetSizeOfVarInt(n)); } inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize) { s.seek(GetSizeOfCompactSize(nSize)); } template size_t GetSerializeSize(const T &t, int nType, int nVersion = 0) { return (CSizeComputer(nType, nVersion) << t).size(); } template size_t GetSerializeSize(const S &s, const T &t) { return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size(); } #endif // BITCOIN_SERIALIZE_H diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index 56b099364a..34fc87cc79 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -1,249 +1,249 @@ // Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H #define BITCOIN_SUPPORT_LOCKEDPOOL_H #include #include #include #include #include /** * OS-dependent allocation and deallocation of locked/pinned memory pages. * Abstract base class. */ class LockedPageAllocator { public: virtual ~LockedPageAllocator() {} /** * Allocate and lock memory pages. * If len is not a multiple of the system page size, it is rounded up. * Returns 0 in case of allocation failure. * * If locking the memory pages could not be accomplished it will still * return the memory, however the lockingSuccess flag will be false. * lockingSuccess is undefined if the allocation fails. */ virtual void *AllocateLocked(size_t len, bool *lockingSuccess) = 0; /** * Unlock and free memory pages. * Clear the memory before unlocking. */ virtual void FreeLocked(void *addr, size_t len) = 0; /** * Get the total limit on the amount of memory that may be locked by this * process, in bytes. Return size_t max if there is no limit or the limit is * unknown. Return 0 if no memory can be locked at all. */ virtual size_t GetLimit() = 0; }; /** * An arena manages a contiguous region of memory by dividing it into chunks. */ class Arena { public: Arena(void *base, size_t size, size_t alignment); virtual ~Arena(); /** Memory statistics. */ struct Stats { size_t used; size_t free; size_t total; size_t chunks_used; size_t chunks_free; }; /** * Allocate size bytes from this arena. * Returns pointer on success, or 0 if memory is full or the application * tried to allocate 0 bytes. 
*/ void *alloc(size_t size); /** * Free a previously allocated chunk of memory. * Freeing the zero pointer has no effect. * Raises std::runtime_error in case of error. */ void free(void *ptr); /** Get arena usage statistics */ Stats stats() const; #ifdef ARENA_DEBUG void walk() const; #endif /** * Return whether a pointer points inside this arena. * This returns base <= ptr < (base+size) so only use it for (inclusive) * chunk starting addresses. */ bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } private: // non construction-copyable Arena(const Arena &other) = delete; // non copyable Arena &operator=(const Arena &) = delete; /** * Map of chunk address to chunk information. This class makes use of the * sorted order to merge previous and next chunks during deallocation. */ std::map chunks_free; std::map chunks_used; /** Base address of arena */ char *base; /** End address of arena */ char *end; /** Minimum chunk alignment */ size_t alignment; }; /** * Pool for locked memory chunks. * * To avoid sensitive key data from being swapped to disk, the memory in this * pool is locked/pinned. * * An arena manages a contiguous region of memory. The pool starts out with one * arena but can grow to multiple arenas if the need arises. * * Unlike a normal C heap, the administrative structures are separate from the * managed memory. This has been done as the sizes and bases of objects are not * in themselves sensitive information, as to conserve precious locked memory. * In some operating systems the amount of memory that can be locked is small. */ class LockedPool { public: /** * Size of one arena of locked memory. This is a compromise. * Do not set this too low, as managing many arenas will increase allocation * and deallocation overhead. Setting it too high allocates more locked * memory from the OS than strictly necessary. */ static const size_t ARENA_SIZE = 256 * 1024; /** * Chunk alignment. Another compromise. Setting this too high will waste * memory, setting it too low will facilitate fragmentation. */ static const size_t ARENA_ALIGN = 16; /** * Callback when allocation succeeds but locking fails. */ typedef bool (*LockingFailed_Callback)(); /** Memory statistics. */ struct Stats { size_t used; size_t free; size_t total; size_t locked; size_t chunks_used; size_t chunks_free; }; /** * Create a new LockedPool. This takes ownership of the MemoryPageLocker, * you can only instantiate this with LockedPool(std::move(...)). * * The second argument is an optional callback when locking a newly * allocated arena failed. If this callback is provided and returns false, * the allocation fails (hard fail), if it returns true the allocation * proceeds, but it could warn. */ - LockedPool(std::unique_ptr allocator, - LockingFailed_Callback lf_cb_in = nullptr); + explicit LockedPool(std::unique_ptr allocator, + LockingFailed_Callback lf_cb_in = nullptr); ~LockedPool(); /** * Allocate size bytes from this arena. * Returns pointer on success, or 0 if memory is full or the application * tried to allocate 0 bytes. */ void *alloc(size_t size); /** * Free a previously allocated chunk of memory. * Freeing the zero pointer has no effect. * Raises std::runtime_error in case of error. 
*/ void free(void *ptr); /** Get pool usage statistics */ Stats stats() const; private: // non construction-copyable LockedPool(const LockedPool &other) = delete; // non copyable LockedPool &operator=(const LockedPool &) = delete; std::unique_ptr allocator; /** Create an arena from locked pages */ class LockedPageArena : public Arena { public: LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align); ~LockedPageArena(); private: void *base; size_t size; LockedPageAllocator *allocator; }; bool new_arena(size_t size, size_t align); std::list arenas; LockingFailed_Callback lf_cb; size_t cumulative_bytes_locked; /** * Mutex protects access to this pool's data structures, including arenas. */ mutable std::mutex mutex; }; /** * Singleton class to keep track of locked (ie, non-swappable) memory, for use * in std::allocator templates. * * Some implementations of the STL allocate memory in some constructors (i.e., * see MSVC's vector implementation where it allocates 1 byte of memory in * the allocator). Due to the unpredictable order of static initializers, we * have to make sure the LockedPoolManager instance exists before any other * STL-based objects that use secure_allocator are created. So instead of having * LockedPoolManager also be static-initialized, it is created on demand. */ class LockedPoolManager : public LockedPool { public: /** Return the current instance, or create it once */ static LockedPoolManager &Instance() { std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance); return *LockedPoolManager::_instance; } private: - LockedPoolManager(std::unique_ptr allocator); + explicit LockedPoolManager(std::unique_ptr allocator); /** Create a new LockedPoolManager specialized to the OS */ static void CreateInstance(); /** Called when locking fails, warn the user here */ static bool LockingFailed(); static LockedPoolManager *_instance; static std::once_flag init_flag; }; #endif // BITCOIN_SUPPORT_LOCKEDPOOL_H diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp index dfa0d78294..d1f18536b8 100644 --- a/src/test/base58_tests.cpp +++ b/src/test/base58_tests.cpp @@ -1,277 +1,277 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
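The LockedPoolManager singleton above is the intended entry point for locked allocations; a minimal sketch, with the 32-byte request size and fill pattern illustrative only.

#include "support/lockedpool.h"

#include <cstring>

int main() {
    LockedPool &pool = LockedPoolManager::Instance();

    // Request a small locked (non-swappable) chunk for secret material.
    void *secret = pool.alloc(32);
    if (secret == nullptr) {
        return 1; // pool exhausted, or a zero-byte request
    }
    std::memset(secret, 0xAB, 32); // stand-in for real key material

    pool.free(secret);

    // Inspect usage: bytes used/free/total and how much is actually locked.
    const LockedPool::Stats s = pool.stats();
    (void)s;
    return 0;
}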
#include "base58.h" #include "data/base58_encode_decode.json.h" #include "data/base58_keys_invalid.json.h" #include "data/base58_keys_valid.json.h" #include "key.h" #include "script/script.h" #include "test/jsonutil.h" #include "test/test_bitcoin.h" #include "uint256.h" #include "util.h" #include "utilstrencodings.h" #include #include BOOST_FIXTURE_TEST_SUITE(base58_tests, BasicTestingSetup) // Goal: test low-level base58 encoding functionality BOOST_AUTO_TEST_CASE(base58_EncodeBase58) { UniValue tests = read_json(std::string(json_tests::base58_encode_decode, json_tests::base58_encode_decode + sizeof(json_tests::base58_encode_decode))); for (unsigned int idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); // Allow for extra stuff (useful for comments) if (test.size() < 2) { BOOST_ERROR("Bad test: " << strTest); continue; } std::vector sourcedata = ParseHex(test[0].get_str()); std::string base58string = test[1].get_str(); BOOST_CHECK_MESSAGE( EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size()) == base58string, strTest); } } // Goal: test low-level base58 decoding functionality BOOST_AUTO_TEST_CASE(base58_DecodeBase58) { UniValue tests = read_json(std::string(json_tests::base58_encode_decode, json_tests::base58_encode_decode + sizeof(json_tests::base58_encode_decode))); std::vector result; for (unsigned int idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); // Allow for extra stuff (useful for comments) if (test.size() < 2) { BOOST_ERROR("Bad test: " << strTest); continue; } std::vector expected = ParseHex(test[0].get_str()); std::string base58string = test[1].get_str(); BOOST_CHECK_MESSAGE(DecodeBase58(base58string, result), strTest); BOOST_CHECK_MESSAGE( result.size() == expected.size() && std::equal(result.begin(), result.end(), expected.begin()), strTest); } BOOST_CHECK(!DecodeBase58("invalid", result)); // check that DecodeBase58 skips whitespace, but still fails with unexpected // non-whitespace at the end. 
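The round trip these tests exercise, reduced to its essentials; a minimal sketch, with the payload bytes (including the leading zero that becomes a leading '1') chosen for illustration.

#include "base58.h"

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

int main() {
    const std::vector<uint8_t> payload = {0x00, 0x01, 0x02, 0x03};

    // Leading zero bytes survive the round trip as leading '1' characters.
    const std::string encoded = EncodeBase58(payload);
    assert(encoded.front() == '1');

    std::vector<uint8_t> decoded;
    assert(DecodeBase58(encoded, decoded));
    assert(decoded == payload);
    return 0;
}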
BOOST_CHECK(!DecodeBase58(" \t\n\v\f\r skip \r\f\v\n\t a", result)); BOOST_CHECK(DecodeBase58(" \t\n\v\f\r skip \r\f\v\n\t ", result)); std::vector expected = ParseHex("971a55"); BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end()); } // Visitor to check address type class TestAddrTypeVisitor : public boost::static_visitor { private: std::string exp_addrType; public: - TestAddrTypeVisitor(const std::string &_exp_addrType) + explicit TestAddrTypeVisitor(const std::string &_exp_addrType) : exp_addrType(_exp_addrType) {} bool operator()(const CKeyID &id) const { return (exp_addrType == "pubkey"); } bool operator()(const CScriptID &id) const { return (exp_addrType == "script"); } bool operator()(const CNoDestination &no) const { return (exp_addrType == "none"); } }; // Visitor to check address payload class TestPayloadVisitor : public boost::static_visitor { private: std::vector exp_payload; public: - TestPayloadVisitor(std::vector &_exp_payload) + explicit TestPayloadVisitor(std::vector &_exp_payload) : exp_payload(_exp_payload) {} bool operator()(const CKeyID &id) const { uint160 exp_key(exp_payload); return exp_key == id; } bool operator()(const CScriptID &id) const { uint160 exp_key(exp_payload); return exp_key == id; } bool operator()(const CNoDestination &no) const { return exp_payload.size() == 0; } }; // Goal: check that parsed keys match test payload BOOST_AUTO_TEST_CASE(base58_keys_valid_parse) { UniValue tests = read_json(std::string( json_tests::base58_keys_valid, json_tests::base58_keys_valid + sizeof(json_tests::base58_keys_valid))); CBitcoinSecret secret; CTxDestination destination; SelectParams(CBaseChainParams::MAIN); for (unsigned int idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); // Allow for extra stuff (useful for comments) if (test.size() < 3) { BOOST_ERROR("Bad test: " << strTest); continue; } std::string exp_base58string = test[0].get_str(); std::vector exp_payload = ParseHex(test[1].get_str()); const UniValue &metadata = test[2].get_obj(); bool isPrivkey = find_value(metadata, "isPrivkey").get_bool(); bool isTestnet = find_value(metadata, "isTestnet").get_bool(); if (isTestnet) SelectParams(CBaseChainParams::TESTNET); else SelectParams(CBaseChainParams::MAIN); if (isPrivkey) { bool isCompressed = find_value(metadata, "isCompressed").get_bool(); // Must be valid private key BOOST_CHECK_MESSAGE(secret.SetString(exp_base58string), "!SetString:" + strTest); BOOST_CHECK_MESSAGE(secret.IsValid(), "!IsValid:" + strTest); CKey privkey = secret.GetKey(); BOOST_CHECK_MESSAGE(privkey.IsCompressed() == isCompressed, "compressed mismatch:" + strTest); BOOST_CHECK_MESSAGE(privkey.size() == exp_payload.size() && std::equal(privkey.begin(), privkey.end(), exp_payload.begin()), "key mismatch:" + strTest); // Private key must be invalid public key destination = DecodeLegacyAddr(exp_base58string, Params()); BOOST_CHECK_MESSAGE(!IsValidDestination(destination), "IsValid privkey as pubkey:" + strTest); } else { // "script" or "pubkey" std::string exp_addrType = find_value(metadata, "addrType").get_str(); // Must be valid public key destination = DecodeLegacyAddr(exp_base58string, Params()); BOOST_CHECK_MESSAGE(IsValidDestination(destination), "!IsValid:" + strTest); BOOST_CHECK_MESSAGE((boost::get(&destination) != nullptr) == (exp_addrType == "script"), "isScript mismatch" + strTest); BOOST_CHECK_MESSAGE( boost::apply_visitor(TestAddrTypeVisitor(exp_addrType), destination), "addrType mismatch" + 
strTest); // Public key must be invalid private key secret.SetString(exp_base58string); BOOST_CHECK_MESSAGE(!secret.IsValid(), "IsValid pubkey as privkey:" + strTest); } } } // Goal: check that generated keys match test vectors BOOST_AUTO_TEST_CASE(base58_keys_valid_gen) { UniValue tests = read_json(std::string( json_tests::base58_keys_valid, json_tests::base58_keys_valid + sizeof(json_tests::base58_keys_valid))); for (unsigned int idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); // Allow for extra stuff (useful for comments) if (test.size() < 3) { BOOST_ERROR("Bad test: " << strTest); continue; } std::string exp_base58string = test[0].get_str(); std::vector exp_payload = ParseHex(test[1].get_str()); const UniValue &metadata = test[2].get_obj(); bool isPrivkey = find_value(metadata, "isPrivkey").get_bool(); bool isTestnet = find_value(metadata, "isTestnet").get_bool(); if (isTestnet) SelectParams(CBaseChainParams::TESTNET); else SelectParams(CBaseChainParams::MAIN); if (isPrivkey) { bool isCompressed = find_value(metadata, "isCompressed").get_bool(); CKey key; key.Set(exp_payload.begin(), exp_payload.end(), isCompressed); assert(key.IsValid()); CBitcoinSecret secret; secret.SetKey(key); BOOST_CHECK_MESSAGE(secret.ToString() == exp_base58string, "result mismatch: " + strTest); } else { std::string exp_addrType = find_value(metadata, "addrType").get_str(); CTxDestination dest; if (exp_addrType == "pubkey") { dest = CKeyID(uint160(exp_payload)); } else if (exp_addrType == "script") { dest = CScriptID(uint160(exp_payload)); } else if (exp_addrType == "none") { dest = CNoDestination(); } else { BOOST_ERROR("Bad addrtype: " << strTest); continue; } std::string address = EncodeLegacyAddr(dest, Params()); BOOST_CHECK_MESSAGE(address == exp_base58string, "mismatch: " + strTest); } } SelectParams(CBaseChainParams::MAIN); } // Goal: check that base58 parsing code is robust against a variety of corrupted // data BOOST_AUTO_TEST_CASE(base58_keys_invalid) { // Negative testcases UniValue tests = read_json(std::string(json_tests::base58_keys_invalid, json_tests::base58_keys_invalid + sizeof(json_tests::base58_keys_invalid))); CBitcoinSecret secret; CTxDestination destination; for (unsigned int idx = 0; idx < tests.size(); idx++) { UniValue test = tests[idx]; std::string strTest = test.write(); // Allow for extra stuff (useful for comments) if (test.size() < 1) { BOOST_ERROR("Bad test: " << strTest); continue; } std::string exp_base58string = test[0].get_str(); // must be invalid as public and as private key destination = DecodeLegacyAddr(exp_base58string, Params()); BOOST_CHECK_MESSAGE(!IsValidDestination(destination), "IsValid pubkey:" + strTest); secret.SetString(exp_base58string); BOOST_CHECK_MESSAGE(!secret.IsValid(), "IsValid privkey:" + strTest); } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/bip32_tests.cpp b/src/test/bip32_tests.cpp index f6319ac121..cfb4c29338 100644 --- a/src/test/bip32_tests.cpp +++ b/src/test/bip32_tests.cpp @@ -1,170 +1,171 @@ // Copyright (c) 2013-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
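The legacy-address round trip those vectors cover, in isolation; a sketch assuming EncodeLegacyAddr and DecodeLegacyAddr are visible through base58.h as in the test above, and using an all-zero key hash purely for illustration.

#include "base58.h"
#include "chainparams.h"
#include "script/standard.h"

#include <cassert>
#include <string>

int main() {
    SelectParams(CBaseChainParams::MAIN);

    // Encode a dummy key hash as a legacy P2PKH address and parse it back.
    const CTxDestination dest = CKeyID(); // all-zero hash, illustrative only
    const std::string address = EncodeLegacyAddr(dest, Params());

    const CTxDestination parsed = DecodeLegacyAddr(address, Params());
    assert(IsValidDestination(parsed));
    assert(parsed == dest);
    return 0;
}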
#include #include "base58.h" #include "key.h" #include "test/test_bitcoin.h" #include "uint256.h" #include "util.h" #include "utilstrencodings.h" #include #include struct TestDerivation { std::string pub; std::string prv; unsigned int nChild; }; struct TestVector { std::string strHexMaster; std::vector vDerive; - TestVector(std::string strHexMasterIn) : strHexMaster(strHexMasterIn) {} + explicit TestVector(std::string strHexMasterIn) + : strHexMaster(strHexMasterIn) {} TestVector &operator()(std::string pub, std::string prv, unsigned int nChild) { vDerive.push_back(TestDerivation()); TestDerivation &der = vDerive.back(); der.pub = pub; der.prv = prv; der.nChild = nChild; return *this; } }; TestVector test1 = TestVector("000102030405060708090a0b0c0d0e0f")( "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1R" "upje8YtGqsefD265TMg7usUDFdp6W1EGMcet8", "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWU" "tg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi", 0x80000000)("xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LH" "hwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw", "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1" "rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7", 1)("xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFH" "KkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ", "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSx" "qu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs", 0x80000002)( "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGT" "sxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5", "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4" "ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM", 2)("xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZL" "RQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV", "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7" "RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334", 1000000000)("xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNT" "EcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy", "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8" "kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76", 0); TestVector test2 = TestVector("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdba" "b7b4b1aeaba8a5a29f9c999693908d8a8784817e7b787572" "6f6c696663605d5a5754514e4b484542")( "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6" "Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB", "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFm" "M8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U", 0)("xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfv" "rnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH", "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJ" "D9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt", 0xFFFFFFFF)("xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ" "85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a", "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYE" "eEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9", 1)("xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg" "5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon", "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTR" "XSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef", 0xFFFFFFFE)( "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5J" "aHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL", 
"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyu" "oseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc", 2)("xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsA" "pME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt", "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38" "EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j", 0); void RunTest(const TestVector &test) { std::vector seed = ParseHex(test.strHexMaster); CExtKey key; CExtPubKey pubkey; key.SetMaster(&seed[0], seed.size()); pubkey = key.Neuter(); for (const TestDerivation &derive : test.vDerive) { uint8_t data[74]; key.Encode(data); pubkey.Encode(data); // Test private key CBitcoinExtKey b58key; b58key.SetKey(key); BOOST_CHECK(b58key.ToString() == derive.prv); CBitcoinExtKey b58keyDecodeCheck(derive.prv); CExtKey checkKey = b58keyDecodeCheck.GetKey(); // ensure a base58 decoded key also matches assert(checkKey == key); // Test public key CBitcoinExtPubKey b58pubkey; b58pubkey.SetKey(pubkey); BOOST_CHECK(b58pubkey.ToString() == derive.pub); CBitcoinExtPubKey b58PubkeyDecodeCheck(derive.pub); CExtPubKey checkPubKey = b58PubkeyDecodeCheck.GetKey(); // ensure a base58 decoded pubkey also matches assert(checkPubKey == pubkey); // Derive new keys CExtKey keyNew; BOOST_CHECK(key.Derive(keyNew, derive.nChild)); CExtPubKey pubkeyNew = keyNew.Neuter(); if (!(derive.nChild & 0x80000000)) { // Compare with public derivation CExtPubKey pubkeyNew2; BOOST_CHECK(pubkey.Derive(pubkeyNew2, derive.nChild)); BOOST_CHECK(pubkeyNew == pubkeyNew2); } key = keyNew; pubkey = pubkeyNew; CDataStream ssPub(SER_DISK, CLIENT_VERSION); ssPub << pubkeyNew; BOOST_CHECK(ssPub.size() == 75); CDataStream ssPriv(SER_DISK, CLIENT_VERSION); ssPriv << keyNew; BOOST_CHECK(ssPriv.size() == 75); CExtPubKey pubCheck; CExtKey privCheck; ssPub >> pubCheck; ssPriv >> privCheck; BOOST_CHECK(pubCheck == pubkeyNew); BOOST_CHECK(privCheck == keyNew); } } BOOST_FIXTURE_TEST_SUITE(bip32_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(bip32_test1) { RunTest(test1); } BOOST_AUTO_TEST_CASE(bip32_test2) { RunTest(test2); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 0d0314cd09..2efbc23bf4 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -1,388 +1,388 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "blockencodings.h" #include "chainparams.h" #include "config.h" #include "consensus/merkle.h" #include "random.h" #include "test/test_bitcoin.h" #include std::vector> extra_txn; struct RegtestingSetup : public TestingSetup { RegtestingSetup() : TestingSetup(CBaseChainParams::REGTEST) {} }; BOOST_FIXTURE_TEST_SUITE(blockencodings_tests, RegtestingSetup) static CBlock BuildBlockTestCase() { CBlock block; CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig.resize(10); tx.vout.resize(1); tx.vout[0].nValue = 42 * SATOSHI; block.vtx.resize(3); block.vtx[0] = MakeTransactionRef(tx); block.nVersion = 42; block.hashPrevBlock = InsecureRand256(); block.nBits = 0x207fffff; tx.vin[0].prevout = COutPoint(InsecureRand256(), 0); block.vtx[1] = MakeTransactionRef(tx); tx.vin.resize(10); for (size_t i = 0; i < tx.vin.size(); i++) { tx.vin[i].prevout = COutPoint(InsecureRand256(), 0); } block.vtx[2] = MakeTransactionRef(tx); bool mutated; block.hashMerkleRoot = BlockMerkleRoot(block, &mutated); assert(!mutated); GlobalConfig config; while (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { ++block.nNonce; } return block; } // Number of shared use_counts we expect for a tx we havent touched // == 2 (mempool + our copy from the GetSharedTx call) #define SHARED_TX_OFFSET 2 BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(*block.vtx[2])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); // Do a simple ShortTxIDs RT { CBlockHeaderAndShortTxIDs shortIDs(block); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); BOOST_CHECK(!partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); size_t poolSize = pool.size(); pool.removeRecursive(*block.vtx[2]); BOOST_CHECK_EQUAL(pool.size(), poolSize - 1); CBlock block2; { // No transactions. PartiallyDownloadedBlock tmp = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); partialBlock = tmp; } // Wrong transaction { // Current implementation doesn't check txn here, but don't require // that. 
PartiallyDownloadedBlock tmp = partialBlock; partialBlock.FillBlock(block2, {block.vtx[2]}); partialBlock = tmp; } bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); } } class TestHeaderAndShortIDs { // Utility to encode custom CBlockHeaderAndShortTxIDs public: CBlockHeader header; uint64_t nonce; std::vector shorttxids; std::vector prefilledtxn; - TestHeaderAndShortIDs(const CBlockHeaderAndShortTxIDs &orig) { + explicit TestHeaderAndShortIDs(const CBlockHeaderAndShortTxIDs &orig) { CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << orig; stream >> *this; } - TestHeaderAndShortIDs(const CBlock &block) + explicit TestHeaderAndShortIDs(const CBlock &block) : TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs(block)) {} uint64_t GetShortID(const uint256 &txhash) const { CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << *this; CBlockHeaderAndShortTxIDs base; stream >> base; return base.GetShortID(txhash); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(header); READWRITE(nonce); size_t shorttxids_size = shorttxids.size(); READWRITE(VARINT(shorttxids_size)); shorttxids.resize(shorttxids_size); for (size_t i = 0; i < shorttxids.size(); i++) { uint32_t lsb = shorttxids[i] & 0xffffffff; uint16_t msb = (shorttxids[i] >> 32) & 0xffff; READWRITE(lsb); READWRITE(msb); shorttxids[i] = (uint64_t(msb) << 32) | uint64_t(lsb); } READWRITE(prefilledtxn); } }; BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[2]->GetId(), entry.FromTx(*block.vtx[2])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; // Test with pre-forwarding tx 1, but not coinbase { TestHeaderAndShortIDs shortIDs(block); shortIDs.prefilledtxn.resize(1); shortIDs.prefilledtxn[0] = {1, block.vtx[1]}; shortIDs.shorttxids.resize(2); shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0]->GetId()); shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2]->GetId()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(!partialBlock.IsTxAvailable(0)); BOOST_CHECK(partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[2]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; { // No transactions. PartiallyDownloadedBlock tmp = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); partialBlock = tmp; } // Wrong transaction { // Current implementation doesn't check txn here, but don't require // that. 
PartiallyDownloadedBlock tmp = partialBlock; partialBlock.FillBlock(block2, {block.vtx[1]}); partialBlock = tmp; } bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); txhash = block.vtx[2]->GetId(); block.vtx.clear(); block2.vtx.clear(); block3.vtx.clear(); // + 1 because of partialBlockCopy. BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); } BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); } BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) { CTxMemPool pool; TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); pool.addUnchecked(block.vtx[1]->GetId(), entry.FromTx(*block.vtx[1])); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[1]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); uint256 txhash; // Test with pre-forwarding coinbase + tx 2 with tx 1 in mempool { TestHeaderAndShortIDs shortIDs(block); shortIDs.prefilledtxn.resize(2); shortIDs.prefilledtxn[0] = {0, block.vtx[0]}; // id == 1 as it is 1 after index 1 shortIDs.prefilledtxn[1] = {1, block.vtx[2]}; shortIDs.shorttxids.resize(1); shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1]->GetId()); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); BOOST_CHECK(partialBlock.IsTxAvailable(1)); BOOST_CHECK(partialBlock.IsTxAvailable(2)); BOOST_CHECK_EQUAL( pool.mapTx.find(block.vtx[1]->GetId())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); txhash = block.vtx[1]->GetId(); block.vtx.clear(); block2.vtx.clear(); // + 1 because of partialBlockCopy. 
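// (partialBlockCopy still holds a CTransactionRef to the mempool entry, so
// the expected use_count stays one above the baseline until the copy goes
// out of scope at the end of this block.)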
BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1); } BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0); } BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) { CTxMemPool pool; CMutableTransaction coinbase; coinbase.vin.resize(1); coinbase.vin[0].scriptSig.resize(10); coinbase.vout.resize(1); coinbase.vout[0].nValue = 42 * SATOSHI; CBlock block; block.vtx.resize(1); block.vtx[0] = MakeTransactionRef(std::move(coinbase)); block.nVersion = 42; block.hashPrevBlock = InsecureRand256(); block.nBits = 0x207fffff; bool mutated; block.hashMerkleRoot = BlockMerkleRoot(block, &mutated); assert(!mutated); GlobalConfig config; while (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { ++block.nNonce; } // Test simple header round-trip with only coinbase { CBlockHeaderAndShortTxIDs shortIDs(block); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << shortIDs; CBlockHeaderAndShortTxIDs shortIDs2; stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(GetConfig(), &pool); BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); CBlock block2; std::vector vtx_missing; BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); } } BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) { BlockTransactionsRequest req1; req1.blockhash = InsecureRand256(); req1.indices.resize(4); req1.indices[0] = 0; req1.indices[1] = 1; req1.indices[2] = 3; req1.indices[3] = 4; CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << req1; BlockTransactionsRequest req2; stream >> req2; BOOST_CHECK_EQUAL(req1.blockhash.ToString(), req2.blockhash.ToString()); BOOST_CHECK_EQUAL(req1.indices.size(), req2.indices.size()); BOOST_CHECK_EQUAL(req1.indices[0], req2.indices[0]); BOOST_CHECK_EQUAL(req1.indices[1], req2.indices[1]); BOOST_CHECK_EQUAL(req1.indices[2], req2.indices[2]); BOOST_CHECK_EQUAL(req1.indices[3], req2.indices[3]); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index c257b9da9b..e8fb03714a 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -1,897 +1,897 @@ // Copyright (c) 2014-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "coins.h" #include "consensus/validation.h" #include "script/standard.h" #include "test/test_bitcoin.h" #include "uint256.h" #include "undo.h" #include "utilstrencodings.h" #include "validation.h" #include #include #include namespace { //! equality test bool operator==(const Coin &a, const Coin &b) { // Empty Coin objects are always equal. if (a.IsSpent() && b.IsSpent()) { return true; } return a.IsCoinBase() == b.IsCoinBase() && a.GetHeight() == b.GetHeight() && a.GetTxOut() == b.GetTxOut(); } class CCoinsViewTest : public CCoinsView { uint256 hashBestBlock_; std::map map_; public: bool GetCoin(const COutPoint &outpoint, Coin &coin) const override { std::map::const_iterator it = map_.find(outpoint); if (it == map_.end()) { return false; } coin = it->second; if (coin.IsSpent() && InsecureRandBool() == 0) { // Randomly return false in case of an empty entry. 
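// A backing view may legitimately answer "not found" instead of returning a
// spent coin -- the two are equivalent from the cache's point of view -- so
// the random choice here deliberately exercises both behaviors.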
return false; } return true; } bool HaveCoin(const COutPoint &outpoint) const override { Coin coin; return GetCoin(outpoint, coin); } uint256 GetBestBlock() const override { return hashBestBlock_; } bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override { for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Same optimization used in CCoinsViewDB is to only write dirty // entries. map_[it->first] = it->second.coin; if (it->second.coin.IsSpent() && InsecureRandRange(3) == 0) { // Randomly delete empty entries on write. map_.erase(it->first); } } mapCoins.erase(it++); } if (!hashBlock.IsNull()) { hashBestBlock_ = hashBlock; } return true; } }; class CCoinsViewCacheTest : public CCoinsViewCache { public: - CCoinsViewCacheTest(CCoinsView *_base) : CCoinsViewCache(_base) {} + explicit CCoinsViewCacheTest(CCoinsView *_base) : CCoinsViewCache(_base) {} void SelfTest() const { // Manually recompute the dynamic usage of the whole data, and compare // it. size_t ret = memusage::DynamicUsage(cacheCoins); size_t count = 0; for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) { ret += it->second.coin.DynamicMemoryUsage(); count++; } BOOST_CHECK_EQUAL(GetCacheSize(), count); BOOST_CHECK_EQUAL(DynamicMemoryUsage(), ret); } CCoinsMap &map() { return cacheCoins; } size_t &usage() { return cachedCoinsUsage; } }; } // namespace BOOST_FIXTURE_TEST_SUITE(coins_tests, BasicTestingSetup) static const unsigned int NUM_SIMULATION_ITERATIONS = 40000; // This is a large randomized insert/remove simulation test on a variable-size // stack of caches on top of CCoinsViewTest. // // It will randomly create/update/delete Coin entries to a tip of caches, with // txids picked from a limited list of random 256-bit hashes. Occasionally, a // new tip is added to the stack of caches, or the tip is flushed and removed. // // During the process, booleans are kept to make sure that the randomized // operation hits all branches. BOOST_AUTO_TEST_CASE(coins_cache_simulation_test) { // Various coverage trackers. bool removed_all_caches = false; bool reached_4_caches = false; bool added_an_entry = false; bool added_an_unspendable_entry = false; bool removed_an_entry = false; bool updated_an_entry = false; bool found_an_entry = false; bool missed_an_entry = false; bool uncached_an_entry = false; // A simple map to track what we expect the cache stack to represent. std::map result; // The cache stack. // A CCoinsViewTest at the bottom. CCoinsViewTest base; // A stack of CCoinsViewCaches on top. std::vector stack; // Start with one cache. stack.push_back(new CCoinsViewCacheTest(&base)); // Use a limited set of random transaction ids, so we do test overwriting // entries. std::vector txids; txids.resize(NUM_SIMULATION_ITERATIONS / 8); for (size_t i = 0; i < txids.size(); i++) { txids[i] = TxId(InsecureRand256()); } for (unsigned int i = 0; i < NUM_SIMULATION_ITERATIONS; i++) { // Do a random modification. { // txid we're going to modify in this iteration. TxId txid = txids[InsecureRandRange(txids.size())]; Coin &coin = result[COutPoint(txid, 0)]; const Coin &entry = (InsecureRandRange(500) == 0) ? 
AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0)); BOOST_CHECK(coin == entry); if (InsecureRandRange(5) == 0 || coin.IsSpent()) { CTxOut txout; txout.nValue = int64_t(insecure_rand()) * SATOSHI; if (InsecureRandRange(16) == 0 && coin.IsSpent()) { txout.scriptPubKey.assign(1 + InsecureRandBits(6), OP_RETURN); BOOST_CHECK(txout.scriptPubKey.IsUnspendable()); added_an_unspendable_entry = true; } else { // Random sizes so we can test memory usage accounting txout.scriptPubKey.assign(InsecureRandBits(6), 0); (coin.IsSpent() ? added_an_entry : updated_an_entry) = true; coin = Coin(txout, 1, false); } Coin newcoin(txout, 1, false); stack.back()->AddCoin(COutPoint(txid, 0), newcoin, !coin.IsSpent() || insecure_rand() & 1); } else { removed_an_entry = true; coin.Clear(); stack.back()->SpendCoin(COutPoint(txid, 0)); } } // One every 10 iterations, remove a random entry from the cache if (InsecureRandRange(10)) { COutPoint out(txids[insecure_rand() % txids.size()], 0); int cacheid = insecure_rand() % stack.size(); stack[cacheid]->Uncache(out); uncached_an_entry |= !stack[cacheid]->HaveCoinInCache(out); } // Once every 1000 iterations and at the end, verify the full cache. if (InsecureRandRange(1000) == 1 || i == NUM_SIMULATION_ITERATIONS - 1) { for (auto it = result.begin(); it != result.end(); it++) { bool have = stack.back()->HaveCoin(it->first); const Coin &coin = stack.back()->AccessCoin(it->first); BOOST_CHECK(have == !coin.IsSpent()); BOOST_CHECK(coin == it->second); if (coin.IsSpent()) { missed_an_entry = true; } else { BOOST_CHECK(stack.back()->HaveCoinInCache(it->first)); found_an_entry = true; } } for (const CCoinsViewCacheTest *test : stack) { test->SelfTest(); } } // Every 100 iterations, flush an intermediate cache if (InsecureRandRange(100) == 0) { if (stack.size() > 1 && InsecureRandBool() == 0) { unsigned int flushIndex = InsecureRandRange(stack.size() - 1); stack[flushIndex]->Flush(); } } if (InsecureRandRange(100) == 0) { // Every 100 iterations, change the cache stack. if (stack.size() > 0 && InsecureRandBool() == 0) { // Remove the top cache stack.back()->Flush(); delete stack.back(); stack.pop_back(); } if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) { // Add a new cache CCoinsView *tip = &base; if (stack.size() > 0) { tip = stack.back(); } else { removed_all_caches = true; } stack.push_back(new CCoinsViewCacheTest(tip)); if (stack.size() == 4) { reached_4_caches = true; } } } } // Clean up the stack. while (stack.size() > 0) { delete stack.back(); stack.pop_back(); } // Verify coverage. BOOST_CHECK(removed_all_caches); BOOST_CHECK(reached_4_caches); BOOST_CHECK(added_an_entry); BOOST_CHECK(added_an_unspendable_entry); BOOST_CHECK(removed_an_entry); BOOST_CHECK(updated_an_entry); BOOST_CHECK(found_an_entry); BOOST_CHECK(missed_an_entry); BOOST_CHECK(uncached_an_entry); } // Store of all necessary tx and undo data for next test typedef std::map> UtxoData; UtxoData utxoData; UtxoData::iterator FindRandomFrom(const std::set &utxoSet) { assert(utxoSet.size()); auto utxoSetIt = utxoSet.lower_bound(COutPoint(InsecureRand256(), 0)); if (utxoSetIt == utxoSet.end()) { utxoSetIt = utxoSet.begin(); } auto utxoDataIt = utxoData.find(*utxoSetIt); assert(utxoDataIt != utxoData.end()); return utxoDataIt; } // This test is similar to the previous test except the emphasis is on testing // the functionality of UpdateCoins random txs are created and UpdateCoins is // used to update the cache stack. 
In particular it is tested that spending a // duplicate coinbase tx has the expected effect (the other duplicate is // overwitten at all cache levels) BOOST_AUTO_TEST_CASE(updatecoins_simulation_test) { bool spent_a_duplicate_coinbase = false; // A simple map to track what we expect the cache stack to represent. std::map result; // The cache stack. // A CCoinsViewTest at the bottom. CCoinsViewTest base; // A stack of CCoinsViewCaches on top. std::vector stack; // Start with one cache. stack.push_back(new CCoinsViewCacheTest(&base)); // Track the txids we've used in various sets std::set coinbase_coins; std::set disconnected_coins; std::set duplicate_coins; std::set utxoset; for (int64_t i = 0; i < NUM_SIMULATION_ITERATIONS; i++) { uint32_t randiter = insecure_rand(); // 19/20 txs add a new transaction if (randiter % 20 < 19) { CMutableTransaction tx; tx.vin.resize(1); tx.vout.resize(1); // Keep txs unique unless intended to duplicate. tx.vout[0].nValue = i * SATOSHI; // Random sizes so we can test memory usage accounting tx.vout[0].scriptPubKey.assign(insecure_rand() & 0x3F, 0); unsigned int height = insecure_rand(); Coin old_coin; // 2/20 times create a new coinbase if (randiter % 20 < 2 || coinbase_coins.size() < 10) { // 1/10 of those times create a duplicate coinbase if (InsecureRandRange(10) == 0 && coinbase_coins.size()) { auto utxod = FindRandomFrom(coinbase_coins); // Reuse the exact same coinbase tx = std::get<0>(utxod->second); // shouldn't be available for reconnection if its been // duplicated disconnected_coins.erase(utxod->first); duplicate_coins.insert(utxod->first); } else { coinbase_coins.insert(COutPoint(tx.GetId(), 0)); } assert(CTransaction(tx).IsCoinBase()); } // 17/20 times reconnect previous or add a regular tx else { COutPoint prevout; // 1/20 times reconnect a previously disconnected tx if (randiter % 20 == 2 && disconnected_coins.size()) { auto utxod = FindRandomFrom(disconnected_coins); tx = std::get<0>(utxod->second); prevout = tx.vin[0].prevout; if (!CTransaction(tx).IsCoinBase() && !utxoset.count(prevout)) { disconnected_coins.erase(utxod->first); continue; } // If this tx is already IN the UTXO, then it must be a // coinbase, and it must be a duplicate if (utxoset.count(utxod->first)) { assert(CTransaction(tx).IsCoinBase()); assert(duplicate_coins.count(utxod->first)); } disconnected_coins.erase(utxod->first); } // 16/20 times create a regular tx else { auto utxod = FindRandomFrom(utxoset); prevout = utxod->first; // Construct the tx to spend the coins of prevouthash tx.vin[0].prevout = COutPoint(prevout.GetTxId(), 0); assert(!CTransaction(tx).IsCoinBase()); } // In this simple test coins only have two states, spent or // unspent, save the unspent state to restore old_coin = result[prevout]; // Update the expected result of prevouthash to know these coins // are spent result[prevout].Clear(); utxoset.erase(prevout); // The test is designed to ensure spending a duplicate coinbase // will work properly if that ever happens and not resurrect the // previously overwritten coinbase if (duplicate_coins.count(prevout)) { spent_a_duplicate_coinbase = true; } } // Update the expected result to know about the new output coins assert(tx.vout.size() == 1); const COutPoint outpoint(tx.GetId(), 0); result[outpoint] = Coin(tx.vout[0], height, CTransaction(tx).IsCoinBase()); // Call UpdateCoins on the top cache CTxUndo undo; UpdateCoins(*(stack.back()), CTransaction(tx), undo, height); // Update the utxo set for future spends utxoset.insert(outpoint); // Track this tx and 
undo info to use later utxoData.emplace(outpoint, std::make_tuple(CTransaction(tx), undo, old_coin)); } // 1/20 times undo a previous transaction else if (utxoset.size()) { auto utxod = FindRandomFrom(utxoset); CTransaction &tx = std::get<0>(utxod->second); CTxUndo &undo = std::get<1>(utxod->second); Coin &orig_coin = std::get<2>(utxod->second); // Update the expected result // Remove new outputs result[utxod->first].Clear(); // If not coinbase restore prevout if (!tx.IsCoinBase()) { result[tx.vin[0].prevout] = orig_coin; } // Disconnect the tx from the current UTXO // See code in DisconnectBlock // remove outputs stack.back()->SpendCoin(utxod->first); // restore inputs if (!tx.IsCoinBase()) { const COutPoint &out = tx.vin[0].prevout; UndoCoinSpend(undo.vprevout[0], *(stack.back()), out); } // Store as a candidate for reconnection disconnected_coins.insert(utxod->first); // Update the utxoset utxoset.erase(utxod->first); if (!tx.IsCoinBase()) { utxoset.insert(tx.vin[0].prevout); } } // Once every 1000 iterations and at the end, verify the full cache. if (InsecureRandRange(1000) == 1 || i == NUM_SIMULATION_ITERATIONS - 1) { for (auto it = result.begin(); it != result.end(); it++) { bool have = stack.back()->HaveCoin(it->first); const Coin &coin = stack.back()->AccessCoin(it->first); BOOST_CHECK(have == !coin.IsSpent()); BOOST_CHECK(coin == it->second); } } // One every 10 iterations, remove a random entry from the cache if (utxoset.size() > 1 && InsecureRandRange(30)) { stack[insecure_rand() % stack.size()]->Uncache( FindRandomFrom(utxoset)->first); } if (disconnected_coins.size() > 1 && InsecureRandRange(30)) { stack[insecure_rand() % stack.size()]->Uncache( FindRandomFrom(disconnected_coins)->first); } if (duplicate_coins.size() > 1 && InsecureRandRange(30)) { stack[insecure_rand() % stack.size()]->Uncache( FindRandomFrom(duplicate_coins)->first); } if (InsecureRandRange(100) == 0) { // Every 100 iterations, flush an intermediate cache if (stack.size() > 1 && InsecureRandBool() == 0) { unsigned int flushIndex = InsecureRandRange(stack.size() - 1); stack[flushIndex]->Flush(); } } if (InsecureRandRange(100) == 0) { // Every 100 iterations, change the cache stack. if (stack.size() > 0 && InsecureRandBool() == 0) { stack.back()->Flush(); delete stack.back(); stack.pop_back(); } if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) { CCoinsView *tip = &base; if (stack.size() > 0) { tip = stack.back(); } stack.push_back(new CCoinsViewCacheTest(tip)); } } } // Clean up the stack. while (stack.size() > 0) { delete stack.back(); stack.pop_back(); } // Verify coverage. 
BOOST_CHECK(spent_a_duplicate_coinbase); } BOOST_AUTO_TEST_CASE(coin_serialization) { // Good example CDataStream ss1( ParseHex("97f23c835800816115944e077fe7c803cfa57f29b36bf87c1d35"), SER_DISK, CLIENT_VERSION); Coin c1; ss1 >> c1; BOOST_CHECK_EQUAL(c1.IsCoinBase(), false); BOOST_CHECK_EQUAL(c1.GetHeight(), 203998U); BOOST_CHECK_EQUAL(c1.GetTxOut().nValue, int64_t(60000000000) * SATOSHI); BOOST_CHECK_EQUAL(HexStr(c1.GetTxOut().scriptPubKey), HexStr(GetScriptForDestination(CKeyID(uint160(ParseHex( "816115944e077fe7c803cfa57f29b36bf87c1d35")))))); // Good example CDataStream ss2( ParseHex("8ddf77bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4"), SER_DISK, CLIENT_VERSION); Coin c2; ss2 >> c2; BOOST_CHECK_EQUAL(c2.IsCoinBase(), true); BOOST_CHECK_EQUAL(c2.GetHeight(), 120891); BOOST_CHECK_EQUAL(c2.GetTxOut().nValue, 110397 * SATOSHI); BOOST_CHECK_EQUAL(HexStr(c2.GetTxOut().scriptPubKey), HexStr(GetScriptForDestination(CKeyID(uint160(ParseHex( "8c988f1a4a4de2161e0f50aac7f17e7f9555caa4")))))); // Smallest possible example CDataStream ss3(ParseHex("000006"), SER_DISK, CLIENT_VERSION); Coin c3; ss3 >> c3; BOOST_CHECK_EQUAL(c3.IsCoinBase(), false); BOOST_CHECK_EQUAL(c3.GetHeight(), 0); BOOST_CHECK_EQUAL(c3.GetTxOut().nValue, Amount::zero()); BOOST_CHECK_EQUAL(c3.GetTxOut().scriptPubKey.size(), 0); // scriptPubKey that ends beyond the end of the stream CDataStream ss4(ParseHex("000007"), SER_DISK, CLIENT_VERSION); try { Coin c4; ss4 >> c4; BOOST_CHECK_MESSAGE(false, "We should have thrown"); } catch (const std::ios_base::failure &e) { } // Very large scriptPubKey (3*10^9 bytes) past the end of the stream CDataStream tmp(SER_DISK, CLIENT_VERSION); uint64_t x = 3000000000ULL; tmp << VARINT(x); BOOST_CHECK_EQUAL(HexStr(tmp.begin(), tmp.end()), "8a95c0bb00"); CDataStream ss5(ParseHex("00008a95c0bb00"), SER_DISK, CLIENT_VERSION); try { Coin c5; ss5 >> c5; BOOST_CHECK_MESSAGE(false, "We should have thrown"); } catch (const std::ios_base::failure &e) { } } static const COutPoint OUTPOINT; static const Amount PRUNED(-1 * SATOSHI); static const Amount ABSENT(-2 * SATOSHI); static const Amount FAIL(-3 * SATOSHI); static const Amount VALUE1(100 * SATOSHI); static const Amount VALUE2(200 * SATOSHI); static const Amount VALUE3(300 * SATOSHI); static const char DIRTY = CCoinsCacheEntry::DIRTY; static const char FRESH = CCoinsCacheEntry::FRESH; static const char NO_ENTRY = -1; static const auto FLAGS = {char(0), FRESH, DIRTY, char(DIRTY | FRESH)}; static const auto CLEAN_FLAGS = {char(0), FRESH}; static const auto ABSENT_FLAGS = {NO_ENTRY}; static void SetCoinValue(const Amount value, Coin &coin) { assert(value != ABSENT); coin.Clear(); assert(coin.IsSpent()); if (value != PRUNED) { CTxOut out; out.nValue = value; coin = Coin(std::move(out), 1, false); assert(!coin.IsSpent()); } } size_t InsertCoinMapEntry(CCoinsMap &map, const Amount value, char flags) { if (value == ABSENT) { assert(flags == NO_ENTRY); return 0; } assert(flags != NO_ENTRY); CCoinsCacheEntry entry; entry.flags = flags; SetCoinValue(value, entry.coin); auto inserted = map.emplace(OUTPOINT, std::move(entry)); assert(inserted.second); return inserted.first->second.coin.DynamicMemoryUsage(); } void GetCoinMapEntry(const CCoinsMap &map, Amount &value, char &flags) { auto it = map.find(OUTPOINT); if (it == map.end()) { value = ABSENT; flags = NO_ENTRY; } else { if (it->second.coin.IsSpent()) { value = PRUNED; } else { value = it->second.coin.GetTxOut().nValue; } flags = it->second.flags; assert(flags != NO_ENTRY); } } void 
WriteCoinViewEntry(CCoinsView &view, const Amount value, char flags) { CCoinsMap map; InsertCoinMapEntry(map, value, flags); view.BatchWrite(map, {}); } class SingleEntryCacheTest { public: SingleEntryCacheTest(const Amount base_value, const Amount cache_value, char cache_flags) { WriteCoinViewEntry(base, base_value, base_value == ABSENT ? NO_ENTRY : DIRTY); cache.usage() += InsertCoinMapEntry(cache.map(), cache_value, cache_flags); } CCoinsView root; CCoinsViewCacheTest base{&root}; CCoinsViewCacheTest cache{&base}; }; void CheckAccessCoin(const Amount base_value, const Amount cache_value, const Amount expected_value, char cache_flags, char expected_flags) { SingleEntryCacheTest test(base_value, cache_value, cache_flags); test.cache.AccessCoin(OUTPOINT); test.cache.SelfTest(); Amount result_value; char result_flags; GetCoinMapEntry(test.cache.map(), result_value, result_flags); BOOST_CHECK_EQUAL(result_value, expected_value); BOOST_CHECK_EQUAL(result_flags, expected_flags); } BOOST_AUTO_TEST_CASE(coin_access) { /* Check AccessCoin behavior, requesting a coin from a cache view layered on * top of a base view, and checking the resulting entry in the cache after * the access. * * Base Cache Result Cache Result * Value Value Value Flags Flags */ CheckAccessCoin(ABSENT, ABSENT, ABSENT, NO_ENTRY, NO_ENTRY); CheckAccessCoin(ABSENT, PRUNED, PRUNED, 0, 0); CheckAccessCoin(ABSENT, PRUNED, PRUNED, FRESH, FRESH); CheckAccessCoin(ABSENT, PRUNED, PRUNED, DIRTY, DIRTY); CheckAccessCoin(ABSENT, PRUNED, PRUNED, DIRTY | FRESH, DIRTY | FRESH); CheckAccessCoin(ABSENT, VALUE2, VALUE2, 0, 0); CheckAccessCoin(ABSENT, VALUE2, VALUE2, FRESH, FRESH); CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY, DIRTY); CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY | FRESH, DIRTY | FRESH); CheckAccessCoin(PRUNED, ABSENT, PRUNED, NO_ENTRY, FRESH); CheckAccessCoin(PRUNED, PRUNED, PRUNED, 0, 0); CheckAccessCoin(PRUNED, PRUNED, PRUNED, FRESH, FRESH); CheckAccessCoin(PRUNED, PRUNED, PRUNED, DIRTY, DIRTY); CheckAccessCoin(PRUNED, PRUNED, PRUNED, DIRTY | FRESH, DIRTY | FRESH); CheckAccessCoin(PRUNED, VALUE2, VALUE2, 0, 0); CheckAccessCoin(PRUNED, VALUE2, VALUE2, FRESH, FRESH); CheckAccessCoin(PRUNED, VALUE2, VALUE2, DIRTY, DIRTY); CheckAccessCoin(PRUNED, VALUE2, VALUE2, DIRTY | FRESH, DIRTY | FRESH); CheckAccessCoin(VALUE1, ABSENT, VALUE1, NO_ENTRY, 0); CheckAccessCoin(VALUE1, PRUNED, PRUNED, 0, 0); CheckAccessCoin(VALUE1, PRUNED, PRUNED, FRESH, FRESH); CheckAccessCoin(VALUE1, PRUNED, PRUNED, DIRTY, DIRTY); CheckAccessCoin(VALUE1, PRUNED, PRUNED, DIRTY | FRESH, DIRTY | FRESH); CheckAccessCoin(VALUE1, VALUE2, VALUE2, 0, 0); CheckAccessCoin(VALUE1, VALUE2, VALUE2, FRESH, FRESH); CheckAccessCoin(VALUE1, VALUE2, VALUE2, DIRTY, DIRTY); CheckAccessCoin(VALUE1, VALUE2, VALUE2, DIRTY | FRESH, DIRTY | FRESH); } void CheckSpendCoin(Amount base_value, Amount cache_value, Amount expected_value, char cache_flags, char expected_flags) { SingleEntryCacheTest test(base_value, cache_value, cache_flags); test.cache.SpendCoin(OUTPOINT); test.cache.SelfTest(); Amount result_value; char result_flags; GetCoinMapEntry(test.cache.map(), result_value, result_flags); BOOST_CHECK_EQUAL(result_value, expected_value); BOOST_CHECK_EQUAL(result_flags, expected_flags); }; BOOST_AUTO_TEST_CASE(coin_spend) { /** * Check SpendCoin behavior, requesting a coin from a cache view layered on * top of a base view, spending, and then checking the resulting entry in * the cache after the modification. 
* * Base Cache Result Cache Result * Value Value Value Flags Flags */ CheckSpendCoin(ABSENT, ABSENT, ABSENT, NO_ENTRY, NO_ENTRY); CheckSpendCoin(ABSENT, PRUNED, PRUNED, 0, DIRTY); CheckSpendCoin(ABSENT, PRUNED, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(ABSENT, PRUNED, PRUNED, DIRTY, DIRTY); CheckSpendCoin(ABSENT, PRUNED, ABSENT, DIRTY | FRESH, NO_ENTRY); CheckSpendCoin(ABSENT, VALUE2, PRUNED, 0, DIRTY); CheckSpendCoin(ABSENT, VALUE2, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(ABSENT, VALUE2, PRUNED, DIRTY, DIRTY); CheckSpendCoin(ABSENT, VALUE2, ABSENT, DIRTY | FRESH, NO_ENTRY); CheckSpendCoin(PRUNED, ABSENT, ABSENT, NO_ENTRY, NO_ENTRY); CheckSpendCoin(PRUNED, PRUNED, PRUNED, 0, DIRTY); CheckSpendCoin(PRUNED, PRUNED, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(PRUNED, PRUNED, PRUNED, DIRTY, DIRTY); CheckSpendCoin(PRUNED, PRUNED, ABSENT, DIRTY | FRESH, NO_ENTRY); CheckSpendCoin(PRUNED, VALUE2, PRUNED, 0, DIRTY); CheckSpendCoin(PRUNED, VALUE2, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(PRUNED, VALUE2, PRUNED, DIRTY, DIRTY); CheckSpendCoin(PRUNED, VALUE2, ABSENT, DIRTY | FRESH, NO_ENTRY); CheckSpendCoin(VALUE1, ABSENT, PRUNED, NO_ENTRY, DIRTY); CheckSpendCoin(VALUE1, PRUNED, PRUNED, 0, DIRTY); CheckSpendCoin(VALUE1, PRUNED, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(VALUE1, PRUNED, PRUNED, DIRTY, DIRTY); CheckSpendCoin(VALUE1, PRUNED, ABSENT, DIRTY | FRESH, NO_ENTRY); CheckSpendCoin(VALUE1, VALUE2, PRUNED, 0, DIRTY); CheckSpendCoin(VALUE1, VALUE2, ABSENT, FRESH, NO_ENTRY); CheckSpendCoin(VALUE1, VALUE2, PRUNED, DIRTY, DIRTY); CheckSpendCoin(VALUE1, VALUE2, ABSENT, DIRTY | FRESH, NO_ENTRY); } void CheckAddCoinBase(Amount base_value, Amount cache_value, Amount modify_value, Amount expected_value, char cache_flags, char expected_flags, bool coinbase) { SingleEntryCacheTest test(base_value, cache_value, cache_flags); Amount result_value; char result_flags; try { CTxOut output; output.nValue = modify_value; test.cache.AddCoin(OUTPOINT, Coin(std::move(output), 1, coinbase), coinbase); test.cache.SelfTest(); GetCoinMapEntry(test.cache.map(), result_value, result_flags); } catch (std::logic_error &e) { result_value = FAIL; result_flags = NO_ENTRY; } BOOST_CHECK_EQUAL(result_value, expected_value); BOOST_CHECK_EQUAL(result_flags, expected_flags); } // Simple wrapper for CheckAddCoinBase function above that loops through // different possible base_values, making sure each one gives the same results. // This wrapper lets the coin_add test below be shorter and less repetitive, // while still verifying that the CoinsViewCache::AddCoin implementation ignores // base values. template void CheckAddCoin(Args &&... args) { for (Amount base_value : {ABSENT, PRUNED, VALUE1}) { CheckAddCoinBase(base_value, std::forward(args)...); } } BOOST_AUTO_TEST_CASE(coin_add) { /** * Check AddCoin behavior, requesting a new coin from a cache view, writing * a modification to the coin, and then checking the resulting entry in the * cache after the modification. Verify behavior with the with the AddCoin * potential_overwrite argument set to false, and to true. 
* * Cache Write Result Cache Result potential_overwrite * Value Value Value Flags Flags */ CheckAddCoin(ABSENT, VALUE3, VALUE3, NO_ENTRY, DIRTY | FRESH, false); CheckAddCoin(ABSENT, VALUE3, VALUE3, NO_ENTRY, DIRTY, true); CheckAddCoin(PRUNED, VALUE3, VALUE3, 0, DIRTY | FRESH, false); CheckAddCoin(PRUNED, VALUE3, VALUE3, 0, DIRTY, true); CheckAddCoin(PRUNED, VALUE3, VALUE3, FRESH, DIRTY | FRESH, false); CheckAddCoin(PRUNED, VALUE3, VALUE3, FRESH, DIRTY | FRESH, true); CheckAddCoin(PRUNED, VALUE3, VALUE3, DIRTY, DIRTY, false); CheckAddCoin(PRUNED, VALUE3, VALUE3, DIRTY, DIRTY, true); CheckAddCoin(PRUNED, VALUE3, VALUE3, DIRTY | FRESH, DIRTY | FRESH, false); CheckAddCoin(PRUNED, VALUE3, VALUE3, DIRTY | FRESH, DIRTY | FRESH, true); CheckAddCoin(VALUE2, VALUE3, FAIL, 0, NO_ENTRY, false); CheckAddCoin(VALUE2, VALUE3, VALUE3, 0, DIRTY, true); CheckAddCoin(VALUE2, VALUE3, FAIL, FRESH, NO_ENTRY, false); CheckAddCoin(VALUE2, VALUE3, VALUE3, FRESH, DIRTY | FRESH, true); CheckAddCoin(VALUE2, VALUE3, FAIL, DIRTY, NO_ENTRY, false); CheckAddCoin(VALUE2, VALUE3, VALUE3, DIRTY, DIRTY, true); CheckAddCoin(VALUE2, VALUE3, FAIL, DIRTY | FRESH, NO_ENTRY, false); CheckAddCoin(VALUE2, VALUE3, VALUE3, DIRTY | FRESH, DIRTY | FRESH, true); } void CheckWriteCoin(Amount parent_value, Amount child_value, Amount expected_value, char parent_flags, char child_flags, char expected_flags) { SingleEntryCacheTest test(ABSENT, parent_value, parent_flags); Amount result_value; char result_flags; try { WriteCoinViewEntry(test.cache, child_value, child_flags); test.cache.SelfTest(); GetCoinMapEntry(test.cache.map(), result_value, result_flags); } catch (std::logic_error &e) { result_value = FAIL; result_flags = NO_ENTRY; } BOOST_CHECK_EQUAL(result_value, expected_value); BOOST_CHECK_EQUAL(result_flags, expected_flags); } BOOST_AUTO_TEST_CASE(coin_write) { /* Check BatchWrite behavior, flushing one entry from a child cache to a * parent cache, and checking the resulting entry in the parent cache * after the write. 
* * Parent Child Result Parent Child Result * Value Value Value Flags Flags Flags */ CheckWriteCoin(ABSENT, ABSENT, ABSENT, NO_ENTRY, NO_ENTRY, NO_ENTRY); CheckWriteCoin(ABSENT, PRUNED, PRUNED, NO_ENTRY, DIRTY, DIRTY); CheckWriteCoin(ABSENT, PRUNED, ABSENT, NO_ENTRY, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(ABSENT, VALUE2, VALUE2, NO_ENTRY, DIRTY, DIRTY); CheckWriteCoin(ABSENT, VALUE2, VALUE2, NO_ENTRY, DIRTY | FRESH, DIRTY | FRESH); CheckWriteCoin(PRUNED, ABSENT, PRUNED, 0, NO_ENTRY, 0); CheckWriteCoin(PRUNED, ABSENT, PRUNED, FRESH, NO_ENTRY, FRESH); CheckWriteCoin(PRUNED, ABSENT, PRUNED, DIRTY, NO_ENTRY, DIRTY); CheckWriteCoin(PRUNED, ABSENT, PRUNED, DIRTY | FRESH, NO_ENTRY, DIRTY | FRESH); CheckWriteCoin(PRUNED, PRUNED, PRUNED, 0, DIRTY, DIRTY); CheckWriteCoin(PRUNED, PRUNED, PRUNED, 0, DIRTY | FRESH, DIRTY); CheckWriteCoin(PRUNED, PRUNED, ABSENT, FRESH, DIRTY, NO_ENTRY); CheckWriteCoin(PRUNED, PRUNED, ABSENT, FRESH, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(PRUNED, PRUNED, PRUNED, DIRTY, DIRTY, DIRTY); CheckWriteCoin(PRUNED, PRUNED, PRUNED, DIRTY, DIRTY | FRESH, DIRTY); CheckWriteCoin(PRUNED, PRUNED, ABSENT, DIRTY | FRESH, DIRTY, NO_ENTRY); CheckWriteCoin(PRUNED, PRUNED, ABSENT, DIRTY | FRESH, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(PRUNED, VALUE2, VALUE2, 0, DIRTY, DIRTY); CheckWriteCoin(PRUNED, VALUE2, VALUE2, 0, DIRTY | FRESH, DIRTY); CheckWriteCoin(PRUNED, VALUE2, VALUE2, FRESH, DIRTY, DIRTY | FRESH); CheckWriteCoin(PRUNED, VALUE2, VALUE2, FRESH, DIRTY | FRESH, DIRTY | FRESH); CheckWriteCoin(PRUNED, VALUE2, VALUE2, DIRTY, DIRTY, DIRTY); CheckWriteCoin(PRUNED, VALUE2, VALUE2, DIRTY, DIRTY | FRESH, DIRTY); CheckWriteCoin(PRUNED, VALUE2, VALUE2, DIRTY | FRESH, DIRTY, DIRTY | FRESH); CheckWriteCoin(PRUNED, VALUE2, VALUE2, DIRTY | FRESH, DIRTY | FRESH, DIRTY | FRESH); CheckWriteCoin(VALUE1, ABSENT, VALUE1, 0, NO_ENTRY, 0); CheckWriteCoin(VALUE1, ABSENT, VALUE1, FRESH, NO_ENTRY, FRESH); CheckWriteCoin(VALUE1, ABSENT, VALUE1, DIRTY, NO_ENTRY, DIRTY); CheckWriteCoin(VALUE1, ABSENT, VALUE1, DIRTY | FRESH, NO_ENTRY, DIRTY | FRESH); CheckWriteCoin(VALUE1, PRUNED, PRUNED, 0, DIRTY, DIRTY); CheckWriteCoin(VALUE1, PRUNED, FAIL, 0, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, PRUNED, ABSENT, FRESH, DIRTY, NO_ENTRY); CheckWriteCoin(VALUE1, PRUNED, FAIL, FRESH, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, PRUNED, PRUNED, DIRTY, DIRTY, DIRTY); CheckWriteCoin(VALUE1, PRUNED, FAIL, DIRTY, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, PRUNED, ABSENT, DIRTY | FRESH, DIRTY, NO_ENTRY); CheckWriteCoin(VALUE1, PRUNED, FAIL, DIRTY | FRESH, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, VALUE2, VALUE2, 0, DIRTY, DIRTY); CheckWriteCoin(VALUE1, VALUE2, FAIL, 0, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, VALUE2, VALUE2, FRESH, DIRTY, DIRTY | FRESH); CheckWriteCoin(VALUE1, VALUE2, FAIL, FRESH, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, VALUE2, VALUE2, DIRTY, DIRTY, DIRTY); CheckWriteCoin(VALUE1, VALUE2, FAIL, DIRTY, DIRTY | FRESH, NO_ENTRY); CheckWriteCoin(VALUE1, VALUE2, VALUE2, DIRTY | FRESH, DIRTY, DIRTY | FRESH); CheckWriteCoin(VALUE1, VALUE2, FAIL, DIRTY | FRESH, DIRTY | FRESH, NO_ENTRY); // The checks above omit cases where the child flags are not DIRTY, since // they would be too repetitive (the parent cache is never updated in these // cases). The loop below covers these cases and makes sure the parent cache // is always left unchanged. 
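// (CCoinsViewCache::BatchWrite skips entries that are not flagged DIRTY --
// the same optimization CCoinsViewTest::BatchWrite mimics above -- so a clean
// child entry is expected to leave the parent's value and flags untouched.)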
for (Amount parent_value : {ABSENT, PRUNED, VALUE1}) { for (Amount child_value : {ABSENT, PRUNED, VALUE2}) { for (char parent_flags : parent_value == ABSENT ? ABSENT_FLAGS : FLAGS) { for (char child_flags : child_value == ABSENT ? ABSENT_FLAGS : CLEAN_FLAGS) { CheckWriteCoin(parent_value, child_value, parent_value, parent_flags, child_flags, parent_flags); } } } } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index f7a3a65291..f9a047eb88 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -1,309 +1,309 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "dbwrapper.h" #include "random.h" #include "test/test_bitcoin.h" #include "uint256.h" #include // Test if a string consists entirely of null characters bool is_null_key(const std::vector &key) { bool isnull = true; for (unsigned int i = 0; i < key.size(); i++) isnull &= (key[i] == '\x00'); return isnull; } BOOST_FIXTURE_TEST_SUITE(dbwrapper_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(dbwrapper) { // Perform tests both obfuscated and non-obfuscated. for (bool obfuscate : {false, true}) { fs::path ph = fs::temp_directory_path() / fs::unique_path(); CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate); char key = 'k'; uint256 in = InsecureRand256(); uint256 res; // Ensure that we're doing real obfuscation when obfuscate=true BOOST_CHECK(obfuscate != is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); BOOST_CHECK(dbw.Write(key, in)); BOOST_CHECK(dbw.Read(key, res)); BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); } } // Test batch operations BOOST_AUTO_TEST_CASE(dbwrapper_batch) { // Perform tests both obfuscated and non-obfuscated. for (bool obfuscate : {false, true}) { fs::path ph = fs::temp_directory_path() / fs::unique_path(); CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate); char key = 'i'; uint256 in = InsecureRand256(); char key2 = 'j'; uint256 in2 = InsecureRand256(); char key3 = 'k'; uint256 in3 = InsecureRand256(); uint256 res; CDBBatch batch(dbw); batch.Write(key, in); batch.Write(key2, in2); batch.Write(key3, in3); // Remove key3 before it's even been written batch.Erase(key3); dbw.WriteBatch(batch); BOOST_CHECK(dbw.Read(key, res)); BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); BOOST_CHECK(dbw.Read(key2, res)); BOOST_CHECK_EQUAL(res.ToString(), in2.ToString()); // key3 should've never been written BOOST_CHECK(dbw.Read(key3, res) == false); } } BOOST_AUTO_TEST_CASE(dbwrapper_iterator) { // Perform tests both obfuscated and non-obfuscated. 
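// (Obfuscation here means CDBWrapper XORs stored values against a random key
// that is itself persisted inside the database; the tests use
// dbwrapper_private::GetObfuscateKey() to check whether that key is all
// zeroes.)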
for (bool obfuscate : {false, true}) { fs::path ph = fs::temp_directory_path() / fs::unique_path(); CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate); // The two keys are intentionally chosen for ordering char key = 'j'; uint256 in = InsecureRand256(); BOOST_CHECK(dbw.Write(key, in)); char key2 = 'k'; uint256 in2 = InsecureRand256(); BOOST_CHECK(dbw.Write(key2, in2)); std::unique_ptr it( const_cast(dbw).NewIterator()); // Be sure to seek past the obfuscation key (if it exists) it->Seek(key); char key_res; uint256 val_res; it->GetKey(key_res); it->GetValue(val_res); BOOST_CHECK_EQUAL(key_res, key); BOOST_CHECK_EQUAL(val_res.ToString(), in.ToString()); it->Next(); it->GetKey(key_res); it->GetValue(val_res); BOOST_CHECK_EQUAL(key_res, key2); BOOST_CHECK_EQUAL(val_res.ToString(), in2.ToString()); it->Next(); BOOST_CHECK_EQUAL(it->Valid(), false); } } // Test that we do not obfuscation if there is existing data. BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) { // We're going to share this fs::path between two wrappers fs::path ph = fs::temp_directory_path() / fs::unique_path(); create_directories(ph); // Set up a non-obfuscated wrapper to write some initial data. CDBWrapper *dbw = new CDBWrapper(ph, (1 << 10), false, false, false); char key = 'k'; uint256 in = InsecureRand256(); uint256 res; BOOST_CHECK(dbw->Write(key, in)); BOOST_CHECK(dbw->Read(key, res)); BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); // Call the destructor to free leveldb LOCK delete dbw; // Now, set up another wrapper that wants to obfuscate the same directory CDBWrapper odbw(ph, (1 << 10), false, false, true); // Check that the key/val we wrote with unobfuscated wrapper exists and // is readable. uint256 res2; BOOST_CHECK(odbw.Read(key, res2)); BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); // There should be existing data BOOST_CHECK(!odbw.IsEmpty()); // The key should be an empty string BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); uint256 in2 = InsecureRand256(); uint256 res3; // Check that we can write successfully BOOST_CHECK(odbw.Write(key, in2)); BOOST_CHECK(odbw.Read(key, res3)); BOOST_CHECK_EQUAL(res3.ToString(), in2.ToString()); } // Ensure that we start obfuscating during a reindex. BOOST_AUTO_TEST_CASE(existing_data_reindex) { // We're going to share this fs::path between two wrappers fs::path ph = fs::temp_directory_path() / fs::unique_path(); create_directories(ph); // Set up a non-obfuscated wrapper to write some initial data. 
CDBWrapper *dbw = new CDBWrapper(ph, (1 << 10), false, false, false); char key = 'k'; uint256 in = InsecureRand256(); uint256 res; BOOST_CHECK(dbw->Write(key, in)); BOOST_CHECK(dbw->Read(key, res)); BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); // Call the destructor to free leveldb LOCK delete dbw; // Simulate a -reindex by wiping the existing data store CDBWrapper odbw(ph, (1 << 10), false, true, true); // Check that the key/val we wrote with unobfuscated wrapper doesn't exist uint256 res2; BOOST_CHECK(!odbw.Read(key, res2)); BOOST_CHECK(!is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); uint256 in2 = InsecureRand256(); uint256 res3; // Check that we can write successfully BOOST_CHECK(odbw.Write(key, in2)); BOOST_CHECK(odbw.Read(key, res3)); BOOST_CHECK_EQUAL(res3.ToString(), in2.ToString()); } BOOST_AUTO_TEST_CASE(iterator_ordering) { fs::path ph = fs::temp_directory_path() / fs::unique_path(); CDBWrapper dbw(ph, (1 << 20), true, false, false); for (int x = 0x00; x < 256; ++x) { uint8_t key = x; uint32_t value = x * x; BOOST_CHECK(dbw.Write(key, value)); } std::unique_ptr it( const_cast(dbw).NewIterator()); for (int seek_start : {0x00, 0x80}) { it->Seek((uint8_t)seek_start); for (int x = seek_start; x < 256; ++x) { uint8_t key; uint32_t value; BOOST_CHECK(it->Valid()); // Avoid spurious errors about invalid iterator's key and value in // case of failure if (!it->Valid()) break; BOOST_CHECK(it->GetKey(key)); BOOST_CHECK(it->GetValue(value)); BOOST_CHECK_EQUAL(key, x); BOOST_CHECK_EQUAL(value, x * x); it->Next(); } BOOST_CHECK(!it->Valid()); } } struct StringContentsSerializer { // Used to make two serialized objects the same while letting them have a // different lengths. This is a terrible idea. std::string str; StringContentsSerializer() {} - StringContentsSerializer(const std::string &inp) : str(inp) {} + explicit StringContentsSerializer(const std::string &inp) : str(inp) {} StringContentsSerializer &operator+=(const std::string &s) { str += s; return *this; } StringContentsSerializer &operator+=(const StringContentsSerializer &s) { return *this += s.str; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { if (ser_action.ForRead()) { str.clear(); char c = 0; while (true) { try { READWRITE(c); str.push_back(c); } catch (const std::ios_base::failure &e) { break; } } } else { for (size_t i = 0; i < str.size(); i++) READWRITE(str[i]); } } }; BOOST_AUTO_TEST_CASE(iterator_string_ordering) { char buf[10]; fs::path ph = fs::temp_directory_path() / fs::unique_path(); CDBWrapper dbw(ph, (1 << 20), true, false, false); for (int x = 0x00; x < 10; ++x) { for (int y = 0; y < 10; y++) { sprintf(buf, "%d", x); StringContentsSerializer key(buf); for (int z = 0; z < y; z++) key += key; uint32_t value = x * x; BOOST_CHECK(dbw.Write(key, value)); } } std::unique_ptr it( const_cast(dbw).NewIterator()); for (int seek_start : {0, 5}) { sprintf(buf, "%d", seek_start); StringContentsSerializer seek_key(buf); it->Seek(seek_key); for (int x = seek_start; x < 10; ++x) { for (int y = 0; y < 10; y++) { sprintf(buf, "%d", x); std::string exp_key(buf); for (int z = 0; z < y; z++) exp_key += exp_key; StringContentsSerializer key; uint32_t value; BOOST_CHECK(it->Valid()); // Avoid spurious errors about invalid iterator's key and value // in case of failure if (!it->Valid()) break; BOOST_CHECK(it->GetKey(key)); BOOST_CHECK(it->GetValue(value)); BOOST_CHECK_EQUAL(key.str, exp_key); BOOST_CHECK_EQUAL(value, x * x); it->Next(); } } BOOST_CHECK(!it->Valid()); } } 
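// The explicit keyword added to StringContentsSerializer's constructor above
// (and to the other single-argument constructors touched by this change)
// rules out accidental implicit conversions; a sketch of the difference:
//
//     StringContentsSerializer key(buf);                // direct-init, as used
//                                                       // above: still fine
//     StringContentsSerializer key2 = std::string("0"); // copy-init via an
//                                                       // implicit conversion:
//                                                       // rejected once explicit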
BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/test_bitcoin.h b/src/test/test_bitcoin.h index d0b5a68d03..333a6ba729 100644 --- a/src/test/test_bitcoin.h +++ b/src/test/test_bitcoin.h @@ -1,154 +1,156 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TEST_TEST_BITCOIN_H #define BITCOIN_TEST_TEST_BITCOIN_H #include "chainparamsbase.h" #include "fs.h" #include "key.h" #include "pubkey.h" #include "random.h" #include "scheduler.h" #include "txdb.h" #include "txmempool.h" #include extern uint256 insecure_rand_seed; extern FastRandomContext insecure_rand_ctx; static inline void SeedInsecureRand(bool fDeterministic = false) { if (fDeterministic) { insecure_rand_seed = uint256(); } else { insecure_rand_seed = GetRandHash(); } insecure_rand_ctx = FastRandomContext(insecure_rand_seed); } static inline uint32_t insecure_rand() { return insecure_rand_ctx.rand32(); } static inline uint256 InsecureRand256() { return insecure_rand_ctx.rand256(); } static inline uint64_t InsecureRandBits(int bits) { return insecure_rand_ctx.randbits(bits); } static inline uint64_t InsecureRandRange(uint64_t range) { return insecure_rand_ctx.randrange(range); } static inline bool InsecureRandBool() { return insecure_rand_ctx.randbool(); } static inline std::vector InsecureRandBytes(size_t len) { return insecure_rand_ctx.randbytes(len); } /** * Basic testing setup. * This just configures logging and chain parameters. */ struct BasicTestingSetup { ECCVerifyHandle globalVerifyHandle; - BasicTestingSetup(const std::string &chainName = CBaseChainParams::MAIN); + explicit BasicTestingSetup( + const std::string &chainName = CBaseChainParams::MAIN); ~BasicTestingSetup(); }; /** Testing setup that configures a complete environment. * Included are data directory, coins database, script check threads setup. */ class CConnman; class CNode; struct CConnmanTest { static void AddNode(CNode &node); static void ClearNodes(); }; class PeerLogicValidation; struct TestingSetup : public BasicTestingSetup { fs::path pathTemp; boost::thread_group threadGroup; CConnman *connman; CScheduler scheduler; std::unique_ptr peerLogic; - TestingSetup(const std::string &chainName = CBaseChainParams::MAIN); + explicit TestingSetup( + const std::string &chainName = CBaseChainParams::MAIN); ~TestingSetup(); }; class CBlock; class CMutableTransaction; class CScript; // // Testing fixture that pre-creates a // 100-block REGTEST-mode block chain // struct TestChain100Setup : public TestingSetup { TestChain100Setup(); // Create a new block with just given transactions, coinbase paying to // scriptPubKey, and try to add it to the current chain. CBlock CreateAndProcessBlock(const std::vector &txns, const CScript &scriptPubKey); ~TestChain100Setup(); // For convenience, coinbase transactions. std::vector coinbaseTxns; // private/public key needed to spend coinbase transactions. 
CKey coinbaseKey; }; class CTxMemPoolEntry; class CTxMemPool; struct TestMemPoolEntryHelper { // Default values Amount nFee; int64_t nTime; double dPriority; unsigned int nHeight; bool spendsCoinbase; unsigned int sigOpCost; LockPoints lp; TestMemPoolEntryHelper() : nFee(), nTime(0), dPriority(0.0), nHeight(1), spendsCoinbase(false), sigOpCost(4) {} CTxMemPoolEntry FromTx(const CMutableTransaction &tx, CTxMemPool *pool = nullptr); CTxMemPoolEntry FromTx(const CTransaction &tx, CTxMemPool *pool = nullptr); // Change the default value TestMemPoolEntryHelper &Fee(Amount _fee) { nFee = _fee; return *this; } TestMemPoolEntryHelper &Time(int64_t _time) { nTime = _time; return *this; } TestMemPoolEntryHelper &Priority(double _priority) { dPriority = _priority; return *this; } TestMemPoolEntryHelper &Height(unsigned int _height) { nHeight = _height; return *this; } TestMemPoolEntryHelper &SpendsCoinbase(bool _flag) { spendsCoinbase = _flag; return *this; } TestMemPoolEntryHelper &SigOpsCost(unsigned int _sigopsCost) { sigOpCost = _sigopsCost; return *this; } }; #endif diff --git a/src/tinyformat.h b/src/tinyformat.h index 19e8accae4..a05cd22af3 100644 --- a/src/tinyformat.h +++ b/src/tinyformat.h @@ -1,1076 +1,1077 @@ // tinyformat.h // Copyright (C) 2011, Chris Foster [chris42f (at) gmail (d0t) com] // // Boost Software License - Version 1.0 // // Permission is hereby granted, free of charge, to any person or organization // obtaining a copy of the software and accompanying documentation covered by // this license (the "Software") to use, reproduce, display, distribute, // execute, and transmit the Software, and to prepare derivative works of the // Software, and to permit third-parties to whom the Software is furnished to // do so, all subject to the following: // // The copyright notices in the Software and this entire statement, including // the above license grant, this restriction and the following disclaimer, // must be included in all copies of the Software, in whole or in part, and // all derivative works of the Software, unless such copies or derivative // works are solely in the form of machine-executable object code generated by // a source language processor. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT // SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE // FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. //------------------------------------------------------------------------------ // Tinyformat: A minimal type safe printf replacement // // tinyformat.h is a type safe printf replacement library in a single C++ // header file. Design goals include: // // * Type safety and extensibility for user defined types. // * C99 printf() compatibility, to the extent possible using std::ostream // * Simplicity and minimalism. A single header file to include and distribute // with your projects. 
// * Augment rather than replace the standard stream formatting mechanism // * C++98 support, with optional C++11 niceties // // // Main interface example usage // ---------------------------- // // To print a date to std::cout: // // std::string weekday = "Wednesday"; // const char* month = "July"; // size_t day = 27; // long hour = 14; // int min = 44; // // tfm::printf("%s, %s %d, %.2d:%.2d\n", weekday, month, day, hour, min); // // The strange types here emphasize the type safety of the interface; it is // possible to print a std::string using the "%s" conversion, and a // size_t using the "%d" conversion. A similar result could be achieved // using either of the tfm::format() functions. One prints on a user provided // stream: // // tfm::format(std::cerr, "%s, %s %d, %.2d:%.2d\n", // weekday, month, day, hour, min); // // The other returns a std::string: // // std::string date = tfm::format("%s, %s %d, %.2d:%.2d\n", // weekday, month, day, hour, min); // std::cout << date; // // These are the three primary interface functions. There is also a // convenience function printfln() which appends a newline to the usual result // of printf() for super simple logging. // // // User defined format functions // ----------------------------- // // Simulating variadic templates in C++98 is pretty painful since it requires // writing out the same function for each desired number of arguments. To make // this bearable tinyformat comes with a set of macros which are used // internally to generate the API, but which may also be used in user code. // // The three macros TINYFORMAT_ARGTYPES(n), TINYFORMAT_VARARGS(n) and // TINYFORMAT_PASSARGS(n) will generate a list of n argument types, // type/name pairs and argument names respectively when called with an integer // n between 1 and 16. We can use these to define a macro which generates the // desired user defined function with n arguments. To generate all 16 user // defined function bodies, use the macro TINYFORMAT_FOREACH_ARGNUM. For an // example, see the implementation of printf() at the end of the source file. // // Sometimes it's useful to be able to pass a list of format arguments through // to a non-template function. The FormatList class is provided as a way to do // this by storing the argument list in a type-opaque way. Continuing the // example from above, we construct a FormatList using makeFormatList(): // // FormatListRef formatList = tfm::makeFormatList(weekday, month, day, hour, // min); // // The format list can now be passed into any non-template function and used // via a call to the vformat() function: // // tfm::vformat(std::cout, "%s, %s %d, %.2d:%.2d\n", formatList); // // // Additional API information // -------------------------- // // Error handling: Define TINYFORMAT_ERROR to customize the error handling for // format strings which are unsupported or have the wrong number of format // specifiers (calls assert() by default). // // User defined types: Uses operator<< for user defined types by default. // Overload formatValue() for more control. #ifndef TINYFORMAT_H_INCLUDED #define TINYFORMAT_H_INCLUDED namespace tinyformat {} //------------------------------------------------------------------------------ // Config section. Customize to your liking! // Namespace alias to encourage brevity namespace tfm = tinyformat; // Error handling; calls assert() by default. #define TINYFORMAT_ERROR(reasonString) throw std::runtime_error(reasonString) // Define for C++11 variadic templates which make the code shorter & more // general. 
If you don't define this, C++11 support is autodetected below. #define TINYFORMAT_USE_VARIADIC_TEMPLATES //------------------------------------------------------------------------------ // Implementation details. #include #include #include #include #include #ifndef TINYFORMAT_ERROR #define TINYFORMAT_ERROR(reason) assert(0 && reason) #endif #if !defined(TINYFORMAT_USE_VARIADIC_TEMPLATES) && \ !defined(TINYFORMAT_NO_VARIADIC_TEMPLATES) #ifdef __GXX_EXPERIMENTAL_CXX0X__ #define TINYFORMAT_USE_VARIADIC_TEMPLATES #endif #endif #if defined(__GLIBCXX__) && __GLIBCXX__ < 20080201 // std::showpos is broken on old libstdc++ as provided with OSX. See // http://gcc.gnu.org/ml/libstdc++/2007-11/msg00075.html #define TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND #endif #ifdef __APPLE__ // Workaround OSX linker warning: xcode uses different default symbol // visibilities for static libs vs executables (see issue #25) #define TINYFORMAT_HIDDEN __attribute__((visibility("hidden"))) #else #define TINYFORMAT_HIDDEN #endif namespace tinyformat { //------------------------------------------------------------------------------ namespace detail { // Test whether type T1 is convertible to type T2 template struct is_convertible { private: // two types of different size struct fail { char dummy[2]; }; struct succeed { char dummy; }; // Try to convert a T1 to a T2 by plugging into tryConvert static fail tryConvert(...); static succeed tryConvert(const T2 &); static const T1 &makeT1(); public: #ifdef _MSC_VER // Disable spurious loss of precision warnings in tryConvert(makeT1()) #pragma warning(push) #pragma warning(disable : 4244) #pragma warning(disable : 4267) #endif // Standard trick: the (...) version of tryConvert will be chosen from // the overload set only if the version taking a T2 doesn't match. Then // we compare the sizes of the return types to check which function // matched. Very neat, in a disgusting kind of way :) static const bool value = sizeof(tryConvert(makeT1())) == sizeof(succeed); #ifdef _MSC_VER #pragma warning(pop) #endif }; // Detect when a type is not a wchar_t string template struct is_wchar { typedef int tinyformat_wchar_is_not_supported; }; template <> struct is_wchar {}; template <> struct is_wchar {}; template struct is_wchar {}; template struct is_wchar {}; // Format the value by casting to type fmtT. This default implementation // should never be called. template ::value> struct formatValueAsType { static void invoke(std::ostream & /*out*/, const T & /*value*/) { assert(0); } }; // Specialized version for types that can actually be converted to fmtT, as // indicated by the "convertible" template parameter. template struct formatValueAsType { static void invoke(std::ostream &out, const T &value) { out << static_cast(value); } }; #ifdef TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND template ::value> struct formatZeroIntegerWorkaround { static bool invoke(std::ostream & /**/, const T & /**/) { return false; } }; template struct formatZeroIntegerWorkaround { static bool invoke(std::ostream &out, const T &value) { if (static_cast(value) == 0 && out.flags() & std::ios::showpos) { out << "+0"; return true; } return false; } }; #endif // TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND // Convert an arbitrary type to integer. The version with convertible=false // throws an error. 
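// Illustration (added, not part of the original header): convertToInt backs
// variable width/precision conversions such as "%*d" or "%.*f", where the
// width or precision is pulled from the argument list at runtime. Assuming the
// usual tinyformat entry points declared later in this header:
//
//   tfm::format("%*d", 6, 42);       // width read from the 6       -> "    42"
//   tfm::format("%.*f", 2, 3.14159); // precision read from the 2   -> "3.14"
//
// An argument that is not convertible to int in that position hits the
// TINYFORMAT_ERROR branch of the non-convertible case below.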
template ::value> struct convertToInt { static int invoke(const T & /*value*/) { TINYFORMAT_ERROR("tinyformat: Cannot convert from argument type to " "integer for use as variable width or precision"); return 0; } }; // Specialization for convertToInt when conversion is possible template struct convertToInt { static int invoke(const T &value) { return static_cast(value); } }; // Format at most ntrunc characters to the given stream. template inline void formatTruncated(std::ostream &out, const T &value, int ntrunc) { std::ostringstream tmp; tmp << value; std::string result = tmp.str(); out.write(result.c_str(), (std::min)(ntrunc, static_cast(result.size()))); } #define TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(type) \ inline void formatTruncated(std::ostream &out, type *value, int ntrunc) { \ std::streamsize len = 0; \ while (len < ntrunc && value[len] != 0) \ ++len; \ out.write(value, len); \ } // Overload for const char* and char*. Could overload for signed & unsigned // char too, but these are technically unneeded for printf compatibility. TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(const char) TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(char) #undef TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR } // namespace detail //------------------------------------------------------------------------------ // Variable formatting functions. May be overridden for user-defined types if // desired. /// Format a value into a stream, delegating to operator<< by default. /// /// Users may override this for their own types. When this function is called, /// the stream flags will have been modified according to the format string. /// The format specification is provided in the range [fmtBegin, fmtEnd). For /// truncating conversions, ntrunc is set to the desired maximum number of /// characters, for example "%.7s" calls formatValue with ntrunc = 7. /// /// By default, formatValue() uses the usual stream insertion operator /// operator<< to format the type T, with special cases for the %c and %p /// conversions. template inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, const char *fmtEnd, int ntrunc, const T &value) { #ifndef TINYFORMAT_ALLOW_WCHAR_STRINGS // Since we don't support printing of wchar_t using "%ls", make it fail at // compile time in preference to printing as a void* at runtime. typedef typename detail::is_wchar::tinyformat_wchar_is_not_supported DummyType; (void)DummyType(); // avoid unused type warning with gcc-4.8 #endif // The mess here is to support the %c and %p conversions: if these // conversions are active we try to convert the type to a char or const // void* respectively and format that instead of the value itself. For the // %p conversion it's important to avoid dereferencing the pointer, which // could otherwise lead to a crash when printing a dangling (const char*). const bool canConvertToChar = detail::is_convertible::value; const bool canConvertToVoidPtr = detail::is_convertible::value; if (canConvertToChar && *(fmtEnd - 1) == 'c') detail::formatValueAsType::invoke(out, value); else if (canConvertToVoidPtr && *(fmtEnd - 1) == 'p') detail::formatValueAsType::invoke(out, value); #ifdef TINYFORMAT_OLD_LIBSTDCPLUSPLUS_WORKAROUND else if (detail::formatZeroIntegerWorkaround::invoke(out, value)) /**/ ; #endif else if (ntrunc >= 0) { // Take care not to overread C strings in truncating conversions like // "%.4s" where at most 4 characters may be read. 
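// Added illustration (not in the original source): via the truncating path
// below, tfm::format("%.4s", "tinyformat") yields "tiny". For C strings,
// formatTruncated() stops scanning at ntrunc characters even when no
// terminating NUL has been seen yet, so at most 4 bytes are read here.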
detail::formatTruncated(out, value, ntrunc); } else out << value; } // Overloaded version for char types to support printing as an integer #define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \ inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, \ const char *fmtEnd, int /**/, charType value) { \ switch (*(fmtEnd - 1)) { \ case 'u': \ case 'd': \ case 'i': \ case 'o': \ case 'X': \ case 'x': \ out << static_cast(value); \ break; \ default: \ out << value; \ break; \ } \ } // per 3.9.1: char, signed char and uint8_t are all distinct types TINYFORMAT_DEFINE_FORMATVALUE_CHAR(char) TINYFORMAT_DEFINE_FORMATVALUE_CHAR(signed char) TINYFORMAT_DEFINE_FORMATVALUE_CHAR(uint8_t) #undef TINYFORMAT_DEFINE_FORMATVALUE_CHAR //------------------------------------------------------------------------------ // Tools for emulating variadic templates in C++98. The basic idea here is // stolen from the boost preprocessor metaprogramming library and cut down to // be just general enough for what we need. #define TINYFORMAT_ARGTYPES(n) TINYFORMAT_ARGTYPES_##n #define TINYFORMAT_VARARGS(n) TINYFORMAT_VARARGS_##n #define TINYFORMAT_PASSARGS(n) TINYFORMAT_PASSARGS_##n #define TINYFORMAT_PASSARGS_TAIL(n) TINYFORMAT_PASSARGS_TAIL_##n // To keep it as transparent as possible, the macros below have been generated // using python via the excellent cog.py code generation script. This avoids // the need for a bunch of complex (but more general) preprocessor tricks as // used in boost.preprocessor. // // To rerun the code generation in place, use `cog.py -r tinyformat.h` // (see http://nedbatchelder.com/code/cog). Alternatively you can just create // extra versions by hand. /*[[[cog maxParams = 16 def makeCommaSepLists(lineTemplate, elemTemplate, startInd=1): for j in range(startInd,maxParams+1): list = ', '.join([elemTemplate % {'i':i} for i in range(startInd,j+1)]) cog.outl(lineTemplate % {'j':j, 'list':list}) makeCommaSepLists('#define TINYFORMAT_ARGTYPES_%(j)d %(list)s', 'class T%(i)d') cog.outl() makeCommaSepLists('#define TINYFORMAT_VARARGS_%(j)d %(list)s', 'const T%(i)d& v%(i)d') cog.outl() makeCommaSepLists('#define TINYFORMAT_PASSARGS_%(j)d %(list)s', 'v%(i)d') cog.outl() cog.outl('#define TINYFORMAT_PASSARGS_TAIL_1') makeCommaSepLists('#define TINYFORMAT_PASSARGS_TAIL_%(j)d , %(list)s', 'v%(i)d', startInd = 2) cog.outl() cog.outl('#define TINYFORMAT_FOREACH_ARGNUM(m) \\\n ' + ' '.join(['m(%d)' % (j,) for j in range(1,maxParams+1)])) ]]]*/ #define TINYFORMAT_ARGTYPES_1 class T1 #define TINYFORMAT_ARGTYPES_2 class T1, class T2 #define TINYFORMAT_ARGTYPES_3 class T1, class T2, class T3 #define TINYFORMAT_ARGTYPES_4 class T1, class T2, class T3, class T4 #define TINYFORMAT_ARGTYPES_5 class T1, class T2, class T3, class T4, class T5 #define TINYFORMAT_ARGTYPES_6 \ class T1, class T2, class T3, class T4, class T5, class T6 #define TINYFORMAT_ARGTYPES_7 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7 #define TINYFORMAT_ARGTYPES_8 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8 #define TINYFORMAT_ARGTYPES_9 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9 #define TINYFORMAT_ARGTYPES_10 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10 #define TINYFORMAT_ARGTYPES_11 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10, class T11 #define TINYFORMAT_ARGTYPES_12 \ class T1, class T2, class T3, class T4, 
class T5, class T6, class T7, \ class T8, class T9, class T10, class T11, class T12 #define TINYFORMAT_ARGTYPES_13 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10, class T11, class T12, class T13 #define TINYFORMAT_ARGTYPES_14 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10, class T11, class T12, class T13, \ class T14 #define TINYFORMAT_ARGTYPES_15 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10, class T11, class T12, class T13, \ class T14, class T15 #define TINYFORMAT_ARGTYPES_16 \ class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ class T8, class T9, class T10, class T11, class T12, class T13, \ class T14, class T15, class T16 #define TINYFORMAT_VARARGS_1 const T1 &v1 #define TINYFORMAT_VARARGS_2 const T1 &v1, const T2 &v2 #define TINYFORMAT_VARARGS_3 const T1 &v1, const T2 &v2, const T3 &v3 #define TINYFORMAT_VARARGS_4 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4 #define TINYFORMAT_VARARGS_5 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5 #define TINYFORMAT_VARARGS_6 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6 #define TINYFORMAT_VARARGS_7 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7 #define TINYFORMAT_VARARGS_8 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8 #define TINYFORMAT_VARARGS_9 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9 #define TINYFORMAT_VARARGS_10 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10 #define TINYFORMAT_VARARGS_11 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11 #define TINYFORMAT_VARARGS_12 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11, const T12 &v12 #define TINYFORMAT_VARARGS_13 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11, const T12 &v12, const T13 &v13 #define TINYFORMAT_VARARGS_14 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11, const T12 &v12, const T13 &v13, \ const T14 &v14 #define TINYFORMAT_VARARGS_15 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11, const T12 &v12, const T13 &v13, \ const T14 &v14, const T15 &v15 #define TINYFORMAT_VARARGS_16 \ const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, \ const T10 &v10, const T11 &v11, const T12 &v12, const T13 &v13, \ const T14 &v14, const T15 &v15, const T16 &v16 #define TINYFORMAT_PASSARGS_1 v1 #define TINYFORMAT_PASSARGS_2 v1, v2 #define TINYFORMAT_PASSARGS_3 v1, v2, v3 #define TINYFORMAT_PASSARGS_4 v1, v2, v3, v4 #define TINYFORMAT_PASSARGS_5 v1, v2, v3, v4, v5 #define TINYFORMAT_PASSARGS_6 v1, v2, 
v3, v4, v5, v6 #define TINYFORMAT_PASSARGS_7 v1, v2, v3, v4, v5, v6, v7 #define TINYFORMAT_PASSARGS_8 v1, v2, v3, v4, v5, v6, v7, v8 #define TINYFORMAT_PASSARGS_9 v1, v2, v3, v4, v5, v6, v7, v8, v9 #define TINYFORMAT_PASSARGS_10 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10 #define TINYFORMAT_PASSARGS_11 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 #define TINYFORMAT_PASSARGS_12 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 #define TINYFORMAT_PASSARGS_13 \ v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13 #define TINYFORMAT_PASSARGS_14 \ v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14 #define TINYFORMAT_PASSARGS_15 \ v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 #define TINYFORMAT_PASSARGS_16 \ v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16 #define TINYFORMAT_PASSARGS_TAIL_1 #define TINYFORMAT_PASSARGS_TAIL_2 , v2 #define TINYFORMAT_PASSARGS_TAIL_3 , v2, v3 #define TINYFORMAT_PASSARGS_TAIL_4 , v2, v3, v4 #define TINYFORMAT_PASSARGS_TAIL_5 , v2, v3, v4, v5 #define TINYFORMAT_PASSARGS_TAIL_6 , v2, v3, v4, v5, v6 #define TINYFORMAT_PASSARGS_TAIL_7 , v2, v3, v4, v5, v6, v7 #define TINYFORMAT_PASSARGS_TAIL_8 , v2, v3, v4, v5, v6, v7, v8 #define TINYFORMAT_PASSARGS_TAIL_9 , v2, v3, v4, v5, v6, v7, v8, v9 #define TINYFORMAT_PASSARGS_TAIL_10 , v2, v3, v4, v5, v6, v7, v8, v9, v10 #define TINYFORMAT_PASSARGS_TAIL_11 , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 #define TINYFORMAT_PASSARGS_TAIL_12 \ , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 #define TINYFORMAT_PASSARGS_TAIL_13 \ , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13 #define TINYFORMAT_PASSARGS_TAIL_14 \ , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14 #define TINYFORMAT_PASSARGS_TAIL_15 \ , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 #define TINYFORMAT_PASSARGS_TAIL_16 \ , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16 #define TINYFORMAT_FOREACH_ARGNUM(m) \ m(1) m(2) m(3) m(4) m(5) m(6) m(7) m(8) m(9) m(10) m(11) m(12) m(13) m(14) \ m(15) m(16) //[[[end]]] namespace detail { // Type-opaque holder for an argument to format(), with associated actions // on the type held as explicit function pointers. This allows FormatArg's // for each argument to be allocated as a homogenous array inside FormatList // whereas a naive implementation based on inheritance does not. class FormatArg { public: FormatArg() {} template - FormatArg(const T &value) + explicit FormatArg(const T &value) : m_value(static_cast(&value)), m_formatImpl(&formatImpl), m_toIntImpl(&toIntImpl) {} void format(std::ostream &out, const char *fmtBegin, const char *fmtEnd, int ntrunc) const { m_formatImpl(out, fmtBegin, fmtEnd, ntrunc, m_value); } int toInt() const { return m_toIntImpl(m_value); } private: template TINYFORMAT_HIDDEN static void formatImpl(std::ostream &out, const char *fmtBegin, const char *fmtEnd, int ntrunc, const void *value) { formatValue(out, fmtBegin, fmtEnd, ntrunc, *static_cast(value)); } template TINYFORMAT_HIDDEN static int toIntImpl(const void *value) { return convertToInt::invoke(*static_cast(value)); } const void *m_value; void (*m_formatImpl)(std::ostream &out, const char *fmtBegin, const char *fmtEnd, int ntrunc, const void *value); int (*m_toIntImpl)(const void *value); }; // Parse and return an integer from the string c, as atoi() // On return, c is set to one past the end of the integer. 
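// Added example (not in the original source): with c pointing at "15d",
// parseIntAndAdvance(c) returns 15 and leaves c at the 'd'; if the first
// character is not a digit it returns 0 and c is unchanged. This is how
// explicit widths and precisions such as "%15d" are read from format strings.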
inline int parseIntAndAdvance(const char *&c) { int i = 0; for (; *c >= '0' && *c <= '9'; ++c) i = 10 * i + (*c - '0'); return i; } // Print literal part of format string and return next format spec position. // // Skips over any occurrences of '%%', printing a literal '%' to the output. // The position of the first % character of the next nontrivial format spec // is returned, or the end of string. inline const char *printFormatStringLiteral(std::ostream &out, const char *fmt) { const char *c = fmt; for (;; ++c) { switch (*c) { case '\0': out.write(fmt, c - fmt); return c; case '%': out.write(fmt, c - fmt); if (*(c + 1) != '%') return c; // for "%%", tack trailing % onto next literal section. fmt = ++c; break; default: break; } } } // Parse a format string and set the stream state accordingly. // // The format mini-language recognized here is meant to be the one from C99, // with the form "%[flags][width][.precision][length]type". // // Formatting options which can't be natively represented using the ostream // state are returned in spacePadPositive (for space padded positive // numbers) and ntrunc (for truncating conversions). argIndex is incremented // if necessary to pull out variable width and precision. The function // returns a pointer to the character after the end of the current format // spec. inline const char * streamStateFromFormat(std::ostream &out, bool &spacePadPositive, int &ntrunc, const char *fmtStart, const detail::FormatArg *formatters, int &argIndex, int numFormatters) { if (*fmtStart != '%') { TINYFORMAT_ERROR("tinyformat: Not enough conversion specifiers in " "format string"); return fmtStart; } // Reset stream state to defaults. out.width(0); out.precision(6); out.fill(' '); // Reset most flags; ignore irrelevant unitbuf & skipws. out.unsetf(std::ios::adjustfield | std::ios::basefield | std::ios::floatfield | std::ios::showbase | std::ios::boolalpha | std::ios::showpoint | std::ios::showpos | std::ios::uppercase); bool precisionSet = false; bool widthSet = false; int widthExtra = 0; const char *c = fmtStart + 1; // 1) Parse flags for (;; ++c) { switch (*c) { case '#': out.setf(std::ios::showpoint | std::ios::showbase); continue; case '0': // overridden by left alignment ('-' flag) if (!(out.flags() & std::ios::left)) { // Use internal padding so that numeric values are // formatted correctly, eg -00010 rather than 000-10 out.fill('0'); out.setf(std::ios::internal, std::ios::adjustfield); } continue; case '-': out.fill(' '); out.setf(std::ios::left, std::ios::adjustfield); continue; case ' ': // overridden by show positive sign, '+' flag. 
if (!(out.flags() & std::ios::showpos)) spacePadPositive = true; continue; case '+': out.setf(std::ios::showpos); spacePadPositive = false; widthExtra = 1; continue; default: break; } break; } // 2) Parse width if (*c >= '0' && *c <= '9') { widthSet = true; out.width(parseIntAndAdvance(c)); } if (*c == '*') { widthSet = true; int width = 0; if (argIndex < numFormatters) width = formatters[argIndex++].toInt(); else TINYFORMAT_ERROR( "tinyformat: Not enough arguments to read variable width"); if (width < 0) { // negative widths correspond to '-' flag set out.fill(' '); out.setf(std::ios::left, std::ios::adjustfield); width = -width; } out.width(width); ++c; } // 3) Parse precision if (*c == '.') { ++c; int precision = 0; if (*c == '*') { ++c; if (argIndex < numFormatters) precision = formatters[argIndex++].toInt(); else TINYFORMAT_ERROR("tinyformat: Not enough arguments to read " "variable precision"); } else { if (*c >= '0' && *c <= '9') { precision = parseIntAndAdvance(c); } else if (*c == '-') { // negative precisions ignored, treated as zero. parseIntAndAdvance(++c); } } out.precision(precision); precisionSet = true; } // 4) Ignore any C99 length modifier while (*c == 'l' || *c == 'h' || *c == 'L' || *c == 'j' || *c == 'z' || *c == 't') ++c; // 5) We're up to the conversion specifier character. // Set stream flags based on conversion specifier (thanks to the // boost::format class for forging the way here). bool intConversion = false; switch (*c) { case 'u': case 'd': case 'i': out.setf(std::ios::dec, std::ios::basefield); intConversion = true; break; case 'o': out.setf(std::ios::oct, std::ios::basefield); intConversion = true; break; case 'X': out.setf(std::ios::uppercase); // FALLTHROUGH case 'x': case 'p': out.setf(std::ios::hex, std::ios::basefield); intConversion = true; break; case 'E': out.setf(std::ios::uppercase); // FALLTHROUGH case 'e': out.setf(std::ios::scientific, std::ios::floatfield); out.setf(std::ios::dec, std::ios::basefield); break; case 'F': out.setf(std::ios::uppercase); // FALLTHROUGH case 'f': out.setf(std::ios::fixed, std::ios::floatfield); break; case 'G': out.setf(std::ios::uppercase); // FALLTHROUGH case 'g': out.setf(std::ios::dec, std::ios::basefield); // As in boost::format, let stream decide float format. out.flags(out.flags() & ~std::ios::floatfield); break; case 'a': case 'A': TINYFORMAT_ERROR("tinyformat: the %a and %A conversion specs " "are not supported"); break; case 'c': // Handled as special case inside formatValue() break; case 's': if (precisionSet) ntrunc = static_cast(out.precision()); // Make %s print booleans as "true" and "false" out.setf(std::ios::boolalpha); break; case 'n': // Not supported - will cause problems! TINYFORMAT_ERROR( "tinyformat: %n conversion spec not supported"); break; case '\0': TINYFORMAT_ERROR("tinyformat: Conversion spec incorrectly " "terminated by end of string"); return c; default: break; } if (intConversion && precisionSet && !widthSet) { // "precision" for integers gives the minimum number of digits (to // be padded with zeros on the left). This isn't really supported by // the iostreams, but we can approximately simulate it with the // width if the width isn't otherwise used. 
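// Added illustration (not in the original source): under this approximation,
// "%.5d" applied to 42 becomes width 5, '0' fill, internal adjustment, i.e.
// "00042"; with the '+' flag ("%+.5d"), widthExtra reserves a column for the
// sign, giving "+00042". An explicit width in the format string disables the
// emulation, hence the !widthSet condition above.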
out.width(out.precision() + widthExtra); out.setf(std::ios::internal, std::ios::adjustfield); out.fill('0'); } return c + 1; } //------------------------------------------------------------------------------ inline void formatImpl(std::ostream &out, const char *fmt, const detail::FormatArg *formatters, int numFormatters) { // Saved stream state std::streamsize origWidth = out.width(); std::streamsize origPrecision = out.precision(); std::ios::fmtflags origFlags = out.flags(); char origFill = out.fill(); for (int argIndex = 0; argIndex < numFormatters; ++argIndex) { // Parse the format string fmt = printFormatStringLiteral(out, fmt); bool spacePadPositive = false; int ntrunc = -1; const char *fmtEnd = streamStateFromFormat(out, spacePadPositive, ntrunc, fmt, formatters, argIndex, numFormatters); if (argIndex >= numFormatters) { // Check args remain after reading any variable width/precision TINYFORMAT_ERROR("tinyformat: Not enough format arguments"); return; } const FormatArg &arg = formatters[argIndex]; // Format the arg into the stream. if (!spacePadPositive) arg.format(out, fmt, fmtEnd, ntrunc); else { // The following is a special case with no direct correspondence // between stream formatting and the printf() behaviour. // Simulate it crudely by formatting into a temporary string // stream and munging the resulting string. std::ostringstream tmpStream; tmpStream.copyfmt(out); tmpStream.setf(std::ios::showpos); arg.format(tmpStream, fmt, fmtEnd, ntrunc); // allocates... yuck. std::string result = tmpStream.str(); for (size_t i = 0, iend = result.size(); i < iend; ++i) if (result[i] == '+') result[i] = ' '; out << result; } fmt = fmtEnd; } // Print remaining part of format string. fmt = printFormatStringLiteral(out, fmt); if (*fmt != '\0') TINYFORMAT_ERROR( "tinyformat: Too many conversion specifiers in format string"); // Restore stream state out.width(origWidth); out.precision(origPrecision); out.flags(origFlags); out.fill(origFill); } } // namespace detail /// List of template arguments format(), held in a type-opaque way. /// /// A const reference to FormatList (typedef'd as FormatListRef) may be /// conveniently used to pass arguments to non-template functions: All type /// information has been stripped from the arguments, leaving just enough of a /// common interface to perform formatting as required. class FormatList { public: FormatList(detail::FormatArg *formatters, int N) : m_formatters(formatters), m_N(N) {} friend void vformat(std::ostream &out, const char *fmt, const FormatList &list); private: const detail::FormatArg *m_formatters; int m_N; }; /// Reference to type-opaque format list for passing to vformat() typedef const FormatList &FormatListRef; namespace detail { // Format list subclass with fixed storage to avoid dynamic allocation template class FormatListN : public FormatList { public: #ifdef TINYFORMAT_USE_VARIADIC_TEMPLATES template - FormatListN(const Args &... args) + explicit FormatListN(const Args &... 
args) : FormatList(&m_formatterStore[0], N), m_formatterStore{ FormatArg(args)...} { static_assert(sizeof...(args) == N, "Number of args must be N"); } #else // C++98 version void init(int) {} #define TINYFORMAT_MAKE_FORMATLIST_CONSTRUCTOR(n) \ \ template \ - FormatListN(TINYFORMAT_VARARGS(n)) : FormatList(&m_formatterStore[0], n) { \ + explicit FormatListN(TINYFORMAT_VARARGS(n)) \ + : FormatList(&m_formatterStore[0], n) { \ assert(n == N); \ init(0, TINYFORMAT_PASSARGS(n)); \ } \ \ template \ void init(int i, TINYFORMAT_VARARGS(n)) { \ m_formatterStore[i] = FormatArg(v1); \ init(i + 1 TINYFORMAT_PASSARGS_TAIL(n)); \ } TINYFORMAT_FOREACH_ARGNUM(TINYFORMAT_MAKE_FORMATLIST_CONSTRUCTOR) #undef TINYFORMAT_MAKE_FORMATLIST_CONSTRUCTOR #endif private: FormatArg m_formatterStore[N]; }; // Special 0-arg version - MSVC says zero-sized C array in struct is // nonstandard. template <> class FormatListN<0> : public FormatList { public: FormatListN() : FormatList(0, 0) {} }; } // namespace detail //------------------------------------------------------------------------------ // Primary API functions #ifdef TINYFORMAT_USE_VARIADIC_TEMPLATES /// Make type-agnostic format list from list of template arguments. /// /// The exact return type of this function is an implementation detail and /// shouldn't be relied upon. Instead it should be stored as a FormatListRef: /// /// FormatListRef formatList = makeFormatList( /*...*/ ); template detail::FormatListN makeFormatList(const Args &... args) { return detail::FormatListN(args...); } #else // C++98 version inline detail::FormatListN<0> makeFormatList() { return detail::FormatListN<0>(); } #define TINYFORMAT_MAKE_MAKEFORMATLIST(n) \ template \ detail::FormatListN makeFormatList(TINYFORMAT_VARARGS(n)) { \ return detail::FormatListN(TINYFORMAT_PASSARGS(n)); \ } TINYFORMAT_FOREACH_ARGNUM(TINYFORMAT_MAKE_MAKEFORMATLIST) #undef TINYFORMAT_MAKE_MAKEFORMATLIST #endif /// Format list of arguments to the stream according to the given format string. /// /// The name vformat() is chosen for the semantic similarity to vprintf(): the /// list of format arguments is held in a single function argument. inline void vformat(std::ostream &out, const char *fmt, FormatListRef list) { detail::formatImpl(out, fmt, list.m_formatters, list.m_N); } #ifdef TINYFORMAT_USE_VARIADIC_TEMPLATES /// Format list of arguments to the stream according to given format string. template void format(std::ostream &out, const char *fmt, const Args &... args) { vformat(out, fmt, makeFormatList(args...)); } /// Format list of arguments according to the given format string and return the /// result as a string. template std::string format(const char *fmt, const Args &... args) { std::ostringstream oss; format(oss, fmt, args...); return oss.str(); } /// Format list of arguments to std::cout, according to the given format string template void printf(const char *fmt, const Args &... args) { format(std::cout, fmt, args...); } template void printfln(const char *fmt, const Args &... 
args) { format(std::cout, fmt, args...); std::cout << '\n'; } #else // C++98 version inline void format(std::ostream &out, const char *fmt) { vformat(out, fmt, makeFormatList()); } inline std::string format(const char *fmt) { std::ostringstream oss; format(oss, fmt); return oss.str(); } inline void printf(const char *fmt) { format(std::cout, fmt); } inline void printfln(const char *fmt) { format(std::cout, fmt); std::cout << '\n'; } #define TINYFORMAT_MAKE_FORMAT_FUNCS(n) \ \ template \ void format(std::ostream &out, const char *fmt, TINYFORMAT_VARARGS(n)) { \ vformat(out, fmt, makeFormatList(TINYFORMAT_PASSARGS(n))); \ } \ \ template \ std::string format(const char *fmt, TINYFORMAT_VARARGS(n)) { \ std::ostringstream oss; \ format(oss, fmt, TINYFORMAT_PASSARGS(n)); \ return oss.str(); \ } \ \ template \ void printf(const char *fmt, TINYFORMAT_VARARGS(n)) { \ format(std::cout, fmt, TINYFORMAT_PASSARGS(n)); \ } \ \ template \ void printfln(const char *fmt, TINYFORMAT_VARARGS(n)) { \ format(std::cout, fmt, TINYFORMAT_PASSARGS(n)); \ std::cout << '\n'; \ } TINYFORMAT_FOREACH_ARGNUM(TINYFORMAT_MAKE_FORMAT_FUNCS) #undef TINYFORMAT_MAKE_FORMAT_FUNCS #endif // Added for Bitcoin Core template std::string format(const std::string &fmt, const Args &... args) { std::ostringstream oss; format(oss, fmt.c_str(), args...); return oss.str(); } } // namespace tinyformat #define strprintf tfm::format #endif // TINYFORMAT_H_INCLUDED diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 9e429c2720..1479d281cb 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -1,812 +1,812 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "torcontrol.h" #include "crypto/hmac_sha256.h" #include "net.h" #include "netbase.h" #include "util.h" #include "utilstrencodings.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** Default control port */ const std::string DEFAULT_TOR_CONTROL = "127.0.0.1:9051"; /** Tor cookie size (from control-spec.txt) */ static const int TOR_COOKIE_SIZE = 32; /** Size of client/server nonce for SAFECOOKIE */ static const int TOR_NONCE_SIZE = 32; /** For computing serverHash in SAFECOOKIE */ static const std::string TOR_SAFE_SERVERKEY = "Tor safe cookie authentication server-to-controller hash"; /** For computing clientHash in SAFECOOKIE */ static const std::string TOR_SAFE_CLIENTKEY = "Tor safe cookie authentication controller-to-server hash"; /** Exponential backoff configuration - initial timeout in seconds */ static const float RECONNECT_TIMEOUT_START = 1.0; /** Exponential backoff configuration - growth factor */ static const float RECONNECT_TIMEOUT_EXP = 1.5; /** * Maximum length for lines received on TorControlConnection. * tor-control-spec.txt mentions that there is explicitly no limit defined to * line length, this is belt-and-suspenders sanity limit to prevent memory * exhaustion. */ static const int MAX_LINE_LENGTH = 100000; /****** Low-level TorControlConnection ********/ /** Reply from Tor, can be single or multi-line */ class TorControlReply { public: TorControlReply() { Clear(); } int code; std::vector lines; void Clear() { code = 0; lines.clear(); } }; /** * Low-level handling for Tor control connection. 
* Speaks the SMTP-like protocol as defined in torspec/control-spec.txt */ class TorControlConnection { public: typedef std::function ConnectionCB; typedef std::function ReplyHandlerCB; /** Create a new TorControlConnection. */ - TorControlConnection(struct event_base *base); + explicit TorControlConnection(struct event_base *base); ~TorControlConnection(); /** * Connect to a Tor control port. * target is address of the form host:port. * connected is the handler that is called when connection is successfully * established. * disconnected is a handler that is called when the connection is broken. * Return true on success. */ bool Connect(const std::string &target, const ConnectionCB &connected, const ConnectionCB &disconnected); /** * Disconnect from Tor control port. */ bool Disconnect(); /** * Send a command, register a handler for the reply. * A trailing CRLF is automatically added. * Return true on success. */ bool Command(const std::string &cmd, const ReplyHandlerCB &reply_handler); /** Response handlers for async replies */ boost::signals2::signal async_handler; private: /** Callback when ready for use */ std::function connected; /** Callback when connection lost */ std::function disconnected; /** Libevent event base */ struct event_base *base; /** Connection to control socket */ struct bufferevent *b_conn; /** Message being received */ TorControlReply message; /** Response handlers */ std::deque reply_handlers; /** Libevent handlers: internal */ static void readcb(struct bufferevent *bev, void *ctx); static void eventcb(struct bufferevent *bev, short what, void *ctx); }; TorControlConnection::TorControlConnection(struct event_base *_base) : base(_base), b_conn(nullptr) {} TorControlConnection::~TorControlConnection() { if (b_conn) { bufferevent_free(b_conn); } } void TorControlConnection::readcb(struct bufferevent *bev, void *ctx) { TorControlConnection *self = (TorControlConnection *)ctx; struct evbuffer *input = bufferevent_get_input(bev); size_t n_read_out = 0; char *line; assert(input); // If there is not a whole line to read, evbuffer_readln returns nullptr while ((line = evbuffer_readln(input, &n_read_out, EVBUFFER_EOL_CRLF)) != nullptr) { std::string s(line, n_read_out); free(line); // Short line if (s.size() < 4) { continue; } // (-|+| ) self->message.code = atoi(s.substr(0, 3)); self->message.lines.push_back(s.substr(4)); // '-','+' or ' ' char ch = s[3]; if (ch == ' ') { // Final line, dispatch reply and clean up if (self->message.code >= 600) { // Dispatch async notifications to async handler. // Synchronous and asynchronous messages are never interleaved self->async_handler(*self, self->message); } else { if (!self->reply_handlers.empty()) { // Invoke reply handler with message self->reply_handlers.front()(*self, self->message); self->reply_handlers.pop_front(); } else { LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code); } } self->message.Clear(); } } // Check for size of buffer - protect against memory exhaustion with very // long lines. Do this after evbuffer_readln to make sure all full lines // have been removed from the buffer. Everything left is an incomplete line. 
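// Added illustration (not part of the original patch) of the reply framing
// handled by the loop above: a synchronous reply such as
//     250-PROTOCOLINFO 1
//     250-AUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE=".../control_auth_cookie"
//     250 OK
// accumulates the '-' lines and is dispatched to the front reply handler once
// the ' ' separator (offset 3) is seen; codes >= 600 are routed to
// async_handler instead.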
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) { LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n"); self->Disconnect(); } } void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ctx) { TorControlConnection *self = (TorControlConnection *)ctx; if (what & BEV_EVENT_CONNECTED) { LogPrint(BCLog::TOR, "tor: Successfully connected!\n"); self->connected(*self); } else if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) { if (what & BEV_EVENT_ERROR) { LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n"); } else { LogPrint(BCLog::TOR, "tor: End of stream\n"); } self->Disconnect(); self->disconnected(*self); } } bool TorControlConnection::Connect(const std::string &target, const ConnectionCB &_connected, const ConnectionCB &_disconnected) { if (b_conn) { Disconnect(); } // Parse target address:port struct sockaddr_storage connect_to_addr; int connect_to_addrlen = sizeof(connect_to_addr); if (evutil_parse_sockaddr_port(target.c_str(), (struct sockaddr *)&connect_to_addr, &connect_to_addrlen) < 0) { LogPrintf("tor: Error parsing socket address %s\n", target); return false; } // Create a new socket, set up callbacks and enable notification bits b_conn = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE); if (!b_conn) { return false; } bufferevent_setcb(b_conn, TorControlConnection::readcb, nullptr, TorControlConnection::eventcb, this); bufferevent_enable(b_conn, EV_READ | EV_WRITE); this->connected = _connected; this->disconnected = _disconnected; // Finally, connect to target if (bufferevent_socket_connect(b_conn, (struct sockaddr *)&connect_to_addr, connect_to_addrlen) < 0) { LogPrintf("tor: Error connecting to address %s\n", target); return false; } return true; } bool TorControlConnection::Disconnect() { if (b_conn) { bufferevent_free(b_conn); } b_conn = nullptr; return true; } bool TorControlConnection::Command(const std::string &cmd, const ReplyHandlerCB &reply_handler) { if (!b_conn) { return false; } struct evbuffer *buf = bufferevent_get_output(b_conn); if (!buf) { return false; } evbuffer_add(buf, cmd.data(), cmd.size()); evbuffer_add(buf, "\r\n", 2); reply_handlers.push_back(reply_handler); return true; } /****** General parsing utilities ********/ /* Split reply line in the form 'AUTH METHODS=...' into a type * 'AUTH' and arguments 'METHODS=...'. */ static std::pair SplitTorReplyLine(const std::string &s) { size_t ptr = 0; std::string type; while (ptr < s.size() && s[ptr] != ' ') { type.push_back(s[ptr]); ++ptr; } if (ptr < s.size()) { // skip ' ' ++ptr; } return make_pair(type, s.substr(ptr)); } /** * Parse reply arguments in the form 'METHODS=COOKIE,SAFECOOKIE * COOKIEFILE=".../control_auth_cookie"'. */ static std::map ParseTorReplyMapping(const std::string &s) { std::map mapping; size_t ptr = 0; while (ptr < s.size()) { std::string key, value; while (ptr < s.size() && s[ptr] != '=') { key.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) { return std::map(); } // skip '=' ++ptr; // Quoted string if (ptr < s.size() && s[ptr] == '"') { // skip '=' ++ptr; bool escape_next = false; while (ptr < s.size() && (!escape_next && s[ptr] != '"')) { escape_next = (s[ptr] == '\\'); value.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) { return std::map(); } // skip closing '"' ++ptr; /* TODO: unescape value - according to the spec this depends on the * context, some strings use C-LogPrintf style escape codes, some * don't. So may be better handled at the call site. 
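 *
 * Added illustration (not in the original source): for the example in the
 * doc comment above, 'METHODS=COOKIE,SAFECOOKIE COOKIEFILE=".../control_auth_cookie"',
 * the returned map holds METHODS -> COOKIE,SAFECOOKIE and COOKIEFILE ->
 * .../control_auth_cookie. The surrounding quotes are stripped here, while
 * backslash escapes inside the quoted value are left untouched (hence the
 * TODO above about unescaping at the call site).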
*/ } else { // Unquoted value. Note that values can contain '=' at will, just no // spaces while (ptr < s.size() && s[ptr] != ' ') { value.push_back(s[ptr]); ++ptr; } } if (ptr < s.size() && s[ptr] == ' ') { // skip ' ' after key=value ++ptr; } mapping[key] = value; } return mapping; } /** * Read full contents of a file and return them in a std::string. * Returns a pair . * If an error occurred, status will be false, otherwise status will be true and * the data will be returned in string. * * @param maxsize Puts a maximum size limit on the file that is read. If the * file is larger than this, truncated data * (with len > maxsize) will be returned. */ static std::pair ReadBinaryFile(const fs::path &filename, size_t maxsize = std::numeric_limits::max()) { FILE *f = fsbridge::fopen(filename, "rb"); if (f == nullptr) { return std::make_pair(false, ""); } std::string retval; char buffer[128]; size_t n; while ((n = fread(buffer, 1, sizeof(buffer), f)) > 0) { retval.append(buffer, buffer + n); if (retval.size() > maxsize) { break; } } fclose(f); return std::make_pair(true, retval); } /** * Write contents of std::string to a file. * @return true on success. */ static bool WriteBinaryFile(const fs::path &filename, const std::string &data) { FILE *f = fsbridge::fopen(filename, "wb"); if (f == nullptr) { return false; } if (fwrite(data.data(), 1, data.size(), f) != data.size()) { fclose(f); return false; } fclose(f); return true; } /****** Bitcoin specific TorController implementation ********/ /** * Controller that connects to Tor control socket, authenticate, then create * and maintain a ephemeral hidden service. */ class TorController { public: TorController(struct event_base *base, const std::string &target); ~TorController(); /** Get name fo file to store private key in */ fs::path GetPrivateKeyFile(); /** Reconnect, after getting disconnected */ void Reconnect(); private: struct event_base *base; std::string target; TorControlConnection conn; std::string private_key; std::string service_id; bool reconnect; struct event *reconnect_ev; float reconnect_timeout; CService service; /** Cookie for SAFECOOKIE auth */ std::vector cookie; /** ClientNonce for SAFECOOKIE auth */ std::vector clientNonce; /** Callback for ADD_ONION result */ void add_onion_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHENTICATE result */ void auth_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHCHALLENGE result */ void authchallenge_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for PROTOCOLINFO result */ void protocolinfo_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback after successful connection */ void connected_cb(TorControlConnection &conn); /** Callback after connection lost or failed connection attempt */ void disconnected_cb(TorControlConnection &conn); /** Callback for reconnect timer */ static void reconnect_cb(evutil_socket_t fd, short what, void *arg); }; TorController::TorController(struct event_base *_base, const std::string &_target) : base(_base), target(_target), conn(base), reconnect(true), reconnect_ev(0), reconnect_timeout(RECONNECT_TIMEOUT_START) { reconnect_ev = event_new(base, -1, 0, reconnect_cb, this); if (!reconnect_ev) { LogPrintf( "tor: Failed to create event for reconnection: out of memory?\n"); } // Start connection attempts immediately if (!conn.Connect(_target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) 
{ LogPrintf("tor: Initiating connection to Tor control port %s failed\n", _target); } // Read service private key if cached std::pair pkf = ReadBinaryFile(GetPrivateKeyFile()); if (pkf.first) { LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile()); private_key = pkf.second; } } TorController::~TorController() { if (reconnect_ev) { event_free(reconnect_ev); reconnect_ev = nullptr; } if (service.IsValid()) { RemoveLocal(service); } } void TorController::add_onion_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n"); for (const std::string &s : reply.lines) { std::map m = ParseTorReplyMapping(s); std::map::iterator i; if ((i = m.find("ServiceID")) != m.end()) { service_id = i->second; } if ((i = m.find("PrivateKey")) != m.end()) { private_key = i->second; } } service = LookupNumeric(std::string(service_id + ".onion").c_str(), GetListenPort()); LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString()); if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) { LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile()); } else { LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile()); } AddLocal(service, LOCAL_MANUAL); // ... onion requested - keep connection open } else if (reply.code == 510) { // 510 Unrecognized command LogPrintf("tor: Add onion failed with unrecognized command (You " "probably need to upgrade Tor)\n"); } else { LogPrintf("tor: Add onion failed; error code %d\n", reply.code); } } void TorController::auth_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: Authentication successful\n"); // Now that we know Tor is running setup the proxy for onion addresses // if -onion isn't set to something else. if (gArgs.GetArg("-onion", "") == "") { CService resolved(LookupNumeric("127.0.0.1", 9050)); proxyType addrOnion = proxyType(resolved, true); SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } // Finally - now create the service // No private key, generate one if (private_key.empty()) { // Explicitly request RSA1024 - see issue #9214 private_key = "NEW:RSA1024"; } // Request hidden service, redirect port. // Note that the 'virtual' port doesn't have to be the same as our // internal port, but this is just a convenient choice. TODO; refactor // the shutdown sequence some day. _conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, GetListenPort(), GetListenPort()), boost::bind(&TorController::add_onion_cb, this, _1, _2)); } else { LogPrintf("tor: Authentication failed\n"); } } /** Compute Tor SAFECOOKIE response. 
* * ServerHash is computed as: * HMAC-SHA256("Tor safe cookie authentication server-to-controller hash", * CookieString | ClientNonce | ServerNonce) * (with the HMAC key as its first argument) * * After a controller sends a successful AUTHCHALLENGE command, the * next command sent on the connection must be an AUTHENTICATE command, * and the only authentication string which that AUTHENTICATE command * will accept is: * * HMAC-SHA256("Tor safe cookie authentication controller-to-server hash", * CookieString | ClientNonce | ServerNonce) * */ static std::vector ComputeResponse(const std::string &key, const std::vector &cookie, const std::vector &clientNonce, const std::vector &serverNonce) { CHMAC_SHA256 computeHash((const uint8_t *)key.data(), key.size()); std::vector computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0); computeHash.Write(cookie.data(), cookie.size()); computeHash.Write(clientNonce.data(), clientNonce.size()); computeHash.Write(serverNonce.data(), serverNonce.size()); computeHash.Finalize(computedHash.data()); return computedHash; } void TorController::authchallenge_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n"); std::pair l = SplitTorReplyLine(reply.lines[0]); if (l.first == "AUTHCHALLENGE") { std::map m = ParseTorReplyMapping(l.second); std::vector serverHash = ParseHex(m["SERVERHASH"]); std::vector serverNonce = ParseHex(m["SERVERNONCE"]); LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); if (serverNonce.size() != 32) { LogPrintf( "tor: ServerNonce is not 32 bytes, as required by spec\n"); return; } std::vector computedServerHash = ComputeResponse( TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce); if (computedServerHash != serverHash) { LogPrintf("tor: ServerHash %s does not match expected " "ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash)); return; } std::vector computedClientHash = ComputeResponse( TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce); _conn.Command("AUTHENTICATE " + HexStr(computedClientHash), boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n"); } } else { LogPrintf("tor: SAFECOOKIE authentication challenge failed\n"); } } void TorController::protocolinfo_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { std::set methods; std::string cookiefile; /* * 250-AUTH METHODS=COOKIE,SAFECOOKIE * COOKIEFILE="/home/x/.tor/control_auth_cookie" * 250-AUTH METHODS=NULL * 250-AUTH METHODS=HASHEDPASSWORD */ for (const std::string &s : reply.lines) { std::pair l = SplitTorReplyLine(s); if (l.first == "AUTH") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("METHODS")) != m.end()) { boost::split(methods, i->second, boost::is_any_of(",")); } if ((i = m.find("COOKIEFILE")) != m.end()) { cookiefile = i->second; } } else if (l.first == "VERSION") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("Tor")) != m.end()) { LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second); } } } for (const std::string &s : methods) { LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s); } // Prefer NULL, otherwise SAFECOOKIE. 
If a password is provided, use // HASHEDPASSWORD /* Authentication: * cookie: hex-encoded ~/.tor/control_auth_cookie * password: "password" */ std::string torpassword = gArgs.GetArg("-torpassword", ""); if (!torpassword.empty()) { if (methods.count("HASHEDPASSWORD")) { LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n"); boost::replace_all(torpassword, "\"", "\\\""); _conn.Command( "AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Password provided with -torpassword, but " "HASHEDPASSWORD authentication is not available\n"); } } else if (methods.count("NULL")) { LogPrint(BCLog::TOR, "tor: Using NULL authentication\n"); _conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2)); } else if (methods.count("SAFECOOKIE")) { // Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, " "reading cookie authentication from %s\n", cookiefile); std::pair status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE); if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) { // _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), // boost::bind(&TorController::auth_cb, this, _1, _2)); cookie = std::vector(status_cookie.second.begin(), status_cookie.second.end()); clientNonce = std::vector(TOR_NONCE_SIZE, 0); GetRandBytes(&clientNonce[0], TOR_NONCE_SIZE); _conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), boost::bind(&TorController::authchallenge_cb, this, _1, _2)); } else { if (status_cookie.first) { LogPrintf("tor: Authentication cookie %s is not exactly %i " "bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE); } else { LogPrintf("tor: Authentication cookie %s could not be " "opened (check permissions)\n", cookiefile); } } } else if (methods.count("HASHEDPASSWORD")) { LogPrintf("tor: The only supported authentication mechanism left " "is password, but no password provided with " "-torpassword\n"); } else { LogPrintf("tor: No supported authentication method\n"); } } else { LogPrintf("tor: Requesting protocol info failed\n"); } } void TorController::connected_cb(TorControlConnection &_conn) { reconnect_timeout = RECONNECT_TIMEOUT_START; // First send a PROTOCOLINFO command to figure out what authentication is // expected if (!_conn.Command( "PROTOCOLINFO 1", boost::bind(&TorController::protocolinfo_cb, this, _1, _2))) { LogPrintf("tor: Error sending initial protocolinfo command\n"); } } void TorController::disconnected_cb(TorControlConnection &_conn) { // Stop advertising service when disconnected if (service.IsValid()) { RemoveLocal(service); } service = CService(); if (!reconnect) { return; } LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target); // Single-shot timer for reconnect. Use exponential backoff. struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0)); if (reconnect_ev) { event_add(reconnect_ev, &time); } reconnect_timeout *= RECONNECT_TIMEOUT_EXP; } void TorController::Reconnect() { /* Try to reconnect and reestablish if we get booted - for example, Tor may * be restarting. 
*/ if (!conn.Connect(target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) { LogPrintf( "tor: Re-initiating connection to Tor control port %s failed\n", target); } } fs::path TorController::GetPrivateKeyFile() { return GetDataDir() / "onion_private_key"; } void TorController::reconnect_cb(evutil_socket_t fd, short what, void *arg) { TorController *self = (TorController *)arg; self->Reconnect(); } /****** Thread ********/ static struct event_base *gBase; static std::thread torControlThread; static void TorControlThread() { TorController ctrl(gBase, gArgs.GetArg("-torcontrol", DEFAULT_TOR_CONTROL)); event_base_dispatch(gBase); } void StartTorControl() { assert(!gBase); #ifdef WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif gBase = event_base_new(); if (!gBase) { LogPrintf("tor: Unable to create event_base\n"); return; } torControlThread = std::thread( std::bind(&TraceThread, "torcontrol", &TorControlThread)); } void InterruptTorControl() { if (gBase) { LogPrintf("tor: Thread interrupt\n"); event_base_loopbreak(gBase); } } void StopTorControl() { if (gBase) { torControlThread.join(); event_base_free(gBase); gBase = nullptr; } } diff --git a/src/txdb.cpp b/src/txdb.cpp index 9fd987fa29..3577930541 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -1,457 +1,457 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "txdb.h" #include "chainparams.h" #include "hash.h" #include "init.h" #include "pow.h" #include "random.h" #include "ui_interface.h" #include "uint256.h" #include "util.h" #include #include static const char DB_COIN = 'C'; static const char DB_COINS = 'c'; static const char DB_BLOCK_FILES = 'f'; static const char DB_TXINDEX = 't'; static const char DB_BLOCK_INDEX = 'b'; static const char DB_BEST_BLOCK = 'B'; static const char DB_HEAD_BLOCKS = 'H'; static const char DB_FLAG = 'F'; static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; namespace { struct CoinEntry { COutPoint *outpoint; char key; - CoinEntry(const COutPoint *ptr) + explicit CoinEntry(const COutPoint *ptr) : outpoint(const_cast(ptr)), key(DB_COIN) {} template void Serialize(Stream &s) const { s << key; s << outpoint->GetTxId(); s << VARINT(outpoint->GetN()); } template void Unserialize(Stream &s) { s >> key; uint256 id; s >> id; uint32_t n = 0; s >> VARINT(n); *outpoint = COutPoint(id, n); } }; } // namespace CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true) {} bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { return db.Read(CoinEntry(&outpoint), coin); } bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { return db.Exists(CoinEntry(&outpoint)); } uint256 CCoinsViewDB::GetBestBlock() const { uint256 hashBestChain; if (!db.Read(DB_BEST_BLOCK, hashBestChain)) return uint256(); return hashBestChain; } std::vector CCoinsViewDB::GetHeadBlocks() const { std::vector vhashHeadBlocks; if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { return std::vector(); } return vhashHeadBlocks; } bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { CDBBatch batch(db); size_t count = 0; size_t changed = 0; size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize); int 
crash_simulate = gArgs.GetArg("-dbcrashratio", 0); assert(!hashBlock.IsNull()); uint256 old_tip = GetBestBlock(); if (old_tip.IsNull()) { // We may be in the middle of replaying. std::vector old_heads = GetHeadBlocks(); if (old_heads.size() == 2) { assert(old_heads[0] == hashBlock); old_tip = old_heads[1]; } } // In the first batch, mark the database as being in the middle of a // transition from old_tip to hashBlock. // A vector is used for future extensibility, as we may want to support // interrupting after partial writes from multiple independent reorgs. batch.Erase(DB_BEST_BLOCK); batch.Write(DB_HEAD_BLOCKS, std::vector{hashBlock, old_tip}); for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { CoinEntry entry(&it->first); if (it->second.coin.IsSpent()) { batch.Erase(entry); } else { batch.Write(entry, it->second.coin); } changed++; } count++; CCoinsMap::iterator itOld = it++; mapCoins.erase(itOld); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); db.WriteBatch(batch); batch.Clear(); if (crash_simulate) { static FastRandomContext rng; if (rng.randrange(crash_simulate) == 0) { LogPrintf("Simulating a crash. Goodbye.\n"); _Exit(0); } } } } // In the last batch, mark the database as consistent with hashBlock again. batch.Erase(DB_HEAD_BLOCKS); batch.Write(DB_BEST_BLOCK, hashBlock); LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); bool ret = db.WriteBatch(batch); LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of " "%u) to coin database...\n", (unsigned int)changed, (unsigned int)count); return ret; } size_t CCoinsViewDB::EstimateSize() const { return db.EstimateSize(DB_COIN, char(DB_COIN + 1)); } CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {} bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); } bool CBlockTreeDB::WriteReindexing(bool fReindexing) { if (fReindexing) return Write(DB_REINDEX_FLAG, '1'); else return Erase(DB_REINDEX_FLAG); } bool CBlockTreeDB::ReadReindexing(bool &fReindexing) { fReindexing = Exists(DB_REINDEX_FLAG); return true; } bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { return Read(DB_LAST_BLOCK, nFile); } CCoinsViewCursor *CCoinsViewDB::Cursor() const { CCoinsViewDBCursor *i = new CCoinsViewDBCursor( const_cast(db).NewIterator(), GetBestBlock()); /** * It seems that there are no "const iterators" for LevelDB. Since we only * need read operations on it, use a const-cast to get around that * restriction. 
*/ i->pcursor->Seek(DB_COIN); // Cache key of first record if (i->pcursor->Valid()) { CoinEntry entry(&i->keyTmp.second); i->pcursor->GetKey(entry); i->keyTmp.first = entry.key; } else { // Make sure Valid() and GetKey() return false i->keyTmp.first = 0; } return i; } bool CCoinsViewDBCursor::GetKey(COutPoint &key) const { // Return cached key if (keyTmp.first == DB_COIN) { key = keyTmp.second; return true; } return false; } bool CCoinsViewDBCursor::GetValue(Coin &coin) const { return pcursor->GetValue(coin); } unsigned int CCoinsViewDBCursor::GetValueSize() const { return pcursor->GetValueSize(); } bool CCoinsViewDBCursor::Valid() const { return keyTmp.first == DB_COIN; } void CCoinsViewDBCursor::Next() { pcursor->Next(); CoinEntry entry(&keyTmp.second); if (!pcursor->Valid() || !pcursor->GetKey(entry)) { // Invalidate cached key after last record so that Valid() and GetKey() // return false keyTmp.first = 0; } else { keyTmp.first = entry.key; } } bool CBlockTreeDB::WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo) { CDBBatch batch(*this); for (std::vector>::const_iterator it = fileInfo.begin(); it != fileInfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second); } batch.Write(DB_LAST_BLOCK, nLastFile); for (std::vector::const_iterator it = blockinfo.begin(); it != blockinfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it)); } return WriteBatch(batch, true); } bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) { return Read(std::make_pair(DB_TXINDEX, txid), pos); } bool CBlockTreeDB::WriteTxIndex( const std::vector> &vect) { CDBBatch batch(*this); for (std::vector>::const_iterator it = vect.begin(); it != vect.end(); it++) batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second); return WriteBatch(batch); } bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) { return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0'); } bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) { char ch; if (!Read(std::make_pair(DB_FLAG, name), ch)) return false; fValue = ch == '1'; return true; } bool CBlockTreeDB::LoadBlockIndexGuts( const Config &config, std::function insertBlockIndex) { std::unique_ptr pcursor(NewIterator()); pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); // Load mapBlockIndex while (pcursor->Valid()) { boost::this_thread::interruption_point(); std::pair key; if (!pcursor->GetKey(key) || key.first != DB_BLOCK_INDEX) { break; } CDiskBlockIndex diskindex; if (!pcursor->GetValue(diskindex)) { return error("LoadBlockIndex() : failed to read value"); } // Construct block index object CBlockIndex *pindexNew = insertBlockIndex(diskindex.GetBlockHash()); pindexNew->pprev = insertBlockIndex(diskindex.hashPrev); pindexNew->nHeight = diskindex.nHeight; pindexNew->nFile = diskindex.nFile; pindexNew->nDataPos = diskindex.nDataPos; pindexNew->nUndoPos = diskindex.nUndoPos; pindexNew->nVersion = diskindex.nVersion; pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; pindexNew->nTime = diskindex.nTime; pindexNew->nBits = diskindex.nBits; pindexNew->nNonce = diskindex.nNonce; pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, config)) { return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString()); } pcursor->Next(); } return true; } namespace { //! 
Legacy class to deserialize pre-pertxout database entries without reindex. class CCoins { public: //! whether transaction is a coinbase bool fCoinBase; //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs //! at the end of the array are dropped std::vector vout; //! at which height this transaction was included in the active block chain int nHeight; //! empty constructor CCoins() : fCoinBase(false), vout(0), nHeight(0) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; // version int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); // header code ::Unserialize(s, VARINT(nCode)); fCoinBase = nCode & 1; std::vector vAvail(2, false); vAvail[0] = (nCode & 2) != 0; vAvail[1] = (nCode & 4) != 0; uint32_t nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1); // spentness bitmask while (nMaskCode > 0) { uint8_t chAvail = 0; ::Unserialize(s, chAvail); for (unsigned int p = 0; p < 8; p++) { bool f = (chAvail & (1 << p)) != 0; vAvail.push_back(f); } if (chAvail != 0) { nMaskCode--; } } // txouts themself vout.assign(vAvail.size(), CTxOut()); for (size_t i = 0; i < vAvail.size(); i++) { if (vAvail[i]) { ::Unserialize(s, REF(CTxOutCompressor(vout[i]))); } } // coinbase height ::Unserialize(s, VARINT(nHeight)); } }; } // namespace /** * Upgrade the database from older formats. * * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout. */ bool CCoinsViewDB::Upgrade() { std::unique_ptr pcursor(db.NewIterator()); pcursor->Seek(std::make_pair(DB_COINS, uint256())); if (!pcursor->Valid()) { return true; } int64_t count = 0; LogPrintf("Upgrading utxo-set database...\n"); LogPrintf("[0%%]..."); size_t batch_size = 1 << 24; CDBBatch batch(db); uiInterface.SetProgressBreakAction(StartShutdown); int reportDone = 0; std::pair key; std::pair prev_key = {DB_COINS, uint256()}; while (pcursor->Valid()) { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } if (!pcursor->GetKey(key) || key.first != DB_COINS) { break; } if (count++ % 256 == 0) { uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1); int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5); uiInterface.ShowProgress( _("Upgrading UTXO database") + "\n" + _("(press q to shutdown and continue later)") + "\n", percentageDone); if (reportDone < percentageDone / 10) { // report max. every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } } CCoins old_coins; if (!pcursor->GetValue(old_coins)) { return error("%s: cannot parse CCoins record", __func__); } TxId id(key.second); for (size_t i = 0; i < old_coins.vout.size(); ++i) { if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) { Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase); COutPoint outpoint(id, i); CoinEntry entry(&outpoint); batch.Write(entry, newcoin); } } batch.Erase(key); if (batch.SizeEstimate() > batch_size) { db.WriteBatch(batch); batch.Clear(); db.CompactRange(prev_key, key); prev_key = key; } pcursor->Next(); } db.WriteBatch(batch); db.CompactRange({DB_COINS, uint256()}, key); uiInterface.SetProgressBreakAction(std::function()); LogPrintf("[%s].\n", ShutdownRequested() ? 
"CANCELLED" : "DONE"); return !ShutdownRequested(); } diff --git a/src/txdb.h b/src/txdb.h index 8145ba605d..9217d599ba 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -1,135 +1,137 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXDB_H #define BITCOIN_TXDB_H #include "blockfileinfo.h" #include "chain.h" #include "coins.h" #include "dbwrapper.h" #include "diskblockpos.h" #include #include #include #include class CBlockIndex; class CCoinsViewDBCursor; class uint256; class Config; //! No need to periodic flush if at least this much space still available. static constexpr int MAX_BLOCK_COINSDB_USAGE = 10; //! -dbcache default (MiB) static const int64_t nDefaultDbCache = 450; //! -dbbatchsize default (bytes) static const int64_t nDefaultDbBatchSize = 16 << 20; //! max. -dbcache (MiB) static const int64_t nMaxDbCache = sizeof(void *) > 4 ? 16384 : 1024; //! min. -dbcache (MiB) static const int64_t nMinDbCache = 4; //! Max memory allocated to block tree DB specific cache, if no -txindex (MiB) static const int64_t nMaxBlockDBCache = 2; //! Max memory allocated to block tree DB specific cache, if -txindex (MiB) // Unlike for the UTXO database, for the txindex scenario the leveldb cache make // a meaningful difference: // https://github.com/bitcoin/bitcoin/pull/8273#issuecomment-229601991 static const int64_t nMaxBlockDBAndTxIndexCache = 1024; //! Max memory allocated to coin DB specific cache (MiB) static const int64_t nMaxCoinsDBCache = 8; struct CDiskTxPos : public CDiskBlockPos { unsigned int nTxOffset; // after header ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(*(CDiskBlockPos *)this); READWRITE(VARINT(nTxOffset)); } CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {} CDiskTxPos() { SetNull(); } void SetNull() { CDiskBlockPos::SetNull(); nTxOffset = 0; } }; /** CCoinsView backed by the coin database (chainstate/) */ class CCoinsViewDB final : public CCoinsView { protected: CDBWrapper db; public: - CCoinsViewDB(size_t nCacheSize, bool fMemory = false, bool fWipe = false); + explicit CCoinsViewDB(size_t nCacheSize, bool fMemory = false, + bool fWipe = false); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; CCoinsViewCursor *Cursor() const override; //! Attempt to update from an older database format. //! Returns whether an error occurred. 
bool Upgrade(); size_t EstimateSize() const override; }; /** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */ class CCoinsViewDBCursor : public CCoinsViewCursor { public: ~CCoinsViewDBCursor() {} bool GetKey(COutPoint &key) const override; bool GetValue(Coin &coin) const override; unsigned int GetValueSize() const override; bool Valid() const override; void Next() override; private: CCoinsViewDBCursor(CDBIterator *pcursorIn, const uint256 &hashBlockIn) : CCoinsViewCursor(hashBlockIn), pcursor(pcursorIn) {} std::unique_ptr pcursor; std::pair keyTmp; friend class CCoinsViewDB; }; /** Access to the block database (blocks/index/) */ class CBlockTreeDB : public CDBWrapper { public: - CBlockTreeDB(size_t nCacheSize, bool fMemory = false, bool fWipe = false); + explicit CBlockTreeDB(size_t nCacheSize, bool fMemory = false, + bool fWipe = false); private: CBlockTreeDB(const CBlockTreeDB &); void operator=(const CBlockTreeDB &); public: bool WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo); bool ReadBlockFileInfo(int nFile, CBlockFileInfo &fileinfo); bool ReadLastBlockFile(int &nFile); bool WriteReindexing(bool fReindex); bool ReadReindexing(bool &fReindex); bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos); bool WriteTxIndex(const std::vector> &list); bool WriteFlag(const std::string &name, bool fValue); bool ReadFlag(const std::string &name, bool &fValue); bool LoadBlockIndexGuts( const Config &config, std::function insertBlockIndex); }; #endif // BITCOIN_TXDB_H diff --git a/src/txmempool.h b/src/txmempool.h index fdeb970875..51ce335535 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -1,951 +1,951 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXMEMPOOL_H #define BITCOIN_TXMEMPOOL_H #include "amount.h" #include "coins.h" #include "indirectmap.h" #include "primitives/transaction.h" #include "random.h" #include "sync.h" #include #include #include #include #include #include #include #include #include #include #include class CAutoFile; class CBlockIndex; class Config; inline double AllowFreeThreshold() { return (144 * COIN) / (250 * SATOSHI); } inline bool AllowFree(double dPriority) { // Large (in bytes) low-priority (new, small-coin) transactions need a fee. return dPriority > AllowFreeThreshold(); } /** * Fake height value used in Coins to signify they are only in the memory * pool(since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; struct LockPoints { // Will be set to the blockchain height and median time past values that // would be necessary to satisfy all relative locktime constraints (BIP68) // of this tx given our view of block chain history int height; int64_t time; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. CBlockIndex *maxInputBlock; LockPoints() : height(0), time(0), maxInputBlock(nullptr) {} }; class CTxMemPool; /** \class CTxMemPoolEntry * * CTxMemPoolEntry stores data about the corresponding transaction, as well as * data about all in-mempool transactions that depend on the transaction * ("descendant" transactions). 
* * When a new entry is added to the mempool, we update the descendant state * (nCountWithDescendants, nSizeWithDescendants, and nModFeesWithDescendants) * for all ancestors of the newly added transaction. * * If updating the descendant state is skipped, we can mark the entry as * "dirty", and set nSizeWithDescendants/nModFeesWithDescendants to equal * nTxSize/nFee+feeDelta. (This can potentially happen during a reorg, where we * limit the amount of work we're willing to do to avoid consuming too much * CPU.) */ class CTxMemPoolEntry { private: CTransactionRef tx; //!< Cached to avoid expensive parent-transaction lookups Amount nFee; //!< ... and avoid recomputing tx size size_t nTxSize; //!< ... and billable size for billing size_t nTxBillableSize; //!< ... and modified size for priority size_t nModSize; //!< ... and total memory usage size_t nUsageSize; //!< Local time when entering the mempool int64_t nTime; //!< Priority when entering the mempool double entryPriority; //!< Chain height when entering the mempool unsigned int entryHeight; //!< Sum of all txin values that are already in blockchain Amount inChainInputValue; //!< keep track of transactions that spend a coinbase bool spendsCoinbase; //!< Total sigop plus P2SH sigops count int64_t sigOpCount; //!< Used for determining the priority of the transaction for mining in a //! block Amount feeDelta; //!< Track the height and time at which tx was final LockPoints lockPoints; // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. if nCountWithDescendants is 0, treat this entry as // dirty, and nSizeWithDescendants and nModFeesWithDescendants will not be // correct. //!< number of descendant transactions uint64_t nCountWithDescendants; //!< ... and size uint64_t nSizeWithDescendants; uint64_t nBillableSizeWithDescendants; //!< ... and total fees (all including us) Amount nModFeesWithDescendants; // Analogous statistics for ancestor transactions uint64_t nCountWithAncestors; uint64_t nSizeWithAncestors; uint64_t nBillableSizeWithAncestors; Amount nModFeesWithAncestors; int64_t nSigOpCountWithAncestors; public: CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee, int64_t _nTime, double _entryPriority, unsigned int _entryHeight, Amount _inChainInputValue, bool spendsCoinbase, int64_t nSigOpsCost, LockPoints lp); CTxMemPoolEntry(const CTxMemPoolEntry &other); const CTransaction &GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } /** * Fast calculation of lower bound of current priority as update from entry * priority. Only inputs that were originally in-chain will age. */ double GetPriority(unsigned int currentHeight) const; const Amount GetFee() const { return nFee; } size_t GetTxSize() const { return nTxSize; } size_t GetTxBillableSize() const { return nTxBillableSize; } int64_t GetTime() const { return nTime; } unsigned int GetHeight() const { return entryHeight; } int64_t GetSigOpCount() const { return sigOpCount; } Amount GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints &GetLockPoints() const { return lockPoints; } // Adjusts the descendant state, if this entry is not dirty. 
void UpdateDescendantState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount); // Adjusts the ancestor state void UpdateAncestorState(int64_t modifySize, int64_t modifyBillableSize, Amount modifyFee, int64_t modifyCount, int modifySigOps); // Updates the fee delta used for mining priority score, and the // modified fees with descendants. void UpdateFeeDelta(Amount feeDelta); // Update the LockPoints after a reorg void UpdateLockPoints(const LockPoints &lp); uint64_t GetCountWithDescendants() const { return nCountWithDescendants; } uint64_t GetSizeWithDescendants() const { return nSizeWithDescendants; } uint64_t GetBillableSizeWithDescendants() const { return nBillableSizeWithDescendants; } Amount GetModFeesWithDescendants() const { return nModFeesWithDescendants; } bool GetSpendsCoinbase() const { return spendsCoinbase; } uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } uint64_t GetBillableSizeWithAncestors() const { return nBillableSizeWithAncestors; } Amount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } //!< Index in mempool's vTxHashes mutable size_t vTxHashesIdx; }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. struct update_descendant_state { update_descendant_state(int64_t _modifySize, int64_t _modifyBillableSize, Amount _modifyFee, int64_t _modifyCount) : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), modifyFee(_modifyFee), modifyCount(_modifyCount) {} void operator()(CTxMemPoolEntry &e) { e.UpdateDescendantState(modifySize, modifyBillableSize, modifyFee, modifyCount); } private: int64_t modifySize; int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; }; struct update_ancestor_state { update_ancestor_state(int64_t _modifySize, int64_t _modifyBillableSize, Amount _modifyFee, int64_t _modifyCount, int64_t _modifySigOpsCost) : modifySize(_modifySize), modifyBillableSize(_modifyBillableSize), modifyFee(_modifyFee), modifyCount(_modifyCount), modifySigOpsCost(_modifySigOpsCost) {} void operator()(CTxMemPoolEntry &e) { e.UpdateAncestorState(modifySize, modifyBillableSize, modifyFee, modifyCount, modifySigOpsCost); } private: int64_t modifySize; int64_t modifyBillableSize; Amount modifyFee; int64_t modifyCount; int64_t modifySigOpsCost; }; struct update_fee_delta { - update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} + explicit update_fee_delta(Amount _feeDelta) : feeDelta(_feeDelta) {} void operator()(CTxMemPoolEntry &e) { e.UpdateFeeDelta(feeDelta); } private: Amount feeDelta; }; struct update_lock_points { - update_lock_points(const LockPoints &_lp) : lp(_lp) {} + explicit update_lock_points(const LockPoints &_lp) : lp(_lp) {} void operator()(CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } private: const LockPoints &lp; }; // extracts a transaction hash from CTxMempoolEntry or CTransactionRef struct mempoolentry_txid { typedef uint256 result_type; result_type operator()(const CTxMemPoolEntry &entry) const { return entry.GetTx().GetId(); } result_type operator()(const CTransactionRef &tx) const { return tx->GetId(); } }; /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all * descendants). 
*/ class CompareTxMemPoolEntryByDescendantScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { bool fUseADescendants = UseDescendantScore(a); bool fUseBDescendants = UseDescendantScore(b); double aModFee = (fUseADescendants ? a.GetModFeesWithDescendants() : a.GetModifiedFee()) / SATOSHI; double aSize = fUseADescendants ? a.GetSizeWithDescendants() : a.GetTxSize(); double bModFee = (fUseBDescendants ? b.GetModFeesWithDescendants() : b.GetModifiedFee()) / SATOSHI; double bSize = fUseBDescendants ? b.GetSizeWithDescendants() : b.GetTxSize(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = aModFee * bSize; double f2 = aSize * bModFee; if (f1 == f2) { return a.GetTime() >= b.GetTime(); } return f1 < f2; } // Calculate which score to use for an entry (avoiding division). bool UseDescendantScore(const CTxMemPoolEntry &a) const { double f1 = a.GetSizeWithDescendants() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (a.GetModFeesWithDescendants() / SATOSHI); return f2 > f1; } }; /** \class CompareTxMemPoolEntryByScore * * Sort by score of entry ((fee+delta)/size) in descending order */ class CompareTxMemPoolEntryByScore { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double f1 = b.GetTxSize() * (a.GetModifiedFee() / SATOSHI); double f2 = a.GetTxSize() * (b.GetModifiedFee() / SATOSHI); if (f1 == f2) { return b.GetTx().GetId() < a.GetTx().GetId(); } return f1 > f2; } }; class CompareTxMemPoolEntryByEntryTime { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { return a.GetTime() < b.GetTime(); } }; class CompareTxMemPoolEntryByAncestorFee { public: bool operator()(const CTxMemPoolEntry &a, const CTxMemPoolEntry &b) const { double aFees = a.GetModFeesWithAncestors() / SATOSHI; double aSize = a.GetSizeWithAncestors(); double bFees = b.GetModFeesWithAncestors() / SATOSHI; double bSize = b.GetSizeWithAncestors(); // Avoid division by rewriting (a/b > c/d) as (a*d > c*b). double f1 = aFees * bSize; double f2 = aSize * bFees; if (f1 == f2) { return a.GetTx().GetId() < b.GetTx().GetId(); } return f1 > f2; } }; // Multi_index tag names struct descendant_score {}; struct entry_time {}; struct mining_score {}; struct ancestor_score {}; class CBlockPolicyEstimator; /** * Information about a mempool transaction. */ struct TxMempoolInfo { /** The transaction itself */ CTransactionRef tx; /** Time the transaction entered the mempool. */ int64_t nTime; /** Feerate of the transaction. */ CFeeRate feeRate; /** The fee delta. */ Amount nFeeDelta; }; /** * Reason why a transaction was removed from the mempool, this is passed to the * notification signal. */ enum class MemPoolRemovalReason { //! Manually removed or unknown reason UNKNOWN = 0, //! Expired from mempool EXPIRY, //! Removed in size limiting SIZELIMIT, //! Removed for reorganization REORG, //! Removed for block BLOCK, //! Removed for conflict with in-block transaction CONFLICT, //! Removed for replacement REPLACED }; class SaltedTxidHasher { private: /** Salt */ const uint64_t k0, k1; public: SaltedTxidHasher(); size_t operator()(const uint256 &txid) const { return SipHashUint256(k0, k1, txid); } }; typedef std::pair TXModifier; /** * CTxMemPool stores valid-according-to-the-current-best-chain transactions that * may be included in the next block. * * Transactions are added when they are seen on the network (or created by the * local node), but not all transactions seen are added to the pool. 
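The comparator classes above order entries by fee rate without dividing, by rewriting a/b > c/d as a*d > c*b. A minimal standalone illustration with a hypothetical FeeAndSize type; the real comparators apply the same cross-multiplication in doubles over the modified fees and sizes.

#include <cassert>
#include <cstdint>

struct FeeAndSize {
    int64_t fee;  // satoshis
    int64_t size; // bytes, assumed > 0
};

// Returns true if a has a strictly higher fee rate than b, i.e.
// a.fee / a.size > b.fee / b.size, evaluated without division.
// The real comparators use doubles, which also sidesteps overflow.
bool HigherFeeRate(const FeeAndSize &a, const FeeAndSize &b) {
    return a.fee * b.size > b.fee * a.size;
}

int main() {
    assert(HigherFeeRate({2000, 500}, {3000, 1000}));   // 4 sat/B vs 3 sat/B
    assert(!HigherFeeRate({1000, 1000}, {2000, 2000})); // equal rates
}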
For * example, the following new transactions will not be added to the mempool: * - a transaction which doesn't meet the minimum fee requirements. * - a new transaction that double-spends an input of a transaction already in * the pool where the new transaction does not meet the Replace-By-Fee * requirements as defined in BIP 125. * - a non-standard transaction. * * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: * - transaction hash * - feerate [we use max(feerate of tx, feerate of tx with all descendants)] * - time in mempool * - mining score (feerate modified by any fee deltas from * PrioritiseTransaction) * * Note: the term "descendant" refers to in-mempool transactions that depend on * this one, while "ancestor" refers to in-mempool transactions that a given * transaction depends on. * * In order for the feerate sort to remain correct, we must update transactions * in the mempool when new descendants arrive. To facilitate this, we track the * set of in-mempool direct parents and direct children in mapLinks. Within each * CTxMemPoolEntry, we track the size and fees of all descendants. * * Usually when a new transaction is added to the mempool, it has no in-mempool * children (because any such children would be an orphan). So in * addUnchecked(), we: * - update a new entry's setMemPoolParents to include all in-mempool parents * - update the new entry's direct parents to include the new tx as a child * - update all ancestors of the transaction to include the new tx's size/fee * * When a transaction is removed from the mempool, we must: * - update all in-mempool parents to not track the tx in setMemPoolChildren * - update all ancestors to not include the tx's size/fees in descendant state * - update all in-mempool children to not include it as a parent * * These happen in UpdateForRemoveFromMempool(). (Note that when removing a * transaction along with its descendants, we must calculate that set of * transactions to be removed before doing the removal, or else the mempool can * be in an inconsistent state where it's impossible to walk the ancestors of a * transaction.) * * In the event of a reorg, the assumption that a newly added tx has no * in-mempool children is false. In particular, the mempool is in an * inconsistent state while new transactions are being added, because there may * be descendant transactions of a tx coming from a disconnected block that are * unreachable from just looking at transactions in the mempool (the linking * transactions may also be in the disconnected block, waiting to be added). * Because of this, there's not much benefit in trying to search for in-mempool * children in addUnchecked(). Instead, in the special case of transactions * being added from a disconnected block, we require the caller to clean up the * state, to account for in-mempool, out-of-block descendants for all the * in-block transactions by calling UpdateTransactionsFromBlock(). Note that * until this is called, the mempool state is not consistent, and in particular * mapLinks may not be correct (and therefore functions like * CalculateMemPoolAncestors() and CalculateDescendants() that rely on them to * walk the mempool are not generally safe to use). * * Computational limits: * * Updating all in-mempool ancestors of a newly added transaction can be slow, * if no bound exists on how many in-mempool ancestors there may be. 
* CalculateMemPoolAncestors() takes configurable limits that are designed to * prevent these calculations from being too CPU intensive. * * Adding transactions from a disconnected block can be very time consuming, * because we don't have a way to limit the number of in-mempool descendants. To * bound CPU processing, we limit the amount of work we're willing to do to * properly update the descendant information for a tx being added from a * disconnected block. If we would exceed the limit, then we instead mark the * entry as "dirty", and set the feerate for sorting purposes to be equal the * feerate of the transaction without any descendants. */ class CTxMemPool { private: //!< Value n means that n times in 2^32 we check. uint32_t nCheckFrequency; unsigned int nTransactionsUpdated; CBlockPolicyEstimator *minerPolicyEstimator; //!< sum of all mempool tx's virtual sizes. uint64_t totalTxSize; //!< sum of dynamic memory usage of all the map elements (NOT the maps //! themselves) uint64_t cachedInnerUsage; mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; //!< minimum fee to get into the pool, decreases exponentially mutable double rollingMinimumFeeRate; void trackPackageRemoved(const CFeeRate &rate); public: // public only for testing static const int ROLLING_FEE_HALFLIFE = 60 * 60 * 12; typedef boost::multi_index_container< CTxMemPoolEntry, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< mempoolentry_txid, SaltedTxidHasher>, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByDescendantScore>, // sorted by entry time boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByEntryTime>, // sorted by score (for mining prioritization) boost::multi_index::ordered_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByScore>, // sorted by fee rate with ancestors boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::identity, CompareTxMemPoolEntryByAncestorFee>>> indexed_transaction_set; mutable CCriticalSection cs; indexed_transaction_set mapTx; typedef indexed_transaction_set::nth_index<0>::type::iterator txiter; //!< All tx hashes/entries in mapTx, in random order std::vector> vTxHashes; struct CompareIteratorByHash { bool operator()(const txiter &a, const txiter &b) const { return a->GetTx().GetId() < b->GetTx().GetId(); } }; typedef std::set setEntries; const setEntries &GetMemPoolParents(txiter entry) const; const setEntries &GetMemPoolChildren(txiter entry) const; private: typedef std::map cacheMap; struct TxLinks { setEntries parents; setEntries children; }; typedef std::map txlinksMap; txlinksMap mapLinks; void UpdateParent(txiter entry, txiter parent, bool add); void UpdateChild(txiter entry, txiter child, bool add); std::vector GetSortedDepthAndScore() const; public: indirectmap mapNextTx; std::map mapDeltas; /** Create a new CTxMemPool. */ CTxMemPool(); ~CTxMemPool(); /** * If sanity-checking is turned on, check makes sure the pool is consistent * (does not contain two transactions that spend the same inputs, all inputs * are in the mapNextTx array). If sanity-checking is turned off, check does * nothing. 
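mapTx above is a boost::multi_index_container: one underlying set of entries exposed through several synchronized indexes (unique by txid, ordered by descendant score, entry time, mining score, and ancestor score). A much-reduced sketch of the same pattern with a hypothetical Entry type and only two indexes; it is not the actual mapTx definition.

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <string>

struct Entry {
    std::string txid;
    double score;
};

struct by_txid {};
struct by_score {};

namespace bmi = boost::multi_index;

typedef boost::multi_index_container<
    Entry,
    bmi::indexed_by<
        // Unique hash lookup, analogous to mapTx's txid index.
        bmi::hashed_unique<bmi::tag<by_txid>,
                           bmi::member<Entry, std::string, &Entry::txid>>,
        // Sorted view, analogous to the descendant_score index.
        bmi::ordered_non_unique<bmi::tag<by_score>,
                                bmi::member<Entry, double, &Entry::score>>>>
    EntrySet;

int main() {
    EntrySet set;
    Entry a{"aa", 2.0};
    Entry b{"bb", 1.0};
    set.insert(a);
    set.insert(b);
    // Same elements, two views: hash lookup by txid, ordered walk by score.
    bool found = set.get<by_txid>().count("aa") == 1;
    const Entry &lowest = *set.get<by_score>().begin();
    (void)found;
    (void)lowest;
}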
*/ void check(const CCoinsViewCache *pcoins) const; void setSanityCheck(double dFrequency = 1.0) { nCheckFrequency = dFrequency * 4294967295.0; } // addUnchecked must updated state for all ancestors of a given transaction, // to track size/count of descendant transactions. First version of // addUnchecked can be used to have it call CalculateMemPoolAncestors(), and // then invoke the second version. bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, bool validFeeEstimate = true); bool addUnchecked(const uint256 &hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate = true); void removeRecursive( const CTransaction &tx, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); void removeForReorg(const Config &config, const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags); void removeConflicts(const CTransaction &tx); void removeForBlock(const std::vector &vtx, unsigned int nBlockHeight); void clear(); // lock free void _clear(); bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb); void queryHashes(std::vector &vtxid); bool isSpent(const COutPoint &outpoint); unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** * Check that none of this transactions inputs are in the mempool, and thus * the tx is not dependent on other mempool transactions to be included in a * block. */ bool HasNoInputsOf(const CTransaction &tx) const; /** Affect CreateNewBlock prioritisation of transactions */ void PrioritiseTransaction(const uint256 hash, const std::string strHash, double dPriorityDelta, const Amount nFeeDelta); void ApplyDeltas(const uint256 hash, double &dPriorityDelta, Amount &nFeeDelta) const; void ClearPrioritisation(const uint256 hash); public: /** * Remove a set of transactions from the mempool. If a transaction is in * this set, then all in-mempool descendants must also be in the set, unless * this transaction is being removed for being in a block. Set * updateDescendants to true when removing a tx that was in a block, so that * any in-mempool descendants have their ancestor state updated. */ void RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); /** * When adding transactions from a disconnected block back to the mempool, * new mempool entries may have children in the mempool (which is generally * not the case when otherwise adding transactions). * UpdateTransactionsFromBlock() will find child transactions and update the * descendant state for each transaction in txidsToUpdate (excluding any * child transactions present in txidsToUpdate, which are already accounted * for). * Note: txidsToUpdate should be the set of transactions from the * disconnected block that have been accepted back into the mempool. */ void UpdateTransactionsFromBlock(const std::vector &txidsToUpdate); /** * Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) * limitAncestorCount = max number of ancestors * limitAncestorSize = max size of ancestors * limitDescendantCount = max number of descendants any ancestor can have * limitDescendantSize = max size of descendants any ancestor can have * errString = populated with error reason if any limits are hit * fSearchForParents = whether to search a tx's vin for in-mempool parents, * or look up parents from mapLinks. 
Must be true for entries not in the * mempool */ bool CalculateMemPoolAncestors( const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true) const; /** * Populate setDescendants with all in-mempool descendants of hash. * Assumes that setDescendants includes all in-mempool descendants of * anything already in it. */ void CalculateDescendants(txiter it, setEntries &setDescendants) const; /** * The minimum fee to get into the mempool, which may itself not be enough * for larger-sized transactions. The incrementalRelayFee policy variable is * used to bound the time it takes the fee rate to go back down all the way * to 0. When the feerate would otherwise be half of this, it is set to 0 * instead. */ CFeeRate GetMinFee(size_t sizelimit) const; /** * Remove transactions from the mempool until its dynamic size is <= * sizelimit. pvNoSpendsRemaining, if set, will be populated with the list * of outpoints which are not in mempool which no longer have any spends in * this mempool. */ void TrimToSize(size_t sizelimit, std::vector *pvNoSpendsRemaining = nullptr); /** * Expire all transaction (and their dependencies) in the mempool older than * time. Return the number of removed transactions. */ int Expire(int64_t time); /** * Reduce the size of the mempool by expiring and then trimming the mempool. */ void LimitSize(size_t limit, unsigned long age); /** * Returns false if the transaction is in the mempool and not within the * chain limit specified. */ bool TransactionWithinChainLimit(const uint256 &txid, size_t chainLimit) const; unsigned long size() { LOCK(cs); return mapTx.size(); } uint64_t GetTotalTxSize() { LOCK(cs); return totalTxSize; } bool exists(uint256 hash) const { LOCK(cs); return mapTx.count(hash) != 0; } bool exists(const COutPoint &outpoint) const { LOCK(cs); auto it = mapTx.find(outpoint.GetTxId()); return it != mapTx.end() && outpoint.GetN() < it->GetTx().vout.size(); } CTransactionRef get(const uint256 &hash) const; TxMempoolInfo info(const uint256 &hash) const; std::vector infoAll() const; /** * Estimate fee rate needed to get into the next nBlocks. If no answer can * be given at nBlocks, return an estimate at the lowest number of blocks * where one can be given. */ CFeeRate estimateSmartFee(int nBlocks, int *answerFoundAtBlocks = nullptr) const; /** Estimate fee rate needed to get into the next nBlocks */ CFeeRate estimateFee(int nBlocks) const; /** Write/Read estimates to disk */ bool WriteFeeEstimates(CAutoFile &fileout) const; bool ReadFeeEstimates(CAutoFile &filein); size_t DynamicMemoryUsage() const; boost::signals2::signal NotifyEntryAdded; boost::signals2::signal NotifyEntryRemoved; private: /** * UpdateForDescendants is used by UpdateTransactionsFromBlock to update the * descendants for a single transaction that has been added to the mempool * but may have child transactions in the mempool, eg during a chain reorg. * setExclude is the set of descendant transactions in the mempool that must * not be accounted for (because any descendants in setExclude were added to * the mempool after the transaction being updated and hence their state is * already reflected in the parent state). * * cachedDescendants will be updated with the descendants of the transaction * being updated, so that future invocations don't need to walk the same * transaction again, if encountered in another transaction chain. 
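GetMinFee() above describes a rolling minimum fee that decays exponentially (with ROLLING_FEE_HALFLIFE, defined earlier as 12 hours) and is snapped to zero once it would fall below half the incremental relay fee. A standalone sketch of that decay rule; the function name and parameters are illustrative, and the real implementation tracks the rate statefully rather than recomputing it from a fixed starting point.

#include <cmath>
#include <cstdint>
#include <cstdio>

static const int64_t ROLLING_FEE_HALFLIFE = 60 * 60 * 12; // seconds

double DecayedMinFeeRate(double lastRate, int64_t secondsSinceUpdate,
                         double halfIncrementalRelayRate) {
    // Halve the rate for every half-life elapsed (fractional exponents allowed).
    double decayed = lastRate * std::pow(2.0, -double(secondsSinceUpdate) /
                                                  double(ROLLING_FEE_HALFLIFE));
    // Below half the incremental relay fee, stop charging a floor at all.
    return decayed < halfIncrementalRelayRate ? 0.0 : decayed;
}

int main() {
    // After one half-life, a 10 sat/B floor becomes 5 sat/B.
    std::printf("%.2f\n", DecayedMinFeeRate(10.0, ROLLING_FEE_HALFLIFE, 0.5));
}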
*/ void UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set &setExclude); /** * Update ancestors of hash to add/remove it as a descendant transaction. */ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors); /** Set ancestor state for an entry */ void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors); /** * For each transaction being removed, update ancestors and any direct * children. If updateDescendants is true, then also update in-mempool * descendants' ancestor state. */ void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants); /** Sever link between specified transaction and direct children. */ void UpdateChildrenForRemoval(txiter entry); /** * Before calling removeUnchecked for a given transaction, * UpdateForRemoveFromMempool must be called on the entire (dependent) set * of transactions being removed at the same time. We use each * CTxMemPoolEntry's setMemPoolParents in order to walk ancestors of a given * transaction that is removed, so we can't remove intermediate transactions * in a chain before we've updated all the state for the removal. */ void removeUnchecked( txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN); }; /** * CCoinsView that brings transactions from a memorypool into view. * It does not check for spendings by memory pool transactions. */ class CCoinsViewMemPool : public CCoinsViewBacked { protected: const CTxMemPool &mempool; public: CCoinsViewMemPool(CCoinsView *baseIn, const CTxMemPool &mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; }; // We want to sort transactions by coin age priority typedef std::pair TxCoinAgePriority; struct TxCoinAgePriorityCompare { bool operator()(const TxCoinAgePriority &a, const TxCoinAgePriority &b) { if (a.first == b.first) { // Reverse order to make sort less than return CompareTxMemPoolEntryByScore()(*(b.second), *(a.second)); } return a.first < b.first; } }; /** * DisconnectedBlockTransactions * * During the reorg, it's desirable to re-add previously confirmed transactions * to the mempool, so that anything not re-confirmed in the new chain is * available to be mined. However, it's more efficient to wait until the reorg * is complete and process all still-unconfirmed transactions at that time, * since we expect most confirmed transactions to (typically) still be * confirmed in the new chain, and re-accepting to the memory pool is expensive * (and therefore better to not do in the middle of reorg-processing). * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. 
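The rationale above for DisconnectedBlockTransactions is: queue the transactions of disconnected blocks in order, drop any that re-confirm in the new chain, and re-submit only the remainder once the reorg completes. A simplified stand-in with plain containers; the real class is a boost::multi_index that also tracks memory usage, and the ordering and names here are illustrative.

#include <deque>
#include <string>
#include <unordered_set>
#include <vector>

struct DisconnectQueue {
    std::deque<std::string> inOrder;      // txids, assumed oldest block first
    std::unordered_set<std::string> live; // fast membership test

    // Called per disconnected block while walking back to the fork point.
    void AddFromDisconnectedBlock(const std::vector<std::string> &txids) {
        for (const auto &id : txids) {
            if (live.insert(id).second) {
                inOrder.push_back(id);
            }
        }
    }

    // Called per connected block on the new chain: anything re-confirmed no
    // longer needs to go back to the mempool.
    void RemoveConfirmedInNewChain(const std::vector<std::string> &txids) {
        for (const auto &id : txids) {
            live.erase(id);
        }
    }

    // Called once the reorg completes; whatever is left is handed back for
    // mempool re-acceptance, preserving the recorded order.
    std::vector<std::string> DrainForMempool() {
        std::vector<std::string> out;
        for (const auto &id : inOrder) {
            if (live.count(id)) {
                out.push_back(id);
            }
        }
        inOrder.clear();
        live.clear();
        return out;
    }
};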
*/ // multi_index tag names struct txid_index {}; struct insertion_order {}; class DisconnectedBlockTransactions { private: typedef boost::multi_index_container< CTransactionRef, boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique< boost::multi_index::tag, mempoolentry_txid, SaltedTxidHasher>, // sorted by order in the blockchain boost::multi_index::sequenced< boost::multi_index::tag>>> indexed_disconnected_transactions; indexed_disconnected_transactions queuedTx; uint64_t cachedInnerUsage = 0; void addTransaction(const CTransactionRef &tx) { queuedTx.insert(tx); cachedInnerUsage += RecursiveDynamicUsage(tx); } public: // It's almost certainly a logic bug if we don't clear out queuedTx before // destruction, as we add to it while disconnecting blocks, and then we // need to re-process remaining transactions to ensure mempool consistency. // For now, assert() that we've emptied out this object on destruction. // This assert() can always be removed if the reorg-processing code were // to be refactored such that this assumption is no longer true (for // instance if there was some other way we cleaned up the mempool after a // reorg, besides draining this object). ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); } // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as // no exact formula for boost::multi_index_contained is implemented. size_t DynamicMemoryUsage() const { return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void *)) * queuedTx.size() + cachedInnerUsage; } const indexed_disconnected_transactions &GetQueuedTx() const { return queuedTx; } // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector &vtx); // Remove entries based on txid_index, and update memory usage. void removeForBlock(const std::vector &vtx) { // Short-circuit in the common case of a block being added to the tip if (queuedTx.empty()) { return; } for (auto const &tx : vtx) { auto it = queuedTx.find(tx->GetId()); if (it != queuedTx.end()) { cachedInnerUsage -= RecursiveDynamicUsage(*it); queuedTx.erase(it); } } } // Remove an entry by insertion_order index, and update memory usage. void removeEntry(indexed_disconnected_transactions::index< insertion_order>::type::iterator entry) { cachedInnerUsage -= RecursiveDynamicUsage(*entry); queuedTx.get().erase(entry); } void clear() { cachedInnerUsage = 0; queuedTx.clear(); } /** * Make mempool consistent after a reorg, by re-adding or recursively * erasing disconnected block transactions from the mempool, and also * removing any other transactions from the mempool that are no longer valid * given the new tip/height. * * Note: we assume that disconnectpool only contains transactions that are * NOT confirmed in the current chain nor already in the mempool (otherwise, * in-mempool descendants of such transactions would be removed). * * Passing fAddToMempool=false will skip trying to add the transactions * back, and instead just erase from the mempool as needed. 
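DynamicMemoryUsage() above estimates the container overhead as the payload plus six pointers per element, rounded up to an allocation size, plus the cached recursive usage of the queued transactions. The same arithmetic with a hypothetical rounding rule standing in for memusage::MallocUsage, which in the real code is allocator-aware.

#include <cstddef>
#include <cstdio>
#include <memory>

// Hypothetical stand-in for memusage::MallocUsage: round a requested size up
// to a plausible allocator bucket.
size_t MallocUsageApprox(size_t alloc) {
    return (alloc + 15) & ~size_t(15); // round up to a 16-byte boundary
}

size_t EstimateQueueUsage(size_t entryCount, size_t cachedInnerUsage) {
    // Per element: the shared-pointer payload plus six pointers of assumed
    // multi_index bookkeeping, rounded to an allocation size.
    size_t perEntry =
        MallocUsageApprox(sizeof(std::shared_ptr<void>) + 6 * sizeof(void *));
    return perEntry * entryCount + cachedInnerUsage;
}

int main() {
    std::printf("%zu\n", EstimateQueueUsage(1000, 250000));
}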
*/ void updateMempoolForReorg(const Config &config, bool fAddToMempool); }; #endif // BITCOIN_TXMEMPOOL_H diff --git a/src/uint256.h b/src/uint256.h index 661aaff724..a1e74de821 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -1,173 +1,173 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_UINT256_H #define BITCOIN_UINT256_H #include "crypto/common.h" #include #include #include #include #include #include /** Template base class for fixed-sized opaque blobs. */ template class base_blob { protected: enum { WIDTH = BITS / 8 }; uint8_t data[WIDTH]; public: base_blob() { memset(data, 0, sizeof(data)); } explicit base_blob(const std::vector &vch); bool IsNull() const { for (int i = 0; i < WIDTH; i++) { if (data[i] != 0) { return false; } } return true; } void SetNull() { memset(data, 0, sizeof(data)); } inline int Compare(const base_blob &other) const { for (size_t i = 0; i < sizeof(data); i++) { uint8_t a = data[sizeof(data) - 1 - i]; uint8_t b = other.data[sizeof(data) - 1 - i]; if (a > b) { return 1; } if (a < b) { return -1; } } return 0; } friend inline bool operator==(const base_blob &a, const base_blob &b) { return a.Compare(b) == 0; } friend inline bool operator!=(const base_blob &a, const base_blob &b) { return a.Compare(b) != 0; } friend inline bool operator<(const base_blob &a, const base_blob &b) { return a.Compare(b) < 0; } friend inline bool operator<=(const base_blob &a, const base_blob &b) { return a.Compare(b) <= 0; } friend inline bool operator>(const base_blob &a, const base_blob &b) { return a.Compare(b) > 0; } friend inline bool operator>=(const base_blob &a, const base_blob &b) { return a.Compare(b) >= 0; } std::string GetHex() const; void SetHex(const char *psz); void SetHex(const std::string &str); std::string ToString() const { return GetHex(); } uint8_t *begin() { return &data[0]; } uint8_t *end() { return &data[WIDTH]; } const uint8_t *begin() const { return &data[0]; } const uint8_t *end() const { return &data[WIDTH]; } unsigned int size() const { return sizeof(data); } uint64_t GetUint64(int pos) const { const uint8_t *ptr = data + pos * 8; return uint64_t(ptr[0]) | (uint64_t(ptr[1]) << 8) | (uint64_t(ptr[2]) << 16) | (uint64_t(ptr[3]) << 24) | (uint64_t(ptr[4]) << 32) | (uint64_t(ptr[5]) << 40) | (uint64_t(ptr[6]) << 48) | (uint64_t(ptr[7]) << 56); } template void Serialize(Stream &s) const { s.write((char *)data, sizeof(data)); } template void Unserialize(Stream &s) { s.read((char *)data, sizeof(data)); } }; /** * 160-bit opaque blob. * @note This type is called uint160 for historical reasons only. It is an * opaque blob of 160 bits and has no integer operations. */ class uint160 : public base_blob<160> { public: uint160() {} - uint160(const base_blob<160> &b) : base_blob<160>(b) {} + explicit uint160(const base_blob<160> &b) : base_blob<160>(b) {} explicit uint160(const std::vector &vch) : base_blob<160>(vch) {} }; /** * 256-bit opaque blob. * @note This type is called uint256 for historical reasons only. It is an * opaque blob of 256 bits and has no integer operations. Use arith_uint256 if * those are required. 
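base_blob::GetUint64() above assembles eight stored bytes into a 64-bit value in little-endian order (GetCheapHash() below relies on it for the first eight bytes). The same byte assembly as a free function, for illustration only.

#include <cstdint>
#include <cstdio>

uint64_t ReadLE64Bytes(const uint8_t *ptr) {
    // Byte 0 is least significant, byte 7 most significant.
    return uint64_t(ptr[0]) | (uint64_t(ptr[1]) << 8) |
           (uint64_t(ptr[2]) << 16) | (uint64_t(ptr[3]) << 24) |
           (uint64_t(ptr[4]) << 32) | (uint64_t(ptr[5]) << 40) |
           (uint64_t(ptr[6]) << 48) | (uint64_t(ptr[7]) << 56);
}

int main() {
    const uint8_t bytes[8] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    std::printf("%llu\n", (unsigned long long)ReadLE64Bytes(bytes)); // prints 1
}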
*/ class uint256 : public base_blob<256> { public: uint256() {} - uint256(const base_blob<256> &b) : base_blob<256>(b) {} + explicit uint256(const base_blob<256> &b) : base_blob<256>(b) {} explicit uint256(const std::vector &vch) : base_blob<256>(vch) {} /** * A cheap hash function that just returns 64 bits from the result, it can * be used when the contents are considered uniformly random. It is not * appropriate when the value can easily be influenced from outside as e.g. * a network adversary could provide values to trigger worst-case behavior. */ uint64_t GetCheapHash() const { return ReadLE64(data); } }; /** * uint256 from const char *. * This is a separate function because the constructor uint256(const char*) can * result in dangerously catching uint256(0). */ inline uint256 uint256S(const char *str) { uint256 rv; rv.SetHex(str); return rv; } /** * uint256 from std::string. * This is a separate function because the constructor uint256(const std::string * &str) can result in dangerously catching uint256(0) via std::string(const * char*). */ inline uint256 uint256S(const std::string &str) { uint256 rv; rv.SetHex(str); return rv; } inline uint160 uint160S(const char *str) { uint160 rv; rv.SetHex(str); return rv; } inline uint160 uint160S(const std::string &str) { uint160 rv; rv.SetHex(str); return rv; } #endif // BITCOIN_UINT256_H diff --git a/src/undo.h b/src/undo.h index d89b98d279..7df53ae2ad 100644 --- a/src/undo.h +++ b/src/undo.h @@ -1,144 +1,144 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_UNDO_H #define BITCOIN_UNDO_H #include "coins.h" #include "compressor.h" #include "consensus/consensus.h" #include "serialize.h" class CBlock; class CBlockIndex; class CCoinsViewCache; class CValidationState; /** * Undo information for a CTxIn * * Contains the prevout's CTxOut being spent, and its metadata as well (coinbase * or not, height). The serialization contains a dummy value of zero. This is be * compatible with older versions which expect to see the transaction version * there. */ class TxInUndoSerializer { const Coin *pcoin; public: - TxInUndoSerializer(const Coin *pcoinIn) : pcoin(pcoinIn) {} + explicit TxInUndoSerializer(const Coin *pcoinIn) : pcoin(pcoinIn) {} template void Serialize(Stream &s) const { ::Serialize( s, VARINT(pcoin->GetHeight() * 2 + (pcoin->IsCoinBase() ? 1 : 0))); if (pcoin->GetHeight() > 0) { // Required to maintain compatibility with older undo format. ::Serialize(s, uint8_t(0)); } ::Serialize(s, CTxOutCompressor(REF(pcoin->GetTxOut()))); } }; class TxInUndoDeserializer { Coin *pcoin; public: - TxInUndoDeserializer(Coin *pcoinIn) : pcoin(pcoinIn) {} + explicit TxInUndoDeserializer(Coin *pcoinIn) : pcoin(pcoinIn) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; ::Unserialize(s, VARINT(nCode)); uint32_t nHeight = nCode / 2; bool fCoinBase = nCode & 1; if (nHeight > 0) { // Old versions stored the version number for the last spend of a // transaction's outputs. Non-final spends were indicated with // height = 0. 
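TxInUndoSerializer and TxInUndoDeserializer, shown above and continued just below, encode the spent coin's height and coinbase flag as a single varint: height times two plus the coinbase bit. A standalone pack/unpack of just that code; the function names are illustrative.

#include <cassert>
#include <cstdint>

uint32_t PackUndoCode(uint32_t height, bool fCoinBase) {
    return height * 2 + (fCoinBase ? 1 : 0);
}

void UnpackUndoCode(uint32_t nCode, uint32_t &height, bool &fCoinBase) {
    height = nCode / 2;
    fCoinBase = (nCode & 1) != 0;
}

int main() {
    uint32_t height = 0;
    bool fCoinBase = false;
    UnpackUndoCode(PackUndoCode(123456, true), height, fCoinBase);
    assert(height == 123456 && fCoinBase);
}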
int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); } CTxOut txout; ::Unserialize(s, REF(CTxOutCompressor(REF(txout)))); *pcoin = Coin(std::move(txout), nHeight, fCoinBase); } }; static const size_t MAX_INPUTS_PER_TX = MAX_TX_SIZE / ::GetSerializeSize(CTxIn(), SER_NETWORK, PROTOCOL_VERSION); /** Restore the UTXO in a Coin at a given COutPoint */ class CTxUndo { public: // Undo information for all txins std::vector vprevout; template void Serialize(Stream &s) const { // TODO: avoid reimplementing vector serializer. uint64_t count = vprevout.size(); ::Serialize(s, COMPACTSIZE(REF(count))); for (const auto &prevout : vprevout) { ::Serialize(s, REF(TxInUndoSerializer(&prevout))); } } template void Unserialize(Stream &s) { // TODO: avoid reimplementing vector deserializer. uint64_t count = 0; ::Unserialize(s, COMPACTSIZE(count)); if (count > MAX_INPUTS_PER_TX) { throw std::ios_base::failure("Too many input undo records"); } vprevout.resize(count); for (auto &prevout : vprevout) { ::Unserialize(s, REF(TxInUndoDeserializer(&prevout))); } } }; /** Undo information for a CBlock */ class CBlockUndo { public: // For all but the coinbase std::vector vtxundo; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(vtxundo); } }; enum DisconnectResult { // All good. DISCONNECT_OK, // Rolled back, but UTXO set was inconsistent with block. DISCONNECT_UNCLEAN, // Something else went wrong. DISCONNECT_FAILED, }; /** * Restore the UTXO in a Coin at a given COutPoint. * @param undo The Coin to be restored. * @param view The coins view to which to apply the changes. * @param out The out point that corresponds to the tx input. * @return A DisconnectResult */ DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view, const COutPoint &out); /** * Undo a block from the block and the undoblock data. * See DisconnectBlock for more details. */ DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo, const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &coins); #endif // BITCOIN_UNDO_H diff --git a/src/validation.cpp b/src/validation.cpp index 20d3ffb82a..167a9ce458 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,5523 +1,5523 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "validation.h" #include "arith_uint256.h" #include "blockindexworkcomparator.h" #include "blockvalidity.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" #include "config.h" #include "consensus/activation.h" #include "consensus/consensus.h" #include "consensus/merkle.h" #include "consensus/tx_verify.h" #include "consensus/validation.h" #include "fs.h" #include "hash.h" #include "init.h" #include "policy/fees.h" #include "policy/policy.h" #include "pow.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "random.h" #include "reverse_iterator.h" #include "script/script.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "tinyformat.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "undo.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include "validationinterface.h" #include "warnings.h" #include #include #include #include #include #include #if defined(NDEBUG) #error "Bitcoin cannot be compiled without assertions." #endif /** * Global state */ CCriticalSection cs_main; BlockMap mapBlockIndex; CChain chainActive; CBlockIndex *pindexBestHeader = nullptr; CWaitableCriticalSection g_best_block_mutex; CConditionVariable g_best_block_cv; uint256 g_best_block; int nScriptCheckThreads = 0; std::atomic_bool fImporting(false); bool fReindex = false; bool fTxIndex = false; bool fHavePruned = false; bool fPruneMode = false; bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; bool fRequireStandard = true; bool fCheckBlockIndex = false; bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; uint256 hashAssumeValid; arith_uint256 nMinimumChainWork; Amount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CTxMemPool g_mempool; static void CheckBlockIndex(const Consensus::Params &consensusParams); /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; const std::string strMessageMagic = "Bitcoin Signed Message:\n"; // Internal stuff namespace { CBlockIndex *pindexBestInvalid; CBlockIndex *pindexBestParked; /** * The best finalized block. * This block cannot be reorged in any way, shape or form. */ CBlockIndex const *pindexFinalized; /** * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself * and all ancestors) and as good as our current tip or better. Entries may be * failed, though, and pruning nodes may be missing the data for the block. */ std::set setBlockIndexCandidates; /** * All pairs A->B, where A (or one of its ancestors) misses transactions, but B * has transactions. Pruned nodes may have entries where B is missing data. */ std::multimap mapBlocksUnlinked; CCriticalSection cs_LastBlockFile; std::vector vinfoBlockFile; int nLastBlockFile = 0; /** * Global flag to indicate we should check to see if there are block/undo files * that should be deleted. Set on startup or if we allocate more file space when * we're in prune mode. */ bool fCheckForPruning = false; /** * Every received block is assigned a unique and increasing identifier, so we * know which one to give priority in case of a fork. * Blocks loaded from disk are assigned id 0, so start the counter at 1. */ std::atomic nBlockSequenceId{1}; /** Decreasing counter (used by subsequent preciousblock calls). 
*/ int32_t nBlockReverseSequenceId = -1; /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; /** Dirty block index entries. */ std::set setDirtyBlockIndex; /** Dirty block file entries. */ std::set setDirtyFileInfo; } // namespace CBlockIndex *FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator) { // Find the first block the caller has in the main chain for (const uint256 &hash : locator.vHave) { BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = (*mi).second; if (chain.Contains(pindex)) { return pindex; } if (pindex->GetAncestor(chain.Height()) == chain.Tip()) { return chain.Tip(); } } } return chain.Genesis(); } std::unique_ptr pcoinsdbview; std::unique_ptr pcoinsTip; std::unique_ptr pblocktree; enum FlushStateMode { FLUSH_STATE_NONE, FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS }; // See definition for documentation static bool FlushStateToDisk(const CChainParams &chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight = 0); static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight); static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight); static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false); static uint32_t GetBlockScriptFlags(const Config &config, const CBlockIndex *pChainTip); bool TestLockPointValidity(const LockPoints *lp) { AssertLockHeld(cs_main); assert(lp); // If there are relative lock times then the maxInputBlock will be set // If there are no relative lock times, the LockPoints don't depend on the // chain if (lp->maxInputBlock) { // Check whether chainActive is an extension of the block at which the // LockPoints // calculation was valid. If not LockPoints are no longer valid if (!chainActive.Contains(lp->maxInputBlock)) { return false; } } // LockPoints still valid return true; } bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints *lp, bool useExistingLockPoints) { AssertLockHeld(cs_main); AssertLockHeld(g_mempool.cs); CBlockIndex *tip = chainActive.Tip(); CBlockIndex index; index.pprev = tip; // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate height based // locks because when SequenceLocks() is called within ConnectBlock(), the // height of the block *being* evaluated is what is used. 
Thus if we want to // know if a transaction can be part of the *next* block, we need to use one // more than chainActive.Height() index.nHeight = tip->nHeight + 1; std::pair lockPair; if (useExistingLockPoints) { assert(lp); lockPair.first = lp->height; lockPair.second = lp->time; } else { // pcoinsTip contains the UTXO set for chainActive.Tip() CCoinsViewMemPool viewMemPool(pcoinsTip.get(), g_mempool); std::vector prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { const CTxIn &txin = tx.vin[txinIndex]; Coin coin; if (!viewMemPool.GetCoin(txin.prevout, coin)) { return error("%s: Missing input", __func__); } if (coin.GetHeight() == MEMPOOL_HEIGHT) { // Assume all mempool transaction confirm in the next block prevheights[txinIndex] = tip->nHeight + 1; } else { prevheights[txinIndex] = coin.GetHeight(); } } lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index); if (lp) { lp->height = lockPair.first; lp->time = lockPair.second; // Also store the hash of the block with the highest height of all // the blocks which have sequence locked prevouts. This hash needs // to still be on the chain for these LockPoint calculations to be // valid. // Note: It is impossible to correctly calculate a maxInputBlock if // any of the sequence locked inputs depend on unconfirmed txs, // except in the special case where the relative lock time/height is // 0, which is equivalent to no sequence lock. Since we assume input // height of tip+1 for mempool txs and test the resulting lockPair // from CalculateSequenceLocks against tip+1. We know // EvaluateSequenceLocks will fail if there was a non-zero sequence // lock on a mempool input, so we can use the return value of // CheckSequenceLocks to indicate the LockPoints validity. int maxInputHeight = 0; for (int height : prevheights) { // Can ignore mempool inputs since we'll fail if they had // non-zero locks. if (height != tip->nHeight + 1) { maxInputHeight = std::max(maxInputHeight, height); } } lp->maxInputBlock = tip->GetAncestor(maxInputHeight); } } return EvaluateSequenceLocks(index, lockPair); } /** Convert CValidationState to a human-readable message for logging */ std::string FormatStateMessage(const CValidationState &state) { return strprintf( "%s%s (code %i)", state.GetRejectReason(), state.GetDebugMessage().empty() ? "" : ", " + state.GetDebugMessage(), state.GetRejectCode()); } static bool IsCurrentForFeeEstimation() { AssertLockHeld(cs_main); if (IsInitialBlockDownload()) { return false; } if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) { return false; } if (chainActive.Height() < pindexBestHeader->nHeight - 1) { return false; } return true; } static bool IsMagneticAnomalyEnabledForCurrentBlock(const Config &config) { AssertLockHeld(cs_main); return IsMagneticAnomalyEnabled(config, chainActive.Tip()); } // Command-line argument "-replayprotectionactivationtime=" will // cause the node to switch to replay protected SigHash ForkID value when the // median timestamp of the previous 11 blocks is greater than or equal to // . Defaults to the pre-defined timestamp when not set. 
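// For illustration only (hypothetical timestamps, not defined anywhere in
// this code): with -replayprotectionactivationtime=1542300000, a block whose
// parent has a median time past of 1542299999 is still validated with the
// legacy sighash, while a parent MTP of 1542300000 or later switches these
// checks to the replay protected SigHash ForkID value. The helper below
// boils down to:
//
//     nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
//                                     consensusParams.greatWallActivationTime)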
static bool IsReplayProtectionEnabled(const Config &config, int64_t nMedianTimePast) { return nMedianTimePast >= gArgs.GetArg( "-replayprotectionactivationtime", config.GetChainParams().GetConsensus().greatWallActivationTime); } static bool IsReplayProtectionEnabled(const Config &config, const CBlockIndex *pindexPrev) { if (pindexPrev == nullptr) { return false; } return IsReplayProtectionEnabled(config, pindexPrev->GetMedianTimePast()); } static bool IsReplayProtectionEnabledForCurrentBlock(const Config &config) { AssertLockHeld(cs_main); return IsReplayProtectionEnabled(config, chainActive.Tip()); } // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool // were somehow broken and returning the wrong scriptPubKeys static bool CheckInputsFromMempoolAndCache(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool &pool, const uint32_t flags, bool cacheSigStore, PrecomputedTransactionData &txdata) { AssertLockHeld(cs_main); // pool.cs should be locked already, but go ahead and re-take the lock here // to enforce that mempool doesn't change between when we check the view and // when we actually call through to CheckInputs LOCK(pool.cs); assert(!tx.IsCoinBase()); for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); // At this point we haven't actually checked if the coins are all // available (or shouldn't assume we have, since CheckInputs does). So // we just return failure if the inputs are not available here, and then // only have to check equivalence for available inputs. if (coin.IsSpent()) { return false; } const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId()); if (txFrom) { assert(txFrom->GetId() == txin.prevout.GetTxId()); assert(txFrom->vout.size() > txin.prevout.GetN()); assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut()); } else { const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout); assert(!coinFromDisk.IsSpent()); assert(coinFromDisk.GetTxOut() == coin.GetTxOut()); } } return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata); } static bool AcceptToMemoryPoolWorker( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &ptx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const Amount nAbsurdFee, std::vector &coins_to_uncache) { AssertLockHeld(cs_main); const CTransaction &tx = *ptx; const TxId txid = tx.GetId(); // mempool "read lock" (held through // GetMainSignals().TransactionAddedToMempool()) LOCK(pool.cs); if (pfMissingInputs) { *pfMissingInputs = false; } // Coinbase is only valid in a block, not as a loose transaction. if (!CheckRegularTransaction(tx, state)) { // state filled in by CheckRegularTransaction. return false; } // Rather not work on nonstandard transactions (unless -testnet/-regtest) std::string reason; if (fRequireStandard && !IsStandardTx(tx, reason)) { return state.DoS(0, false, REJECT_NONSTANDARD, reason); } // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. CValidationState ctxState; if (!ContextualCheckTransactionForCurrentBlock( config, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) { // We copy the state from a dummy to ensure we don't increase the // ban score of peer for transaction that could be valid in the future. 
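// (The DoS score of 0 in the call below is what keeps the peer unpunished:
// the rejection is recorded as non-standard for our mempool without adding
// to the peer's misbehaviour score.)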
return state.DoS( 0, false, REJECT_NONSTANDARD, ctxState.GetRejectReason(), ctxState.CorruptionPossible(), ctxState.GetDebugMessage()); } // Is it already in the memory pool? if (pool.exists(txid)) { return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool"); } // Check for conflicts with in-memory transactions for (const CTxIn &txin : tx.vin) { auto itConflicting = pool.mapNextTx.find(txin.prevout); if (itConflicting != pool.mapNextTx.end()) { // Disable replacement feature for good return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict"); } } { CCoinsView dummy; CCoinsViewCache view(&dummy); Amount nValueIn = Amount::zero(); LockPoints lp; CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); view.SetBackend(viewMemPool); // Do all inputs exist? for (const CTxIn txin : tx.vin) { if (!pcoinsTip->HaveCoinInCache(txin.prevout)) { coins_to_uncache.push_back(txin.prevout); } if (!view.HaveCoin(txin.prevout)) { // Are inputs missing because we already have the tx? for (size_t out = 0; out < tx.vout.size(); out++) { // Optimistically just do efficient check of cache for // outputs. if (pcoinsTip->HaveCoinInCache(COutPoint(txid, out))) { return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known"); } } // Otherwise assume this might be an orphan tx for which we just // haven't seen parents yet. if (pfMissingInputs) { *pfMissingInputs = true; } // fMissingInputs and !state.IsInvalid() is used to detect this // condition, don't set state.Invalid() return false; } } // Are the actual inputs available? if (!view.HaveInputs(tx)) { return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent"); } // Bring the best block into scope. view.GetBestBlock(); nValueIn = view.GetValueIn(tx); // We have all inputs cached now, so switch back to dummy, so we don't // need to keep lock on mempool. view.SetBackend(dummy); // Only accept BIP68 sequence locked transactions that can be mined in // the next block; we don't want our mempool filled up with transactions // that can't be mined yet. Must keep pool.cs for this unless we change // CheckSequenceLocks to take a CoinsViewCache instead of create its // own. if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) { return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final"); } // Check for non-standard pay-to-script-hash in inputs if (fRequireStandard && !AreInputsStandard(tx, view)) { return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs"); } int64_t nSigOpsCount = GetTransactionSigOpCount(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS); Amount nValueOut = tx.GetValueOut(); Amount nFees = nValueIn - nValueOut; // nModifiedFees includes any fee deltas from PrioritiseTransaction Amount nModifiedFees = nFees; double nPriorityDummy = 0; pool.ApplyDeltas(txid, nPriorityDummy, nModifiedFees); Amount inChainInputValue; double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue); // Keep track of transactions that spend a coinbase, which we re-scan // during reorgs to ensure COINBASE_MATURITY is still met. bool fSpendsCoinbase = false; for (const CTxIn &txin : tx.vin) { const Coin &coin = view.AccessCoin(txin.prevout); if (coin.IsCoinBase()) { fSpendsCoinbase = true; break; } } CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, dPriority, chainActive.Height(), inChainInputValue, fSpendsCoinbase, nSigOpsCount, lp); unsigned int nSize = entry.GetTxSize(); // Check that the transaction doesn't have an excessive number of // sigops, making it impossible to mine. 
Since the coinbase transaction // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than // MAX_BLOCK_SIGOPS_PER_MB; we still consider this an invalid rather // than merely non-standard transaction. if (nSigOpsCount > MAX_STANDARD_TX_SIGOPS) { return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false, strprintf("%d", nSigOpsCount)); } CFeeRate minRelayTxFee = config.GetMinFeePerKB(); Amount mempoolRejectFee = pool.GetMinFee( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFee(nSize); if (mempoolRejectFee > Amount::zero() && nModifiedFees < mempoolRejectFee) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee)); } if (gArgs.GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) { // Require that free transactions have sufficient priority to be // mined in the next block. return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); } // Continuously rate-limit free (really, very-low-fee) transactions. // This mitigates 'penny-flooding' -- sending thousands of free // transactions just to be annoying or make others' transactions take // longer to confirm. if (fLimitFree && nModifiedFees < minRelayTxFee.GetFee(nSize)) { static CCriticalSection csFreeLimiter; static double dFreeCount; static int64_t nLastTime; int64_t nNow = GetTime(); LOCK(csFreeLimiter); // Use an exponentially decaying ~10-minute window: dFreeCount *= pow(1.0 - 1.0 / 600.0, double(nNow - nLastTime)); nLastTime = nNow; // -limitfreerelay unit is thousand-bytes-per-minute // At default rate it would take over a month to fill 1GB // NOTE: Use the actual size here, and not the fee size since this // is counting real size for the rate limiter. if (dFreeCount + nSize >= gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction"); } LogPrint(BCLog::MEMPOOL, "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount + nSize); dFreeCount += nSize; } if (nAbsurdFee != Amount::zero() && nFees > nAbsurdFee) { return state.Invalid(false, REJECT_HIGHFEE, "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee)); } // Calculate in-mempool ancestors, up to a limit. CTxMemPool::setEntries setAncestors; size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000; size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000; std::string errString; if (!pool.CalculateMemPoolAncestors( entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString); } // Set extraFlags as a set of flags that needs to be activated. uint32_t extraFlags = SCRIPT_VERIFY_NONE; if (IsReplayProtectionEnabledForCurrentBlock(config)) { extraFlags |= SCRIPT_ENABLE_REPLAY_PROTECTION; } if (IsMagneticAnomalyEnabledForCurrentBlock(config)) { extraFlags |= SCRIPT_ENABLE_CHECKDATASIG; } // Check inputs based on the set of flags we activate. 
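// Simplified sketch of the flag composition performed below (leaving out the
// -promiscuousmempoolflags special case used when the chain does not require
// standardness):
//
//     uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; // policy
//     scriptVerifyFlags |= extraFlags; // upgrade flags for the next block
//
// i.e. the transaction must satisfy standardness policy plus whatever
// upgrade flags (replay protection, CHECKDATASIG) apply to the next block.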
uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; if (!config.GetChainParams().RequireStandard()) { scriptVerifyFlags = SCRIPT_ENABLE_SIGHASH_FORKID | gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags); } // Make sure whatever we need to activate is actually activated. scriptVerifyFlags |= extraFlags; // Check against previous transactions. This is done last to help // prevent CPU exhaustion denial-of-service attacks. PrecomputedTransactionData txdata(tx); if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) { // State filled in by CheckInputs. return false; } // Check again against the current block tip's script verification flags // to cache our script execution flags. This is, of course, useless if // the next block has different script flags from the previous one, but // because the cache tracks script flags for us it will auto-invalidate // and we'll just have a few blocks of extra misses on soft-fork // activation. // // This is also useful in case of bugs in the standard flags that cause // transactions to pass as valid when they're actually invalid. For // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG // NOT scripts to pass, even though they were invalid. // // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. uint32_t currentBlockScriptVerifyFlags = GetBlockScriptFlags(config, chainActive.Tip()); if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) { // If we're using promiscuousmempoolflags, we may hit this normally. // Check if current block has some flags that scriptVerifyFlags does // not before printing an ominous warning. if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) { return error( "%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against " "MANDATORY but not STANDARD flags %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS | extraFlags, true, false, txdata)) { return error( "%s: ConnectInputs failed against MANDATORY but not " "STANDARD flags due to promiscuous mempool %s, %s", __func__, txid.ToString(), FormatStateMessage(state)); } LogPrintf("Warning: -promiscuousmempool flags set to not include " "currently enforced soft forks, this may break mining or " "otherwise cause instability!\n"); } // This transaction should only count for fee estimation if // the node is not behind and it is not dependent on any other // transactions in the mempool. bool validForFeeEstimation = IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); // Store transaction in memory. pool.addUnchecked(txid, entry, setAncestors, validForFeeEstimation); // Trim mempool and check if tx was trimmed. if (!fOverrideMempoolLimit) { pool.LimitSize( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); if (!pool.exists(txid)) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full"); } } } GetMainSignals().TransactionAddedToMempool(ptx); return true; } /** * (try to) add transaction to memory pool with a specified acceptance time. 
*/ static bool AcceptToMemoryPoolWithTime( const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit = false, const Amount nAbsurdFee = Amount::zero()) { std::vector coins_to_uncache; bool res = AcceptToMemoryPoolWorker( config, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache); if (!res) { for (const COutPoint &outpoint : coins_to_uncache) { pcoinsTip->Uncache(outpoint); } } // After we've (potentially) uncached entries, ensure our coins cache is // still within its size limits CValidationState stateDummy; FlushStateToDisk(config.GetChainParams(), stateDummy, FLUSH_STATE_PERIODIC); return res; } bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs, bool fOverrideMempoolLimit, const Amount nAbsurdFee) { return AcceptToMemoryPoolWithTime(config, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee); } /** * Return transaction in txOut, and if it was found inside a block, its hash is * placed in hashBlock. */ bool GetTransaction(const Config &config, const TxId &txid, CTransactionRef &txOut, uint256 &hashBlock, bool fAllowSlow) { CBlockIndex *pindexSlow = nullptr; LOCK(cs_main); CTransactionRef ptx = g_mempool.get(txid); if (ptx) { txOut = ptx; return true; } if (fTxIndex) { CDiskTxPos postx; if (pblocktree->ReadTxIndex(txid, postx)) { CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); if (file.IsNull()) { return error("%s: OpenBlockFile failed", __func__); } CBlockHeader header; try { file >> header; fseek(file.Get(), postx.nTxOffset, SEEK_CUR); file >> txOut; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } hashBlock = header.GetHash(); if (txOut->GetId() != txid) { return error("%s: txid mismatch", __func__); } return true; } } // use coin database to locate block that contains transaction, and scan it if (fAllowSlow) { const Coin &coin = AccessByTxid(*pcoinsTip, txid); if (!coin.IsSpent()) { pindexSlow = chainActive[coin.GetHeight()]; } } if (pindexSlow) { CBlock block; if (ReadBlockFromDisk(block, pindexSlow, config)) { for (const auto &tx : block.vtx) { if (tx->GetId() == txid) { txOut = tx; hashBlock = pindexSlow->GetBlockHash(); return true; } } } } return false; } ////////////////////////////////////////////////////////////////////////////// // // CBlock and CBlockIndex // static bool WriteBlockToDisk(const CBlock &block, CDiskBlockPos &pos, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("WriteBlockToDisk: OpenBlockFile failed"); } // Write index header unsigned int nSize = GetSerializeSize(fileout, block); fileout << FLATDATA(messageStart) << nSize; // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("WriteBlockToDisk: ftell failed"); } pos.nPos = (unsigned int)fileOutPos; fileout << block; return true; } bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos, const Config &config) { block.SetNull(); // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); } // 
Read block try { filein >> block; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits, config)) { return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); } return true; } bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex, const Config &config) { if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), config)) { return false; } if (block.GetHash() != pindex->GetBlockHash()) { return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() " "doesn't match index for %s at %s", pindex->ToString(), pindex->GetBlockPos().ToString()); } return true; } Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) { int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; // Force block reward to zero when right shift is undefined. if (halvings >= 64) { return Amount::zero(); } Amount nSubsidy = 50 * COIN; // Subsidy is cut in half every 210,000 blocks which will occur // approximately every 4 years. return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI; } bool IsInitialBlockDownload() { // Once this function has returned false, it must remain false. static std::atomic latchToFalse{false}; // Optimization: pre-test latch before taking the lock. if (latchToFalse.load(std::memory_order_relaxed)) { return false; } LOCK(cs_main); if (latchToFalse.load(std::memory_order_relaxed)) { return false; } if (fImporting || fReindex) { return true; } if (chainActive.Tip() == nullptr) { return true; } if (chainActive.Tip()->nChainWork < nMinimumChainWork) { return true; } if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) { return true; } LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); latchToFalse.store(true, std::memory_order_relaxed); return false; } CBlockIndex const *pindexBestForkTip = nullptr; CBlockIndex const *pindexBestForkBase = nullptr; static void AlertNotify(const std::string &strMessage) { uiInterface.NotifyAlertChanged(); std::string strCmd = gArgs.GetArg("-alertnotify", ""); if (strCmd.empty()) { return; } // Alert text should be plain ascii coming from a trusted source, but to be // safe we first strip anything not in safeChars, then add single quotes // around the whole string before passing it to the shell: std::string singleQuote("'"); std::string safeStatus = SanitizeString(strMessage); safeStatus = singleQuote + safeStatus + singleQuote; boost::replace_all(strCmd, "%s", safeStatus); std::thread t(runCommand, strCmd); // thread runs free t.detach(); } static void CheckForkWarningConditions() { AssertLockHeld(cs_main); // Before we get past initial download, we cannot reliably alert about forks // (we assume we don't get stuck on a fork before finishing our initial // sync) if (IsInitialBlockDownload()) { return; } // If our best fork is no longer within 72 blocks (+/- 12 hours if no one // mines it) of our head, drop it if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72) { pindexBestForkTip = nullptr; } if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6))) { if (!GetfLargeWorkForkFound() && pindexBestForkBase) { std::string warning = std::string("'Warning: Large-work fork detected, forking after " "block ") + pindexBestForkBase->phashBlock->ToString() + std::string("'"); AlertNotify(warning); } if (pindexBestForkTip && 
pindexBestForkBase) { LogPrintf("%s: Warning: Large fork found\n forking the " "chain at height %d (%s)\n lasting to height %d " "(%s).\nChain state database corruption likely.\n", __func__, pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(), pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString()); SetfLargeWorkForkFound(true); } else { LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks " "longer than our best chain.\nChain state database " "corruption likely.\n", __func__); SetfLargeWorkInvalidChainFound(true); } } else { SetfLargeWorkForkFound(false); SetfLargeWorkInvalidChainFound(false); } } static void CheckForkWarningConditionsOnNewFork(const CBlockIndex *pindexNewForkTip) { AssertLockHeld(cs_main); // If we are on a fork that is sufficiently large, set a warning flag. const CBlockIndex *pfork = chainActive.FindFork(pindexNewForkTip); // We define a condition where we should warn the user about as a fork of at // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines // it) of ours. We use 7 blocks rather arbitrarily as it represents just // under 10% of sustained network hash rate operating on the fork, or a // chain that is entirely longer than ours and invalid (note that this // should be detected by both). We define it this way because it allows us // to only store the highest fork tip (+ base) which meets the 7-block // condition and from this always have the most-likely-to-cause-warning fork if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) && pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) && chainActive.Height() - pindexNewForkTip->nHeight < 72) { pindexBestForkTip = pindexNewForkTip; pindexBestForkBase = pfork; } CheckForkWarningConditions(); } static void InvalidChainFound(CBlockIndex *pindexNew) { if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) { pindexBestInvalid = pindexNew; } // If the invalid chain found is supposed to be finalized, we need to move // back the finalization point. if (IsBlockFinalized(pindexNew)) { pindexFinalized = pindexNew->pprev; } LogPrintf( "%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, log(pindexNew->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime())); CBlockIndex *tip = chainActive.Tip(); assert(tip); LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__, tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble()) / log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime())); } static void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (!state.CorruptionPossible()) { pindex->nStatus = pindex->nStatus.withFailed(); setDirtyBlockIndex.insert(pindex); InvalidChainFound(pindex); } } void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo, int nHeight) { // Mark inputs spent. 
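// (A coinbase has no previously created coins to spend, hence the early
// return below; for every other input the pre-spend Coin is saved into
// txundo.vprevout so that ApplyBlockUndo() can restore it if the block is
// later disconnected.)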
if (tx.IsCoinBase()) { return; } txundo.vprevout.reserve(tx.vin.size()); for (const CTxIn &txin : tx.vin) { txundo.vprevout.emplace_back(); bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back()); assert(is_spent); } } void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo, int nHeight) { SpendCoins(view, tx, txundo, nHeight); AddCoins(view, tx, nHeight); } void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) { // Mark inputs spent. if (!tx.IsCoinBase()) { for (const CTxIn &txin : tx.vin) { bool is_spent = view.SpendCoin(txin.prevout); assert(is_spent); } } // Add outputs. AddCoins(view, tx, nHeight); } bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; return VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, txdata), &error); } int GetSpendHeight(const CCoinsViewCache &inputs) { LOCK(cs_main); CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; return pindexPrev->nHeight + 1; } bool CheckInputs(const CTransaction &tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, const uint32_t flags, bool sigCacheStore, bool scriptCacheStore, const PrecomputedTransactionData &txdata, std::vector *pvChecks) { assert(!tx.IsCoinBase()); if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) { return false; } if (pvChecks) { pvChecks->reserve(tx.vin.size()); } // The first loop above does all the inexpensive checks. Only if ALL inputs // pass do we perform expensive ECDSA signature checks. Helps prevent CPU // exhaustion attacks. // Skip script verification when connecting blocks under the assumedvalid // block. Assuming the assumedvalid block is valid this is safe because // block merkle hashes are still computed and checked, of course, if an // assumed valid block is invalid due to false scriptSigs this optimization // would allow an invalid chain to be accepted. if (!fScriptChecks) { return true; } // First check if script executions have been cached with the same flags. // Note that this assumes that the inputs provided are correct (ie that the // transaction hash which is in tx's prevouts properly commits to the // scriptPubKey in the inputs view of that transaction). uint256 hashCacheEntry = GetScriptCacheKey(tx, flags); if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore)) { return true; } for (size_t i = 0; i < tx.vin.size(); i++) { const COutPoint &prevout = tx.vin[i].prevout; const Coin &coin = inputs.AccessCoin(prevout); assert(!coin.IsSpent()); // We very carefully only pass in things to CScriptCheck which are // clearly committed to by tx' witness hash. This provides a sanity // check that our caching is not introducing consensus failures through // additional data in, eg, the coins being spent being checked as a part // of CScriptCheck. 
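// For orientation, this only restates how the checks built below are
// consumed: when the caller supplies pvChecks, as ConnectBlock() does, each
// CScriptCheck is merely queued here and later run in parallel by the
// script-check threads through CCheckQueueControl; when pvChecks is null the
// check runs inline immediately and a failure is classified below as either
// non-mandatory (policy) or mandatory (consensus).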
const CScript &scriptPubKey = coin.GetTxOut().scriptPubKey; const Amount amount = coin.GetTxOut().nValue; // Verify signature CScriptCheck check(scriptPubKey, amount, tx, i, flags, sigCacheStore, txdata); if (pvChecks) { pvChecks->push_back(std::move(check)); } else if (!check()) { const bool hasNonMandatoryFlags = (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) != 0; if (hasNonMandatoryFlags) { // Check whether the failure was caused by a non-mandatory // script verification check, such as non-standard DER encodings // or non-null dummy arguments; if so, don't trigger DoS // protection to avoid splitting the network between upgraded // and non-upgraded nodes. // // We also check activating the monolith opcodes as it is a // strictly additive change and we would not like to ban some of // our peer that are ahead of us and are considering the fork // as activated. CScriptCheck check2(scriptPubKey, amount, tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, sigCacheStore, txdata); if (check2()) { return state.Invalid( false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); } } // Failures of other flags indicate a transaction that is invalid in // new blocks, e.g. a invalid P2SH. We DoS ban such nodes as they // are not following the protocol. That said during an upgrade // careful thought should be taken as to the correct behavior - we // may want to continue peering with non-upgraded nodes even after // soft-fork super-majority signaling has occurred. return state.DoS( 100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); } } if (scriptCacheStore && !pvChecks) { // We executed all of the provided scripts, and were told to cache the // result. Do so now. 
AddKeyInScriptCache(hashCacheEntry); } return true; } namespace { bool UndoWriteToDisk(const CBlockUndo &blockundo, CDiskBlockPos &pos, const uint256 &hashBlock, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Write index header unsigned int nSize = GetSerializeSize(fileout, blockundo); fileout << FLATDATA(messageStart) << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("%s: ftell failed", __func__); } pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; // calculate & write checksum CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); hasher << hashBlock; hasher << blockundo; fileout << hasher.GetHash(); return true; } bool UndoReadFromDisk(CBlockUndo &blockundo, const CDiskBlockPos &pos, const uint256 &hashBlock) { // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Read block uint256 hashChecksum; // We need a CHashVerifier as reserializing may lose data CHashVerifier verifier(&filein); try { verifier << hashBlock; verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } // Verify checksum if (hashChecksum != verifier.GetHash()) { return error("%s: Checksum mismatch", __func__); } return true; } /** Abort with a message */ bool AbortNode(const std::string &strMessage, const std::string &userMessage = "") { SetMiscWarning(strMessage); LogPrintf("*** %s\n", strMessage); uiInterface.ThreadSafeMessageBox( userMessage.empty() ? _("Error: A fatal internal error occurred, see " "debug.log for details") : userMessage, "", CClientUIInterface::MSG_ERROR); StartShutdown(); return false; } bool AbortNode(CValidationState &state, const std::string &strMessage, const std::string &userMessage = "") { AbortNode(strMessage, userMessage); return state.Error(strMessage); } } // namespace /** Restore the UTXO in a Coin at a given COutPoint. */ DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view, const COutPoint &out) { bool fClean = true; if (view.HaveCoin(out)) { // Overwriting transaction output. fClean = false; } if (undo.GetHeight() == 0) { // Missing undo metadata (height and coinbase). Older versions included // this information only in undo records for the last spend of a // transactions' outputs. This implies that it must be present for some // other output of the same tx. const Coin &alternate = AccessByTxid(view, out.GetTxId()); if (alternate.IsSpent()) { // Adding output for transaction without known metadata return DISCONNECT_FAILED; } // This is somewhat ugly, but hopefully utility is limited. This is only // useful when working from legacy on disck data. In any case, putting // the correct information in there doesn't hurt. const_cast(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(), alternate.IsCoinBase()); } // The potential_overwrite parameter to AddCoin is only allowed to be false // if we know for sure that the coin did not already exist in the cache. As // we have queried for that above using HaveCoin, we don't need to guess. // When fClean is false, a coin already existed and it is an overwrite. view.AddCoin(out, std::move(undo), !fClean); return fClean ? 
DISCONNECT_OK : DISCONNECT_UNCLEAN; } /** * Undo the effects of this block (with given index) on the UTXO set represented * by coins. When FAILED is returned, view is left in an indeterminate state. */ static DisconnectResult DisconnectBlock(const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { CBlockUndo blockUndo; CDiskBlockPos pos = pindex->GetUndoPos(); if (pos.IsNull()) { error("DisconnectBlock(): no undo data available"); return DISCONNECT_FAILED; } if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) { error("DisconnectBlock(): failure reading undo data"); return DISCONNECT_FAILED; } return ApplyBlockUndo(blockUndo, block, pindex, view); } DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo, const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &view) { bool fClean = true; if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { error("DisconnectBlock(): block and undo data inconsistent"); return DISCONNECT_FAILED; } // First, restore inputs. for (size_t i = 1; i < block.vtx.size(); i++) { const CTransaction &tx = *(block.vtx[i]); const CTxUndo &txundo = blockUndo.vtxundo[i - 1]; if (txundo.vprevout.size() != tx.vin.size()) { error("DisconnectBlock(): transaction and undo data inconsistent"); return DISCONNECT_FAILED; } for (size_t j = 0; j < tx.vin.size(); j++) { const COutPoint &out = tx.vin[j].prevout; const Coin &undo = txundo.vprevout[j]; DisconnectResult res = UndoCoinSpend(undo, view, out); if (res == DISCONNECT_FAILED) { return DISCONNECT_FAILED; } fClean = fClean && res != DISCONNECT_UNCLEAN; } } // Second, revert created outputs. for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; const TxId &txid = tx.GetId(); const bool is_coinbase = tx.IsCoinBase(); // Check that all outputs are available and match the outputs in the // block itself exactly. for (size_t o = 0; o < tx.vout.size(); o++) { if (tx.vout[o].scriptPubKey.IsUnspendable()) { continue; } COutPoint out(txid, o); Coin coin; bool is_spent = view.SpendCoin(out, &coin); if (!is_spent || tx.vout[o] != coin.GetTxOut() || uint32_t(pindex->nHeight) != coin.GetHeight() || is_coinbase != coin.IsCoinBase()) { // transaction output mismatch fClean = false; } } } // Move best block pointer to previous block. view.SetBestBlock(block.hashPrevBlock); return fClean ? 
DISCONNECT_OK : DISCONNECT_UNCLEAN; } static void FlushBlockFile(bool fFinalize = false) { LOCK(cs_LastBlockFile); CDiskBlockPos posOld(nLastBlockFile, 0); FILE *fileOld = OpenBlockFile(posOld); if (fileOld) { if (fFinalize) { TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); } FileCommit(fileOld); fclose(fileOld); } fileOld = OpenUndoFile(posOld); if (fileOld) { if (fFinalize) { TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); } FileCommit(fileOld); fclose(fileOld); } } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); static bool WriteUndoDataForBlock(const CBlockUndo &blockundo, CValidationState &state, CBlockIndex *pindex, const CChainParams &chainparams) { // Write undo information to disk if (pindex->GetUndoPos().IsNull()) { CDiskBlockPos _pos; if (!FindUndoPos( state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) { return error("ConnectBlock(): FindUndoPos failed"); } if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.DiskMagic())) { return AbortNode(state, "Failed to write undo data"); } // update nUndoPos in block index pindex->nUndoPos = _pos.nPos; pindex->nStatus = pindex->nStatus.withUndo(); setDirtyBlockIndex.insert(pindex); } return true; } static bool WriteTxIndexDataForBlock(const CBlock &block, CValidationState &state, CBlockIndex *pindex) { CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); std::vector> vPos; vPos.reserve(block.vtx.size()); for (const CTransactionRef &tx : block.vtx) { vPos.push_back(std::make_pair(tx->GetHash(), pos)); pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION); } if (fTxIndex) { if (!pblocktree->WriteTxIndex(vPos)) { return AbortNode(state, "Failed to write transaction index"); } } return true; } static CCheckQueue scriptcheckqueue(128); void ThreadScriptCheck() { RenameThread("bitcoin-scriptch"); scriptcheckqueue.Thread(); } // Protected by cs_main VersionBitsCache versionbitscache; int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev, const Consensus::Params ¶ms) { int32_t nVersion = VERSIONBITS_TOP_BITS; return nVersion; } // Returns the script flags which should be checked for a given block static uint32_t GetBlockScriptFlags(const Config &config, const CBlockIndex *pChainTip) { AssertLockHeld(cs_main); const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); uint32_t flags = SCRIPT_VERIFY_NONE; // P2SH didn't become active until Apr 1 2012 if (pChainTip->GetMedianTimePast() >= P2SH_ACTIVATION_TIME) { flags |= SCRIPT_VERIFY_P2SH; } // Start enforcing the DERSIG (BIP66) rule. if ((pChainTip->nHeight + 1) >= consensusParams.BIP66Height) { flags |= SCRIPT_VERIFY_DERSIG; } // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule. if ((pChainTip->nHeight + 1) >= consensusParams.BIP65Height) { flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; } // Start enforcing CSV (BIP68, BIP112 and BIP113) rule. if ((pChainTip->nHeight + 1) >= consensusParams.CSVHeight) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } // If the UAHF is enabled, we start accepting replay protected txns if (IsUAHFenabled(config, pChainTip)) { flags |= SCRIPT_VERIFY_STRICTENC; flags |= SCRIPT_ENABLE_SIGHASH_FORKID; } // If the DAA HF is enabled, we start rejecting transaction that use a high // s in their signature. We also make sure that signature that are supposed // to fail (for instance in multisig or other forms of smart contracts) are // null. 
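// (Concretely, and only restating the two flags set below:
// SCRIPT_VERIFY_LOW_S rejects signatures whose S component lies in the upper
// half of the curve order, and SCRIPT_VERIFY_NULLFAIL requires a signature
// that fails CHECKSIG / CHECKMULTISIG verification to be the empty vector,
// so a malleated non-null signature invalidates the whole script.)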
if (IsDAAEnabled(config, pChainTip)) { flags |= SCRIPT_VERIFY_LOW_S; flags |= SCRIPT_VERIFY_NULLFAIL; } // When the magnetic anomaly fork is enabled, we start accepting // transactions using the OP_CHECKDATASIG opcode and it's verify // alternative. We also start enforcing push only signatures and // clean stack. if (IsMagneticAnomalyEnabled(config, pChainTip)) { flags |= SCRIPT_ENABLE_CHECKDATASIG; flags |= SCRIPT_VERIFY_SIGPUSHONLY; flags |= SCRIPT_VERIFY_CLEANSTACK; } // We make sure this node will have replay protection during the next hard // fork. if (IsReplayProtectionEnabled(config, pChainTip)) { flags |= SCRIPT_ENABLE_REPLAY_PROTECTION; } return flags; } static int64_t nTimeCheck = 0; static int64_t nTimeForks = 0; static int64_t nTimeVerify = 0; static int64_t nTimeConnect = 0; static int64_t nTimeIndex = 0; static int64_t nTimeCallbacks = 0; static int64_t nTimeTotal = 0; /** * Apply the effects of this block (with given index) on the UTXO set * represented by coins. Validity checks that depend on the UTXO set are also * done; ConnectBlock() can fail if those validity checks fail (among other * reasons). */ static bool ConnectBlock(const Config &config, const CBlock &block, CValidationState &state, CBlockIndex *pindex, CCoinsViewCache &view, bool fJustCheck = false) { AssertLockHeld(cs_main); int64_t nTimeStart = GetTimeMicros(); // Check it again in case a previous version let a bad block in BlockValidationOptions validationOptions = BlockValidationOptions(!fJustCheck, !fJustCheck); if (!CheckBlock(config, block, state, validationOptions)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } // Verify that the view's current state corresponds to the previous block uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); // Special case for the genesis block, skipping connection of its // transactions (its coinbase is unspendable) const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); if (block.GetHash() == consensusParams.hashGenesisBlock) { if (!fJustCheck) { view.SetBestBlock(pindex->GetBlockHash()); } return true; } bool fScriptChecks = true; if (!hashAssumeValid.IsNull()) { // We've been configured with the hash of a block which has been // externally verified to have a valid history. A suitable default value // is included with the software and updated from time to time. Because // validity relative to a piece of software is an objective fact these // defaults can be easily reviewed. This setting doesn't force the // selection of any particular chain but makes validating some faster by // effectively caching the result of part of the verification. BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); if (it != mapBlockIndex.end()) { if (it->second->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->nChainWork >= nMinimumChainWork) { // This block is a member of the assumed verified chain and an // ancestor of the best header. The equivalent time check // discourages hashpower from extorting the network via DOS // attack into accepting an invalid block through telling users // they must manually set assumevalid. Requiring a software // change or burying the invalid block, regardless of the // setting, makes it hard to hide the implication of the demand. 
// This also avoids having release candidates that are hardly // doing any signature verification at all in testing without // having to artificially set the default assumed verified block // further back. The test against nMinimumChainWork prevents the // skipping when denied access to any chain at least as good as // the expected chain. fScriptChecks = (GetBlockProofEquivalentTime( *pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) <= 60 * 60 * 24 * 7 * 2); } } } int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001); // Do not allow blocks that contain transactions which 'overwrite' older // transactions, unless those are already completely spent. If such // overwrites are allowed, coinbases and transactions depending upon those // can be duplicated to remove the ability to spend the first instance -- // even after being sent to another address. See BIP30 and // http://r6.ca/blog/20120206T005236Z.html for more information. This logic // is not necessary for memory pool transactions, as AcceptToMemoryPool // already refuses previously-known transaction ids entirely. This rule was // originally applied to all blocks with a timestamp after March 15, 2012, // 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is // applied to all blocks except the two in the chain that violate it. This // prevents exploiting the issue against nodes during their initial block // download. bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock // invocations which don't // have a hash. !((pindex->nHeight == 91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763" "b1f4360639393e0e4c8e300e0caec")) || (pindex->nHeight == 91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f" "610ae9601ac046a38084ccb7cd721"))); // Once BIP34 activated it was not possible to create new duplicate // coinbases and thus other than starting with the 2 existing duplicate // coinbase pairs, not possible to create overwriting txs. But by the time // BIP34 activated, in each of the existing pairs the duplicate coinbase had // overwritten the first before the first had been spent. Since those // coinbases are sufficiently buried its no longer possible to create // further duplicate transactions descending from the known pairs either. If // we're on the known chain at height greater than where BIP34 activated, we // can save the db accesses needed for the BIP30 check. CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(consensusParams.BIP34Height); // Only continue to enforce if we're below BIP34 activation height or the // block hash at that height doesn't correspond. fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash)); if (fEnforceBIP30) { for (const auto &tx : block.vtx) { for (size_t o = 0; o < tx->vout.size(); o++) { if (view.HaveCoin(COutPoint(tx->GetId(), o))) { return state.DoS( 100, error("ConnectBlock(): tried to overwrite transaction"), REJECT_INVALID, "bad-txns-BIP30"); } } } } // Start enforcing BIP68 (sequence locks). 
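// Informal example of what this enforces (illustrative numbers): an input
// whose nSequence encodes a relative lock of 10 blocks can only be included
// once the coin it spends is at least 10 blocks old; SequenceLocks() further
// down checks this against the per-input prevheights gathered from the UTXO
// view.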
int nLockTimeFlags = 0; if (pindex->nHeight >= consensusParams.CSVHeight) { nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; } const uint32_t flags = GetBlockScriptFlags(config, pindex->pprev); const bool fIsMagneticAnomalyEnabled = IsMagneticAnomalyEnabled(config, pindex->pprev); int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1; LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001); CBlockUndo blockundo; CCheckQueueControl control(fScriptChecks ? &scriptcheckqueue : nullptr); std::vector prevheights; Amount nFees = Amount::zero(); int nInputs = 0; // Sigops counting. We need to do it again because of P2SH. uint64_t nSigOpsCount = 0; const uint64_t currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); const uint64_t nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize); blockundo.vtxundo.reserve(block.vtx.size() - 1); for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; nInputs += tx.vin.size(); if (tx.IsCoinBase()) { // We've already checked for sigops count before P2SH in CheckBlock. nSigOpsCount += GetSigOpCountWithoutP2SH(tx, flags); } if (fIsMagneticAnomalyEnabled || tx.IsCoinBase()) { // We do not need to throw when a transaction is duplicated. If they // are in the same block, CheckBlock will catch it, and if they are // in a different block, it'll register as a double spend or BIP30 // violation. In both cases, we get a more meaningful feedback out // of it. AddCoins(view, tx, pindex->nHeight, true); } } for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; if (tx.IsCoinBase()) { continue; } if (!view.HaveInputs(tx)) { return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), REJECT_INVALID, "bad-txns-inputs-missingorspent"); } // Check that transaction is BIP68 final BIP68 lock checks (as // opposed to nLockTime checks) must be in ConnectBlock because they // require the UTXO set. prevheights.resize(tx.vin.size()); for (size_t j = 0; j < tx.vin.size(); j++) { prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight(); } if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) { return state.DoS( 100, error("%s: contains a non-BIP68-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); } // GetTransactionSigOpCount counts 2 types of sigops: // * legacy (always) // * p2sh (when P2SH enabled in flags and excludes coinbase) auto txSigOpsCount = GetTransactionSigOpCount(tx, view, flags); if (txSigOpsCount > MAX_TX_SIGOPS_COUNT) { return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops"); } nSigOpsCount += txSigOpsCount; if (nSigOpsCount > nMaxSigOpsCount) { return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); } Amount fee = view.GetValueIn(tx) - tx.GetValueOut(); nFees += fee; // Don't cache results if we're actually connecting blocks (still // consult the cache, though). 
bool fCacheResults = fJustCheck; std::vector vChecks; if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, PrecomputedTransactionData(tx), &vChecks)) { return error("ConnectBlock(): CheckInputs on %s failed with %s", tx.GetId().ToString(), FormatStateMessage(state)); } control.Add(vChecks); blockundo.vtxundo.push_back(CTxUndo()); SpendCoins(view, tx, blockundo.vtxundo.back(), pindex->nHeight); if (!fIsMagneticAnomalyEnabled) { AddCoins(view, tx, pindex->nHeight); } } int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, " "%.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs - 1), nTimeConnect * 0.000001); Amount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, consensusParams); if (block.vtx[0]->GetValueOut() > blockReward) { return state.DoS(100, error("ConnectBlock(): coinbase pays too much " "(actual=%d vs limit=%d)", block.vtx[0]->GetValueOut(), blockReward), REJECT_INVALID, "bad-cb-amount"); } if (!control.Wait()) { return state.DoS(100, false, REJECT_INVALID, "blk-bad-inputs", false, "parallel script check failed"); } int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs - 1), nTimeVerify * 0.000001); if (fJustCheck) { return true; } if (!WriteUndoDataForBlock(blockundo, state, pindex, config.GetChainParams())) { return false; } if (!pindex->IsValid(BlockValidity::SCRIPTS)) { pindex->RaiseValidity(BlockValidity::SCRIPTS); setDirtyBlockIndex.insert(pindex); } if (!WriteTxIndexDataForBlock(block, state, pindex)) { return false; } // add this block to the view's block chain view.SetBestBlock(pindex->GetBlockHash()); int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001); int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001); // If we just activated the replay protection with that block, it means // transaction in the mempool are now invalid. As a result, we need to clear // the mempool. if (IsReplayProtectionEnabled(config, pindex) && !IsReplayProtectionEnabled(config, pindex->pprev)) { g_mempool.clear(); } return true; } /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with if * they're too large, if it's been a while since the last write, or always and * in all cases if we're in prune mode and are deleting files. 
*/ static bool FlushStateToDisk(const CChainParams &chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { int64_t nMempoolUsage = g_mempool.DynamicMemoryUsage(); LOCK(cs_main); static int64_t nLastWrite = 0; static int64_t nLastFlush = 0; static int64_t nLastSetChain = 0; std::set setFilesToPrune; bool fFlushForPrune = false; bool fDoFullFlush = false; int64_t nNow = 0; try { { LOCK(cs_LastBlockFile); if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { if (nManualPruneHeight > 0) { FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); } else { FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); fCheckForPruning = false; } if (!setFilesToPrune.empty()) { fFlushForPrune = true; if (!fHavePruned) { pblocktree->WriteFlag("prunedblockfiles", true); fHavePruned = true; } } } nNow = GetTimeMicros(); // Avoid writing/flushing immediately after startup. if (nLastWrite == 0) { nLastWrite = nNow; } if (nLastFlush == 0) { nLastFlush = nNow; } if (nLastSetChain == 0) { nLastSetChain = nNow; } int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, // but we have time now (not in the middle of a block processing). bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); // The cache is over the limit, we have to write now. bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; // It's been a while since we wrote the block index to disk. Do this // frequently, so we don't need to redownload after a crash. bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; // It's been very long since we flushed the cache. Do this // infrequently, to optimize cache usage. bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; // Combine all conditions that result in a full cache flush. fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; // Write blocks and block index to disk. if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index if (!CheckDiskSpace(0)) { return state.Error("out of disk space"); } // First make sure all block and undo data is flushed to disk. FlushBlockFile(); // Then update all block file information (which may refer to // block and undo files). { std::vector> vFiles; vFiles.reserve(setDirtyFileInfo.size()); for (int i : setDirtyFileInfo) { vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i])); } setDirtyFileInfo.clear(); std::vector vBlocks; vBlocks.reserve(setDirtyBlockIndex.size()); for (const CBlockIndex *cbi : setDirtyBlockIndex) { vBlocks.push_back(cbi); } setDirtyBlockIndex.clear(); if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { return AbortNode( state, "Failed to write to block index database"); } } // Finally remove any pruned files if (fFlushForPrune) { UnlinkPrunedFiles(setFilesToPrune); } nLastWrite = nNow; } // Flush best chain related state. This can only be done if the // blocks / block index write was also done. if (fDoFullFlush) { // Typical Coin structures on disk are around 48 bytes in size. 
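// As a rough worked example of the CheckDiskSpace estimate a few lines
// below (hypothetical cache size): a chainstate cache holding one million
// coins asks for about 48 * 2 * 2 * 1,000,000 bytes, roughly 192 MB of free
// disk space, before the flush is attempted.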
// Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is // already an overestimation, as most will delete an existing // entry or overwrite one. Still, use a conservative safety // factor of 2. if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) { return state.Error("out of disk space"); } // Flush the chainstate (which may refer to block index // entries). if (!pcoinsTip->Flush()) { return AbortNode(state, "Failed to write to coin database"); } nLastFlush = nNow; } } if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). GetMainSignals().SetBestChain(chainActive.GetLocator()); nLastSetChain = nNow; } } catch (const std::runtime_error &e) { return AbortNode( state, std::string("System error while flushing: ") + e.what()); } return true; } void FlushStateToDisk() { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS); } void PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); } /** * Update chainActive and related internal data structures when adding a new * block to the chain tip. */ static void UpdateTip(const Config &config, CBlockIndex *pindexNew) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); chainActive.SetTip(pindexNew); // New best block g_mempool.AddTransactionsUpdated(1); { LOCK(g_best_block_mutex); g_best_block = pindexNew->GetBlockHash(); g_best_block_cv.notify_all(); } static bool fWarned = false; std::vector warningMessages; if (!IsInitialBlockDownload()) { int nUpgraded = 0; const CBlockIndex *pindex = chainActive.Tip(); // Check the version of the last 100 blocks to see if we need to // upgrade: for (int i = 0; i < 100 && pindex != nullptr; i++) { int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, consensusParams); if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) { ++nUpgraded; } pindex = pindex->pprev; } if (nUpgraded > 0) { warningMessages.push_back(strprintf( "%d of last 100 blocks have unexpected version", nUpgraded)); } if (nUpgraded > 100 / 2) { std::string strWarning = _("Warning: Unknown block versions being mined! It's possible " "unknown rules are in effect"); // notify GetWarnings(), called by Qt and the JSON-RPC code to warn // the user: SetMiscWarning(strWarning); if (!fWarned) { AlertNotify(strWarning); fWarned = true; } } } LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu " "date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion, log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0), (unsigned long)chainActive.Tip()->nChainTx, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(config.GetChainParams().TxData(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)), pcoinsTip->GetCacheSize()); if (!warningMessages.empty()) { LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", ")); } LogPrintf("\n"); } /** * Disconnect chainActive's tip. 
* After calling, the mempool will be in an inconsistent state, with * transactions from disconnected blocks being added to disconnectpool. You * should make the mempool consistent again by calling updateMempoolForReorg. * with cs_main held. * * If disconnectpool is nullptr, then no disconnected transactions are added to * disconnectpool (note that the caller is responsible for mempool consistency * in any case). */ static bool DisconnectTip(const Config &config, CValidationState &state, DisconnectedBlockTransactions *disconnectpool) { CBlockIndex *pindexDelete = chainActive.Tip(); assert(pindexDelete); // Read block from disk. std::shared_ptr pblock = std::make_shared(); CBlock &block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, config)) { return AbortNode(state, "Failed to read block"); } // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); { CCoinsViewCache view(pcoinsTip.get()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); } bool flushed = view.Flush(); assert(flushed); } LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } // If this block was deactivating the replay protection, then we need to // remove transactions that are replay protected from the mempool. There is // no easy way to do this so we'll just discard the whole mempool and then // add the transaction of the block we just disconnected back. // // If we are deactivating Magnetic anomaly, we want to make sure we do not // have transactions in the mempool that use newly introduced opcodes. As a // result, we also cleanup the mempool. if ((IsReplayProtectionEnabled(config, pindexDelete) && !IsReplayProtectionEnabled(config, pindexDelete->pprev)) || (IsMagneticAnomalyEnabled(config, pindexDelete) && !IsMagneticAnomalyEnabled(config, pindexDelete->pprev))) { LogPrint(BCLog::MEMPOOL, "Clearing mempool for reorg"); g_mempool.clear(); // While not strictly necessary, clearing the disconnect pool is also // beneficial so we don't try to reuse its content at the end of the // reorg, which we know will fail. if (disconnectpool) { disconnectpool->clear(); } } if (disconnectpool) { disconnectpool->addForBlock(block.vtx); } // If the tip is finalized, then undo it. if (pindexFinalized == pindexDelete) { pindexFinalized = pindexDelete->pprev; } // Update chainActive and related variables. UpdateTip(config, pindexDelete->pprev); // Let wallets know transactions went from 1-confirmed to // 0-confirmed or conflicted: GetMainSignals().BlockDisconnected(pblock); return true; } static int64_t nTimeReadFromDisk = 0; static int64_t nTimeConnectTotal = 0; static int64_t nTimeFlush = 0; static int64_t nTimeChainState = 0; static int64_t nTimePostConnect = 0; struct PerBlockConnectTrace { CBlockIndex *pindex = nullptr; std::shared_ptr pblock; std::shared_ptr> conflictedTxs; PerBlockConnectTrace() : conflictedTxs(std::make_shared>()) {} }; /** * Used to track blocks whose transactions were applied to the UTXO state as a * part of a single ActivateBestChainStep call. * * This class also tracks transactions that are removed from the mempool as * conflicts (per block) and can be used to pass all those transactions through * SyncTransaction. 
* * This class assumes (and asserts) that the conflicted transactions for a given * block are added via mempool callbacks prior to the BlockConnected() * associated with those transactions. If any transactions are marked * conflicted, it is assumed that an associated block will always be added. * * This class is single-use, once you call GetBlocksConnected() you have to * throw it away and make a new one. */ class ConnectTrace { private: std::vector blocksConnected; CTxMemPool &pool; public: - ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { + explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { pool.NotifyEntryRemoved.connect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } ~ConnectTrace() { pool.NotifyEntryRemoved.disconnect( boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } void BlockConnected(CBlockIndex *pindex, std::shared_ptr pblock) { assert(!blocksConnected.back().pindex); assert(pindex); assert(pblock); blocksConnected.back().pindex = pindex; blocksConnected.back().pblock = std::move(pblock); blocksConnected.emplace_back(); } std::vector &GetBlocksConnected() { // We always keep one extra block at the end of our list because blocks // are added after all the conflicted transactions have been filled in. // Thus, the last entry should always be an empty one waiting for the // transactions from the next block. We pop the last entry here to make // sure the list we return is sane. assert(!blocksConnected.back().pindex); assert(blocksConnected.back().conflictedTxs->empty()); blocksConnected.pop_back(); return blocksConnected; } void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) { assert(!blocksConnected.back().pindex); if (reason == MemPoolRemovalReason::CONFLICT) { blocksConnected.back().conflictedTxs->emplace_back( std::move(txRemoved)); } } }; static bool FinalizeBlockInternal(const Config &config, CValidationState &state, const CBlockIndex *pindex) { AssertLockHeld(cs_main); if (pindex->nStatus.isInvalid()) { // We try to finalize an invalid block. return state.DoS(100, error("%s: Trying to finalize invalid block %s", __func__, pindex->GetBlockHash().ToString()), REJECT_INVALID, "finalize-invalid-block"); } // Check that the request is consistent with current finalization. if (pindexFinalized && !AreOnTheSameFork(pindex, pindexFinalized)) { return state.DoS( 20, error("%s: Trying to finalize block %s which conflicts " "with already finalized block", __func__, pindex->GetBlockHash().ToString()), REJECT_AGAINST_FINALIZED, "bad-fork-prior-finalized"); } if (IsBlockFinalized(pindex)) { // The block is already finalized. return true; } // We have a new block to finalize. pindexFinalized = pindex; return true; } static const CBlockIndex *FindBlockToFinalize(const Config &config, CBlockIndex *pindexNew) { AssertLockHeld(cs_main); const int32_t maxreorgdepth = gArgs.GetArg("-maxreorgdepth", DEFAULT_MAX_REORG_DEPTH); const int64_t finalizationdelay = gArgs.GetArg("-finalizationdelay", DEFAULT_MIN_FINALIZATION_DELAY); // Find our candidate. // If maxreorgdepth is < 0 pindex will be null and auto finalization // disabled const CBlockIndex *pindex = pindexNew->GetAncestor(pindexNew->nHeight - maxreorgdepth); int64_t now = GetTime(); // If the finalization delay is not expired since the startup time, // finalization should be avoided. Header receive time is not saved to disk // and so cannot be anterior to startup time. 
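The bookkeeping described for ConnectTrace above (keep one empty trailing entry so that conflict notifications always have a pending slot, then pop that sentinel before returning the list) can be illustrated on its own. This is a minimal sketch with stand-in types; Entry, TraceSketch and the string ids are hypothetical and are not the real CBlockIndex/CTransactionRef machinery.

    #include <cassert>
    #include <string>
    #include <vector>

    // Stand-ins for the per-block trace entry in this sketch.
    struct Entry {
        std::string block;                  // empty until the block is recorded
        std::vector<std::string> conflicts; // conflicts seen before that block
    };

    class TraceSketch {
        std::vector<Entry> entries;
    public:
        TraceSketch() { entries.emplace_back(); } // one empty sentinel entry

        // Called for each mempool conflict; attaches it to the pending entry.
        void NotifyConflict(const std::string &txid) {
            assert(entries.back().block.empty());
            entries.back().conflicts.push_back(txid);
        }
        // Called once the block connects; seals the entry and opens a new one.
        void BlockConnected(const std::string &hash) {
            assert(entries.back().block.empty());
            entries.back().block = hash;
            entries.emplace_back();
        }
        // Drop the trailing sentinel before handing the list to callers.
        std::vector<Entry> TakeBlocks() {
            assert(entries.back().block.empty());
            entries.pop_back();
            return entries;
        }
    };

    int main() {
        TraceSketch trace;
        trace.NotifyConflict("txA"); // conflict arrives before its block
        trace.BlockConnected("block1");
        assert(trace.TakeBlocks().size() == 1);
        return 0;
    }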
if (now < (GetStartupTime() + finalizationdelay)) { return nullptr; } // While our candidate is not eligible (finalization delay not expired), try // the previous one. while (pindex && (pindex != pindexFinalized)) { // Check that the block to finalize is known for a long enough time. // This test will ensure that an attacker could not cause a block to // finalize by forking the chain with a depth > maxreorgdepth. // If the block is loaded from disk, header receive time is 0 and the // block will be finalized. This is safe because the delay since the // node startup is already expired. auto headerReceivedTime = pindex->GetHeaderReceivedTime(); // If finalization delay is <= 0, finalization always occurs immediately if (now >= (headerReceivedTime + finalizationdelay)) { return pindex; } pindex = pindex->pprev; } return nullptr; } /** * Connect a new block to chainActive. pblock is either nullptr or a pointer to * a CBlock corresponding to pindexNew, to bypass loading it again from disk. * * The block is always added to connectTrace (either after loading from disk or * by copying pblock) - if that is not intended, care must be taken to remove * the last entry in blocksConnected in case of failure. */ static bool ConnectTip(const Config &config, CValidationState &state, CBlockIndex *pindexNew, const std::shared_ptr &pblock, ConnectTrace &connectTrace, DisconnectedBlockTransactions &disconnectpool) { assert(pindexNew->pprev == chainActive.Tip()); // Read block from disk. int64_t nTime1 = GetTimeMicros(); std::shared_ptr pthisBlock; if (!pblock) { std::shared_ptr pblockNew = std::make_shared(); if (!ReadBlockFromDisk(*pblockNew, pindexNew, config)) { return AbortNode(state, "Failed to read block"); } pthisBlock = pblockNew; } else { pthisBlock = pblock; } const CBlock &blockConnecting = *pthisBlock; // Apply the block atomically to the chain state. int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; int64_t nTime3; LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); { CCoinsViewCache view(pcoinsTip.get()); bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { if (state.IsInvalid()) { InvalidBlockFound(pindexNew, state); } return error("ConnectTip(): ConnectBlock %s failed (%s)", pindexNew->GetBlockHash().ToString(), FormatStateMessage(state)); } // Update the finalized block. const CBlockIndex *pindexToFinalize = FindBlockToFinalize(config, pindexNew); if (pindexToFinalize && !FinalizeBlockInternal(config, state, pindexToFinalize)) { state.SetCorruptionPossible(); return error("ConnectTip(): FinalizeBlock %s failed (%s)", pindexNew->GetBlockHash().ToString(), FormatStateMessage(state)); } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); bool flushed = view.Flush(); assert(flushed); } int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); // Write the chain state to disk, if necessary. 
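A minimal sketch of the candidate walk FindBlockToFinalize performs above: start from the ancestor maxreorgdepth blocks below the new tip, then step back until a block whose header has been known for at least finalizationdelay seconds is found (a receive time of 0, meaning loaded from disk, always qualifies once the startup delay has passed). Node, FindFinalizeCandidate and the toy timestamps are illustrative assumptions, not the real CBlockIndex interface.

    #include <cassert>
    #include <cstdint>

    struct Node {
        int height = 0;
        int64_t headerReceivedTime = 0; // 0 when loaded from disk
        const Node *prev = nullptr;
    };

    // Walk back from the candidate until one has been known long enough.
    // (The real loop also stops early at the already-finalized block.)
    const Node *FindFinalizeCandidate(const Node *tipMinusMaxReorg, int64_t now,
                                      int64_t finalizationDelay) {
        const Node *pindex = tipMinusMaxReorg;
        while (pindex) {
            if (now >= pindex->headerReceivedTime + finalizationDelay) {
                return pindex; // known for long enough, finalize this one
            }
            pindex = pindex->prev;
        }
        return nullptr;
    }

    int main() {
        // Three-block chain; only the oldest header is older than the delay.
        Node a{1, 100}, b{2, 990, &a}, c{3, 995, &b};
        const Node *chosen = FindFinalizeCandidate(&c, /*now=*/1000,
                                                   /*finalizationDelay=*/60);
        assert(chosen == &a);
        return 0;
    }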
if (!FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_IF_NEEDED)) { return false; } int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); // Remove conflicting transactions from the mempool. g_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); disconnectpool.removeForBlock(blockConnecting.vtx); // Update chainActive & related variables. UpdateTip(config, pindexNew); int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); return true; } /** * Return the tip of the chain with the most work in it, that isn't known to be * invalid (it's however far from certain to be valid). */ static CBlockIndex *FindMostWorkChain() { AssertLockHeld(cs_main); do { CBlockIndex *pindexNew = nullptr; // Find the best candidate header. { std::set::reverse_iterator it = setBlockIndexCandidates.rbegin(); if (it == setBlockIndexCandidates.rend()) { return nullptr; } pindexNew = *it; } // If this block will cause a finalized block to be reorged, then we // mark it as invalid. if (pindexFinalized && !AreOnTheSameFork(pindexNew, pindexFinalized)) { LogPrintf("Mark block %s invalid because it forks prior to the " "finalization point %d.\n", pindexNew->GetBlockHash().ToString(), pindexFinalized->nHeight); pindexNew->nStatus = pindexNew->nStatus.withFailed(); InvalidChainFound(pindexNew); } const CBlockIndex *pindexFork = chainActive.FindFork(pindexNew); // Check whether all blocks on the path between the currently active // chain and the candidate are valid. Just going until the active chain // is an optimization, as we know all blocks in it are valid already. CBlockIndex *pindexTest = pindexNew; bool hasValidAncestor = true; while (hasValidAncestor && pindexTest && pindexTest != pindexFork) { assert(pindexTest->nChainTx || pindexTest->nHeight == 0); // If this is a parked chain, but it has enough PoW, clear the park // state. bool fParkedChain = pindexTest->nStatus.isOnParkedChain(); if (fParkedChain && gArgs.GetBoolArg("-parkdeepreorg", true)) { const CBlockIndex *pindexTip = chainActive.Tip(); // During initialization, pindexTip and/or pindexFork may be // null. In this case, we just ignore the fact that the chain is // parked. if (!pindexTip || !pindexFork) { UnparkBlock(pindexTest); continue; } // A parked chain can be unparked if it has twice as much PoW // accumulated as the main chain has since the fork block. CBlockIndex const *pindexExtraPow = pindexTip; arith_uint256 requiredWork = pindexTip->nChainWork; switch (pindexTip->nHeight - pindexFork->nHeight) { // Limit the penalty for depth 1, 2 and 3 to half a block // worth of work to ensure we don't fork accidentally. case 3: case 2: pindexExtraPow = pindexExtraPow->pprev; // FALLTHROUGH case 1: { const arith_uint256 deltaWork = pindexExtraPow->nChainWork - pindexFork->nChainWork; requiredWork += (deltaWork >> 1); break; } default: requiredWork += pindexExtraPow->nChainWork - pindexFork->nChainWork; break; } if (pindexNew->nChainWork > requiredWork) { // We have enough, clear the parked state.
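The unparking threshold computed just above can be restated with plain numbers: a parked branch must exceed the active tip's work plus the work the active chain has accumulated since the fork, except that reorgs of depth one to three only pay half of that delta (measured one block below the tip for depths two and three). Below is a self-contained sketch using 64-bit work values instead of arith_uint256; RequiredWorkSketch and the toy numbers are assumptions made only for illustration.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // chainWork[h] = cumulative work at height h on the active chain (toy values).
    // Returns the work a parked competitor must exceed to be unparked.
    uint64_t RequiredWorkSketch(const std::vector<uint64_t> &chainWork,
                                int tipHeight, int forkHeight) {
        uint64_t required = chainWork[tipHeight];
        const int depth = tipHeight - forkHeight;
        if (depth >= 1 && depth <= 3) {
            // Shallow reorgs only pay half of the post-fork work as a penalty;
            // for depth 2 and 3 the delta is measured one block below the tip.
            const int extraPowHeight = (depth == 1) ? tipHeight : tipHeight - 1;
            required += (chainWork[extraPowHeight] - chainWork[forkHeight]) / 2;
        } else if (depth > 3) {
            // Deeper reorgs must roughly double the work accumulated since the fork.
            required += chainWork[tipHeight] - chainWork[forkHeight];
        }
        return required;
    }

    int main() {
        // Each block contributes 10 units of work: heights 0..5.
        std::vector<uint64_t> work{10, 20, 30, 40, 50, 60};
        assert(RequiredWorkSketch(work, 5, 4) == 60 + 5);  // depth 1: half a block extra
        assert(RequiredWorkSketch(work, 5, 1) == 60 + 40); // depth 4: full post-fork delta
        return 0;
    }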
LogPrintf("Unpark block %s as its chain has accumulated " "enough PoW.\n", pindexTest->GetBlockHash().ToString()); fParkedChain = false; UnparkBlock(pindexTest); } } // Pruned nodes may have entries in setBlockIndexCandidates for // which block files have been deleted. Remove those as candidates // for the most work chain if we come across them; we can't switch // to a chain unless we have all the non-active-chain parent blocks. bool fInvalidChain = pindexTest->nStatus.isInvalid(); bool fMissingData = !pindexTest->nStatus.hasData(); if (!(fInvalidChain || fParkedChain || fMissingData)) { // The current block is acceptable, move to the parent, up to // the fork point. pindexTest = pindexTest->pprev; continue; } // Candidate chain is not usable (either invalid or missing data) hasValidAncestor = false; setBlockIndexCandidates.erase(pindexTest); if (fInvalidChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) { pindexBestInvalid = pindexNew; } if (fParkedChain && (pindexBestParked == nullptr || pindexNew->nChainWork > pindexBestParked->nChainWork)) { pindexBestParked = pindexNew; } CBlockIndex *pindexFailed = pindexNew; // Remove the entire chain from the set. while (pindexTest != pindexFailed) { if (fInvalidChain || fParkedChain) { pindexFailed->nStatus = pindexFailed->nStatus.withFailedParent(fInvalidChain) .withParkedParent(fParkedChain); } else if (fMissingData) { // If we're missing data, then add back to // mapBlocksUnlinked, so that if the block arrives in the // future we can try adding to setBlockIndexCandidates // again. mapBlocksUnlinked.insert( std::make_pair(pindexFailed->pprev, pindexFailed)); } setBlockIndexCandidates.erase(pindexFailed); pindexFailed = pindexFailed->pprev; } if (fInvalidChain || fParkedChain) { // We discovered a new chain tip that is either parked or // invalid, we may want to warn. CheckForkWarningConditionsOnNewFork(pindexNew); } } // We found a candidate that has valid ancestors. This is our guy. if (hasValidAncestor) { return pindexNew; } } while (true); } /** * Delete all entries in setBlockIndexCandidates that are worse than the current * tip. */ static void PruneBlockIndexCandidates() { // Note that we can't delete the current block itself, as we may need to // return to it later in case a reorganization to a better block fails. auto it = setBlockIndexCandidates.begin(); while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) { setBlockIndexCandidates.erase(it++); } // Either the current tip or a successor of it we're working towards is left // in setBlockIndexCandidates. assert(!setBlockIndexCandidates.empty()); } /** * Try to make some progress towards making pindexMostWork the active block. * pblock is either nullptr or a pointer to a CBlock corresponding to * pindexMostWork. */ static bool ActivateBestChainStep(const Config &config, CValidationState &state, CBlockIndex *pindexMostWork, const std::shared_ptr &pblock, bool &fInvalidFound, ConnectTrace &connectTrace) { AssertLockHeld(cs_main); const CBlockIndex *pindexOldTip = chainActive.Tip(); const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); // Disconnect active blocks which are no longer in the best chain. bool fBlocksDisconnected = false; DisconnectedBlockTransactions disconnectpool; while (chainActive.Tip() && chainActive.Tip() != pindexFork) { if (!DisconnectTip(config, state, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, // just in case. 
Only remove from the mempool in this case. disconnectpool.updateMempoolForReorg(config, false); return false; } fBlocksDisconnected = true; } // Build list of new blocks to connect. std::vector vpindexToConnect; bool fContinue = true; int nHeight = pindexFork ? pindexFork->nHeight : -1; while (fContinue && nHeight != pindexMostWork->nHeight) { // Don't iterate the entire list of potential improvements toward the // best tip, as we likely only need a few blocks along the way. int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); vpindexToConnect.clear(); vpindexToConnect.reserve(nTargetHeight - nHeight); CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); while (pindexIter && pindexIter->nHeight != nHeight) { vpindexToConnect.push_back(pindexIter); pindexIter = pindexIter->pprev; } nHeight = nTargetHeight; // Connect new blocks. for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) { if (!ConnectTip(config, state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr(), connectTrace, disconnectpool)) { if (state.IsInvalid()) { // The block violates a consensus rule. if (!state.CorruptionPossible()) { InvalidChainFound(vpindexToConnect.back()); } state = CValidationState(); fInvalidFound = true; fContinue = false; break; } // A system error occurred (disk space, database error, ...). // Make the mempool consistent with the current tip, just in // case any observers try to use it before shutdown. disconnectpool.updateMempoolForReorg(config, false); return false; } else { PruneBlockIndexCandidates(); if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) { // We're in a better position than we were. Return // temporarily to release the lock. fContinue = false; break; } } } } if (fBlocksDisconnected) { // If any blocks were disconnected, disconnectpool may be non empty. Add // any disconnected transactions back to the mempool. disconnectpool.updateMempoolForReorg(config, true); } g_mempool.check(pcoinsTip.get()); // Callbacks/notifications for a new best chain. if (fInvalidFound) { CheckForkWarningConditionsOnNewFork(pindexMostWork); } else { CheckForkWarningConditions(); } return true; } static void NotifyHeaderTip() { bool fNotify = false; bool fInitialBlockDownload = false; static CBlockIndex *pindexHeaderOld = nullptr; CBlockIndex *pindexHeader = nullptr; { LOCK(cs_main); pindexHeader = pindexBestHeader; if (pindexHeader != pindexHeaderOld) { fNotify = true; fInitialBlockDownload = IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } } // Send block tip changed notifications without cs_main if (fNotify) { uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader); } } bool ActivateBestChain(const Config &config, CValidationState &state, std::shared_ptr pblock) { // Note that while we're often called here from ProcessNewBlock, this is // far from a guarantee. Things in the P2P/RPC will often end up calling // us in the middle of ProcessNewBlock - do not assume pblock is set // sanely for performance or correctness! CBlockIndex *pindexMostWork = nullptr; CBlockIndex *pindexNewTip = nullptr; do { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } const CBlockIndex *pindexFork; bool fInitialDownload; { LOCK(cs_main); // Destructed before cs_main is unlocked. ConnectTrace connectTrace(g_mempool); CBlockIndex *pindexOldTip = chainActive.Tip(); if (pindexMostWork == nullptr) { pindexMostWork = FindMostWorkChain(); } // Whether we have anything to do at all. 
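ActivateBestChainStep above advances toward the target tip in batches of at most 32 blocks per iteration: it takes the ancestor of pindexMostWork at min(nHeight + 32, target height), collects the path back down to the current height, and connects it oldest-first. A standalone sketch of that batching over plain heights; NextBatch is a hypothetical helper, not part of the source.

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Returns the heights to connect this round, oldest first, starting just
    // above `currentHeight` and advancing at most `batch` blocks toward `target`.
    std::vector<int> NextBatch(int currentHeight, int targetHeight, int batch = 32) {
        const int stopHeight = std::min(currentHeight + batch, targetHeight);
        std::vector<int> heights;
        // The real code walks pprev pointers down from the ancestor at stopHeight
        // and then connects the collected blocks in reverse; over bare heights
        // this is simply the ascending range (currentHeight, stopHeight].
        for (int h = currentHeight + 1; h <= stopHeight; ++h) {
            heights.push_back(h);
        }
        return heights;
    }

    int main() {
        // Fork point at height 100, best candidate at height 170:
        // two full batches of 32 and one partial batch of 6.
        int height = 100;
        const int target = 170;
        int rounds = 0;
        while (height != target) {
            std::vector<int> batch = NextBatch(height, target);
            height = batch.back();
            ++rounds;
        }
        assert(rounds == 3);
        return 0;
    }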
if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) { return true; } bool fInvalidFound = false; std::shared_ptr nullBlockPtr; if (!ActivateBestChainStep( config, state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) { return false; } if (fInvalidFound) { // Wipe cache, we may need another branch now. pindexMostWork = nullptr; } pindexNewTip = chainActive.Tip(); pindexFork = chainActive.FindFork(pindexOldTip); fInitialDownload = IsInitialBlockDownload(); for (const PerBlockConnectTrace &trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); GetMainSignals().BlockConnected(trace.pblock, trace.pindex, *trace.conflictedTxs); } } // When we reach this point, we switched to a new tip (stored in // pindexNewTip). // Notifications/callbacks that can run without cs_main // Notify external listeners about the new tip. GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); // Always notify the UI if a new block tip was connected if (pindexFork != pindexNewTip) { uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip); } } while (pindexNewTip != pindexMostWork); const CChainParams ¶ms = config.GetChainParams(); CheckBlockIndex(params.GetConsensus()); // Write changes periodically to disk, after relay. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) { StartShutdown(); } return true; } bool PreciousBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { { LOCK(cs_main); if (pindex->nChainWork < chainActive.Tip()->nChainWork) { // Nothing to do, this block is not at the tip. return true; } if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) { // The chain has been extended since the last call, reset the // counter. nBlockReverseSequenceId = -1; } nLastPreciousChainwork = chainActive.Tip()->nChainWork; setBlockIndexCandidates.erase(pindex); pindex->nSequenceId = nBlockReverseSequenceId; if (nBlockReverseSequenceId > std::numeric_limits::min()) { // We can't keep reducing the counter if somebody really wants to // call preciousblock 2**31-1 times on the same set of tips... nBlockReverseSequenceId--; } // In case this was parked, unpark it. UnparkBlock(pindex); // Make sure it is added to the candidate list if apropriate. if (pindex->IsValid(BlockValidity::TRANSACTIONS) && pindex->nChainTx) { setBlockIndexCandidates.insert(pindex); PruneBlockIndexCandidates(); } } return ActivateBestChain(config, state); } static bool UnwindBlock(const Config &config, CValidationState &state, CBlockIndex *pindex, bool invalidate) { AssertLockHeld(cs_main); // Mark the block as either invalid or parked. pindex->nStatus = invalidate ? pindex->nStatus.withFailed() : pindex->nStatus.withParked(); setDirtyBlockIndex.insert(pindex); DisconnectedBlockTransactions disconnectpool; while (chainActive.Contains(pindex)) { CBlockIndex *pindexWalk = chainActive.Tip(); if (pindexWalk != pindex) { pindexWalk->nStatus = invalidate ? pindexWalk->nStatus.withFailedParent() : pindexWalk->nStatus.withParkedParent(); setDirtyBlockIndex.insert(pindexWalk); } // ActivateBestChain considers blocks already in chainActive // unconditionally valid already, so force disconnect away from it. 
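PreciousBlock above hands the chosen block an ever-decreasing negative nSequenceId. The comparator that consumes it (CBlockIndexWorkComparator) is not part of this hunk, so the ordering in the sketch below, where a lower sequence id wins ties on equal chain work, is an assumption used only to illustrate why a negative id makes a block "precious".

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for a block index entry; treat the exact tie-break
    // as an assumption, since the real comparator is defined elsewhere.
    struct Idx {
        uint64_t chainWork;
        int32_t sequenceId; // lower is preferred when work ties
    };

    // Returns true if `a` should be preferred over `b` as the chain tip.
    bool Preferred(const Idx &a, const Idx &b) {
        if (a.chainWork != b.chainWork) return a.chainWork > b.chainWork;
        return a.sequenceId < b.sequenceId;
    }

    int main() {
        Idx tip{1000, 5};
        Idx competitor{1000, 7};        // same work, arrived later
        assert(Preferred(tip, competitor));
        competitor.sequenceId = -1;     // "preciousblock" assigns a negative id
        assert(Preferred(competitor, tip));
        return 0;
    }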
if (!DisconnectTip(config, state, &disconnectpool)) { // It's probably hopeless to try to make the mempool consistent // here if DisconnectTip failed, but we can try. disconnectpool.updateMempoolForReorg(config, false); return false; } } // DisconnectTip will add transactions to disconnectpool; try to add these // back to the mempool. disconnectpool.updateMempoolForReorg(config, true); // The resulting new best tip may not be in setBlockIndexCandidates anymore, // so add it again. for (const std::pair &it : mapBlockIndex) { CBlockIndex *i = it.second; if (i->IsValid(BlockValidity::TRANSACTIONS) && i->nChainTx && !setBlockIndexCandidates.value_comp()(i, chainActive.Tip())) { setBlockIndexCandidates.insert(i); } } if (invalidate) { InvalidChainFound(pindex); } uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev); return true; } bool FinalizeBlockAndInvalidate(const Config &config, CValidationState &state, CBlockIndex *pindex) { AssertLockHeld(cs_main); if (!FinalizeBlockInternal(config, state, pindex)) { // state is set by FinalizeBlockInternal. return false; } // We have a valid candidate, make sure it is not parked. if (pindex->nStatus.isOnParkedChain()) { UnparkBlock(pindex); } // If the finalized block is not on the active chain, we need to rewind. if (!AreOnTheSameFork(pindex, chainActive.Tip())) { const CBlockIndex *pindexFork = chainActive.FindFork(pindex); CBlockIndex *pindexToInvalidate = chainActive.Tip()->GetAncestor(pindexFork->nHeight + 1); return InvalidateBlock(config, state, pindexToInvalidate); } return true; } bool InvalidateBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { return UnwindBlock(config, state, pindex, true); } bool ParkBlock(const Config &config, CValidationState &state, CBlockIndex *pindex) { return UnwindBlock(config, state, pindex, false); } template void UpdateFlagsForBlock(CBlockIndex *pindexBase, CBlockIndex *pindex, F f) { BlockStatus newStatus = f(pindex->nStatus); if (pindex->nStatus != newStatus && pindex->GetAncestor(pindexBase->nHeight) == pindexBase) { pindex->nStatus = newStatus; setDirtyBlockIndex.insert(pindex); if (pindex->IsValid(BlockValidity::TRANSACTIONS) && pindex->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), pindex)) { setBlockIndexCandidates.insert(pindex); } } } template void UpdateFlags(CBlockIndex *pindex, F f, C fchild) { AssertLockHeld(cs_main); // Update the current block. UpdateFlagsForBlock(pindex, pindex, f); // Update the flags from this block and all its descendants. BlockMap::iterator it = mapBlockIndex.begin(); while (it != mapBlockIndex.end()) { UpdateFlagsForBlock(pindex, it->second, fchild); it++; } // Update the flags from all ancestors too. while (pindex != nullptr) { BlockStatus newStatus = f(pindex->nStatus); if (pindex->nStatus != newStatus) { pindex->nStatus = newStatus; setDirtyBlockIndex.insert(pindex); } pindex = pindex->pprev; } } template void UpdateFlags(CBlockIndex *pindex, F f) { // Handy shorthand. UpdateFlags(pindex, f, f); } bool ResetBlockFailureFlags(CBlockIndex *pindex) { AssertLockHeld(cs_main); if (pindexBestInvalid && (pindexBestInvalid->GetAncestor(pindex->nHeight) == pindex || pindex->GetAncestor(pindexBestInvalid->nHeight) == pindexBestInvalid)) { // Reset the invalid block marker if it is about to be cleared. pindexBestInvalid = nullptr; } // In case we are reconsidering something before the finalization point, // move the finalization point to the last common ancestor. 
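The UpdateFlags/UpdateFlagsForBlock helpers above apply a status-rewriting functor to a block, to every index entry that descends from it (detected via GetAncestor), and to all of its ancestors. The real version can apply a different functor to descendants; the sketch below uses a single lambda, and Blk, HasAncestor and the bit layout are illustrative assumptions.

    #include <cassert>
    #include <functional>
    #include <vector>

    struct Blk {
        int height = 0;
        Blk *prev = nullptr;
        unsigned status = 0; // bit 0: failed, bit 1: failed parent
    };

    // Is `base` an ancestor of (or equal to) `b`?
    bool HasAncestor(const Blk *b, const Blk *base) {
        while (b && b->height >= base->height) {
            if (b == base) return true;
            b = b->prev;
        }
        return false;
    }

    void UpdateFlagsSketch(std::vector<Blk *> &index, Blk *base,
                           const std::function<unsigned(unsigned)> &f) {
        // Descendants (and the base itself) get the rewritten status.
        for (Blk *b : index) {
            if (HasAncestor(b, base)) b->status = f(b->status);
        }
        // Ancestors are updated as well.
        for (Blk *b = base->prev; b; b = b->prev) b->status = f(b->status);
    }

    int main() {
        Blk a{0}, b{1, &a, 0x1 /*failed*/}, c{2, &b, 0x2 /*failed parent*/};
        std::vector<Blk *> index{&a, &b, &c};
        // Clear both failure bits, mirroring withClearedFailureFlags().
        UpdateFlagsSketch(index, &b, [](unsigned s) { return s & ~0x3u; });
        assert(b.status == 0 && c.status == 0);
        return 0;
    }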
if (pindexFinalized) { pindexFinalized = LastCommonAncestor(pindex, pindexFinalized); } UpdateFlags(pindex, [](const BlockStatus status) { return status.withClearedFailureFlags(); }); return true; } static bool UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) { AssertLockHeld(cs_main); if (pindexBestParked && (pindexBestParked->GetAncestor(pindex->nHeight) == pindex || pindex->GetAncestor(pindexBestParked->nHeight) == pindexBestParked)) { // Reset the parked block marker if it is about to be cleared. pindexBestParked = nullptr; } UpdateFlags(pindex, [](const BlockStatus status) { return status.withClearedParkedFlags(); }, [fClearChildren](const BlockStatus status) { return fClearChildren ? status.withClearedParkedFlags() : status.withParkedParent(false); }); return true; } bool UnparkBlockAndChildren(CBlockIndex *pindex) { return UnparkBlockImpl(pindex, true); } bool UnparkBlock(CBlockIndex *pindex) { return UnparkBlockImpl(pindex, false); } const CBlockIndex *GetFinalizedBlock() { AssertLockHeld(cs_main); return pindexFinalized; } bool IsBlockFinalized(const CBlockIndex *pindex) { AssertLockHeld(cs_main); return pindexFinalized && pindexFinalized->GetAncestor(pindex->nHeight) == pindex; } static CBlockIndex *AddToBlockIndex(const CBlockHeader &block) { // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end()) { return it->second; } // Construct new block index object CBlockIndex *pindexNew = new CBlockIndex(block); assert(pindexNew); // We assign the sequence id to blocks only when the full data is available, // to avoid miners withholding blocks but broadcasting headers, to get a // competitive advantage. pindexNew->nSequenceId = 0; BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock); if (miPrev != mapBlockIndex.end()) { pindexNew->pprev = (*miPrev).second; pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->BuildSkip(); } pindexNew->nTimeReceived = GetTime(); pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BlockValidity::TREE); if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) { pindexBestHeader = pindexNew; } setDirtyBlockIndex.insert(pindexNew); return pindexNew; } /** * Mark a block as having its data received and checked (up to * BLOCK_VALID_TRANSACTIONS). */ bool ReceivedBlockTransactions(const CBlock &block, CValidationState &state, CBlockIndex *pindexNew, const CDiskBlockPos &pos) { pindexNew->nTx = block.vtx.size(); pindexNew->nChainTx = 0; pindexNew->nFile = pos.nFile; pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; pindexNew->nStatus = pindexNew->nStatus.withData(); pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) { // If pindexNew is the genesis block or all parents are // BLOCK_VALID_TRANSACTIONS. std::deque queue; queue.push_back(pindexNew); // Recursively process any descendant blocks that now may be eligible to // be connected. while (!queue.empty()) { CBlockIndex *pindex = queue.front(); queue.pop_front(); pindex->nChainTx = (pindex->pprev ? 
pindex->pprev->nChainTx : 0) + pindex->nTx; if (pindex->nSequenceId == 0) { // We assign a sequence is when transaction are recieved to // prevent a miner from being able to broadcast a block but not // its content. However, a sequence id may have been set // manually, for instance via PreciousBlock, in which case, we // don't need to assign one. pindex->nSequenceId = nBlockSequenceId++; } if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { setBlockIndexCandidates.insert(pindex); } std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex); while (range.first != range.second) { std::multimap::iterator it = range.first; queue.push_back(it->second); range.first++; mapBlocksUnlinked.erase(it); } } } else if (pindexNew->pprev && pindexNew->pprev->IsValid(BlockValidity::TREE)) { mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); } return true; } static bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false) { LOCK(cs_LastBlockFile); unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } if (!fKnown) { while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { nFile++; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } } pos.nFile = nFile; pos.nPos = vinfoBlockFile[nFile].nSize; } if ((int)nFile != nLastBlockFile) { if (!fKnown) { LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); } FlushBlockFile(!fKnown); nLastBlockFile = nFile; } vinfoBlockFile[nFile].AddBlock(nHeight, nTime); if (fKnown) { vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize); } else { vinfoBlockFile[nFile].nSize += nAddSize; } if (!fKnown) { unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) { fCheckForPruning = true; } if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenBlockFile(pos); if (file) { LogPrintf( "Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else { return state.Error("out of disk space"); } } } setDirtyFileInfo.insert(nFile); return true; } static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize) { pos.nFile = nFile; LOCK(cs_LastBlockFile); unsigned int nNewSize; pos.nPos = vinfoBlockFile[nFile].nUndoSize; nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize; setDirtyFileInfo.insert(nFile); unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (fPruneMode) { fCheckForPruning = true; } if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { FILE *file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else { return state.Error("out of disk space"); } } return 
true; } /** * Return true if the provided block header is valid. * Only verify PoW if blockValidationOptions is configured to do so. * This allows validation of headers on which the PoW hasn't been done. * For example: to validate a template handed to mining software. * Do not call this for any check that depends on the context. * For context-dependent calls, see ContextualCheckBlockHeader. */ static bool CheckBlockHeader( const Config &config, const CBlockHeader &block, CValidationState &state, BlockValidationOptions validationOptions = BlockValidationOptions()) { // Check proof of work matches claimed amount if (validationOptions.shouldValidatePoW() && !CheckProofOfWork(block.GetHash(), block.nBits, config)) { return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed"); } return true; } bool CheckBlock(const Config &config, const CBlock &block, CValidationState &state, BlockValidationOptions validationOptions) { // These are checks that are independent of context. if (block.fChecked) { return true; } // Check that the header is valid (particularly PoW). This is mostly // redundant with the call in AcceptBlockHeader. if (!CheckBlockHeader(config, block, state, validationOptions)) { return false; } // Check the merkle root. if (validationOptions.shouldValidateMerkleRoot()) { bool mutated; uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated); if (block.hashMerkleRoot != hashMerkleRoot2) { return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch"); } // Check for merkle tree malleability (CVE-2012-2459): repeating // sequences of transactions in a block without affecting the merkle // root of a block, while still invalidating it. if (mutated) { return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction"); } } // All potential-corruption validation must be done before we do any // transaction validation, as otherwise we may mark the header as invalid // because we receive the wrong transactions for it. // First transaction must be coinbase. if (block.vtx.empty()) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase"); } // Size limits. auto nMaxBlockSize = config.GetMaxBlockSize(); // Bail early if there is no way this block is of reasonable size. if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); } auto currentBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); if (currentBlockSize > nMaxBlockSize) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); } // And a valid coinbase. if (!CheckCoinbase(*block.vtx[0], state)) { return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(), strprintf("Coinbase check failed (txid %s) %s", block.vtx[0]->GetId().ToString(), state.GetDebugMessage())); } // Keep track of the sigops count. uint64_t nSigOps = 0; auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize); // Check transactions auto txCount = block.vtx.size(); auto *tx = block.vtx[0].get(); size_t i = 0; while (true) { // Count the sigops for the current transaction. If the total sigops // count is too high, the block is invalid.
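CheckBlock above rejects oversized blocks cheaply, without serializing anything, whenever even minimally-sized transactions would already overflow the cap, and then enforces a sigop budget that scales with the serialized block size. GetMaxBlockSigOpsCount is not shown in this hunk, so the per-megabyte constant below is an assumption; the sketch only illustrates the shape of both checks.

    #include <cassert>
    #include <cstdint>

    // Assumed constants, for illustration only.
    static const uint64_t ONE_MEGABYTE = 1000000;
    static const uint64_t MAX_SIGOPS_PER_MB = 20000;
    static const uint64_t MIN_TX_SIZE = 100; // placeholder for MIN_TRANSACTION_SIZE

    // Cheap early-out: even if every transaction were minimal, would the block
    // already exceed the configured maximum size?
    bool ObviouslyTooLarge(size_t txCount, uint64_t maxBlockSize) {
        return txCount * MIN_TX_SIZE > maxBlockSize;
    }

    // Sigop budget grows with each started megabyte of serialized size.
    uint64_t MaxSigOpsSketch(uint64_t blockSize) {
        const uint64_t startedMegabytes = 1 + (blockSize - 1) / ONE_MEGABYTE;
        return startedMegabytes * MAX_SIGOPS_PER_MB;
    }

    int main() {
        assert(ObviouslyTooLarge(100000, 8 * ONE_MEGABYTE)); // 10 MB lower bound vs 8 MB cap
        assert(MaxSigOpsSketch(ONE_MEGABYTE) == 20000);      // exactly 1 MB
        assert(MaxSigOpsSketch(ONE_MEGABYTE + 1) == 40000);  // just over 1 MB
        return 0;
    }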
nSigOps += GetSigOpCountWithoutP2SH(*tx, STANDARD_SCRIPT_VERIFY_FLAGS); if (nSigOps > nMaxSigOpsCount) { return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount"); } // Go to the next transaction. i++; // We reached the end of the block, success. if (i >= txCount) { break; } // Check that the transaction is valid. Because this check differs for // the coinbase, the loop is arranged such as this only runs after at // least one increment. tx = block.vtx[i].get(); if (!CheckRegularTransaction(*tx, state)) { return state.Invalid( false, state.GetRejectCode(), state.GetRejectReason(), strprintf("Transaction check failed (txid %s) %s", tx->GetId().ToString(), state.GetDebugMessage())); } } if (validationOptions.shouldValidatePoW() && validationOptions.shouldValidateMerkleRoot()) { block.fChecked = true; } return true; } static bool CheckIndexAgainstCheckpoint(const CBlockIndex *pindexPrev, CValidationState &state, const CChainParams &chainparams, const uint256 &hash) { if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock) { return true; } int nHeight = pindexPrev->nHeight + 1; const CCheckpointData &checkpoints = chainparams.Checkpoints(); // Check that the block chain matches the known block chain up to a // checkpoint. if (!Checkpoints::CheckBlock(checkpoints, nHeight, hash)) { return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight), REJECT_CHECKPOINT, "checkpoint mismatch"); } // Don't accept any forks from the main chain prior to last checkpoint. // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in // our MapBlockIndex. CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints); if (pcheckpoint && nHeight < pcheckpoint->nHeight) { return state.DoS( 100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint"); } return true; } static bool ContextualCheckBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, const CBlockIndex *pindexPrev, int64_t nAdjustedTime) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); const int nHeight = pindexPrev == nullptr ? 
0 : pindexPrev->nHeight + 1; // Check proof of work if (block.nBits != GetNextWorkRequired(pindexPrev, &block, config)) { LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight); return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work"); } // Check timestamp against prev if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) { return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early"); } // Check timestamp if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) { return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future"); } // Reject outdated version blocks when 95% (75% on testnet) of the network // has upgraded: // check for version 2, 3 and 4 upgrades if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) || (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) || (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) { return state.Invalid( false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion), strprintf("rejected nVersion=0x%08x block", block.nVersion)); } return true; } bool ContextualCheckTransactionForCurrentBlock(const Config &config, const CTransaction &tx, CValidationState &state, int flags) { AssertLockHeld(cs_main); // By convention a negative value for flags indicates that the current // network-enforced consensus rules should be used. In a future soft-fork // scenario that would mean checking which rules would be enforced for the // next block and setting the appropriate flags. At the present time no // soft-forks are scheduled, so no flags are set. flags = std::max(flags, 0); // ContextualCheckTransactionForCurrentBlock() uses chainActive.Height()+1 // to evaluate nLockTime because when IsFinalTx() is called within // CBlock::AcceptBlock(), the height of the block *being* evaluated is what // is used. Thus if we want to know if a transaction can be part of the // *next* block, we need to call ContextualCheckTransaction() with one more // than chainActive.Height(). const int nBlockHeight = chainActive.Height() + 1; // BIP113 will require that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. // When the next block is created its previous block will be the current // chain tip, so we use that to calculate the median time passed to // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set. const int64_t nMedianTimePast = chainActive.Tip() == nullptr ? 0 : chainActive.Tip()->GetMedianTimePast(); const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST) ? nMedianTimePast : GetAdjustedTime(); return ContextualCheckTransaction(config, tx, state, nBlockHeight, nLockTimeCutoff, nMedianTimePast); } static bool ContextualCheckBlock(const Config &config, const CBlock &block, CValidationState &state, const CBlockIndex *pindexPrev) { const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Start enforcing BIP113 (Median Time Past). int nLockTimeFlags = 0; if (nHeight >= consensusParams.CSVHeight) { nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST; } const int64_t nMedianTimePast = pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast(); const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST) ? 
nMedianTimePast : block.GetBlockTime(); const bool fIsMagneticAnomalyEnabled = IsMagneticAnomalyEnabled(config, pindexPrev); // Check that all transactions are finalized const CTransaction *prevTx = nullptr; for (const auto &ptx : block.vtx) { const CTransaction &tx = *ptx; if (fIsMagneticAnomalyEnabled) { if (prevTx && (tx.GetId() <= prevTx->GetId())) { if (tx.GetId() == prevTx->GetId()) { return state.DoS(100, false, REJECT_INVALID, "tx-duplicate", false, strprintf("Duplicated transaction %s", tx.GetId().ToString())); } return state.DoS( 100, false, REJECT_INVALID, "tx-ordering", false, strprintf("Transaction order is invalid (%s < %s)", tx.GetId().ToString(), prevTx->GetId().ToString())); } if (prevTx || !tx.IsCoinBase()) { prevTx = &tx; } } if (!ContextualCheckTransaction(config, tx, state, nHeight, nLockTimeCutoff, nMedianTimePast)) { // state set by ContextualCheckTransaction. return false; } } // Enforce rule that the coinbase starts with serialized block height if (nHeight >= consensusParams.BIP34Height) { CScript expect = CScript() << nHeight; if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) { return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase"); } } return true; } /** * If the provided block header is valid, add it to the block index. * * Returns true if the block is succesfully added to the block index. */ static bool AcceptBlockHeader(const Config &config, const CBlockHeader &block, CValidationState &state, CBlockIndex **ppindex) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator miSelf = mapBlockIndex.find(hash); CBlockIndex *pindex = nullptr; if (hash != chainparams.GetConsensus().hashGenesisBlock) { if (miSelf != mapBlockIndex.end()) { // Block header is already known. 
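Once Magnetic Anomaly is active, ContextualCheckBlock above requires every transaction after the coinbase to appear in strictly increasing txid order, rejecting duplicates and misordered pairs with distinct reasons. The same invariant in isolation, with strings standing in for txids; CheckTxOrder is a hypothetical helper.

    #include <cassert>
    #include <string>
    #include <vector>

    enum class OrderResult { Ok, Duplicate, Misordered };

    // txids[0] is the coinbase and is exempt; the rest must be strictly ascending.
    OrderResult CheckTxOrder(const std::vector<std::string> &txids) {
        for (size_t i = 2; i < txids.size(); ++i) {
            if (txids[i] == txids[i - 1]) return OrderResult::Duplicate;
            if (txids[i] < txids[i - 1]) return OrderResult::Misordered;
        }
        return OrderResult::Ok;
    }

    int main() {
        assert(CheckTxOrder({"coinbase", "0a", "3c", "7f"}) == OrderResult::Ok);
        assert(CheckTxOrder({"coinbase", "7f", "3c"}) == OrderResult::Misordered);
        assert(CheckTxOrder({"coinbase", "3c", "3c"}) == OrderResult::Duplicate);
        return 0;
    }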
pindex = miSelf->second; if (ppindex) { *ppindex = pindex; } if (pindex->nStatus.isInvalid()) { return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate"); } return true; } if (!CheckBlockHeader(config, block, state)) { return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } // Get prev block index BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); if (mi == mapBlockIndex.end()) { return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found"); } CBlockIndex *pindexPrev = (*mi).second; assert(pindexPrev); if (pindexPrev->nStatus.isInvalid()) { return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); } if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash)) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); } } if (pindex == nullptr) { pindex = AddToBlockIndex(block); } if (ppindex) { *ppindex = pindex; } CheckBlockIndex(chainparams.GetConsensus()); return true; } // Exposed wrapper for AcceptBlockHeader bool ProcessNewBlockHeaders(const Config &config, const std::vector &headers, CValidationState &state, const CBlockIndex **ppindex, CBlockHeader *first_invalid) { if (first_invalid != nullptr) { first_invalid->SetNull(); } { LOCK(cs_main); for (const CBlockHeader &header : headers) { // Use a temp pindex instead of ppindex to avoid a const_cast CBlockIndex *pindex = nullptr; if (!AcceptBlockHeader(config, header, state, &pindex)) { if (first_invalid) { *first_invalid = header; } return false; } if (ppindex) { *ppindex = pindex; } } } NotifyHeaderTip(); return true; } /** * Store a block on disk. * * @param[in] config The global config. * @param[in-out] pblock The block we want to accept. * @param[in] fRequested A boolean to indicate if this block was requested * from our peers. * @param[in] dbp If non-null, the disk position of the block. * @param[in-out] fNewBlock True if block was first received via this call. * @return True if the block is accepted as a valid block and written to disk. */ static bool AcceptBlock(const Config &config, const std::shared_ptr &pblock, CValidationState &state, bool fRequested, const CDiskBlockPos *dbp, bool *fNewBlock) { AssertLockHeld(cs_main); const CBlock &block = *pblock; if (fNewBlock) { *fNewBlock = false; } CBlockIndex *pindex = nullptr; if (!AcceptBlockHeader(config, block, state, &pindex)) { return false; } // Try to process all requested blocks that we don't have, but only // process an unrequested block if it's new and has enough work to // advance our tip, and isn't too many blocks ahead. bool fAlreadyHave = pindex->nStatus.hasData(); // TODO: deal better with return value and error conditions for duplicate // and unrequested blocks. if (fAlreadyHave) { return true; } // Compare block header timestamps and received times of the block and the // chaintip. If they have the same chain height, use these diffs as a // tie-breaker, attempting to pick the more honestly-mined block. int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff()); int64_t chainTipTimeDiff = chainActive.Tip() ? 
std::llabs(chainActive.Tip()->GetReceivedTimeDiff()) : 0; bool isSameHeight = chainActive.Tip() && (pindex->nChainWork == chainActive.Tip()->nChainWork); if (isSameHeight) { LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, " "diff=%d\n", chainActive.Tip()->GetBlockHash().ToString(), chainTipTimeDiff); LogPrintf("New block timestamp-to-received-time difference: hash=%s, " "diff=%d\n", pindex->GetBlockHash().ToString(), newBlockTimeDiff); } bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); // Blocks that are too out-of-order needlessly limit the effectiveness of // pruning, because pruning will not delete block files that contain any // blocks which are too close in height to the tip. Apply this test // regardless of whether pruning is enabled; it should generally be safe to // not process unrequested blocks. bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP)); // TODO: Decouple this function from the block download logic by removing // fRequested // This requires some new chain datastructure to efficiently look up if a // block is in a chain leading to a candidate for best tip, despite not // being such a candidate itself. // If we didn't ask for it: if (!fRequested) { // This is a previously-processed block that was pruned. if (pindex->nTx != 0) { return true; } // Don't process less-work chains. if (!fHasMoreWork) { return true; } // Block height is too high. if (fTooFarAhead) { return true; } // Protect against DoS attacks from low-work chains. // If our tip is behind, a peer could try to send us // low-work blocks on a fake chain that we would never // request; don't process these. if (pindex->nChainWork < nMinimumChainWork) { return true; } } if (fNewBlock) { *fNewBlock = true; } if (!CheckBlock(config, block, state) || !ContextualCheckBlock(config, block, state, pindex->pprev)) { if (state.IsInvalid() && !state.CorruptionPossible()) { pindex->nStatus = pindex->nStatus.withFailed(); setDirtyBlockIndex.insert(pindex); } return error("%s: %s (block %s)", __func__, FormatStateMessage(state), block.GetHash().ToString()); } // If this is a deep reorg (a regorg of more than one block), preemptively // mark the chain as parked. If it has enough work, it'll unpark // automatically. We mark the block as parked at the very last minute so we // can make sure everything is ready to be reorged if needed. if (gArgs.GetBoolArg("-parkdeepreorg", true)) { const CBlockIndex *pindexFork = chainActive.FindFork(pindex); if (pindexFork && pindexFork->nHeight + 1 < pindex->nHeight) { LogPrintf("Park block %s as it would cause a deep reorg.\n", pindex->GetBlockHash().ToString()); pindex->nStatus = pindex->nStatus.withParked(); setDirtyBlockIndex.insert(pindex); } } // Header is valid/has work and the merkle tree is good. // Relay now, but if it does not build on our best tip, let the // SendMessages loop relay it. 
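AcceptBlock's handling of unrequested blocks above reduces to a few cheap predicates (already pruned, not more work than the tip, too far ahead of the tip, below the minimum chain work), and the -parkdeepreorg rule parks any block whose fork point lies more than one block below it. A sketch of those predicates only; the structs, field names and the simplified return semantics are assumptions made for illustration.

    #include <cassert>
    #include <cstdint>

    struct Candidate {
        int height;
        int64_t chainWork;
        bool alreadyPruned; // previously processed, data since pruned
    };

    struct TipInfo {
        int height;
        int64_t chainWork;
        int64_t minimumChainWork; // anti-DoS floor
        int blocksToKeep;         // stand-in for MIN_BLOCKS_TO_KEEP
    };

    // Should an *unrequested* block be processed further?
    bool ProcessUnrequested(const Candidate &c, const TipInfo &tip) {
        if (c.alreadyPruned) return false;                        // seen before
        if (c.chainWork <= tip.chainWork) return false;           // not more work
        if (c.height > tip.height + tip.blocksToKeep) return false; // too far ahead
        if (c.chainWork < tip.minimumChainWork) return false;     // low-work DoS guard
        return true;
    }

    // Would connecting this block reorg more than one block off the active chain?
    bool IsDeepReorg(int forkHeight, int newBlockHeight) {
        return forkHeight + 1 < newBlockHeight;
    }

    int main() {
        TipInfo tip{1000, 5000, 4000, 288};
        assert(ProcessUnrequested({1001, 5010, false}, tip));
        assert(!ProcessUnrequested({1500, 5010, false}, tip)); // too far ahead
        assert(!IsDeepReorg(999, 1000)); // builds right on the fork: not deep
        assert(IsDeepReorg(900, 1000));  // 100-block reorg: gets parked
        return 0;
    }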
if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) { GetMainSignals().NewPoWValidBlock(pindex, pblock); } int nHeight = pindex->nHeight; const CChainParams &chainparams = config.GetChainParams(); // Write block to history file try { unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; if (dbp != nullptr) { blockPos = *dbp; } if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(), dbp != nullptr)) { return error("AcceptBlock(): FindBlockPos failed"); } if (dbp == nullptr) { if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { AbortNode(state, "Failed to write block"); } } if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("AcceptBlock(): ReceivedBlockTransactions failed"); } } catch (const std::runtime_error &e) { return AbortNode(state, std::string("System error: ") + e.what()); } if (fCheckForPruning) { // we just allocated more disk space for block files. FlushStateToDisk(config.GetChainParams(), state, FLUSH_STATE_NONE); } return true; } bool ProcessNewBlock(const Config &config, const std::shared_ptr pblock, bool fForceProcessing, bool *fNewBlock) { { if (fNewBlock) { *fNewBlock = false; } const CChainParams &chainparams = config.GetChainParams(); CValidationState state; // Ensure that CheckBlock() passes before calling AcceptBlock, as // belt-and-suspenders. bool ret = CheckBlock(config, *pblock, state); LOCK(cs_main); if (ret) { // Store to disk ret = AcceptBlock(config, pblock, state, fForceProcessing, nullptr, fNewBlock); } CheckBlockIndex(chainparams.GetConsensus()); if (!ret) { GetMainSignals().BlockChecked(*pblock, state); return error("%s: AcceptBlock FAILED", __func__); } } NotifyHeaderTip(); // Only used to report errors, not invalidity - ignore it CValidationState state; if (!ActivateBestChain(config, state, pblock)) { return error("%s: ActivateBestChain failed", __func__); } return true; } bool TestBlockValidity(const Config &config, CValidationState &state, const CBlock &block, CBlockIndex *pindexPrev, BlockValidationOptions validationOptions) { AssertLockHeld(cs_main); const CChainParams &chainparams = config.GetChainParams(); assert(pindexPrev && pindexPrev == chainActive.Tip()); if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash())) { return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); } CCoinsViewCache viewNew(pcoinsTip.get()); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; // NOTE: CheckBlockHeader is called by CheckBlock if (!ContextualCheckBlockHeader(config, block, state, pindexPrev, GetAdjustedTime())) { return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state)); } if (!CheckBlock(config, block, state, validationOptions)) { return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ContextualCheckBlock(config, block, state, pindexPrev)) { return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state)); } if (!ConnectBlock(config, block, state, &indexDummy, viewNew, true)) { return false; } assert(state.IsValid()); return true; } /** * BLOCK PRUNING CODE */ /** * Calculate the amount of disk space the block & undo files currently use. 
*/ static uint64_t CalculateCurrentUsage() { uint64_t retval = 0; for (const CBlockFileInfo &file : vinfoBlockFile) { retval += file.nSize + file.nUndoSize; } return retval; } /** * Prune a block file (modify associated database entries) */ void PruneOneBlockFile(const int fileNumber) { for (const std::pair &it : mapBlockIndex) { CBlockIndex *pindex = it.second; if (pindex->nFile == fileNumber) { pindex->nStatus = pindex->nStatus.withData(false).withUndo(false); pindex->nFile = 0; pindex->nDataPos = 0; pindex->nUndoPos = 0; setDirtyBlockIndex.insert(pindex); // Prune from mapBlocksUnlinked -- any block we prune would have // to be downloaded again in order to consider its chain, at which // point it would be considered as a candidate for // mapBlocksUnlinked or setBlockIndexCandidates. std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); while (range.first != range.second) { std::multimap::iterator _it = range.first; range.first++; if (_it->second == pindex) { mapBlocksUnlinked.erase(_it); } } } } vinfoBlockFile[fileNumber].SetNull(); setDirtyFileInfo.insert(fileNumber); } void UnlinkPrunedFiles(const std::set &setFilesToPrune) { for (const int i : setFilesToPrune) { CDiskBlockPos pos(i, 0); fs::remove(GetBlockPosFilename(pos, "blk")); fs::remove(GetBlockPosFilename(pos, "rev")); LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i); } } /** * Calculate the block/rev files to delete based on height specified by user * with RPC command pruneblockchain */ static void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight) { assert(fPruneMode && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr) { return; } // last block to prune is the lesser of (user-specified height, // MIN_BLOCKS_TO_KEEP from the tip) unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP); int count = 0; for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); } /* This function is called from the RPC code for pruneblockchain */ void PruneBlockFilesManual(int nManualPruneHeight) { CValidationState state; const CChainParams &chainparams = Params(); FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight); } /** * Prune block and undo files (blk???.dat and rev???.dat) so that the disk * space used is less than a user-defined target. The user sets the target (in * MB) on the command line or in config file. This will be run on startup and * whenever new space is allocated in a block or undo file, staying below the * target. Changing back to unpruned requires a reindex (which in this case * means the blockchain must be re-downloaded.) * * Pruning functions are called from FlushStateToDisk when the global * fCheckForPruning flag has been set. Block and undo files are deleted in * lock-step (when blk00003.dat is deleted, so is rev00003.dat.). Pruning cannot * take place until the longest chain is at least a certain length (100000 on * mainnet, 1000 on testnet, 1000 on regtest). Pruning will never delete a block * within a defined distance (currently 288) from the active chain's tip.
The * block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks * that were stored in the deleted files. A db flag records the fact that at * least some block files have been pruned. * * @param[out] setFilesToPrune The set of file indices that can be unlinked * will be returned */ static void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight) { LOCK2(cs_main, cs_LastBlockFile); if (chainActive.Tip() == nullptr || nPruneTarget == 0) { return; } if (uint64_t(chainActive.Tip()->nHeight) <= nPruneAfterHeight) { return; } unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP; uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files, // so we should leave a buffer under our target to account for another // allocation before the next pruning. uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; uint64_t nBytesToPrune; int count = 0; if (nCurrentUsage + nBuffer >= nPruneTarget) { for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; if (vinfoBlockFile[fileNumber].nSize == 0) { continue; } // are we below our target? if (nCurrentUsage + nBuffer < nPruneTarget) { break; } // don't prune files that could have a block within // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); // Queue up the files for removal setFilesToPrune.insert(fileNumber); nCurrentUsage -= nBytesToPrune; count++; } } LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB " "max_prune_height=%d removed %d blk/rev pairs\n", nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024, ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024, nLastBlockWeCanPrune, count); } bool CheckDiskSpace(uint64_t nAdditionalBytes) { uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) { return AbortNode("Disk space is low!", _("Error: Disk space is low!")); } return true; } static FILE *OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) { if (pos.IsNull()) { return nullptr; } fs::path path = GetBlockPosFilename(pos, prefix); fs::create_directories(path.parent_path()); FILE *file = fsbridge::fopen(path, "rb+"); if (!file && !fReadOnly) { file = fsbridge::fopen(path, "wb+"); } if (!file) { LogPrintf("Unable to open file %s\n", path.string()); return nullptr; } if (pos.nPos) { if (fseek(file, pos.nPos, SEEK_SET)) { LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); fclose(file); return nullptr; } } return file; } FILE *OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "blk", fReadOnly); } /** Open an undo file (rev?????.dat) */ static FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { return OpenDiskFile(pos, "rev", fReadOnly); } fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) { return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); } CBlockIndex *InsertBlockIndex(uint256 hash) { if (hash.IsNull()) { return nullptr; } // Return existing BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { return (*mi).second; } // Create new CBlockIndex *pindexNew = new CBlockIndex(); if 
(!pindexNew) { throw std::runtime_error(std::string(__func__) + ": new CBlockIndex failed"); } mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); return pindexNew; } static bool LoadBlockIndexDB(const Config &config) { if (!pblocktree->LoadBlockIndexGuts(config, InsertBlockIndex)) { return false; } boost::this_thread::interruption_point(); // Calculate nChainWork std::vector> vSortedByHeight; vSortedByHeight.reserve(mapBlockIndex.size()); for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex)); } sort(vSortedByHeight.begin(), vSortedByHeight.end()); for (const std::pair &item : vSortedByHeight) { CBlockIndex *pindex = item.second; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); // We can link the chain of blocks for which we've received transactions // at some point. Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { if (pindex->pprev->nChainTx) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; } else { pindex->nChainTx = 0; mapBlocksUnlinked.insert( std::make_pair(pindex->pprev, pindex)); } } else { pindex->nChainTx = pindex->nTx; } } if (pindex->IsValid(BlockValidity::TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr)) { setBlockIndexCandidates.insert(pindex); } if (pindex->nStatus.isInvalid() && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork)) { pindexBestInvalid = pindex; } if (pindex->nStatus.isOnParkedChain() && (!pindexBestParked || pindex->nChainWork > pindexBestParked->nChainWork)) { pindexBestParked = pindex; } if (pindex->pprev) { pindex->BuildSkip(); } if (pindex->IsValid(BlockValidity::TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) { pindexBestHeader = pindex; } } // Load block file info pblocktree->ReadLastBlockFile(nLastBlockFile); vinfoBlockFile.resize(nLastBlockFile + 1); LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile); for (int nFile = 0; nFile <= nLastBlockFile; nFile++) { pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]); } LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString()); for (int nFile = nLastBlockFile + 1; true; nFile++) { CBlockFileInfo info; if (pblocktree->ReadBlockFileInfo(nFile, info)) { vinfoBlockFile.push_back(info); } else { break; } } // Check presence of blk files LogPrintf("Checking all blk files are present...\n"); std::set setBlkDataFiles; for (const std::pair &item : mapBlockIndex) { CBlockIndex *pindex = item.second; if (pindex->nStatus.hasData()) { setBlkDataFiles.insert(pindex->nFile); } } for (const int i : setBlkDataFiles) { CDiskBlockPos pos(i, 0); if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION) .IsNull()) { return false; } } // Check whether we have ever pruned block & undo files pblocktree->ReadFlag("prunedblockfiles", fHavePruned); if (fHavePruned) { LogPrintf( "LoadBlockIndexDB(): Block files have previously been pruned\n"); } // Check whether we need to continue reindexing bool fReindexing = false; pblocktree->ReadReindexing(fReindexing); fReindex |= fReindexing; // Check whether we have a transaction index pblocktree->ReadFlag("txindex", fTxIndex); LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? 
"enabled" : "disabled"); return true; } bool LoadChainTip(const Config &config) { if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) { return true; } if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) { // In case we just added the genesis block, connect it now, so // that we always have a chainActive.Tip() when we return. LogPrintf("%s: Connecting genesis block...\n", __func__); CValidationState state; if (!ActivateBestChain(config, state)) { return false; } } // Load pointer to end of best chain BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); if (it == mapBlockIndex.end()) { return false; } chainActive.SetTip(it->second); PruneBlockIndexCandidates(); LogPrintf( "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n", chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(config.GetChainParams().TxData(), chainActive.Tip())); return true; } CVerifyDB::CVerifyDB() { uiInterface.ShowProgress(_("Verifying blocks..."), 0); } CVerifyDB::~CVerifyDB() { uiInterface.ShowProgress("", 100); } bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) { LOCK(cs_main); if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) { return true; } // Verify blocks in the best chain if (nCheckDepth <= 0) { // suffices until the year 19000 nCheckDepth = 1000000000; } if (nCheckDepth > chainActive.Height()) { nCheckDepth = chainActive.Height(); } nCheckLevel = std::max(0, std::min(4, nCheckLevel)); LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); CCoinsViewCache coins(coinsview); CBlockIndex *pindexState = chainActive.Tip(); CBlockIndex *pindexFailure = nullptr; int nGoodTransactions = 0; CValidationState state; int reportDone = 0; LogPrintf("[0%%]..."); for (CBlockIndex *pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { boost::this_thread::interruption_point(); int percentageDone = std::max( 1, std::min( 99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); if (reportDone < percentageDone / 10) { // report every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone); if (pindex->nHeight < chainActive.Height() - nCheckDepth) { break; } if (fPruneMode && !pindex->nStatus.hasData()) { // If pruning, only go back as far as we have data. 
LogPrintf("VerifyDB(): block verification stopping at height %d " "(pruning, no data)\n", pindex->nHeight); break; } CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(config, block, state)) { return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state)); } // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; CDiskBlockPos pos = pindex->GetUndoPos(); if (!pos.IsNull()) { if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) { return error( "VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } // check level 3: check for inconsistencies during memory-only // disconnect of tip blocks if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { return error("VerifyDB(): *** irrecoverable inconsistency in " "block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } pindexState = pindex->pprev; if (res == DISCONNECT_UNCLEAN) { nGoodTransactions = 0; pindexFailure = pindex; } else { nGoodTransactions += block.vtx.size(); } } if (ShutdownRequested()) { return true; } } if (pindexFailure) { return error("VerifyDB(): *** coin database inconsistencies found " "(last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); } // check level 4: try reconnecting blocks if (nCheckLevel >= 4) { CBlockIndex *pindex = pindexState; while (pindex != chainActive.Tip()) { boost::this_thread::interruption_point(); uiInterface.ShowProgress( _("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)))); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error( "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } if (!ConnectBlock(config, block, state, pindex, coins)) { return error( "VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } } } LogPrintf("[DONE].\n"); LogPrintf("No coin database inconsistencies in last %i blocks (%i " "transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions); return true; } /** * Apply the effects of a block on the utxo cache, ignoring that it may already * have been applied. */ static bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &view, const Config &config) { // TODO: merge with ConnectBlock CBlock block; if (!ReadBlockFromDisk(block, pindex, config)) { return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); } for (const CTransactionRef &tx : block.vtx) { // Pass check = true as every addition may be an overwrite. 
AddCoins(view, *tx, pindex->nHeight, true); } for (const CTransactionRef &tx : block.vtx) { if (tx->IsCoinBase()) { continue; } for (const CTxIn &txin : tx->vin) { view.SpendCoin(txin.prevout); } } return true; } bool ReplayBlocks(const Config &config, CCoinsView *view) { LOCK(cs_main); CCoinsViewCache cache(view); std::vector hashHeads = view->GetHeadBlocks(); if (hashHeads.empty()) { // We're already in a consistent state. return true; } if (hashHeads.size() != 2) { return error("ReplayBlocks(): unknown inconsistent state"); } uiInterface.ShowProgress(_("Replaying blocks..."), 0); LogPrintf("Replaying blocks\n"); // Old tip during the interrupted flush. const CBlockIndex *pindexOld = nullptr; // New tip during the interrupted flush. const CBlockIndex *pindexNew; // Latest block common to both the old and the new tip. const CBlockIndex *pindexFork = nullptr; if (mapBlockIndex.count(hashHeads[0]) == 0) { return error( "ReplayBlocks(): reorganization to unknown block requested"); } pindexNew = mapBlockIndex[hashHeads[0]]; if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. if (mapBlockIndex.count(hashHeads[1]) == 0) { return error( "ReplayBlocks(): reorganization from unknown block requested"); } pindexOld = mapBlockIndex[hashHeads[1]]; pindexFork = LastCommonAncestor(pindexOld, pindexNew); assert(pindexFork != nullptr); } // Rollback along the old branch. while (pindexOld != pindexFork) { if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. CBlock block; if (!ReadBlockFromDisk(block, pindexOld, config)) { return error("RollbackBlock(): ReadBlockFromDisk() failed at " "%d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); DisconnectResult res = DisconnectBlock(block, pindexOld, cache); if (res == DISCONNECT_FAILED) { return error( "RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); } // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO // was deleted, or an existing UTXO was overwritten. It corresponds // to cases where the block-to-be-disconnect never had all its // operations applied to the UTXO set. However, as both writing a // UTXO and deleting a UTXO are idempotent operations, the result is // still a version of the UTXO set with the effects of that block // undone. } pindexOld = pindexOld->pprev; } // Roll forward from the forking point to the new tip. int nForkHeight = pindexFork ? 
pindexFork->nHeight : 0; for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) { const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight); LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight); if (!RollforwardBlock(pindex, cache, config)) { return false; } } cache.SetBestBlock(pindexNew->GetBlockHash()); cache.Flush(); uiInterface.ShowProgress("", 100); return true; } bool RewindBlockIndex(const Config &config) { LOCK(cs_main); const CChainParams ¶ms = config.GetChainParams(); int nHeight = chainActive.Height() + 1; // nHeight is now the height of the first insufficiently-validated block, or // tipheight + 1 CValidationState state; CBlockIndex *pindex = chainActive.Tip(); while (chainActive.Height() >= nHeight) { if (fPruneMode && !chainActive.Tip()->nStatus.hasData()) { // If pruning, don't try rewinding past the HAVE_DATA point; since // older blocks can't be served anyway, there's no need to walk // further, and trying to DisconnectTip() will fail (and require a // needless reindex/redownload of the blockchain). break; } if (!DisconnectTip(config, state, nullptr)) { return error( "RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) { return false; } } // Reduce validity flag and have-data flags. // We do this after actual disconnecting, otherwise we'll end up writing the // lack of data to disk before writing the chainstate, resulting in a // failure to continue if interrupted. for (const std::pair &p : mapBlockIndex) { CBlockIndex *pindexIter = p.second; if (pindexIter->IsValid(BlockValidity::TRANSACTIONS) && pindexIter->nChainTx) { setBlockIndexCandidates.insert(pindexIter); } } if (chainActive.Tip() != nullptr) { // We can't prune block index candidates based on our tip if we have // no tip due to chainActive being empty! PruneBlockIndexCandidates(); CheckBlockIndex(params.GetConsensus()); // FlushStateToDisk can possibly read chainActive. Be conservative // and skip it here, we're about to -reindex-chainstate anyway, so // it'll get called a bunch real soon. if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { return false; } } return true; } // May NOT be used after any connections are up as much of the peer-processing // logic assumes a consistent block index state void UnloadBlockIndex() { LOCK(cs_main); setBlockIndexCandidates.clear(); chainActive.SetTip(nullptr); pindexFinalized = nullptr; pindexBestInvalid = nullptr; pindexBestParked = nullptr; pindexBestHeader = nullptr; g_mempool.clear(); mapBlocksUnlinked.clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; nBlockSequenceId = 1; setDirtyBlockIndex.clear(); setDirtyFileInfo.clear(); versionbitscache.Clear(); for (BlockMap::value_type &entry : mapBlockIndex) { delete entry.second; } mapBlockIndex.clear(); fHavePruned = false; } bool LoadBlockIndex(const Config &config) { // Load block index from databases bool needs_init = fReindex; if (!fReindex) { bool ret = LoadBlockIndexDB(config); if (!ret) { return false; } needs_init = mapBlockIndex.empty(); } if (needs_init) { // Everything here is for *new* reindex/DBs. Thus, though // LoadBlockIndexDB may have set fReindex if we shut down // mid-reindex previously, we don't check fReindex and // instead only check it prior to LoadBlockIndexDB to set // needs_init. 
LogPrintf("Initializing databases...\n"); // Use the provided setting for -txindex in the new database fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX); pblocktree->WriteFlag("txindex", fTxIndex); } return true; } bool LoadGenesisBlock(const CChainParams &chainparams) { LOCK(cs_main); // Check whether we're already initialized by checking for genesis in // mapBlockIndex. Note that we can't use chainActive here, since it is // set based on the coins db, not the block index db, which is the only // thing loaded at this point. if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash())) { return true; } // Only add the genesis block if not reindexing (in which case we reuse the // one already on disk) try { CBlock &block = const_cast(chainparams.GenesisBlock()); // Start new block file unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; CValidationState state; if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0, block.GetBlockTime())) { return error("%s: FindBlockPos failed", __func__); } if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) { return error("%s: writing genesis block to disk failed", __func__); } CBlockIndex *pindex = AddToBlockIndex(block); if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) { return error("%s: genesis block not accepted", __func__); } } catch (const std::runtime_error &e) { return error("%s: failed to write genesis block: %s", __func__, e.what()); } return true; } bool LoadExternalBlockFile(const Config &config, FILE *fileIn, CDiskBlockPos *dbp) { // Map of disk positions for blocks with unknown parent (only used for // reindex) static std::multimap mapBlocksUnknownParent; int64_t nStart = GetTimeMillis(); const CChainParams &chainparams = config.GetChainParams(); int nLoaded = 0; try { // This takes over fileIn and calls fclose() on it in the CBufferedFile // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there // so any transaction can fit in the buffer. CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK, CLIENT_VERSION); uint64_t nRewind = blkdat.GetPos(); while (!blkdat.eof()) { boost::this_thread::interruption_point(); blkdat.SetPos(nRewind); // Start one byte further next time, in case of failure. nRewind++; // Remove former limit. blkdat.SetLimit(); unsigned int nSize = 0; try { // Locate a header. uint8_t buf[CMessageHeader::MESSAGE_START_SIZE]; blkdat.FindByte(chainparams.DiskMagic()[0]); nRewind = blkdat.GetPos() + 1; blkdat >> FLATDATA(buf); if (memcmp(buf, std::begin(chainparams.DiskMagic()), CMessageHeader::MESSAGE_START_SIZE)) { continue; } // Read size. blkdat >> nSize; if (nSize < 80) { continue; } } catch (const std::exception &) { // No valid block header found; don't complain. 
break; } try { // read block uint64_t nBlockPos = blkdat.GetPos(); if (dbp) { dbp->nPos = nBlockPos; } blkdat.SetLimit(nBlockPos + nSize); blkdat.SetPos(nBlockPos); std::shared_ptr pblock = std::make_shared(); CBlock &block = *pblock; blkdat >> block; nRewind = blkdat.GetPos(); // detect out of order blocks, and store them for later uint256 hash = block.GetHash(); if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) { LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), block.hashPrevBlock.ToString()); if (dbp) { mapBlocksUnknownParent.insert( std::make_pair(block.hashPrevBlock, *dbp)); } continue; } // process in case the block isn't known yet if (mapBlockIndex.count(hash) == 0 || !mapBlockIndex[hash]->nStatus.hasData()) { LOCK(cs_main); CValidationState state; if (AcceptBlock(config, pblock, state, true, dbp, nullptr)) { nLoaded++; } if (state.IsError()) { break; } } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) { LogPrint( BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight); } // Activate the genesis block so normal node progress can // continue if (hash == chainparams.GetConsensus().hashGenesisBlock) { CValidationState state; if (!ActivateBestChain(config, state)) { break; } } NotifyHeaderTip(); // Recursively process earlier encountered successors of this // block std::deque queue; queue.push_back(hash); while (!queue.empty()) { uint256 head = queue.front(); queue.pop_front(); std::pair::iterator, std::multimap::iterator> range = mapBlocksUnknownParent.equal_range(head); while (range.first != range.second) { std::multimap::iterator it = range.first; std::shared_ptr pblockrecursive = std::make_shared(); if (ReadBlockFromDisk(*pblockrecursive, it->second, config)) { LogPrint( BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(), head.ToString()); LOCK(cs_main); CValidationState dummy; if (AcceptBlock(config, pblockrecursive, dummy, true, &it->second, nullptr)) { nLoaded++; queue.push_back(pblockrecursive->GetHash()); } } range.first++; mapBlocksUnknownParent.erase(it); NotifyHeaderTip(); } } } catch (const std::exception &e) { LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what()); } } } catch (const std::runtime_error &e) { AbortNode(std::string("System error: ") + e.what()); } if (nLoaded > 0) { LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart); } return nLoaded > 0; } static void CheckBlockIndex(const Consensus::Params &consensusParams) { if (!fCheckBlockIndex) { return; } LOCK(cs_main); // During a reindex, we read the genesis block and call CheckBlockIndex // before ActivateBestChain, so we have the genesis block in mapBlockIndex // but no active chain. (A few of the tests when iterating the block tree // require that chainActive has been initialized.) if (chainActive.Height() < 0) { assert(mapBlockIndex.size() <= 1); return; } // Build forward-pointing map of the entire block tree. 
std::multimap forward; for (const std::pair &it : mapBlockIndex) { forward.emplace(it.second->pprev, it.second); } assert(forward.size() == mapBlockIndex.size()); std::pair::iterator, std::multimap::iterator> rangeGenesis = forward.equal_range(nullptr); CBlockIndex *pindex = rangeGenesis.first->second; rangeGenesis.first++; // There is only one index entry with parent nullptr. assert(rangeGenesis.first == rangeGenesis.second); // Iterate over the entire block tree, using depth-first search. // Along the way, remember whether there are blocks on the path from genesis // block being explored which are the first to have certain properties. size_t nNodes = 0; int nHeight = 0; // Oldest ancestor of pindex which is invalid. CBlockIndex *pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is parked. CBlockIndex *pindexFirstParked = nullptr; // Oldest ancestor of pindex which does not have data available. CBlockIndex *pindexFirstMissing = nullptr; // Oldest ancestor of pindex for which nTx == 0. CBlockIndex *pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE // (regardless of being valid or not). CBlockIndex *pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS // (regardless of being valid or not). CBlockIndex *pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN // (regardless of being valid or not). CBlockIndex *pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS // (regardless of being valid or not). CBlockIndex *pindexFirstNotScriptsValid = nullptr; while (pindex != nullptr) { nNodes++; if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) { pindexFirstInvalid = pindex; } if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) { pindexFirstParked = pindex; } if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) { pindexFirstMissing = pindex; } if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) { pindexFirstNeverProcessed = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::TREE) { pindexFirstNotTreeValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) { pindexFirstNotTransactionsValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::CHAIN) { pindexFirstNotChainValid = pindex; } if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) { pindexFirstNotScriptsValid = pindex; } // Begin: actual consistency checks. if (pindex->pprev == nullptr) { // Genesis block checks. // Genesis block's hash must match. assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // The current active chain's genesis block must be this block. assert(pindex == chainActive.Genesis()); } if (pindex->nChainTx == 0) { // nSequenceId can't be set positive for blocks that aren't linked // (negative is used for preciousblock) assert(pindex->nSequenceId <= 0); } // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0 // (or VALID_TRANSACTIONS) if no pruning has occurred. 
if (!fHavePruned) { // If we've never pruned, then HAVE_DATA should be equivalent to nTx // > 0 assert(pindex->nStatus.hasData() == (pindex->nTx > 0)); assert(pindexFirstMissing == pindexFirstNeverProcessed); } else if (pindex->nStatus.hasData()) { // If we have pruned, then we can only say that HAVE_DATA implies // nTx > 0 assert(pindex->nTx > 0); } if (pindex->nStatus.hasUndo()) { assert(pindex->nStatus.hasData()); } // This is pruning-independent. assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) == (pindex->nTx > 0)); // All parents having had data (at some point) is equivalent to all // parents being VALID_TRANSACTIONS, which is equivalent to nChainTx // being set. // nChainTx != 0 is used to signal that all parent blocks have been // processed (but may have been pruned). assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0)); // nHeight must be consistent. assert(pindex->nHeight == nHeight); // For every block except the genesis block, the chainwork must be // larger than the parent's. assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // The pskip pointer must point back for all but the first 2 blocks. assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // All mapBlockIndex entries must at least be TREE valid assert(pindexFirstNotTreeValid == nullptr); if (pindex->nStatus.getValidity() >= BlockValidity::TREE) { // TREE valid implies all parents are TREE valid assert(pindexFirstNotTreeValid == nullptr); } if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) { // CHAIN valid implies all parents are CHAIN valid assert(pindexFirstNotChainValid == nullptr); } if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) { // SCRIPTS valid implies all parents are SCRIPTS valid assert(pindexFirstNotScriptsValid == nullptr); } if (pindexFirstInvalid == nullptr) { // Checks for not-invalid blocks. // The failed mask cannot be set for blocks without invalid parents. assert(!pindex->nStatus.isInvalid()); } if (pindexFirstParked == nullptr) { // Checks for not-parked blocks. // The parked mask cannot be set for blocks without parked parents. assert(!pindex->nStatus.isOnParkedChain()); } if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) { if (pindexFirstInvalid == nullptr) { // If this block sorts at least as good as the current tip and // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates or be parked. if (pindexFirstMissing == nullptr) { assert(pindex->nStatus.isOnParkedChain() || setBlockIndexCandidates.count(pindex)); } // chainActive.Tip() must also be there even if some data has // been pruned. if (pindex == chainActive.Tip()) { assert(setBlockIndexCandidates.count(pindex)); } // If some parent is missing, then it could be that this block // was in setBlockIndexCandidates but had to be removed because // of the missing data. In this case it must be in // mapBlocksUnlinked -- see test below. } } else { // If this block sorts worse than the current tip or some ancestor's // block has never been seen, it cannot be in // setBlockIndexCandidates. assert(setBlockIndexCandidates.count(pindex) == 0); } // Check whether this block is in mapBlocksUnlinked.
std::pair::iterator, std::multimap::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev); bool foundInUnlinked = false; while (rangeUnlinked.first != rangeUnlinked.second) { assert(rangeUnlinked.first->first == pindex->pprev); if (rangeUnlinked.first->second == pindex) { foundInUnlinked = true; break; } rangeUnlinked.first++; } if (pindex->pprev && pindex->nStatus.hasData() && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) { // If this block has block data available, some parent was never // received, and has no invalid parents, it must be in // mapBlocksUnlinked. assert(foundInUnlinked); } if (!pindex->nStatus.hasData()) { // Can't be in mapBlocksUnlinked if we don't HAVE_DATA assert(!foundInUnlinked); } if (pindexFirstMissing == nullptr) { // We aren't missing data for any parent -- cannot be in // mapBlocksUnlinked. assert(!foundInUnlinked); } if (pindex->pprev && pindex->nStatus.hasData() && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) { // We HAVE_DATA for this block, have received data for all parents // at some point, but we're currently missing data for some parent. // We must have pruned. assert(fHavePruned); // This block may have entered mapBlocksUnlinked if: // - it has a descendant that at some point had more work than the // tip, and // - we tried switching to that descendant but were missing // data for some intermediate block between chainActive and the // tip. // So if this block is itself better than chainActive.Tip() and it // wasn't in // setBlockIndexCandidates, then it must be in mapBlocksUnlinked. if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) { if (pindexFirstInvalid == nullptr) { assert(foundInUnlinked); } } } // Perhaps too slow // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // End: actual consistency checks. // Try descending into the first subnode. std::pair::iterator, std::multimap::iterator> range = forward.equal_range(pindex); if (range.first != range.second) { // A subnode was found. pindex = range.first->second; nHeight++; continue; } // This is a leaf node. Move upwards until we reach a node of which we // have not yet visited the last child. while (pindex) { // We are going to either move to a parent or a sibling of pindex. // If pindex was the first with a certain property, unset the // corresponding variable. if (pindex == pindexFirstInvalid) { pindexFirstInvalid = nullptr; } if (pindex == pindexFirstParked) { pindexFirstParked = nullptr; } if (pindex == pindexFirstMissing) { pindexFirstMissing = nullptr; } if (pindex == pindexFirstNeverProcessed) { pindexFirstNeverProcessed = nullptr; } if (pindex == pindexFirstNotTreeValid) { pindexFirstNotTreeValid = nullptr; } if (pindex == pindexFirstNotTransactionsValid) { pindexFirstNotTransactionsValid = nullptr; } if (pindex == pindexFirstNotChainValid) { pindexFirstNotChainValid = nullptr; } if (pindex == pindexFirstNotScriptsValid) { pindexFirstNotScriptsValid = nullptr; } // Find our parent. CBlockIndex *pindexPar = pindex->pprev; // Find which child we just visited. std::pair::iterator, std::multimap::iterator> rangePar = forward.equal_range(pindexPar); while (rangePar.first->second != pindex) { // Our parent must have at least the node we're coming from as // child. assert(rangePar.first != rangePar.second); rangePar.first++; } // Proceed to the next one. rangePar.first++; if (rangePar.first != rangePar.second) { // Move to the sibling. 
pindex = rangePar.first->second; break; } else { // Move up further. pindex = pindexPar; nHeight--; continue; } } } // Check that we actually traversed the entire map. assert(nNodes == forward.size()); } std::string CBlockFileInfo::ToString() const { return strprintf( "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); } CBlockFileInfo *GetBlockFileInfo(size_t n) { return &vinfoBlockFile.at(n); } static const uint64_t MEMPOOL_DUMP_VERSION = 1; bool LoadMempool(const Config &config) { int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb"); CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); if (file.IsNull()) { LogPrintf( "Failed to open mempool file from disk. Continuing anyway.\n"); return false; } int64_t count = 0; int64_t skipped = 0; int64_t failed = 0; int64_t nNow = GetTime(); try { uint64_t version; file >> version; if (version != MEMPOOL_DUMP_VERSION) { return false; } uint64_t num; file >> num; double prioritydummy = 0; while (num--) { CTransactionRef tx; int64_t nTime; int64_t nFeeDelta; file >> tx; file >> nTime; file >> nFeeDelta; Amount amountdelta = nFeeDelta * SATOSHI; if (amountdelta != Amount::zero()) { g_mempool.PrioritiseTransaction(tx->GetId(), tx->GetId().ToString(), prioritydummy, amountdelta); } CValidationState state; if (nTime + nExpiryTimeout > nNow) { LOCK(cs_main); AcceptToMemoryPoolWithTime(config, g_mempool, state, tx, true, nullptr, nTime); if (state.IsValid()) { ++count; } else { ++failed; } } else { ++skipped; } if (ShutdownRequested()) { return false; } } std::map mapDeltas; file >> mapDeltas; for (const auto &i : mapDeltas) { g_mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second); } } catch (const std::exception &e) { LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing " "anyway.\n", e.what()); return false; } LogPrintf("Imported mempool transactions from disk: %i successes, %i " "failed, %i expired\n", count, failed, skipped); return true; } void DumpMempool(void) { int64_t start = GetTimeMicros(); std::map mapDeltas; std::vector vinfo; { LOCK(g_mempool.cs); for (const auto &i : g_mempool.mapDeltas) { mapDeltas[i.first] = i.second.second; } vinfo = g_mempool.infoAll(); } int64_t mid = GetTimeMicros(); try { FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb"); if (!filestr) { return; } CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); uint64_t version = MEMPOOL_DUMP_VERSION; file << version; file << uint64_t(vinfo.size()); for (const auto &i : vinfo) { file << *(i.tx); file << int64_t(i.nTime); file << i.nFeeDelta; mapDeltas.erase(i.tx->GetId()); } file << mapDeltas; FileCommit(file.Get()); file.fclose(); RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat"); int64_t last = GetTimeMicros(); LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid - start) * 0.000001, (last - mid) * 0.000001); } catch (const std::exception &e) { LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what()); } } //! 
Guess how far we are in the verification process at the given block index double GuessVerificationProgress(const ChainTxData &data, CBlockIndex *pindex) { if (pindex == nullptr) { return 0.0; } int64_t nNow = time(nullptr); double fTxTotal; if (pindex->nChainTx <= data.nTxCount) { fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate; } else { fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate; } return pindex->nChainTx / fTxTotal; } class CMainCleanup { public: CMainCleanup() {} ~CMainCleanup() { // block headers for (const std::pair &it : mapBlockIndex) { delete it.second; } mapBlockIndex.clear(); } } instance_of_cmaincleanup; diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp index 59eb3511ac..ee711c1929 100644 --- a/src/validationinterface.cpp +++ b/src/validationinterface.cpp @@ -1,162 +1,162 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "validationinterface.h" #include "init.h" #include "scheduler.h" #include "sync.h" #include "util.h" #include #include #include struct MainSignalsInstance { boost::signals2::signal UpdatedBlockTip; boost::signals2::signal TransactionAddedToMempool; boost::signals2::signal &, const CBlockIndex *pindex, const std::vector &)> BlockConnected; boost::signals2::signal &)> BlockDisconnected; boost::signals2::signal SetBestChain; boost::signals2::signal Inventory; boost::signals2::signal Broadcast; boost::signals2::signal BlockChecked; boost::signals2::signal &)> NewPoWValidBlock; // We are not allowed to assume the scheduler only runs in one thread, // but must ensure all callbacks happen in-order, so we end up creating // our own queue here :( SingleThreadedSchedulerClient m_schedulerClient; - MainSignalsInstance(CScheduler *pscheduler) + explicit MainSignalsInstance(CScheduler *pscheduler) : m_schedulerClient(pscheduler) {} }; static CMainSignals g_signals; void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler &scheduler) { assert(!m_internals); m_internals.reset(new MainSignalsInstance(&scheduler)); } void CMainSignals::UnregisterBackgroundSignalScheduler() { m_internals.reset(nullptr); } void CMainSignals::FlushBackgroundCallbacks() { m_internals->m_schedulerClient.EmptyQueue(); } CMainSignals &GetMainSignals() { return g_signals; } void RegisterValidationInterface(CValidationInterface *pwalletIn) { g_signals.m_internals->UpdatedBlockTip.connect(boost::bind( &CValidationInterface::UpdatedBlockTip, pwalletIn, _1, _2, _3)); g_signals.m_internals->TransactionAddedToMempool.connect(boost::bind( &CValidationInterface::TransactionAddedToMempool, pwalletIn, _1)); g_signals.m_internals->BlockConnected.connect(boost::bind( &CValidationInterface::BlockConnected, pwalletIn, _1, _2, _3)); g_signals.m_internals->BlockDisconnected.connect( boost::bind(&CValidationInterface::BlockDisconnected, pwalletIn, _1)); g_signals.m_internals->SetBestChain.connect( boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1)); g_signals.m_internals->Inventory.connect( boost::bind(&CValidationInterface::Inventory, pwalletIn, _1)); g_signals.m_internals->Broadcast.connect(boost::bind( &CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2)); g_signals.m_internals->BlockChecked.connect( boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2)); 
g_signals.m_internals->NewPoWValidBlock.connect(boost::bind( &CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2)); } void UnregisterValidationInterface(CValidationInterface *pwalletIn) { g_signals.m_internals->BlockChecked.disconnect( boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2)); g_signals.m_internals->Broadcast.disconnect(boost::bind( &CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2)); g_signals.m_internals->Inventory.disconnect( boost::bind(&CValidationInterface::Inventory, pwalletIn, _1)); g_signals.m_internals->SetBestChain.disconnect( boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1)); g_signals.m_internals->TransactionAddedToMempool.disconnect(boost::bind( &CValidationInterface::TransactionAddedToMempool, pwalletIn, _1)); g_signals.m_internals->BlockConnected.disconnect(boost::bind( &CValidationInterface::BlockConnected, pwalletIn, _1, _2, _3)); g_signals.m_internals->BlockDisconnected.disconnect( boost::bind(&CValidationInterface::BlockDisconnected, pwalletIn, _1)); g_signals.m_internals->UpdatedBlockTip.disconnect(boost::bind( &CValidationInterface::UpdatedBlockTip, pwalletIn, _1, _2, _3)); g_signals.m_internals->NewPoWValidBlock.disconnect(boost::bind( &CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2)); } void UnregisterAllValidationInterfaces() { g_signals.m_internals->BlockChecked.disconnect_all_slots(); g_signals.m_internals->Broadcast.disconnect_all_slots(); g_signals.m_internals->Inventory.disconnect_all_slots(); g_signals.m_internals->SetBestChain.disconnect_all_slots(); g_signals.m_internals->TransactionAddedToMempool.disconnect_all_slots(); g_signals.m_internals->BlockConnected.disconnect_all_slots(); g_signals.m_internals->BlockDisconnected.disconnect_all_slots(); g_signals.m_internals->UpdatedBlockTip.disconnect_all_slots(); g_signals.m_internals->NewPoWValidBlock.disconnect_all_slots(); } void CMainSignals::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { m_internals->UpdatedBlockTip(pindexNew, pindexFork, fInitialDownload); } void CMainSignals::TransactionAddedToMempool(const CTransactionRef &ptx) { m_internals->TransactionAddedToMempool(ptx); } void CMainSignals::BlockConnected( const std::shared_ptr &pblock, const CBlockIndex *pindex, const std::vector &vtxConflicted) { m_internals->BlockConnected(pblock, pindex, vtxConflicted); } void CMainSignals::BlockDisconnected( const std::shared_ptr &pblock) { m_internals->BlockDisconnected(pblock); } void CMainSignals::SetBestChain(const CBlockLocator &locator) { m_internals->SetBestChain(locator); } void CMainSignals::Inventory(const uint256 &hash) { m_internals->Inventory(hash); } void CMainSignals::Broadcast(int64_t nBestBlockTime, CConnman *connman) { m_internals->Broadcast(nBestBlockTime, connman); } void CMainSignals::BlockChecked(const CBlock &block, const CValidationState &state) { m_internals->BlockChecked(block, state); } void CMainSignals::NewPoWValidBlock( const CBlockIndex *pindex, const std::shared_ptr &block) { m_internals->NewPoWValidBlock(pindex, block); } diff --git a/src/versionbits.cpp b/src/versionbits.cpp index 504263e0e0..27f82e15f1 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -1,208 +1,209 @@ // Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "versionbits.h" #include "consensus/params.h" const struct BIP9DeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] = { { /*.name =*/"testdummy", /*.gbt_force =*/true, }, }; ThresholdState AbstractThresholdConditionChecker::GetStateFor( const CBlockIndex *pindexPrev, const Consensus::Params ¶ms, ThresholdConditionCache &cache) const { int nPeriod = Period(params); int nThreshold = Threshold(params); int64_t nTimeStart = BeginTime(params); int64_t nTimeTimeout = EndTime(params); // A block's state is always the same as that of the first of its period, so // it is computed based on a pindexPrev whose height equals a multiple of // nPeriod - 1. if (pindexPrev != nullptr) { pindexPrev = pindexPrev->GetAncestor( pindexPrev->nHeight - ((pindexPrev->nHeight + 1) % nPeriod)); } // Walk backwards in steps of nPeriod to find a pindexPrev whose information // is known std::vector vToCompute; while (cache.count(pindexPrev) == 0) { if (pindexPrev == nullptr) { // The genesis block is by definition defined. cache[pindexPrev] = THRESHOLD_DEFINED; break; } if (pindexPrev->GetMedianTimePast() < nTimeStart) { // Optimization: don't recompute down further, as we know every // earlier block will be before the start time cache[pindexPrev] = THRESHOLD_DEFINED; break; } vToCompute.push_back(pindexPrev); pindexPrev = pindexPrev->GetAncestor(pindexPrev->nHeight - nPeriod); } // At this point, cache[pindexPrev] is known assert(cache.count(pindexPrev)); ThresholdState state = cache[pindexPrev]; // Now walk forward and compute the state of descendants of pindexPrev while (!vToCompute.empty()) { ThresholdState stateNext = state; pindexPrev = vToCompute.back(); vToCompute.pop_back(); switch (state) { case THRESHOLD_DEFINED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { stateNext = THRESHOLD_FAILED; } else if (pindexPrev->GetMedianTimePast() >= nTimeStart) { stateNext = THRESHOLD_STARTED; } break; } case THRESHOLD_STARTED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { stateNext = THRESHOLD_FAILED; break; } // We need to count const CBlockIndex *pindexCount = pindexPrev; int count = 0; for (int i = 0; i < nPeriod; i++) { if (Condition(pindexCount, params)) { count++; } pindexCount = pindexCount->pprev; } if (count >= nThreshold) { stateNext = THRESHOLD_LOCKED_IN; } break; } case THRESHOLD_LOCKED_IN: { // Always progresses into ACTIVE. stateNext = THRESHOLD_ACTIVE; break; } case THRESHOLD_FAILED: case THRESHOLD_ACTIVE: { // Nothing happens, these are terminal states. break; } } cache[pindexPrev] = state = stateNext; } return state; } int AbstractThresholdConditionChecker::GetStateSinceHeightFor( const CBlockIndex *pindexPrev, const Consensus::Params ¶ms, ThresholdConditionCache &cache) const { const ThresholdState initialState = GetStateFor(pindexPrev, params, cache); // BIP 9 about state DEFINED: "The genesis block is by definition in this // state for each deployment." if (initialState == THRESHOLD_DEFINED) { return 0; } const int nPeriod = Period(params); // A block's state is always the same as that of the first of its period, so // it is computed based on a pindexPrev whose height equals a multiple of // nPeriod - 1. 
To ease understanding of the following height calculation, // it helps to remember that right now pindexPrev points to the block prior // to the block that we are computing for, thus: if we are computing for the // last block of a period, then pindexPrev points to the second to last // block of the period, and if we are computing for the first block of a // period, then pindexPrev points to the last block of the previous period. // The parent of the genesis block is represented by nullptr. pindexPrev = pindexPrev->GetAncestor(pindexPrev->nHeight - ((pindexPrev->nHeight + 1) % nPeriod)); const CBlockIndex *previousPeriodParent = pindexPrev->GetAncestor(pindexPrev->nHeight - nPeriod); while (previousPeriodParent != nullptr && GetStateFor(previousPeriodParent, params, cache) == initialState) { pindexPrev = previousPeriodParent; previousPeriodParent = pindexPrev->GetAncestor(pindexPrev->nHeight - nPeriod); } // Adjust the result because right now we point to the parent block. return pindexPrev->nHeight + 1; } namespace { /** * Class to implement versionbits logic. */ class VersionBitsConditionChecker : public AbstractThresholdConditionChecker { private: const Consensus::DeploymentPos id; protected: int64_t BeginTime(const Consensus::Params ¶ms) const override { return params.vDeployments[id].nStartTime; } int64_t EndTime(const Consensus::Params ¶ms) const override { return params.vDeployments[id].nTimeout; } int Period(const Consensus::Params ¶ms) const override { return params.nMinerConfirmationWindow; } int Threshold(const Consensus::Params ¶ms) const override { return params.nRuleChangeActivationThreshold; } bool Condition(const CBlockIndex *pindex, const Consensus::Params ¶ms) const override { return (((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && (pindex->nVersion & Mask(params)) != 0); } public: - VersionBitsConditionChecker(Consensus::DeploymentPos id_) : id(id_) {} + explicit VersionBitsConditionChecker(Consensus::DeploymentPos id_) + : id(id_) {} uint32_t Mask(const Consensus::Params ¶ms) const { return ((uint32_t)1) << params.vDeployments[id].bit; } }; } // namespace ThresholdState VersionBitsState(const CBlockIndex *pindexPrev, const Consensus::Params ¶ms, Consensus::DeploymentPos pos, VersionBitsCache &cache) { return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, cache.caches[pos]); } int VersionBitsStateSinceHeight(const CBlockIndex *pindexPrev, const Consensus::Params ¶ms, Consensus::DeploymentPos pos, VersionBitsCache &cache) { return VersionBitsConditionChecker(pos).GetStateSinceHeightFor( pindexPrev, params, cache.caches[pos]); } uint32_t VersionBitsMask(const Consensus::Params ¶ms, Consensus::DeploymentPos pos) { return VersionBitsConditionChecker(pos).Mask(params); } void VersionBitsCache::Clear() { for (unsigned int d = 0; d < Consensus::MAX_VERSION_BITS_DEPLOYMENTS; d++) { caches[d].clear(); } } diff --git a/src/wallet/test/wallet_test_fixture.h b/src/wallet/test/wallet_test_fixture.h index 6585a9e5fe..cdab76c093 100644 --- a/src/wallet/test/wallet_test_fixture.h +++ b/src/wallet/test/wallet_test_fixture.h @@ -1,18 +1,19 @@ // Copyright (c) 2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_WALLET_TEST_FIXTURE_H #define BITCOIN_WALLET_TEST_FIXTURE_H #include "test/test_bitcoin.h" /** * Testing setup and teardown for wallet. 
*/ struct WalletTestingSetup : public TestingSetup { - WalletTestingSetup(const std::string &chainName = CBaseChainParams::MAIN); + explicit WalletTestingSetup( + const std::string &chainName = CBaseChainParams::MAIN); ~WalletTestingSetup(); }; #endif diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 25032ca80a..a8ae246ce8 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -1,1211 +1,1211 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_WALLET_WALLET_H #define BITCOIN_WALLET_WALLET_H #include "amount.h" #include "script/ismine.h" #include "script/sign.h" #include "streams.h" #include "tinyformat.h" #include "ui_interface.h" #include "utilstrencodings.h" #include "validationinterface.h" #include "wallet/crypter.h" #include "wallet/rpcwallet.h" #include "wallet/walletdb.h" #include #include #include #include #include #include #include #include #include #include typedef CWallet *CWalletRef; extern std::vector vpwallets; /** * Settings */ extern CFeeRate payTxFee; extern unsigned int nTxConfirmTarget; extern bool bSpendZeroConfChange; static const unsigned int DEFAULT_KEYPOOL_SIZE = 1000; //! -paytxfee default static const Amount DEFAULT_TRANSACTION_FEE = Amount::zero(); //! -fallbackfee default static const Amount DEFAULT_FALLBACK_FEE(20000 * SATOSHI); //! minimum recommended increment for BIP 125 replacement txs static const Amount WALLET_INCREMENTAL_RELAY_FEE(5000 * SATOSHI); //! target minimum change amount static const Amount MIN_CHANGE = CENT; //! final minimum change amount after paying for fees static const Amount MIN_FINAL_CHANGE = MIN_CHANGE / 2; //! Default for -spendzeroconfchange static const bool DEFAULT_SPEND_ZEROCONF_CHANGE = true; //! Default for -walletrejectlongchains static const bool DEFAULT_WALLET_REJECT_LONG_CHAINS = false; //! -txconfirmtarget default static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 6; //! Largest (in bytes) free transaction we're willing to create static const unsigned int MAX_FREE_TRANSACTION_CREATE_SIZE = 1000; static const bool DEFAULT_WALLETBROADCAST = true; static const bool DEFAULT_DISABLE_WALLET = false; //! 
if set, all keys will be derived by using BIP32 static const bool DEFAULT_USE_HD_WALLET = true; extern const char *DEFAULT_WALLET_DAT; static const int64_t TIMESTAMP_MIN = 0; class CBlockIndex; class CChainParams; class CCoinControl; class COutput; class CReserveKey; class CScript; class CScheduler; class CTxMemPool; class CWalletTx; /** (client) version numbers for particular wallet features */ enum WalletFeature { // the earliest version new wallets supports (only useful for getinfo's // clientversion output) FEATURE_BASE = 10500, // wallet encryption FEATURE_WALLETCRYPT = 40000, // compressed public keys FEATURE_COMPRPUBKEY = 60000, // Hierarchical key derivation after BIP32 (HD Wallet) FEATURE_HD = 130000, // Wallet with HD chain split (change outputs will use m/0'/1'/k) FEATURE_HD_SPLIT = 160300, // HD is optional, use FEATURE_COMPRPUBKEY as latest version FEATURE_LATEST = FEATURE_COMPRPUBKEY, }; /** A key pool entry */ class CKeyPool { public: int64_t nTime; CPubKey vchPubKey; bool fInternal; // for change outputs CKeyPool(); CKeyPool(const CPubKey &vchPubKeyIn, bool internalIn); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) { READWRITE(nVersion); } READWRITE(nTime); READWRITE(vchPubKey); if (ser_action.ForRead()) { try { READWRITE(fInternal); } catch (std::ios_base::failure &) { /** * flag as external address if we can't read the internal * boolean * (this will be the case for any wallet before the HD chain * split version) */ fInternal = false; } } else { READWRITE(fInternal); } } }; /** Address book data */ class CAddressBookData { public: std::string name; std::string purpose; CAddressBookData() : purpose("unknown") {} typedef std::map StringMap; StringMap destdata; }; struct CRecipient { CScript scriptPubKey; Amount nAmount; bool fSubtractFeeFromAmount; }; typedef std::map mapValue_t; static inline void ReadOrderPos(int64_t &nOrderPos, mapValue_t &mapValue) { if (!mapValue.count("n")) { // TODO: calculate elsewhere nOrderPos = -1; return; } nOrderPos = atoi64(mapValue["n"].c_str()); } static inline void WriteOrderPos(const int64_t &nOrderPos, mapValue_t &mapValue) { if (nOrderPos == -1) return; mapValue["n"] = i64tostr(nOrderPos); } struct COutputEntry { CTxDestination destination; Amount amount; int vout; }; /** A transaction with a merkle branch linking it to the block chain. */ class CMerkleTx { private: /** Constant used in hashBlock to indicate tx has been abandoned */ static const uint256 ABANDON_HASH; public: CTransactionRef tx; uint256 hashBlock; /** * An nIndex == -1 means that hashBlock (in nonzero) refers to the earliest * block in the chain we know this or any in-wallet dependency conflicts * with. Older clients interpret nIndex == -1 as unconfirmed for backward * compatibility. */ int nIndex; CMerkleTx() { SetTx(MakeTransactionRef()); Init(); } - CMerkleTx(CTransactionRef arg) { + explicit CMerkleTx(CTransactionRef arg) { SetTx(std::move(arg)); Init(); } /** * Helper conversion operator to allow passing CMerkleTx where CTransaction * is expected. * TODO: adapt callers and remove this operator. */ operator const CTransaction &() const { return *tx; } void Init() { hashBlock = uint256(); nIndex = -1; } void SetTx(CTransactionRef arg) { tx = std::move(arg); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { // For compatibility with older versions. 
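// The merkle branch itself is no longer stored: a throwaway vector is
// written (always empty) and read below only so the on-disk record layout
// stays compatible with what older wallet versions expect.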
std::vector vMerkleBranch; READWRITE(tx); READWRITE(hashBlock); READWRITE(vMerkleBranch); READWRITE(nIndex); } void SetMerkleBranch(const CBlockIndex *pIndex, int posInBlock); /** * Return depth of transaction in blockchain: * <0 : conflicts with a transaction this deep in the blockchain * 0 : in memory pool, waiting to be included in a block * >=1 : this many blocks deep in the main chain */ int GetDepthInMainChain(const CBlockIndex *&pindexRet) const; int GetDepthInMainChain() const { const CBlockIndex *pindexRet; return GetDepthInMainChain(pindexRet); } bool IsInMainChain() const { const CBlockIndex *pindexRet; return GetDepthInMainChain(pindexRet) > 0; } /** * @return number of blocks to maturity for this transaction: * 0 : is not a coinbase transaction, or is a mature coinbase transaction * >0 : is a coinbase transaction which matures in this many blocks */ int GetBlocksToMaturity() const; /** * Pass this transaction to the mempool. Fails if absolute fee exceeds * absurd fee. */ bool AcceptToMemoryPool(const Amount nAbsurdFee, CValidationState &state); bool hashUnset() const { return (hashBlock.IsNull() || hashBlock == ABANDON_HASH); } bool isAbandoned() const { return (hashBlock == ABANDON_HASH); } void setAbandoned() { hashBlock = ABANDON_HASH; } TxId GetId() const { return tx->GetId(); } bool IsCoinBase() const { return tx->IsCoinBase(); } bool IsImmatureCoinBase() const; }; /** * A transaction with a bunch of additional info that only the owner cares * about. It includes any unrecorded transactions needed to link it back to the * block chain. */ class CWalletTx : public CMerkleTx { private: const CWallet *pwallet; public: mapValue_t mapValue; std::vector> vOrderForm; unsigned int fTimeReceivedIsTxTime; //!< time received by this node unsigned int nTimeReceived; /** * Stable timestamp that never changes, and reflects the order a transaction * was added to the wallet. Timestamp is based on the block time for a * transaction added as part of a block, or else the time when the * transaction was received if it wasn't part of a block, with the timestamp * adjusted in both cases so timestamp order matches the order transactions * were added to the wallet. More details can be found in * CWallet::ComputeTimeSmart(). */ unsigned int nTimeSmart; /** * From me flag is set to 1 for transactions that were created by the wallet * on this bitcoin node, and set to 0 for transactions that were created * externally and came in through the network or sendrawtransaction RPC. 
*/ char fFromMe; std::string strFromAccount; //!< position in ordered transaction list int64_t nOrderPos; // memory only mutable bool fDebitCached; mutable bool fCreditCached; mutable bool fImmatureCreditCached; mutable bool fAvailableCreditCached; mutable bool fWatchDebitCached; mutable bool fWatchCreditCached; mutable bool fImmatureWatchCreditCached; mutable bool fAvailableWatchCreditCached; mutable bool fChangeCached; mutable Amount nDebitCached; mutable Amount nCreditCached; mutable Amount nImmatureCreditCached; mutable Amount nAvailableCreditCached; mutable Amount nWatchDebitCached; mutable Amount nWatchCreditCached; mutable Amount nImmatureWatchCreditCached; mutable Amount nAvailableWatchCreditCached; mutable Amount nChangeCached; CWalletTx() { Init(nullptr); } CWalletTx(const CWallet *pwalletIn, CTransactionRef arg) : CMerkleTx(std::move(arg)) { Init(pwalletIn); } void Init(const CWallet *pwalletIn) { pwallet = pwalletIn; mapValue.clear(); vOrderForm.clear(); fTimeReceivedIsTxTime = false; nTimeReceived = 0; nTimeSmart = 0; fFromMe = false; strFromAccount.clear(); fDebitCached = false; fCreditCached = false; fImmatureCreditCached = false; fAvailableCreditCached = false; fWatchDebitCached = false; fWatchCreditCached = false; fImmatureWatchCreditCached = false; fAvailableWatchCreditCached = false; fChangeCached = false; nDebitCached = Amount::zero(); nCreditCached = Amount::zero(); nImmatureCreditCached = Amount::zero(); nAvailableCreditCached = Amount::zero(); nWatchDebitCached = Amount::zero(); nWatchCreditCached = Amount::zero(); nAvailableWatchCreditCached = Amount::zero(); nImmatureWatchCreditCached = Amount::zero(); nChangeCached = Amount::zero(); nOrderPos = -1; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { if (ser_action.ForRead()) { Init(nullptr); } char fSpent = false; if (!ser_action.ForRead()) { mapValue["fromaccount"] = strFromAccount; WriteOrderPos(nOrderPos, mapValue); if (nTimeSmart) { mapValue["timesmart"] = strprintf("%u", nTimeSmart); } } READWRITE(*(CMerkleTx *)this); //!< Used to be vtxPrev std::vector vUnused; READWRITE(vUnused); READWRITE(mapValue); READWRITE(vOrderForm); READWRITE(fTimeReceivedIsTxTime); READWRITE(nTimeReceived); READWRITE(fFromMe); READWRITE(fSpent); if (ser_action.ForRead()) { strFromAccount = mapValue["fromaccount"]; ReadOrderPos(nOrderPos, mapValue); nTimeSmart = mapValue.count("timesmart") ? (unsigned int)atoi64(mapValue["timesmart"]) : 0; } mapValue.erase("fromaccount"); mapValue.erase("version"); mapValue.erase("spent"); mapValue.erase("n"); mapValue.erase("timesmart"); } //! make sure balances are recalculated void MarkDirty() { fCreditCached = false; fAvailableCreditCached = false; fImmatureCreditCached = false; fWatchDebitCached = false; fWatchCreditCached = false; fAvailableWatchCreditCached = false; fImmatureWatchCreditCached = false; fDebitCached = false; fChangeCached = false; } void BindWallet(CWallet *pwalletIn) { pwallet = pwalletIn; MarkDirty(); } //! 
filter decides which addresses will count towards the debit Amount GetDebit(const isminefilter &filter) const; Amount GetCredit(const isminefilter &filter) const; Amount GetImmatureCredit(bool fUseCache = true) const; Amount GetAvailableCredit(bool fUseCache = true) const; Amount GetImmatureWatchOnlyCredit(const bool &fUseCache = true) const; Amount GetAvailableWatchOnlyCredit(const bool &fUseCache = true) const; Amount GetChange() const; void GetAmounts(std::list &listReceived, std::list &listSent, Amount &nFee, std::string &strSentAccount, const isminefilter &filter) const; bool IsFromMe(const isminefilter &filter) const { return GetDebit(filter) > Amount::zero(); } // True if only scriptSigs are different bool IsEquivalentTo(const CWalletTx &tx) const; bool InMempool() const; bool IsTrusted() const; int64_t GetTxTime() const; int GetRequestCount() const; // RelayWalletTransaction may only be called if fBroadcastTransactions! bool RelayWalletTransaction(CConnman *connman); std::set GetConflicts() const; }; class COutput { public: const CWalletTx *tx; int i; int nDepth; /** Whether we have the private keys to spend this output */ bool fSpendable; /** Whether we know how to spend this output, ignoring the lack of keys */ bool fSolvable; /** * Whether this output is considered safe to spend. Unconfirmed transactions * from outside keys are considered unsafe and will not be used to fund new * spending transactions. */ bool fSafe; COutput(const CWalletTx *txIn, int iIn, int nDepthIn, bool fSpendableIn, bool fSolvableIn, bool fSafeIn) { tx = txIn; i = iIn; nDepth = nDepthIn; fSpendable = fSpendableIn; fSolvable = fSolvableIn; fSafe = fSafeIn; } std::string ToString() const; }; /** Private key that includes an expiration date in case it never gets used. */ class CWalletKey { public: CPrivKey vchPrivKey; int64_t nTimeCreated; int64_t nTimeExpires; std::string strComment; //! todo: add something to note what created it (user, getnewaddress, //! change) maybe should have a map property map - CWalletKey(int64_t nExpires = 0); + explicit CWalletKey(int64_t nExpires = 0); ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion); READWRITE(vchPrivKey); READWRITE(nTimeCreated); READWRITE(nTimeExpires); READWRITE(LIMITED_STRING(strComment, 65536)); } }; /** * Internal transfers. * Database key is acentry. */ class CAccountingEntry { public: std::string strAccount; Amount nCreditDebit; int64_t nTime; std::string strOtherAccount; std::string strComment; mapValue_t mapValue; //!< position in ordered transaction list int64_t nOrderPos; uint64_t nEntryNo; CAccountingEntry() { SetNull(); } void SetNull() { nCreditDebit = Amount::zero(); nTime = 0; strAccount.clear(); strOtherAccount.clear(); strComment.clear(); nOrderPos = -1; nEntryNo = 0; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion); //! Note: strAccount is serialized as part of the key, not here. 
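// The fixed fields follow. Any extra metadata in mapValue (plus unparsed
// trailing bytes preserved in _ssExtra) is appended to strComment after a
// '\0' separator further down, presumably so that software which only
// understands the plain comment string still round-trips it unchanged.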
READWRITE(nCreditDebit); READWRITE(nTime); READWRITE(LIMITED_STRING(strOtherAccount, 65536)); if (!ser_action.ForRead()) { WriteOrderPos(nOrderPos, mapValue); if (!(mapValue.empty() && _ssExtra.empty())) { CDataStream ss(s.GetType(), s.GetVersion()); ss.insert(ss.begin(), '\0'); ss << mapValue; ss.insert(ss.end(), _ssExtra.begin(), _ssExtra.end()); strComment.append(ss.str()); } } READWRITE(LIMITED_STRING(strComment, 65536)); size_t nSepPos = strComment.find("\0", 0, 1); if (ser_action.ForRead()) { mapValue.clear(); if (std::string::npos != nSepPos) { CDataStream ss( std::vector(strComment.begin() + nSepPos + 1, strComment.end()), s.GetType(), s.GetVersion()); ss >> mapValue; _ssExtra = std::vector(ss.begin(), ss.end()); } ReadOrderPos(nOrderPos, mapValue); } if (std::string::npos != nSepPos) strComment.erase(nSepPos); mapValue.erase("n"); } private: std::vector _ssExtra; }; /** * A CWallet is an extension of a keystore, which also maintains a set of * transactions and balances, and provides the ability to create new * transactions. */ class CWallet final : public CCryptoKeyStore, public CValidationInterface { private: static std::atomic fFlushScheduled; std::atomic fAbortRescan; std::atomic fScanningWallet; /** * Select a set of coins such that nValueRet >= nTargetValue and at least * all coins from coinControl are selected; Never select unconfirmed coins * if they are not ours. */ bool SelectCoins( const std::vector &vAvailableCoins, const Amount nTargetValue, std::set> &setCoinsRet, Amount &nValueRet, const CCoinControl *coinControl = nullptr) const; CWalletDB *pwalletdbEncryption; //! the current wallet version: clients below this version are not able to //! load the wallet int nWalletVersion; //! the maximum wallet format version: memory-only variable that specifies //! to what version this wallet may be upgraded int nWalletMaxVersion; int64_t nNextResend; int64_t nLastResend; bool fBroadcastTransactions; /** * Used to keep track of spent outpoints, and detect and report conflicts * (double-spends or mutated transactions where the mutant gets mined). */ typedef std::multimap TxSpends; TxSpends mapTxSpends; void AddToSpends(const COutPoint &outpoint, const TxId &wtxid); void AddToSpends(const TxId &wtxid); /** * Mark a transaction (and its in-wallet descendants) as conflicting with a * particular block. */ void MarkConflicted(const uint256 &hashBlock, const TxId &txid); void SyncMetaData(std::pair); /** * Used by TransactionAddedToMemorypool/BlockConnected/Disconnected. * Should be called with pindexBlock and posInBlock if this is for a * transaction that is included in a block. */ void SyncTransaction(const CTransactionRef &tx, const CBlockIndex *pindex = nullptr, int posInBlock = 0); /* the HD chain data model (external chain counters) */ CHDChain hdChain; /* HD derive new child key (on internal or external chain) */ void DeriveNewChildKey(CWalletDB &walletdb, CKeyMetadata &metadata, CKey &secret, bool internal = false); std::set setInternalKeyPool; std::set setExternalKeyPool; int64_t m_max_keypool_index; std::map m_pool_key_to_index; int64_t nTimeFirstKey; /** * Private version of AddWatchOnly method which does not accept a timestamp, * and which will reset the wallet's nTimeFirstKey value to 1 if the watch * key did not previously have a timestamp associated with it. 
Because this * is an inherited virtual method, it is accessible despite being marked * private, but it is marked private anyway to encourage use of the other * AddWatchOnly which accepts a timestamp and sets nTimeFirstKey more * intelligently for more efficient rescans. */ bool AddWatchOnly(const CScript &dest) override; std::unique_ptr dbw; public: const CChainParams &chainParams; /* * Main wallet lock. * This lock protects all the fields added by CWallet. */ mutable CCriticalSection cs_wallet; /** * Get database handle used by this wallet. Ideally this function would not * be necessary. */ CWalletDBWrapper &GetDBHandle() { return *dbw; } /** * Get a name for this wallet for logging/debugging purposes. */ std::string GetName() const { if (dbw) { return dbw->GetName(); } else { return "dummy"; } } void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool); // Map from Key ID (for regular keys) or Script ID (for watch-only keys) to // key metadata. std::map mapKeyMetadata; typedef std::map MasterKeyMap; MasterKeyMap mapMasterKeys; unsigned int nMasterKeyMaxID; // Create wallet with dummy database handle - CWallet(const CChainParams &chainParamsIn) + explicit CWallet(const CChainParams &chainParamsIn) : dbw(new CWalletDBWrapper()), chainParams(chainParamsIn) { SetNull(); } // Create wallet with passed-in database handle CWallet(const CChainParams &chainParamsIn, std::unique_ptr dbw_in) : dbw(std::move(dbw_in)), chainParams(chainParamsIn) { SetNull(); } ~CWallet() { delete pwalletdbEncryption; pwalletdbEncryption = nullptr; } void SetNull() { nWalletVersion = FEATURE_BASE; nWalletMaxVersion = FEATURE_BASE; nMasterKeyMaxID = 0; pwalletdbEncryption = nullptr; nOrderPosNext = 0; nAccountingEntryNumber = 0; nNextResend = 0; nLastResend = 0; m_max_keypool_index = 0; nTimeFirstKey = 0; fBroadcastTransactions = false; fAbortRescan = false; fScanningWallet = false; } std::map mapWallet; std::list laccentries; typedef std::pair TxPair; typedef std::multimap TxItems; TxItems wtxOrdered; int64_t nOrderPosNext; uint64_t nAccountingEntryNumber; std::map mapRequestCount; std::map mapAddressBook; std::set setLockedCoins; const CWalletTx *GetWalletTx(const TxId &txid) const; //! check whether we are allowed to upgrade (or already support) to the //! named feature bool CanSupportFeature(enum WalletFeature wf) const { AssertLockHeld(cs_wallet); return nWalletMaxVersion >= wf; } /** * populate vCoins with vector of available COutputs. */ void AvailableCoins(std::vector &vCoins, bool fOnlySafe = true, const CCoinControl *coinControl = nullptr, const Amount nMinimumAmount = SATOSHI, const Amount nMaximumAmount = MAX_MONEY, const Amount nMinimumSumAmount = MAX_MONEY, const uint64_t &nMaximumCount = 0, const int &nMinDepth = 0, const int &nMaxDepth = 9999999) const; /** * Shuffle and select coins until nTargetValue is reached while avoiding * small change; This method is stochastic for some inputs and upon * completion the coin set and corresponding actual target value is * assembled. 
*/ bool SelectCoinsMinConf( const Amount nTargetValue, int nConfMine, int nConfTheirs, uint64_t nMaxAncestors, std::vector vCoins, std::set> &setCoinsRet, Amount &nValueRet) const; bool IsSpent(const TxId &txid, uint32_t n) const; bool IsLockedCoin(const TxId &txid, uint32_t n) const; void LockCoin(const COutPoint &output); void UnlockCoin(const COutPoint &output); void UnlockAllCoins(); void ListLockedCoins(std::vector &vOutpts); /* * Rescan abort properties */ void AbortRescan() { fAbortRescan = true; } bool IsAbortingRescan() { return fAbortRescan; } bool IsScanning() { return fScanningWallet; } /** * keystore implementation * Generate a new key */ CPubKey GenerateNewKey(CWalletDB &walletdb, bool internal = false); //! Adds a key to the store, and saves it to disk. bool AddKeyPubKey(const CKey &key, const CPubKey &pubkey) override; bool AddKeyPubKeyWithDB(CWalletDB &walletdb, const CKey &key, const CPubKey &pubkey); //! Adds a key to the store, without saving it to disk (used by LoadWallet) bool LoadKey(const CKey &key, const CPubKey &pubkey) { return CCryptoKeyStore::AddKeyPubKey(key, pubkey); } //! Load metadata (used by LoadWallet) bool LoadKeyMetadata(const CTxDestination &pubKey, const CKeyMetadata &metadata); bool LoadMinVersion(int nVersion) { AssertLockHeld(cs_wallet); nWalletVersion = nVersion; nWalletMaxVersion = std::max(nWalletMaxVersion, nVersion); return true; } void UpdateTimeFirstKey(int64_t nCreateTime); //! Adds an encrypted key to the store, and saves it to disk. bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector &vchCryptedSecret) override; //! Adds an encrypted key to the store, without saving it to disk (used by //! LoadWallet) bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector &vchCryptedSecret); bool AddCScript(const CScript &redeemScript) override; bool LoadCScript(const CScript &redeemScript); //! Adds a destination data tuple to the store, and saves it to disk bool AddDestData(const CTxDestination &dest, const std::string &key, const std::string &value); //! Erases a destination data tuple in the store and on disk bool EraseDestData(const CTxDestination &dest, const std::string &key); //! Adds a destination data tuple to the store, without saving it to disk bool LoadDestData(const CTxDestination &dest, const std::string &key, const std::string &value); //! Look up a destination data tuple in the store, return true if found //! false otherwise bool GetDestData(const CTxDestination &dest, const std::string &key, std::string *value) const; //! Adds a watch-only address to the store, and saves it to disk. bool AddWatchOnly(const CScript &dest, int64_t nCreateTime); bool RemoveWatchOnly(const CScript &dest) override; //! Adds a watch-only address to the store, without saving it to disk (used //! by LoadWallet) bool LoadWatchOnly(const CScript &dest); //! Holds a timestamp at which point the wallet is scheduled (externally) to //! be relocked. Caller must arrange for actual relocking to occur via //! Lock(). 
int64_t nRelockTime; bool Unlock(const SecureString &strWalletPassphrase); bool ChangeWalletPassphrase(const SecureString &strOldWalletPassphrase, const SecureString &strNewWalletPassphrase); bool EncryptWallet(const SecureString &strWalletPassphrase); void GetKeyBirthTimes(std::map &mapKeyBirth) const; unsigned int ComputeTimeSmart(const CWalletTx &wtx) const; /** * Increment the next transaction order id * @return next transaction order id */ int64_t IncOrderPosNext(CWalletDB *pwalletdb = nullptr); DBErrors ReorderTransactions(); bool AccountMove(std::string strFrom, std::string strTo, const Amount nAmount, std::string strComment = ""); bool GetLabelAddress(CPubKey &pubKey, const std::string &label, bool bForceNew = false); void MarkDirty(); bool AddToWallet(const CWalletTx &wtxIn, bool fFlushOnClose = true); bool LoadToWallet(const CWalletTx &wtxIn); void TransactionAddedToMempool(const CTransactionRef &tx) override; void BlockConnected(const std::shared_ptr &pblock, const CBlockIndex *pindex, const std::vector &vtxConflicted) override; void BlockDisconnected(const std::shared_ptr &pblock) override; bool AddToWalletIfInvolvingMe(const CTransactionRef &tx, const CBlockIndex *pIndex, int posInBlock, bool fUpdate); int64_t RescanFromTime(int64_t startTime, bool update); CBlockIndex *ScanForWalletTransactions(CBlockIndex *pindexStart, CBlockIndex *pindexStop, bool fUpdate = false); void ReacceptWalletTransactions(); void ResendWalletTransactions(int64_t nBestBlockTime, CConnman *connman) override; // ResendWalletTransactionsBefore may only be called if // fBroadcastTransactions! std::vector ResendWalletTransactionsBefore(int64_t nTime, CConnman *connman); Amount GetBalance() const; Amount GetUnconfirmedBalance() const; Amount GetImmatureBalance() const; Amount GetWatchOnlyBalance() const; Amount GetUnconfirmedWatchOnlyBalance() const; Amount GetImmatureWatchOnlyBalance() const; Amount GetLegacyBalance(const isminefilter &filter, int minDepth, const std::string *account) const; /** * Insert additional inputs into the transaction by calling * CreateTransaction(); */ bool FundTransaction(CMutableTransaction &tx, Amount &nFeeRet, bool overrideEstimatedFeeRate, const CFeeRate &specificFeeRate, int &nChangePosInOut, std::string &strFailReason, bool includeWatching, bool lockUnspents, const std::set &setSubtractFeeFromOutputs, bool keepReserveKey = true, const CTxDestination &destChange = CNoDestination()); /** * Create a new transaction paying the recipients with a set of coins * selected by SelectCoins(); Also create the change output, when needed * @note passing nChangePosInOut as -1 will result in setting a random * position */ bool CreateTransaction(const std::vector &vecSend, CWalletTx &wtxNew, CReserveKey &reservekey, Amount &nFeeRet, int &nChangePosInOut, std::string &strFailReason, const CCoinControl *coinControl = nullptr, bool sign = true); bool CommitTransaction(CWalletTx &wtxNew, CReserveKey &reservekey, CConnman *connman, CValidationState &state); void ListAccountCreditDebit(const std::string &strAccount, std::list &entries); bool AddAccountingEntry(const CAccountingEntry &); bool AddAccountingEntry(const CAccountingEntry &, CWalletDB *pwalletdb); template bool DummySignTx(CMutableTransaction &txNew, const ContainerType &coins); static CFeeRate fallbackFee; bool NewKeyPool(); size_t KeypoolCountExternalKeys(); bool TopUpKeyPool(unsigned int kpSize = 0); void ReserveKeyFromKeyPool(int64_t &nIndex, CKeyPool &keypool, bool fRequestedInternal); void KeepKey(int64_t nIndex); void 
ReturnKey(int64_t nIndex, bool fInternal, const CPubKey &pubkey); bool GetKeyFromPool(CPubKey &key, bool internal = false); int64_t GetOldestKeyPoolTime(); /** * Marks all keys in the keypool up to and including reserve_key as used. */ void MarkReserveKeysAsUsed(int64_t keypool_id); const std::map &GetAllReserveKeys() const { return m_pool_key_to_index; } /** Does the wallet have at least min_keys in the keypool? */ bool HasUnusedKeys(size_t min_keys) const; std::set> GetAddressGroupings(); std::map GetAddressBalances(); std::set GetLabelAddresses(const std::string &label) const; isminetype IsMine(const CTxIn &txin) const; /** * Returns amount of debit if the input matches the filter, otherwise * returns 0 */ Amount GetDebit(const CTxIn &txin, const isminefilter &filter) const; isminetype IsMine(const CTxOut &txout) const; Amount GetCredit(const CTxOut &txout, const isminefilter &filter) const; bool IsChange(const CTxOut &txout) const; Amount GetChange(const CTxOut &txout) const; bool IsMine(const CTransaction &tx) const; /** should probably be renamed to IsRelevantToMe */ bool IsFromMe(const CTransaction &tx) const; Amount GetDebit(const CTransaction &tx, const isminefilter &filter) const; /** Returns whether all of the inputs match the filter */ bool IsAllFromMe(const CTransaction &tx, const isminefilter &filter) const; Amount GetCredit(const CTransaction &tx, const isminefilter &filter) const; Amount GetChange(const CTransaction &tx) const; void SetBestChain(const CBlockLocator &loc) override; DBErrors LoadWallet(bool &fFirstRunRet); DBErrors ZapWalletTx(std::vector &vWtx); DBErrors ZapSelectTx(std::vector &txIdsIn, std::vector &txIdsOut); bool SetAddressBook(const CTxDestination &address, const std::string &strName, const std::string &purpose); bool DelAddressBook(const CTxDestination &address); const std::string &GetLabelName(const CScript &scriptPubKey) const; void Inventory(const uint256 &hash) override { LOCK(cs_wallet); std::map::iterator mi = mapRequestCount.find(hash); if (mi != mapRequestCount.end()) { (*mi).second++; } } void GetScriptForMining(std::shared_ptr &script); unsigned int GetKeyPoolSize() { // set{Ex,In}ternalKeyPool AssertLockHeld(cs_wallet); return setInternalKeyPool.size() + setExternalKeyPool.size(); } //! signify that a particular wallet feature is now used. this may change //! nWalletVersion and nWalletMaxVersion if those are lower bool SetMinVersion(enum WalletFeature, CWalletDB *pwalletdbIn = nullptr, bool fExplicit = false); //! change which version we're allowed to upgrade to (note that this does //! not immediately imply upgrading to that format) bool SetMaxVersion(int nVersion); //! get the current wallet format (the oldest client version guaranteed to //! understand this wallet) int GetVersion() { LOCK(cs_wallet); return nWalletVersion; } //! Get wallet transactions that conflict with given transaction (spend same //! outputs) std::set GetConflicts(const TxId &txid) const; //! Check if a given transaction has any of its outputs spent by another //! transaction in the wallet bool HasWalletSpend(const TxId &txid) const; //! Flush wallet (bitdb flush) void Flush(bool shutdown = false); /** * Address book entry changed. * @note called with lock cs_wallet held. */ boost::signals2::signal NotifyAddressBookChanged; /** * Wallet transaction added, removed or updated. * @note called with lock cs_wallet held. */ boost::signals2::signal NotifyTransactionChanged; /** Show progress e.g. 
for rescan */ boost::signals2::signal ShowProgress; /** Watch-only address added */ boost::signals2::signal NotifyWatchonlyChanged; /** Inquire whether this wallet broadcasts transactions. */ bool GetBroadcastTransactions() const { return fBroadcastTransactions; } /** Set whether this wallet broadcasts transactions. */ void SetBroadcastTransactions(bool broadcast) { fBroadcastTransactions = broadcast; } /** * Mark a transaction (and it in-wallet descendants) as abandoned so its * inputs may be respent. */ bool AbandonTransaction(const TxId &txid); /** * Initializes the wallet, returns a new CWallet instance or a null pointer * in case of an error. */ static CWallet *CreateWalletFromFile(const CChainParams &chainParams, const std::string walletFile); /** * Wallet post-init setup * Gives the wallet a chance to register repetitive tasks and complete * post-init tasks */ void postInitProcess(CScheduler &scheduler); bool BackupWallet(const std::string &strDest); /* Set the HD chain model (chain child index counters) */ bool SetHDChain(const CHDChain &chain, bool memonly); const CHDChain &GetHDChain() { return hdChain; } /* Returns true if HD is enabled */ bool IsHDEnabled(); /* Generates a new HD master key (will not be activated) */ CPubKey GenerateNewHDMasterKey(); /** * Set the current HD master key (will reset the chain child index counters) * If possibleOldChain is provided, the parameters from the old chain * (version) will be preserved. */ bool SetHDMasterKey(const CPubKey &key, CHDChain *possibleOldChain = nullptr); }; /** A key allocated from the key pool. */ class CReserveKey final : public CReserveScript { protected: CWallet *pwallet; int64_t nIndex; CPubKey vchPubKey; bool fInternal; public: - CReserveKey(CWallet *pwalletIn) { + explicit CReserveKey(CWallet *pwalletIn) { nIndex = -1; pwallet = pwalletIn; fInternal = false; } ~CReserveKey() { ReturnKey(); } void ReturnKey(); bool GetReservedKey(CPubKey &pubkey, bool internal = false); void KeepKey(); void KeepScript() override { KeepKey(); } }; /** * Account information. * Stored in wallet with key "acc"+string account name. */ class CAccount { public: CPubKey vchPubKey; CAccount() { SetNull(); } void SetNull() { vchPubKey = CPubKey(); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { int nVersion = s.GetVersion(); if (!(s.GetType() & SER_GETHASH)) { READWRITE(nVersion); } READWRITE(vchPubKey); } }; // Helper for producing a bunch of max-sized low-S signatures (eg 72 bytes) // ContainerType is meant to hold pair, and be iterable so // that each entry corresponds to each vIn, in order. template bool CWallet::DummySignTx(CMutableTransaction &txNew, const ContainerType &coins) { // Fill in dummy signatures for fee calculation. 
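// Each selected coin receives a placeholder signature from
// DummySignatureCreator (maximum size, so the measured transaction size is
// an upper bound for the real one). If a scriptPubKey cannot be solved
// here, real signing would fail too, so bail out and return false.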
int nIn = 0; for (const auto &coin : coins) { const CScript &scriptPubKey = coin.first->tx->vout[coin.second].scriptPubKey; SignatureData sigdata; if (!ProduceSignature(DummySignatureCreator(this), scriptPubKey, sigdata)) { return false; } else { UpdateTransaction(txNew, nIn, sigdata); } nIn++; } return true; } #endif // BITCOIN_WALLET_WALLET_H diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 41b8ad7219..cef1cb4dd2 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -1,277 +1,277 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_WALLET_WALLETDB_H #define BITCOIN_WALLET_WALLETDB_H #include "amount.h" #include "key.h" #include "primitives/transaction.h" #include "script/standard.h" // for CTxDestination #include "wallet/db.h" #include #include #include #include #include /** * Overview of wallet database classes: * * - CDBEnv is an environment in which the database exists (has no analog in * dbwrapper.h) * - CWalletDBWrapper represents a wallet database (similar to CDBWrapper in * dbwrapper.h) * - CDB is a low-level database transaction (similar to CDBBatch in * dbwrapper.h) * - CWalletDB is a modifier object for the wallet, and encapsulates a database * transaction as well as methods to act on the database (no analog in * dbwrapper.h) * * The latter two are named confusingly, in contrast to what the names CDB * and CWalletDB suggest they are transient transaction objects and don't * represent the database itself. */ static const bool DEFAULT_FLUSHWALLET = true; class CAccount; class CAccountingEntry; struct CBlockLocator; class CKeyPool; class CMasterKey; class CScript; class CWallet; class CWalletTx; class uint160; class uint256; /** Error statuses for the wallet database */ enum DBErrors { DB_LOAD_OK, DB_CORRUPT, DB_NONCRITICAL_ERROR, DB_TOO_NEW, DB_LOAD_FAIL, DB_NEED_REWRITE }; /* simple HD chain data model */ class CHDChain { public: uint32_t nExternalChainCounter; uint32_t nInternalChainCounter; //!< master key hash160 CKeyID masterKeyID; static const int VERSION_HD_BASE = 1; static const int VERSION_HD_CHAIN_SPLIT = 2; static const int CURRENT_VERSION = VERSION_HD_CHAIN_SPLIT; int nVersion; CHDChain() { SetNull(); } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(nExternalChainCounter); READWRITE(masterKeyID); if (this->nVersion >= VERSION_HD_CHAIN_SPLIT) { READWRITE(nInternalChainCounter); } } void SetNull() { nVersion = CHDChain::CURRENT_VERSION; nExternalChainCounter = 0; nInternalChainCounter = 0; masterKeyID.SetNull(); } }; class CKeyMetadata { public: static const int VERSION_BASIC = 1; static const int VERSION_WITH_HDDATA = 10; static const int CURRENT_VERSION = VERSION_WITH_HDDATA; int nVersion; // 0 means unknown. int64_t nCreateTime; // optional HD/bip32 keypath. std::string hdKeypath; // Id of the HD masterkey used to derive this key. 
CKeyID hdMasterKeyID; CKeyMetadata() { SetNull(); } - CKeyMetadata(int64_t nCreateTime_) { + explicit CKeyMetadata(int64_t nCreateTime_) { SetNull(); nCreateTime = nCreateTime_; } ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(this->nVersion); READWRITE(nCreateTime); if (this->nVersion >= VERSION_WITH_HDDATA) { READWRITE(hdKeypath); READWRITE(hdMasterKeyID); } } void SetNull() { nVersion = CKeyMetadata::CURRENT_VERSION; nCreateTime = 0; hdKeypath.clear(); hdMasterKeyID.SetNull(); } }; /** * Access to the wallet database. * This should really be named CWalletDBBatch, as it represents a single * transaction at the database. It will be committed when the object goes out of * scope. * Optionally (on by default) it will flush to disk as well. */ class CWalletDB { private: template bool WriteIC(const K &key, const T &value, bool fOverwrite = true) { if (!batch.Write(key, value, fOverwrite)) { return false; } m_dbw.IncrementUpdateCounter(); return true; } template bool EraseIC(const K &key) { if (!batch.Erase(key)) { return false; } m_dbw.IncrementUpdateCounter(); return true; } public: - CWalletDB(CWalletDBWrapper &dbw, const char *pszMode = "r+", - bool _fFlushOnClose = true) + explicit CWalletDB(CWalletDBWrapper &dbw, const char *pszMode = "r+", + bool _fFlushOnClose = true) : batch(dbw, pszMode, _fFlushOnClose), m_dbw(dbw) {} bool WriteName(const CTxDestination &address, const std::string &strName); bool EraseName(const CTxDestination &address); bool WritePurpose(const CTxDestination &address, const std::string &purpose); bool ErasePurpose(const CTxDestination &address); bool WriteTx(const CWalletTx &wtx); bool EraseTx(uint256 hash); bool WriteKey(const CPubKey &vchPubKey, const CPrivKey &vchPrivKey, const CKeyMetadata &keyMeta); bool WriteCryptedKey(const CPubKey &vchPubKey, const std::vector &vchCryptedSecret, const CKeyMetadata &keyMeta); bool WriteMasterKey(unsigned int nID, const CMasterKey &kMasterKey); bool WriteCScript(const uint160 &hash, const CScript &redeemScript); bool WriteWatchOnly(const CScript &script, const CKeyMetadata &keymeta); bool EraseWatchOnly(const CScript &script); bool WriteBestBlock(const CBlockLocator &locator); bool ReadBestBlock(CBlockLocator &locator); bool WriteOrderPosNext(int64_t nOrderPosNext); bool ReadPool(int64_t nPool, CKeyPool &keypool); bool WritePool(int64_t nPool, const CKeyPool &keypool); bool ErasePool(int64_t nPool); bool WriteMinVersion(int nVersion); /// This writes directly to the database, and will not update the CWallet's /// cached accounting entries! /// Use wallet.AddAccountingEntry instead, to write *and* update its caches. bool WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry &acentry); bool ReadAccount(const std::string &strAccount, CAccount &account); bool WriteAccount(const std::string &strAccount, const CAccount &account); /// Write destination data key,value tuple to database. bool WriteDestData(const CTxDestination &address, const std::string &key, const std::string &value); /// Erase destination data tuple from wallet database. 
bool EraseDestData(const CTxDestination &address, const std::string &key); Amount GetAccountCreditDebit(const std::string &strAccount); void ListAccountCreditDebit(const std::string &strAccount, std::list &acentries); DBErrors LoadWallet(CWallet *pwallet); DBErrors FindWalletTx(std::vector &txIds, std::vector &vWtx); DBErrors ZapWalletTx(std::vector &vWtx); DBErrors ZapSelectTx(std::vector &txIdsIn, std::vector &txIdsOut); /* Try to (very carefully!) recover wallet database (with a possible key * type filter) */ static bool Recover(const std::string &filename, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &out_backup_filename); /* Recover convenience-function to bypass the key filter callback, called * when verify fails, recovers everything */ static bool Recover(const std::string &filename, std::string &out_backup_filename); /* Recover filter (used as callback), will only let keys (cryptographical * keys) as KV/key-type pass through */ static bool RecoverKeysOnlyFilter(void *callbackData, CDataStream ssKey, CDataStream ssValue); /* Function to determine if a certain KV/key-type is a key (cryptographical * key) type */ static bool IsKeyType(const std::string &strType); /* verifies the database environment */ static bool VerifyEnvironment(const std::string &walletFile, const fs::path &dataDir, std::string &errorStr); /* verifies the database file */ static bool VerifyDatabaseFile(const std::string &walletFile, const fs::path &dataDir, std::string &warningStr, std::string &errorStr); //! write the hdchain model (external chain child index counter) bool WriteHDChain(const CHDChain &chain); //! Begin a new transaction bool TxnBegin(); //! Commit current transaction bool TxnCommit(); //! Abort current transaction bool TxnAbort(); //! Read wallet version bool ReadVersion(int &nVersion); //! Write wallet version bool WriteVersion(int nVersion); private: CDB batch; CWalletDBWrapper &m_dbw; CWalletDB(const CWalletDB &); void operator=(const CWalletDB &); }; //! Compacts BDB state so that wallet.dat is self-contained (if there are //! changes) void MaybeCompactWalletDB(); #endif // BITCOIN_WALLET_WALLETDB_H
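The hunks above all make the same mechanical change: a single-argument constructor gains the `explicit` keyword, so the type can no longer be produced by a silent implicit conversion. The sketch below is not from the codebase; the KeyMeta struct and Age() helper are invented for illustration, mirroring constructors such as CKeyMetadata(int64_t nCreateTime_) touched above.

#include <cstdint>
#include <iostream>

// Invented stand-in for a small value type constructible from one integer.
struct KeyMeta {
    int64_t nCreateTime;
    // Deliberately left implicit here; the patch would add `explicit`.
    KeyMeta(int64_t nCreateTimeIn) : nCreateTime(nCreateTimeIn) {}
};

// Expects the metadata object, not a raw timestamp.
static int64_t Age(const KeyMeta &meta, int64_t now) {
    return now - meta.nCreateTime;
}

int main() {
    const int64_t created = 1500000000;
    const int64_t now = 1510000000;

    // Compiles only because the constructor is implicit: the caller forgot
    // to wrap the timestamp and the compiler quietly converted it anyway.
    std::cout << Age(created, now) << '\n';

    // With `explicit` on the constructor, the call above becomes a compile
    // error and the conversion has to be spelled out:
    std::cout << Age(KeyMeta(created), now) << '\n';
    return 0;
}

Only the second call still compiles once the constructor is explicit, which is why marking such constructors explicit surfaces accidental conversions at build time without changing runtime behaviour.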