diff --git a/src/chain.h b/src/chain.h
index df3f7ad64..644b0d985 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -1,426 +1,426 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_CHAIN_H
#define BITCOIN_CHAIN_H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**
 * Maximum amount of time that a block timestamp is allowed to exceed the
 * current network-adjusted time before the block will be rejected.
 */
static constexpr int64_t MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60;
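// ---------------------------------------------------------------------------
// Illustrative aside (not part of this patch): a minimal sketch of how a
// consumer of MAX_FUTURE_BLOCK_TIME applies it. Contextual header checks
// reject a block whose timestamp is more than two hours ahead of the
// network-adjusted clock; the helper below mirrors that rule. The name
// IsTimestampTooFarInFuture and the adjusted_time parameter are hypothetical,
// introduced only for this example.
static bool IsTimestampTooFarInFuture(int64_t block_time,
                                      int64_t adjusted_time) {
    // Anything more than MAX_FUTURE_BLOCK_TIME seconds ahead of the
    // network-adjusted time is treated as invalid for now.
    return block_time > adjusted_time + MAX_FUTURE_BLOCK_TIME;
}
// ---------------------------------------------------------------------------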
/**
 * Timestamp window used as a grace period by code that compares external
 * timestamps (such as timestamps passed to RPCs, or wallet key creation times)
 * to block timestamps. This should be set at least as high as
 * MAX_FUTURE_BLOCK_TIME.
 */
static constexpr int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME;

/**
 * Maximum gap between node time and block time used
 * for the "Catching up..." mode in GUI.
 *
 * Ref: https://github.com/bitcoin/bitcoin/pull/1026
 */
static constexpr int64_t MAX_BLOCK_TIME_GAP = 90 * 60;

/**
 * The block chain is a tree shaped structure starting with the genesis block
 * at the root, with each block potentially having multiple candidates to be
 * the next block. A blockindex may have multiple pprev pointing to it, but at
 * most one of them can be part of the currently active branch.
 */
class CBlockIndex {
public:
    //! pointer to the hash of the block, if any. Memory is owned by this
    //! CBlockIndex
    const uint256 *phashBlock;

    //! pointer to the index of the predecessor of this block
    CBlockIndex *pprev;

    //! pointer to the index of some further predecessor of this block
    CBlockIndex *pskip;

    //! height of the entry in the chain. The genesis block has height 0
    int nHeight;

    //! Which # file this block is stored in (blk?????.dat)
    int nFile;

    //! Byte offset within blk?????.dat where this block's data is stored
    unsigned int nDataPos;

    //! Byte offset within rev?????.dat where this block's undo data is stored
    unsigned int nUndoPos;

    //! (memory only) Total amount of work (expected number of hashes) in the
    //! chain up to and including this block
    arith_uint256 nChainWork;

    //! Number of transactions in this block.
    //! Note: in a potential headers-first mode, this number cannot be relied
    //! upon
    unsigned int nTx;

    //! (memory only) Number of transactions in the chain up to and including
    //! this block. This value will be non-zero if and only if transactions
    //! for this block and all its parents are available. Change to 64-bit
    //! type when necessary; won't happen before 2030
    unsigned int nChainTx;

    //! Verification status of this block. See enum BlockStatus
    BlockStatus nStatus;

    //! block header
    int32_t nVersion;
    uint256 hashMerkleRoot;
    uint32_t nTime;
    uint32_t nBits;
    uint32_t nNonce;

    //! (memory only) Sequential id assigned to distinguish order in which
    //! blocks are received.
    int32_t nSequenceId;

    //! (memory only) block header metadata
    uint64_t nTimeReceived;

    //! (memory only) Maximum nTime in the chain up to and including this
    //! block.
    unsigned int nTimeMax;

    void SetNull() {
        phashBlock = nullptr;
        pprev = nullptr;
        pskip = nullptr;
        nHeight = 0;
        nFile = 0;
        nDataPos = 0;
        nUndoPos = 0;
        nChainWork = arith_uint256();
        nTx = 0;
        nChainTx = 0;
        nStatus = BlockStatus();
        nSequenceId = 0;
        nTimeMax = 0;

        nVersion = 0;
        hashMerkleRoot = uint256();
        nTime = 0;
        nTimeReceived = 0;
        nBits = 0;
        nNonce = 0;
    }

    CBlockIndex() { SetNull(); }

    explicit CBlockIndex(const CBlockHeader &block) {
        SetNull();

        nVersion = block.nVersion;
        hashMerkleRoot = block.hashMerkleRoot;
        nTime = block.nTime;
        nTimeReceived = 0;
        nBits = block.nBits;
        nNonce = block.nNonce;
    }

    FlatFilePos GetBlockPos() const {
        FlatFilePos ret;
        if (nStatus.hasData()) {
            ret.nFile = nFile;
            ret.nPos = nDataPos;
        }
        return ret;
    }

    FlatFilePos GetUndoPos() const {
        FlatFilePos ret;
        if (nStatus.hasUndo()) {
            ret.nFile = nFile;
            ret.nPos = nUndoPos;
        }
        return ret;
    }

    CBlockHeader GetBlockHeader() const {
        CBlockHeader block;
        block.nVersion = nVersion;
        if (pprev) {
            block.hashPrevBlock = pprev->GetBlockHash();
        }
        block.hashMerkleRoot = hashMerkleRoot;
        block.nTime = nTime;
        block.nBits = nBits;
        block.nNonce = nNonce;
        return block;
    }

    uint256 GetBlockHash() const { return *phashBlock; }

    int64_t GetBlockTime() const { return int64_t(nTime); }

    int64_t GetBlockTimeMax() const { return int64_t(nTimeMax); }

    int64_t GetHeaderReceivedTime() const { return nTimeReceived; }

    int64_t GetReceivedTimeDiff() const {
        return GetHeaderReceivedTime() - GetBlockTime();
    }

    static constexpr int nMedianTimeSpan = 11;

    int64_t GetMedianTimePast() const {
        int64_t pmedian[nMedianTimeSpan];
        int64_t *pbegin = &pmedian[nMedianTimeSpan];
        int64_t *pend = &pmedian[nMedianTimeSpan];

        const CBlockIndex *pindex = this;
        for (int i = 0; i < nMedianTimeSpan && pindex;
             i++, pindex = pindex->pprev) {
            *(--pbegin) = pindex->GetBlockTime();
        }

        std::sort(pbegin, pend);
        return pbegin[(pend - pbegin) / 2];
    }

    std::string ToString() const {
        return strprintf(
            "CBlockIndex(pprev=%p, nHeight=%d, merkle=%s, hashBlock=%s)", pprev,
            nHeight, hashMerkleRoot.ToString(), GetBlockHash().ToString());
    }

    //! Check whether this block index entry is valid up to the passed validity
    //! level.
    bool IsValid(enum BlockValidity nUpTo = BlockValidity::TRANSACTIONS) const {
        return nStatus.isValid(nUpTo);
    }

    //! Raise the validity level of this block index entry.
    //! Returns true if the validity was changed.
    bool RaiseValidity(enum BlockValidity nUpTo) {
        // Only validity flags allowed.
        if (nStatus.isInvalid()) {
            return false;
        }

        if (nStatus.getValidity() >= nUpTo) {
            return false;
        }

        nStatus = nStatus.withValidity(nUpTo);
        return true;
    }

    //! Build the skiplist pointer for this entry.
    void BuildSkip();

    //! Efficiently find an ancestor of this block.
    CBlockIndex *GetAncestor(int height);
    const CBlockIndex *GetAncestor(int height) const;
};

/**
 * Maintain a map of CBlockIndex for all known headers.
 */
struct BlockHasher {
    size_t operator()(const uint256 &hash) const { return hash.GetCheapHash(); }
};

typedef std::unordered_map<uint256, CBlockIndex *, BlockHasher> BlockMap;
extern BlockMap &mapBlockIndex;
extern CCriticalSection cs_main;

inline CBlockIndex *LookupBlockIndex(const uint256 &hash) {
    AssertLockHeld(cs_main);
    BlockMap::const_iterator it = mapBlockIndex.find(hash);
    return it == mapBlockIndex.end() ? nullptr : it->second;
}

arith_uint256 GetBlockProof(const CBlockIndex &block);
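// ---------------------------------------------------------------------------
// Illustrative aside (not part of this patch): GetMedianTimePast() above
// collects the timestamps of up to the last nMedianTimeSpan (11) blocks by
// walking pprev, sorts them, and returns the middle element. The standalone
// sketch below reproduces that logic on a plain vector of timestamps so the
// windowing is easier to follow; MedianTimePastOf is a hypothetical helper,
// not part of the codebase.
#include <algorithm>
#include <cstdint>
#include <vector>

static int64_t MedianTimePastOf(const std::vector<int64_t> &chain_times) {
    if (chain_times.empty()) {
        return 0;
    }
    // Keep only the last 11 timestamps (fewer near the start of the chain).
    const size_t span = std::min<size_t>(chain_times.size(), 11);
    std::vector<int64_t> window(chain_times.end() - span, chain_times.end());
    std::sort(window.begin(), window.end());
    // Same index choice as GetMedianTimePast(): the element at size / 2.
    return window[window.size() / 2];
}
// ---------------------------------------------------------------------------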
/**
 * Return the time it would take to redo the work difference between from and
 * to, assuming the current hashrate corresponds to the difficulty at tip, in
 * seconds.
 */
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to,
                                    const CBlockIndex &from,
                                    const CBlockIndex &tip,
                                    const Consensus::Params &);

/**
 * Find the forking point between two chain tips.
 */
const CBlockIndex *LastCommonAncestor(const CBlockIndex *pa,
                                      const CBlockIndex *pb);

/**
 * Check if two block indexes are on the same fork.
 */
bool AreOnTheSameFork(const CBlockIndex *pa, const CBlockIndex *pb);

/** Used to marshal pointers into hashes for db storage. */
class CDiskBlockIndex : public CBlockIndex {
public:
    uint256 hashPrev;

    CDiskBlockIndex() { hashPrev = uint256(); }

    explicit CDiskBlockIndex(const CBlockIndex *pindex) : CBlockIndex(*pindex) {
        hashPrev = (pprev ? pprev->GetBlockHash() : uint256());
    }

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        int _nVersion = s.GetVersion();
        if (!(s.GetType() & SER_GETHASH)) {
-            READWRITE(VARINT(_nVersion));
+            READWRITE(VARINT(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
        }

-        READWRITE(VARINT(nHeight));
+        READWRITE(VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
        READWRITE(nStatus);
        READWRITE(VARINT(nTx));
        if (nStatus.hasData() || nStatus.hasUndo()) {
-            READWRITE(VARINT(nFile));
+            READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED));
        }
        if (nStatus.hasData()) {
            READWRITE(VARINT(nDataPos));
        }
        if (nStatus.hasUndo()) {
            READWRITE(VARINT(nUndoPos));
        }

        // block header
        READWRITE(this->nVersion);
        READWRITE(hashPrev);
        READWRITE(hashMerkleRoot);
        READWRITE(nTime);
        READWRITE(nBits);
        READWRITE(nNonce);
    }

    uint256 GetBlockHash() const {
        CBlockHeader block;
        block.nVersion = nVersion;
        block.hashPrevBlock = hashPrev;
        block.hashMerkleRoot = hashMerkleRoot;
        block.nTime = nTime;
        block.nBits = nBits;
        block.nNonce = nNonce;
        return block.GetHash();
    }

    std::string ToString() const {
        std::string str = "CDiskBlockIndex(";
        str += CBlockIndex::ToString();
        str += strprintf("\n hashBlock=%s, hashPrev=%s)",
                         GetBlockHash().ToString(), hashPrev.ToString());
        return str;
    }
};
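// ---------------------------------------------------------------------------
// Illustrative aside (not part of this patch): the VARINT changes above tag
// signed fields (_nVersion, nHeight, nFile) with
// VarIntMode::NONNEGATIVE_SIGNED, making explicit that they are only ever
// serialized as non-negative values, while the plain VARINT form presumably
// stays reserved for unsigned types (which would also be why a literal 0
// becomes 0u later in this patch). The sketch below shows the byte format
// these fields end up in, mirroring serialize.h's WriteVarInt as I understand
// it: base-128, most significant bits first, with a -1 offset on every
// continuation step so each encoded length covers a distinct range.
// EncodeVarIntSketch is hypothetical and for illustration only.
#include <cstdint>
#include <vector>

static std::vector<uint8_t> EncodeVarIntSketch(uint64_t n) {
    uint8_t tmp[10]; // enough for ceil(64 / 7) = 10 groups of 7 bits
    int len = 0;
    while (true) {
        // tmp[0] is the final output byte and carries no continuation bit;
        // every earlier output byte gets the 0x80 marker.
        tmp[len] = (n & 0x7F) | (len ? 0x80 : 0x00);
        if (n <= 0x7F) {
            break;
        }
        n = (n >> 7) - 1;
        len++;
    }
    std::vector<uint8_t> out;
    do {
        out.push_back(tmp[len]); // emit the most significant group first
    } while (len--);
    return out;
}
// Examples: 0x7F -> {0x7F}; 0x80 -> {0x80, 0x00}; 0x407F -> {0xFF, 0x7F}.
// ---------------------------------------------------------------------------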
/**
 * An in-memory indexed chain of blocks.
 */
class CChain {
private:
    std::vector<CBlockIndex *> vChain;

public:
    /**
     * Returns the index entry for the genesis block of this chain, or nullptr
     * if none.
     */
    CBlockIndex *Genesis() const {
        return vChain.size() > 0 ? vChain[0] : nullptr;
    }

    /**
     * Returns the index entry for the tip of this chain, or nullptr if none.
     */
    CBlockIndex *Tip() const {
        return vChain.size() > 0 ? vChain[vChain.size() - 1] : nullptr;
    }

    /**
     * Returns the index entry at a particular height in this chain, or nullptr
     * if no such height exists.
     */
    CBlockIndex *operator[](int nHeight) const {
        if (nHeight < 0 || nHeight >= (int)vChain.size()) {
            return nullptr;
        }
        return vChain[nHeight];
    }

    /** Compare two chains efficiently. */
    friend bool operator==(const CChain &a, const CChain &b) {
        return a.vChain.size() == b.vChain.size() &&
               a.vChain[a.vChain.size() - 1] == b.vChain[b.vChain.size() - 1];
    }

    /** Efficiently check whether a block is present in this chain. */
    bool Contains(const CBlockIndex *pindex) const {
        return (*this)[pindex->nHeight] == pindex;
    }

    /**
     * Find the successor of a block in this chain, or nullptr if the given
     * index is not found or is the tip.
     */
    CBlockIndex *Next(const CBlockIndex *pindex) const {
        if (!Contains(pindex)) {
            return nullptr;
        }
        return (*this)[pindex->nHeight + 1];
    }

    /**
     * Return the maximal height in the chain. Is equal to chain.Tip() ?
     * chain.Tip()->nHeight : -1.
     */
    int Height() const { return vChain.size() - 1; }

    /** Set/initialize a chain with a given tip. */
    void SetTip(CBlockIndex *pindex);

    /**
     * Return a CBlockLocator that refers to a block in this chain (by default
     * the tip).
     */
    CBlockLocator GetLocator(const CBlockIndex *pindex = nullptr) const;

    /**
     * Find the last common block between this chain and a block index entry.
     */
    const CBlockIndex *FindFork(const CBlockIndex *pindex) const;

    /**
     * Find the earliest block with timestamp equal or greater than the given.
     */
    CBlockIndex *FindEarliestAtLeast(int64_t nTime) const;
};

#endif // BITCOIN_CHAIN_H
diff --git a/src/flatfile.h b/src/flatfile.h
index 01c463d59..2537a4365 100644
--- a/src/flatfile.h
+++ b/src/flatfile.h
@@ -1,100 +1,100 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_FLATFILE_H
#define BITCOIN_FLATFILE_H

#include
#include
#include

struct FlatFilePos {
    int nFile;
    unsigned int nPos;

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
-        READWRITE(VARINT(nFile));
+        READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED));
        READWRITE(VARINT(nPos));
    }

    FlatFilePos() : nFile(-1), nPos(0) {}

    FlatFilePos(int nFileIn, unsigned int nPosIn)
        : nFile(nFileIn), nPos(nPosIn) {}

    friend bool operator==(const FlatFilePos &a, const FlatFilePos &b) {
        return (a.nFile == b.nFile && a.nPos == b.nPos);
    }

    friend bool operator!=(const FlatFilePos &a, const FlatFilePos &b) {
        return !(a == b);
    }

    void SetNull() {
        nFile = -1;
        nPos = 0;
    }
    bool IsNull() const { return (nFile == -1); }

    std::string ToString() const;
};

/**
 * FlatFileSeq represents a sequence of numbered files storing raw data. This
 * class facilitates access to and efficient management of these files.
 */
class FlatFileSeq {
private:
    const fs::path m_dir;
    const char *const m_prefix;
    const size_t m_chunk_size;

public:
    /**
     * Constructor
     *
     * @param dir The base directory that all files live in.
     * @param prefix A short prefix given to all file names.
     * @param chunk_size Disk space is pre-allocated in multiples of this
     * amount.
     */
    FlatFileSeq(fs::path dir, const char *prefix, size_t chunk_size);

    /** Get the name of the file at the given position. */
    fs::path FileName(const FlatFilePos &pos) const;

    /** Open a handle to the file at the given position. */
    FILE *Open(const FlatFilePos &pos, bool read_only = false);

    /**
     * Allocate additional space in a file after the given starting position.
     * The amount allocated will be the minimum multiple of the sequence chunk
     * size greater than add_size.
     *
     * @param[in] pos The starting position that bytes will be allocated after.
     * @param[in] add_size The minimum number of bytes to be allocated.
     * @param[out] out_of_space Whether the allocation failed due to
     * insufficient disk space.
     * @return The number of bytes successfully allocated.
     */
    size_t Allocate(const FlatFilePos &pos, size_t add_size,
                    bool &out_of_space);

    /**
     * Commit a file to disk, and optionally truncate off extra pre-allocated
     * bytes if final.
     *
     * @param[in] pos The first unwritten position in the file to be flushed.
     * @param[in] finalize True if no more data will be written to this file.
     * @return true on success, false on failure.
*/ bool Flush(const FlatFilePos &pos, bool finalize = false); }; #endif // BITCOIN_FLATFILE_H diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 20ebb5a55..518cba55d 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1,2230 +1,2231 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // boost::thread::interrupt #include #include #include struct CUpdatedBlock { uint256 hash; int height; }; static Mutex cs_blockchange; static std::condition_variable cond_blockchange; static CUpdatedBlock latestblock; /** * Calculate the difficulty for a given block index. */ double GetDifficulty(const CBlockIndex *blockindex) { assert(blockindex); int nShift = (blockindex->nBits >> 24) & 0xff; double dDiff = double(0x0000ffff) / double(blockindex->nBits & 0x00ffffff); while (nShift < 29) { dDiff *= 256.0; nShift++; } while (nShift > 29) { dDiff /= 256.0; nShift--; } return dDiff; } static int ComputeNextBlockAndDepth(const CBlockIndex *tip, const CBlockIndex *blockindex, const CBlockIndex *&next) { next = tip->GetAncestor(blockindex->nHeight + 1); if (next && next->pprev == blockindex) { return tip->nHeight - blockindex->nHeight + 1; } next = nullptr; return blockindex == tip ? 1 : -1; } UniValue blockheaderToJSON(const CBlockIndex *tip, const CBlockIndex *blockindex) { UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); const CBlockIndex *pnext; int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); result.pushKV("confirmations", confirmations); result.pushKV("height", blockindex->nHeight); result.pushKV("version", blockindex->nVersion); result.pushKV("versionHex", strprintf("%08x", blockindex->nVersion)); result.pushKV("merkleroot", blockindex->hashMerkleRoot.GetHex()); result.pushKV("time", int64_t(blockindex->nTime)); result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast())); result.pushKV("nonce", uint64_t(blockindex->nNonce)); result.pushKV("bits", strprintf("%08x", blockindex->nBits)); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex->nChainWork.GetHex()); if (blockindex->pprev) { result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); } if (pnext) { result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); } return result; } UniValue blockToJSON(const CBlock &block, const CBlockIndex *tip, const CBlockIndex *blockindex, bool txDetails) { UniValue result(UniValue::VOBJ); result.pushKV("hash", blockindex->GetBlockHash().GetHex()); const CBlockIndex *pnext; int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); result.pushKV("confirmations", confirmations); result.pushKV( "size", (int)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION)); result.pushKV("height", blockindex->nHeight); result.pushKV("version", block.nVersion); result.pushKV("versionHex", strprintf("%08x", block.nVersion)); result.pushKV("merkleroot", block.hashMerkleRoot.GetHex()); UniValue txs(UniValue::VARR); for (const auto &tx : block.vtx) { if (txDetails) { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, uint256(), objTx, true, 
RPCSerializationFlags()); txs.push_back(objTx); } else { txs.push_back(tx->GetId().GetHex()); } } result.pushKV("tx", txs); result.pushKV("time", block.GetBlockTime()); result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast())); result.pushKV("nonce", uint64_t(block.nNonce)); result.pushKV("bits", strprintf("%08x", block.nBits)); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex->nChainWork.GetHex()); if (blockindex->pprev) { result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); } if (pnext) { result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); } return result; } static UniValue getblockcount(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockcount\n" "\nReturns the number of blocks in the longest blockchain.\n" "\nResult:\n" "n (numeric) The current block count\n" "\nExamples:\n" + HelpExampleCli("getblockcount", "") + HelpExampleRpc("getblockcount", "")); } LOCK(cs_main); return chainActive.Height(); } static UniValue getbestblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getbestblockhash\n" "\nReturns the hash of the best (tip) block in the " "longest blockchain.\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n" "\nExamples:\n" + HelpExampleCli("getbestblockhash", "") + HelpExampleRpc("getbestblockhash", "")); } LOCK(cs_main); return chainActive.Tip()->GetBlockHash().GetHex(); } UniValue getfinalizedblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getfinalizedblockhash\n" "\nReturns the hash of the currently finalized block\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n"); } LOCK(cs_main); const CBlockIndex *blockIndexFinalized = GetFinalizedBlock(); if (blockIndexFinalized) { return blockIndexFinalized->GetBlockHash().GetHex(); } return UniValue(UniValue::VSTR); } void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex) { if (pindex) { std::lock_guard lock(cs_blockchange); latestblock.hash = pindex->GetBlockHash(); latestblock.height = pindex->nHeight; } cond_blockchange.notify_all(); } static UniValue waitfornewblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "waitfornewblock (timeout)\n" "\nWaits for a specific new block and returns " "useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. timeout (int, optional, default=0) Time in " "milliseconds to wait for a response. 
0 indicates " "no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitfornewblock", "1000") + HelpExampleRpc("waitfornewblock", "1000")); } int timeout = 0; if (!request.params[0].isNull()) { timeout = request.params[0].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); block = latestblock; if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue waitforblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblock (timeout)\n" "\nWaits for a specific new block and returns useful info about " "it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. \"blockhash\" (required, string) Block hash to wait for.\n" "2. timeout (int, optional, default=0) Time in milliseconds " "to wait for a response. 0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000") + HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000")); } int timeout = 0; uint256 hash = uint256S(request.params[0].get_str()); if (!request.params[1].isNull()) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue waitforblockheight(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblockheight (timeout)\n" "\nWaits for (at least) block height and returns the height and " "hash\n" "of the current tip.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. height (required, int) Block height to wait for (int)\n" "2. timeout (int, optional, default=0) Time in milliseconds to " "wait for a response. 
0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblockheight", "\"100\", 1000") + HelpExampleRpc("waitforblockheight", "\"100\", 1000")); } int timeout = 0; int height = request.params[0].get_int(); if (!request.params[1].isNull()) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { WAIT_LOCK(cs_blockchange, lock); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); ret.pushKV("height", block.height); return ret; } static UniValue syncwithvalidationinterfacequeue(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 0) { throw std::runtime_error( "syncwithvalidationinterfacequeue\n" "\nWaits for the validation interface queue to catch up on " "everything that was there when we entered this function.\n" "\nExamples:\n" + HelpExampleCli("syncwithvalidationinterfacequeue", "") + HelpExampleRpc("syncwithvalidationinterfacequeue", "")); } SyncWithValidationInterfaceQueue(); return NullUniValue; } static UniValue getdifficulty(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("getdifficulty\n" "\nReturns the proof-of-work difficulty as a " "multiple of the minimum difficulty.\n" "\nResult:\n" "n.nnn (numeric) the proof-of-work " "difficulty as a multiple of the minimum " "difficulty.\n" "\nExamples:\n" + HelpExampleCli("getdifficulty", "") + HelpExampleRpc("getdifficulty", "")); } LOCK(cs_main); return GetDifficulty(chainActive.Tip()); } static std::string EntryDescriptionString() { return " \"size\" : n, (numeric) transaction size.\n" " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" " \"modifiedfee\" : n, (numeric) transaction fee with fee " "deltas used for mining priority\n" " \"time\" : n, (numeric) local time transaction " "entered pool in seconds since 1 Jan 1970 GMT\n" " \"height\" : n, (numeric) block height when " "transaction entered pool\n" " \"startingpriority\" : n, (numeric) DEPRECATED. Priority when " "transaction entered pool\n" " \"currentpriority\" : n, (numeric) DEPRECATED. Transaction " "priority now\n" " \"descendantcount\" : n, (numeric) number of in-mempool " "descendant transactions (including this one)\n" " \"descendantsize\" : n, (numeric) virtual transaction size " "of in-mempool descendants (including this one)\n" " \"descendantfees\" : n, (numeric) modified fees (see above) " "of in-mempool descendants (including this one)\n" " \"ancestorcount\" : n, (numeric) number of in-mempool " "ancestor transactions (including this one)\n" " \"ancestorsize\" : n, (numeric) virtual transaction size " "of in-mempool ancestors (including this one)\n" " \"ancestorfees\" : n, (numeric) modified fees (see above) " "of in-mempool ancestors (including this one)\n" " \"depends\" : [ (array) unconfirmed transactions " "used as inputs for this transaction\n" " \"transactionid\", (string) parent transaction id\n" " ... ]\n" " \"spentby\" : [ (array) unconfirmed transactions " "spending outputs from this transaction\n" " \"transactionid\", (string) child transaction id\n" " ... 
]\n"; } static void entryToJSON(UniValue &info, const CTxMemPoolEntry &e) EXCLUSIVE_LOCKS_REQUIRED(g_mempool.cs) { AssertLockHeld(g_mempool.cs); info.pushKV("size", (int)e.GetTxSize()); info.pushKV("fee", ValueFromAmount(e.GetFee())); info.pushKV("modifiedfee", ValueFromAmount(e.GetModifiedFee())); info.pushKV("time", e.GetTime()); info.pushKV("height", (int)e.GetHeight()); info.pushKV("startingpriority", e.GetPriority(e.GetHeight())); info.pushKV("currentpriority", e.GetPriority(chainActive.Height())); info.pushKV("descendantcount", e.GetCountWithDescendants()); info.pushKV("descendantsize", e.GetSizeWithDescendants()); info.pushKV("descendantfees", e.GetModFeesWithDescendants() / SATOSHI); info.pushKV("ancestorcount", e.GetCountWithAncestors()); info.pushKV("ancestorsize", e.GetSizeWithAncestors()); info.pushKV("ancestorfees", e.GetModFeesWithAncestors() / SATOSHI); const CTransaction &tx = e.GetTx(); std::set setDepends; for (const CTxIn &txin : tx.vin) { if (g_mempool.exists(txin.prevout.GetTxId())) { setDepends.insert(txin.prevout.GetTxId().ToString()); } } UniValue depends(UniValue::VARR); for (const std::string &dep : setDepends) { depends.push_back(dep); } info.pushKV("depends", depends); UniValue spent(UniValue::VARR); const CTxMemPool::txiter &it = g_mempool.mapTx.find(tx.GetId()); const CTxMemPool::setEntries &setChildren = g_mempool.GetMemPoolChildren(it); for (const CTxMemPool::txiter &childiter : setChildren) { spent.push_back(childiter->GetTx().GetId().ToString()); } info.pushKV("spentby", spent); } UniValue mempoolToJSON(bool fVerbose) { if (fVerbose) { LOCK(g_mempool.cs); UniValue o(UniValue::VOBJ); for (const CTxMemPoolEntry &e : g_mempool.mapTx) { const uint256 &txid = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.pushKV(txid.ToString(), info); } return o; } else { std::vector vtxids; g_mempool.queryHashes(vtxids); UniValue a(UniValue::VARR); for (const uint256 &txid : vtxids) { a.push_back(txid.ToString()); } return a; } } static UniValue getrawmempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getrawmempool ( verbose )\n" "\nReturns all transaction ids in memory pool as a json array of " "string transaction ids.\n" "\nHint: use getmempoolentry to fetch a specific transaction from " "the mempool.\n" "\nArguments:\n" "1. verbose (boolean, optional, default=false) True for a json " "object, false for array of transaction ids\n" "\nResult: (for verbose = false):\n" "[ (json array of string)\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" "]\n" "\nResult: (for verbose = true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getrawmempool", "true") + HelpExampleRpc("getrawmempool", "true")); } bool fVerbose = false; if (!request.params[0].isNull()) { fVerbose = request.params[0].get_bool(); } return mempoolToJSON(fVerbose); } static UniValue getmempoolancestors(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempoolancestors txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool ancestors.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool ancestor transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolancestors", "\"mytxid\"") + HelpExampleRpc("getmempoolancestors", "\"mytxid\"")); } bool fVerbose = false; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setAncestors; uint64_t noLimit = std::numeric_limits::max(); std::string dummy; g_mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit, noLimit, noLimit, dummy, false); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter ancestorIt : setAncestors) { o.push_back(ancestorIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter ancestorIt : setAncestors) { const CTxMemPoolEntry &e = *ancestorIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.pushKV(_hash.ToString(), info); } return o; } } static UniValue getmempooldescendants(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempooldescendants txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool descendants.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool descendant transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempooldescendants", "\"mytxid\"") + HelpExampleRpc("getmempooldescendants", "\"mytxid\"")); } bool fVerbose = false; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setDescendants; g_mempool.CalculateDescendants(it, setDescendants); // CTxMemPool::CalculateDescendants will include the given tx setDescendants.erase(it); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter descendantIt : setDescendants) { o.push_back(descendantIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter descendantIt : setDescendants) { const CTxMemPoolEntry &e = *descendantIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.pushKV(_hash.ToString(), info); } return o; } } static UniValue getmempoolentry(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getmempoolentry txid\n" "\nReturns mempool data for given transaction\n" "\nArguments:\n" "1. \"txid\" (string, required) " "The transaction id (must be in mempool)\n" "\nResult:\n" "{ (json object)\n" + EntryDescriptionString() + "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"")); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(g_mempool.cs); CTxMemPool::txiter it = g_mempool.mapTx.find(hash); if (it == g_mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } const CTxMemPoolEntry &e = *it; UniValue info(UniValue::VOBJ); entryToJSON(info, e); return info; } static UniValue getblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getblockhash height\n" "\nReturns hash of block in best-block-chain at height provided.\n" "\nArguments:\n" "1. 
height (numeric, required) The height index\n" "\nResult:\n" "\"hash\" (string) The block hash\n" "\nExamples:\n" + HelpExampleCli("getblockhash", "1000") + HelpExampleRpc("getblockhash", "1000")); } LOCK(cs_main); int nHeight = request.params[0].get_int(); if (nHeight < 0 || nHeight > chainActive.Height()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range"); } CBlockIndex *pblockindex = chainActive[nHeight]; return pblockindex->GetBlockHash().GetHex(); } static UniValue getblockheader(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblockheader \"hash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for blockheader 'hash'.\n" "If verbose is true, returns an Object with information about " "blockheader .\n" "\nArguments:\n" "1. \"hash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true for a " "json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"0000...1f3\" (string) Expected number of " "hashes required to produce the current chain (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\", (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"") + HelpExampleRpc("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (!request.params[1].isNull()) { fVerbose = request.params[1].get_bool(); } const CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); ssBlock << pblockindex->GetBlockHeader(); std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockheaderToJSON(chainActive.Tip(), pblockindex); } static CBlock GetBlockChecked(const Config &config, const CBlockIndex *pblockindex) { CBlock block; if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config.GetChainParams().GetConsensus())) { // Block not found on disk. 
This could be because we have the block // header in our index but don't have the block (for example if a // non-whitelisted node sends us an unrequested long chain of valid // blocks, we add the headers to our index, but don't accept the block). throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); } return block; } static UniValue getblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblock \"blockhash\" ( verbosity )\n" "\nIf verbosity is 0 or false, returns a string that is " "serialized, hex-encoded data for block 'hash'.\n" "If verbosity is 1 or true, returns an Object with information " "about block .\n" "If verbosity is 2, returns an Object with information about block " " and information about each transaction.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) The block hash\n" "2. verbosity (numeric, optional, default=1) 0 for " "hex-encoded data, 1 for a json object, and 2 for json object with " "transaction data\n" "\nResult (for verbosity = 0):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nResult (for verbosity = 1):\n" "{\n" " \"hash\" : \"hash\", (string) The block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"size\" : n, (numeric) The block size\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"tx\" : [ (array of string) The transaction ids\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" " ],\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"xxxx\", (string) Expected number of hashes " "required to produce the chain up to this block (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\" (string) The hash of the " "next block\n" "}\n" "\nResult (for verbosity = 2):\n" "{\n" " ..., Same output as verbosity = 1\n" " \"tx\" : [ (array of Objects) The transactions in " "the format of the getrawtransaction RPC; different from verbosity " "= 1 \"tx\" result\n" " ...\n" " ],\n" " ... Same output as verbosity = 1\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"") + HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); int verbosity = 1; if (!request.params[1].isNull()) { if (request.params[1].isNum()) { verbosity = request.params[1].get_int(); } else { verbosity = request.params[1].get_bool() ? 
1 : 0; } } const CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } const CBlock block = GetBlockChecked(config, pblockindex); if (verbosity <= 0) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockToJSON(block, chainActive.Tip(), pblockindex, verbosity >= 2); } struct CCoinsStats { int nHeight; uint256 hashBlock; uint64_t nTransactions; uint64_t nTransactionOutputs; uint64_t nBogoSize; uint256 hashSerialized; uint64_t nDiskSize; Amount nTotalAmount; CCoinsStats() : nHeight(0), nTransactions(0), nTransactionOutputs(0), nBogoSize(0), nDiskSize(0), nTotalAmount() {} }; static void ApplyStats(CCoinsStats &stats, CHashWriter &ss, const uint256 &hash, const std::map &outputs) { assert(!outputs.empty()); ss << hash; ss << VARINT(outputs.begin()->second.GetHeight() * 2 + outputs.begin()->second.IsCoinBase()); stats.nTransactions++; for (const auto &output : outputs) { ss << VARINT(output.first + 1); ss << output.second.GetTxOut().scriptPubKey; - ss << VARINT(output.second.GetTxOut().nValue / SATOSHI); + ss << VARINT(output.second.GetTxOut().nValue / SATOSHI, + VarIntMode::NONNEGATIVE_SIGNED); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.GetTxOut().nValue; stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + 2 /* scriptPubKey len */ + output.second.GetTxOut().scriptPubKey.size() /* scriptPubKey */; } - ss << VARINT(0); + ss << VARINT(0u); } //! Calculate statistics about the unspent transaction output set static bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats) { std::unique_ptr pcursor(view->Cursor()); assert(pcursor); CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); stats.nHeight = LookupBlockIndex(stats.hashBlock)->nHeight; } ss << stats.hashBlock; uint256 prevkey; std::map outputs; while (pcursor->Valid()) { boost::this_thread::interruption_point(); COutPoint key; Coin coin; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { if (!outputs.empty() && key.GetTxId() != prevkey) { ApplyStats(stats, ss, prevkey, outputs); outputs.clear(); } prevkey = key.GetTxId(); outputs[key.GetN()] = std::move(coin); } else { return error("%s: unable to read value", __func__); } pcursor->Next(); } if (!outputs.empty()) { ApplyStats(stats, ss, prevkey, outputs); } stats.hashSerialized = ss.GetHash(); stats.nDiskSize = view->EstimateSize(); return true; } static UniValue pruneblockchain(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "pruneblockchain\n" "\nArguments:\n" "1. \"height\" (numeric, required) The block height to prune " "up to. 
May be set to a discrete height, or a unix timestamp\n" " to prune blocks whose block time is at least 2 " "hours older than the provided timestamp.\n" "\nResult:\n" "n (numeric) Height of the last block pruned.\n" "\nExamples:\n" + HelpExampleCli("pruneblockchain", "1000") + HelpExampleRpc("pruneblockchain", "1000")); } if (!fPruneMode) { throw JSONRPCError( RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode."); } LOCK(cs_main); int heightParam = request.params[0].get_int(); if (heightParam < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height."); } // Height value more than a billion is too high to be a block height, and // too low to be a block time (corresponds to timestamp from Sep 2001). if (heightParam > 1000000000) { // Add a 2 hour buffer to include blocks which might have had old // timestamps CBlockIndex *pindex = chainActive.FindEarliestAtLeast(heightParam - TIMESTAMP_WINDOW); if (!pindex) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Could not find block with at least the specified timestamp."); } heightParam = pindex->nHeight; } unsigned int height = (unsigned int)heightParam; unsigned int chainHeight = (unsigned int)chainActive.Height(); if (chainHeight < config.GetChainParams().PruneAfterHeight()) { throw JSONRPCError(RPC_MISC_ERROR, "Blockchain is too short for pruning."); } else if (height > chainHeight) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height."); } else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) { LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. " "Retaining the minimum number of blocks.\n"); height = chainHeight - MIN_BLOCKS_TO_KEEP; } PruneBlockFilesManual(height); return uint64_t(height); } static UniValue gettxoutsetinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "gettxoutsetinfo\n" "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n" "\nResult:\n" "{\n" " \"height\":n, (numeric) The current block height (index)\n" " \"bestblock\": \"hex\", (string) the best block hash hex\n" " \"transactions\": n, (numeric) The number of transactions\n" " \"txouts\": n, (numeric) The number of output " "transactions\n" " \"bogosize\": n, (numeric) A database-independent " "metric for UTXO set size\n" " \"hash_serialized\": \"hash\", (string) The serialized hash\n" " \"disk_size\": n, (numeric) The estimated size of the " "chainstate on disk\n" " \"total_amount\": x.xxx (numeric) The total amount\n" "}\n" "\nExamples:\n" + HelpExampleCli("gettxoutsetinfo", "") + HelpExampleRpc("gettxoutsetinfo", "")); } UniValue ret(UniValue::VOBJ); CCoinsStats stats; FlushStateToDisk(); if (GetUTXOStats(pcoinsdbview.get(), stats)) { ret.pushKV("height", int64_t(stats.nHeight)); ret.pushKV("bestblock", stats.hashBlock.GetHex()); ret.pushKV("transactions", int64_t(stats.nTransactions)); ret.pushKV("txouts", int64_t(stats.nTransactionOutputs)); ret.pushKV("bogosize", int64_t(stats.nBogoSize)); ret.pushKV("hash_serialized", stats.hashSerialized.GetHex()); ret.pushKV("disk_size", stats.nDiskSize); ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount)); } else { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } return ret; } UniValue gettxout(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "gettxout \"txid\" n ( 
include_mempool )\n" "\nReturns details about an unspent transaction output.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" "2. \"n\" (numeric, required) vout number\n" "3. \"include_mempool\" (boolean, optional) Whether to include " "the mempool. Default: true." " Note that an unspent output that is spent in the mempool " "won't appear.\n" "\nResult:\n" "{\n" " \"bestblock\" : \"hash\", (string) the block hash\n" " \"confirmations\" : n, (numeric) The number of " "confirmations\n" " \"value\" : x.xxx, (numeric) The transaction value " "in " + CURRENCY_UNIT + "\n" " \"scriptPubKey\" : { (json object)\n" " \"asm\" : \"code\", (string) \n" " \"hex\" : \"hex\", (string) \n" " \"reqSigs\" : n, (numeric) Number of required " "signatures\n" " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n" " \"addresses\" : [ (array of string) array of " "bitcoin addresses\n" " \"address\" (string) bitcoin address\n" " ,...\n" " ]\n" " },\n" " \"coinbase\" : true|false (boolean) Coinbase or not\n" "}\n" "\nExamples:\n" "\nGet unspent transactions\n" + HelpExampleCli("listunspent", "") + "\nView the details\n" + HelpExampleCli("gettxout", "\"txid\" 1") + "\nAs a json rpc call\n" + HelpExampleRpc("gettxout", "\"txid\", 1")); } LOCK(cs_main); UniValue ret(UniValue::VOBJ); std::string strTxId = request.params[0].get_str(); TxId txid(uint256S(strTxId)); int n = request.params[1].get_int(); COutPoint out(txid, n); bool fMempool = true; if (!request.params[2].isNull()) { fMempool = request.params[2].get_bool(); } Coin coin; if (fMempool) { LOCK(g_mempool.cs); CCoinsViewMemPool view(pcoinsTip.get(), g_mempool); if (!view.GetCoin(out, coin) || g_mempool.isSpent(out)) { return NullUniValue; } } else { if (!pcoinsTip->GetCoin(out, coin)) { return NullUniValue; } } const CBlockIndex *pindex = LookupBlockIndex(pcoinsTip->GetBestBlock()); ret.pushKV("bestblock", pindex->GetBlockHash().GetHex()); if (coin.GetHeight() == MEMPOOL_HEIGHT) { ret.pushKV("confirmations", 0); } else { ret.pushKV("confirmations", int64_t(pindex->nHeight - coin.GetHeight() + 1)); } ret.pushKV("value", ValueFromAmount(coin.GetTxOut().nValue)); UniValue o(UniValue::VOBJ); ScriptPubKeyToUniv(coin.GetTxOut().scriptPubKey, o, true); ret.pushKV("scriptPubKey", o); ret.pushKV("coinbase", coin.IsCoinBase()); return ret; } static UniValue verifychain(const Config &config, const JSONRPCRequest &request) { int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL); int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS); if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "verifychain ( checklevel nblocks )\n" "\nVerifies blockchain database.\n" "\nArguments:\n" "1. checklevel (numeric, optional, 0-4, default=" + strprintf("%d", nCheckLevel) + ") How thorough the block verification is.\n" "2. 
nblocks (numeric, optional, default=" + strprintf("%d", nCheckDepth) + ", 0=all) The number of blocks to check.\n" "\nResult:\n" "true|false (boolean) Verified or not\n" "\nExamples:\n" + HelpExampleCli("verifychain", "") + HelpExampleRpc("verifychain", "")); } LOCK(cs_main); if (!request.params[0].isNull()) { nCheckLevel = request.params[0].get_int(); } if (!request.params[1].isNull()) { nCheckDepth = request.params[1].get_int(); } return CVerifyDB().VerifyDB(config, pcoinsTip.get(), nCheckLevel, nCheckDepth); } UniValue getblockchaininfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockchaininfo\n" "Returns an object containing various state info regarding " "blockchain processing.\n" "\nResult:\n" "{\n" " \"chain\": \"xxxx\", (string) current network name " "as defined in BIP70 (main, test, regtest)\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"headers\": xxxxxx, (numeric) the current number of " "headers we have validated\n" " \"bestblockhash\": \"...\", (string) the hash of the " "currently best block\n" " \"difficulty\": xxxxxx, (numeric) the current " "difficulty\n" " \"mediantime\": xxxxxx, (numeric) median time for the " "current best block\n" " \"verificationprogress\": xxxx, (numeric) estimate of " "verification progress [0..1]\n" " \"initialblockdownload\": xxxx, (bool) (debug information) " "estimate of whether this node is in Initial Block Download mode.\n" " \"chainwork\": \"xxxx\" (string) total amount of work " "in active chain, in hexadecimal\n" " \"size_on_disk\": xxxxxx, (numeric) the estimated size of " "the block and undo files on disk\n" " \"pruned\": xx, (boolean) if the blocks are " "subject to pruning\n" " \"pruneheight\": xxxxxx, (numeric) lowest-height " "complete block stored (only present if pruning is enabled)\n" " \"automatic_pruning\": xx, (boolean) whether automatic " "pruning is enabled (only present if pruning is enabled)\n" " \"prune_target_size\": xxxxxx, (numeric) the target size " "used by pruning (only present if automatic pruning is enabled)\n" " \"warnings\" : \"...\", (string) any network and " "blockchain warnings.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockchaininfo", "") + HelpExampleRpc("getblockchaininfo", "")); } LOCK(cs_main); const CBlockIndex *tip = chainActive.Tip(); UniValue obj(UniValue::VOBJ); obj.pushKV("chain", config.GetChainParams().NetworkIDString()); obj.pushKV("blocks", int(chainActive.Height())); obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1); obj.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); obj.pushKV("difficulty", double(GetDifficulty(tip))); obj.pushKV("mediantime", int64_t(tip->GetMedianTimePast())); obj.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip)); obj.pushKV("initialblockdownload", IsInitialBlockDownload()); obj.pushKV("chainwork", tip->nChainWork.GetHex()); obj.pushKV("size_on_disk", CalculateCurrentUsage()); obj.pushKV("pruned", fPruneMode); if (fPruneMode) { const CBlockIndex *block = tip; assert(block); while (block->pprev && (block->pprev->nStatus.hasData())) { block = block->pprev; } obj.pushKV("pruneheight", block->nHeight); // if 0, execution bypasses the whole if block. 
bool automatic_pruning = (gArgs.GetArg("-prune", 0) != 1); obj.pushKV("automatic_pruning", automatic_pruning); if (automatic_pruning) { obj.pushKV("prune_target_size", nPruneTarget); } } obj.pushKV("warnings", GetWarnings("statusbar")); return obj; } /** Comparison function for sorting the getchaintips heads. */ struct CompareBlocksByHeight { bool operator()(const CBlockIndex *a, const CBlockIndex *b) const { // Make sure that unequal blocks with the same height do not compare // equal. Use the pointers themselves to make a distinction. if (a->nHeight != b->nHeight) { return (a->nHeight > b->nHeight); } return a < b; } }; static UniValue getchaintips(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getchaintips\n" "Return information about all known tips in the block tree," " including the main chain as well as orphaned branches.\n" "\nResult:\n" "[\n" " {\n" " \"height\": xxxx, (numeric) height of the chain tip\n" " \"hash\": \"xxxx\", (string) block hash of the tip\n" " \"branchlen\": 0 (numeric) zero for main chain\n" " \"status\": \"active\" (string) \"active\" for the main " "chain\n" " },\n" " {\n" " \"height\": xxxx,\n" " \"hash\": \"xxxx\",\n" " \"branchlen\": 1 (numeric) length of branch " "connecting the tip to the main chain\n" " \"status\": \"xxxx\" (string) status of the chain " "(active, valid-fork, valid-headers, headers-only, invalid)\n" " }\n" "]\n" "Possible values for status:\n" "1. \"invalid\" This branch contains at least one " "invalid block\n" "2. \"parked\" This branch contains at least one " "parked block\n" "3. \"headers-only\" Not all blocks for this branch are " "available, but the headers are valid\n" "4. \"valid-headers\" All blocks are available for this " "branch, but they were never fully validated\n" "5. \"valid-fork\" This branch is not part of the " "active chain, but is fully validated\n" "6. \"active\" This is the tip of the active main " "chain, which is certainly valid\n" "\nExamples:\n" + HelpExampleCli("getchaintips", "") + HelpExampleRpc("getchaintips", "")); } LOCK(cs_main); /** * Idea: the set of chain tips is chainActive.tip, plus orphan blocks which * do not have another orphan building off of them. * Algorithm: * - Make one pass through mapBlockIndex, picking out the orphan blocks, * and also storing a set of the orphan block's pprev pointers. * - Iterate through the orphan blocks. If the block isn't pointed to by * another orphan, it is a chain tip. * - add chainActive.Tip() */ std::set setTips; std::set setOrphans; std::set setPrevs; for (const std::pair &item : mapBlockIndex) { if (!chainActive.Contains(item.second)) { setOrphans.insert(item.second); setPrevs.insert(item.second->pprev); } } for (std::set::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) { if (setPrevs.erase(*it) == 0) { setTips.insert(*it); } } // Always report the currently active tip. setTips.insert(chainActive.Tip()); /* Construct the output array. */ UniValue res(UniValue::VARR); for (const CBlockIndex *block : setTips) { UniValue obj(UniValue::VOBJ); obj.pushKV("height", block->nHeight); obj.pushKV("hash", block->phashBlock->GetHex()); const int branchLen = block->nHeight - chainActive.FindFork(block)->nHeight; obj.pushKV("branchlen", branchLen); std::string status; if (chainActive.Contains(block)) { // This block is part of the currently active chain. status = "active"; } else if (block->nStatus.isInvalid()) { // This block or one of its ancestors is invalid. 
status = "invalid"; } else if (block->nStatus.isOnParkedChain()) { // This block or one of its ancestors is parked. status = "parked"; } else if (block->nChainTx == 0) { // This block cannot be connected because full block data for it or // one of its parents is missing. status = "headers-only"; } else if (block->IsValid(BlockValidity::SCRIPTS)) { // This block is fully validated, but no longer part of the active // chain. It was probably the active block once, but was // reorganized. status = "valid-fork"; } else if (block->IsValid(BlockValidity::TREE)) { // The headers for this block are valid, but it has not been // validated. It was probably never part of the most-work chain. status = "valid-headers"; } else { // No clue. status = "unknown"; } obj.pushKV("status", status); res.push_back(obj); } return res; } UniValue mempoolInfoToJSON() { UniValue ret(UniValue::VOBJ); ret.pushKV("size", (int64_t)g_mempool.size()); ret.pushKV("bytes", (int64_t)g_mempool.GetTotalTxSize()); ret.pushKV("usage", (int64_t)g_mempool.DynamicMemoryUsage()); size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.pushKV("maxmempool", (int64_t)maxmempool); ret.pushKV("mempoolminfee", ValueFromAmount( std::max(g_mempool.GetMinFee(maxmempool), ::minRelayTxFee) .GetFeePerK())); ret.pushKV("minrelaytxfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())); return ret; } static UniValue getmempoolinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmempoolinfo\n" "\nReturns details on the active state of the TX memory pool.\n" "\nResult:\n" "{\n" " \"size\": xxxxx, (numeric) Current tx count\n" " \"bytes\": xxxxx, (numeric) Transaction size.\n" " \"usage\": xxxxx, (numeric) Total memory usage for " "the mempool\n" " \"maxmempool\": xxxxx, (numeric) Maximum memory usage " "for the mempool\n" " \"mempoolminfee\": xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and " "minimum mempool fee\n" " \"minrelaytxfee\": xxxxx (numeric) Current minimum relay " "fee for transactions\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolinfo", "") + HelpExampleRpc("getmempoolinfo", "")); } return mempoolInfoToJSON(); } static UniValue preciousblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "preciousblock \"blockhash\"\n" "\nTreats a block as if it were received before others with the " "same work.\n" "\nA later preciousblock call can override the effect of an " "earlier one.\n" "\nThe effects of preciousblock are not retained across restarts.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the hash of the block to " "mark as precious\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("preciousblock", "\"blockhash\"") + HelpExampleRpc("preciousblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CBlockIndex *pblockindex; { LOCK(cs_main); pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } } CValidationState state; PreciousBlock(config, state, pblockindex); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue finalizeblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "finalizeblock \"blockhash\"\n" "\nTreats a block as final. It cannot be reorged. Any chain\n" "that does not contain this block is invalid. Used on a less\n" "work chain, it can effectively PUTS YOU OUT OF CONSENSUS.\n" "USE WITH CAUTION!\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("finalizeblock", "\"blockhash\"") + HelpExampleRpc("finalizeblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } FinalizeBlockAndInvalidate(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } static UniValue invalidateblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "invalidateblock \"blockhash\"\n" "\nPermanently marks a block as invalid, as if it " "violated a consensus rule.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of " "the block to mark as invalid\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("invalidateblock", "\"blockhash\"") + HelpExampleRpc("invalidateblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } InvalidateBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } UniValue parkblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error("parkblock \"blockhash\"\n" "\nMarks a block as parked.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the " "hash of the block to park\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("parkblock", "\"blockhash\"") + HelpExampleRpc("parkblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; ParkBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } static UniValue reconsiderblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "reconsiderblock \"blockhash\"\n" "\nRemoves invalidity status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of invalidateblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "reconsider\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("reconsiderblock", "\"blockhash\"") + HelpExampleRpc("reconsiderblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); { LOCK(cs_main); CBlockIndex *pblockindex = LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } ResetBlockFailureFlags(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state)); } return NullUniValue; } UniValue unparkblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "unparkblock \"blockhash\"\n" "\nRemoves parked status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of parkblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "unpark\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("unparkblock", "\"blockhash\"") + HelpExampleRpc("unparkblock", "\"blockhash\"")); } const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; UnparkBlockAndChildren(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } static UniValue getchaintxstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getchaintxstats ( nblocks blockhash )\n" "\nCompute statistics about the total number and rate of " "transactions in the chain.\n" "\nArguments:\n" "1. nblocks (numeric, optional) Size of the window in number " "of blocks (default: one month).\n" "2. 
\"blockhash\" (string, optional) The hash of the block that " "ends the window.\n" "\nResult:\n" "{\n" " \"time\": xxxxx, (numeric) The " "timestamp for the final block in the window in UNIX format.\n" " \"txcount\": xxxxx, (numeric) The total " "number of transactions in the chain up to that point.\n" " \"window_final_block_hash\": \"...\", (string) The hash of " "the final block in the window.\n" " \"window_block_count\": xxxxx, (numeric) Size of " "the window in number of blocks.\n" " \"window_tx_count\": xxxxx, (numeric) The number " "of transactions in the window. Only returned if " "\"window_block_count\" is > 0.\n" " \"window_interval\": xxxxx, (numeric) The elapsed " "time in the window in seconds. Only returned if " "\"window_block_count\" is > 0.\n" " \"txrate\": x.xx, (numeric) The average " "rate of transactions per second in the window. Only returned if " "\"window_interval\" is > 0.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getchaintxstats", "") + HelpExampleRpc("getchaintxstats", "2016")); } const CBlockIndex *pindex; // By default: 1 month int blockcount = 30 * 24 * 60 * 60 / config.GetChainParams().GetConsensus().nPowTargetSpacing; if (request.params[1].isNull()) { LOCK(cs_main); pindex = chainActive.Tip(); } else { uint256 hash = uint256S(request.params[1].get_str()); LOCK(cs_main); pindex = LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block is not in main chain"); } } assert(pindex != nullptr); if (request.params[0].isNull()) { blockcount = std::max(0, std::min(blockcount, pindex->nHeight - 1)); } else { blockcount = request.params[0].get_int(); if (blockcount < 0 || (blockcount > 0 && blockcount >= pindex->nHeight)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid block count: " "should be between 0 and " "the block's height - 1"); } } const CBlockIndex *pindexPast = pindex->GetAncestor(pindex->nHeight - blockcount); int nTimeDiff = pindex->GetMedianTimePast() - pindexPast->GetMedianTimePast(); int nTxDiff = pindex->nChainTx - pindexPast->nChainTx; UniValue ret(UniValue::VOBJ); ret.pushKV("time", int64_t(pindex->nTime)); ret.pushKV("txcount", int64_t(pindex->nChainTx)); ret.pushKV("window_final_block_hash", pindex->GetBlockHash().GetHex()); ret.pushKV("window_block_count", blockcount); if (blockcount > 0) { ret.pushKV("window_tx_count", nTxDiff); ret.pushKV("window_interval", nTimeDiff); if (nTimeDiff > 0) { ret.pushKV("txrate", double(nTxDiff) / nTimeDiff); } } return ret; } template static T CalculateTruncatedMedian(std::vector &scores) { size_t size = scores.size(); if (size == 0) { return T(); } std::sort(scores.begin(), scores.end()); if (size % 2 == 0) { return (scores[size / 2 - 1] + scores[size / 2]) / 2; } else { return scores[size / 2]; } } template static inline bool SetHasKeys(const std::set &set) { return false; } template static inline bool SetHasKeys(const std::set &set, const Tk &key, const Args &... args) { return (set.count(key) != 0) || SetHasKeys(set, args...); } // outpoint (needed for the utxo index) + nHeight + fCoinBase static constexpr size_t PER_UTXO_OVERHEAD = sizeof(COutPoint) + sizeof(uint32_t) + sizeof(bool); static UniValue getblockstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 4) { throw std::runtime_error( "getblockstats hash_or_height ( stats )\n" "\nCompute per block statistics for a given window. 
All amounts " "are in " + CURRENCY_UNIT + ".\n" "It won't work for some heights with pruning.\n" "It won't work without -txindex for utxo_size_inc, *fee or " "*feerate stats.\n" "\nArguments:\n" "1. \"hash_or_height\" (string or numeric, required) The block " "hash or height of the target block\n" "2. \"stats\" (array, optional) Values to plot, by " "default all values (see result below)\n" " [\n" " \"height\", (string, optional) Selected statistic\n" " \"time\", (string, optional) Selected statistic\n" " ,...\n" " ]\n" "\nResult:\n" "{ (json object)\n" " \"avgfee\": x.xxx, (numeric) Average fee in the block\n" " \"avgfeerate\": x.xxx, (numeric) Average feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"avgtxsize\": xxxxx, (numeric) Average transaction size\n" " \"blockhash\": xxxxx, (string) The block hash (to check " "for potential reorgs)\n" " \"height\": xxxxx, (numeric) The height of the block\n" " \"ins\": xxxxx, (numeric) The number of inputs " "(excluding coinbase)\n" " \"maxfee\": xxxxx, (numeric) Maximum fee in the block\n" " \"maxfeerate\": xxxxx, (numeric) Maximum feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"maxtxsize\": xxxxx, (numeric) Maximum transaction size\n" " \"medianfee\": x.xxx, (numeric) Truncated median fee in " "the block\n" " \"medianfeerate\": x.xxx, (numeric) Truncated median feerate " "(in " + CURRENCY_UNIT + " per byte)\n" " \"mediantime\": xxxxx, (numeric) The block median time " "past\n" " \"mediantxsize\": xxxxx, (numeric) Truncated median " "transaction size\n" " \"minfee\": x.xxx, (numeric) Minimum fee in the block\n" " \"minfeerate\": xx.xx, (numeric) Minimum feerate (in " + CURRENCY_UNIT + " per byte)\n" " \"mintxsize\": xxxxx, (numeric) Minimum transaction size\n" " \"outs\": xxxxx, (numeric) The number of outputs\n" " \"subsidy\": x.xxx, (numeric) The block subsidy\n" " \"time\": xxxxx, (numeric) The block time\n" " \"total_out\": x.xxx, (numeric) Total amount in all " "outputs (excluding coinbase and thus reward [ie subsidy + " "totalfee])\n" " \"total_size\": xxxxx, (numeric) Total size of all " "non-coinbase transactions\n" " \"totalfee\": x.xxx, (numeric) The fee total\n" " \"txs\": xxxxx, (numeric) The number of " "transactions (excluding coinbase)\n" " \"utxo_increase\": xxxxx, (numeric) The increase/decrease in " "the number of unspent outputs\n" " \"utxo_size_inc\": xxxxx, (numeric) The increase/decrease in " "size for the utxo index (not discounting op_return and similar)\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'") + HelpExampleRpc("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")); } LOCK(cs_main); CBlockIndex *pindex; if (request.params[0].isNum()) { const int height = request.params[0].get_int(); const int current_tip = chainActive.Height(); if (height < 0) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Target block height %d is negative", height)); } if (height > current_tip) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Target block height %d after current tip %d", height, current_tip)); } pindex = chainActive[height]; } else { const std::string strHash = request.params[0].get_str(); const uint256 hash(uint256S(strHash)); pindex = LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Block is not in chain %s", Params().NetworkIDString())); } } assert(pindex != nullptr); std::set stats; if 
(!request.params[1].isNull()) { const UniValue stats_univalue = request.params[1].get_array(); for (unsigned int i = 0; i < stats_univalue.size(); i++) { const std::string stat = stats_univalue[i].get_str(); stats.insert(stat); } } const CBlock block = GetBlockChecked(config, pindex); // Calculate everything if nothing selected (default) const bool do_all = stats.size() == 0; const bool do_mediantxsize = do_all || stats.count("mediantxsize") != 0; const bool do_medianfee = do_all || stats.count("medianfee") != 0; const bool do_medianfeerate = do_all || stats.count("medianfeerate") != 0; const bool loop_inputs = do_all || do_medianfee || do_medianfeerate || SetHasKeys(stats, "utxo_size_inc", "totalfee", "avgfee", "avgfeerate", "minfee", "maxfee", "minfeerate", "maxfeerate"); const bool loop_outputs = do_all || loop_inputs || stats.count("total_out"); const bool do_calculate_size = do_mediantxsize || loop_inputs || SetHasKeys(stats, "total_size", "avgtxsize", "mintxsize", "maxtxsize"); const int64_t blockMaxSize = config.GetMaxBlockSize(); Amount maxfee = Amount::zero(); Amount maxfeerate = Amount::zero(); Amount minfee = MAX_MONEY; Amount minfeerate = MAX_MONEY; Amount total_out = Amount::zero(); Amount totalfee = Amount::zero(); int64_t inputs = 0; int64_t maxtxsize = 0; int64_t mintxsize = blockMaxSize; int64_t outputs = 0; int64_t total_size = 0; int64_t utxo_size_inc = 0; std::vector fee_array; std::vector feerate_array; std::vector txsize_array; const Consensus::Params ¶ms = config.GetChainParams().GetConsensus(); for (const auto &tx : block.vtx) { outputs += tx->vout.size(); Amount tx_total_out = Amount::zero(); if (loop_outputs) { for (const CTxOut &out : tx->vout) { tx_total_out += out.nValue; utxo_size_inc += GetSerializeSize(out, SER_NETWORK, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD; } } if (tx->IsCoinBase()) { continue; } // Don't count coinbase's fake input inputs += tx->vin.size(); // Don't count coinbase reward total_out += tx_total_out; int64_t tx_size = 0; if (do_calculate_size) { tx_size = tx->GetTotalSize(); if (do_mediantxsize) { txsize_array.push_back(tx_size); } maxtxsize = std::max(maxtxsize, tx_size); mintxsize = std::min(mintxsize, tx_size); total_size += tx_size; } if (loop_inputs) { if (!g_txindex) { throw JSONRPCError(RPC_INVALID_PARAMETER, "One or more of the selected stats requires " "-txindex enabled"); } Amount tx_total_in = Amount::zero(); for (const CTxIn &in : tx->vin) { CTransactionRef tx_in; uint256 hashBlock; if (!GetTransaction(params, in.prevout.GetTxId(), tx_in, hashBlock, false)) { throw JSONRPCError(RPC_INTERNAL_ERROR, std::string("Unexpected internal error " "(tx index seems corrupt)")); } CTxOut prevoutput = tx_in->vout[in.prevout.GetN()]; tx_total_in += prevoutput.nValue; utxo_size_inc -= GetSerializeSize(prevoutput, SER_NETWORK, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD; } Amount txfee = tx_total_in - tx_total_out; assert(MoneyRange(txfee)); if (do_medianfee) { fee_array.push_back(txfee); } maxfee = std::max(maxfee, txfee); minfee = std::min(minfee, txfee); totalfee += txfee; Amount feerate = txfee / tx_size; if (do_medianfeerate) { feerate_array.push_back(feerate); } maxfeerate = std::max(maxfeerate, feerate); minfeerate = std::min(minfeerate, feerate); } } UniValue ret_all(UniValue::VOBJ); ret_all.pushKV("avgfee", ValueFromAmount((block.vtx.size() > 1) ? totalfee / int((block.vtx.size() - 1)) : Amount::zero())); ret_all.pushKV("avgfeerate", ValueFromAmount((total_size > 0) ? 
totalfee / total_size : Amount::zero())); ret_all.pushKV("avgtxsize", (block.vtx.size() > 1) ? total_size / (block.vtx.size() - 1) : 0); ret_all.pushKV("blockhash", pindex->GetBlockHash().GetHex()); ret_all.pushKV("height", (int64_t)pindex->nHeight); ret_all.pushKV("ins", inputs); ret_all.pushKV("maxfee", ValueFromAmount(maxfee)); ret_all.pushKV("maxfeerate", ValueFromAmount(maxfeerate)); ret_all.pushKV("maxtxsize", maxtxsize); ret_all.pushKV("medianfee", ValueFromAmount(CalculateTruncatedMedian(fee_array))); ret_all.pushKV("medianfeerate", ValueFromAmount(CalculateTruncatedMedian(feerate_array))); ret_all.pushKV("mediantime", pindex->GetMedianTimePast()); ret_all.pushKV("mediantxsize", CalculateTruncatedMedian(txsize_array)); ret_all.pushKV( "minfee", ValueFromAmount((minfee == MAX_MONEY) ? Amount::zero() : minfee)); ret_all.pushKV("minfeerate", ValueFromAmount((minfeerate == MAX_MONEY) ? Amount::zero() : minfeerate)); ret_all.pushKV("mintxsize", mintxsize == blockMaxSize ? 0 : mintxsize); ret_all.pushKV("outs", outputs); ret_all.pushKV("subsidy", ValueFromAmount(GetBlockSubsidy( pindex->nHeight, Params().GetConsensus()))); ret_all.pushKV("time", pindex->GetBlockTime()); ret_all.pushKV("total_out", ValueFromAmount(total_out)); ret_all.pushKV("total_size", total_size); ret_all.pushKV("totalfee", ValueFromAmount(totalfee)); ret_all.pushKV("txs", (int64_t)block.vtx.size()); ret_all.pushKV("utxo_increase", outputs - inputs); ret_all.pushKV("utxo_size_inc", utxo_size_inc); if (do_all) { return ret_all; } UniValue ret(UniValue::VOBJ); for (const std::string &stat : stats) { const UniValue &value = ret_all[stat]; if (value.isNull()) { throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Invalid selected statistic %s", stat)); } ret.pushKV(stat, value); } return ret; } static UniValue savemempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("savemempool\n" "\nDumps the mempool to disk. 
It will fail " "until the previous dump is fully loaded.\n" "\nExamples:\n" + HelpExampleCli("savemempool", "") + HelpExampleRpc("savemempool", "")); } if (!g_is_mempool_loaded) { throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet"); } if (!DumpMempool()) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk"); } return NullUniValue; } // clang-format off static const ContextFreeRPCCommand commands[] = { // category name actor (function) argNames // ------------------- ------------------------ ---------------------- ---------- { "blockchain", "getbestblockhash", getbestblockhash, {} }, { "blockchain", "getblock", getblock, {"blockhash","verbosity|verbose"} }, { "blockchain", "getblockchaininfo", getblockchaininfo, {} }, { "blockchain", "getblockcount", getblockcount, {} }, { "blockchain", "getblockhash", getblockhash, {"height"} }, { "blockchain", "getblockheader", getblockheader, {"blockhash","verbose"} }, { "blockchain", "getblockstats", getblockstats, {"hash_or_height","stats"} }, { "blockchain", "getchaintips", getchaintips, {} }, { "blockchain", "getchaintxstats", getchaintxstats, {"nblocks", "blockhash"} }, { "blockchain", "getdifficulty", getdifficulty, {} }, { "blockchain", "getmempoolancestors", getmempoolancestors, {"txid","verbose"} }, { "blockchain", "getmempooldescendants", getmempooldescendants, {"txid","verbose"} }, { "blockchain", "getmempoolentry", getmempoolentry, {"txid"} }, { "blockchain", "getmempoolinfo", getmempoolinfo, {} }, { "blockchain", "getrawmempool", getrawmempool, {"verbose"} }, { "blockchain", "gettxout", gettxout, {"txid","n","include_mempool"} }, { "blockchain", "gettxoutsetinfo", gettxoutsetinfo, {} }, { "blockchain", "pruneblockchain", pruneblockchain, {"height"} }, { "blockchain", "savemempool", savemempool, {} }, { "blockchain", "verifychain", verifychain, {"checklevel","nblocks"} }, { "blockchain", "preciousblock", preciousblock, {"blockhash"} }, /* Not shown in help */ { "hidden", "getfinalizedblockhash", getfinalizedblockhash, {} }, { "hidden", "finalizeblock", finalizeblock, {"blockhash"} }, { "hidden", "invalidateblock", invalidateblock, {"blockhash"} }, { "hidden", "parkblock", parkblock, {"blockhash"} }, { "hidden", "reconsiderblock", reconsiderblock, {"blockhash"} }, { "hidden", "syncwithvalidationinterfacequeue", syncwithvalidationinterfacequeue, {} }, { "hidden", "unparkblock", unparkblock, {"blockhash"} }, { "hidden", "waitfornewblock", waitfornewblock, {"timeout"} }, { "hidden", "waitforblock", waitforblock, {"blockhash","timeout"} }, { "hidden", "waitforblockheight", waitforblockheight, {"height","timeout"} }, }; // clang-format on void RegisterBlockchainRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/serialize.h b/src/serialize.h index eb4e0ba0f..52133f94d 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1,927 +1,957 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SERIALIZE_H #define BITCOIN_SERIALIZE_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const uint64_t MAX_SIZE = 0x02000000; /** * Dummy data type to identify deserializing constructors. 
* * By convention, a constructor of a type T with signature * * template T::T(deserialize_type, Stream& s) * * is a deserializing constructor, which builds the type by deserializing it * from s. If T contains const fields, this is likely the only way to do so. */ struct deserialize_type {}; constexpr deserialize_type deserialize{}; /** * Used to bypass the rule against non-const reference to temporary where it * makes sense with wrappers such as CFlatData or CTxDB */ template inline T &REF(const T &val) { return const_cast(val); } /** * Used to acquire a non-const pointer "this" to generate bodies of const * serialization operations from a template */ template inline T *NCONST_PTR(const T *val) { return const_cast(val); } /** * Lowest-level serialization and conversion. * @note Sizes of these types are verified in the tests */ template inline void ser_writedata8(Stream &s, uint8_t obj) { s.write((char *)&obj, 1); } template inline void ser_writedata16(Stream &s, uint16_t obj) { obj = htole16(obj); s.write((char *)&obj, 2); } template inline void ser_writedata32(Stream &s, uint32_t obj) { obj = htole32(obj); s.write((char *)&obj, 4); } template inline void ser_writedata32be(Stream &s, uint32_t obj) { obj = htobe32(obj); s.write((char *)&obj, 4); } template inline void ser_writedata64(Stream &s, uint64_t obj) { obj = htole64(obj); s.write((char *)&obj, 8); } template inline uint8_t ser_readdata8(Stream &s) { uint8_t obj; s.read((char *)&obj, 1); return obj; } template inline uint16_t ser_readdata16(Stream &s) { uint16_t obj; s.read((char *)&obj, 2); return le16toh(obj); } template inline uint32_t ser_readdata32(Stream &s) { uint32_t obj; s.read((char *)&obj, 4); return le32toh(obj); } template inline uint32_t ser_readdata32be(Stream &s) { uint32_t obj; s.read((char *)&obj, 4); return be32toh(obj); } template inline uint64_t ser_readdata64(Stream &s) { uint64_t obj; s.read((char *)&obj, 8); return le64toh(obj); } inline uint64_t ser_double_to_uint64(double x) { union { double x; uint64_t y; } tmp; tmp.x = x; return tmp.y; } inline uint32_t ser_float_to_uint32(float x) { union { float x; uint32_t y; } tmp; tmp.x = x; return tmp.y; } inline double ser_uint64_to_double(uint64_t y) { union { double x; uint64_t y; } tmp; tmp.y = y; return tmp.x; } inline float ser_uint32_to_float(uint32_t y) { union { float x; uint32_t y; } tmp; tmp.y = y; return tmp.x; } ///////////////////////////////////////////////////////////////// // // Templates for serializing to anything that looks like a stream, // i.e. anything that supports .read(char*, size_t) and .write(char*, size_t) // class CSizeComputer; enum { // primary actions SER_NETWORK = (1 << 0), SER_DISK = (1 << 1), SER_GETHASH = (1 << 2), }; //! Convert the reference base type to X, without changing constness or //! reference type. template X &ReadWriteAsHelper(X &x) { return x; } template const X &ReadWriteAsHelper(const X &x) { return x; } #define READWRITE(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) #define READWRITEAS(type, obj) \ (::SerReadWriteMany(s, ser_action, ReadWriteAsHelper(obj))) /** * Implement three methods for serializable objects. These are actually wrappers * over "SerializationOp" template, which implements the body of each class' * serialization code. Adding "ADD_SERIALIZE_METHODS" in the body of the class * causes these wrappers to be added as members. 
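 *
 * For example, a minimal serializable struct could look like this
 * (illustrative sketch only; CExample is not a type from this codebase):
 *
 *   struct CExample {
 *       int32_t nFoo;
 *       std::vector<uint8_t> vBar;
 *
 *       ADD_SERIALIZE_METHODS;
 *
 *       template <typename Stream, typename Operation>
 *       inline void SerializationOp(Stream &s, Operation ser_action) {
 *           READWRITE(nFoo);
 *           READWRITE(vBar);
 *       }
 *   };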
*/ #define ADD_SERIALIZE_METHODS \ template void Serialize(Stream &s) const { \ NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \ } \ template void Unserialize(Stream &s) { \ SerializationOp(s, CSerActionUnserialize()); \ } #ifndef CHAR_EQUALS_INT8 // TODO Get rid of bare char template inline void Serialize(Stream &s, char a) { ser_writedata8(s, a); } #endif template inline void Serialize(Stream &s, int8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, uint8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, int16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, uint16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, int32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, uint32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, int64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, uint64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, float a) { ser_writedata32(s, ser_float_to_uint32(a)); } template inline void Serialize(Stream &s, double a) { ser_writedata64(s, ser_double_to_uint64(a)); } #ifndef CHAR_EQUALS_INT8 // TODO Get rid of bare char template inline void Unserialize(Stream &s, char &a) { a = ser_readdata8(s); } #endif template inline void Unserialize(Stream &s, int8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, uint8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, int16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, uint16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, int32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, uint32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, int64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, uint64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, float &a) { a = ser_uint32_to_float(ser_readdata32(s)); } template inline void Unserialize(Stream &s, double &a) { a = ser_uint64_to_double(ser_readdata64(s)); } template inline void Serialize(Stream &s, bool a) { char f = a; ser_writedata8(s, f); } template inline void Unserialize(Stream &s, bool &a) { char f = ser_readdata8(s); a = f; } /** * Compact Size * size < 253 -- 1 byte * size <= USHRT_MAX -- 3 bytes (253 + 2 bytes) * size <= UINT_MAX -- 5 bytes (254 + 4 bytes) * size > UINT_MAX -- 9 bytes (255 + 8 bytes) */ inline uint32_t GetSizeOfCompactSize(uint64_t nSize) { if (nSize < 253) { return sizeof(uint8_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint16_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint32_t); } return sizeof(uint8_t) + sizeof(uint64_t); } inline void WriteCompactSize(CSizeComputer &os, uint64_t nSize); template void WriteCompactSize(Stream &os, uint64_t nSize) { if (nSize < 253) { ser_writedata8(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 253); ser_writedata16(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 254); ser_writedata32(os, nSize); } else { ser_writedata8(os, 255); ser_writedata64(os, nSize); } return; } template uint64_t ReadCompactSize(Stream &is) { uint8_t chSize = ser_readdata8(is); uint64_t nSizeRet = 0; if (chSize < 253) { nSizeRet = chSize; } else if (chSize == 253) { nSizeRet = 
ser_readdata16(is); if (nSizeRet < 253) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } else if (chSize == 254) { nSizeRet = ser_readdata32(is); if (nSizeRet < 0x10000u) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } else { nSizeRet = ser_readdata64(is); if (nSizeRet < 0x100000000ULL) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } if (nSizeRet > MAX_SIZE) { throw std::ios_base::failure("ReadCompactSize(): size too large"); } return nSizeRet; } /** * Variable-length integers: bytes are a MSB base-128 encoding of the number. * The high bit in each byte signifies whether another digit follows. To make * sure the encoding is one-to-one, one is subtracted from all but the last * digit. Thus, the byte sequence a[] with length len, where all but the last * byte has bit 128 set, encodes the number: * * (a[len-1] & 0x7F) + sum(i=1..len-1, 128^i*((a[len-i-1] & 0x7F)+1)) * * Properties: * * Very small (0-127: 1 byte, 128-16511: 2 bytes, 16512-2113663: 3 bytes) * * Every integer has exactly one encoding * * Encoding does not depend on size of original integer type * * No redundancy: every (infinite) byte sequence corresponds to a list * of encoded integers. * * 0: [0x00] 256: [0x81 0x00] * 1: [0x01] 16383: [0xFE 0x7F] * 127: [0x7F] 16384: [0xFF 0x00] * 128: [0x80 0x00] 16511: [0xFF 0x7F] * 255: [0x80 0x7F] 65535: [0x82 0xFE 0x7F] * 2^32: [0x8E 0xFE 0xFE 0xFF 0x00] */ -template inline unsigned int GetSizeOfVarInt(I n) { + +/** + * Mode for encoding VarInts. + * + * Currently there is no support for signed encodings. The default mode will not + * compile with signed values, and the legacy "nonnegative signed" mode will + * accept signed values, but improperly encode and decode them if they are + * negative. In the future, the DEFAULT mode could be extended to support + * negative numbers in a backwards compatible way, and additional modes could be + * added to support different varint formats (e.g. zigzag encoding). + */ +enum class VarIntMode { DEFAULT, NONNEGATIVE_SIGNED }; + +template struct CheckVarIntMode { + constexpr CheckVarIntMode() { + static_assert(Mode != VarIntMode::DEFAULT || std::is_unsigned::value, + "Unsigned type required with mode DEFAULT."); + static_assert(Mode != VarIntMode::NONNEGATIVE_SIGNED || + std::is_signed::value, + "Signed type required with mode NONNEGATIVE_SIGNED."); + } +}; + +template +inline unsigned int GetSizeOfVarInt(I n) { + CheckVarIntMode(); int nRet = 0; while (true) { nRet++; if (n <= 0x7F) { return nRet; } n = (n >> 7) - 1; } } template inline void WriteVarInt(CSizeComputer &os, I n); -template void WriteVarInt(Stream &os, I n) { +template +void WriteVarInt(Stream &os, I n) { + CheckVarIntMode(); uint8_t tmp[(sizeof(n) * 8 + 6) / 7]; int len = 0; while (true) { tmp[len] = (n & 0x7F) | (len ? 
0x80 : 0x00); if (n <= 0x7F) { break; } n = (n >> 7) - 1; len++; } do { ser_writedata8(os, tmp[len]); } while (len--); } -template I ReadVarInt(Stream &is) { +template +I ReadVarInt(Stream &is) { + CheckVarIntMode(); I n = 0; while (true) { uint8_t chData = ser_readdata8(is); if (n > (std::numeric_limits::max() >> 7)) { throw std::ios_base::failure("ReadVarInt(): size too large"); } n = (n << 7) | (chData & 0x7F); if ((chData & 0x80) == 0) { return n; } if (n == std::numeric_limits::max()) { throw std::ios_base::failure("ReadVarInt(): size too large"); } n++; } } #define FLATDATA(obj) CFlatData((char *)&(obj), (char *)&(obj) + sizeof(obj)) -#define VARINT(obj) WrapVarInt(REF(obj)) +#define VARINT(obj, ...) WrapVarInt<__VA_ARGS__>(REF(obj)) #define COMPACTSIZE(obj) CCompactSize(REF(obj)) #define LIMITED_STRING(obj, n) LimitedString(REF(obj)) /** * Wrapper for serializing arrays and POD. */ class CFlatData { protected: char *pbegin; char *pend; public: CFlatData(void *pbeginIn, void *pendIn) : pbegin((char *)pbeginIn), pend((char *)pendIn) {} template explicit CFlatData(std::vector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } template explicit CFlatData(prevector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } char *begin() { return pbegin; } const char *begin() const { return pbegin; } char *end() { return pend; } const char *end() const { return pend; } template void Serialize(Stream &s) const { s.write(pbegin, pend - pbegin); } template void Unserialize(Stream &s) { s.read(pbegin, pend - pbegin); } }; -template class CVarInt { +template class CVarInt { protected: I &n; public: explicit CVarInt(I &nIn) : n(nIn) {} template void Serialize(Stream &s) const { - WriteVarInt(s, n); + WriteVarInt(s, n); } template void Unserialize(Stream &s) { - n = ReadVarInt(s); + n = ReadVarInt(s); } }; class CCompactSize { protected: uint64_t &n; public: explicit CCompactSize(uint64_t &nIn) : n(nIn) {} template void Serialize(Stream &s) const { WriteCompactSize(s, n); } template void Unserialize(Stream &s) { n = ReadCompactSize(s); } }; template class LimitedString { protected: std::string &string; public: explicit LimitedString(std::string &_string) : string(_string) {} template void Unserialize(Stream &s) { size_t size = ReadCompactSize(s); if (size > Limit) { throw std::ios_base::failure("String length limit exceeded"); } string.resize(size); if (size != 0) { s.read((char *)string.data(), size); } } template void Serialize(Stream &s) const { WriteCompactSize(s, string.size()); if (!string.empty()) { s.write((char *)string.data(), string.size()); } } }; -template CVarInt WrapVarInt(I &n) { - return CVarInt(n); +template +CVarInt WrapVarInt(I &n) { + return CVarInt{n}; } /** * Forward declarations */ /** * string */ template void Serialize(Stream &os, const std::basic_string &str); template void Unserialize(Stream &is, std::basic_string &str); /** * prevector * prevectors of uint8_t are a special case and are intended to be serialized as * a single opaque blob. 
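 *
 * For example, a prevector<N, uint8_t> holding the three bytes {0x01, 0x02,
 * 0x03} is serialized as 0x03 0x01 0x02 0x03, i.e. a CompactSize length
 * followed by the raw bytes, rather than as three individually serialized
 * elements.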
*/ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &); template void Serialize_impl(Stream &os, const prevector &v, const V &); template inline void Serialize(Stream &os, const prevector &v); template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &); template void Unserialize_impl(Stream &is, prevector &v, const V &); template inline void Unserialize(Stream &is, prevector &v); /** * vector * vectors of uint8_t are a special case and are intended to be serialized as a * single opaque blob. */ template void Serialize_impl(Stream &os, const std::vector &v, const uint8_t &); template void Serialize_impl(Stream &os, const std::vector &v, const V &); template inline void Serialize(Stream &os, const std::vector &v); template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &); template void Unserialize_impl(Stream &is, std::vector &v, const V &); template inline void Unserialize(Stream &is, std::vector &v); /** * pair */ template void Serialize(Stream &os, const std::pair &item); template void Unserialize(Stream &is, std::pair &item); /** * map */ template void Serialize(Stream &os, const std::map &m); template void Unserialize(Stream &is, std::map &m); /** * set */ template void Serialize(Stream &os, const std::set &m); template void Unserialize(Stream &is, std::set &m); /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p); template void Unserialize(Stream &os, std::shared_ptr &p); /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p); template void Unserialize(Stream &os, std::unique_ptr &p); /** * If none of the specialized versions above matched, default to calling member * function. */ template inline void Serialize(Stream &os, const T &a) { a.Serialize(os); } template inline void Unserialize(Stream &is, T &&a) { a.Unserialize(is); } /** * string */ template void Serialize(Stream &os, const std::basic_string &str) { WriteCompactSize(os, str.size()); if (!str.empty()) { os.write((char *)str.data(), str.size() * sizeof(C)); } } template void Unserialize(Stream &is, std::basic_string &str) { size_t nSize = ReadCompactSize(is); str.resize(nSize); if (nSize != 0) { is.read((char *)str.data(), nSize * sizeof(C)); } } /** * prevector */ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)v.data(), v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const prevector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const prevector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, prevector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, prevector &v) { Unserialize_impl(is, v, T()); } /** * vector */ template void Serialize_impl(Stream &os, const std::vector 
&v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)v.data(), v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const std::vector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const std::vector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, std::vector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, std::vector &v) { Unserialize_impl(is, v, T()); } /** * pair */ template void Serialize(Stream &os, const std::pair &item) { Serialize(os, item.first); Serialize(os, item.second); } template void Unserialize(Stream &is, std::pair &item) { Unserialize(is, item.first); Unserialize(is, item.second); } /** * map */ template void Serialize(Stream &os, const std::map &m) { WriteCompactSize(os, m.size()); for (const auto &entry : m) { Serialize(os, entry); } } template void Unserialize(Stream &is, std::map &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::map::iterator mi = m.begin(); for (size_t i = 0; i < nSize; i++) { std::pair item; Unserialize(is, item); mi = m.insert(mi, item); } } /** * set */ template void Serialize(Stream &os, const std::set &m) { WriteCompactSize(os, m.size()); for (const K &i : m) { Serialize(os, i); } } template void Unserialize(Stream &is, std::set &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::set::iterator it = m.begin(); for (size_t i = 0; i < nSize; i++) { K key; Unserialize(is, key); it = m.insert(it, key); } } /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::unique_ptr &p) { p.reset(new T(deserialize, is)); } /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::shared_ptr &p) { p = std::make_shared(deserialize, is); } /** * Support for ADD_SERIALIZE_METHODS and READWRITE macro */ struct CSerActionSerialize { constexpr bool ForRead() const { return false; } }; struct CSerActionUnserialize { constexpr bool ForRead() const { return true; } }; /** * ::GetSerializeSize implementations * * Computing the serialized size of objects is done through a special stream * object of type CSizeComputer, which only records the number of bytes written * to it. * * If your Serialize or SerializationOp method has non-trivial overhead for * serialization, it may be worthwhile to implement a specialized version for * CSizeComputer, which uses the s.seek() method to record bytes that would * be written instead. 
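 *
 * As an illustrative sketch (CMyObject and GetCachedSerializedSize() are
 * assumed names, not part of this codebase), such a specialization could be:
 *
 *   void CMyObject::Serialize(CSizeComputer &s) const {
 *       // Record the size without producing any bytes.
 *       s.seek(GetCachedSerializedSize());
 *   }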
*/ class CSizeComputer { protected: size_t nSize; const int nType; const int nVersion; public: CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {} void write(const char *psz, size_t _nSize) { this->nSize += _nSize; } /** Pretend _nSize bytes are written, without specifying them. */ void seek(size_t _nSize) { this->nSize += _nSize; } template CSizeComputer &operator<<(const T &obj) { ::Serialize(*this, obj); return (*this); } size_t size() const { return nSize; } int GetVersion() const { return nVersion; } int GetType() const { return nType; } }; template void SerializeMany(Stream &s) {} template void SerializeMany(Stream &s, const Arg &arg, const Args &... args) { ::Serialize(s, arg); ::SerializeMany(s, args...); } template inline void UnserializeMany(Stream &s) {} template inline void UnserializeMany(Stream &s, Arg &&arg, Args &&... args) { ::Unserialize(s, arg); ::UnserializeMany(s, args...); } template inline void SerReadWriteMany(Stream &s, CSerActionSerialize ser_action, const Args &... args) { ::SerializeMany(s, args...); } template inline void SerReadWriteMany(Stream &s, CSerActionUnserialize ser_action, Args &&... args) { ::UnserializeMany(s, args...); } template inline void WriteVarInt(CSizeComputer &s, I n) { s.seek(GetSizeOfVarInt(n)); } inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize) { s.seek(GetSizeOfCompactSize(nSize)); } template size_t GetSerializeSize(const T &t, int nType, int nVersion) { return (CSizeComputer(nType, nVersion) << t).size(); } template size_t GetSerializeSize(const S &s, const T &t) { return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size(); } #endif // BITCOIN_SERIALIZE_H diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index 5df3ab9d1..79dd301fd 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -1,207 +1,207 @@ // Copyright (c) 2013-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include BOOST_FIXTURE_TEST_SUITE(hash_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(murmurhash3) { #define T(expected, seed, data) \ BOOST_CHECK_EQUAL(MurmurHash3(seed, ParseHex(data)), expected) // Test MurmurHash3 with various inputs. Of course this is retested in the // bloom filter tests - they would fail if MurmurHash3() had any problems - // but is useful for those trying to implement Bitcoin libraries as a // source of test data for their MurmurHash3() primitive during // development. // // The magic number 0xFBA4C795 comes from CBloomFilter::Hash() T(0x00000000U, 0x00000000, ""); T(0x6a396f08U, 0xFBA4C795, ""); T(0x81f16f39U, 0xffffffff, ""); T(0x514e28b7U, 0x00000000, "00"); T(0xea3f0b17U, 0xFBA4C795, "00"); T(0xfd6cf10dU, 0x00000000, "ff"); T(0x16c6b7abU, 0x00000000, "0011"); T(0x8eb51c3dU, 0x00000000, "001122"); T(0xb4471bf8U, 0x00000000, "00112233"); T(0xe2301fa8U, 0x00000000, "0011223344"); T(0xfc2e4a15U, 0x00000000, "001122334455"); T(0xb074502cU, 0x00000000, "00112233445566"); T(0x8034d2a0U, 0x00000000, "0011223344556677"); T(0xb4698defU, 0x00000000, "001122334455667788"); #undef T } /** * SipHash-2-4 output with * k = 00 01 02 ... * and * in = (empty string) * in = 00 (1 byte) * in = 00 01 (2 bytes) * in = 00 01 02 (3 bytes) * ... * in = 00 01 02 ... 
3e (63 bytes) * * from: https://131002.net/siphash/siphash24.c */ uint64_t siphash_4_2_testvec[] = { 0x726fdb47dd0e0e31, 0x74f839c593dc67fd, 0x0d6c8009d9a94f5a, 0x85676696d7fb7e2d, 0xcf2794e0277187b7, 0x18765564cd99a68d, 0xcbc9466e58fee3ce, 0xab0200f58b01d137, 0x93f5f5799a932462, 0x9e0082df0ba9e4b0, 0x7a5dbbc594ddb9f3, 0xf4b32f46226bada7, 0x751e8fbc860ee5fb, 0x14ea5627c0843d90, 0xf723ca908e7af2ee, 0xa129ca6149be45e5, 0x3f2acc7f57c29bdb, 0x699ae9f52cbe4794, 0x4bc1b3f0968dd39c, 0xbb6dc91da77961bd, 0xbed65cf21aa2ee98, 0xd0f2cbb02e3b67c7, 0x93536795e3a33e88, 0xa80c038ccd5ccec8, 0xb8ad50c6f649af94, 0xbce192de8a85b8ea, 0x17d835b85bbb15f3, 0x2f2e6163076bcfad, 0xde4daaaca71dc9a5, 0xa6a2506687956571, 0xad87a3535c49ef28, 0x32d892fad841c342, 0x7127512f72f27cce, 0xa7f32346f95978e3, 0x12e0b01abb051238, 0x15e034d40fa197ae, 0x314dffbe0815a3b4, 0x027990f029623981, 0xcadcd4e59ef40c4d, 0x9abfd8766a33735c, 0x0e3ea96b5304a7d0, 0xad0c42d6fc585992, 0x187306c89bc215a9, 0xd4a60abcf3792b95, 0xf935451de4f21df2, 0xa9538f0419755787, 0xdb9acddff56ca510, 0xd06c98cd5c0975eb, 0xe612a3cb9ecba951, 0xc766e62cfcadaf96, 0xee64435a9752fe72, 0xa192d576b245165a, 0x0a8787bf8ecb74b2, 0x81b3e73d20b49b6f, 0x7fa8220ba3b2ecea, 0x245731c13ca42499, 0xb78dbfaf3a8d83bd, 0xea1ad565322a1a0b, 0x60e61c23a3795013, 0x6606d7e446282b93, 0x6ca4ecb15c5f91e1, 0x9f626da15c9625f3, 0xe51b38608ef25f57, 0x958a324ceb064572}; BOOST_AUTO_TEST_CASE(siphash) { CSipHasher hasher(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x726fdb47dd0e0e31ull); static const uint8_t t0[1] = {0}; hasher.Write(t0, 1); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x74f839c593dc67fdull); static const uint8_t t1[7] = {1, 2, 3, 4, 5, 6, 7}; hasher.Write(t1, 7); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x93f5f5799a932462ull); hasher.Write(0x0F0E0D0C0B0A0908ULL); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x3f2acc7f57c29bdbull); static const uint8_t t2[2] = {16, 17}; hasher.Write(t2, 2); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x4bc1b3f0968dd39cull); static const uint8_t t3[9] = {18, 19, 20, 21, 22, 23, 24, 25, 26}; hasher.Write(t3, 9); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x2f2e6163076bcfadull); static const uint8_t t4[5] = {27, 28, 29, 30, 31}; hasher.Write(t4, 5); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x7127512f72f27cceull); hasher.Write(0x2726252423222120ULL); BOOST_CHECK_EQUAL(hasher.Finalize(), 0x0e3ea96b5304a7d0ull); hasher.Write(0x2F2E2D2C2B2A2928ULL); BOOST_CHECK_EQUAL(hasher.Finalize(), 0xe612a3cb9ecba951ull); BOOST_CHECK_EQUAL( SipHashUint256(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL, uint256S("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09" "080706050403020100")), 0x7127512f72f27cceull); // Check test vectors from spec, one byte at a time CSipHasher hasher2(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); for (uint8_t x = 0; x < ARRAYLEN(siphash_4_2_testvec); ++x) { BOOST_CHECK_EQUAL(hasher2.Finalize(), siphash_4_2_testvec[x]); hasher2.Write(&x, 1); } // Check test vectors from spec, eight bytes at a time CSipHasher hasher3(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); for (uint8_t x = 0; x < ARRAYLEN(siphash_4_2_testvec); x += 8) { BOOST_CHECK_EQUAL(hasher3.Finalize(), siphash_4_2_testvec[x]); hasher3.Write(uint64_t(x) | (uint64_t(x + 1) << 8) | (uint64_t(x + 2) << 16) | (uint64_t(x + 3) << 24) | (uint64_t(x + 4) << 32) | (uint64_t(x + 5) << 40) | (uint64_t(x + 6) << 48) | (uint64_t(x + 7) << 56)); } CHashWriter ss(SER_DISK, CLIENT_VERSION); CMutableTransaction tx; // Note these tests were originally written with tx.nVersion=1 // and the test would be 
affected by default tx version bumps if not fixed. tx.nVersion = 1; ss << tx; BOOST_CHECK_EQUAL(SipHashUint256(1, 2, ss.GetHash()), 0x79751e980c2a0a35ULL); // Check consistency between CSipHasher and SipHashUint256[Extra]. FastRandomContext ctx; for (int i = 0; i < 16; ++i) { uint64_t k1 = ctx.rand64(); uint64_t k2 = ctx.rand64(); uint256 x = InsecureRand256(); uint32_t n = ctx.rand32(); uint8_t nb[4]; WriteLE32(nb, n); CSipHasher sip256(k1, k2); sip256.Write(x.begin(), 32); CSipHasher sip288 = sip256; sip288.Write(nb, 4); BOOST_CHECK_EQUAL(SipHashUint256(k1, k2, x), sip256.Finalize()); BOOST_CHECK_EQUAL(SipHashUint256Extra(k1, k2, x, n), sip288.Finalize()); } } namespace { class CDummyObject { uint32_t value; public: CDummyObject() : value(0) {} uint32_t GetValue() { return value; } template void Serialize(Stream &s) const { - int nVersionDummy = 0; + unsigned int nVersionDummy = 0; ::Serialize(s, VARINT(nVersionDummy)); ::Serialize(s, VARINT(value)); } template void Unserialize(Stream &s) { - int nVersionDummy; + unsigned int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); ::Unserialize(s, VARINT(value)); } }; } // namespace BOOST_AUTO_TEST_CASE(hashverifier_tests) { std::vector data = ParseHex("4223"); CDataStream ss(data, SER_DISK, CLIENT_VERSION); CHashVerifier verifier(&ss); CDummyObject dummy; verifier >> dummy; uint256 checksum = verifier.GetHash(); BOOST_CHECK_EQUAL(dummy.GetValue(), 0x23); CHashWriter h0(SER_DISK, CLIENT_VERSION); h0 << CDataStream(data, SER_DISK, CLIENT_VERSION); BOOST_CHECK(h0.GetHash() == checksum); CHashWriter h1(SER_DISK, CLIENT_VERSION); h1 << dummy; BOOST_CHECK(h1.GetHash() != checksum); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 089f4e969..b231d12d7 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -1,437 +1,438 @@ // Copyright (c) 2012-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
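// Illustrative note on the VarIntMode usage exercised below (the snippet is a
// sketch, not part of the original file): VARINT(obj) now defaults to
// VarIntMode::DEFAULT, which only compiles for unsigned types, so signed but
// non-negative values have to opt in explicitly:
//
//   int height = 1000;
//   ss << VARINT(height, VarIntMode::NONNEGATIVE_SIGNED); // legacy encoding
//
//   uint64_t count = 1000;
//   ss << VARINT(count); // DEFAULT mode; requires an unsigned type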
#include #include #include #include #include #include #include #include BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup) class CSerializeMethodsTestSingle { protected: int intval; bool boolval; std::string stringval; const char *charstrval; CTransactionRef txval; public: CSerializeMethodsTestSingle() = default; CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char *charstrvalin, const CTransactionRef &txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(txvalin) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(intval); READWRITE(boolval); READWRITE(stringval); READWRITE(FLATDATA(charstrval)); READWRITE(txval); } bool operator==(const CSerializeMethodsTestSingle &rhs) { return intval == rhs.intval && boolval == rhs.boolval && stringval == rhs.stringval && strcmp(charstrval, rhs.charstrval) == 0 && *txval == *rhs.txval; } }; class CSerializeMethodsTestMany : public CSerializeMethodsTestSingle { public: using CSerializeMethodsTestSingle::CSerializeMethodsTestSingle; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(intval, boolval, stringval, FLATDATA(charstrval), txval); } }; BOOST_AUTO_TEST_CASE(sizes) { BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(int8_t), GetSerializeSize(int8_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(uint8_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(int16_t), GetSerializeSize(int16_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(uint16_t), GetSerializeSize(uint16_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(int32_t), GetSerializeSize(int32_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(uint32_t), GetSerializeSize(uint32_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(int64_t), GetSerializeSize(int64_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(uint64_t), GetSerializeSize(uint64_t(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(float), GetSerializeSize(float(0), 0, 0)); BOOST_CHECK_EQUAL(sizeof(double), GetSerializeSize(double(0), 0, 0)); // Bool is serialized as char BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(bool(0), 0, 0)); // Sanity-check GetSerializeSize and c++ type matching BOOST_CHECK_EQUAL(GetSerializeSize(char(0), 0, 0), 1U); BOOST_CHECK_EQUAL(GetSerializeSize(int8_t(0), 0, 0), 1U); BOOST_CHECK_EQUAL(GetSerializeSize(uint8_t(0), 0, 0), 1U); BOOST_CHECK_EQUAL(GetSerializeSize(int16_t(0), 0, 0), 2U); BOOST_CHECK_EQUAL(GetSerializeSize(uint16_t(0), 0, 0), 2U); BOOST_CHECK_EQUAL(GetSerializeSize(int32_t(0), 0, 0), 4U); BOOST_CHECK_EQUAL(GetSerializeSize(uint32_t(0), 0, 0), 4U); BOOST_CHECK_EQUAL(GetSerializeSize(int64_t(0), 0, 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(uint64_t(0), 0, 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(float(0), 0, 0), 4U); BOOST_CHECK_EQUAL(GetSerializeSize(double(0), 0, 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(bool(0), 0, 0), 1U); } BOOST_AUTO_TEST_CASE(floats_conversion) { // Choose values that map unambiguously to binary floating point to avoid // rounding issues at the compiler side. 
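    // For instance, 1.0F in IEEE-754 single precision is sign=0, biased
    // exponent=127 (0x7f), mantissa=0, which packs to 0x3f800000; 0.5F differs
    // only in the exponent (126), giving 0x3f000000.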
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x00000000), 0.0F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x3f000000), 0.5F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x3f800000), 1.0F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x40000000), 2.0F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x40800000), 4.0F); BOOST_CHECK_EQUAL(ser_uint32_to_float(0x44444444), 785.066650390625F); BOOST_CHECK_EQUAL(ser_float_to_uint32(0.0F), 0x00000000U); BOOST_CHECK_EQUAL(ser_float_to_uint32(0.5F), 0x3f000000U); BOOST_CHECK_EQUAL(ser_float_to_uint32(1.0F), 0x3f800000U); BOOST_CHECK_EQUAL(ser_float_to_uint32(2.0F), 0x40000000U); BOOST_CHECK_EQUAL(ser_float_to_uint32(4.0F), 0x40800000U); BOOST_CHECK_EQUAL(ser_float_to_uint32(785.066650390625F), 0x44444444U); } BOOST_AUTO_TEST_CASE(doubles_conversion) { // Choose values that map unambiguously to binary floating point to avoid // rounding issues at the compiler side. BOOST_CHECK_EQUAL(ser_uint64_to_double(0x0000000000000000ULL), 0.0); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x3fe0000000000000ULL), 0.5); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x3ff0000000000000ULL), 1.0); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4000000000000000ULL), 2.0); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4010000000000000ULL), 4.0); BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4088888880000000ULL), 785.066650390625); BOOST_CHECK_EQUAL(ser_double_to_uint64(0.0), 0x0000000000000000ULL); BOOST_CHECK_EQUAL(ser_double_to_uint64(0.5), 0x3fe0000000000000ULL); BOOST_CHECK_EQUAL(ser_double_to_uint64(1.0), 0x3ff0000000000000ULL); BOOST_CHECK_EQUAL(ser_double_to_uint64(2.0), 0x4000000000000000ULL); BOOST_CHECK_EQUAL(ser_double_to_uint64(4.0), 0x4010000000000000ULL); BOOST_CHECK_EQUAL(ser_double_to_uint64(785.066650390625), 0x4088888880000000ULL); } /* Python code to generate the below hashes: def reversed_hex(x): return binascii.hexlify(''.join(reversed(x))) def dsha256(x): return hashlib.sha256(hashlib.sha256(x).digest()).digest() reversed_hex(dsha256(''.join(struct.pack('> j; BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } } BOOST_AUTO_TEST_CASE(doubles) { CDataStream ss(SER_DISK, 0); // encode for (int i = 0; i < 1000; i++) { ss << double(i); } BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0" "d7878b23f96")); // decode for (int i = 0; i < 1000; i++) { double j; ss >> j; BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } } BOOST_AUTO_TEST_CASE(varints) { // encode CDataStream ss(SER_DISK, 0); CDataStream::size_type size = 0; for (int i = 0; i < 100000; i++) { - ss << VARINT(i); - size += ::GetSerializeSize(VARINT(i), 0, 0); + ss << VARINT(i, VarIntMode::NONNEGATIVE_SIGNED); + size += + ::GetSerializeSize(VARINT(i, VarIntMode::NONNEGATIVE_SIGNED), 0, 0); BOOST_CHECK(size == ss.size()); } for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) { ss << VARINT(i); size += ::GetSerializeSize(VARINT(i), 0, 0); BOOST_CHECK(size == ss.size()); } // decode for (int i = 0; i < 100000; i++) { int j = -1; - ss >> VARINT(j); + ss >> VARINT(j, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) { uint64_t j = -1; ss >> VARINT(j); BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } } BOOST_AUTO_TEST_CASE(varints_bitpatterns) { CDataStream ss(SER_DISK, 0); - ss << VARINT(0); + ss << VARINT(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear(); - ss << VARINT(0x7f); + ss << VARINT(0x7f, 
VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); - ss << VARINT((int8_t)0x7f); + ss << VARINT((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); - ss << VARINT(0x80); + ss << VARINT(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear(); ss << VARINT((uint8_t)0x80); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear(); - ss << VARINT(0x1234); + ss << VARINT(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); - ss << VARINT((int16_t)0x1234); + ss << VARINT((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); - ss << VARINT(0xffff); + ss << VARINT(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear(); ss << VARINT((uint16_t)0xffff); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear(); - ss << VARINT(0x123456); + ss << VARINT(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); - ss << VARINT((int32_t)0x123456); + ss << VARINT((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); ss << VARINT(0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear(); ss << VARINT((uint32_t)0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear(); ss << VARINT(0xffffffff); BOOST_CHECK_EQUAL(HexStr(ss), "8efefefe7f"); ss.clear(); - ss << VARINT(0x7fffffffffffffffLL); + ss << VARINT(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear(); ss << VARINT(0xffffffffffffffffULL); BOOST_CHECK_EQUAL(HexStr(ss), "80fefefefefefefefe7f"); ss.clear(); } static bool isTooLargeException(const std::ios_base::failure &ex) { std::ios_base::failure expectedException( "ReadCompactSize(): size too large"); // The string returned by what() can be different for different platforms. // Instead of directly comparing the ex.what() with an expected string, // create an instance of exception to see if ex.what() matches the expected // explanatory string returned by the exception instance. return strcmp(expectedException.what(), ex.what()) == 0; } BOOST_AUTO_TEST_CASE(compactsize) { CDataStream ss(SER_DISK, 0); std::vector::size_type i, j; for (i = 1; i <= MAX_SIZE; i *= 2) { WriteCompactSize(ss, i - 1); WriteCompactSize(ss, i); } for (i = 1; i <= MAX_SIZE; i *= 2) { j = ReadCompactSize(ss); BOOST_CHECK_MESSAGE((i - 1) == j, "decoded:" << j << " expected:" << (i - 1)); j = ReadCompactSize(ss); BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } WriteCompactSize(ss, MAX_SIZE); BOOST_CHECK_EQUAL(ReadCompactSize(ss), MAX_SIZE); WriteCompactSize(ss, MAX_SIZE + 1); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isTooLargeException); WriteCompactSize(ss, std::numeric_limits::max()); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isTooLargeException); WriteCompactSize(ss, std::numeric_limits::max()); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isTooLargeException); } static bool isCanonicalException(const std::ios_base::failure &ex) { std::ios_base::failure expectedException("non-canonical ReadCompactSize()"); // The string returned by what() can be different for different platforms. 
// Instead of directly comparing the ex.what() with an expected string, // create an instance of exception to see if ex.what() matches the expected // explanatory string returned by the exception instance. return strcmp(expectedException.what(), ex.what()) == 0; } BOOST_AUTO_TEST_CASE(noncanonical) { // Write some non-canonical CompactSize encodings, and make sure an // exception is thrown when read back. CDataStream ss(SER_DISK, 0); std::vector::size_type n; // zero encoded with three bytes: ss.write("\xfd\x00\x00", 3); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); // 0xfc encoded with three bytes: ss.write("\xfd\xfc\x00", 3); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); // 0xfd encoded with three bytes is OK: ss.write("\xfd\xfd\x00", 3); n = ReadCompactSize(ss); BOOST_CHECK(n == 0xfd); // zero encoded with five bytes: ss.write("\xfe\x00\x00\x00\x00", 5); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); // 0xffff encoded with five bytes: ss.write("\xfe\xff\xff\x00\x00", 5); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); // zero encoded with nine bytes: ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); // 0x01ffffff encoded with nine bytes: ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9); BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException); } BOOST_AUTO_TEST_CASE(insert_delete) { // Test inserting/deleting bytes. CDataStream ss(SER_DISK, 0); BOOST_CHECK_EQUAL(ss.size(), 0U); ss.write("\x00\x01\x02\xff", 4); BOOST_CHECK_EQUAL(ss.size(), 4U); char c = (char)11; // Inserting at beginning/end/middle: ss.insert(ss.begin(), c); BOOST_CHECK_EQUAL(ss.size(), 5U); BOOST_CHECK_EQUAL(ss[0], c); BOOST_CHECK_EQUAL(ss[1], 0); ss.insert(ss.end(), c); BOOST_CHECK_EQUAL(ss.size(), 6U); BOOST_CHECK_EQUAL(ss[4], (char)0xff); BOOST_CHECK_EQUAL(ss[5], c); ss.insert(ss.begin() + 2, c); BOOST_CHECK_EQUAL(ss.size(), 7U); BOOST_CHECK_EQUAL(ss[2], c); // Delete at beginning/end/middle ss.erase(ss.begin()); BOOST_CHECK_EQUAL(ss.size(), 6U); BOOST_CHECK_EQUAL(ss[0], 0); ss.erase(ss.begin() + ss.size() - 1); BOOST_CHECK_EQUAL(ss.size(), 5U); BOOST_CHECK_EQUAL(ss[4], (char)0xff); ss.erase(ss.begin() + 1); BOOST_CHECK_EQUAL(ss.size(), 4U); BOOST_CHECK_EQUAL(ss[0], 0); BOOST_CHECK_EQUAL(ss[1], 1); BOOST_CHECK_EQUAL(ss[2], 2); BOOST_CHECK_EQUAL(ss[3], (char)0xff); // Make sure GetAndClear does the right thing: CSerializeData d; ss.GetAndClear(d); BOOST_CHECK_EQUAL(ss.size(), 0U); } BOOST_AUTO_TEST_CASE(class_methods) { int intval(100); bool boolval(true); std::string stringval("testing"); const char *charstrval("testing charstr"); CMutableTransaction txval; CTransactionRef tx_ref{MakeTransactionRef(txval)}; CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval, charstrval, tx_ref); CSerializeMethodsTestMany methodtest2(intval, boolval, stringval, charstrval, tx_ref); CSerializeMethodsTestSingle methodtest3; CSerializeMethodsTestMany methodtest4; CDataStream ss(SER_DISK, PROTOCOL_VERSION); BOOST_CHECK(methodtest1 == methodtest2); ss << methodtest1; ss >> methodtest4; ss << methodtest2; ss >> methodtest3; BOOST_CHECK(methodtest1 == methodtest2); BOOST_CHECK(methodtest2 == methodtest3); BOOST_CHECK(methodtest3 == methodtest4); CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, 
FLATDATA(charstrval), txval); ss2 >> methodtest3; BOOST_CHECK(methodtest3 == methodtest4); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/txdb.cpp b/src/txdb.cpp index 9acbdb5da..d44c9dabd 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -1,444 +1,444 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include // boost::this_thread::interruption_point() (mingw) #include static const char DB_COIN = 'C'; static const char DB_COINS = 'c'; static const char DB_BLOCK_FILES = 'f'; static const char DB_BLOCK_INDEX = 'b'; static const char DB_BEST_BLOCK = 'B'; static const char DB_HEAD_BLOCKS = 'H'; static const char DB_FLAG = 'F'; static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; namespace { struct CoinEntry { COutPoint *outpoint; char key; explicit CoinEntry(const COutPoint *ptr) : outpoint(const_cast(ptr)), key(DB_COIN) {} template void Serialize(Stream &s) const { s << key; s << outpoint->GetTxId(); s << VARINT(outpoint->GetN()); } template void Unserialize(Stream &s) { s >> key; TxId id; s >> id; uint32_t n = 0; s >> VARINT(n); *outpoint = COutPoint(id, n); } }; } // namespace CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true) {} bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { return db.Read(CoinEntry(&outpoint), coin); } bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { return db.Exists(CoinEntry(&outpoint)); } uint256 CCoinsViewDB::GetBestBlock() const { uint256 hashBestChain; if (!db.Read(DB_BEST_BLOCK, hashBestChain)) return uint256(); return hashBestChain; } std::vector CCoinsViewDB::GetHeadBlocks() const { std::vector vhashHeadBlocks; if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { return std::vector(); } return vhashHeadBlocks; } bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { CDBBatch batch(db); size_t count = 0; size_t changed = 0; size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize); int crash_simulate = gArgs.GetArg("-dbcrashratio", 0); assert(!hashBlock.IsNull()); uint256 old_tip = GetBestBlock(); if (old_tip.IsNull()) { // We may be in the middle of replaying. std::vector old_heads = GetHeadBlocks(); if (old_heads.size() == 2) { assert(old_heads[0] == hashBlock); old_tip = old_heads[1]; } } // In the first batch, mark the database as being in the middle of a // transition from old_tip to hashBlock. // A vector is used for future extensibility, as we may want to support // interrupting after partial writes from multiple independent reorgs. 
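    // If the node crashes after this first batch is committed but before the
    // final batch below, the next startup sees no DB_BEST_BLOCK and a
    // two-entry DB_HEAD_BLOCKS of {hashBlock, old_tip}; the replay path that
    // consumes GetHeadBlocks() (mirrored by the old_heads handling above)
    // then knows exactly which transition was interrupted and can complete
    // it, instead of leaving a partially written UTXO set undetected.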
batch.Erase(DB_BEST_BLOCK); batch.Write(DB_HEAD_BLOCKS, std::vector{hashBlock, old_tip}); for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { CoinEntry entry(&it->first); if (it->second.coin.IsSpent()) { batch.Erase(entry); } else { batch.Write(entry, it->second.coin); } changed++; } count++; CCoinsMap::iterator itOld = it++; mapCoins.erase(itOld); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); db.WriteBatch(batch); batch.Clear(); if (crash_simulate) { static FastRandomContext rng; if (rng.randrange(crash_simulate) == 0) { LogPrintf("Simulating a crash. Goodbye.\n"); _Exit(0); } } } } // In the last batch, mark the database as consistent with hashBlock again. batch.Erase(DB_HEAD_BLOCKS); batch.Write(DB_BEST_BLOCK, hashBlock); LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); bool ret = db.WriteBatch(batch); LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of " "%u) to coin database...\n", (unsigned int)changed, (unsigned int)count); return ret; } size_t CCoinsViewDB::EstimateSize() const { return db.EstimateSize(DB_COIN, char(DB_COIN + 1)); } CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.IsArgSet("-blocksdir") ? GetDataDir() / "blocks" / "index" : GetBlocksDir() / "index", nCacheSize, fMemory, fWipe) {} bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); } bool CBlockTreeDB::WriteReindexing(bool fReindexing) { if (fReindexing) return Write(DB_REINDEX_FLAG, '1'); else return Erase(DB_REINDEX_FLAG); } bool CBlockTreeDB::ReadReindexing(bool &fReindexing) { fReindexing = Exists(DB_REINDEX_FLAG); return true; } bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { return Read(DB_LAST_BLOCK, nFile); } CCoinsViewCursor *CCoinsViewDB::Cursor() const { CCoinsViewDBCursor *i = new CCoinsViewDBCursor( const_cast(db).NewIterator(), GetBestBlock()); /** * It seems that there are no "const iterators" for LevelDB. Since we only * need read operations on it, use a const-cast to get around that * restriction. 
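 * (The cursor below is only used for reads: Seek, Valid, GetKey,
 * GetValue/GetValueSize and Next, so casting away constness does not let any
 * write slip through.)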
*/ i->pcursor->Seek(DB_COIN); // Cache key of first record if (i->pcursor->Valid()) { CoinEntry entry(&i->keyTmp.second); i->pcursor->GetKey(entry); i->keyTmp.first = entry.key; } else { // Make sure Valid() and GetKey() return false i->keyTmp.first = 0; } return i; } bool CCoinsViewDBCursor::GetKey(COutPoint &key) const { // Return cached key if (keyTmp.first == DB_COIN) { key = keyTmp.second; return true; } return false; } bool CCoinsViewDBCursor::GetValue(Coin &coin) const { return pcursor->GetValue(coin); } unsigned int CCoinsViewDBCursor::GetValueSize() const { return pcursor->GetValueSize(); } bool CCoinsViewDBCursor::Valid() const { return keyTmp.first == DB_COIN; } void CCoinsViewDBCursor::Next() { pcursor->Next(); CoinEntry entry(&keyTmp.second); if (!pcursor->Valid() || !pcursor->GetKey(entry)) { // Invalidate cached key after last record so that Valid() and GetKey() // return false keyTmp.first = 0; } else { keyTmp.first = entry.key; } } bool CBlockTreeDB::WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo) { CDBBatch batch(*this); for (std::vector>::const_iterator it = fileInfo.begin(); it != fileInfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second); } batch.Write(DB_LAST_BLOCK, nLastFile); for (std::vector::const_iterator it = blockinfo.begin(); it != blockinfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it)); } return WriteBatch(batch, true); } bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) { return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0'); } bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) { char ch; if (!Read(std::make_pair(DB_FLAG, name), ch)) return false; fValue = ch == '1'; return true; } bool CBlockTreeDB::LoadBlockIndexGuts( const Consensus::Params ¶ms, std::function insertBlockIndex) { std::unique_ptr pcursor(NewIterator()); pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); // Load mapBlockIndex while (pcursor->Valid()) { boost::this_thread::interruption_point(); std::pair key; if (!pcursor->GetKey(key) || key.first != DB_BLOCK_INDEX) { break; } CDiskBlockIndex diskindex; if (!pcursor->GetValue(diskindex)) { return error("%s : failed to read value", __func__); } // Construct block index object CBlockIndex *pindexNew = insertBlockIndex(diskindex.GetBlockHash()); pindexNew->pprev = insertBlockIndex(diskindex.hashPrev); pindexNew->nHeight = diskindex.nHeight; pindexNew->nFile = diskindex.nFile; pindexNew->nDataPos = diskindex.nDataPos; pindexNew->nUndoPos = diskindex.nUndoPos; pindexNew->nVersion = diskindex.nVersion; pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; pindexNew->nTime = diskindex.nTime; pindexNew->nBits = diskindex.nBits; pindexNew->nNonce = diskindex.nNonce; pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, params)) { return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString()); } pcursor->Next(); } return true; } namespace { //! Legacy class to deserialize pre-pertxout database entries without reindex. class CCoins { public: //! whether transaction is a coinbase bool fCoinBase; //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs //! at the end of the array are dropped std::vector vout; //! at which height this transaction was included in the active block chain int nHeight; //! 
empty constructor CCoins() : fCoinBase(false), vout(0), nHeight(0) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; // version - int nVersionDummy; + unsigned int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); // header code ::Unserialize(s, VARINT(nCode)); fCoinBase = nCode & 1; std::vector vAvail(2, false); vAvail[0] = (nCode & 2) != 0; vAvail[1] = (nCode & 4) != 0; uint32_t nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1); // spentness bitmask while (nMaskCode > 0) { uint8_t chAvail = 0; ::Unserialize(s, chAvail); for (unsigned int p = 0; p < 8; p++) { bool f = (chAvail & (1 << p)) != 0; vAvail.push_back(f); } if (chAvail != 0) { nMaskCode--; } } // txouts themself vout.assign(vAvail.size(), CTxOut()); for (size_t i = 0; i < vAvail.size(); i++) { if (vAvail[i]) { ::Unserialize(s, CTxOutCompressor(vout[i])); } } // coinbase height - ::Unserialize(s, VARINT(nHeight)); + ::Unserialize(s, VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED)); } }; } // namespace /** * Upgrade the database from older formats. * * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout. */ bool CCoinsViewDB::Upgrade() { std::unique_ptr pcursor(db.NewIterator()); pcursor->Seek(std::make_pair(DB_COINS, uint256())); if (!pcursor->Valid()) { return true; } int64_t count = 0; LogPrintf("Upgrading utxo-set database...\n"); LogPrintfToBeContinued("[0%%]..."); uiInterface.ShowProgress(_("Upgrading UTXO database"), 0, true); size_t batch_size = 1 << 24; CDBBatch batch(db); int reportDone = 0; std::pair key; std::pair prev_key = {DB_COINS, uint256()}; while (pcursor->Valid()) { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } if (!pcursor->GetKey(key) || key.first != DB_COINS) { break; } if (count++ % 256 == 0) { uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1); int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5); uiInterface.ShowProgress(_("Upgrading UTXO database"), percentageDone, true); if (reportDone < percentageDone / 10) { // report max. every 10% step LogPrintfToBeContinued("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } } CCoins old_coins; if (!pcursor->GetValue(old_coins)) { return error("%s: cannot parse CCoins record", __func__); } TxId id(key.second); for (size_t i = 0; i < old_coins.vout.size(); ++i) { if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) { Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase); COutPoint outpoint(id, i); CoinEntry entry(&outpoint); batch.Write(entry, newcoin); } } batch.Erase(key); if (batch.SizeEstimate() > batch_size) { db.WriteBatch(batch); batch.Clear(); db.CompactRange(prev_key, key); prev_key = key; } pcursor->Next(); } db.WriteBatch(batch); db.CompactRange({DB_COINS, uint256()}, key); uiInterface.ShowProgress("", 100, false); LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE"); return !ShutdownRequested(); } diff --git a/src/undo.h b/src/undo.h index 9d15e5fd4..3040f88f7 100644 --- a/src/undo.h +++ b/src/undo.h @@ -1,145 +1,145 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#ifndef BITCOIN_UNDO_H #define BITCOIN_UNDO_H #include #include #include #include #include class CBlock; class CBlockIndex; class CCoinsViewCache; class CValidationState; /** * Undo information for a CTxIn * * Contains the prevout's CTxOut being spent, and its metadata as well (coinbase * or not, height). The serialization contains a dummy value of zero. This is * compatible with older versions which expect to see the transaction version * there. */ class TxInUndoSerializer { const Coin *pcoin; public: explicit TxInUndoSerializer(const Coin *pcoinIn) : pcoin(pcoinIn) {} template void Serialize(Stream &s) const { ::Serialize( s, VARINT(pcoin->GetHeight() * 2 + (pcoin->IsCoinBase() ? 1 : 0))); if (pcoin->GetHeight() > 0) { // Required to maintain compatibility with older undo format. ::Serialize(s, uint8_t(0)); } ::Serialize(s, CTxOutCompressor(REF(pcoin->GetTxOut()))); } }; class TxInUndoDeserializer { Coin *pcoin; public: explicit TxInUndoDeserializer(Coin *pcoinIn) : pcoin(pcoinIn) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; ::Unserialize(s, VARINT(nCode)); uint32_t nHeight = nCode / 2; bool fCoinBase = nCode & 1; if (nHeight > 0) { // Old versions stored the version number for the last spend of a // transaction's outputs. Non-final spends were indicated with // height = 0. - int nVersionDummy; + unsigned int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); } CTxOut txout; ::Unserialize(s, CTxOutCompressor(REF(txout))); *pcoin = Coin(std::move(txout), nHeight, fCoinBase); } }; static const size_t MAX_INPUTS_PER_TX = MAX_TX_SIZE / ::GetSerializeSize(CTxIn(), SER_NETWORK, PROTOCOL_VERSION); /** Restore the UTXO in a Coin at a given COutPoint */ class CTxUndo { public: // Undo information for all txins std::vector vprevout; template void Serialize(Stream &s) const { // TODO: avoid reimplementing vector serializer. uint64_t count = vprevout.size(); ::Serialize(s, COMPACTSIZE(REF(count))); for (const auto &prevout : vprevout) { ::Serialize(s, TxInUndoSerializer(&prevout)); } } template void Unserialize(Stream &s) { // TODO: avoid reimplementing vector deserializer. uint64_t count = 0; ::Unserialize(s, COMPACTSIZE(count)); if (count > MAX_INPUTS_PER_TX) { throw std::ios_base::failure("Too many input undo records"); } vprevout.resize(count); for (auto &prevout : vprevout) { ::Unserialize(s, TxInUndoDeserializer(&prevout)); } } }; /** Undo information for a CBlock */ class CBlockUndo { public: // For all but the coinbase std::vector vtxundo; ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(vtxundo); } }; enum DisconnectResult { // All good. DISCONNECT_OK, // Rolled back, but UTXO set was inconsistent with block. DISCONNECT_UNCLEAN, // Something else went wrong. DISCONNECT_FAILED, }; /** * Restore the UTXO in a Coin at a given COutPoint. * @param undo The Coin to be restored. * @param view The coins view to which to apply the changes. * @param out The out point that corresponds to the tx input. * @return A DisconnectResult */ DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view, const COutPoint &out); /** * Undo a block from the block and the undoblock data. * See DisconnectBlock for more details. */ DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo, const CBlock &block, const CBlockIndex *pindex, CCoinsViewCache &coins); #endif // BITCOIN_UNDO_H
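// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: a self-contained encoder for the
// two variable-length integer formats exercised by serialize_tests.cpp above.
// EncodeVarInt follows the MSB-base-128 format that the varints_bitpatterns
// vectors check (the format written by serialize.h's VARINT/WriteVarInt), and
// EncodeCompactSize follows the canonical CompactSize rules that the
// `compactsize` and `noncanonical` tests enforce. The function names and the
// main() harness are hypothetical; only the byte-level formats are taken from
// the test vectors in the diff.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// MSB-base-128 VARINT: 7 payload bits per byte, the high bit set on every
// byte except the last, and a "minus one" step between groups so that each
// value has exactly one encoding. Bytes come out most-significant-group first.
static std::vector<uint8_t> EncodeVarInt(uint64_t n) {
    std::vector<uint8_t> tmp;
    while (true) {
        tmp.push_back((n & 0x7F) | (tmp.empty() ? 0x00 : 0x80));
        if (n <= 0x7F) {
            break;
        }
        n = (n >> 7) - 1;
    }
    // The loop produced the least significant group first; reverse it.
    return std::vector<uint8_t>(tmp.rbegin(), tmp.rend());
}

// Canonical CompactSize: one raw byte below 0xfd, otherwise a 0xfd/0xfe/0xff
// marker followed by the smallest little-endian integer that fits. Using a
// wider marker than necessary is exactly what the `noncanonical` test rejects.
static std::vector<uint8_t> EncodeCompactSize(uint64_t n) {
    std::vector<uint8_t> out;
    if (n < 0xfd) {
        out.push_back(uint8_t(n));
        return out;
    }
    int width = 2;
    if (n <= 0xffff) {
        out.push_back(0xfd);
    } else if (n <= 0xffffffff) {
        out.push_back(0xfe);
        width = 4;
    } else {
        out.push_back(0xff);
        width = 8;
    }
    for (int i = 0; i < width; i++) {
        out.push_back(uint8_t(n >> (8 * i))); // little-endian payload
    }
    return out;
}

static std::string Hex(const std::vector<uint8_t> &v) {
    static const char *digits = "0123456789abcdef";
    std::string s;
    for (uint8_t b : v) {
        s += digits[b >> 4];
        s += digits[b & 0xF];
    }
    return s;
}

int main() {
    // Vectors taken from the varints_bitpatterns test above.
    printf("%s\n", Hex(EncodeVarInt(0x7f)).c_str());     // 7f
    printf("%s\n", Hex(EncodeVarInt(0x80)).c_str());     // 8000
    printf("%s\n", Hex(EncodeVarInt(0x1234)).c_str());   // a334
    printf("%s\n", Hex(EncodeVarInt(0xffff)).c_str());   // 82fe7f
    printf("%s\n", Hex(EncodeVarInt(0x123456)).c_str()); // c7e756
    // CompactSize: 0xfc still fits in a single byte, 0xfd needs the
    // three-byte form (cf. the `noncanonical` test writing "\xfd\xfd\x00").
    printf("%s\n", Hex(EncodeCompactSize(0xfc)).c_str()); // fc
    printf("%s\n", Hex(EncodeCompactSize(0xfd)).c_str()); // fdfd00
    return 0;
}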