Page MenuHomePhabricator

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 77a2d7e8b..e5f2ebfff 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -1,56 +1,58 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
#include <bench/data.h>
#include <chainparams.h>
#include <config.h>
#include <consensus/validation.h>
#include <streams.h>
#include <validation.h>
// These are the two major time-sinks which happen after we have fully received
// a block off the wire, but before we can relay the block on to peers using
// compact block relay.
// Benchmark raw CBlock deserialization of the stored benchmark block
// (block 413567), one of the two major time sinks between fully receiving
// a block and relaying it via compact blocks.
static void DeserializeBlockTest(benchmark::Bench &bench) {
    CDataStream stream(benchmark::data::block413567, SER_NETWORK,
                       PROTOCOL_VERSION);
    // Append one dummy byte so the stream does not compact its buffer.
    std::byte padding{0};
    stream.write({&padding, 1});
    bench.unit("block").run([&] {
        CBlock block;
        stream >> block;
        const bool rewound =
            stream.Rewind(benchmark::data::block413567.size());
        assert(rewound);
    });
}
// Benchmark CBlock deserialization plus CheckBlock(), the other major time
// sink before a freshly received block can be relayed to peers.
static void DeserializeAndCheckBlockTest(benchmark::Bench &bench) {
    CDataStream stream(benchmark::data::block413567, SER_NETWORK,
                       PROTOCOL_VERSION);
    // Append one dummy byte so the stream does not compact its buffer.
    std::byte padding{0};
    stream.write({&padding, 1});

    const Config &config = GetConfig();
    const Consensus::Params params = config.GetChainParams().GetConsensus();
    BlockValidationOptions options(config);

    bench.unit("block").run([&] {
        // CBlock caches its checked state, so it must be recreated on every
        // iteration.
        CBlock block;
        stream >> block;
        const bool rewound =
            stream.Rewind(benchmark::data::block413567.size());
        assert(rewound);

        BlockValidationState validationState;
        const bool checked =
            CheckBlock(block, validationState, params, options);
        assert(checked);
    });
}
BENCHMARK(DeserializeBlockTest);
BENCHMARK(DeserializeAndCheckBlockTest);
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index bca3fc944..78eb8717c 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -1,65 +1,65 @@
// Copyright (c) 2016-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
#include <bench/data.h>
#include <rpc/blockchain.h>
#include <streams.h>
#include <validation.h>
#include <test/util/setup_common.h>
#include <univalue.h>
namespace {
// Shared benchmark fixture: a mainnet TestingSetup plus benchmark block
// 413567 deserialized into a CBlock, with a CBlockIndex pointing at it.
struct TestBlockAndIndex {
    const std::unique_ptr<const TestingSetup> testing_setup{
        MakeNoLogFileContext<const TestingSetup>(CBaseChainParams::MAIN)};
    CBlock block{};
    BlockHash blockHash{};
    CBlockIndex blockindex{};

    TestBlockAndIndex() {
        CDataStream stream(benchmark::data::block413567, SER_NETWORK,
                           PROTOCOL_VERSION);
        // Append one dummy byte so the stream does not compact its buffer.
        std::byte padding{0};
        stream.write({&padding, 1});
        stream >> block;

        blockHash = block.GetHash();
        blockindex.phashBlock = &blockHash;
        blockindex.nBits = 403014710;
    }
};
} // namespace
// Benchmark the full verbose block-to-JSON conversion (txDetails=true).
static void BlockToJsonVerbose(benchmark::Bench &bench) {
    TestBlockAndIndex data;
    bench.run([&] {
        auto univalue = blockToJSON(
            data.testing_setup->m_node.chainman->m_blockman, data.block,
            &data.blockindex, &data.blockindex, /*txDetails=*/true);
        // Keep the result alive so the conversion is not optimized out.
        ankerl::nanobench::doNotOptimizeAway(univalue);
    });
}
BENCHMARK(BlockToJsonVerbose);
// Benchmark only the JSON string serialization (UniValue::write) of a block
// that has already been converted to UniValue outside the timed loop.
static void BlockToJsonVerboseWrite(benchmark::Bench &bench) {
    TestBlockAndIndex data;
    auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman,
                                data.block, &data.blockindex, &data.blockindex,
                                /*txDetails=*/true);
    bench.run([&] {
        auto str = univalue.write();
        // Keep the result alive so the serialization is not optimized out.
        ankerl::nanobench::doNotOptimizeAway(str);
    });
}
BENCHMARK(BlockToJsonVerboseWrite);
diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp
index 2a597950a..8d9cb753b 100644
--- a/src/common/bloom.cpp
+++ b/src/common/bloom.cpp
@@ -1,268 +1,268 @@
// Copyright (c) 2012-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <common/bloom.h>
#include <hash.h>
#include <primitives/transaction.h>
#include <random.h>
#include <script/script.h>
#include <script/standard.h>
#include <streams.h>
#include <util/fastrange.h>
#include <cmath>
#include <cstdlib>
#include <algorithm>
#define LN2SQUARED 0.4804530139182014246671025263266649717305529515945455
#define LN2 0.6931471805599453094172321214581765680755001343602552
/**
* The ideal size for a bloom filter with a given number of elements and false
* positive rate is:
* - nElements * log(fp rate) / ln(2)^2
* We ignore filter parameters which will create a bloom filter larger than the
* protocol limits
*
* The ideal number of hash functions is filter size * ln(2) / number of
* elements. Again, we ignore filter parameters which will create a bloom filter
* with more hash functions than the protocol limits.
* See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these
* formulas.
*/
CBloomFilter::CBloomFilter(const uint32_t nElements, const double nFPRate,
                           const uint32_t nTweakIn, uint8_t nFlagsIn)
    // Filter size in bytes: optimal bit count -nElements*ln(fp)/ln(2)^2,
    // clamped to MAX_BLOOM_FILTER_SIZE (see formula comment above).
    : vData(std::min<uint32_t>(-1 / LN2SQUARED * nElements * log(nFPRate),
                               MAX_BLOOM_FILTER_SIZE * 8) /
            8),
      // Optimal hash count: (bits per element) * ln(2), clamped to
      // MAX_HASH_FUNCS.
      nHashFuncs(std::min<uint32_t>(vData.size() * 8 / nElements * LN2,
                                    MAX_HASH_FUNCS)),
      nTweak(nTweakIn), nFlags(nFlagsIn) {}
// Map (nHashNum, data) to a bit position within the filter's bit array.
inline uint32_t CBloomFilter::Hash(uint32_t nHashNum,
                                   Span<const uint8_t> vDataToHash) const {
    // 0xFBA4C795 chosen as it guarantees a reasonable bit difference between
    // nHashNum values.
    return MurmurHash3(nHashNum * 0xFBA4C795 + nTweak, vDataToHash) %
           (vData.size() * 8);
}
// Set the nHashFuncs filter bits corresponding to vKey.
void CBloomFilter::insert(Span<const uint8_t> vKey) {
    if (vData.empty()) {
        // Avoid divide-by-zero (CVE-2013-5700)
        return;
    }
    for (uint32_t n = 0; n < nHashFuncs; n++) {
        const uint32_t bitPos = Hash(n, vKey);
        // Set bit bitPos of vData.
        vData[bitPos >> 3] |= (1 << (bitPos & 7));
    }
}
// Insert the serialized form of an outpoint into the filter.
void CBloomFilter::insert(const COutPoint &outpoint) {
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << outpoint;
    // Hand the stream's raw bytes to the Span-based insert overload.
    insert(MakeUCharSpan(stream));
}
// Probe all nHashFuncs bit positions for vKey; false means definitely not
// inserted, true means probably inserted.
bool CBloomFilter::contains(Span<const uint8_t> vKey) const {
    if (vData.empty()) {
        // Avoid divide-by-zero (CVE-2013-5700)
        return true;
    }
    for (uint32_t n = 0; n < nHashFuncs; n++) {
        const uint32_t bitPos = Hash(n, vKey);
        // A single clear bit proves the key was never inserted.
        if (!(vData[bitPos >> 3] & (1 << (bitPos & 7)))) {
            return false;
        }
    }
    return true;
}
bool CBloomFilter::contains(const COutPoint &outpoint) const {
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << outpoint;
- return contains(stream);
+ return contains(MakeUCharSpan(stream));
}
// True when both the filter size and the hash-function count respect the
// protocol limits.
bool CBloomFilter::IsWithinSizeConstraints() const {
    if (vData.size() > MAX_BLOOM_FILTER_SIZE) {
        return false;
    }
    return nHashFuncs <= MAX_HASH_FUNCS;
}
// Returns true if the transaction matches the filter (by txid or by any
// data element of any scriptPubKey). Depending on nFlags, matched outputs
// may additionally be inserted as outpoints so that future spends match too.
bool CBloomFilter::MatchAndInsertOutputs(const CTransaction &tx) {
    bool fFound = false;
    // Match if the filter contains the hash of tx for finding tx when they
    // appear in a block
    if (vData.empty()) {
        // zero-size = "match-all" filter
        return true;
    }
    const TxId &txid = tx.GetId();
    if (contains(txid)) {
        fFound = true;
    }

    for (size_t i = 0; i < tx.vout.size(); i++) {
        const CTxOut &txout = tx.vout[i];
        // Match if the filter contains any arbitrary script data element in any
        // scriptPubKey in tx. If this matches, also add the specific output
        // that was matched. This means clients don't have to update the filter
        // themselves when a new relevant tx is discovered in order to find
        // spending transactions, which avoids round-tripping and race
        // conditions.
        CScript::const_iterator pc = txout.scriptPubKey.begin();
        std::vector<uint8_t> data;
        while (pc < txout.scriptPubKey.end()) {
            opcodetype opcode;
            if (!txout.scriptPubKey.GetOp(pc, opcode, data)) {
                // Script is unparseable past this point; stop scanning it.
                break;
            }
            if (data.size() != 0 && contains(data)) {
                fFound = true;
                if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_ALL) {
                    // Track every matched outpoint.
                    insert(COutPoint(txid, i));
                } else if ((nFlags & BLOOM_UPDATE_MASK) ==
                           BLOOM_UPDATE_P2PUBKEY_ONLY) {
                    // Only track outpoints paying to a bare pubkey or bare
                    // multisig script.
                    std::vector<std::vector<uint8_t>> vSolutions;
                    TxoutType type = Solver(txout.scriptPubKey, vSolutions);
                    if (type == TxoutType::PUBKEY ||
                        type == TxoutType::MULTISIG) {
                        insert(COutPoint(txid, i));
                    }
                }
                break;
            }
        }
    }

    return fFound;
}
// Returns true if any input of tx matches the filter, either because the
// filter contains the spent outpoint or because it contains a data element
// pushed by an input's scriptSig.
bool CBloomFilter::MatchInputs(const CTransaction &tx) {
    for (const CTxIn &txin : tx.vin) {
        // Match if the filter contains an outpoint tx spends
        if (contains(txin.prevout)) {
            return true;
        }

        // Match if the filter contains any arbitrary script data element in any
        // scriptSig in tx
        CScript::const_iterator pc = txin.scriptSig.begin();
        std::vector<uint8_t> data;
        while (pc < txin.scriptSig.end()) {
            opcodetype opcode;
            if (!txin.scriptSig.GetOp(pc, opcode, data)) {
                // Unparseable script; stop scanning this input.
                break;
            }
            if (data.size() != 0 && contains(data)) {
                return true;
            }
        }
    }
    return false;
}
// Size a rolling filter so that roughly the nElements most recent
// insertions stay queryable at false-positive rate fpRate, using 2-3
// generations of nElements/2 entries each.
CRollingBloomFilter::CRollingBloomFilter(const uint32_t nElements,
                                         const double fpRate) {
    double logFpRate = log(fpRate);
    /* The optimal number of hash functions is log(fpRate) / log(0.5), but
     * restrict it to the range 1-50. */
    nHashFuncs = std::max(1, std::min<int>(round(logFpRate / log(0.5)), 50));
    /* In this rolling bloom filter, we'll store between 2 and 3 generations of
     * nElements / 2 entries. */
    nEntriesPerGeneration = (nElements + 1) / 2;
    uint32_t nMaxElements = nEntriesPerGeneration * 3;
    /* The maximum fpRate = pow(1.0 - exp(-nHashFuncs * nMaxElements /
     * nFilterBits), nHashFuncs)
     * => pow(fpRate, 1.0 / nHashFuncs) = 1.0 - exp(-nHashFuncs *
     * nMaxElements / nFilterBits)
     * => 1.0 - pow(fpRate, 1.0 / nHashFuncs) = exp(-nHashFuncs *
     * nMaxElements / nFilterBits)
     * => log(1.0 - pow(fpRate, 1.0 / nHashFuncs)) = -nHashFuncs *
     * nMaxElements / nFilterBits
     * => nFilterBits = -nHashFuncs * nMaxElements / log(1.0 -
     * pow(fpRate, 1.0 / nHashFuncs))
     * => nFilterBits = -nHashFuncs * nMaxElements / log(1.0 -
     * exp(logFpRate / nHashFuncs))
     */
    uint32_t nFilterBits =
        uint32_t(ceil(-1.0 * nHashFuncs * nMaxElements /
                      log(1.0 - exp(logFpRate / nHashFuncs))));
    data.clear();
    /* For each data element we need to store 2 bits. If both bits are 0, the
     * bit is treated as unset. If the bits are (01), (10), or (11), the bit is
     * treated as set in generation 1, 2, or 3 respectively. These bits are
     * stored in separate integers: position P corresponds to bit (P & 63) of
     * the integers data[(P >> 6) * 2] and data[(P >> 6) * 2 + 1]. */
    data.resize(((nFilterBits + 63) / 64) << 1);
    reset();
}
/* Similar to CBloomFilter::Hash */
static inline uint32_t RollingBloomHash(uint32_t nHashNum, uint32_t nTweak,
                                        Span<const uint8_t> vDataToHash) {
    // Same seeding constant as CBloomFilter::Hash; gives a reasonable bit
    // difference between nHashNum values.
    const uint32_t seed = nHashNum * 0xFBA4C795 + nTweak;
    return MurmurHash3(seed, vDataToHash);
}
// Insert vKey into the current generation, rolling over to a new generation
// (and wiping the oldest one) when the current generation is full.
void CRollingBloomFilter::insert(Span<const uint8_t> vKey) {
    if (nEntriesThisGeneration == nEntriesPerGeneration) {
        nEntriesThisGeneration = 0;
        nGeneration++;
        if (nGeneration == 4) {
            // Generations cycle 1 -> 2 -> 3 -> 1.
            nGeneration = 1;
        }
        // All-ones when the corresponding generation bit is set, else zero.
        uint64_t nGenerationMask1 = 0 - uint64_t(nGeneration & 1);
        uint64_t nGenerationMask2 = 0 - uint64_t(nGeneration >> 1);
        /* Wipe old entries that used this generation number. */
        for (uint32_t p = 0; p < data.size(); p += 2) {
            uint64_t p1 = data[p], p2 = data[p + 1];
            uint64_t mask = (p1 ^ nGenerationMask1) | (p2 ^ nGenerationMask2);
            data[p] = p1 & mask;
            data[p + 1] = p2 & mask;
        }
    }
    nEntriesThisGeneration++;

    for (int n = 0; n < nHashFuncs; n++) {
        uint32_t h = RollingBloomHash(n, nTweak, vKey);
        // Lower 6 bits select the bit within a 64-bit word.
        int bit = h & 0x3F;
        /* FastMod works with the upper bits of h, so it is safe to ignore that
         * the lower bits of h are already used for bit. */
        uint32_t pos = FastRange32(h, data.size());
        /* The lowest bit of pos is ignored, and set to zero for the first bit,
         * and to one for the second. */
        data[pos & ~1U] = (data[pos & ~1U] & ~(uint64_t(1) << bit)) |
                          uint64_t(nGeneration & 1) << bit;
        data[pos | 1U] = (data[pos | 1] & ~(uint64_t(1) << bit)) |
                         uint64_t(nGeneration >> 1) << bit;
    }
}
// Probe all hash positions; false means vKey was definitely not inserted in
// any live generation, true means it probably was.
bool CRollingBloomFilter::contains(Span<const uint8_t> vKey) const {
    for (int n = 0; n < nHashFuncs; n++) {
        uint32_t h = RollingBloomHash(n, nTweak, vKey);
        // Lower 6 bits select the bit within a 64-bit word.
        int bit = h & 0x3F;
        uint32_t pos = FastRange32(h, data.size());
        /* If the relevant bit is not set in either data[pos & ~1] or data[pos |
         * 1], the filter does not contain vKey */
        if (!(((data[pos & ~1] | data[pos | 1]) >> bit) & 1)) {
            return false;
        }
    }
    return true;
}
// Empty the filter: pick a fresh random tweak, restart at generation 1 and
// clear every stored bit.
void CRollingBloomFilter::reset() {
    nTweak = GetRand<unsigned int>();
    nEntriesThisGeneration = 0;
    nGeneration = 1;
    std::fill(data.begin(), data.end(), 0);
}
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index cd872fdcd..ac198cb57 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -1,349 +1,349 @@
// Copyright (c) 2012-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_DBWRAPPER_H
#define BITCOIN_DBWRAPPER_H
#include <clientversion.h>
#include <fs.h>
#include <serialize.h>
#include <span.h>
#include <streams.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <optional>
static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64;
static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024;
class dbwrapper_error : public std::runtime_error {
public:
explicit dbwrapper_error(const std::string &msg)
: std::runtime_error(msg) {}
};
class CDBWrapper;
namespace dbwrapper {
using leveldb::DestroyDB;
}
/**
* These should be considered an implementation detail of the specific database.
*/
namespace dbwrapper_private {
/**
* Handle database error by throwing dbwrapper_error exception.
*/
void HandleError(const leveldb::Status &status);
/**
* Work around circular dependency, as well as for testing in dbwrapper_tests.
* Database obfuscation should be considered an implementation detail of the
* specific database.
*/
const std::vector<uint8_t> &GetObfuscateKey(const CDBWrapper &w);
}; // namespace dbwrapper_private
/** Batch of changes queued to be written to a CDBWrapper */
class CDBBatch {
    friend class CDBWrapper;

private:
    const CDBWrapper &parent;
    // Pending leveldb operations appended by Write()/Erase().
    leveldb::WriteBatch batch;

    // Scratch streams reused across Write()/Erase() calls so serialization
    // buffers are not reallocated per entry.
    CDataStream ssKey;
    CDataStream ssValue;

    // Running estimate (in bytes) of the serialized batch size.
    size_t size_estimate;

public:
    /**
     * @param[in] _parent CDBWrapper that this batch is to be submitted to
     */
    explicit CDBBatch(const CDBWrapper &_parent)
        : parent(_parent), ssKey(SER_DISK, CLIENT_VERSION),
          ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0){};

    // Drop all queued operations and reset the size estimate.
    void Clear() {
        batch.Clear();
        size_estimate = 0;
    }

    // Queue a key/value write; the value is XOR-obfuscated with the parent's
    // obfuscation key before being handed to leveldb.
    template <typename K, typename V> void Write(const K &key, const V &value) {
        ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
        ssKey << key;
        leveldb::Slice slKey((const char *)ssKey.data(), ssKey.size());

        ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE);
        ssValue << value;
        ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
        leveldb::Slice slValue((const char *)ssValue.data(), ssValue.size());

        batch.Put(slKey, slValue);
        // LevelDB serializes writes as:
        // - byte: header
        // - varint: key length (1 byte up to 127B, 2 bytes up to 16383B, ...)
        // - byte[]: key
        // - varint: value length
        // - byte[]: value
        // The formula below assumes the key and value are both less than 16k.
        size_estimate += 3 + (slKey.size() > 127) + slKey.size() +
                         (slValue.size() > 127) + slValue.size();
        ssKey.clear();
        ssValue.clear();
    }

    // Queue a key deletion.
    template <typename K> void Erase(const K &key) {
        ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
        ssKey << key;
        leveldb::Slice slKey((const char *)ssKey.data(), ssKey.size());

        batch.Delete(slKey);
        // LevelDB serializes erases as:
        // - byte: header
        // - varint: key length
        // - byte[]: key
        // The formula below assumes the key is less than 16kB.
        size_estimate += 2 + (slKey.size() > 127) + slKey.size();
        ssKey.clear();
    }

    // Approximate serialized size of the queued batch, in bytes.
    size_t SizeEstimate() const { return size_estimate; }
};
/** Forward iterator over the key/value entries of a CDBWrapper. */
class CDBIterator {
private:
    const CDBWrapper &parent;
    leveldb::Iterator *piter;

public:
    /**
     * @param[in] _parent Parent CDBWrapper instance.
     * @param[in] _piter The original leveldb iterator.
     */
    CDBIterator(const CDBWrapper &_parent, leveldb::Iterator *_piter)
        : parent(_parent), piter(_piter){};
    ~CDBIterator();

    bool Valid() const;

    void SeekToFirst();

    // Position the iterator at the first entry whose key is >= the
    // serialized form of `key`.
    template <typename K> void Seek(const K &key) {
        CDataStream ssKey(SER_DISK, CLIENT_VERSION);
        ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
        ssKey << key;
        leveldb::Slice slKey((const char *)ssKey.data(), ssKey.size());
        piter->Seek(slKey);
    }

    void Next();

    // Deserialize the current entry's key into `key`; returns false if the
    // stored bytes fail to deserialize.
    template <typename K> bool GetKey(K &key) {
        leveldb::Slice slKey = piter->key();
        try {
            CDataStream ssKey{MakeByteSpan(slKey), SER_DISK, CLIENT_VERSION};
            ssKey >> key;
        } catch (const std::exception &) {
            return false;
        }
        return true;
    }

    // De-obfuscate and deserialize the current entry's value into `value`;
    // returns false if the stored bytes fail to deserialize.
    template <typename V> bool GetValue(V &value) {
        leveldb::Slice slValue = piter->value();
        try {
            CDataStream ssValue{MakeByteSpan(slValue), SER_DISK,
                                CLIENT_VERSION};
            ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
            ssValue >> value;
        } catch (const std::exception &) {
            return false;
        }
        return true;
    }

    // Size in bytes of the current entry's (obfuscated) value.
    unsigned int GetValueSize() { return piter->value().size(); }
};
class CDBWrapper {
friend const std::vector<uint8_t> &
dbwrapper_private::GetObfuscateKey(const CDBWrapper &w);
private:
//! custom environment this database is using (may be nullptr in case of
//! default environment)
leveldb::Env *penv;
//! database options used
leveldb::Options options;
//! options used when reading from the database
leveldb::ReadOptions readoptions;
//! options used when iterating over values of the database
leveldb::ReadOptions iteroptions;
//! options used when writing to the database
leveldb::WriteOptions writeoptions;
//! options used when sync writing to the database
leveldb::WriteOptions syncoptions;
//! the database itself
leveldb::DB *pdb;
//! the name of this database
std::string m_name;
//! a key used for optional XOR-obfuscation of the database
std::vector<uint8_t> obfuscate_key;
//! the key under which the obfuscation key is stored
static const std::string OBFUSCATE_KEY_KEY;
//! the length of the obfuscate key in number of bytes
static const unsigned int OBFUSCATE_KEY_NUM_BYTES;
std::vector<uint8_t> CreateObfuscateKey() const;
//! path to filesystem storage
const fs::path m_path;
//! whether or not the database resides in memory
bool m_is_memory;
public:
/**
* @param[in] path Location in the filesystem where leveldb data will
* be stored.
* @param[in] nCacheSize Configures various leveldb cache settings.
* @param[in] fMemory If true, use leveldb's memory environment.
* @param[in] fWipe If true, remove all existing data.
* @param[in] obfuscate If true, store data obfuscated via simple XOR. If
* false, XOR
* with a zero'd byte array.
*/
CDBWrapper(const fs::path &path, size_t nCacheSize, bool fMemory = false,
bool fWipe = false, bool obfuscate = false);
~CDBWrapper();
CDBWrapper(const CDBWrapper &) = delete;
CDBWrapper &operator=(const CDBWrapper &) = delete;
template <typename K, typename V> bool Read(const K &key, V &value) const {
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char *)ssKey.data(), ssKey.size());
std::string strValue;
leveldb::Status status = pdb->Get(readoptions, slKey, &strValue);
if (!status.ok()) {
if (status.IsNotFound()) return false;
LogPrintf("LevelDB read failure: %s\n", status.ToString());
dbwrapper_private::HandleError(status);
}
try {
- CDataStream ssValue(MakeUCharSpan(strValue), SER_DISK,
- CLIENT_VERSION);
+ CDataStream ssValue{MakeByteSpan(strValue), SER_DISK,
+ CLIENT_VERSION};
ssValue.Xor(obfuscate_key);
ssValue >> value;
} catch (const std::exception &) {
return false;
}
return true;
}
template <typename K, typename V>
bool Write(const K &key, const V &value, bool fSync = false) {
CDBBatch batch(*this);
batch.Write(key, value);
return WriteBatch(batch, fSync);
}
//! @returns filesystem path to the on-disk data.
std::optional<fs::path> StoragePath() {
if (m_is_memory) {
return {};
}
return m_path;
}
template <typename K> bool Exists(const K &key) const {
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char *)ssKey.data(), ssKey.size());
std::string strValue;
leveldb::Status status = pdb->Get(readoptions, slKey, &strValue);
if (!status.ok()) {
if (status.IsNotFound()) return false;
LogPrintf("LevelDB read failure: %s\n", status.ToString());
dbwrapper_private::HandleError(status);
}
return true;
}
template <typename K> bool Erase(const K &key, bool fSync = false) {
CDBBatch batch(*this);
batch.Erase(key);
return WriteBatch(batch, fSync);
}
bool WriteBatch(CDBBatch &batch, bool fSync = false);
// Get an estimate of LevelDB memory usage (in bytes).
size_t DynamicMemoryUsage() const;
CDBIterator *NewIterator() {
return new CDBIterator(*this, pdb->NewIterator(iteroptions));
}
/**
* Return true if the database managed by this class contains no entries.
*/
bool IsEmpty();
template <typename K>
size_t EstimateSize(const K &key_begin, const K &key_end) const {
CDataStream ssKey1(SER_DISK, CLIENT_VERSION),
ssKey2(SER_DISK, CLIENT_VERSION);
ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey1 << key_begin;
ssKey2 << key_end;
leveldb::Slice slKey1((const char *)ssKey1.data(), ssKey1.size());
leveldb::Slice slKey2((const char *)ssKey2.data(), ssKey2.size());
uint64_t size = 0;
leveldb::Range range(slKey1, slKey2);
pdb->GetApproximateSizes(&range, 1, &size);
return size;
}
/**
* Compact a certain range of keys in the database.
*/
template <typename K>
void CompactRange(const K &key_begin, const K &key_end) const {
CDataStream ssKey1(SER_DISK, CLIENT_VERSION),
ssKey2(SER_DISK, CLIENT_VERSION);
ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey1 << key_begin;
ssKey2 << key_end;
leveldb::Slice slKey1((const char *)ssKey1.data(), ssKey1.size());
leveldb::Slice slKey2((const char *)ssKey2.data(), ssKey2.size());
pdb->CompactRange(&slKey1, &slKey2);
}
};
#endif // BITCOIN_DBWRAPPER_H
diff --git a/src/hash.h b/src/hash.h
index 87de2ea2f..122143223 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -1,204 +1,204 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_HASH_H
#define BITCOIN_HASH_H
#include <crypto/common.h>
#include <crypto/ripemd160.h>
#include <crypto/sha256.h>
#include <prevector.h>
#include <serialize.h>
#include <uint256.h>
#include <version.h>
#include <vector>
typedef uint256 ChainCode;
/** A hasher class for Bitcoin's 256-bit hash (double SHA-256). */
class CHash256 {
private:
    CSHA256 sha;

public:
    static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE;

    // Finish the double-SHA256: finalize the first pass, then hash the
    // 32-byte digest again into `output` (must be exactly OUTPUT_SIZE bytes).
    void Finalize(Span<uint8_t> output) {
        assert(output.size() == OUTPUT_SIZE);
        uint8_t buf[CSHA256::OUTPUT_SIZE];
        sha.Finalize(buf);
        sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
    }

    // Feed more bytes into the first SHA-256 pass.
    CHash256 &Write(Span<const uint8_t> input) {
        sha.Write(input.data(), input.size());
        return *this;
    }

    // Restart the hasher so it can be reused.
    CHash256 &Reset() {
        sha.Reset();
        return *this;
    }
};
/** A hasher class for Bitcoin's 160-bit hash (SHA-256 + RIPEMD-160). */
class CHash160 {
private:
    CSHA256 sha;

public:
    static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE;

    // Finish the hash: finalize the SHA-256 pass, then RIPEMD-160 the
    // 32-byte digest into `output` (must be exactly OUTPUT_SIZE bytes).
    void Finalize(Span<uint8_t> output) {
        assert(output.size() == OUTPUT_SIZE);
        uint8_t buf[CSHA256::OUTPUT_SIZE];
        sha.Finalize(buf);
        CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
    }

    // Feed more bytes into the SHA-256 pass.
    CHash160 &Write(Span<const uint8_t> input) {
        sha.Write(input.data(), input.size());
        return *this;
    }

    // Restart the hasher so it can be reused.
    CHash160 &Reset() {
        sha.Reset();
        return *this;
    }
};
/** Compute the 256-bit hash of an object. */
template <typename T> inline uint256 Hash(const T &in1) {
    uint256 result;
    // Hashes the object's underlying bytes as exposed by MakeUCharSpan.
    CHash256().Write(MakeUCharSpan(in1)).Finalize(result);
    return result;
}

/** Compute the 256-bit hash of the concatenation of two objects. */
template <typename T1, typename T2>
inline uint256 Hash(const T1 &in1, const T2 &in2) {
    uint256 result;
    CHash256()
        .Write(MakeUCharSpan(in1))
        .Write(MakeUCharSpan(in2))
        .Finalize(result);
    return result;
}

/** Compute the 160-bit hash an object. */
template <typename T1> inline uint160 Hash160(const T1 &in1) {
    uint160 result;
    CHash160().Write(MakeUCharSpan(in1)).Finalize(result);
    return result;
}
/** A writer stream (for serialization) that computes a 256-bit hash. */
class CHashWriter {
private:
CSHA256 ctx;
const int nType;
const int nVersion;
public:
CHashWriter(int nTypeIn, int nVersionIn)
: nType(nTypeIn), nVersion(nVersionIn) {}
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
- void write(const char *pch, size_t size) {
- ctx.Write((const uint8_t *)pch, size);
+ void write(Span<const std::byte> src) {
+ ctx.Write(UCharCast(src.data()), src.size());
}
/**
* Compute the double-SHA256 hash of all data written to this object.
*
* Invalidates this object.
*/
uint256 GetHash() {
uint256 result;
ctx.Finalize(result.begin());
ctx.Reset()
.Write(result.begin(), CSHA256::OUTPUT_SIZE)
.Finalize(result.begin());
return result;
}
/**
* Compute the SHA256 hash of all data written to this object.
*
* Invalidates this object.
*/
uint256 GetSHA256() {
uint256 result;
ctx.Finalize(result.begin());
return result;
}
/**
* Returns the first 64 bits from the resulting hash.
*/
inline uint64_t GetCheapHash() {
uint256 result = GetHash();
return ReadLE64(result.begin());
}
template <typename T> CHashWriter &operator<<(const T &obj) {
// Serialize to this stream
::Serialize(*this, obj);
return (*this);
}
};
/**
* Reads data from an underlying stream, while hashing the read data.
*/
template <typename Source> class CHashVerifier : public CHashWriter {
private:
Source *source;
public:
explicit CHashVerifier(Source *source_)
: CHashWriter(source_->GetType(), source_->GetVersion()),
source(source_) {}
- void read(char *pch, size_t nSize) {
- source->read(pch, nSize);
- this->write(pch, nSize);
+ void read(Span<std::byte> dst) {
+ source->read(dst);
+ this->write(dst);
}
void ignore(size_t nSize) {
- char data[1024];
+ std::byte data[1024];
while (nSize > 0) {
size_t now = std::min<size_t>(nSize, 1024);
- read(data, now);
+ read({data, now});
nSize -= now;
}
}
template <typename T> CHashVerifier<Source> &operator>>(T &&obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
}
};
/** Compute the 256-bit hash of an object's serialization. */
template <typename T>
uint256 SerializeHash(const T &obj, int nType = SER_GETHASH,
                      int nVersion = PROTOCOL_VERSION) {
    // Serialize obj through a CHashWriter and return the double-SHA256 of
    // the serialized bytes.
    CHashWriter ss(nType, nVersion);
    ss << obj;
    return ss.GetHash();
}
uint32_t MurmurHash3(uint32_t nHashSeed, Span<const uint8_t> vDataToHash);
void BIP32Hash(const ChainCode &chainCode, uint32_t nChild, uint8_t header,
const uint8_t data[32], uint8_t output[64]);
#endif // BITCOIN_HASH_H
diff --git a/src/net.cpp b/src/net.cpp
index 13c1d6dc4..979aa5562 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1,3642 +1,3642 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
#include <net.h>
#include <addrdb.h>
#include <addrman.h>
#include <avalanche/avalanche.h>
#include <banman.h>
#include <clientversion.h>
#include <compat.h>
#include <config.h>
#include <consensus/consensus.h>
#include <crypto/sha256.h>
#include <dnsseeds.h>
#include <fs.h>
#include <i2p.h>
#include <netaddress.h>
#include <netbase.h>
#include <node/ui_interface.h>
#include <protocol.h>
#include <random.h>
#include <scheduler.h>
#include <util/sock.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/thread.h>
#include <util/trace.h>
#include <util/translation.h>
#ifdef WIN32
#include <cstring>
#else
#include <fcntl.h>
#endif
#ifdef USE_POLL
#include <poll.h>
#endif
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <unordered_map>
/** Maximum number of block-relay-only anchor connections */
static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;
static_assert(MAX_BLOCK_RELAY_ONLY_ANCHORS <=
static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS),
"MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed "
"MAX_BLOCK_RELAY_ONLY_CONNECTIONS.");
/** Anchor IP address database file name */
const char *const ANCHORS_DATABASE_FILENAME = "anchors.dat";
// How often to dump addresses to peers.dat
static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};
/**
* Number of DNS seeds to query when the number of connections is low.
*/
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3;
/**
* How long to delay before querying DNS seeds
*
* If we have more than THRESHOLD entries in addrman, then it's likely
* that we got those addresses from having previously connected to the P2P
* network, and that we'll be able to successfully reconnect to the P2P
* network via contacting one of them. So if that's the case, spend a
* little longer trying to connect to known peers before querying the
* DNS seeds.
*/
static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11};
static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5};
// "many" vs "few" peers
static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000;
/** The default timeframe for -maxuploadtarget. 1 day. */
static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24};
// We add a random period time (0 to 1 seconds) to feeler connections to prevent
// synchronization.
#define FEELER_SLEEP_WINDOW 1
/** Used to pass flags to the Bind() function */
enum BindFlags {
BF_NONE = 0,
BF_EXPLICIT = (1U << 0),
BF_REPORT_ERROR = (1U << 1),
/**
* Do not call AddLocal() for our special addresses, e.g., for incoming
* Tor connections, to prevent gossiping them over the network.
*/
BF_DONT_ADVERTISE = (1U << 2),
};
// The set of sockets cannot be modified while waiting
// The sleep time needs to be small to avoid new sockets stalling
static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50;
const std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
// SHA256("netgroup")[0:8]
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL;
// SHA256("localhostnonce")[0:8]
static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL;
// SHA256("localhostnonce")[8:16]
static const uint64_t RANDOMIZER_ID_EXTRAENTROPY = 0x94b05d41679a4ff7ULL;
// SHA256("addrcache")[0:8]
static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL;
//
// Global state variables
//
bool fDiscover = true;
bool fListen = true;
GlobalMutex g_maplocalhost_mutex;
std::map<CNetAddr, LocalServiceInfo>
mapLocalHost GUARDED_BY(g_maplocalhost_mutex);
static bool vfLimited[NET_MAX] GUARDED_BY(g_maplocalhost_mutex) = {};
// Thread-safely append a destination to the pending addr-fetch list.
void CConnman::AddAddrFetch(const std::string &strDest) {
    LOCK(m_addr_fetches_mutex);
    m_addr_fetches.push_back(strDest);
}
// Determine the TCP port this node listens on, in priority order:
// an explicit ":port" in -bind=, then a -whitebind= without the NoBan
// permission, then -port= (falling back to the chain's default port).
uint16_t GetListenPort() {
    // If -bind= is provided with ":port" part, use that (first one if multiple
    // are provided).
    for (const std::string &bind_arg : gArgs.GetArgs("-bind")) {
        CService bind_addr;
        constexpr uint16_t dummy_port = 0;

        if (Lookup(bind_arg, bind_addr, dummy_port, /*fAllowLookup=*/false)) {
            if (bind_addr.GetPort() != dummy_port) {
                return bind_addr.GetPort();
            }
        }
    }

    // Otherwise, if -whitebind= without NetPermissionFlags::NoBan is provided,
    // use that
    // (-whitebind= is required to have ":port").
    for (const std::string &whitebind_arg : gArgs.GetArgs("-whitebind")) {
        NetWhitebindPermissions whitebind;
        bilingual_str error;
        if (NetWhitebindPermissions::TryParse(whitebind_arg, whitebind,
                                              error)) {
            if (!NetPermissions::HasFlag(whitebind.m_flags,
                                         NetPermissionFlags::NoBan)) {
                return whitebind.m_service.GetPort();
            }
        }
    }

    // Otherwise, if -port= is provided, use that. Otherwise use the default
    // port.
    return static_cast<uint16_t>(
        gArgs.GetIntArg("-port", Params().GetDefaultPort()));
}
// find 'best' local address for a particular peer
bool GetLocal(CService &addr, const CNetAddr *paddrPeer) {
    if (!fListen) {
        return false;
    }

    int best_score = -1;
    int best_reachability = -1;
    {
        LOCK(g_maplocalhost_mutex);
        for (const auto &[local_addr, info] : mapLocalHost) {
            const int score = info.nScore;
            const int reachability = local_addr.GetReachabilityFrom(paddrPeer);
            // Prefer higher reachability; break ties on the higher score.
            if (reachability > best_reachability ||
                (reachability == best_reachability && score > best_score)) {
                addr = CService(local_addr, info.nPort);
                best_reachability = reachability;
                best_score = score;
            }
        }
    }
    return best_score >= 0;
}
//! Convert the pnSeed6 array into usable address objects.
static std::vector<CAddress>
convertSeed6(const std::vector<SeedSpec6> &vSeedsIn) {
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps. Seed nodes are given
// a random 'last seen time' of between one and two weeks ago.
const auto one_week{7 * 24h};
std::vector<CAddress> vSeedsOut;
vSeedsOut.reserve(vSeedsIn.size());
FastRandomContext rng;
for (const auto &seed_in : vSeedsIn) {
struct in6_addr ip;
memcpy(&ip, seed_in.addr, sizeof(ip));
CAddress addr(CService(ip, seed_in.port),
GetDesirableServiceFlags(NODE_NONE));
addr.nTime =
rng.rand_uniform_delay(Now<NodeSeconds>() - one_week, -one_week);
vSeedsOut.push_back(addr);
}
return vSeedsOut;
}
// Get best local address for a particular peer as a CService. Otherwise,
// return the unroutable 0.0.0.0 but filled in with the normal parameters,
// since the IP may be changed to a useful one by discovery.
CService GetLocalAddress(const CNetAddr &addrPeer) {
    CService best;
    if (GetLocal(best, &addrPeer)) {
        return CService{best};
    }
    // No suitable local address known: placeholder with our listen port.
    return CService{CNetAddr(), GetListenPort()};
}
/** Score of a known local address, or 0 when the address is unknown. */
static int GetnScore(const CService &addr) {
    LOCK(g_maplocalhost_mutex);
    const auto it = mapLocalHost.find(addr);
    if (it == mapLocalHost.end()) {
        return 0;
    }
    return it->second.nScore;
}
// Is our peer's addrLocal potentially useful as an external IP source?
bool IsPeerAddrLocalGood(CNode *pnode) {
    const CService addrLocal = pnode->GetAddrLocal();
    if (!fDiscover) {
        return false;
    }
    return pnode->addr.IsRoutable() && addrLocal.IsRoutable() &&
           IsReachable(addrLocal.GetNetwork());
}
/**
 * Decide which of our local addresses (if any) to advertise to `node`.
 * Returns std::nullopt when nothing routable is available to advertise.
 */
std::optional<CService> GetLocalAddrForPeer(CNode &node) {
    CService addrLocal{GetLocalAddress(node.addr)};
    if (gArgs.GetBoolArg("-addrmantest", false)) {
        // use IPv4 loopback during addrmantest
        addrLocal = CService(LookupNumeric("127.0.0.1", GetListenPort()));
    }
    // If discovery is enabled, sometimes give our peer the address it
    // tells us that it sees us as in case it has a better idea of our
    // address than we do.
    FastRandomContext rng;
    // Addresses scored above LOCAL_MANUAL are replaced by the peer's view
    // only 1/8 of the time; others 1/2 of the time — and always when our
    // own candidate is unroutable.
    if (IsPeerAddrLocalGood(&node) &&
        (!addrLocal.IsRoutable() ||
         rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) {
        if (node.IsInboundConn()) {
            // For inbound connections, assume both the address and the port
            // as seen from the peer.
            addrLocal = CService{node.GetAddrLocal()};
        } else {
            // For outbound connections, assume just the address as seen from
            // the peer and leave the port in `addrLocal` as returned by
            // `GetLocalAddress()` above. The peer has no way to observe our
            // listening port when we have initiated the connection.
            addrLocal.SetIP(node.GetAddrLocal());
        }
    }
    if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) {
        LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n",
                 addrLocal.ToString(), node.GetId());
        return addrLocal;
    }
    // Address is unroutable. Don't advertise.
    return std::nullopt;
}
// Learn a new local address. Returns true when the address was recorded
// (or refreshed), false when it was rejected as unroutable, unreachable,
// or insufficiently trusted while discovery is disabled.
bool AddLocal(const CService &addr, int nScore) {
    if (!addr.IsRoutable()) {
        return false;
    }

    // Without -discover, only manually configured (or better) addresses
    // are accepted.
    if (!fDiscover && nScore < LOCAL_MANUAL) {
        return false;
    }

    if (!IsReachable(addr)) {
        return false;
    }

    LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);

    {
        LOCK(g_maplocalhost_mutex);
        const auto [it, is_newly_added] =
            mapLocalHost.emplace(addr, LocalServiceInfo());
        LocalServiceInfo &info = it->second;
        if (is_newly_added || nScore >= info.nScore) {
            // On a re-announcement with an equal-or-higher score, store one
            // more than the given score so a repeated hint outranks a fresh
            // entry at the same nominal score.
            info.nScore = nScore + !is_newly_added;
            info.nPort = addr.GetPort();
        }
    }

    return true;
}
bool AddLocal(const CNetAddr &addr, int nScore) {
return AddLocal(CService(addr, GetListenPort()), nScore);
}
/** Forget a previously learned local address. */
void RemoveLocal(const CService &addr) {
    LOCK(g_maplocalhost_mutex);
    const std::string addr_str = addr.ToString();
    LogPrintf("RemoveLocal(%s)\n", addr_str);
    // Erasing an unknown address is a harmless no-op.
    mapLocalHost.erase(addr);
}
/** Mark a network as reachable or limited (unreachable). */
void SetReachable(enum Network net, bool reachable) {
    // The unroutable and internal pseudo-networks are never configurable.
    const bool immutable_net = (net == NET_UNROUTABLE || net == NET_INTERNAL);
    if (immutable_net) {
        return;
    }
    LOCK(g_maplocalhost_mutex);
    vfLimited[net] = !reachable;
}
bool IsReachable(enum Network net) {
LOCK(g_maplocalhost_mutex);
return !vfLimited[net];
}
bool IsReachable(const CNetAddr &addr) {
return IsReachable(addr.GetNetwork());
}
/** vote for a local address */
bool SeenLocal(const CService &addr) {
    LOCK(g_maplocalhost_mutex);
    auto entry = mapLocalHost.find(addr);
    const bool known = (entry != mapLocalHost.end());
    if (known) {
        // Each sighting bumps the address' score.
        ++entry->second.nScore;
    }
    return known;
}
/** check whether a given address is potentially local */
bool IsLocal(const CService &addr) {
    LOCK(g_maplocalhost_mutex);
    return mapLocalHost.find(addr) != mapLocalHost.end();
}
/** Find a connected node by bare network address, or nullptr. */
CNode *CConnman::FindNode(const CNetAddr &ip) {
    LOCK(m_nodes_mutex);
    for (CNode *node : m_nodes) {
        const CNetAddr node_ip = static_cast<CNetAddr>(node->addr);
        if (node_ip == ip) {
            return node;
        }
    }
    return nullptr;
}
/** Find a connected node whose address falls in the subnet, or nullptr. */
CNode *CConnman::FindNode(const CSubNet &subNet) {
    LOCK(m_nodes_mutex);
    for (CNode *node : m_nodes) {
        const bool in_subnet = subNet.Match(static_cast<CNetAddr>(node->addr));
        if (in_subnet) {
            return node;
        }
    }
    return nullptr;
}
/** Find a connected node by its address name, or nullptr. */
CNode *CConnman::FindNode(const std::string &addrName) {
    LOCK(m_nodes_mutex);
    for (CNode *node : m_nodes) {
        const bool name_matches = (node->m_addr_name == addrName);
        if (name_matches) {
            return node;
        }
    }
    return nullptr;
}
/** Find a connected node by address-plus-port, or nullptr. */
CNode *CConnman::FindNode(const CService &addr) {
    LOCK(m_nodes_mutex);
    for (CNode *node : m_nodes) {
        const CService node_service = static_cast<CService>(node->addr);
        if (node_service == addr) {
            return node;
        }
    }
    return nullptr;
}
/** Whether we already have a connection matching this address. */
bool CConnman::AlreadyConnectedToAddress(const CAddress &addr) {
    // Match either the bare network address or the "ip:port" name.
    if (FindNode(static_cast<CNetAddr>(addr))) {
        return true;
    }
    return FindNode(addr.ToStringIPPort()) != nullptr;
}
/**
 * Self-connect detection: returns false when an incoming VERSION nonce
 * matches the local nonce of one of our own still-handshaking outbound
 * connections.
 */
bool CConnman::CheckIncomingNonce(uint64_t nonce) {
    LOCK(m_nodes_mutex);
    for (const CNode *node : m_nodes) {
        const bool handshaking_outbound =
            !node->fSuccessfullyConnected && !node->IsInboundConn();
        if (handshaking_outbound && node->GetLocalNonce() == nonce) {
            return false;
        }
    }
    return true;
}
/** Get the bind address for a socket as CAddress */
static CAddress GetBindAddress(SOCKET sock) {
    CAddress addr_bind;
    if (sock == INVALID_SOCKET) {
        return addr_bind;
    }
    struct sockaddr_storage sockaddr_bind;
    socklen_t sockaddr_bind_len = sizeof(sockaddr_bind);
    if (getsockname(sock, (struct sockaddr *)&sockaddr_bind,
                    &sockaddr_bind_len) == 0) {
        addr_bind.SetSockAddr((const struct sockaddr *)&sockaddr_bind);
    } else {
        LogPrint(BCLog::NET, "Warning: getsockname failed\n");
    }
    return addr_bind;
}
/**
 * Open a new outbound connection, either to a concrete address or to a host
 * name to be resolved. Returns a new CNode (with one reference held for the
 * caller) on success, or nullptr on any failure. Must never be used for
 * inbound connections.
 */
CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest,
                             bool fCountFailure, ConnectionType conn_type) {
    assert(conn_type != ConnectionType::INBOUND);

    if (pszDest == nullptr) {
        // Never connect to one of our own local addresses.
        if (IsLocal(addrConnect)) {
            return nullptr;
        }

        // Look for an existing connection
        CNode *pnode = FindNode(static_cast<CService>(addrConnect));
        if (pnode) {
            LogPrintf("Failed to open new connection, already connected\n");
            return nullptr;
        }
    }

    LogPrint(
        BCLog::NET, "trying connection %s lastseen=%.1fhrs\n",
        pszDest ? pszDest : addrConnect.ToString(),
        Ticks<HoursDouble>(pszDest ? 0h : AdjustedTime() - addrConnect.nTime));

    // Resolve
    const uint16_t default_port{pszDest != nullptr
                                    ? Params().GetDefaultPort(pszDest)
                                    : Params().GetDefaultPort()};
    if (pszDest) {
        std::vector<CService> resolved;
        if (Lookup(pszDest, resolved, default_port,
                   fNameLookup && !HaveNameProxy(), 256) &&
            !resolved.empty()) {
            // Pick one of the resolved addresses at random, so we don't
            // always use the first DNS record.
            addrConnect =
                CAddress(resolved[GetRand(resolved.size())], NODE_NONE);
            if (!addrConnect.IsValid()) {
                LogPrint(BCLog::NET,
                         "Resolver returned invalid address %s for %s\n",
                         addrConnect.ToString(), pszDest);
                return nullptr;
            }
            // It is possible that we already have a connection to the IP/port
            // pszDest resolved to. In that case, drop the connection that was
            // just created.
            LOCK(m_nodes_mutex);
            CNode *pnode = FindNode(static_cast<CService>(addrConnect));
            if (pnode) {
                LogPrintf("Failed to open new connection, already connected\n");
                return nullptr;
            }
        }
    }

    // Connect
    bool connected = false;
    std::unique_ptr<Sock> sock;
    proxyType proxy;
    CAddress addr_bind;
    assert(!addr_bind.IsValid());

    if (addrConnect.IsValid()) {
        bool proxyConnectionFailed = false;

        if (addrConnect.GetNetwork() == NET_I2P &&
            m_i2p_sam_session.get() != nullptr) {
            // I2P peers are reached through the SAM session, which also
            // reports our own I2P endpoint to use as addr_bind.
            i2p::Connection conn;
            if (m_i2p_sam_session->Connect(addrConnect, conn,
                                           proxyConnectionFailed)) {
                connected = true;
                sock = std::move(conn.sock);
                addr_bind = CAddress{conn.me, NODE_NONE};
            }
        } else if (GetProxy(addrConnect.GetNetwork(), proxy)) {
            // A proxy is configured for this network: tunnel through it.
            sock = CreateSock(proxy.proxy);
            if (!sock) {
                return nullptr;
            }
            connected = ConnectThroughProxy(
                proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), *sock,
                nConnectTimeout, proxyConnectionFailed);
        } else {
            // no proxy needed (none set for target network)
            sock = CreateSock(addrConnect);
            if (!sock) {
                return nullptr;
            }
            connected =
                ConnectSocketDirectly(addrConnect, *sock, nConnectTimeout,
                                      conn_type == ConnectionType::MANUAL);
        }
        if (!proxyConnectionFailed) {
            // If a connection to the node was attempted, and failure (if any)
            // is not caused by a problem connecting to the proxy, mark this as
            // an attempt.
            addrman.Attempt(addrConnect, fCountFailure);
        }
    } else if (pszDest && GetNameProxy(proxy)) {
        // Unresolved host name: let the name proxy (e.g. Tor) resolve it.
        sock = CreateSock(proxy.proxy);
        if (!sock) {
            return nullptr;
        }
        std::string host;
        uint16_t port{default_port};
        SplitHostPort(std::string(pszDest), port, host);
        bool proxyConnectionFailed;
        connected = ConnectThroughProxy(proxy, host, port, *sock,
                                        nConnectTimeout, proxyConnectionFailed);
    }
    if (!connected) {
        return nullptr;
    }

    // Add node
    NodeId id = GetNewNodeId();
    // Keyed per-node nonces; the localhost nonce is later used to detect
    // accidental connections to ourselves.
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE)
                         .Write(id)
                         .Finalize();
    uint64_t extra_entropy =
        GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY)
            .Write(id)
            .Finalize();
    if (!addr_bind.IsValid()) {
        addr_bind = GetBindAddress(sock->Get());
    }
    CNode *pnode = new CNode(
        id, sock->Release(), addrConnect, CalculateKeyedNetGroup(addrConnect),
        nonce, extra_entropy, addr_bind, pszDest ? pszDest : "", conn_type,
        /* inbound_onion */ false);
    // Hold a reference on behalf of the caller.
    pnode->AddRef();

    // We're making a new connection, harvest entropy from the time (and our
    // peer count)
    RandAddEvent(uint32_t(id));

    return pnode;
}
/** Flag the node for disconnection and close its socket if still open. */
void CNode::CloseSocketDisconnect() {
    fDisconnect = true;
    LOCK(cs_hSocket);
    if (hSocket == INVALID_SOCKET) {
        return;
    }
    LogPrint(BCLog::NET, "disconnecting peer=%d\n", id);
    CloseSocket(hSocket);
}
/**
 * OR into `flags` the permissions of every whitelist entry whose subnet
 * contains `addr`.
 */
void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags &flags,
                                           const CNetAddr &addr) const {
    for (const auto &entry : vWhitelistedRange) {
        if (!entry.m_subnet.Match(addr)) {
            continue;
        }
        NetPermissions::AddFlag(flags, entry.m_flags);
    }
}
/** Human-readable name of a connection type, as used in logs and RPC. */
std::string ConnectionTypeAsString(ConnectionType conn_type) {
    switch (conn_type) {
        case ConnectionType::INBOUND:
            return "inbound";
        case ConnectionType::MANUAL:
            return "manual";
        case ConnectionType::FEELER:
            return "feeler";
        case ConnectionType::OUTBOUND_FULL_RELAY:
            return "outbound-full-relay";
        case ConnectionType::BLOCK_RELAY:
            return "block-relay-only";
        case ConnectionType::ADDR_FETCH:
            return "addr-fetch";
        case ConnectionType::AVALANCHE_OUTBOUND:
            return "avalanche";
    } // no default case, so the compiler can warn about missing cases

    // Unreachable if every enumerator is handled above.
    assert(false);
}
/** Thread-safe copy of the address this peer reported seeing us as. */
CService CNode::GetAddrLocal() const {
    AssertLockNotHeld(m_addr_local_mutex);
    LOCK(m_addr_local_mutex);
    // Return a copy so the member never escapes the lock by reference.
    CService addr_copy = addrLocal;
    return addr_copy;
}
/** Record the address the peer sees us as; only the first value sticks. */
void CNode::SetAddrLocal(const CService &addrLocalIn) {
    AssertLockNotHeld(m_addr_local_mutex);
    LOCK(m_addr_local_mutex);
    if (!addrLocal.IsValid()) {
        addrLocal = addrLocalIn;
        return;
    }
    // Refuse to overwrite an already-known value.
    error("Addr local already set for node: %i. Refusing to change from %s "
          "to %s",
          id, addrLocal.ToString(), addrLocalIn.ToString());
}
/** Network this peer is connected through (onion for inbound-onion). */
Network CNode::ConnectedThroughNetwork() const {
    if (m_inbound_onion) {
        return NET_ONION;
    }
    return addr.GetNetClass();
}
/**
 * Snapshot this node's statistics into `stats` (used by e.g. the
 * getpeerinfo RPC). Acquires the per-field locks it needs.
 */
void CNode::copyStats(CNodeStats &stats) {
    stats.nodeid = this->GetId();
    stats.addr = addr;
    stats.addrBind = addrBind;
    stats.m_network = ConnectedThroughNetwork();
    stats.m_last_send = m_last_send;
    stats.m_last_recv = m_last_recv;
    stats.m_last_tx_time = m_last_tx_time;
    stats.m_last_proof_time = m_last_proof_time;
    stats.m_last_block_time = m_last_block_time;
    stats.m_connected = m_connected;
    stats.nTimeOffset = nTimeOffset;
    stats.m_addr_name = m_addr_name;
    stats.nVersion = nVersion;
    {
        LOCK(m_subver_mutex);
        stats.cleanSubVer = cleanSubVer;
    }
    stats.fInbound = IsInboundConn();
    stats.m_bip152_highbandwidth_to = m_bip152_highbandwidth_to;
    stats.m_bip152_highbandwidth_from = m_bip152_highbandwidth_from;
    {
        // Send-side traffic accounting.
        LOCK(cs_vSend);
        stats.mapSendBytesPerMsgCmd = mapSendBytesPerMsgCmd;
        stats.nSendBytes = nSendBytes;
    }
    {
        // Receive-side traffic accounting.
        LOCK(cs_vRecv);
        stats.mapRecvBytesPerMsgCmd = mapRecvBytesPerMsgCmd;
        stats.nRecvBytes = nRecvBytes;
    }
    stats.m_permissionFlags = m_permissionFlags;
    stats.m_last_ping_time = m_last_ping_time;
    stats.m_min_ping_time = m_min_ping_time;

    // Leave string empty if addrLocal invalid (not filled in yet)
    CService addrLocalUnlocked = GetAddrLocal();
    stats.addrLocal =
        addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : "";

    stats.m_conn_type = m_conn_type;

    // Only populated when avalanche is enabled for this peer.
    stats.m_availabilityScore = m_avalanche_enabled
                                    ? std::make_optional(getAvailabilityScore())
                                    : std::nullopt;
}
/**
 * Feed raw bytes received off the wire into the transport deserializer.
 * Sets `complete` to true if at least one full message was decomposed.
 * Returns false on a deserialization error (caller should disconnect).
 */
bool CNode::ReceiveMsgBytes(const Config &config, Span<const uint8_t> msg_bytes,
                            bool &complete) {
    complete = false;
    const auto time = GetTime<std::chrono::microseconds>();
    LOCK(cs_vRecv);
    m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time);
    nRecvBytes += msg_bytes.size();
    while (msg_bytes.size() > 0) {
        // Absorb network data.
        // NOTE(review): this loop only terminates if Read() advances
        // msg_bytes past the bytes it consumed (i.e. takes the span by
        // reference) — confirm against the deserializer interface.
        int handled = m_deserializer->Read(config, msg_bytes);
        if (handled < 0) {
            return false;
        }

        if (m_deserializer->Complete()) {
            // decompose a transport agnostic CNetMessage from the deserializer
            CNetMessage msg = m_deserializer->GetMessage(config, time);

            // Store received bytes per message command to prevent a memory
            // DOS, only allow valid commands.
            mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.m_type);
            if (i == mapRecvBytesPerMsgCmd.end()) {
                // Unknown command: account under the catch-all bucket.
                i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER);
            }
            assert(i != mapRecvBytesPerMsgCmd.end());
            i->second += msg.m_raw_message_size;

            // push the message to the process queue,
            vRecvMsg.push_back(std::move(msg));

            complete = true;
        }
    }

    return true;
}
/**
 * Accumulate incoming bytes into the header parse buffer. Once the header
 * is complete, deserialize and sanity-check it, then switch to payload
 * mode. Returns the number of bytes consumed, or -1 on a malformed or
 * oversized header.
 */
int V1TransportDeserializer::readHeader(const Config &config,
                                        Span<const uint8_t> msg_bytes) {
    // copy data to temporary parsing buffer
    uint32_t nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos;
    uint32_t nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());

    memcpy(&hdrbuf[nHdrPos], msg_bytes.data(), nCopy);
    nHdrPos += nCopy;

    // if header incomplete, exit
    if (nHdrPos < CMessageHeader::HEADER_SIZE) {
        return nCopy;
    }

    // deserialize to CMessageHeader
    try {
        hdrbuf >> hdr;
    } catch (const std::exception &) {
        return -1;
    }

    // Reject oversized messages
    if (hdr.IsOversized(config)) {
        LogPrint(BCLog::NET, "Oversized header detected\n");
        return -1;
    }

    // switch state to reading message data
    in_data = true;

    return nCopy;
}
/**
 * Append incoming payload bytes to vRecv and to the running checksum
 * hasher. Returns the number of bytes consumed.
 */
int V1TransportDeserializer::readData(Span<const uint8_t> msg_bytes) {
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size());

    if (vRecv.size() < nDataPos + nCopy) {
        // Allocate up to 256 KiB ahead, but never more than the total message
        // size.
        vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
    }

    // Hash bytes as they arrive so the checksum is ready when the message
    // completes.
    hasher.Write(msg_bytes.first(nCopy));
    memcpy(&vRecv[nDataPos], msg_bytes.data(), nCopy);
    nDataPos += nCopy;

    return nCopy;
}
/**
 * Double-SHA256 of the completed payload; finalized lazily on first
 * request and cached thereafter.
 */
const uint256 &V1TransportDeserializer::GetMessageHash() const {
    assert(Complete());
    const bool not_yet_finalized = data_hash.IsNull();
    if (not_yet_finalized) {
        hasher.Finalize(data_hash);
    }
    return data_hash;
}
/**
 * Decompose the completed wire payload into a CNetMessage, recording
 * header/netmagic/checksum validity, then reset this deserializer for the
 * next message.
 */
CNetMessage
V1TransportDeserializer::GetMessage(const Config &config,
                                    const std::chrono::microseconds time) {
    // decompose a single CNetMessage from the TransportDeserializer
    CNetMessage msg(std::move(vRecv));

    // store state about valid header, netmagic and checksum
    msg.m_valid_header = hdr.IsValid(config);
    // FIXME Split CheckHeaderMagicAndCommand() into CheckHeaderMagic() and
    // CheckCommand() to prevent the net magic check code duplication.
    msg.m_valid_netmagic =
        (memcmp(std::begin(hdr.pchMessageStart),
                std::begin(config.GetChainParams().NetMagic()),
                CMessageHeader::MESSAGE_START_SIZE) == 0);
    uint256 hash = GetMessageHash();

    // store command string, payload size
    msg.m_type = hdr.GetCommand();
    msg.m_message_size = hdr.nMessageSize;
    // Raw size also counts the header bytes.
    msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;

    // We just received a message off the wire, harvest entropy from the time
    // (and the message checksum)
    RandAddEvent(ReadLE32(hash.begin()));

    msg.m_valid_checksum = (memcmp(hash.begin(), hdr.pchChecksum,
                                   CMessageHeader::CHECKSUM_SIZE) == 0);
    if (!msg.m_valid_checksum) {
        LogPrint(BCLog::NET,
                 "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n",
                 SanitizeString(msg.m_type), msg.m_message_size,
                 HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
                 HexStr(hdr.pchChecksum));
    }

    // store receive time
    msg.m_time = time;

    // reset the network deserializer (prepare for the next message)
    Reset();
    return msg;
}
/**
 * Build the v1 wire header (net magic, command, length, checksum) for
 * `msg` into `header`.
 */
void V1TransportSerializer::prepareForTransport(const Config &config,
                                                CSerializedNetMsg &msg,
                                                std::vector<uint8_t> &header) {
    // create dbl-sha256 checksum
    uint256 hash = Hash(msg.data);

    // create header
    CMessageHeader hdr(config.GetChainParams().NetMagic(), msg.m_type.c_str(),
                       msg.data.size());
    memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);

    // serialize header
    header.reserve(CMessageHeader::HEADER_SIZE);
    // The unnamed CVectorWriter serializes hdr into `header` as a side
    // effect of its construction.
    CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr};
}
/**
 * Flush as much of `node.vSendMsg` to the socket as the kernel will accept.
 * Fully-sent messages are popped from the queue; a partial send leaves
 * nSendOffset pointing into the current message. Returns the number of
 * bytes sent.
 * NOTE(review): appears to assume the caller serializes access to
 * vSendMsg/nSendOffset (e.g. holds cs_vSend) — confirm at call sites.
 */
size_t CConnman::SocketSendData(CNode &node) const {
    size_t nSentSize = 0;
    size_t nMsgCount = 0;

    for (const auto &data : node.vSendMsg) {
        assert(data.size() > node.nSendOffset);
        int nBytes = 0;

        {
            LOCK(node.cs_hSocket);
            if (node.hSocket == INVALID_SOCKET) {
                break;
            }

            // MSG_NOSIGNAL suppresses SIGPIPE on a closed peer socket;
            // MSG_DONTWAIT keeps the call non-blocking.
            nBytes = send(
                node.hSocket,
                reinterpret_cast<const char *>(data.data()) + node.nSendOffset,
                data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
        }

        if (nBytes == 0) {
            // couldn't send anything at all
            break;
        }

        if (nBytes < 0) {
            // error
            int nErr = WSAGetLastError();
            // Transient errors are expected; anything else disconnects.
            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE &&
                nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                LogPrint(BCLog::NET, "socket send error for peer=%d: %s\n",
                         node.GetId(), NetworkErrorString(nErr));
                node.CloseSocketDisconnect();
            }

            break;
        }

        assert(nBytes > 0);
        node.m_last_send = GetTime<std::chrono::seconds>();
        node.nSendBytes += nBytes;
        node.nSendOffset += nBytes;
        nSentSize += nBytes;
        if (node.nSendOffset != data.size()) {
            // could not send full message; stop sending more
            break;
        }

        // Whole message sent: reset the offset, update buffer accounting,
        // and possibly un-pause sending.
        node.nSendOffset = 0;
        node.nSendSize -= data.size();
        node.fPauseSend = node.nSendSize > nSendBufferMaxSize;
        nMsgCount++;
    }

    // Drop the fully-sent messages from the front of the queue.
    node.vSendMsg.erase(node.vSendMsg.begin(),
                        node.vSendMsg.begin() + nMsgCount);

    if (node.vSendMsg.empty()) {
        assert(node.nSendOffset == 0);
        assert(node.nSendSize == 0);
    }

    return nSentSize;
}
// Candidates with a larger best-observed ping sort first (more evictable).
static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a,
                                          const NodeEvictionCandidate &b) {
    return b.m_min_ping_time < a.m_min_ping_time;
}
// Most recently connected candidates sort first (more evictable).
static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a,
                                            const NodeEvictionCandidate &b) {
    return b.m_connected < a.m_connected;
}
// Order candidates by their keyed (salted) net-group hash.
static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a,
                                 const NodeEvictionCandidate &b) {
    return b.nKeyedNetGroup > a.nKeyedNetGroup;
}
static bool CompareNodeBlockTime(const NodeEvictionCandidate &a,
                                 const NodeEvictionCandidate &b) {
    // Many peers have never relayed a block, so ties on the last block
    // time are common; fall through to service relevance, then to
    // connection age (youngest first).
    if (a.m_last_block_time != b.m_last_block_time) {
        return a.m_last_block_time < b.m_last_block_time;
    }
    if (a.fRelevantServices != b.fRelevantServices) {
        return !a.fRelevantServices;
    }
    return b.m_connected < a.m_connected;
}
static bool CompareNodeTXTime(const NodeEvictionCandidate &a,
                              const NodeEvictionCandidate &b) {
    // Many peers have not yet relayed a transaction, so ties on the last
    // tx time are common; fall through to the relay flag, bloom-filter
    // usage, and finally connection age (youngest first).
    if (a.m_last_tx_time != b.m_last_tx_time) {
        return a.m_last_tx_time < b.m_last_tx_time;
    }
    if (a.m_relay_txs != b.m_relay_txs) {
        return !a.m_relay_txs;
    }
    if (a.fBloomFilter != b.fBloomFilter) {
        return !b.fBloomFilter;
    }
    return b.m_connected < a.m_connected;
}
static bool CompareNodeProofTime(const NodeEvictionCandidate &a,
                                 const NodeEvictionCandidate &b) {
    // Many peers have not yet relayed a proof (and the field is unused
    // when avalanche is disabled), so ties are common; fall back to
    // connection age (youngest first).
    if (a.m_last_proof_time != b.m_last_proof_time) {
        return a.m_last_proof_time < b.m_last_proof_time;
    }
    return b.m_connected < a.m_connected;
}
// Pick out the potential block-relay only peers, and sort them by last block
// time.
static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a,
                                          const NodeEvictionCandidate &b) {
    if (a.m_relay_txs != b.m_relay_txs) {
        return !b.m_relay_txs;
    }
    if (a.m_last_block_time != b.m_last_block_time) {
        return a.m_last_block_time < b.m_last_block_time;
    }
    if (a.fRelevantServices != b.fRelevantServices) {
        return !a.fRelevantServices;
    }
    return b.m_connected < a.m_connected;
}
static bool CompareNodeAvailabilityScore(const NodeEvictionCandidate &a,
                                         const NodeEvictionCandidate &b) {
    // Scores tie when absent or not yet computed; break ties on connection
    // age (youngest first).
    if (a.availabilityScore != b.availabilityScore) {
        return a.availabilityScore < b.availabilityScore;
    }
    return b.m_connected < a.m_connected;
}
/**
 * Sort eviction candidates by network/localhost and connection uptime.
 * Candidates near the beginning are more likely to be evicted, and those
 * near the end are more likely to be protected, e.g. less likely to be evicted.
 * - First, nodes that are not `is_local` and that do not belong to `network`,
 *   sorted by increasing uptime (from most recently connected to connected
 *   longer).
 * - Then, nodes that are `is_local` or belong to `network`, sorted by
 *   increasing uptime.
 */
struct CompareNodeNetworkTime {
    const bool m_is_local;
    const Network m_network;
    CompareNodeNetworkTime(bool is_local, Network network)
        : m_is_local(is_local), m_network(network) {}
    bool operator()(const NodeEvictionCandidate &a,
                    const NodeEvictionCandidate &b) const {
        // In localhost mode, non-local candidates sort before local ones.
        if (m_is_local && a.m_is_local != b.m_is_local) {
            return !a.m_is_local;
        }
        // Candidates outside the target network sort before those in it.
        const bool a_match = (a.m_network == m_network);
        const bool b_match = (b.m_network == m_network);
        if (a_match != b_match) {
            return b_match;
        }
        // Within each group: youngest connections first.
        return b.m_connected < a.m_connected;
    }
};
//! Sort an array by the specified comparator, then erase the last K elements
//! where predicate is true.
template <typename T, typename Comparator>
static void EraseLastKElements(
    std::vector<T> &elements, Comparator comparator, size_t k,
    std::function<bool(const NodeEvictionCandidate &)> predicate =
        [](const NodeEvictionCandidate &n) { return true; }) {
    std::sort(elements.begin(), elements.end(), comparator);
    // Only the trailing min(k, size) elements are eligible for removal.
    const size_t tail = std::min(k, elements.size());
    const auto tail_begin = elements.end() - tail;
    elements.erase(std::remove_if(tail_begin, elements.end(), predicate),
                   elements.end());
}
/**
 * Remove (i.e. protect) up to half of the eviction candidates: up to a
 * quarter drawn from disadvantaged networks (I2P, localhost, Tor/onion),
 * the remainder by longest connection uptime. Whatever remains in the
 * vector is still up for eviction.
 */
void ProtectEvictionCandidatesByRatio(
    std::vector<NodeEvictionCandidate> &eviction_candidates) {
    // Protect the half of the remaining nodes which have been connected the
    // longest. This replicates the non-eviction implicit behavior, and
    // precludes attacks that start later.
    // To promote the diversity of our peer connections, reserve up to half of
    // these protected spots for Tor/onion, localhost and I2P peers, even if
    // they're not the longest uptime overall. This helps protect these
    // higher-latency peers that tend to be otherwise disadvantaged under our
    // eviction criteria.
    const size_t initial_size = eviction_candidates.size();
    const size_t total_protect_size{initial_size / 2};

    // Disadvantaged networks to protect: I2P, localhost and Tor/onion. In case
    // of equal counts, earlier array members have first opportunity to recover
    // unused slots from the previous iteration.
    struct Net {
        bool is_local;
        Network id;
        size_t count;
    };
    // NET_MAX is a placeholder id for the localhost entry, which matches on
    // c.m_is_local rather than on the network id.
    std::array<Net, 3> networks{{{false, NET_I2P, 0},
                                 {/* localhost */ true, NET_MAX, 0},
                                 {false, NET_ONION, 0}}};

    // Count and store the number of eviction candidates per network.
    for (Net &n : networks) {
        n.count = std::count_if(
            eviction_candidates.cbegin(), eviction_candidates.cend(),
            [&n](const NodeEvictionCandidate &c) {
                return n.is_local ? c.m_is_local : c.m_network == n.id;
            });
    }
    // Sort `networks` by ascending candidate count, to give networks having
    // fewer candidates the first opportunity to recover unused protected slots
    // from the previous iteration.
    std::stable_sort(networks.begin(), networks.end(),
                     [](Net a, Net b) { return a.count < b.count; });

    // Protect up to 25% of the eviction candidates by disadvantaged network.
    const size_t max_protect_by_network{total_protect_size / 2};
    size_t num_protected{0};

    while (num_protected < max_protect_by_network) {
        // Count the number of disadvantaged networks from which we have peers
        // to protect.
        auto num_networks = std::count_if(networks.begin(), networks.end(),
                                          [](const Net &n) { return n.count; });
        if (num_networks == 0) {
            break;
        }
        const size_t disadvantaged_to_protect{max_protect_by_network -
                                              num_protected};
        // Split the remaining budget evenly across the networks that still
        // have candidates, protecting at least one per network per round.
        const size_t protect_per_network{std::max(
            disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
        // Early exit flag if there are no remaining candidates by disadvantaged
        // network.
        bool protected_at_least_one{false};
        for (Net &n : networks) {
            if (n.count == 0) {
                continue;
            }
            const size_t before = eviction_candidates.size();
            EraseLastKElements(
                eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
                protect_per_network, [&n](const NodeEvictionCandidate &c) {
                    return n.is_local ? c.m_is_local : c.m_network == n.id;
                });
            const size_t after = eviction_candidates.size();
            if (before > after) {
                protected_at_least_one = true;
                const size_t delta{before - after};
                num_protected += delta;
                if (num_protected >= max_protect_by_network) {
                    break;
                }
                n.count -= delta;
            }
        }
        if (!protected_at_least_one) {
            break;
        }
    }

    // Calculate how many we removed, and update our total number of peers that
    // we want to protect based on uptime accordingly.
    assert(num_protected == initial_size - eviction_candidates.size());
    const size_t remaining_to_protect{total_protect_size - num_protected};
    EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected,
                       remaining_to_protect);
}
[[nodiscard]] std::optional<NodeId>
SelectNodeToEvict(std::vector<NodeEvictionCandidate> &&vEvictionCandidates) {
// Protect connections with certain characteristics
// Deterministically select 4 peers to protect by netgroup.
// An attacker cannot predict which netgroups will be protected
EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4);
// Protect the 8 nodes with the lowest minimum ping time.
// An attacker cannot manipulate this metric without physically moving nodes
// closer to the target.
EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8);
// Protect 4 nodes that most recently sent us novel transactions accepted
// into our mempool. An attacker cannot manipulate this metric without
// performing useful work.
EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
// Protect 4 nodes that most recently sent us novel proofs accepted
// into our proof pool. An attacker cannot manipulate this metric without
// performing useful work.
// TODO this filter must happen before the last tx time once avalanche is
// enabled for pre-consensus.
EraseLastKElements(vEvictionCandidates, CompareNodeProofTime, 4);
// Protect up to 8 non-tx-relay peers that have sent us novel blocks.
EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
[](const NodeEvictionCandidate &n) {
return !n.m_relay_txs && n.fRelevantServices;
});
// Protect 4 nodes that most recently sent us novel blocks.
// An attacker cannot manipulate this metric without performing useful work.
EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4);
// Protect up to 128 nodes that have the highest avalanche availability
// score.
EraseLastKElements(vEvictionCandidates, CompareNodeAvailabilityScore, 128,
[](NodeEvictionCandidate const &n) {
return n.availabilityScore > 0.;
});
// Protect some of the remaining eviction candidates by ratios of desirable
// or disadvantaged characteristics.
ProtectEvictionCandidatesByRatio(vEvictionCandidates);
if (vEvictionCandidates.empty()) {
return std::nullopt;
}
// If any remaining peers are preferred for eviction consider only them.
// This happens after the other preferences since if a peer is really the
// best by other criteria (esp relaying blocks)
// then we probably don't want to evict it no matter what.
if (std::any_of(
vEvictionCandidates.begin(), vEvictionCandidates.end(),
[](NodeEvictionCandidate const &n) { return n.prefer_evict; })) {
vEvictionCandidates.erase(
std::remove_if(
vEvictionCandidates.begin(), vEvictionCandidates.end(),
[](NodeEvictionCandidate const &n) { return !n.prefer_evict; }),
vEvictionCandidates.end());
}
// Identify the network group with the most connections and youngest member.
// (vEvictionCandidates is already sorted by reverse connect time)
uint64_t naMostConnections;
unsigned int nMostConnections = 0;
std::chrono::seconds nMostConnectionsTime{0};
std::map<uint64_t, std::vector<NodeEvictionCandidate>> mapNetGroupNodes;
for (const NodeEvictionCandidate &node : vEvictionCandidates) {
std::vector<NodeEvictionCandidate> &group =
mapNetGroupNodes[node.nKeyedNetGroup];
group.push_back(node);
const auto grouptime{group[0].m_connected};
size_t group_size = group.size();
if (group_size > nMostConnections ||
(group_size == nMostConnections &&
grouptime > nMostConnectionsTime)) {
nMostConnections = group_size;
nMostConnectionsTime = grouptime;
naMostConnections = node.nKeyedNetGroup;
}
}
// Reduce to the network group with the most connections
vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
// Disconnect from the network group with the most connections
return vEvictionCandidates.front().id;
}
/** Try to find a connection to evict when the node is full.
 * Extreme care must be taken to avoid opening the node to attacker
 * triggered network partitioning.
 * The strategy used here is to protect a small number of peers
 * for each of several distinct characteristics which are difficult
 * to forge. In order to partition a node the attacker must be
 * simultaneously better at all of them than honest peers.
 */
bool CConnman::AttemptToEvictConnection() {
    std::vector<NodeEvictionCandidate> vEvictionCandidates;
    {
        LOCK(m_nodes_mutex);
        for (const CNode *node : m_nodes) {
            // Peers with the NoBan permission are never evicted.
            if (node->HasPermission(NetPermissionFlags::NoBan)) {
                continue;
            }
            // Only inbound connections are eligible for eviction.
            if (!node->IsInboundConn()) {
                continue;
            }
            // Skip peers already flagged for disconnection.
            if (node->fDisconnect) {
                continue;
            }
            NodeEvictionCandidate candidate = {
                node->GetId(),
                node->m_connected,
                node->m_min_ping_time,
                node->m_last_block_time,
                node->m_last_proof_time,
                node->m_last_tx_time,
                node->m_has_all_wanted_services,
                node->m_relays_txs.load(),
                node->m_bloom_filter_loaded.load(),
                node->nKeyedNetGroup,
                node->m_prefer_evict,
                node->addr.IsLocal(),
                node->ConnectedThroughNetwork(),
                // Peers without avalanche get -inf so they sort below any
                // real availability score.
                node->m_avalanche_enabled
                    ? node->getAvailabilityScore()
                    : -std::numeric_limits<double>::infinity()};
            vEvictionCandidates.push_back(candidate);
        }
    }
    const std::optional<NodeId> node_id_to_evict =
        SelectNodeToEvict(std::move(vEvictionCandidates));
    if (!node_id_to_evict) {
        return false;
    }
    // Re-take the lock and flag the chosen peer for disconnection; it may
    // have gone away in the meantime, in which case report failure.
    LOCK(m_nodes_mutex);
    for (CNode *pnode : m_nodes) {
        if (pnode->GetId() == *node_id_to_evict) {
            LogPrint(
                BCLog::NET,
                "selected %s connection for eviction peer=%d; disconnecting\n",
                pnode->ConnectionTypeAsString(), pnode->GetId());
            pnode->fDisconnect = true;
            return true;
        }
    }
    return false;
}
/**
 * Accept a pending connection on a listening socket and hand it to
 * CreateNodeFromAcceptedSocket() together with the listen socket's
 * permission flags.
 */
void CConnman::AcceptConnection(const ListenSocket &hListenSocket) {
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    SOCKET hSocket =
        accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len);
    CAddress addr;

    if (hSocket == INVALID_SOCKET) {
        const int nErr = WSAGetLastError();
        // EWOULDBLOCK just means no pending connection; anything else is
        // worth logging.
        if (nErr != WSAEWOULDBLOCK) {
            LogPrintf("socket error accept failed: %s\n",
                      NetworkErrorString(nErr));
        }
        return;
    }

    if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) {
        LogPrintf("Warning: Unknown socket family\n");
    }

    const CAddress addr_bind = GetBindAddress(hSocket);

    NetPermissionFlags permissionFlags = NetPermissionFlags::None;
    hListenSocket.AddSocketPermissionFlags(permissionFlags);

    CreateNodeFromAcceptedSocket(hSocket, permissionFlags, addr_bind, addr);
}
void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
                                            NetPermissionFlags permissionFlags,
                                            const CAddress &addr_bind,
                                            const CAddress &addr) {
    int nInbound = 0;
    // Inbound budget is whatever is left after reserving outbound slots.
    int nMaxInbound = nMaxConnections - m_max_outbound;
    AddWhitelistPermissionFlags(permissionFlags, addr);
    // Expand the Implicit whitelist flag into the legacy default permission
    // set controlled by -whitelistforcerelay / -whitelistrelay.
    if (NetPermissions::HasFlag(permissionFlags,
                                NetPermissionFlags::Implicit)) {
        NetPermissions::ClearFlag(permissionFlags,
                                  NetPermissionFlags::Implicit);
        if (gArgs.GetBoolArg("-whitelistforcerelay",
                             DEFAULT_WHITELISTFORCERELAY)) {
            NetPermissions::AddFlag(permissionFlags,
                                    NetPermissionFlags::ForceRelay);
        }
        if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) {
            NetPermissions::AddFlag(permissionFlags, NetPermissionFlags::Relay);
        }
        NetPermissions::AddFlag(permissionFlags, NetPermissionFlags::Mempool);
        NetPermissions::AddFlag(permissionFlags, NetPermissionFlags::NoBan);
    }
    // Count current inbound peers to decide whether we are at capacity.
    {
        LOCK(m_nodes_mutex);
        for (const CNode *pnode : m_nodes) {
            if (pnode->IsInboundConn()) {
                nInbound++;
            }
        }
    }
    if (!fNetworkActive) {
        LogPrint(BCLog::NET,
                 "connection from %s dropped: not accepting new connections\n",
                 addr.ToString());
        CloseSocket(hSocket);
        return;
    }
    if (!IsSelectableSocket(hSocket)) {
        LogPrintf("connection from %s dropped: non-selectable socket\n",
                  addr.ToString());
        CloseSocket(hSocket);
        return;
    }
    // According to the internet TCP_NODELAY is not carried into accepted
    // sockets on all platforms. Set it again here just to be sure.
    SetSocketNoDelay(hSocket);
    // Don't accept connections from banned peers.
    bool banned = m_banman && m_banman->IsBanned(addr);
    if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::NoBan) &&
        banned) {
        LogPrint(BCLog::NET, "connection from %s dropped (banned)\n",
                 addr.ToString());
        CloseSocket(hSocket);
        return;
    }
    // Only accept connections from discouraged peers if our inbound slots
    // aren't (almost) full.
    bool discouraged = m_banman && m_banman->IsDiscouraged(addr);
    if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::NoBan) &&
        nInbound + 1 >= nMaxInbound && discouraged) {
        LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n",
                 addr.ToString());
        CloseSocket(hSocket);
        return;
    }
    // At capacity: try to evict an existing inbound peer to make room.
    if (nInbound >= nMaxInbound) {
        if (!AttemptToEvictConnection()) {
            // No connection to evict, disconnect the new connection
            LogPrint(BCLog::NET, "failed to find an eviction candidate - "
                                 "connection dropped (full)\n");
            CloseSocket(hSocket);
            return;
        }
    }
    NodeId id = GetNewNodeId();
    // Per-connection nonces derived deterministically from the node id.
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE)
                         .Write(id)
                         .Finalize();
    uint64_t extra_entropy =
        GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY)
            .Write(id)
            .Finalize();
    ServiceFlags nodeServices = nLocalServices;
    // Peers with BloomFilter permission get NODE_BLOOM advertised even if the
    // service is otherwise disabled locally.
    if (NetPermissions::HasFlag(permissionFlags,
                                NetPermissionFlags::BloomFilter)) {
        nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
    }
    // The connection is inbound-onion when it arrived on one of our
    // configured onion binds.
    const bool inbound_onion =
        std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) !=
        m_onion_binds.end();
    CNode *pnode = new CNode(id, hSocket, addr, CalculateKeyedNetGroup(addr),
                             nonce, extra_entropy, addr_bind, "",
                             ConnectionType::INBOUND, inbound_onion);
    pnode->AddRef();
    pnode->m_permissionFlags = permissionFlags;
    // Discouraged peers are preferred eviction candidates.
    pnode->m_prefer_evict = discouraged;
    for (auto interface : m_msgproc) {
        interface->InitializeNode(*config, *pnode, nodeServices);
    }
    LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());
    {
        LOCK(m_nodes_mutex);
        m_nodes.push_back(pnode);
    }
    // We received a new connection, harvest entropy from the time (and our
    // peer count)
    RandAddEvent(uint32_t(id));
}
bool CConnman::AddConnection(const std::string &address,
                             ConnectionType conn_type) {
    // Per-type connection budget; std::nullopt means "no limit".
    std::optional<int> max_connections;
    switch (conn_type) {
        case ConnectionType::INBOUND:
        case ConnectionType::MANUAL:
            // These connection types cannot be opened through this interface.
            return false;
        case ConnectionType::OUTBOUND_FULL_RELAY:
            max_connections = m_max_outbound_full_relay;
            break;
        case ConnectionType::BLOCK_RELAY:
            max_connections = m_max_outbound_block_relay;
            break;
        case ConnectionType::ADDR_FETCH:
            // No limit for ADDR_FETCH because -seednode has no limit either.
            break;
        case ConnectionType::FEELER:
            // No limit for FEELER connections since they're short-lived.
            break;
        case ConnectionType::AVALANCHE_OUTBOUND:
            max_connections = m_max_avalanche_outbound;
            break;
    } // no default case, so the compiler can warn about missing cases
    // Count how many connections of the requested type already exist.
    const int existing_connections = WITH_LOCK(
        m_nodes_mutex,
        return std::count_if(m_nodes.begin(), m_nodes.end(),
                             [conn_type](CNode *node) {
                                 return node->m_conn_type == conn_type;
                             }););
    // Refuse when the per-type budget is already exhausted.
    if (max_connections.has_value() &&
        existing_connections >= *max_connections) {
        return false;
    }
    // Refuse when no outbound slot is available (non-blocking grant).
    CSemaphoreGrant grant(*semOutbound, true);
    if (!grant) {
        return false;
    }
    OpenNetworkConnection(CAddress(), false, &grant, address.c_str(),
                          conn_type);
    return true;
}
void CConnman::DisconnectNodes() {
    {
        LOCK(m_nodes_mutex);
        if (!fNetworkActive) {
            // Disconnect any connected nodes
            for (CNode *pnode : m_nodes) {
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET,
                             "Network not active, dropping peer=%d\n",
                             pnode->GetId());
                    pnode->fDisconnect = true;
                }
            }
        }
        // Disconnect unused nodes
        // Iterate a copy because the loop erases from m_nodes.
        std::vector<CNode *> nodes_copy = m_nodes;
        for (CNode *pnode : nodes_copy) {
            if (pnode->fDisconnect) {
                // remove from m_nodes
                m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode),
                              m_nodes.end());
                // release outbound grant (if any)
                pnode->grantOutbound.Release();
                // close socket and cleanup
                pnode->CloseSocketDisconnect();
                // Drop the reference that m_nodes held; the node is parked in
                // m_nodes_disconnected until all remaining refs are released.
                pnode->Release();
                m_nodes_disconnected.push_back(pnode);
            }
        }
    }
    {
        // Delete disconnected nodes
        // Iterate a copy because the loop removes from m_nodes_disconnected.
        std::list<CNode *> nodes_disconnected_copy = m_nodes_disconnected;
        for (CNode *pnode : nodes_disconnected_copy) {
            // Destroy the object only after other threads have stopped using
            // it.
            if (pnode->GetRefCount() <= 0) {
                m_nodes_disconnected.remove(pnode);
                DeleteNode(pnode);
            }
        }
    }
}
void CConnman::NotifyNumConnectionsChanged() {
size_t nodes_size;
{
LOCK(m_nodes_mutex);
nodes_size = m_nodes.size();
}
if (nodes_size != nPrevNodeCount) {
nPrevNodeCount = nodes_size;
if (m_client_interface) {
m_client_interface->NotifyNumConnectionsChanged(nodes_size);
}
}
}
bool CConnman::ShouldRunInactivityChecks(const CNode &node,
                                         std::chrono::seconds now) const {
    // Only peers connected for longer than the configured connect timeout
    // are subject to inactivity checks.
    return now > node.m_connected + m_peer_connect_timeout;
}
bool CConnman::InactivityCheck(const CNode &node) const {
    // Tests that see disconnects after using mocktime can start nodes with a
    // large timeout. For example, -peertimeout=999999999.
    const auto now{GetTime<std::chrono::seconds>()};
    const auto last_send{node.m_last_send.load()};
    const auto last_recv{node.m_last_recv.load()};
    if (!ShouldRunInactivityChecks(node, now)) {
        return false;
    }
    const bool received_something{last_recv.count() != 0};
    const bool sent_something{last_send.count() != 0};
    // Nothing exchanged at all within the initial window: drop the peer.
    if (!received_something || !sent_something) {
        LogPrint(BCLog::NET,
                 "socket no message in first %i seconds, %d %d peer=%d\n",
                 count_seconds(m_peer_connect_timeout), received_something,
                 sent_something, node.GetId());
        return true;
    }
    // Nothing sent for too long.
    if (now > last_send + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n",
                 count_seconds(now - last_send), node.GetId());
        return true;
    }
    // Nothing received for too long.
    if (now > last_recv + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n",
                 count_seconds(now - last_recv), node.GetId());
        return true;
    }
    // The version handshake never completed in time.
    if (!node.fSuccessfullyConnected) {
        LogPrint(BCLog::NET, "version handshake timeout peer=%d\n",
                 node.GetId());
        return true;
    }
    return false;
}
bool CConnman::GenerateSelectSet(std::set<SOCKET> &recv_set,
                                 std::set<SOCKET> &send_set,
                                 std::set<SOCKET> &error_set) {
    // Always watch the listening sockets for new inbound connections.
    for (const ListenSocket &hListenSocket : vhListenSocket) {
        recv_set.insert(hListenSocket.socket);
    }
    {
        LOCK(m_nodes_mutex);
        for (CNode *pnode : m_nodes) {
            // Implement the following logic:
            // * If there is data to send, select() for sending data. As this
            // only happens when optimistic write failed, we choose to first
            // drain the write buffer in this case before receiving more. This
            // avoids needlessly queueing received data, if the remote peer is
            // not themselves receiving data. This means properly utilizing
            // TCP flow control signalling.
            // * Otherwise, if there is space left in the receive buffer,
            // select() for receiving data.
            // * Hand off all complete messages to the processor, to be handled
            // without blocking here.
            bool select_recv = !pnode->fPauseRecv;
            bool select_send;
            {
                // cs_vSend is released before cs_hSocket is taken below, so
                // the two lock scopes never overlap.
                LOCK(pnode->cs_vSend);
                select_send = !pnode->vSendMsg.empty();
            }
            LOCK(pnode->cs_hSocket);
            if (pnode->hSocket == INVALID_SOCKET) {
                continue;
            }
            error_set.insert(pnode->hSocket);
            if (select_send) {
                // Sending takes priority; skip receive for this round.
                send_set.insert(pnode->hSocket);
                continue;
            }
            if (select_recv) {
                recv_set.insert(pnode->hSocket);
            }
        }
    }
    // False means there is nothing to wait on at all.
    return !recv_set.empty() || !send_set.empty() || !error_set.empty();
}
#ifdef USE_POLL
void CConnman::SocketEvents(std::set<SOCKET> &recv_set,
std::set<SOCKET> &send_set,
std::set<SOCKET> &error_set) {
std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
if (!GenerateSelectSet(recv_select_set, send_select_set,
error_select_set)) {
interruptNet.sleep_for(
std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
return;
}
std::unordered_map<SOCKET, struct pollfd> pollfds;
for (SOCKET socket_id : recv_select_set) {
pollfds[socket_id].fd = socket_id;
pollfds[socket_id].events |= POLLIN;
}
for (SOCKET socket_id : send_select_set) {
pollfds[socket_id].fd = socket_id;
pollfds[socket_id].events |= POLLOUT;
}
for (SOCKET socket_id : error_select_set) {
pollfds[socket_id].fd = socket_id;
// These flags are ignored, but we set them for clarity
pollfds[socket_id].events |= POLLERR | POLLHUP;
}
std::vector<struct pollfd> vpollfds;
vpollfds.reserve(pollfds.size());
for (auto it : pollfds) {
vpollfds.push_back(std::move(it.second));
}
if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) <
0) {
return;
}
if (interruptNet) {
return;
}
for (struct pollfd pollfd_entry : vpollfds) {
if (pollfd_entry.revents & POLLIN) {
recv_set.insert(pollfd_entry.fd);
}
if (pollfd_entry.revents & POLLOUT) {
send_set.insert(pollfd_entry.fd);
}
if (pollfd_entry.revents & (POLLERR | POLLHUP)) {
error_set.insert(pollfd_entry.fd);
}
}
}
#else
void CConnman::SocketEvents(std::set<SOCKET> &recv_set,
                            std::set<SOCKET> &send_set,
                            std::set<SOCKET> &error_set) {
    // select()-based fallback used when USE_POLL is not defined.
    std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
    if (!GenerateSelectSet(recv_select_set, send_select_set,
                           error_select_set)) {
        // Nothing to wait on: sleep instead so we don't busy-loop.
        interruptNet.sleep_for(
            std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
        return;
    }
    //
    // Find which sockets have data to receive
    //
    struct timeval timeout;
    timeout.tv_sec = 0;
    // frequency to poll pnode->vSend
    timeout.tv_usec = SELECT_TIMEOUT_MILLISECONDS * 1000;
    fd_set fdsetRecv;
    fd_set fdsetSend;
    fd_set fdsetError;
    FD_ZERO(&fdsetRecv);
    FD_ZERO(&fdsetSend);
    FD_ZERO(&fdsetError);
    SOCKET hSocketMax = 0;
    for (SOCKET hSocket : recv_select_set) {
        FD_SET(hSocket, &fdsetRecv);
        hSocketMax = std::max(hSocketMax, hSocket);
    }
    for (SOCKET hSocket : send_select_set) {
        FD_SET(hSocket, &fdsetSend);
        hSocketMax = std::max(hSocketMax, hSocket);
    }
    for (SOCKET hSocket : error_select_set) {
        FD_SET(hSocket, &fdsetError);
        hSocketMax = std::max(hSocketMax, hSocket);
    }
    int nSelect =
        select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
    if (interruptNet) {
        return;
    }
    if (nSelect == SOCKET_ERROR) {
        int nErr = WSAGetLastError();
        LogPrintf("socket select error %s\n", NetworkErrorString(nErr));
        // Recovery path: mark every possible descriptor readable so each
        // peer's socket still gets serviced below, clear send/error sets,
        // then back off for one timeout interval.
        for (unsigned int i = 0; i <= hSocketMax; i++) {
            FD_SET(i, &fdsetRecv);
        }
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        if (!interruptNet.sleep_for(
                std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS))) {
            return;
        }
    }
    // Translate the fd_sets back into the caller's three socket sets.
    for (SOCKET hSocket : recv_select_set) {
        if (FD_ISSET(hSocket, &fdsetRecv)) {
            recv_set.insert(hSocket);
        }
    }
    for (SOCKET hSocket : send_select_set) {
        if (FD_ISSET(hSocket, &fdsetSend)) {
            send_set.insert(hSocket);
        }
    }
    for (SOCKET hSocket : error_select_set) {
        if (FD_ISSET(hSocket, &fdsetError)) {
            error_set.insert(hSocket);
        }
    }
}
#endif
void CConnman::SocketHandler() {
    // Wait (with timeout) for sockets ready for I/O.
    std::set<SOCKET> recv_set, send_set, error_set;
    SocketEvents(recv_set, send_set, error_set);
    if (interruptNet) {
        return;
    }
    //
    // Accept new connections
    //
    for (const ListenSocket &hListenSocket : vhListenSocket) {
        if (hListenSocket.socket != INVALID_SOCKET &&
            recv_set.count(hListenSocket.socket) > 0) {
            AcceptConnection(hListenSocket);
        }
    }
    //
    // Service each socket
    //
    // Snapshot the node list under the mutex and take a reference on each
    // node so none is deleted while being serviced below.
    std::vector<CNode *> nodes_copy;
    {
        LOCK(m_nodes_mutex);
        nodes_copy = m_nodes;
        for (CNode *pnode : nodes_copy) {
            pnode->AddRef();
        }
    }
    for (CNode *pnode : nodes_copy) {
        if (interruptNet) {
            // NOTE(review): returning here skips the Release() loop at the
            // bottom, leaving the refs taken above held — presumably harmless
            // because this only happens at shutdown; confirm.
            return;
        }
        //
        // Receive
        //
        bool recvSet = false;
        bool sendSet = false;
        bool errorSet = false;
        {
            LOCK(pnode->cs_hSocket);
            if (pnode->hSocket == INVALID_SOCKET) {
                continue;
            }
            recvSet = recv_set.count(pnode->hSocket) > 0;
            sendSet = send_set.count(pnode->hSocket) > 0;
            errorSet = error_set.count(pnode->hSocket) > 0;
        }
        if (recvSet || errorSet) {
            // typical socket buffer is 8K-64K
            uint8_t pchBuf[0x10000];
            int32_t nBytes = 0;
            {
                LOCK(pnode->cs_hSocket);
                if (pnode->hSocket == INVALID_SOCKET) {
                    continue;
                }
                nBytes = recv(pnode->hSocket, (char *)pchBuf, sizeof(pchBuf),
                              MSG_DONTWAIT);
            }
            if (nBytes > 0) {
                bool notify = false;
                // Feed the raw bytes to the message deserializer; a failure
                // means the stream is corrupt, so drop the peer.
                if (!pnode->ReceiveMsgBytes(*config, {pchBuf, (size_t)nBytes},
                                            notify)) {
                    pnode->CloseSocketDisconnect();
                }
                RecordBytesRecv(nBytes);
                if (notify) {
                    // Move all completed messages to the processing queue and
                    // apply flood control based on the queued byte total.
                    size_t nSizeAdded = 0;
                    auto it(pnode->vRecvMsg.begin());
                    for (; it != pnode->vRecvMsg.end(); ++it) {
                        // vRecvMsg contains only completed CNetMessage
                        // the single possible partially deserialized message
                        // are held by TransportDeserializer
                        nSizeAdded += it->m_raw_message_size;
                    }
                    {
                        LOCK(pnode->cs_vProcessMsg);
                        pnode->vProcessMsg.splice(pnode->vProcessMsg.end(),
                                                  pnode->vRecvMsg,
                                                  pnode->vRecvMsg.begin(), it);
                        pnode->nProcessQueueSize += nSizeAdded;
                        // Pause receiving while the process queue is over the
                        // flood limit.
                        pnode->fPauseRecv =
                            pnode->nProcessQueueSize > nReceiveFloodSize;
                    }
                    WakeMessageHandler();
                }
            } else if (nBytes == 0) {
                // socket closed gracefully
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET, "socket closed for peer=%d\n",
                             pnode->GetId());
                }
                pnode->CloseSocketDisconnect();
            } else if (nBytes < 0) {
                // error
                int nErr = WSAGetLastError();
                // These errno values are transient and expected on a
                // non-blocking socket; anything else is fatal for the peer.
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE &&
                    nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
                    if (!pnode->fDisconnect) {
                        LogPrint(BCLog::NET,
                                 "socket recv error for peer=%d: %s\n",
                                 pnode->GetId(), NetworkErrorString(nErr));
                    }
                    pnode->CloseSocketDisconnect();
                }
            }
        }
        if (sendSet) {
            // Send data
            size_t bytes_sent =
                WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode));
            if (bytes_sent) {
                RecordBytesSent(bytes_sent);
            }
        }
        // Flag timed-out peers for disconnection.
        if (InactivityCheck(*pnode)) {
            pnode->fDisconnect = true;
        }
    }
    // Release the references taken on the snapshot above.
    {
        LOCK(m_nodes_mutex);
        for (CNode *pnode : nodes_copy) {
            pnode->Release();
        }
    }
}
void CConnman::ThreadSocketHandler() {
    // Main network thread loop: clean up, notify, then service sockets,
    // until the interrupt is signalled.
    for (;;) {
        if (interruptNet) {
            return;
        }
        DisconnectNodes();
        NotifyNumConnectionsChanged();
        SocketHandler();
    }
}
void CConnman::WakeMessageHandler() {
{
LOCK(mutexMsgProc);
fMsgProcWake = true;
}
condMsgProc.notify_one();
}
void CConnman::ThreadDNSAddressSeed() {
    FastRandomContext rng;
    std::vector<std::string> seeds =
        GetRandomizedDNSSeeds(config->GetChainParams());
    // Number of seeds left before testing if we have enough connections
    int seeds_right_now = 0;
    // Total number of addresses learned from all queried seeds.
    int found = 0;
    if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) {
        // When -forcednsseed is provided, query all.
        seeds_right_now = seeds.size();
    } else if (addrman.size() == 0) {
        // If we have no known peers, query all.
        // This will occur on the first run, or if peers.dat has been
        // deleted.
        seeds_right_now = seeds.size();
    }
    // goal: only query DNS seed if address need is acute
    // * If we have a reasonable number of peers in addrman, spend
    // some time trying them first. This improves user privacy by
    // creating fewer identifying DNS requests, reduces trust by
    // giving seeds less influence on the network topology, and
    // reduces traffic to the seeds.
    // * When querying DNS seeds query a few at once, this ensures
    // that we don't give DNS seeds the ability to eclipse nodes
    // that query them.
    // * If we continue having problems, eventually query all the
    // DNS seeds, and if that fails too, also try the fixed seeds.
    // (done in ThreadOpenConnections)
    const std::chrono::seconds seeds_wait_time =
        (addrman.size() >= DNSSEEDS_DELAY_PEER_THRESHOLD
             ? DNSSEEDS_DELAY_MANY_PEERS
             : DNSSEEDS_DELAY_FEW_PEERS);
    for (const std::string &seed : seeds) {
        if (seeds_right_now == 0) {
            // Start a new batch of queries after the wait period.
            seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE;
            if (addrman.size() > 0) {
                LogPrintf("Waiting %d seconds before querying DNS seeds.\n",
                          seeds_wait_time.count());
                std::chrono::seconds to_wait = seeds_wait_time;
                while (to_wait.count() > 0) {
                    // if sleeping for the MANY_PEERS interval, wake up
                    // early to see if we have enough peers and can stop
                    // this thread entirely freeing up its resources
                    std::chrono::seconds w =
                        std::min(DNSSEEDS_DELAY_FEW_PEERS, to_wait);
                    if (!interruptNet.sleep_for(w)) {
                        return;
                    }
                    to_wait -= w;
                    // Count fully connected full-relay outbound peers; two
                    // or more means DNS seeding is no longer needed.
                    int nRelevant = 0;
                    {
                        LOCK(m_nodes_mutex);
                        for (const CNode *pnode : m_nodes) {
                            if (pnode->fSuccessfullyConnected &&
                                pnode->IsFullOutboundConn()) {
                                ++nRelevant;
                            }
                        }
                    }
                    if (nRelevant >= 2) {
                        if (found > 0) {
                            LogPrintf("%d addresses found from DNS seeds\n",
                                      found);
                            LogPrintf(
                                "P2P peers available. Finished DNS seeding.\n");
                        } else {
                            LogPrintf(
                                "P2P peers available. Skipped DNS seeding.\n");
                        }
                        return;
                    }
                }
            }
        }
        if (interruptNet) {
            return;
        }
        // hold off on querying seeds if P2P network deactivated
        if (!fNetworkActive) {
            LogPrintf("Waiting for network to be reactivated before querying "
                      "DNS seeds.\n");
            do {
                if (!interruptNet.sleep_for(std::chrono::seconds{1})) {
                    return;
                }
            } while (!fNetworkActive);
        }
        LogPrintf("Loading addresses from DNS seed %s\n", seed);
        if (HaveNameProxy()) {
            // With a name proxy we cannot resolve directly; defer to an
            // ADDR_FETCH connection instead.
            AddAddrFetch(seed);
        } else {
            std::vector<CNetAddr> vIPs;
            std::vector<CAddress> vAdd;
            ServiceFlags requiredServiceBits =
                GetDesirableServiceFlags(NODE_NONE);
            // The x<hex-bits> prefix asks the seed to filter by service bits.
            std::string host = strprintf("x%x.%s", requiredServiceBits, seed);
            CNetAddr resolveSource;
            if (!resolveSource.SetInternal(host)) {
                continue;
            }
            // Limits number of IPs learned from a DNS seed
            unsigned int nMaxIPs = 256;
            if (LookupHost(host, vIPs, nMaxIPs, true)) {
                for (const CNetAddr &ip : vIPs) {
                    CAddress addr = CAddress(
                        CService(ip, config->GetChainParams().GetDefaultPort()),
                        requiredServiceBits);
                    // Use a random age between 3 and 7 days old.
                    addr.nTime = rng.rand_uniform_delay(
                        Now<NodeSeconds>() - 3 * 24h, -4 * 24h);
                    vAdd.push_back(addr);
                    found++;
                }
                addrman.Add(vAdd, resolveSource);
            } else {
                // We now avoid directly using results from DNS Seeds which do
                // not support service bit filtering, instead using them as a
                // addrfetch to get nodes with our desired service bits.
                AddAddrFetch(seed);
            }
        }
        --seeds_right_now;
    }
    LogPrintf("%d addresses found from DNS seeds\n", found);
}
void CConnman::DumpAddresses() {
    // Flush the address manager to peers.dat and log how long it took.
    const int64_t started_ms = GetTimeMillis();
    DumpPeerAddresses(config->GetChainParams(), ::gArgs, addrman);
    LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n",
             addrman.size(), GetTimeMillis() - started_ms);
}
void CConnman::ProcessAddrFetch() {
    // Pop one pending ADDR_FETCH destination, if any, under the lock.
    std::string destination;
    {
        LOCK(m_addr_fetches_mutex);
        if (m_addr_fetches.empty()) {
            return;
        }
        destination = m_addr_fetches.front();
        m_addr_fetches.pop_front();
    }
    // Only connect when an outbound slot is available (non-blocking grant).
    CSemaphoreGrant grant(*semOutbound, true);
    if (!grant) {
        return;
    }
    OpenNetworkConnection(CAddress(), false, &grant, destination.c_str(),
                          ConnectionType::ADDR_FETCH);
}
bool CConnman::GetTryNewOutboundPeer() const {
    // Whether an extra outbound full-relay connection should be attempted
    // (toggled via SetTryNewOutboundPeer).
    return m_try_another_outbound_peer;
}
void CConnman::SetTryNewOutboundPeer(bool flag) {
    // Toggle the "try an extra outbound full-relay peer" flag and log the
    // new value.
    m_try_another_outbound_peer = flag;
    LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n",
             flag ? "true" : "false");
}
// Return the number of peers we have over our outbound connection limit.
// Exclude peers that are marked for disconnect, or are going to be disconnected
// soon (eg ADDR_FETCH and FEELER).
// Also exclude peers that haven't finished initial connection handshake yet (so
// that we don't decide we're over our desired connection limit, and then evict
// some peer that has finished the handshake).
int CConnman::GetExtraFullOutboundCount() const {
int full_outbound_peers = 0;
{
LOCK(m_nodes_mutex);
for (const CNode *pnode : m_nodes) {
if (pnode->fSuccessfullyConnected && !pnode->fDisconnect &&
pnode->IsFullOutboundConn()) {
++full_outbound_peers;
}
}
}
return std::max(full_outbound_peers - m_max_outbound_full_relay -
m_max_avalanche_outbound,
0);
}
int CConnman::GetExtraBlockRelayCount() const {
int block_relay_peers = 0;
{
LOCK(m_nodes_mutex);
for (const CNode *pnode : m_nodes) {
if (pnode->fSuccessfullyConnected && !pnode->fDisconnect &&
pnode->IsBlockOnlyConn()) {
++block_relay_peers;
}
}
}
return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
}
void CConnman::ThreadOpenConnections(
    const std::vector<std::string> connect,
    std::function<void(const CAddress &, ConnectionType)> mockOpenConnection) {
    // Connect to specific addresses
    // With -connect, loop forever over the given list and never use addrman.
    if (!connect.empty()) {
        for (int64_t nLoop = 0;; nLoop++) {
            ProcessAddrFetch();
            for (const std::string &strAddr : connect) {
                CAddress addr(CService(), NODE_NONE);
                OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(),
                                      ConnectionType::MANUAL);
                // Back off progressively on each retry pass, up to 5s.
                for (int i = 0; i < 10 && i < nLoop; i++) {
                    if (!interruptNet.sleep_for(
                            std::chrono::milliseconds(500))) {
                        return;
                    }
                }
            }
            if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) {
                return;
            }
        }
    }
    // Initiate network connections
    auto start = GetTime<std::chrono::microseconds>();
    // Minimum time before next feeler connection (in microseconds).
    auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL);
    auto next_extra_block_relay =
        GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
    const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
    bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
    if (!add_fixed_seeds) {
        LogPrintf("Fixed seeds are disabled\n");
    }
    while (!interruptNet) {
        ProcessAddrFetch();
        // No need to sleep the thread if we are mocking the network connection
        if (!mockOpenConnection &&
            !interruptNet.sleep_for(std::chrono::milliseconds(500))) {
            return;
        }
        // Blocks until an outbound slot is free.
        CSemaphoreGrant grant(*semOutbound);
        if (interruptNet) {
            return;
        }
        if (add_fixed_seeds && addrman.size() == 0) {
            // When the node starts with an empty peers.dat, there are a few
            // other sources of peers before we fallback on to fixed seeds:
            // -dnsseed, -seednode, -addnode If none of those are available, we
            // fallback on to fixed seeds immediately, else we allow 60 seconds
            // for any of those sources to populate addrman.
            bool add_fixed_seeds_now = false;
            // It is cheapest to check if enough time has passed first.
            if (GetTime<std::chrono::seconds>() >
                start + std::chrono::minutes{1}) {
                add_fixed_seeds_now = true;
                LogPrintf("Adding fixed seeds as 60 seconds have passed and "
                          "addrman is empty\n");
            }
            // Checking !dnsseed is cheaper before locking 2 mutexes.
            if (!add_fixed_seeds_now && !dnsseed) {
                LOCK2(m_addr_fetches_mutex, m_added_nodes_mutex);
                if (m_addr_fetches.empty() && m_added_nodes.empty()) {
                    add_fixed_seeds_now = true;
                    LogPrintf(
                        "Adding fixed seeds as -dnsseed=0, -addnode is not "
                        "provided and all -seednode(s) attempted\n");
                }
            }
            if (add_fixed_seeds_now) {
                CNetAddr local;
                local.SetInternal("fixedseeds");
                addrman.Add(convertSeed6(config->GetChainParams().FixedSeeds()),
                            local);
                add_fixed_seeds = false;
            }
        }
        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;
        // Only connect out to one peer per network group (/16 for IPv4).
        int nOutboundFullRelay = 0;
        int nOutboundBlockRelay = 0;
        int nOutboundAvalanche = 0;
        std::set<std::vector<uint8_t>> setConnected;
        {
            LOCK(m_nodes_mutex);
            for (const CNode *pnode : m_nodes) {
                if (pnode->IsAvalancheOutboundConnection()) {
                    nOutboundAvalanche++;
                } else if (pnode->IsFullOutboundConn()) {
                    nOutboundFullRelay++;
                } else if (pnode->IsBlockOnlyConn()) {
                    nOutboundBlockRelay++;
                }
                // Netgroups for inbound and manual peers are not excluded
                // because our goal here is to not use multiple of our
                // limited outbound slots on a single netgroup but inbound
                // and manual peers do not use our outbound slots. Inbound
                // peers also have the added issue that they could be attacker
                // controlled and could be used to prevent us from connecting
                // to particular hosts if we used them here.
                switch (pnode->m_conn_type) {
                    case ConnectionType::INBOUND:
                    case ConnectionType::MANUAL:
                        break;
                    case ConnectionType::AVALANCHE_OUTBOUND:
                    case ConnectionType::OUTBOUND_FULL_RELAY:
                    case ConnectionType::BLOCK_RELAY:
                    case ConnectionType::ADDR_FETCH:
                    case ConnectionType::FEELER:
                        setConnected.insert(
                            pnode->addr.GetGroup(addrman.GetAsmap()));
                } // no default case, so the compiler can warn about missing
                  // cases
            }
        }
        ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
        auto now = GetTime<std::chrono::microseconds>();
        bool anchor = false;
        bool fFeeler = false;
        // Determine what type of connection to open. Opening
        // BLOCK_RELAY connections to addresses from anchors.dat gets the
        // highest priority. Then we open AVALANCHE_OUTBOUND connection until we
        // hit our avalanche outbound peer limit, which is 0 if avalanche is not
        // enabled. We fallback after 50 retries to OUTBOUND_FULL_RELAY if the
        // peer is not avalanche capable until we meet our full-relay capacity.
        // Then we open BLOCK_RELAY connection until we hit our block-relay-only
        // peer limit.
        // GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
        // try opening an additional OUTBOUND_FULL_RELAY connection. If none of
        // these conditions are met, check to see if it's time to try an extra
        // block-relay-only peer (to confirm our tip is current, see below) or
        // the next_feeler timer to decide if we should open a FEELER.
        if (!m_anchors.empty() &&
            (nOutboundBlockRelay < m_max_outbound_block_relay)) {
            conn_type = ConnectionType::BLOCK_RELAY;
            anchor = true;
        } else if (g_avalanche &&
                   (nOutboundAvalanche < m_max_avalanche_outbound)) {
            conn_type = ConnectionType::AVALANCHE_OUTBOUND;
        } else if (nOutboundFullRelay < m_max_outbound_full_relay) {
            // OUTBOUND_FULL_RELAY
        } else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
            conn_type = ConnectionType::BLOCK_RELAY;
        } else if (GetTryNewOutboundPeer()) {
            // OUTBOUND_FULL_RELAY
        } else if (now > next_extra_block_relay &&
                   m_start_extra_block_relay_peers) {
            // Periodically connect to a peer (using regular outbound selection
            // methodology from addrman) and stay connected long enough to sync
            // headers, but not much else.
            //
            // Then disconnect the peer, if we haven't learned anything new.
            //
            // The idea is to make eclipse attacks very difficult to pull off,
            // because every few minutes we're finding a new peer to learn
            // headers from.
            //
            // This is similar to the logic for trying extra outbound
            // (full-relay) peers, except:
            // - we do this all the time on an exponential timer, rather than
            // just when our tip is stale
            // - we potentially disconnect our next-youngest block-relay-only
            // peer, if our newest block-relay-only peer delivers a block more
            // recently.
            // See the eviction logic in net_processing.cpp.
            //
            // Because we can promote these connections to block-relay-only
            // connections, they do not get their own ConnectionType enum
            // (similar to how we deal with extra outbound peers).
            next_extra_block_relay =
                GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
            conn_type = ConnectionType::BLOCK_RELAY;
        } else if (now > next_feeler) {
            next_feeler = GetExponentialRand(now, FEELER_INTERVAL);
            conn_type = ConnectionType::FEELER;
            fFeeler = true;
        } else {
            // skip to next iteration of while loop
            continue;
        }
        addrman.ResolveCollisions();
        const auto nANow{AdjustedTime()};
        int nTries = 0;
        // Candidate-selection loop: draw addresses from addrman (or the
        // anchor list) until one passes all filters or we give up.
        while (!interruptNet) {
            if (anchor && !m_anchors.empty()) {
                const CAddress addr = m_anchors.back();
                m_anchors.pop_back();
                if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) ||
                    !HasAllDesirableServiceFlags(addr.nServices) ||
                    setConnected.count(addr.GetGroup(addrman.GetAsmap()))) {
                    continue;
                }
                addrConnect = addr;
                LogPrint(BCLog::NET,
                         "Trying to make an anchor connection to %s\n",
                         addrConnect.ToString());
                break;
            }
            // If we didn't find an appropriate destination after trying 100
            // addresses fetched from addrman, stop this loop, and let the outer
            // loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman
            // addresses.
            nTries++;
            if (nTries > 100) {
                break;
            }
            CAddress addr;
            NodeSeconds addr_last_try{0s};
            if (fFeeler) {
                // First, try to get a tried table collision address. This
                // returns an empty (invalid) address if there are no collisions
                // to try.
                std::tie(addr, addr_last_try) = addrman.SelectTriedCollision();
                if (!addr.IsValid()) {
                    // No tried table collisions. Select a new table address
                    // for our feeler.
                    std::tie(addr, addr_last_try) = addrman.Select(true);
                } else if (AlreadyConnectedToAddress(addr)) {
                    // If test-before-evict logic would have us connect to a
                    // peer that we're already connected to, just mark that
                    // address as Good(). We won't be able to initiate the
                    // connection anyway, so this avoids inadvertently evicting
                    // a currently-connected peer.
                    addrman.Good(addr);
                    // Select a new table address for our feeler instead.
                    std::tie(addr, addr_last_try) = addrman.Select(true);
                }
            } else {
                // Not a feeler
                std::tie(addr, addr_last_try) = addrman.Select();
            }
            // Require outbound connections, other than feelers and avalanche,
            // to be to distinct network groups
            if (!fFeeler && conn_type != ConnectionType::AVALANCHE_OUTBOUND &&
                setConnected.count(addr.GetGroup(addrman.GetAsmap()))) {
                break;
            }
            // if we selected an invalid or local address, restart
            if (!addr.IsValid() || IsLocal(addr)) {
                break;
            }
            if (!IsReachable(addr)) {
                continue;
            }
            // only consider very recently tried nodes after 30 failed attempts
            if (nANow - addr_last_try < 10min && nTries < 30) {
                continue;
            }
            // for non-feelers, require all the services we'll want,
            // for feelers, only require they be a full node (only because most
            // SPV clients don't have a good address DB available)
            if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) {
                continue;
            }
            if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) {
                continue;
            }
            // Do not connect to bad ports, unless 50 invalid addresses have
            // been selected already.
            if (nTries < 50 && (addr.IsIPv4() || addr.IsIPv6()) &&
                IsBadPort(addr.GetPort())) {
                continue;
            }
            // For avalanche peers, check they have the avalanche service bit
            // set.
            if (conn_type == ConnectionType::AVALANCHE_OUTBOUND &&
                !(addr.nServices & NODE_AVALANCHE)) {
                // If this peer is not suitable as an avalanche one and we tried
                // over 20 addresses already, see if we can fallback to a non
                // avalanche full outbound.
                if (nTries < 20 ||
                    nOutboundFullRelay >= m_max_outbound_full_relay ||
                    setConnected.count(addr.GetGroup(addrman.GetAsmap()))) {
                    // Fallback is not desirable or possible, try another one
                    continue;
                }
                // Fallback is possible, update the connection type accordingly
                conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
            }
            addrConnect = addr;
            break;
        }
        if (addrConnect.IsValid()) {
            if (fFeeler) {
                // Add small amount of random noise before connection to avoid
                // synchronization.
                int randsleep = GetRand<int>(FEELER_SLEEP_WINDOW * 1000);
                if (!interruptNet.sleep_for(
                        std::chrono::milliseconds(randsleep))) {
                    return;
                }
                LogPrint(BCLog::NET, "Making feeler connection to %s\n",
                         addrConnect.ToString());
            }
            // This mock is for testing purpose only. It prevents the thread
            // from attempting the connection which is useful for testing.
            if (mockOpenConnection) {
                mockOpenConnection(addrConnect, conn_type);
            } else {
                OpenNetworkConnection(addrConnect,
                                      int(setConnected.size()) >=
                                          std::min(nMaxConnections - 1, 2),
                                      &grant, nullptr, conn_type);
            }
        }
    }
}
/**
 * Collect the addresses of all currently connected block-relay-only peers,
 * e.g. so they can be persisted as anchors on shutdown.
 */
std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const {
    std::vector<CAddress> result;
    LOCK(m_nodes_mutex);
    for (const CNode *node : m_nodes) {
        if (!node->IsBlockOnlyConn()) {
            continue;
        }
        result.push_back(node->addr);
    }
    return result;
}
/**
 * Report the connection state of every manually-added node entry.
 * Returns one AddedNodeInfo per configured entry, with the resolved address
 * and inbound/outbound direction filled in when a matching live connection
 * exists.
 */
std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const {
    std::vector<AddedNodeInfo> ret;

    std::list<std::string> lAddresses(0);
    {
        LOCK(m_added_nodes_mutex);
        ret.reserve(m_added_nodes.size());
        // Snapshot the configured entries so m_added_nodes_mutex is not held
        // while doing name resolution below.
        std::copy(m_added_nodes.cbegin(), m_added_nodes.cend(),
                  std::back_inserter(lAddresses));
    }

    // Build a map of all already connected addresses (by IP:port and by name)
    // to inbound/outbound and resolved CService
    std::map<CService, bool> mapConnected;
    std::map<std::string, std::pair<bool, CService>> mapConnectedByName;
    {
        LOCK(m_nodes_mutex);
        for (const CNode *pnode : m_nodes) {
            if (pnode->addr.IsValid()) {
                mapConnected[pnode->addr] = pnode->IsInboundConn();
            }
            // Copy the name first so it can be moved into the map key.
            std::string addrName{pnode->m_addr_name};
            if (!addrName.empty()) {
                mapConnectedByName[std::move(addrName)] =
                    std::make_pair(pnode->IsInboundConn(),
                                   static_cast<const CService &>(pnode->addr));
            }
        }
    }

    for (const std::string &strAddNode : lAddresses) {
        CService service(
            LookupNumeric(strAddNode, Params().GetDefaultPort(strAddNode)));
        AddedNodeInfo addedNode{strAddNode, CService(), false, false};
        if (service.IsValid()) {
            // strAddNode is an IP:port
            auto it = mapConnected.find(service);
            if (it != mapConnected.end()) {
                addedNode.resolvedAddress = service;
                addedNode.fConnected = true;
                addedNode.fInbound = it->second;
            }
        } else {
            // strAddNode is a name
            auto it = mapConnectedByName.find(strAddNode);
            if (it != mapConnectedByName.end()) {
                addedNode.resolvedAddress = it->second.second;
                addedNode.fConnected = true;
                addedNode.fInbound = it->second.first;
            }
        }
        ret.emplace_back(std::move(addedNode));
    }

    return ret;
}
/**
 * Thread loop that keeps manually-added (addnode) peers connected.
 * Re-checks the added-node list periodically and opens MANUAL connections
 * for entries that are not currently connected.
 */
void CConnman::ThreadOpenAddedConnections() {
    while (true) {
        CSemaphoreGrant grant(*semAddnode);
        std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
        bool tried = false;
        for (const AddedNodeInfo &info : vInfo) {
            if (!info.fConnected) {
                if (!grant.TryAcquire()) {
                    // If we've used up our semaphore and need a new one, let's
                    // not wait here since while we are waiting the
                    // addednodeinfo state might change.
                    break;
                }
                tried = true;
                // Address is resolved from the name inside ConnectNode; only
                // the string destination is passed along here.
                CAddress addr(CService(), NODE_NONE);
                OpenNetworkConnection(addr, false, &grant,
                                      info.strAddedNode.c_str(),
                                      ConnectionType::MANUAL);
                // Pace connection attempts; bail out on shutdown.
                if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) {
                    return;
                }
            }
        }
        // Retry every 60 seconds if a connection was attempted, otherwise two
        // seconds.
        if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) {
            return;
        }
    }
}
// If successful, this moves the passed grant to the constructed node.
/**
 * Attempt a single outbound connection to addrConnect (or to the pszDest
 * host string when given), register the new node with all message
 * processors, and add it to m_nodes. Silently returns on any precondition
 * failure (shutdown, network inactive, banned/duplicate peer, connect
 * failure).
 */
void CConnman::OpenNetworkConnection(const CAddress &addrConnect,
                                     bool fCountFailure,
                                     CSemaphoreGrant *grantOutbound,
                                     const char *pszDest,
                                     ConnectionType conn_type) {
    assert(conn_type != ConnectionType::INBOUND);

    //
    // Initiate outbound network connection
    //
    if (interruptNet) {
        return;
    }
    if (!fNetworkActive) {
        return;
    }
    if (!pszDest) {
        // Connecting by address: refuse local, banned/discouraged, or
        // already-connected addresses.
        bool banned_or_discouraged =
            m_banman && (m_banman->IsDiscouraged(addrConnect) ||
                         m_banman->IsBanned(addrConnect));
        if (IsLocal(addrConnect) || banned_or_discouraged ||
            AlreadyConnectedToAddress(addrConnect)) {
            return;
        }
    } else if (FindNode(std::string(pszDest))) {
        // Connecting by name: refuse duplicates by address-name.
        return;
    }

    CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type);

    if (!pnode) {
        return;
    }
    if (grantOutbound) {
        // Transfer semaphore ownership to the node so the slot is released
        // when the node goes away.
        grantOutbound->MoveTo(pnode->grantOutbound);
    }

    for (auto interface : m_msgproc) {
        interface->InitializeNode(*config, *pnode, nLocalServices);
    }

    {
        LOCK(m_nodes_mutex);
        m_nodes.push_back(pnode);
    }
}
Mutex NetEventsInterface::g_msgproc_mutex;
/**
 * Main message-processing thread loop: for each connected node, drive the
 * registered message processors' ProcessMessages/SendMessages, then sleep
 * until woken or a short timeout elapses when no more work is pending.
 */
void CConnman::ThreadMessageHandler() {
    LOCK(NetEventsInterface::g_msgproc_mutex);

    FastRandomContext rng;
    while (!flagInterruptMsgProc) {
        // Snapshot the node list and take a reference on each entry so the
        // nodes cannot be deleted while we work without m_nodes_mutex held.
        std::vector<CNode *> nodes_copy;
        {
            LOCK(m_nodes_mutex);
            nodes_copy = m_nodes;
            for (CNode *pnode : nodes_copy) {
                pnode->AddRef();
            }
        }

        bool fMoreWork = false;

        // Randomize the order in which we process messages from/to our peers.
        // This prevents attacks in which an attacker exploits having multiple
        // consecutive connections in the m_nodes list.
        Shuffle(nodes_copy.begin(), nodes_copy.end(), rng);

        for (CNode *pnode : nodes_copy) {
            if (pnode->fDisconnect) {
                continue;
            }

            bool fMoreNodeWork = false;
            // Receive messages
            for (auto interface : m_msgproc) {
                fMoreNodeWork |= interface->ProcessMessages(
                    *config, pnode, flagInterruptMsgProc);
            }
            // Don't loop again for a paused peer even if it has more queued.
            fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend);
            if (flagInterruptMsgProc) {
                return;
            }

            // Send messages
            for (auto interface : m_msgproc) {
                interface->SendMessages(*config, pnode);
            }

            if (flagInterruptMsgProc) {
                return;
            }
        }

        // Drop the references taken above.
        {
            LOCK(m_nodes_mutex);
            for (CNode *pnode : nodes_copy) {
                pnode->Release();
            }
        }

        // Sleep up to 100ms unless more work is already known to be pending
        // or another thread wakes us via condMsgProc.
        WAIT_LOCK(mutexMsgProc, lock);
        if (!fMoreWork) {
            condMsgProc.wait_until(lock,
                                   std::chrono::steady_clock::now() +
                                       std::chrono::milliseconds(100),
                                   [this]() EXCLUSIVE_LOCKS_REQUIRED(
                                       mutexMsgProc) { return fMsgProcWake; });
        }
        fMsgProcWake = false;
    }
}
/**
 * Thread loop accepting incoming I2P connections via the SAM session.
 * On listen failure, backs off exponentially (1s doubling up to 5min) and
 * stops advertising the local I2P address until listening succeeds again.
 */
void CConnman::ThreadI2PAcceptIncoming() {
    static constexpr auto err_wait_begin = 1s;
    static constexpr auto err_wait_cap = 5min;
    auto err_wait = err_wait_begin;

    bool advertising_listen_addr = false;
    i2p::Connection conn;

    while (!interruptNet) {
        if (!m_i2p_sam_session->Listen(conn)) {
            // Listening failed: withdraw the advertised address and back off.
            if (advertising_listen_addr && conn.me.IsValid()) {
                RemoveLocal(conn.me);
                advertising_listen_addr = false;
            }

            interruptNet.sleep_for(err_wait);
            if (err_wait < err_wait_cap) {
                err_wait *= 2;
            }

            continue;
        }

        if (!advertising_listen_addr) {
            AddLocal(conn.me, LOCAL_MANUAL);
            advertising_listen_addr = true;
        }

        if (!m_i2p_sam_session->Accept(conn)) {
            continue;
        }

        // Hand the accepted socket over to the normal inbound-peer path.
        CreateNodeFromAcceptedSocket(
            conn.sock->Release(), NetPermissionFlags::None,
            CAddress{conn.me, NODE_NONE}, CAddress{conn.peer, NODE_NONE});
    }
}
/**
 * Create, configure, bind and listen on a socket for addrBind, and append it
 * to vhListenSocket with the given permission flags.
 *
 * @param[in]  addrBind    Address/port to listen on.
 * @param[out] strError    Human-readable error on failure.
 * @param[in]  permissions Permission flags applied to peers accepted here.
 * @return true on success, false (with strError set and logged) otherwise.
 */
bool CConnman::BindListenPort(const CService &addrBind, bilingual_str &strError,
                              NetPermissionFlags permissions) {
    int nOne = 1;

    // Create socket for listening for incoming connections
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) {
        strError = strprintf(
            Untranslated("Error: Bind address family for %s not supported"),
            addrBind.ToString());
        LogPrintf("%s\n", strError.original);
        return false;
    }

    std::unique_ptr<Sock> sock = CreateSock(addrBind);
    if (!sock) {
        strError =
            strprintf(Untranslated("Error: Couldn't open socket for incoming "
                                   "connections (socket returned error %s)"),
                      NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError.original);
        return false;
    }

    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted.
    setsockopt(sock->Get(), SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne,
               sizeof(int));

    // Some systems don't have IPV6_V6ONLY but are always v6only; others do have
    // the option and enable it by default or not. Try to enable it, if
    // possible.
    if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
        setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_V6ONLY,
                   (sockopt_arg_type)&nOne, sizeof(int));
#endif
#ifdef WIN32
        int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED;
        setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_PROTECTION_LEVEL,
                   (sockopt_arg_type)&nProtLevel, sizeof(int));
#endif
    }

    if (::bind(sock->Get(), (struct sockaddr *)&sockaddr, len) ==
        SOCKET_ERROR) {
        int nErr = WSAGetLastError();
        if (nErr == WSAEADDRINUSE) {
            strError = strprintf(_("Unable to bind to %s on this computer. %s "
                                   "is probably already running."),
                                 addrBind.ToString(), PACKAGE_NAME);
        } else {
            strError = strprintf(_("Unable to bind to %s on this computer "
                                   "(bind returned error %s)"),
                                 addrBind.ToString(), NetworkErrorString(nErr));
        }
        LogPrintf("%s\n", strError.original);
        return false;
    }
    LogPrintf("Bound to %s\n", addrBind.ToString());

    // Listen for incoming connections
    if (listen(sock->Get(), SOMAXCONN) == SOCKET_ERROR) {
        strError = strprintf(_("Error: Listening for incoming connections "
                               "failed (listen returned error %s)"),
                             NetworkErrorString(WSAGetLastError()));
        LogPrintf("%s\n", strError.original);
        return false;
    }

    // Ownership of the raw socket handle is transferred to ListenSocket.
    vhListenSocket.push_back(ListenSocket(sock->Release(), permissions));
    return true;
}
/**
 * Discover the local machine's routable addresses and register them as
 * local addresses (LOCAL_IF). No-op when -discover is off. Uses gethostname
 * on Windows and getifaddrs elsewhere (skipping loopback interfaces).
 */
void Discover() {
    if (!fDiscover) {
        return;
    }

#ifdef WIN32
    // Get local host IP
    char pszHostName[256] = "";
    if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) {
        std::vector<CNetAddr> vaddr;
        if (LookupHost(pszHostName, vaddr, 0, true)) {
            for (const CNetAddr &addr : vaddr) {
                if (AddLocal(addr, LOCAL_IF)) {
                    LogPrintf("%s: %s - %s\n", __func__, pszHostName,
                              addr.ToString());
                }
            }
        }
    }
#elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
    // Get local host ip
    struct ifaddrs *myaddrs;
    if (getifaddrs(&myaddrs) == 0) {
        for (struct ifaddrs *ifa = myaddrs; ifa != nullptr;
             ifa = ifa->ifa_next) {
            // Skip down interfaces and loopback ("lo" / "lo0").
            if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 ||
                strcmp(ifa->ifa_name, "lo") == 0 ||
                strcmp(ifa->ifa_name, "lo0") == 0) {
                continue;
            }
            if (ifa->ifa_addr->sa_family == AF_INET) {
                struct sockaddr_in *s4 =
                    reinterpret_cast<struct sockaddr_in *>(ifa->ifa_addr);
                CNetAddr addr(s4->sin_addr);
                if (AddLocal(addr, LOCAL_IF)) {
                    LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name,
                              addr.ToString());
                }
            } else if (ifa->ifa_addr->sa_family == AF_INET6) {
                struct sockaddr_in6 *s6 =
                    reinterpret_cast<struct sockaddr_in6 *>(ifa->ifa_addr);
                CNetAddr addr(s6->sin6_addr);
                if (AddLocal(addr, LOCAL_IF)) {
                    LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name,
                              addr.ToString());
                }
            }
        }
        freeifaddrs(myaddrs);
    }
#endif
}
/**
 * Toggle the network-active flag and notify the client UI interface when the
 * state actually changes. Does not itself disconnect existing peers.
 */
void CConnman::SetNetworkActive(bool active) {
    LogPrintf("%s: %s\n", __func__, active);

    // Nothing to do if the state is unchanged.
    if (fNetworkActive == active) {
        return;
    }

    fNetworkActive = active;

    if (m_client_interface) {
        m_client_interface->NotifyNetworkActiveChanged(fNetworkActive);
    }
}
/**
 * Construct the connection manager.
 *
 * @param configIn       Global config (stored by pointer).
 * @param nSeed0In/nSeed1In  Seeds for the deterministic SipHash randomizer.
 * @param addrmanIn      Address manager reference (not owned).
 * @param network_active Initial network-active state.
 */
CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In,
                   AddrMan &addrmanIn, bool network_active)
    : config(&configIn), addrman(addrmanIn), nSeed0(nSeed0In),
      nSeed1(nSeed1In) {
    SetTryNewOutboundPeer(false);

    // Apply default options; Start() re-runs Init with the real options.
    Options connOptions;
    Init(connOptions);
    SetNetworkActive(network_active);
}
/**
 * Hand out the next unique node id. The counter is atomic, so concurrent
 * callers each receive a distinct, monotonically increasing value.
 */
NodeId CConnman::GetNewNodeId() {
    // Atomic post-increment is equivalent to fetch_add(1): it returns the
    // value held before the increment.
    return nLastNodeId++;
}
/**
 * Bind a listening address, honoring the BF_* flags: BF_EXPLICIT bypasses
 * the reachability check, BF_REPORT_ERROR surfaces failures to the UI, and
 * BF_DONT_ADVERTISE suppresses advertising the address as local.
 * @return true if the bind succeeded.
 */
bool CConnman::Bind(const CService &addr, unsigned int flags,
                    NetPermissionFlags permissions) {
    if (!(flags & BF_EXPLICIT) && !IsReachable(addr)) {
        return false;
    }
    bilingual_str strError;
    if (!BindListenPort(addr, strError, permissions)) {
        if ((flags & BF_REPORT_ERROR) && m_client_interface) {
            m_client_interface->ThreadSafeMessageBox(
                strError, "", CClientUIInterface::MSG_ERROR);
        }
        return false;
    }

    // Advertise routable bound addresses, except NoBan (likely private)
    // binds and those explicitly marked not to be advertised.
    if (addr.IsRoutable() && fDiscover && !(flags & BF_DONT_ADVERTISE) &&
        !NetPermissions::HasFlag(permissions, NetPermissionFlags::NoBan)) {
        AddLocal(addr, LOCAL_BIND);
    }

    return true;
}
/**
 * Bind all configured listening addresses: explicit -bind entries,
 * whitelisted binds with their permission flags, onion binds (not
 * advertised), and optionally the IPv4/IPv6 wildcard addresses.
 * @return true if at least one bind succeeded.
 */
bool CConnman::InitBinds(const Options &options) {
    bool fBound = false;
    for (const auto &addrBind : options.vBinds) {
        fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR),
                       NetPermissionFlags::None);
    }
    for (const auto &addrBind : options.vWhiteBinds) {
        fBound |= Bind(addrBind.m_service, (BF_EXPLICIT | BF_REPORT_ERROR),
                       addrBind.m_flags);
    }
    for (const auto &addr_bind : options.onion_binds) {
        fBound |= Bind(addr_bind, BF_EXPLICIT | BF_DONT_ADVERTISE,
                       NetPermissionFlags::None);
    }
    if (options.bind_on_any) {
        struct in_addr inaddr_any;
        inaddr_any.s_addr = htonl(INADDR_ANY);
        struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT;
        fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE,
                       NetPermissionFlags::None);
        // Only report an error for the last attempt if nothing bound so far.
        fBound |=
            Bind(CService(inaddr_any, GetListenPort()),
                 !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::None);
    }
    return fBound;
}
/**
 * Initialize networking and launch all worker threads: socket handler,
 * optional DNS seeding, added-connection opener, outbound opener, message
 * handler, and optional I2P acceptor. Also schedules periodic peer dumps.
 * @return false on bind failure or conflicting connection options.
 */
bool CConnman::Start(CScheduler &scheduler, const Options &connOptions) {
    Init(connOptions);

    if (fListen && !InitBinds(connOptions)) {
        if (m_client_interface) {
            m_client_interface->ThreadSafeMessageBox(
                _("Failed to listen on any port. Use -listen=0 if you want "
                  "this."),
                "", CClientUIInterface::MSG_ERROR);
        }
        return false;
    }

    // Create the I2P SAM session up-front if an I2P proxy is configured.
    proxyType i2p_sam;
    if (GetProxy(NET_I2P, i2p_sam)) {
        m_i2p_sam_session = std::make_unique<i2p::sam::Session>(
            gArgs.GetDataDirNet() / "i2p_private_key", i2p_sam.proxy,
            &interruptNet);
    }

    for (const auto &strDest : connOptions.vSeedNodes) {
        AddAddrFetch(strDest);
    }

    if (m_use_addrman_outgoing) {
        // Load addresses from anchors.dat
        m_anchors =
            ReadAnchors(config->GetChainParams(),
                        gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME);
        if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
            m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
        }
        LogPrintf(
            "%i block-relay-only anchors will be tried for connections.\n",
            m_anchors.size());
    }

    if (m_client_interface) {
        m_client_interface->InitMessage(
            _("Starting network threads...").translated);
    }

    fAddressesInitialized = true;

    if (semOutbound == nullptr) {
        // initialize semaphore
        semOutbound = std::make_unique<CSemaphore>(
            std::min(m_max_outbound, nMaxConnections));
    }
    if (semAddnode == nullptr) {
        // initialize semaphore
        semAddnode = std::make_unique<CSemaphore>(nMaxAddnode);
    }

    //
    // Start threads
    //
    assert(m_msgproc.size() > 0);
    InterruptSocks5(false);
    interruptNet.reset();
    flagInterruptMsgProc = false;

    {
        LOCK(mutexMsgProc);
        fMsgProcWake = false;
    }

    // Send and receive from sockets, accept connections
    threadSocketHandler = std::thread(&util::TraceThread, "net",
                                      [this] { ThreadSocketHandler(); });

    if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)) {
        LogPrintf("DNS seeding disabled\n");
    } else {
        threadDNSAddressSeed = std::thread(&util::TraceThread, "dnsseed",
                                           [this] { ThreadDNSAddressSeed(); });
    }

    // Initiate manual connections
    threadOpenAddedConnections = std::thread(
        &util::TraceThread, "addcon", [this] { ThreadOpenAddedConnections(); });

    // -connect and addrman-driven outgoing connections are mutually
    // exclusive.
    if (connOptions.m_use_addrman_outgoing &&
        !connOptions.m_specified_outgoing.empty()) {
        if (m_client_interface) {
            m_client_interface->ThreadSafeMessageBox(
                _("Cannot provide specific connections and have addrman find "
                  "outgoing connections at the same."),
                "", CClientUIInterface::MSG_ERROR);
        }
        return false;
    }
    if (connOptions.m_use_addrman_outgoing ||
        !connOptions.m_specified_outgoing.empty()) {
        threadOpenConnections =
            std::thread(&util::TraceThread, "opencon",
                        [this, connect = connOptions.m_specified_outgoing] {
                            ThreadOpenConnections(connect, nullptr);
                        });
    }

    // Process messages
    threadMessageHandler = std::thread(&util::TraceThread, "msghand",
                                       [this] { ThreadMessageHandler(); });

    if (connOptions.m_i2p_accept_incoming &&
        m_i2p_sam_session.get() != nullptr) {
        threadI2PAcceptIncoming =
            std::thread(&util::TraceThread, "i2paccept",
                        [this] { ThreadI2PAcceptIncoming(); });
    }

    // Dump network addresses
    scheduler.scheduleEvery(
        [this]() {
            this->DumpAddresses();
            return true;
        },
        DUMP_PEERS_INTERVAL);

    return true;
}
/**
 * Process-lifetime helper whose destructor tears down platform network
 * state (Winsock on Windows). A single static instance below guarantees
 * the cleanup runs at program exit.
 */
class CNetCleanup {
public:
    CNetCleanup() = default;

    ~CNetCleanup() {
#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
};
static CNetCleanup instance_of_cnetcleanup;
/**
 * Signal every worker thread to stop: wake the message handler, trip the
 * network interrupt, abort SOCKS5 handshakes, and post the outbound/addnode
 * semaphores so blocked acquirers return. Threads are joined later in
 * StopThreads().
 */
void CConnman::Interrupt() {
    {
        LOCK(mutexMsgProc);
        flagInterruptMsgProc = true;
    }
    condMsgProc.notify_all();

    interruptNet();
    InterruptSocks5(true);

    if (semOutbound) {
        // One post per potential waiter.
        for (int i = 0; i < m_max_outbound; i++) {
            semOutbound->post();
        }
    }

    if (semAddnode) {
        for (int i = 0; i < nMaxAddnode; i++) {
            semAddnode->post();
        }
    }
}
/**
 * Join all worker threads started in Start(). Call after Interrupt() so the
 * threads have been told to exit; each join is guarded since some threads
 * are optional.
 */
void CConnman::StopThreads() {
    if (threadI2PAcceptIncoming.joinable()) {
        threadI2PAcceptIncoming.join();
    }
    if (threadMessageHandler.joinable()) {
        threadMessageHandler.join();
    }
    if (threadOpenConnections.joinable()) {
        threadOpenConnections.join();
    }
    if (threadOpenAddedConnections.joinable()) {
        threadOpenAddedConnections.join();
    }
    if (threadDNSAddressSeed.joinable()) {
        threadDNSAddressSeed.join();
    }
    if (threadSocketHandler.joinable()) {
        threadSocketHandler.join();
    }
}
/**
 * Tear down all connection state: persist addresses and anchors (clean
 * shutdown only), disconnect and delete every peer, close listening
 * sockets, and release the semaphores.
 */
void CConnman::StopNodes() {
    if (fAddressesInitialized) {
        DumpAddresses();
        fAddressesInitialized = false;

        if (m_use_addrman_outgoing) {
            // Anchor connections are only dumped during clean shutdown.
            std::vector<CAddress> anchors_to_dump =
                GetCurrentBlockRelayOnlyConns();
            if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
                anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
            }
            DumpAnchors(config->GetChainParams(),
                        gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME,
                        anchors_to_dump);
        }
    }

    // Delete peer connections.
    std::vector<CNode *> nodes;
    // Swap out the list under the lock; deletion happens outside it.
    WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes));
    for (CNode *pnode : nodes) {
        pnode->CloseSocketDisconnect();
        DeleteNode(pnode);
    }

    // Close listening sockets.
    for (ListenSocket &hListenSocket : vhListenSocket) {
        if (hListenSocket.socket != INVALID_SOCKET) {
            if (!CloseSocket(hListenSocket.socket)) {
                LogPrintf("CloseSocket(hListenSocket) failed with error %s\n",
                          NetworkErrorString(WSAGetLastError()));
            }
        }
    }

    for (CNode *pnode : m_nodes_disconnected) {
        DeleteNode(pnode);
    }
    m_nodes_disconnected.clear();
    vhListenSocket.clear();
    semOutbound.reset();
    semAddnode.reset();
}
/**
 * Finalize a node with every registered message processor, then free it.
 * Caller must ensure the node is no longer referenced elsewhere.
 */
void CConnman::DeleteNode(CNode *pnode) {
    assert(pnode);
    for (auto interface : m_msgproc) {
        interface->FinalizeNode(*config, *pnode);
    }
    delete pnode;
}
// Destructor performs a full shutdown: signal threads, then join and clean
// up all connection state.
CConnman::~CConnman() {
    Interrupt();
    Stop();
}
/**
 * Return up to max_addresses (capped at max_pct percent of addrman) known
 * addresses, optionally restricted to one network, with banned or
 * discouraged entries filtered out.
 */
std::vector<CAddress>
CConnman::GetAddresses(size_t max_addresses, size_t max_pct,
                       std::optional<Network> network) const {
    std::vector<CAddress> addresses =
        addrman.GetAddr(max_addresses, max_pct, network);
    if (m_banman) {
        // Erase-remove idiom: drop banned/discouraged addresses in place.
        addresses.erase(std::remove_if(addresses.begin(), addresses.end(),
                                       [this](const CAddress &addr) {
                                           return m_banman->IsDiscouraged(
                                                      addr) ||
                                                  m_banman->IsBanned(addr);
                                       }),
                        addresses.end());
    }
    return addresses;
}
/**
 * Like GetAddresses() above, but cached per requestor network/local-socket
 * to limit how much AddrMan topology information a peer can scrape via
 * repeated GETADDR requests.
 */
std::vector<CAddress>
CConnman::GetAddresses(CNode &requestor, size_t max_addresses, size_t max_pct) {
    // Cache key depends on the requestor's network and our local socket
    // address, keyed through the deterministic randomizer so it is not
    // predictable by peers.
    auto local_socket_bytes = requestor.addrBind.GetAddrBytes();
    uint64_t cache_id =
        GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE)
            .Write(requestor.addr.GetNetwork())
            .Write(local_socket_bytes.data(), local_socket_bytes.size())
            .Finalize();
    const auto current_time = GetTime<std::chrono::microseconds>();
    auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
    CachedAddrResponse &cache_entry = r.first->second;
    // New CachedAddrResponse have expiration 0.
    if (cache_entry.m_cache_entry_expiration < current_time) {
        cache_entry.m_addrs_response_cache =
            GetAddresses(max_addresses, max_pct, /* network */ std::nullopt);
        // Choosing a proper cache lifetime is a trade-off between the privacy
        // leak minimization and the usefulness of ADDR responses to honest
        // users.
        //
        // Longer cache lifetime makes it more difficult for an attacker to
        // scrape enough AddrMan data to maliciously infer something useful. By
        // the time an attacker scraped enough AddrMan records, most of the
        // records should be old enough to not leak topology info by e.g.
        // analyzing real-time changes in timestamps.
        //
        // It takes only several hundred requests to scrape everything from an
        // AddrMan containing 100,000 nodes, so ~24 hours of cache lifetime
        // indeed makes the data less inferable by the time most of it could be
        // scraped (considering that timestamps are updated via ADDR
        // self-announcements and when nodes communicate). We also should be
        // robust to those attacks which may not require scraping *full*
        // victim's AddrMan (because even several timestamps of the same handful
        // of nodes may leak privacy).
        //
        // On the other hand, longer cache lifetime makes ADDR responses
        // outdated and less useful for an honest requestor, e.g. if most nodes
        // in the ADDR response are no longer active.
        //
        // However, the churn in the network is known to be rather low. Since we
        // consider nodes to be "terrible" (see IsTerrible()) if the timestamps
        // are older than 30 days, max. 24 hours of "penalty" due to cache
        // shouldn't make any meaningful difference in terms of the freshness of
        // the response.
        cache_entry.m_cache_entry_expiration =
            current_time + std::chrono::hours(21) +
            GetRandMillis(std::chrono::hours(6));
    }
    return cache_entry.m_addrs_response_cache;
}
/**
 * Register a node string for the manual-connection (addnode) machinery.
 * @return false if the entry already exists, true once added.
 */
bool CConnman::AddNode(const std::string &strNode) {
    LOCK(m_added_nodes_mutex);
    // Reject duplicates; std::find replaces the hand-rolled scan loop.
    if (std::find(m_added_nodes.begin(), m_added_nodes.end(), strNode) !=
        m_added_nodes.end()) {
        return false;
    }

    m_added_nodes.push_back(strNode);
    return true;
}
/**
 * Remove a previously added node string from the manual-connection list.
 * @return true if an entry was found and erased, false otherwise.
 */
bool CConnman::RemoveAddedNode(const std::string &strNode) {
    LOCK(m_added_nodes_mutex);
    // std::find + erase replaces the manual iterator loop.
    auto it = std::find(m_added_nodes.begin(), m_added_nodes.end(), strNode);
    if (it == m_added_nodes.end()) {
        return false;
    }
    m_added_nodes.erase(it);
    return true;
}
/**
 * Count connected nodes matching the direction flags (CONNECTIONS_IN,
 * CONNECTIONS_OUT, or CONNECTIONS_ALL).
 */
size_t CConnman::GetNodeCount(NumConnections flags) const {
    LOCK(m_nodes_mutex);

    // Shortcut if we want total
    if (flags == CConnman::CONNECTIONS_ALL) {
        return m_nodes.size();
    }

    // std::count_if avoids the signed int accumulator the original used
    // (the count was accumulated in an int but returned as size_t).
    return std::count_if(
        m_nodes.begin(), m_nodes.end(), [flags](const CNode *pnode) {
            return (flags & (pnode->IsInboundConn() ? CONNECTIONS_IN
                                                    : CONNECTIONS_OUT)) != 0;
        });
}
/**
 * Fill vstats with a per-node statistics snapshot, replacing any previous
 * contents. Also annotates each entry with its asmap-mapped AS number.
 */
void CConnman::GetNodeStats(std::vector<CNodeStats> &vstats) const {
    vstats.clear();
    LOCK(m_nodes_mutex);
    vstats.reserve(m_nodes.size());
    for (CNode *pnode : m_nodes) {
        vstats.emplace_back();
        pnode->copyStats(vstats.back());
        vstats.back().m_mapped_as = pnode->addr.GetMappedAS(addrman.GetAsmap());
    }
}
/**
 * Flag the peer with the given address-name for disconnection.
 * @return true if a matching peer was found.
 */
bool CConnman::DisconnectNode(const std::string &strNode) {
    LOCK(m_nodes_mutex);
    if (CNode *pnode = FindNode(strNode)) {
        LogPrint(BCLog::NET,
                 "disconnect by address%s matched peer=%d; disconnecting\n",
                 (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->GetId());
        // The socket thread performs the actual disconnect.
        pnode->fDisconnect = true;
        return true;
    }
    return false;
}
/**
 * Flag every peer whose address falls within the subnet for disconnection.
 * @return true if at least one peer matched.
 */
bool CConnman::DisconnectNode(const CSubNet &subnet) {
    bool disconnected = false;
    LOCK(m_nodes_mutex);
    for (CNode *pnode : m_nodes) {
        if (subnet.Match(pnode->addr)) {
            LogPrint(BCLog::NET,
                     "disconnect by subnet%s matched peer=%d; disconnecting\n",
                     (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""),
                     pnode->GetId());
            pnode->fDisconnect = true;
            disconnected = true;
        }
    }
    return disconnected;
}
// Single-address convenience overload: delegates to the subnet overload with
// a /32 (or /128) subnet containing only this address.
bool CConnman::DisconnectNode(const CNetAddr &addr) {
    return DisconnectNode(CSubNet(addr));
}
/**
 * Flag the peer with the given node id for disconnection.
 * @return true if a matching peer was found.
 */
bool CConnman::DisconnectNode(NodeId id) {
    LOCK(m_nodes_mutex);
    for (CNode *pnode : m_nodes) {
        if (id == pnode->GetId()) {
            LogPrint(BCLog::NET, "disconnect by id peer=%d; disconnecting\n",
                     pnode->GetId());
            pnode->fDisconnect = true;
            return true;
        }
    }
    return false;
}
// Account received bytes in the global counter. No lock is taken here —
// presumably nTotalBytesRecv is atomic; confirm against the declaration.
void CConnman::RecordBytesRecv(uint64_t bytes) {
    nTotalBytesRecv += bytes;
}
/**
 * Account sent bytes in the global counter and in the current upload-limit
 * cycle, starting a fresh cycle when the previous timeframe has elapsed.
 */
void CConnman::RecordBytesSent(uint64_t bytes) {
    LOCK(cs_totalBytesSent);
    nTotalBytesSent += bytes;

    const auto now = GetTime<std::chrono::seconds>();
    if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now) {
        // timeframe expired, reset cycle
        nMaxOutboundCycleStartTime = now;
        nMaxOutboundTotalBytesSentInCycle = 0;
    }

    // TODO, exclude peers with download permission
    nMaxOutboundTotalBytesSentInCycle += bytes;
}
// Return the configured per-cycle outbound traffic limit (0 = unlimited).
uint64_t CConnman::GetMaxOutboundTarget() const {
    LOCK(cs_totalBytesSent);
    return nMaxOutboundLimit;
}
// Length of the upload-limit accounting cycle (a compile-time constant).
std::chrono::seconds CConnman::GetMaxOutboundTimeframe() const {
    return MAX_UPLOAD_TIMEFRAME;
}
/**
 * Time remaining in the current upload-limit cycle: zero with no limit
 * configured or when the cycle already expired, the full timeframe before
 * the first send of a cycle, otherwise the time until the cycle ends.
 */
std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() const {
    LOCK(cs_totalBytesSent);

    // No limit configured: nothing to count down.
    if (nMaxOutboundLimit == 0) {
        return 0s;
    }

    // Cycle not started yet: the whole window remains.
    if (nMaxOutboundCycleStartTime.count() == 0) {
        return MAX_UPLOAD_TIMEFRAME;
    }

    const auto now = GetTime<std::chrono::seconds>();
    const std::chrono::seconds cycle_end =
        nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME;
    // Clamp at zero once the cycle end has passed.
    return std::max(0s, cycle_end - now);
}
/**
 * Check whether the outbound traffic limit has been reached.
 *
 * @param historicalBlockServingLimit When true, reserve headroom of ~1MB per
 *        remaining 10-minute interval so each block can still be relayed
 *        once before the cycle ends.
 * @return true when the (possibly buffered) limit is hit; always false when
 *         no limit is configured.
 */
bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) const {
    LOCK(cs_totalBytesSent);
    if (nMaxOutboundLimit == 0) {
        return false;
    }

    if (historicalBlockServingLimit) {
        // keep a large enough buffer to at least relay each block once.
        const std::chrono::seconds timeLeftInCycle =
            GetMaxOutboundTimeLeftInCycle();
        const uint64_t buffer =
            timeLeftInCycle / std::chrono::minutes{10} * ONE_MEGABYTE;
        if (buffer >= nMaxOutboundLimit ||
            nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) {
            return true;
        }
    } else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) {
        return true;
    }

    return false;
}
/**
 * Bytes still available in the current upload-limit cycle; zero when no
 * limit is configured or when the limit has been reached or exceeded.
 */
uint64_t CConnman::GetOutboundTargetBytesLeft() const {
    LOCK(cs_totalBytesSent);

    // Unlimited mode reports zero, mirroring the "no target" convention.
    if (nMaxOutboundLimit == 0) {
        return 0;
    }

    // Already at or over the target: nothing left in this cycle.
    if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) {
        return 0;
    }

    return nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle;
}
// Total bytes received over this CConnman's lifetime (lockless read —
// presumably the counter is atomic; see RecordBytesRecv).
uint64_t CConnman::GetTotalBytesRecv() const {
    return nTotalBytesRecv;
}
// Total bytes sent over this CConnman's lifetime, read under the same lock
// that RecordBytesSent writes with.
uint64_t CConnman::GetTotalBytesSent() const {
    LOCK(cs_totalBytesSent);
    return nTotalBytesSent;
}
// Service flags this node advertises to peers.
ServiceFlags CConnman::GetLocalServices() const {
    return nLocalServices;
}
// Receive-buffer size above which a peer's processing is paused.
unsigned int CConnman::GetReceiveFloodSize() const {
    return nReceiveFloodSize;
}
// Add to the poll counter packed in the low 32 bits of invCounters
// (the vote counter lives in the high 32 bits, see invsVoted).
void CNode::invsPolled(uint32_t count) {
    invCounters += count;
}
// Add to the vote counter packed in the high 32 bits of invCounters.
void CNode::invsVoted(uint32_t count) {
    invCounters += uint64_t(count) << 32;
}
/**
 * Update the avalanche availability score as an exponentially weighted
 * moving average of (2 * votes - polls) over the last window. No-op unless
 * avalanche is enabled for this peer.
 */
void CNode::updateAvailabilityScore(double decayFactor) {
    if (!m_avalanche_enabled) {
        return;
    }

    // Atomically grab and reset the packed poll/vote counters for this
    // window (polls in the low 32 bits, votes in the high 32 bits).
    uint64_t windowInvCounters = invCounters.exchange(0);
    double previousScore = availabilityScore;

    int64_t polls = windowInvCounters & std::numeric_limits<uint32_t>::max();
    int64_t votes = windowInvCounters >> 32;

    availabilityScore =
        decayFactor * (2 * votes - polls) + (1. - decayFactor) * previousScore;
}
double CNode::getAvailabilityScore() const {
    // The score is set atomically so there is no need to lock the statistics
    // when reading.
    return availabilityScore;
}
/**
 * Construct a peer. Takes ownership of the socket handle; initializes the
 * per-message byte counters and the V1 transport (de)serializer.
 *
 * @param addrNameIn  If empty, the peer's IP:port string is used as name.
 * @param inbound_onion  Must only be set for INBOUND connections.
 */
CNode::CNode(NodeId idIn, SOCKET hSocketIn, const CAddress &addrIn,
             uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn,
             uint64_t nLocalExtraEntropyIn, const CAddress &addrBindIn,
             const std::string &addrNameIn, ConnectionType conn_type_in,
             bool inbound_onion)
    : m_connected(GetTime<std::chrono::seconds>()), addr(addrIn),
      addrBind(addrBindIn), m_addr_name{addrNameIn.empty()
                                            ? addr.ToStringIPPort()
                                            : addrNameIn},
      m_inbound_onion(inbound_onion), nKeyedNetGroup(nKeyedNetGroupIn),
      // Don't relay addr messages to peers that we connect to as
      // block-relay-only peers (to prevent adversaries from inferring these
      // links from addr traffic).
      id(idIn), nLocalHostNonce(nLocalHostNonceIn),
      nLocalExtraEntropy(nLocalExtraEntropyIn), m_conn_type(conn_type_in) {
    if (inbound_onion) {
        assert(conn_type_in == ConnectionType::INBOUND);
    }
    hSocket = hSocketIn;
    // Pre-populate the receive accounting map so every known message type
    // has an entry; unknown types are counted under the OTHER bucket.
    for (const std::string &msg : getAllNetMessageTypes()) {
        mapRecvBytesPerMsgCmd[msg] = 0;
    }
    mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;

    if (fLogIPs) {
        LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", m_addr_name,
                 id);
    } else {
        LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
    }

    m_deserializer = std::make_unique<V1TransportDeserializer>(
        V1TransportDeserializer(GetConfig().GetChainParams().NetMagic(),
                                SER_NETWORK, INIT_PROTO_VERSION));
    m_serializer =
        std::make_unique<V1TransportSerializer>(V1TransportSerializer());
}
// Destructor releases the owned socket handle.
CNode::~CNode() {
    CloseSocket(hSocket);
}
// A node is "fully connected" once the version handshake completed and it is
// not marked for disconnection. Null-safe.
bool CConnman::NodeFullyConnected(const CNode *pnode) {
    return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect;
}
/**
 * Queue a serialized message for sending to a peer: wrap it in the network
 * transport header, append it to the node's send queue (pausing the node if
 * the buffer limit is exceeded), and attempt an immediate "optimistic" send
 * when the queue was empty.
 */
void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) {
    size_t nMessageSize = msg.data.size();
    LogPrint(BCLog::NETDEBUG, "sending %s (%d bytes) peer=%d\n", msg.m_type,
             nMessageSize, pnode->GetId());
    if (gArgs.GetBoolArg("-capturemessages", false)) {
        CaptureMessage(pnode->addr, msg.m_type, msg.data,
                       /*is_incoming=*/false);
    }

    TRACE6(net, outbound_message, pnode->GetId(), pnode->m_addr_name.c_str(),
           pnode->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
           msg.data.size(), msg.data.data());

    // make sure we use the appropriate network transport format
    std::vector<uint8_t> serializedHeader;
    pnode->m_serializer->prepareForTransport(*config, msg, serializedHeader);
    size_t nTotalSize = nMessageSize + serializedHeader.size();

    size_t nBytesSent = 0;
    {
        LOCK(pnode->cs_vSend);
        bool optimisticSend(pnode->vSendMsg.empty());

        // log total amount of bytes per message type
        pnode->mapSendBytesPerMsgCmd[msg.m_type] += nTotalSize;
        pnode->nSendSize += nTotalSize;

        if (pnode->nSendSize > nSendBufferMaxSize) {
            pnode->fPauseSend = true;
        }
        pnode->vSendMsg.push_back(std::move(serializedHeader));
        if (nMessageSize) {
            pnode->vSendMsg.push_back(std::move(msg.data));
        }

        // If write queue empty, attempt "optimistic write"
        if (optimisticSend == true) {
            nBytesSent = SocketSendData(*pnode);
        }
    }
    // Account sent bytes outside the node's send lock.
    if (nBytesSent) {
        RecordBytesSent(nBytesSent);
    }
}
/**
 * Invoke func on the fully-connected node with the given id, if any.
 * @return true only when such a node exists AND func returned true.
 */
bool CConnman::ForNode(NodeId id, std::function<bool(CNode *pnode)> func) {
    LOCK(m_nodes_mutex);
    // Locate the node carrying the requested id.
    auto it = std::find_if(
        m_nodes.begin(), m_nodes.end(),
        [id](CNode *candidate) { return candidate->GetId() == id; });
    CNode *match = (it == m_nodes.end()) ? nullptr : *it;
    // Only call back for live, handshake-complete nodes.
    return match != nullptr && NodeFullyConnected(match) && func(match);
}
// A SipHasher seeded with this CConnman's secret seeds plus the caller's
// domain-separation id; deterministic for the process lifetime.
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const {
    return CSipHasher(nSeed0, nSeed1).Write(id);
}
// Keyed hash of the address's (asmap-aware) network group, used to diversify
// connections without revealing the actual grouping to observers.
uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const {
    std::vector<uint8_t> vchNetGroup(ad.GetGroup(addrman.GetAsmap()));

    return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP)
        .Write(vchNetGroup.data(), vchNetGroup.size())
        .Finalize();
}
/**
 * This function convert MaxBlockSize from byte to
 * MB with a decimal precision one digit rounded down
 * E.g.
 * 1660000 -> 1.6
 * 2010000 -> 2.0
 * 1000000 -> 1.0
 * 230000 -> 0.2
 * 50000 -> 0.0
 *
 * NB behavior for EB<1MB not standardized yet still
 * the function applies the same algo used for
 * EB greater or equal to 1MB
 */
std::string getSubVersionEB(uint64_t MaxBlockSize) {
    // Number of tenths of a megabyte, floored (e.g. 1660000 -> 16).
    // std::to_string replaces the stringstream round-trip.
    std::string eb = std::to_string(MaxBlockSize / (ONE_MEGABYTE / 10));
    // Insert the decimal point before the last digit ("16" -> "1.6").
    eb.insert(eb.size() - 1, ".");
    // Values below 1MB produce a leading "." (e.g. ".2"): pad with a zero.
    // front() avoids the temporary string substr(0, 1) allocated.
    if (eb.front() == '.') {
        eb = "0" + eb;
    }
    return eb;
}
/**
 * Build the user-agent string advertised in the version message, composed of
 * the client name/version plus an "EB<x.y>" excessive-blocksize comment and
 * any -uacomment entries.
 */
std::string userAgent(const Config &config) {
    // format excessive blocksize value
    std::string eb = getSubVersionEB(config.GetMaxBlockSize());
    std::vector<std::string> uacomments;
    uacomments.push_back("EB" + eb);

    // Comments are checked for char compliance at startup, it is safe to add
    // them to the user agent string
    for (const std::string &cmt : gArgs.GetArgs("-uacomment")) {
        uacomments.push_back(cmt);
    }

    const std::string client_name = gArgs.GetArg("-uaclientname", CLIENT_NAME);
    const std::string client_version =
        gArgs.GetArg("-uaclientversion", FormatVersion(CLIENT_VERSION));

    // Size compliance is checked at startup, it is safe to not check it again
    return FormatUserAgent(client_name, client_version, uacomments);
}
/**
 * Append one captured P2P message to the per-peer capture file
 * (msgs_recv.dat / msgs_sent.dat) as: timestamp (us), zero-padded message
 * type, payload length, payload bytes.
 *
 * NOTE: the source contained unresolved '-'/'+' diff-marker lines here;
 * resolved to the post-patch std::byte-span write() form, matching the same
 * conversion applied elsewhere in this change.
 */
void CaptureMessageToFile(const CAddress &addr, const std::string &msg_type,
                          Span<const uint8_t> data, bool is_incoming) {
    // Note: This function captures the message at the time of processing,
    // not at socket receive/send time.
    // This ensures that the messages are always in order from an application
    // layer (processing) perspective.
    auto now = GetTime<std::chrono::microseconds>();

    // Windows folder names can not include a colon
    std::string clean_addr = addr.ToString();
    std::replace(clean_addr.begin(), clean_addr.end(), ':', '_');

    fs::path base_path = gArgs.GetDataDirNet() / "message_capture" / clean_addr;
    fs::create_directories(base_path);

    fs::path path =
        base_path / (is_incoming ? "msgs_recv.dat" : "msgs_sent.dat");
    AutoFile f{fsbridge::fopen(path, "ab")};

    ser_writedata64(f, now.count());
    f.write(MakeByteSpan(msg_type));
    // Pad the message type with NUL bytes up to the fixed command width.
    for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) {
        f << uint8_t{'\0'};
    }

    uint32_t size = data.size();
    ser_writedata32(f, size);
    f.write(AsBytes(data));
}
// Message-capture hook; defaults to file capture but is replaceable
// (e.g. by tests) since it is a mutable std::function.
std::function<void(const CAddress &addr, const std::string &msg_type,
                   Span<const uint8_t> data, bool is_incoming)>
    CaptureMessage = CaptureMessageToFile;
diff --git a/src/psbt.cpp b/src/psbt.cpp
index b537ed4dd..b54faf78c 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -1,321 +1,321 @@
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <psbt.h>
#include <util/check.h>
#include <util/strencodings.h>
// Construct a PSBT around an unsigned transaction, creating one empty
// PSBTInput/PSBTOutput entry per transaction input/output.
PartiallySignedTransaction::PartiallySignedTransaction(
    const CMutableTransaction &txIn)
    : tx(txIn) {
    inputs.resize(txIn.vin.size());
    outputs.resize(txIn.vout.size());
}
// A PSBT is null when it carries no transaction and no per-input,
// per-output, or unknown key/value data at all.
bool PartiallySignedTransaction::IsNull() const {
    if (tx) {
        return false;
    }
    return inputs.empty() && outputs.empty() && unknown.empty();
}
// Merge another PSBT's data into this one. Returns false (and changes
// nothing) when the two PSBTs are built over different transactions.
bool PartiallySignedTransaction::Merge(const PartiallySignedTransaction &psbt) {
    // Prohibited to merge two PSBTs over different transactions.
    if (tx->GetId() != psbt.tx->GetId()) {
        return false;
    }
    for (size_t idx = 0; idx < inputs.size(); ++idx) {
        inputs[idx].Merge(psbt.inputs[idx]);
    }
    for (size_t idx = 0; idx < outputs.size(); ++idx) {
        outputs[idx].Merge(psbt.outputs[idx]);
    }
    unknown.insert(psbt.unknown.begin(), psbt.unknown.end());
    return true;
}
// Append a new input and its PSBT record. Rejects duplicates: each txin may
// appear at most once.
bool PartiallySignedTransaction::AddInput(const CTxIn &txin,
                                          PSBTInput &psbtin) {
    const auto &vin = tx->vin;
    if (std::find(vin.begin(), vin.end(), txin) != vin.end()) {
        return false;
    }
    tx->vin.push_back(txin);

    // Drop any signature data carried on the supplied record before storing.
    psbtin.partial_sigs.clear();
    psbtin.final_script_sig.clear();
    inputs.push_back(psbtin);
    return true;
}
// Append a new output and its PSBT record; outputs are added
// unconditionally, so this always reports success.
bool PartiallySignedTransaction::AddOutput(const CTxOut &txout,
                                           const PSBTOutput &psbtout) {
    tx->vout.push_back(txout);
    outputs.push_back(psbtout);
    return true;
}
/**
 * Look up the UTXO being spent by the given input.
 *
 * @param[out] utxo        Receives the input's utxo when known.
 * @param      input_index Index into `inputs`; throws std::out_of_range when
 *                         out of bounds (the original used unchecked
 *                         operator[], which is undefined behaviour there).
 * @return true when the input carries a non-null utxo.
 */
bool PartiallySignedTransaction::GetInputUTXO(CTxOut &utxo,
                                              int input_index) const {
    // Take a const reference: the original copied the entire PSBTInput
    // (scripts, keypaths, unknown map) just to inspect one field.
    const PSBTInput &input = inputs.at(input_index);
    if (input.utxo.IsNull()) {
        return false;
    }
    utxo = input.utxo;
    return true;
}
// An input record is null when every one of its fields is empty/unset.
// (All operands are side-effect free, so evaluation order is immaterial.)
bool PSBTInput::IsNull() const {
    return utxo.IsNull() && redeem_script.empty() && partial_sigs.empty() &&
           hd_keypaths.empty() && unknown.empty();
}
// Export this input's data into a SignatureData for the signing code.
void PSBTInput::FillSignatureData(SignatureData &sigdata) const {
    // A finalized scriptSig marks the signature data complete.
    if (!final_script_sig.empty()) {
        sigdata.scriptSig = final_script_sig;
        sigdata.complete = true;
    }
    // Nothing more to contribute once complete — whether it was already
    // complete on entry or became complete just above.
    if (sigdata.complete) {
        return;
    }

    for (const auto &sig_entry : partial_sigs) {
        sigdata.signatures.insert(sig_entry);
    }
    if (!redeem_script.empty()) {
        sigdata.redeem_script = redeem_script;
    }
    for (const auto &keypath : hd_keypaths) {
        sigdata.misc_pubkeys.emplace(keypath.first.GetID(), keypath);
    }
}
// Absorb the results of a signing attempt back into this input record.
void PSBTInput::FromSignatureData(const SignatureData &sigdata) {
    if (sigdata.complete) {
        // Finalized: intermediate signing data is no longer needed.
        partial_sigs.clear();
        hd_keypaths.clear();
        redeem_script.clear();

        if (!sigdata.scriptSig.empty()) {
            final_script_sig = sigdata.scriptSig;
        }
        return;
    }

    // Not complete yet: accumulate whatever the signer produced.
    for (const auto &sig_entry : sigdata.signatures) {
        partial_sigs.insert(sig_entry);
    }
    if (redeem_script.empty() && !sigdata.redeem_script.empty()) {
        redeem_script = sigdata.redeem_script;
    }
    for (const auto &entry : sigdata.misc_pubkeys) {
        hd_keypaths.emplace(entry.second);
    }
}
// Merge another input record into this one. Existing data wins: fields
// already populated here are never overwritten.
void PSBTInput::Merge(const PSBTInput &input) {
    if (utxo.IsNull() && !input.utxo.IsNull()) {
        utxo = input.utxo;
    }
    if (redeem_script.empty() && !input.redeem_script.empty()) {
        redeem_script = input.redeem_script;
    }
    if (final_script_sig.empty() && !input.final_script_sig.empty()) {
        final_script_sig = input.final_script_sig;
    }

    // Map inserts keep the existing entry on key collision.
    partial_sigs.insert(input.partial_sigs.begin(), input.partial_sigs.end());
    hd_keypaths.insert(input.hd_keypaths.begin(), input.hd_keypaths.end());
    unknown.insert(input.unknown.begin(), input.unknown.end());
}
// Export this output's script/keypath data into a SignatureData.
void PSBTOutput::FillSignatureData(SignatureData &sigdata) const {
    if (!redeem_script.empty()) {
        sigdata.redeem_script = redeem_script;
    }
    for (const auto &keypath : hd_keypaths) {
        sigdata.misc_pubkeys.emplace(keypath.first.GetID(), keypath);
    }
}
// Absorb script/keypath metadata into this output record; an existing
// redeem_script is never overwritten.
void PSBTOutput::FromSignatureData(const SignatureData &sigdata) {
    if (redeem_script.empty() && !sigdata.redeem_script.empty()) {
        redeem_script = sigdata.redeem_script;
    }
    for (const auto &pk_entry : sigdata.misc_pubkeys) {
        hd_keypaths.emplace(pk_entry.second);
    }
}
// An output record is null when it carries no data at all.
bool PSBTOutput::IsNull() const {
    return hd_keypaths.empty() && redeem_script.empty() && unknown.empty();
}
// Merge another output record into this one; an existing redeem_script is
// kept in preference to the incoming one.
void PSBTOutput::Merge(const PSBTOutput &output) {
    if (redeem_script.empty() && !output.redeem_script.empty()) {
        redeem_script = output.redeem_script;
    }
    hd_keypaths.insert(output.hd_keypaths.begin(), output.hd_keypaths.end());
    unknown.insert(output.unknown.begin(), output.unknown.end());
}
// An input counts as signed once its final scriptSig has been set.
bool PSBTInputSigned(const PSBTInput &input) {
    return input.final_script_sig.size() > 0;
}
void UpdatePSBTOutput(const SigningProvider &provider,
PartiallySignedTransaction &psbt, int index) {
CMutableTransaction &tx = *Assert(psbt.tx);
const CTxOut &out = tx.vout.at(index);
PSBTOutput &psbt_out = psbt.outputs.at(index);
// Fill a SignatureData with output info
SignatureData sigdata;
psbt_out.FillSignatureData(sigdata);
// Construct a would-be spend of this output, to update sigdata with.
// Note that ProduceSignature is used to fill in metadata (not actual
// signatures), so provider does not need to provide any private keys (it
// can be a HidingSigningProvider).
MutableTransactionSignatureCreator creator(&tx, /* index */ 0, out.nValue,
SigHashType().withForkId());
ProduceSignature(provider, creator, out.scriptPubKey, sigdata);
// Put redeem_script and key paths, into PSBTOutput.
psbt_out.FromSignatureData(sigdata);
}
// Attempt to sign the input at `index` with keys from `provider`.
// Returns true when the input ends up with a complete signature.
bool SignPSBTInput(const SigningProvider &provider,
                   PartiallySignedTransaction &psbt, int index,
                   SigHashType sighash, SignatureData *out_sigdata,
                   bool use_dummy) {
    PSBTInput &input = psbt.inputs.at(index);
    const CMutableTransaction &tx = *psbt.tx;

    // Already finalized: nothing left to do.
    if (PSBTInputSigned(input)) {
        return true;
    }

    // Seed the signing context with everything the input already carries.
    SignatureData sigdata;
    input.FillSignatureData(sigdata);

    // Without a known UTXO there is nothing to sign against.
    if (input.utxo.IsNull()) {
        return false;
    }
    CTxOut utxo = input.utxo;

    bool sig_complete = false;
    if (use_dummy) {
        sig_complete = ProduceSignature(provider, DUMMY_SIGNATURE_CREATOR,
                                        utxo.scriptPubKey, sigdata);
    } else {
        MutableTransactionSignatureCreator creator(&tx, index, utxo.nValue,
                                                   sighash);
        sig_complete =
            ProduceSignature(provider, creator, utxo.scriptPubKey, sigdata);
    }
    input.FromSignatureData(sigdata);

    // Report what is still missing, if the caller asked for it.
    if (out_sigdata != nullptr) {
        out_sigdata->missing_pubkeys = sigdata.missing_pubkeys;
        out_sigdata->missing_sigs = sigdata.missing_sigs;
        out_sigdata->missing_redeem_script = sigdata.missing_redeem_script;
    }
    return sig_complete;
}
// Finalize input signatures — in case we hold partial signatures that add
// up to a complete signature but were never combined (e.g. the combiner
// that created this PSBT did not understand them), this combines them into
// a final script. Returns true when every input is complete.
bool FinalizePSBT(PartiallySignedTransaction &psbtx) {
    bool complete = true;
    const size_t num_inputs = psbtx.tx->vin.size();
    for (size_t i = 0; i < num_inputs; ++i) {
        // Always attempt every input, even after a failure, so each input
        // gets as far as it can.
        const bool input_ok =
            SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, SigHashType());
        complete = complete && input_ok;
    }
    return complete;
}
// Finalize the PSBT and, on success, extract the fully-signed transaction
// into `result`. It's not safe to extract a PSBT that isn't finalized, and
// there's no easy way to check for that without finalizing, so finalize
// first and bail out on failure.
bool FinalizeAndExtractPSBT(PartiallySignedTransaction &psbtx,
                            CMutableTransaction &result) {
    if (!FinalizePSBT(psbtx)) {
        return false;
    }
    result = *psbtx.tx;
    // Graft each input's finalized scriptSig onto the extracted transaction.
    for (size_t idx = 0; idx < result.vin.size(); ++idx) {
        result.vin[idx].scriptSig = psbtx.inputs[idx].final_script_sig;
    }
    return true;
}
/**
 * Combine several PSBTs over the same transaction into `out`.
 *
 * @param[out] out    Receives the combined PSBT.
 * @param      psbtxs PSBTs to combine; must all share the same transaction.
 * @return TransactionError::OK on success, PSBT_MISMATCH when the inputs do
 *         not belong together (or when `psbtxs` is empty).
 */
TransactionError
CombinePSBTs(PartiallySignedTransaction &out,
             const std::vector<PartiallySignedTransaction> &psbtxs) {
    // Guard against an empty list: the original indexed psbtxs[0]
    // unconditionally, which is undefined behaviour on an empty vector.
    if (psbtxs.empty()) {
        return TransactionError::PSBT_MISMATCH;
    }
    // Copy the first one, then merge the rest into it.
    out = psbtxs[0];
    for (auto it = std::next(psbtxs.begin()); it != psbtxs.end(); ++it) {
        if (!out.Merge(*it)) {
            return TransactionError::PSBT_MISMATCH;
        }
    }
    return TransactionError::OK;
}
// Human-readable name for a PSBT workflow role.
std::string PSBTRoleName(const PSBTRole role) {
    // Deliberately no default label: a missing enumerator then triggers a
    // compiler warning instead of silently falling through.
    switch (role) {
        case PSBTRole::CREATOR:
            return "creator";
        case PSBTRole::UPDATER:
            return "updater";
        case PSBTRole::SIGNER:
            return "signer";
        case PSBTRole::FINALIZER:
            return "finalizer";
        case PSBTRole::EXTRACTOR:
            return "extractor";
    }
    // Unreachable for valid enum values.
    assert(false);
}
// Decode a base64-encoded PSBT; on failure `error` describes the problem.
bool DecodeBase64PSBT(PartiallySignedTransaction &psbt,
                      const std::string &base64_tx, std::string &error) {
    bool invalid;
    const std::string raw_psbt = DecodeBase64(base64_tx, &invalid);
    if (invalid) {
        error = "invalid base64";
        return false;
    }
    return DecodeRawPSBT(psbt, raw_psbt, error);
}
// Deserialize a raw (binary) PSBT; on failure `error` describes the problem.
bool DecodeRawPSBT(PartiallySignedTransaction &psbt, const std::string &tx_data,
                   std::string &error) {
    CDataStream ss_data(MakeByteSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION);
    try {
        ss_data >> psbt;
    } catch (const std::exception &e) {
        error = e.what();
        return false;
    }
    // The whole buffer must be consumed by the PSBT.
    if (!ss_data.empty()) {
        error = "extra data after PSBT";
        return false;
    }
    return true;
}
diff --git a/src/pubkey.h b/src/pubkey.h
index c90b7f795..3ddc3f65d 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -1,231 +1,231 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Copyright (c) 2017 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_PUBKEY_H
#define BITCOIN_PUBKEY_H
#include <hash.h>
#include <serialize.h>
#include <uint256.h>
#include <boost/range/adaptor/sliced.hpp>
#include <stdexcept>
#include <vector>
const unsigned int BIP32_EXTKEY_SIZE = 74;
/** A reference to a CKey: the Hash160 of its serialized public key */
class CKeyID : public uint160 {
public:
    //! Default-construct as the all-zero hash.
    CKeyID() : uint160() {}
    //! Wrap an existing 160-bit hash as a key identifier.
    explicit CKeyID(const uint160 &in) : uint160(in) {}
};
using ChainCode = uint256;
/** An encapsulated public key. */
class CPubKey {
public:
    /**
     * secp256k1:
     */
    static constexpr unsigned int SIZE = 65;
    static constexpr unsigned int COMPRESSED_SIZE = 33;
    static constexpr unsigned int SCHNORR_SIZE = 64;
    static constexpr unsigned int SIGNATURE_SIZE = 72;
    static constexpr unsigned int COMPACT_SIGNATURE_SIZE = 65;
    /**
     * see www.keylength.com
     * script supports up to 75 for single byte push
     */
    static_assert(SIZE >= COMPRESSED_SIZE,
                  "COMPRESSED_SIZE is larger than SIZE");

private:
    /**
     * Just store the serialized data.
     * Its length can very cheaply be computed from the first byte.
     */
    uint8_t vch[SIZE];

    //! Compute the length of a pubkey with a given first byte.
    static unsigned int GetLen(uint8_t chHeader) {
        if (chHeader == 2 || chHeader == 3) {
            return COMPRESSED_SIZE;
        }
        if (chHeader == 4 || chHeader == 6 || chHeader == 7) {
            return SIZE;
        }
        // Unrecognized header byte: not a valid pubkey.
        return 0;
    }

    //! Set this key data to be invalid (0xFF is not a valid header byte).
    void Invalidate() { vch[0] = 0xFF; }

public:
    //! Whether a byte vector has the exact length its header byte implies.
    bool static ValidSize(const std::vector<uint8_t> &vch) {
        return vch.size() > 0 && GetLen(vch[0]) == vch.size();
    }

    //! Construct an invalid public key.
    CPubKey() { Invalidate(); }

    //! Initialize a public key using begin/end iterators to byte data.
    template <typename T> void Set(const T pbegin, const T pend) {
        const int len = (pend == pbegin) ? 0 : GetLen(pbegin[0]);
        if (len && len == (pend - pbegin)) {
            memcpy(vch, (uint8_t *)&pbegin[0], len);
        } else {
            Invalidate();
        }
    }

    //! Construct a public key using begin/end iterators to byte data.
    template <typename T> CPubKey(const T pbegin, const T pend) {
        Set(pbegin, pend);
    }

    //! Construct a public key from a byte vector.
    explicit CPubKey(Span<const uint8_t> _vch) {
        Set(_vch.begin(), _vch.end());
    }

    //! Simple read-only vector-like interface to the pubkey data.
    unsigned int size() const { return GetLen(vch[0]); }
    const uint8_t *data() const { return vch; }
    const uint8_t *begin() const { return vch; }
    const uint8_t *end() const { return vch + size(); }
    const uint8_t &operator[](unsigned int pos) const { return vch[pos]; }

    //! Comparator implementation. The header byte is compared first as a
    //! cheap discriminator before the full memcmp.
    friend bool operator==(const CPubKey &a, const CPubKey &b) {
        return a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) == 0;
    }
    friend bool operator!=(const CPubKey &a, const CPubKey &b) {
        return !(a == b);
    }
    friend bool operator<(const CPubKey &a, const CPubKey &b) {
        return a.vch[0] < b.vch[0] ||
               (a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) < 0);
    }

    //! Implement serialization, as if this was a byte vector.
    template <typename Stream> void Serialize(Stream &s) const {
        const unsigned int len = size();
        ::WriteCompactSize(s, len);
        s.write(AsBytes(Span{vch, len}));
    }
    template <typename Stream> void Unserialize(Stream &s) {
        const unsigned int len(::ReadCompactSize(s));
        if (len <= SIZE) {
            s.read(AsWritableBytes(Span{vch, len}));
            // Reject data whose length disagrees with its header byte.
            if (len != size()) {
                Invalidate();
            }
        } else {
            // invalid pubkey, skip available data
            s.ignore(len);
            Invalidate();
        }
    }

    //! Get the KeyID of this public key (hash of its serialization)
    CKeyID GetID() const { return CKeyID(Hash160(Span{vch}.first(size()))); }

    //! Get the 256-bit hash of this public key.
    uint256 GetHash() const { return Hash(Span{vch}.first(size())); }

    /*
     * Check syntactic correctness.
     *
     * Note that this is consensus critical as CheckSig() calls it!
     */
    bool IsValid() const { return size() > 0; }

    //! fully validate whether this is a valid public key (more expensive than
    //! IsValid())
    bool IsFullyValid() const;

    //! Check whether this is a compressed public key.
    bool IsCompressed() const { return size() == COMPRESSED_SIZE; }

    /**
     * Verify a DER-serialized ECDSA signature (~72 bytes).
     * If this public key is not fully valid, the return value will be false.
     */
    bool VerifyECDSA(const uint256 &hash,
                     const std::vector<uint8_t> &vchSig) const;

    /**
     * Verify a Schnorr signature (=64 bytes).
     * If this public key is not fully valid, the return value will be false.
     */
    bool VerifySchnorr(const uint256 &hash,
                       const std::array<uint8_t, SCHNORR_SIZE> &sig) const;
    bool VerifySchnorr(const uint256 &hash,
                       const std::vector<uint8_t> &vchSig) const;

    /**
     * Check whether a DER-serialized ECDSA signature is normalized (lower-S).
     */
    static bool
    CheckLowS(const boost::sliced_range<const std::vector<uint8_t>> &vchSig);
    static bool CheckLowS(const std::vector<uint8_t> &vchSig) {
        return CheckLowS(vchSig | boost::adaptors::sliced(0, vchSig.size()));
    }

    //! Recover a public key from a compact ECDSA signature.
    bool RecoverCompact(const uint256 &hash,
                        const std::vector<uint8_t> &vchSig);

    //! Turn this public key into an uncompressed public key.
    bool Decompress();

    //! Derive BIP32 child pubkey.
    bool Derive(CPubKey &pubkeyChild, ChainCode &ccChild, unsigned int nChild,
                const ChainCode &cc) const;
};
/**
 * An extended public key: a pubkey together with the chain code and
 * derivation metadata, serialized into BIP32_EXTKEY_SIZE bytes by
 * Encode()/Decode().
 */
struct CExtPubKey {
    // NOTE(review): field semantics assumed to follow the BIP32 extended-key
    // layout (depth / parent fingerprint / child number) — confirm against
    // Encode()/Decode() in the implementation file.
    uint8_t nDepth;
    uint8_t vchFingerprint[4];
    unsigned int nChild;
    ChainCode chaincode;
    CPubKey pubkey;

    //! Two extended keys are equal when every field matches.
    friend bool operator==(const CExtPubKey &a, const CExtPubKey &b) {
        return a.nDepth == b.nDepth &&
               memcmp(a.vchFingerprint, b.vchFingerprint,
                      sizeof(vchFingerprint)) == 0 &&
               a.nChild == b.nChild && a.chaincode == b.chaincode &&
               a.pubkey == b.pubkey;
    }
    friend bool operator!=(const CExtPubKey &a, const CExtPubKey &b) {
        return !(a == b);
    }
    //! Serialize into / deserialize from a fixed-size byte record.
    void Encode(uint8_t code[BIP32_EXTKEY_SIZE]) const;
    void Decode(const uint8_t code[BIP32_EXTKEY_SIZE]);
    //! Derive the nChild-th child extended pubkey into `out`.
    bool Derive(CExtPubKey &out, unsigned int nChild) const;
    CExtPubKey() = default;
};
/**
* Users of this module must hold an ECCVerifyHandle. The constructor and
* destructor of these are not allowed to run in parallel, though.
*/
// RAII handle; see the comment above: constructor/destructor must not run
// in parallel with each other.
class ECCVerifyHandle {
    //! Number of live handles, shared across all instances.
    static int refcount;

public:
    ECCVerifyHandle();
    ~ECCVerifyHandle();
};
#endif // BITCOIN_PUBKEY_H
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index 1815a808e..8eb0455db 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -1,139 +1,139 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <script/bitcoinconsensus.h>
#include <primitives/transaction.h>
#include <pubkey.h>
#include <script/interpreter.h>
#include <version.h>
namespace {
/** A class that deserializes a single CTransaction one time. */
class TxInputStream {
public:
TxInputStream(int nTypeIn, int nVersionIn, const uint8_t *txTo,
size_t txToLen)
: m_type(nTypeIn), m_version(nVersionIn), m_data(txTo),
m_remaining(txToLen) {}
- void read(char *pch, size_t nSize) {
- if (nSize > m_remaining) {
+ void read(Span<std::byte> dst) {
+ if (dst.size() > m_remaining) {
throw std::ios_base::failure(std::string(__func__) +
": end of data");
}
- if (pch == nullptr) {
+ if (dst.data() == nullptr) {
throw std::ios_base::failure(std::string(__func__) +
": bad destination buffer");
}
if (m_data == nullptr) {
throw std::ios_base::failure(std::string(__func__) +
": bad source buffer");
}
- memcpy(pch, m_data, nSize);
- m_remaining -= nSize;
- m_data += nSize;
+ memcpy(dst.data(), m_data, dst.size());
+ m_remaining -= dst.size();
+ m_data += dst.size();
}
template <typename T> TxInputStream &operator>>(T &&obj) {
::Unserialize(*this, obj);
return *this;
}
int GetVersion() const { return m_version; }
int GetType() const { return m_type; }
private:
const int m_type;
const int m_version;
const uint8_t *m_data;
size_t m_remaining;
};
// Store the error code (when the caller provided somewhere to put it) and
// evaluate to 0, so callers can write `return set_error(...)`.
inline int set_error(bitcoinconsensus_error *ret,
                     bitcoinconsensus_error serror) {
    if (ret != nullptr) {
        *ret = serror;
    }
    return 0;
}
// Wraps an ECCVerifyHandle; the global instance below keeps one handle
// alive for the lifetime of the library.
struct ECCryptoClosure {
    ECCVerifyHandle handle;
};
ECCryptoClosure instance_of_eccryptoclosure;
} // namespace
/** Check that all specified flags are part of the libconsensus interface. */
static bool verify_flags(unsigned int flags) {
    // Any flag bit outside the libconsensus-supported set is rejected.
    const unsigned int unsupported =
        flags & ~bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL;
    return unsupported == 0;
}
// Shared implementation behind the exported verify entry points:
// deserializes the transaction and runs script verification on input nIn.
// Returns VerifyScript's result (nonzero on success), or 0 with *err set
// on any failure to even attempt verification.
static int verify_script(const uint8_t *scriptPubKey,
                         unsigned int scriptPubKeyLen, Amount amount,
                         const uint8_t *txTo, unsigned int txToLen,
                         unsigned int nIn, unsigned int flags,
                         bitcoinconsensus_error *err) {
    if (!verify_flags(flags)) {
        return set_error(err, bitcoinconsensus_ERR_INVALID_FLAGS);
    }
    try {
        TxInputStream stream(SER_NETWORK, PROTOCOL_VERSION, txTo, txToLen);
        CTransaction tx(deserialize, stream);
        if (nIn >= tx.vin.size()) {
            return set_error(err, bitcoinconsensus_ERR_TX_INDEX);
        }
        // Guard against callers passing extra trailing bytes: the
        // re-serialized size must match the declared buffer length.
        if (GetSerializeSize(tx, PROTOCOL_VERSION) != txToLen) {
            return set_error(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
        }

        // Regardless of the verification result, the tx did not error.
        set_error(err, bitcoinconsensus_ERR_OK);

        PrecomputedTransactionData txdata(tx);
        return VerifyScript(
            tx.vin[nIn].scriptSig,
            CScript(scriptPubKey, scriptPubKey + scriptPubKeyLen), flags,
            TransactionSignatureChecker(&tx, nIn, amount, txdata), nullptr);
    } catch (const std::exception &) {
        // Error deserializing
        return set_error(err, bitcoinconsensus_ERR_TX_DESERIALIZE);
    }
}
int bitcoinconsensus_verify_script_with_amount(
const uint8_t *scriptPubKey, unsigned int scriptPubKeyLen, int64_t amount,
const uint8_t *txTo, unsigned int txToLen, unsigned int nIn,
unsigned int flags, bitcoinconsensus_error *err) {
Amount am(amount * SATOSHI);
return ::verify_script(scriptPubKey, scriptPubKeyLen, am, txTo, txToLen,
nIn, flags, err);
}
// Exported entry point without an amount. Flags whose sighash computation
// needs the spent amount are rejected here.
int bitcoinconsensus_verify_script(const uint8_t *scriptPubKey,
                                   unsigned int scriptPubKeyLen,
                                   const uint8_t *txTo, unsigned int txToLen,
                                   unsigned int nIn, unsigned int flags,
                                   bitcoinconsensus_error *err) {
    const unsigned int amount_required_flags =
        bitcoinconsensus_SCRIPT_ENABLE_SIGHASH_FORKID |
        bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS_DEPRECATED;
    if ((flags & amount_required_flags) != 0) {
        return set_error(err, bitcoinconsensus_ERR_AMOUNT_REQUIRED);
    }
    return ::verify_script(scriptPubKey, scriptPubKeyLen, Amount::zero(), txTo,
                           txToLen, nIn, flags, err);
}
//! Report the library version exposed to API consumers.
unsigned int bitcoinconsensus_version() {
    // Just use the API version for now
    return BITCOINCONSENSUS_API_VER;
}
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 458c596c2..a55f559c5 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1,1895 +1,1895 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <script/interpreter.h>
#include <crypto/ripemd160.h>
#include <crypto/sha1.h>
#include <crypto/sha256.h>
#include <pubkey.h>
#include <script/bitfield.h>
#include <script/script.h>
#include <script/sigencoding.h>
#include <uint256.h>
#include <util/bitmanip.h>
bool CastToBool(const valtype &vch) {
for (size_t i = 0; i < vch.size(); i++) {
if (vch[i] != 0) {
// Can be negative zero
if (i == vch.size() - 1 && vch[i] == 0x80) {
return false;
}
return true;
}
}
return false;
}
/**
* Script is a stack machine (like Forth) that evaluates a predicate
* returning a bool indicating valid or not. There are no loops.
*/
// stacktop(i)/altstacktop(i): access the i-th element from the top of the
// execution (alt)stack, with i negative (stacktop(-1) is the topmost).
// Implemented with .at(), which throws on out-of-range access.
#define stacktop(i) (stack.at(stack.size() + (i)))
#define altstacktop(i) (altstack.at(altstack.size() + (i)))
// Remove the top element; popping an empty stack is a programming error
// surfaced as an exception.
static inline void popstack(std::vector<valtype> &stack) {
    if (!stack.empty()) {
        stack.pop_back();
        return;
    }
    throw std::runtime_error("popstack(): stack empty");
}
// Remove every occurrence of the serialized script `b` from `script`,
// matching only at opcode boundaries, and return the number of occurrences
// removed. `script` is modified in place only when at least one match was
// found.
int FindAndDelete(CScript &script, const CScript &b) {
    int nFound = 0;
    if (b.empty()) {
        return nFound;
    }

    CScript result;
    // pc walks opcode boundaries; pc2 trails it, marking the start of the
    // next chunk to copy into `result`.
    CScript::const_iterator pc = script.begin(), pc2 = script.begin(),
                            end = script.end();
    opcodetype opcode;
    do {
        // Copy everything between the previous match (pc2) and here.
        result.insert(result.end(), pc2, pc);
        // Skip over consecutive matches of `b` at this boundary.
        while (static_cast<size_t>(end - pc) >= b.size() &&
               std::equal(b.begin(), b.end(), pc)) {
            pc = pc + b.size();
            ++nFound;
        }
        pc2 = pc;
    } while (script.GetOp(pc, opcode));

    if (nFound > 0) {
        result.insert(result.end(), pc2, end);
        script = std::move(result);
    }

    return nFound;
}
// Drop the signature from scriptCode when SIGHASH_FORKID is not in use
// (legacy pre-fork signing behaviour).
static void CleanupScriptCode(CScript &scriptCode,
                              const std::vector<uint8_t> &vchSig,
                              uint32_t flags) {
    const SigHashType sigHashType = GetHashType(vchSig);
    const bool forkid_in_use =
        (flags & SCRIPT_ENABLE_SIGHASH_FORKID) && sigHashType.hasForkId();
    if (!forkid_in_use) {
        FindAndDelete(scriptCode, CScript() << vchSig);
    }
}
// Whether an opcode belongs to the permanently disabled set.
// (`flags` is accepted for interface stability but not consulted.)
static bool IsOpcodeDisabled(opcodetype opcode, uint32_t flags) {
    switch (opcode) {
        case OP_INVERT:
        case OP_2MUL:
        case OP_2DIV:
        case OP_MUL:
        case OP_LSHIFT:
        case OP_RSHIFT:
            // Disabled opcodes.
            return true;
        default:
            return false;
    }
}
namespace {
/**
 * A data type to abstract out the condition stack during script execution.
 *
 * Conceptually it acts like a vector of booleans, one for each level of nested
 * IF/THEN/ELSE, indicating whether we're in the active or inactive branch of
 * each.
 *
 * The elements on the stack cannot be observed individually; we only need to
 * expose whether the stack is empty and whether or not any false values are
 * present at all. To implement OP_ELSE, a toggle_top modifier is added, which
 * flips the last value without returning it.
 *
 * This uses an optimized implementation that does not materialize the
 * actual stack. Instead, it just stores the size of the would-be stack,
 * and the position of the first false value in it.
 */
class ConditionStack {
private:
    //! A constant for m_first_false_pos to indicate there are no falses.
    static constexpr uint32_t NO_FALSE = std::numeric_limits<uint32_t>::max();

    //! The size of the implied stack.
    uint32_t m_stack_size = 0;
    //! The position of the first false value on the implied stack, or NO_FALSE
    //! if all true.
    uint32_t m_first_false_pos = NO_FALSE;

public:
    //! Whether the implied stack holds no elements.
    bool empty() const { return m_stack_size == 0; }
    //! Whether every element on the implied stack is true.
    bool all_true() const { return m_first_false_pos == NO_FALSE; }
    void push_back(bool f) {
        if (m_first_false_pos == NO_FALSE && !f) {
            // The stack consists of all true values, and a false is added.
            // The first false value will appear at the current size.
            m_first_false_pos = m_stack_size;
        }
        ++m_stack_size;
    }
    void pop_back() {
        assert(m_stack_size > 0);
        --m_stack_size;
        if (m_first_false_pos == m_stack_size) {
            // When popping off the first false value, everything becomes true.
            m_first_false_pos = NO_FALSE;
        }
    }
    //! Flip the value of the top element without observing it (OP_ELSE).
    void toggle_top() {
        assert(m_stack_size > 0);
        if (m_first_false_pos == NO_FALSE) {
            // The current stack is all true values; the first false will be the
            // top.
            m_first_false_pos = m_stack_size - 1;
        } else if (m_first_false_pos == m_stack_size - 1) {
            // The top is the first false value; toggling it will make
            // everything true.
            m_first_false_pos = NO_FALSE;
        } else {
            // There is a false value, but not on top. No action is needed as
            // toggling anything but the first false value is unobservable.
        }
    }
};
} // namespace
/**
* Helper for OP_CHECKSIG and OP_CHECKSIGVERIFY
*
* A return value of false means the script fails entirely. When true is
* returned, the fSuccess variable indicates whether the signature check itself
* succeeded.
*/
static bool EvalChecksig(const valtype &vchSig, const valtype &vchPubKey,
                         CScript::const_iterator pbegincodehash,
                         CScript::const_iterator pend, uint32_t flags,
                         const BaseSignatureChecker &checker,
                         ScriptExecutionMetrics &metrics, ScriptError *serror,
                         bool &fSuccess) {
    // Malformed signature or pubkey encodings fail the script outright.
    if (!CheckTransactionSignatureEncoding(vchSig, flags, serror) ||
        !CheckPubKeyEncoding(vchPubKey, flags, serror)) {
        // serror is set
        return false;
    }
    // An empty signature skips verification entirely; fSuccess is left as
    // the caller initialized it.
    if (vchSig.size()) {
        // Subset of script starting at the most recent
        // codeseparator
        CScript scriptCode(pbegincodehash, pend);

        // Remove signature for pre-fork scripts
        CleanupScriptCode(scriptCode, vchSig, flags);

        fSuccess = checker.CheckSig(vchSig, vchPubKey, scriptCode, flags);
        // Each real signature verification counts toward the sigcheck total.
        metrics.nSigChecks += 1;

        // NULLFAIL: a failing signature must be the empty vector.
        if (!fSuccess && (flags & SCRIPT_VERIFY_NULLFAIL)) {
            return set_error(serror, ScriptError::SIG_NULLFAIL);
        }
    }
    return true;
}
bool EvalScript(std::vector<valtype> &stack, const CScript &script,
uint32_t flags, const BaseSignatureChecker &checker,
ScriptExecutionMetrics &metrics, ScriptError *serror) {
static const CScriptNum bnZero(0);
static const CScriptNum bnOne(1);
static const valtype vchFalse(0);
static const valtype vchTrue(1, 1);
CScript::const_iterator pc = script.begin();
CScript::const_iterator pend = script.end();
CScript::const_iterator pbegincodehash = script.begin();
opcodetype opcode;
valtype vchPushValue;
ConditionStack vfExec;
std::vector<valtype> altstack;
set_error(serror, ScriptError::UNKNOWN);
if (script.size() > MAX_SCRIPT_SIZE) {
return set_error(serror, ScriptError::SCRIPT_SIZE);
}
int nOpCount = 0;
bool fRequireMinimal = (flags & SCRIPT_VERIFY_MINIMALDATA) != 0;
try {
while (pc < pend) {
bool fExec = vfExec.all_true();
//
// Read instruction
//
if (!script.GetOp(pc, opcode, vchPushValue)) {
return set_error(serror, ScriptError::BAD_OPCODE);
}
if (vchPushValue.size() > MAX_SCRIPT_ELEMENT_SIZE) {
return set_error(serror, ScriptError::PUSH_SIZE);
}
// Note how OP_RESERVED does not count towards the opcode limit.
if (opcode > OP_16 && ++nOpCount > MAX_OPS_PER_SCRIPT) {
return set_error(serror, ScriptError::OP_COUNT);
}
// Some opcodes are disabled (CVE-2010-5137).
if (IsOpcodeDisabled(opcode, flags)) {
return set_error(serror, ScriptError::DISABLED_OPCODE);
}
if (fExec && 0 <= opcode && opcode <= OP_PUSHDATA4) {
if (fRequireMinimal &&
!CheckMinimalPush(vchPushValue, opcode)) {
return set_error(serror, ScriptError::MINIMALDATA);
}
stack.push_back(vchPushValue);
} else if (fExec || (OP_IF <= opcode && opcode <= OP_ENDIF)) {
switch (opcode) {
//
// Push value
//
case OP_1NEGATE:
case OP_1:
case OP_2:
case OP_3:
case OP_4:
case OP_5:
case OP_6:
case OP_7:
case OP_8:
case OP_9:
case OP_10:
case OP_11:
case OP_12:
case OP_13:
case OP_14:
case OP_15:
case OP_16: {
// ( -- value)
CScriptNum bn((int)opcode - (int)(OP_1 - 1));
stack.push_back(bn.getvch());
// The result of these opcodes should always be the
// minimal way to push the data they push, so no need
// for a CheckMinimalPush here.
} break;
//
// Control
//
case OP_NOP:
break;
case OP_CHECKLOCKTIMEVERIFY: {
if (!(flags & SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY)) {
break;
}
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
// Note that elsewhere numeric opcodes are limited to
// operands in the range -2**31+1 to 2**31-1, however it
// is legal for opcodes to produce results exceeding
// that range. This limitation is implemented by
// CScriptNum's default 4-byte limit.
//
// If we kept to that limit we'd have a year 2038
// problem, even though the nLockTime field in
// transactions themselves is uint32 which only becomes
// meaningless after the year 2106.
//
// Thus as a special case we tell CScriptNum to accept
// up to 5-byte bignums, which are good until 2**39-1,
// well beyond the 2**32-1 limit of the nLockTime field
// itself.
const CScriptNum nLockTime(stacktop(-1),
fRequireMinimal, 5);
// In the rare event that the argument may be < 0 due to
// some arithmetic being done first, you can always use
// 0 MAX CHECKLOCKTIMEVERIFY.
if (nLockTime < 0) {
return set_error(serror,
ScriptError::NEGATIVE_LOCKTIME);
}
// Actually compare the specified lock time with the
// transaction.
if (!checker.CheckLockTime(nLockTime)) {
return set_error(serror,
ScriptError::UNSATISFIED_LOCKTIME);
}
break;
}
case OP_CHECKSEQUENCEVERIFY: {
if (!(flags & SCRIPT_VERIFY_CHECKSEQUENCEVERIFY)) {
break;
}
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
// nSequence, like nLockTime, is a 32-bit unsigned
// integer field. See the comment in CHECKLOCKTIMEVERIFY
// regarding 5-byte numeric operands.
const CScriptNum nSequence(stacktop(-1),
fRequireMinimal, 5);
// In the rare event that the argument may be < 0 due to
// some arithmetic being done first, you can always use
// 0 MAX CHECKSEQUENCEVERIFY.
if (nSequence < 0) {
return set_error(serror,
ScriptError::NEGATIVE_LOCKTIME);
}
// To provide for future soft-fork extensibility, if the
// operand has the disabled lock-time flag set,
// CHECKSEQUENCEVERIFY behaves as a NOP.
if ((nSequence &
CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) != 0) {
break;
}
// Compare the specified sequence number with the input.
if (!checker.CheckSequence(nSequence)) {
return set_error(serror,
ScriptError::UNSATISFIED_LOCKTIME);
}
break;
}
case OP_NOP1:
case OP_NOP4:
case OP_NOP5:
case OP_NOP6:
case OP_NOP7:
case OP_NOP8:
case OP_NOP9:
case OP_NOP10: {
if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS) {
return set_error(
serror,
ScriptError::DISCOURAGE_UPGRADABLE_NOPS);
}
} break;
case OP_IF:
case OP_NOTIF: {
// <expression> if [statements] [else [statements]]
// endif
bool fValue = false;
if (fExec) {
if (stack.size() < 1) {
return set_error(
serror,
ScriptError::UNBALANCED_CONDITIONAL);
}
valtype &vch = stacktop(-1);
if (flags & SCRIPT_VERIFY_MINIMALIF) {
if (vch.size() > 1) {
return set_error(serror,
ScriptError::MINIMALIF);
}
if (vch.size() == 1 && vch[0] != 1) {
return set_error(serror,
ScriptError::MINIMALIF);
}
}
fValue = CastToBool(vch);
if (opcode == OP_NOTIF) {
fValue = !fValue;
}
popstack(stack);
}
vfExec.push_back(fValue);
} break;
case OP_ELSE: {
if (vfExec.empty()) {
return set_error(
serror, ScriptError::UNBALANCED_CONDITIONAL);
}
vfExec.toggle_top();
} break;
case OP_ENDIF: {
if (vfExec.empty()) {
return set_error(
serror, ScriptError::UNBALANCED_CONDITIONAL);
}
vfExec.pop_back();
} break;
case OP_VERIFY: {
// (true -- ) or
// (false -- false) and return
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
bool fValue = CastToBool(stacktop(-1));
if (fValue) {
popstack(stack);
} else {
return set_error(serror, ScriptError::VERIFY);
}
} break;
case OP_RETURN: {
return set_error(serror, ScriptError::OP_RETURN);
} break;
//
// Stack ops
//
case OP_TOALTSTACK: {
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
altstack.push_back(stacktop(-1));
popstack(stack);
} break;
case OP_FROMALTSTACK: {
if (altstack.size() < 1) {
return set_error(
serror,
ScriptError::INVALID_ALTSTACK_OPERATION);
}
stack.push_back(altstacktop(-1));
popstack(altstack);
} break;
case OP_2DROP: {
// (x1 x2 -- )
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
popstack(stack);
popstack(stack);
} break;
case OP_2DUP: {
// (x1 x2 -- x1 x2 x1 x2)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch1 = stacktop(-2);
valtype vch2 = stacktop(-1);
stack.push_back(vch1);
stack.push_back(vch2);
} break;
case OP_3DUP: {
// (x1 x2 x3 -- x1 x2 x3 x1 x2 x3)
if (stack.size() < 3) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch1 = stacktop(-3);
valtype vch2 = stacktop(-2);
valtype vch3 = stacktop(-1);
stack.push_back(vch1);
stack.push_back(vch2);
stack.push_back(vch3);
} break;
case OP_2OVER: {
// (x1 x2 x3 x4 -- x1 x2 x3 x4 x1 x2)
if (stack.size() < 4) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch1 = stacktop(-4);
valtype vch2 = stacktop(-3);
stack.push_back(vch1);
stack.push_back(vch2);
} break;
case OP_2ROT: {
// (x1 x2 x3 x4 x5 x6 -- x3 x4 x5 x6 x1 x2)
if (stack.size() < 6) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch1 = stacktop(-6);
valtype vch2 = stacktop(-5);
stack.erase(stack.end() - 6, stack.end() - 4);
stack.push_back(vch1);
stack.push_back(vch2);
} break;
case OP_2SWAP: {
// (x1 x2 x3 x4 -- x3 x4 x1 x2)
if (stack.size() < 4) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
swap(stacktop(-4), stacktop(-2));
swap(stacktop(-3), stacktop(-1));
} break;
case OP_IFDUP: {
// (x - 0 | x x)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch = stacktop(-1);
if (CastToBool(vch)) {
stack.push_back(vch);
}
} break;
case OP_DEPTH: {
// -- stacksize
CScriptNum bn(stack.size());
stack.push_back(bn.getvch());
} break;
case OP_DROP: {
// (x -- )
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
popstack(stack);
} break;
case OP_DUP: {
// (x -- x x)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch = stacktop(-1);
stack.push_back(vch);
} break;
case OP_NIP: {
// (x1 x2 -- x2)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
stack.erase(stack.end() - 2);
} break;
case OP_OVER: {
// (x1 x2 -- x1 x2 x1)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch = stacktop(-2);
stack.push_back(vch);
} break;
case OP_PICK:
case OP_ROLL: {
// (xn ... x2 x1 x0 n - xn ... x2 x1 x0 xn)
// (xn ... x2 x1 x0 n - ... x2 x1 x0 xn)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
int n =
CScriptNum(stacktop(-1), fRequireMinimal).getint();
popstack(stack);
if (n < 0 || n >= (int)stack.size()) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch = stacktop(-n - 1);
if (opcode == OP_ROLL) {
stack.erase(stack.end() - n - 1);
}
stack.push_back(vch);
} break;
case OP_ROT: {
// (x1 x2 x3 -- x2 x3 x1)
// x2 x1 x3 after first swap
// x2 x3 x1 after second swap
if (stack.size() < 3) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
swap(stacktop(-3), stacktop(-2));
swap(stacktop(-2), stacktop(-1));
} break;
case OP_SWAP: {
// (x1 x2 -- x2 x1)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
swap(stacktop(-2), stacktop(-1));
} break;
case OP_TUCK: {
// (x1 x2 -- x2 x1 x2)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype vch = stacktop(-1);
stack.insert(stack.end() - 2, vch);
} break;
case OP_SIZE: {
// (in -- in size)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
CScriptNum bn(stacktop(-1).size());
stack.push_back(bn.getvch());
} break;
//
// Bitwise logic
//
case OP_AND:
case OP_OR:
case OP_XOR: {
// (x1 x2 - out)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &vch1 = stacktop(-2);
valtype &vch2 = stacktop(-1);
// Inputs must be the same size
if (vch1.size() != vch2.size()) {
return set_error(serror,
ScriptError::INVALID_OPERAND_SIZE);
}
// To avoid allocating, we modify vch1 in place.
switch (opcode) {
case OP_AND:
for (size_t i = 0; i < vch1.size(); ++i) {
vch1[i] &= vch2[i];
}
break;
case OP_OR:
for (size_t i = 0; i < vch1.size(); ++i) {
vch1[i] |= vch2[i];
}
break;
case OP_XOR:
for (size_t i = 0; i < vch1.size(); ++i) {
vch1[i] ^= vch2[i];
}
break;
default:
break;
}
// And pop vch2.
popstack(stack);
} break;
case OP_EQUAL:
case OP_EQUALVERIFY:
// case OP_NOTEQUAL: // use OP_NUMNOTEQUAL
{
// (x1 x2 - bool)
if (stack.size() < 2) {
return set_error(
serror,
ScriptError::INVALID_STACK_OPERATION);
}
valtype &vch1 = stacktop(-2);
valtype &vch2 = stacktop(-1);
bool fEqual = (vch1 == vch2);
// OP_NOTEQUAL is disabled because it would be too
// easy to say something like n != 1 and have some
// wiseguy pass in 1 with extra zero bytes after it
// (numerically, 0x01 == 0x0001 == 0x000001)
// if (opcode == OP_NOTEQUAL)
// fEqual = !fEqual;
popstack(stack);
popstack(stack);
stack.push_back(fEqual ? vchTrue : vchFalse);
if (opcode == OP_EQUALVERIFY) {
if (fEqual) {
popstack(stack);
} else {
return set_error(serror,
ScriptError::EQUALVERIFY);
}
}
}
break;
//
// Numeric
//
case OP_1ADD:
case OP_1SUB:
case OP_NEGATE:
case OP_ABS:
case OP_NOT:
case OP_0NOTEQUAL: {
// (in -- out)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
CScriptNum bn(stacktop(-1), fRequireMinimal);
switch (opcode) {
case OP_1ADD:
bn += bnOne;
break;
case OP_1SUB:
bn -= bnOne;
break;
case OP_NEGATE:
bn = -bn;
break;
case OP_ABS:
if (bn < bnZero) {
bn = -bn;
}
break;
case OP_NOT:
bn = (bn == bnZero);
break;
case OP_0NOTEQUAL:
bn = (bn != bnZero);
break;
default:
assert(!"invalid opcode");
break;
}
popstack(stack);
stack.push_back(bn.getvch());
} break;
case OP_ADD:
case OP_SUB:
case OP_DIV:
case OP_MOD:
case OP_BOOLAND:
case OP_BOOLOR:
case OP_NUMEQUAL:
case OP_NUMEQUALVERIFY:
case OP_NUMNOTEQUAL:
case OP_LESSTHAN:
case OP_GREATERTHAN:
case OP_LESSTHANOREQUAL:
case OP_GREATERTHANOREQUAL:
case OP_MIN:
case OP_MAX: {
// (x1 x2 -- out)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
CScriptNum bn1(stacktop(-2), fRequireMinimal);
CScriptNum bn2(stacktop(-1), fRequireMinimal);
CScriptNum bn(0);
switch (opcode) {
case OP_ADD:
bn = bn1 + bn2;
break;
case OP_SUB:
bn = bn1 - bn2;
break;
case OP_DIV:
// denominator must not be 0
if (bn2 == 0) {
return set_error(serror,
ScriptError::DIV_BY_ZERO);
}
bn = bn1 / bn2;
break;
case OP_MOD:
// divisor must not be 0
if (bn2 == 0) {
return set_error(serror,
ScriptError::MOD_BY_ZERO);
}
bn = bn1 % bn2;
break;
case OP_BOOLAND:
bn = (bn1 != bnZero && bn2 != bnZero);
break;
case OP_BOOLOR:
bn = (bn1 != bnZero || bn2 != bnZero);
break;
case OP_NUMEQUAL:
bn = (bn1 == bn2);
break;
case OP_NUMEQUALVERIFY:
bn = (bn1 == bn2);
break;
case OP_NUMNOTEQUAL:
bn = (bn1 != bn2);
break;
case OP_LESSTHAN:
bn = (bn1 < bn2);
break;
case OP_GREATERTHAN:
bn = (bn1 > bn2);
break;
case OP_LESSTHANOREQUAL:
bn = (bn1 <= bn2);
break;
case OP_GREATERTHANOREQUAL:
bn = (bn1 >= bn2);
break;
case OP_MIN:
bn = (bn1 < bn2 ? bn1 : bn2);
break;
case OP_MAX:
bn = (bn1 > bn2 ? bn1 : bn2);
break;
default:
assert(!"invalid opcode");
break;
}
popstack(stack);
popstack(stack);
stack.push_back(bn.getvch());
if (opcode == OP_NUMEQUALVERIFY) {
if (CastToBool(stacktop(-1))) {
popstack(stack);
} else {
return set_error(serror,
ScriptError::NUMEQUALVERIFY);
}
}
} break;
case OP_WITHIN: {
// (x min max -- out)
if (stack.size() < 3) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
CScriptNum bn1(stacktop(-3), fRequireMinimal);
CScriptNum bn2(stacktop(-2), fRequireMinimal);
CScriptNum bn3(stacktop(-1), fRequireMinimal);
bool fValue = (bn2 <= bn1 && bn1 < bn3);
popstack(stack);
popstack(stack);
popstack(stack);
stack.push_back(fValue ? vchTrue : vchFalse);
} break;
//
// Crypto
//
case OP_RIPEMD160:
case OP_SHA1:
case OP_SHA256:
case OP_HASH160:
case OP_HASH256: {
// (in -- hash)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &vch = stacktop(-1);
valtype vchHash((opcode == OP_RIPEMD160 ||
opcode == OP_SHA1 ||
opcode == OP_HASH160)
? 20
: 32);
if (opcode == OP_RIPEMD160) {
CRIPEMD160()
.Write(vch.data(), vch.size())
.Finalize(vchHash.data());
} else if (opcode == OP_SHA1) {
CSHA1()
.Write(vch.data(), vch.size())
.Finalize(vchHash.data());
} else if (opcode == OP_SHA256) {
CSHA256()
.Write(vch.data(), vch.size())
.Finalize(vchHash.data());
} else if (opcode == OP_HASH160) {
CHash160().Write(vch).Finalize(vchHash);
} else if (opcode == OP_HASH256) {
CHash256().Write(vch).Finalize(vchHash);
}
popstack(stack);
stack.push_back(vchHash);
} break;
case OP_CODESEPARATOR: {
// Hash starts after the code separator
pbegincodehash = pc;
} break;
case OP_CHECKSIG:
case OP_CHECKSIGVERIFY: {
// (sig pubkey -- bool)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &vchSig = stacktop(-2);
valtype &vchPubKey = stacktop(-1);
bool fSuccess = false;
if (!EvalChecksig(vchSig, vchPubKey, pbegincodehash,
pend, flags, checker, metrics, serror,
fSuccess)) {
return false;
}
popstack(stack);
popstack(stack);
stack.push_back(fSuccess ? vchTrue : vchFalse);
if (opcode == OP_CHECKSIGVERIFY) {
if (fSuccess) {
popstack(stack);
} else {
return set_error(serror,
ScriptError::CHECKSIGVERIFY);
}
}
} break;
case OP_CHECKDATASIG:
case OP_CHECKDATASIGVERIFY: {
// (sig message pubkey -- bool)
if (stack.size() < 3) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &vchSig = stacktop(-3);
valtype &vchMessage = stacktop(-2);
valtype &vchPubKey = stacktop(-1);
if (!CheckDataSignatureEncoding(vchSig, flags,
serror) ||
!CheckPubKeyEncoding(vchPubKey, flags, serror)) {
// serror is set
return false;
}
bool fSuccess = false;
if (vchSig.size()) {
valtype vchHash(32);
CSHA256()
.Write(vchMessage.data(), vchMessage.size())
.Finalize(vchHash.data());
fSuccess = checker.VerifySignature(
vchSig, CPubKey(vchPubKey), uint256(vchHash));
metrics.nSigChecks += 1;
if (!fSuccess && (flags & SCRIPT_VERIFY_NULLFAIL)) {
return set_error(serror,
ScriptError::SIG_NULLFAIL);
}
}
popstack(stack);
popstack(stack);
popstack(stack);
stack.push_back(fSuccess ? vchTrue : vchFalse);
if (opcode == OP_CHECKDATASIGVERIFY) {
if (fSuccess) {
popstack(stack);
} else {
return set_error(
serror, ScriptError::CHECKDATASIGVERIFY);
}
}
} break;
case OP_CHECKMULTISIG:
case OP_CHECKMULTISIGVERIFY: {
// ([dummy] [sig ...] num_of_signatures [pubkey ...]
// num_of_pubkeys -- bool)
const size_t idxKeyCount = 1;
if (stack.size() < idxKeyCount) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
const int nKeysCount =
CScriptNum(stacktop(-idxKeyCount), fRequireMinimal)
.getint();
if (nKeysCount < 0 ||
nKeysCount > MAX_PUBKEYS_PER_MULTISIG) {
return set_error(serror, ScriptError::PUBKEY_COUNT);
}
nOpCount += nKeysCount;
if (nOpCount > MAX_OPS_PER_SCRIPT) {
return set_error(serror, ScriptError::OP_COUNT);
}
// stack depth of the top pubkey
const size_t idxTopKey = idxKeyCount + 1;
// stack depth of nSigsCount
const size_t idxSigCount = idxTopKey + nKeysCount;
if (stack.size() < idxSigCount) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
const int nSigsCount =
CScriptNum(stacktop(-idxSigCount), fRequireMinimal)
.getint();
if (nSigsCount < 0 || nSigsCount > nKeysCount) {
return set_error(serror, ScriptError::SIG_COUNT);
}
// stack depth of the top signature
const size_t idxTopSig = idxSigCount + 1;
// stack depth of the dummy element
const size_t idxDummy = idxTopSig + nSigsCount;
if (stack.size() < idxDummy) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
// Subset of script starting at the most recent
// codeseparator
CScript scriptCode(pbegincodehash, pend);
// Assuming success is usually a bad idea, but the
// schnorr path can only succeed.
bool fSuccess = true;
if ((flags & SCRIPT_ENABLE_SCHNORR_MULTISIG) &&
stacktop(-idxDummy).size() != 0) {
// SCHNORR MULTISIG
static_assert(
MAX_PUBKEYS_PER_MULTISIG < 32,
"Schnorr multisig checkbits implementation "
"assumes < 32 pubkeys.");
uint32_t checkBits = 0;
// Dummy element is to be interpreted as a bitfield
// that represent which pubkeys should be checked.
valtype &vchDummy = stacktop(-idxDummy);
if (!DecodeBitfield(vchDummy, nKeysCount, checkBits,
serror)) {
// serror is set
return false;
}
// The bitfield doesn't set the right number of
// signatures.
if (countBits(checkBits) != uint32_t(nSigsCount)) {
return set_error(
serror, ScriptError::INVALID_BIT_COUNT);
}
const size_t idxBottomKey =
idxTopKey + nKeysCount - 1;
const size_t idxBottomSig =
idxTopSig + nSigsCount - 1;
int iKey = 0;
for (int iSig = 0; iSig < nSigsCount;
iSig++, iKey++) {
if ((checkBits >> iKey) == 0) {
// This is a sanity check and should be
// unreachable.
return set_error(
serror, ScriptError::INVALID_BIT_RANGE);
}
// Find the next suitable key.
while (((checkBits >> iKey) & 0x01) == 0) {
iKey++;
}
if (iKey >= nKeysCount) {
// This is a sanity check and should be
// unreachable.
return set_error(serror,
ScriptError::PUBKEY_COUNT);
}
// Check the signature.
valtype &vchSig =
stacktop(-idxBottomSig + iSig);
valtype &vchPubKey =
stacktop(-idxBottomKey + iKey);
// Note that only pubkeys associated with a
// signature are checked for validity.
if (!CheckTransactionSchnorrSignatureEncoding(
vchSig, flags, serror) ||
!CheckPubKeyEncoding(vchPubKey, flags,
serror)) {
// serror is set
return false;
}
// Check signature
if (!checker.CheckSig(vchSig, vchPubKey,
scriptCode, flags)) {
// This can fail if the signature is empty,
// which also is a NULLFAIL error as the
// bitfield should have been null in this
// situation.
return set_error(serror,
ScriptError::SIG_NULLFAIL);
}
// this is guaranteed to execute exactly
// nSigsCount times (if not script error)
metrics.nSigChecks += 1;
}
if ((checkBits >> iKey) != 0) {
// This is a sanity check and should be
// unreachable.
return set_error(
serror, ScriptError::INVALID_BIT_COUNT);
}
} else {
// LEGACY MULTISIG (ECDSA / NULL)
// Remove signature for pre-fork scripts
for (int k = 0; k < nSigsCount; k++) {
valtype &vchSig = stacktop(-idxTopSig - k);
CleanupScriptCode(scriptCode, vchSig, flags);
}
int nSigsRemaining = nSigsCount;
int nKeysRemaining = nKeysCount;
while (fSuccess && nSigsRemaining > 0) {
valtype &vchSig = stacktop(
-idxTopSig - (nSigsCount - nSigsRemaining));
valtype &vchPubKey = stacktop(
-idxTopKey - (nKeysCount - nKeysRemaining));
// Note how this makes the exact order of
// pubkey/signature evaluation distinguishable
// by CHECKMULTISIG NOT if the STRICTENC flag is
// set. See the script_(in)valid tests for
// details.
if (!CheckTransactionECDSASignatureEncoding(
vchSig, flags, serror) ||
!CheckPubKeyEncoding(vchPubKey, flags,
serror)) {
// serror is set
return false;
}
// Check signature
bool fOk = checker.CheckSig(vchSig, vchPubKey,
scriptCode, flags);
if (fOk) {
nSigsRemaining--;
}
nKeysRemaining--;
// If there are more signatures left than keys
// left, then too many signatures have failed.
// Exit early, without checking any further
// signatures.
if (nSigsRemaining > nKeysRemaining) {
fSuccess = false;
}
}
bool areAllSignaturesNull = true;
for (int i = 0; i < nSigsCount; i++) {
if (stacktop(-idxTopSig - i).size()) {
areAllSignaturesNull = false;
break;
}
}
// If the operation failed, we may require that all
// signatures must be empty vector
if (!fSuccess && (flags & SCRIPT_VERIFY_NULLFAIL) &&
!areAllSignaturesNull) {
return set_error(serror,
ScriptError::SIG_NULLFAIL);
}
if (!areAllSignaturesNull) {
// This is not identical to the number of actual
// ECDSA verifies, but, it is an upper bound
// that can be easily determined without doing
// CPU-intensive checks.
metrics.nSigChecks += nKeysCount;
}
}
// Clean up stack of all arguments
for (size_t i = 0; i < idxDummy; i++) {
popstack(stack);
}
stack.push_back(fSuccess ? vchTrue : vchFalse);
if (opcode == OP_CHECKMULTISIGVERIFY) {
if (fSuccess) {
popstack(stack);
} else {
return set_error(
serror, ScriptError::CHECKMULTISIGVERIFY);
}
}
} break;
//
// Byte string operations
//
case OP_CAT: {
// (x1 x2 -- out)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &vch1 = stacktop(-2);
valtype &vch2 = stacktop(-1);
if (vch1.size() + vch2.size() >
MAX_SCRIPT_ELEMENT_SIZE) {
return set_error(serror, ScriptError::PUSH_SIZE);
}
vch1.insert(vch1.end(), vch2.begin(), vch2.end());
popstack(stack);
} break;
case OP_SPLIT: {
// (in position -- x1 x2)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
const valtype &data = stacktop(-2);
// Make sure the split point is appropriate.
uint64_t position =
CScriptNum(stacktop(-1), fRequireMinimal).getint();
if (position > data.size()) {
return set_error(serror,
ScriptError::INVALID_SPLIT_RANGE);
}
// Prepare the results in their own buffer as `data`
// will be invalidated.
valtype n1(data.begin(), data.begin() + position);
valtype n2(data.begin() + position, data.end());
// Replace existing stack values by the new values.
stacktop(-2) = std::move(n1);
stacktop(-1) = std::move(n2);
} break;
case OP_REVERSEBYTES: {
// (in -- out)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &data = stacktop(-1);
std::reverse(data.begin(), data.end());
} break;
//
// Conversion operations
//
case OP_NUM2BIN: {
// (in size -- out)
if (stack.size() < 2) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
uint64_t size =
CScriptNum(stacktop(-1), fRequireMinimal).getint();
if (size > MAX_SCRIPT_ELEMENT_SIZE) {
return set_error(serror, ScriptError::PUSH_SIZE);
}
popstack(stack);
valtype &rawnum = stacktop(-1);
// Try to see if we can fit that number in the number of
// byte requested.
CScriptNum::MinimallyEncode(rawnum);
if (rawnum.size() > size) {
// We definitively cannot.
return set_error(serror,
ScriptError::IMPOSSIBLE_ENCODING);
}
// We already have an element of the right size, we
// don't need to do anything.
if (rawnum.size() == size) {
break;
}
uint8_t signbit = 0x00;
if (rawnum.size() > 0) {
signbit = rawnum.back() & 0x80;
rawnum[rawnum.size() - 1] &= 0x7f;
}
rawnum.reserve(size);
while (rawnum.size() < size - 1) {
rawnum.push_back(0x00);
}
rawnum.push_back(signbit);
} break;
case OP_BIN2NUM: {
// (in -- out)
if (stack.size() < 1) {
return set_error(
serror, ScriptError::INVALID_STACK_OPERATION);
}
valtype &n = stacktop(-1);
CScriptNum::MinimallyEncode(n);
// The resulting number must be a valid number.
if (!CScriptNum::IsMinimallyEncoded(n)) {
return set_error(serror,
ScriptError::INVALID_NUMBER_RANGE);
}
} break;
default:
return set_error(serror, ScriptError::BAD_OPCODE);
}
}
// Size limits
if (stack.size() + altstack.size() > MAX_STACK_SIZE) {
return set_error(serror, ScriptError::STACK_SIZE);
}
}
} catch (...) {
return set_error(serror, ScriptError::UNKNOWN);
}
if (!vfExec.empty()) {
return set_error(serror, ScriptError::UNBALANCED_CONDITIONAL);
}
return set_success(serror);
}
namespace {
/**
 * Wrapper that serializes like CTransaction, but with the modifications
 * required for the signature hash done in-place.
 *
 * Used by the legacy (pre-forkid) SignatureHash() path: inputs/outputs are
 * blanked or dropped according to the sighash flags while serializing, so no
 * modified copy of the transaction ever has to be materialized.
 */
template <class T> class CTransactionSignatureSerializer {
private:
    //! reference to the spending transaction (the one being serialized)
    const T &txTo;
    //! output script being consumed
    const CScript &scriptCode;
    //! input index of txTo being signed
    const unsigned int nIn;
    //! container for hashtype flags
    const SigHashType sigHashType;

public:
    CTransactionSignatureSerializer(const T &txToIn,
                                    const CScript &scriptCodeIn,
                                    unsigned int nInIn,
                                    SigHashType sigHashTypeIn)
        : txTo(txToIn), scriptCode(scriptCodeIn), nIn(nInIn),
          sigHashType(sigHashTypeIn) {}

    /** Serialize the passed scriptCode, skipping OP_CODESEPARATORs */
    template <typename S> void SerializeScriptCode(S &s) const {
        CScript::const_iterator it = scriptCode.begin();
        CScript::const_iterator itBegin = it;
        opcodetype opcode;
        unsigned int nCodeSeparators = 0;
        // First pass: count the OP_CODESEPARATORs so the compact-size length
        // prefix matches the script with them removed.
        while (scriptCode.GetOp(it, opcode)) {
            if (opcode == OP_CODESEPARATOR) {
                nCodeSeparators++;
            }
        }
        ::WriteCompactSize(s, scriptCode.size() - nCodeSeparators);
        it = itBegin;
        // Second pass: emit the script in chunks, dropping each single-byte
        // OP_CODESEPARATOR opcode (hence the `- 1` on the chunk length).
        while (scriptCode.GetOp(it, opcode)) {
            if (opcode == OP_CODESEPARATOR) {
                s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin - 1)}));
                itBegin = it;
            }
        }
        if (itBegin != scriptCode.end()) {
            s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin)}));
        }
    }

    /** Serialize an input of txTo */
    template <typename S> void SerializeInput(S &s, unsigned int nInput) const {
        // In case of SIGHASH_ANYONECANPAY, only the input being signed is
        // serialized
        if (sigHashType.hasAnyoneCanPay()) {
            nInput = nIn;
        }
        // Serialize the prevout
        ::Serialize(s, txTo.vin[nInput].prevout);
        // Serialize the script
        if (nInput != nIn) {
            // Blank out other inputs' signatures
            ::Serialize(s, CScript());
        } else {
            SerializeScriptCode(s);
        }
        // Serialize the nSequence
        if (nInput != nIn &&
            (sigHashType.getBaseType() == BaseSigHashType::SINGLE ||
             sigHashType.getBaseType() == BaseSigHashType::NONE)) {
            // let the others update at will
            ::Serialize(s, (int)0);
        } else {
            ::Serialize(s, txTo.vin[nInput].nSequence);
        }
    }

    /** Serialize an output of txTo */
    template <typename S>
    void SerializeOutput(S &s, unsigned int nOutput) const {
        if (sigHashType.getBaseType() == BaseSigHashType::SINGLE &&
            nOutput != nIn) {
            // Do not lock-in the txout payee at other indices as txin
            ::Serialize(s, CTxOut());
        } else {
            ::Serialize(s, txTo.vout[nOutput]);
        }
    }

    /** Serialize txTo */
    template <typename S> void Serialize(S &s) const {
        // Serialize nVersion
        ::Serialize(s, txTo.nVersion);
        // Serialize vin
        unsigned int nInputs =
            sigHashType.hasAnyoneCanPay() ? 1 : txTo.vin.size();
        ::WriteCompactSize(s, nInputs);
        for (unsigned int nInput = 0; nInput < nInputs; nInput++) {
            SerializeInput(s, nInput);
        }
        // Serialize vout
        unsigned int nOutputs =
            (sigHashType.getBaseType() == BaseSigHashType::NONE)
                ? 0
                : ((sigHashType.getBaseType() == BaseSigHashType::SINGLE)
                       ? nIn + 1
                       : txTo.vout.size());
        ::WriteCompactSize(s, nOutputs);
        for (unsigned int nOutput = 0; nOutput < nOutputs; nOutput++) {
            SerializeOutput(s, nOutput);
        }
        // Serialize nLockTime
        ::Serialize(s, txTo.nLockTime);
    }
};
/** Double-SHA256 of the serialized prevouts of every input of txTo. */
template <class T> uint256 GetPrevoutHash(const T &txTo) {
    CHashWriter hasher(SER_GETHASH, 0);
    for (const auto &input : txTo.vin) {
        hasher << input.prevout;
    }
    return hasher.GetHash();
}
/** Double-SHA256 of the serialized nSequence of every input of txTo. */
template <class T> uint256 GetSequenceHash(const T &txTo) {
    CHashWriter hasher(SER_GETHASH, 0);
    for (const auto &input : txTo.vin) {
        hasher << input.nSequence;
    }
    return hasher.GetHash();
}
/** Double-SHA256 of every serialized output of txTo. */
template <class T> uint256 GetOutputsHash(const T &txTo) {
    CHashWriter hasher(SER_GETHASH, 0);
    for (const auto &output : txTo.vout) {
        hasher << output;
    }
    return hasher.GetHash();
}
} // namespace
// Precompute the three midstate hashes that the fork-id path of
// SignatureHash() reads from the cache, so they are computed once per
// transaction instead of once per input.
template <class T>
PrecomputedTransactionData::PrecomputedTransactionData(const T &txTo)
    : hashPrevouts(GetPrevoutHash(txTo)), hashSequence(GetSequenceHash(txTo)),
      hashOutputs(GetOutputsHash(txTo)) {}

// explicit instantiation
template PrecomputedTransactionData::PrecomputedTransactionData(
    const CTransaction &txTo);
template PrecomputedTransactionData::PrecomputedTransactionData(
    const CMutableTransaction &txTo);
/**
 * Compute the digest that is actually signed for input nIn of txTo.
 *
 * Two algorithms are implemented:
 *  - the BIP143-style fork-id algorithm (when the sighash type has the
 *    forkid bit set and SCRIPT_ENABLE_SIGHASH_FORKID is active), which
 *    commits to the amount and can reuse hashes from `cache`;
 *  - the legacy algorithm, which hashes a modified serialization of the
 *    transaction built by CTransactionSignatureSerializer.
 *
 * @param scriptCode  Output script being spent (code after the last
 *                    OP_CODESEPARATOR for the legacy path).
 * @param txTo        The spending transaction.
 * @param nIn         Index of the input being signed (must be in range).
 * @param sigHashType Sighash flags committed to by the signature.
 * @param amount      Value of the output being spent (fork-id path only).
 * @param cache       Optional precomputed midstate hashes; may be null.
 * @param flags       Script verification flags.
 * @return The 32-byte signature hash.
 */
template <class T>
uint256 SignatureHash(const CScript &scriptCode, const T &txTo,
                      unsigned int nIn, SigHashType sigHashType,
                      const Amount amount,
                      const PrecomputedTransactionData *cache, uint32_t flags) {
    assert(nIn < txTo.vin.size());

    if (flags & SCRIPT_ENABLE_REPLAY_PROTECTION) {
        // Legacy chain's value for fork id must be of the form 0xffxxxx.
        // By xoring with 0xdead, we ensure that the value will be different
        // from the original one, even if it already starts with 0xff.
        uint32_t newForkValue = sigHashType.getForkValue() ^ 0xdead;
        sigHashType = sigHashType.withForkValue(0xff0000 | newForkValue);
    }

    if (sigHashType.hasForkId() && (flags & SCRIPT_ENABLE_SIGHASH_FORKID)) {
        // Fork-id (BIP143-style) digest.
        uint256 hashPrevouts;
        uint256 hashSequence;
        uint256 hashOutputs;

        // Each midstate hash is only committed to when the sighash flags
        // require it; otherwise the all-zero uint256 is hashed instead.
        if (!sigHashType.hasAnyoneCanPay()) {
            hashPrevouts = cache ? cache->hashPrevouts : GetPrevoutHash(txTo);
        }

        if (!sigHashType.hasAnyoneCanPay() &&
            (sigHashType.getBaseType() != BaseSigHashType::SINGLE) &&
            (sigHashType.getBaseType() != BaseSigHashType::NONE)) {
            hashSequence = cache ? cache->hashSequence : GetSequenceHash(txTo);
        }

        if ((sigHashType.getBaseType() != BaseSigHashType::SINGLE) &&
            (sigHashType.getBaseType() != BaseSigHashType::NONE)) {
            hashOutputs = cache ? cache->hashOutputs : GetOutputsHash(txTo);
        } else if ((sigHashType.getBaseType() == BaseSigHashType::SINGLE) &&
                   (nIn < txTo.vout.size())) {
            // SIGHASH_SINGLE commits only to the output at the same index.
            CHashWriter ss(SER_GETHASH, 0);
            ss << txTo.vout[nIn];
            hashOutputs = ss.GetHash();
        }

        CHashWriter ss(SER_GETHASH, 0);
        // Version
        ss << txTo.nVersion;
        // Input prevouts/nSequence (none/all, depending on flags)
        ss << hashPrevouts;
        ss << hashSequence;
        // The input being signed (replacing the scriptSig with scriptCode +
        // amount). The prevout may already be contained in hashPrevouts, and
        // the nSequence may already be contained in hashSequence.
        ss << txTo.vin[nIn].prevout;
        ss << scriptCode;
        ss << amount;
        ss << txTo.vin[nIn].nSequence;
        // Outputs (none/one/all, depending on flags)
        ss << hashOutputs;
        // Locktime
        ss << txTo.nLockTime;
        // Sighash type
        ss << sigHashType;

        return ss.GetHash();
    }

    // Check for invalid use of SIGHASH_SINGLE
    if ((sigHashType.getBaseType() == BaseSigHashType::SINGLE) &&
        (nIn >= txTo.vout.size())) {
        // nOut out of range: historical behavior is to return the digest
        // 0x01 rather than fail.
        return uint256::ONE;
    }

    // Wrapper to serialize only the necessary parts of the transaction being
    // signed
    CTransactionSignatureSerializer<T> txTmp(txTo, scriptCode, nIn,
                                             sigHashType);

    // Serialize and hash
    CHashWriter ss(SER_GETHASH, 0);
    ss << txTmp << sigHashType;
    return ss.GetHash();
}
bool BaseSignatureChecker::VerifySignature(const std::vector<uint8_t> &vchSig,
                                           const CPubKey &pubkey,
                                           const uint256 &sighash) const {
    // A 64-byte signature is taken to be Schnorr; any other length is
    // verified as a DER-encoded ECDSA signature.
    return vchSig.size() == 64 ? pubkey.VerifySchnorr(sighash, vchSig)
                               : pubkey.VerifyECDSA(sighash, vchSig);
}
template <class T>
bool GenericTransactionSignatureChecker<T>::CheckSig(
    const std::vector<uint8_t> &vchSigIn, const std::vector<uint8_t> &vchPubKey,
    const CScript &scriptCode, uint32_t flags) const {
    // An invalid public key can never verify.
    const CPubKey pubkey(vchPubKey);
    if (!pubkey.IsValid()) {
        return false;
    }

    // Hash type is one byte tacked on to the end of the signature, so an
    // empty signature cannot even encode a hash type.
    std::vector<uint8_t> vchSig(vchSigIn);
    if (vchSig.empty()) {
        return false;
    }
    const SigHashType sigHashType = GetHashType(vchSig);
    // Strip the hash-type byte before verification.
    vchSig.pop_back();

    const uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, sigHashType,
                                          amount, this->txdata, flags);
    return VerifySignature(vchSig, pubkey, sighash);
}
/**
 * Check whether the CHECKLOCKTIMEVERIFY operand nLockTime is satisfied by
 * this transaction and input. Returns false (causing script failure) when
 * the lock-time types differ, the lock has not yet expired, or the input is
 * final (which would bypass nLockTime entirely).
 */
template <class T>
bool GenericTransactionSignatureChecker<T>::CheckLockTime(
    const CScriptNum &nLockTime) const {
    // There are two kinds of nLockTime: lock-by-blockheight and
    // lock-by-blocktime, distinguished by whether nLockTime <
    // LOCKTIME_THRESHOLD.
    //
    // We want to compare apples to apples, so fail the script unless the type
    // of nLockTime being tested is the same as the nLockTime in the
    // transaction.
    if (!((txTo->nLockTime < LOCKTIME_THRESHOLD &&
           nLockTime < LOCKTIME_THRESHOLD) ||
          (txTo->nLockTime >= LOCKTIME_THRESHOLD &&
           nLockTime >= LOCKTIME_THRESHOLD))) {
        return false;
    }

    // Now that we know we're comparing apples-to-apples, the comparison is a
    // simple numeric one.
    if (nLockTime > int64_t(txTo->nLockTime)) {
        return false;
    }

    // Finally the nLockTime feature can be disabled and thus
    // CHECKLOCKTIMEVERIFY bypassed if every txin has been finalized by setting
    // nSequence to maxint. The transaction would be allowed into the
    // blockchain, making the opcode ineffective.
    //
    // Testing if this vin is not final is sufficient to prevent this condition.
    // Alternatively we could test all inputs, but testing just this input
    // minimizes the data required to prove correct CHECKLOCKTIMEVERIFY
    // execution.
    if (CTxIn::SEQUENCE_FINAL == txTo->vin[nIn].nSequence) {
        return false;
    }

    return true;
}
/**
 * Check whether the CHECKSEQUENCEVERIFY operand nSequence is satisfied by
 * this input's relative lock-time (BIP 68 semantics). Returns false (causing
 * script failure) when BIP 68 is not active for the transaction, the
 * lock-time types differ, or the relative lock has not yet expired.
 */
template <class T>
bool GenericTransactionSignatureChecker<T>::CheckSequence(
    const CScriptNum &nSequence) const {
    // Relative lock times are supported by comparing the passed in operand to
    // the sequence number of the input.
    const int64_t txToSequence = int64_t(txTo->vin[nIn].nSequence);

    // Fail if the transaction's version number is not set high enough to
    // trigger BIP 68 rules.
    if (static_cast<uint32_t>(txTo->nVersion) < 2) {
        return false;
    }

    // Sequence numbers with their most significant bit set are not consensus
    // constrained. Testing that the transaction's sequence number do not have
    // this bit set prevents using this property to get around a
    // CHECKSEQUENCEVERIFY check.
    if (txToSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
        return false;
    }

    // Mask off any bits that do not have consensus-enforced meaning before
    // doing the integer comparisons
    const uint32_t nLockTimeMask =
        CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | CTxIn::SEQUENCE_LOCKTIME_MASK;
    const int64_t txToSequenceMasked = txToSequence & nLockTimeMask;
    const CScriptNum nSequenceMasked = nSequence & nLockTimeMask;

    // There are two kinds of nSequence: lock-by-blockheight and
    // lock-by-blocktime, distinguished by whether nSequenceMasked <
    // CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG.
    //
    // We want to compare apples to apples, so fail the script unless the type
    // of nSequenceMasked being tested is the same as the nSequenceMasked in the
    // transaction.
    if (!((txToSequenceMasked < CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG &&
           nSequenceMasked < CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) ||
          (txToSequenceMasked >= CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG &&
           nSequenceMasked >= CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG))) {
        return false;
    }

    // Now that we know we're comparing apples-to-apples, the comparison is a
    // simple numeric one.
    if (nSequenceMasked > txToSequenceMasked) {
        return false;
    }

    return true;
}
// explicit instantiation
template class GenericTransactionSignatureChecker<CTransaction>;
template class GenericTransactionSignatureChecker<CMutableTransaction>;
/**
 * Top-level script verification: evaluate scriptSig then scriptPubKey on a
 * shared stack, perform P2SH redemption when applicable, and enforce the
 * CLEANSTACK and input-sigchecks policies. On success, the accumulated
 * execution metrics are written to metricsOut; on failure, *serror describes
 * the reason.
 */
bool VerifyScript(const CScript &scriptSig, const CScript &scriptPubKey,
                  uint32_t flags, const BaseSignatureChecker &checker,
                  ScriptExecutionMetrics &metricsOut, ScriptError *serror) {
    set_error(serror, ScriptError::UNKNOWN);

    // If FORKID is enabled, we also ensure strict encoding.
    if (flags & SCRIPT_ENABLE_SIGHASH_FORKID) {
        flags |= SCRIPT_VERIFY_STRICTENC;
    }

    if ((flags & SCRIPT_VERIFY_SIGPUSHONLY) != 0 && !scriptSig.IsPushOnly()) {
        return set_error(serror, ScriptError::SIG_PUSHONLY);
    }

    ScriptExecutionMetrics metrics = {};

    // scriptSig and scriptPubKey must be evaluated sequentially on the same
    // stack rather than being simply concatenated (see CVE-2010-5141)
    std::vector<valtype> stack, stackCopy;
    if (!EvalScript(stack, scriptSig, flags, checker, metrics, serror)) {
        // serror is set
        return false;
    }
    if (flags & SCRIPT_VERIFY_P2SH) {
        // Keep a copy of the scriptSig result so the redeem script can be
        // evaluated against it if this turns out to be a P2SH spend.
        stackCopy = stack;
    }
    if (!EvalScript(stack, scriptPubKey, flags, checker, metrics, serror)) {
        // serror is set
        return false;
    }
    if (stack.empty()) {
        return set_error(serror, ScriptError::EVAL_FALSE);
    }
    if (CastToBool(stack.back()) == false) {
        return set_error(serror, ScriptError::EVAL_FALSE);
    }

    // Additional validation for spend-to-script-hash transactions:
    if ((flags & SCRIPT_VERIFY_P2SH) && scriptPubKey.IsPayToScriptHash()) {
        // scriptSig must be literals-only or validation fails
        if (!scriptSig.IsPushOnly()) {
            return set_error(serror, ScriptError::SIG_PUSHONLY);
        }

        // Restore stack.
        swap(stack, stackCopy);

        // stack cannot be empty here, because if it was the P2SH HASH <> EQUAL
        // scriptPubKey would be evaluated with an empty stack and the
        // EvalScript above would return false.
        assert(!stack.empty());

        const valtype &pubKeySerialized = stack.back();
        CScript pubKey2(pubKeySerialized.begin(), pubKeySerialized.end());
        popstack(stack);

        // Bail out early if SCRIPT_DISALLOW_SEGWIT_RECOVERY is not set, the
        // redeem script is a p2sh segwit program, and it was the only item
        // pushed onto the stack.
        if ((flags & SCRIPT_DISALLOW_SEGWIT_RECOVERY) == 0 && stack.empty() &&
            pubKey2.IsWitnessProgram()) {
            // must set metricsOut for all successful returns
            metricsOut = metrics;
            return set_success(serror);
        }

        // Evaluate the redeem script itself.
        if (!EvalScript(stack, pubKey2, flags, checker, metrics, serror)) {
            // serror is set
            return false;
        }
        if (stack.empty()) {
            return set_error(serror, ScriptError::EVAL_FALSE);
        }
        if (!CastToBool(stack.back())) {
            return set_error(serror, ScriptError::EVAL_FALSE);
        }
    }

    // The CLEANSTACK check is only performed after potential P2SH evaluation,
    // as the non-P2SH evaluation of a P2SH script will obviously not result in
    // a clean stack (the P2SH inputs remain). The same holds for witness
    // evaluation.
    if ((flags & SCRIPT_VERIFY_CLEANSTACK) != 0) {
        // Disallow CLEANSTACK without P2SH, as otherwise a switch
        // CLEANSTACK->P2SH+CLEANSTACK would be possible, which is not a
        // softfork (and P2SH should be one).
        assert((flags & SCRIPT_VERIFY_P2SH) != 0);
        if (stack.size() != 1) {
            return set_error(serror, ScriptError::CLEANSTACK);
        }
    }

    if (flags & SCRIPT_VERIFY_INPUT_SIGCHECKS) {
        // This limit is intended for standard use, and is based on an
        // examination of typical and historical standard uses.
        // - allowing P2SH ECDSA multisig with compressed keys, which at an
        // extreme (1-of-15) may have 15 SigChecks in ~590 bytes of scriptSig.
        // - allowing Bare ECDSA multisig, which at an extreme (1-of-3) may have
        // 3 sigchecks in ~72 bytes of scriptSig.
        // - Since the size of an input is 41 bytes + length of scriptSig, then
        // the most dense possible inputs satisfying this rule would be:
        // 2 sigchecks and 26 bytes: 1/33.50 sigchecks/byte.
        // 3 sigchecks and 69 bytes: 1/36.66 sigchecks/byte.
        // The latter can be readily done with 1-of-3 bare multisignatures,
        // however the former is not practically doable with standard scripts,
        // so the practical density limit is 1/36.66.
        static_assert(INT_MAX > MAX_SCRIPT_SIZE,
                      "overflow sanity check on max script size");
        static_assert(INT_MAX / 43 / 3 > MAX_OPS_PER_SCRIPT,
                      "overflow sanity check on maximum possible sigchecks "
                      "from sig+redeem+pub scripts");
        if (int(scriptSig.size()) < metrics.nSigChecks * 43 - 60) {
            return set_error(serror, ScriptError::INPUT_SIGCHECKS);
        }
    }

    metricsOut = metrics;
    return set_success(serror);
}
diff --git a/src/seeder/bitcoin.cpp b/src/seeder/bitcoin.cpp
index 0938c4f52..f75a31c94 100644
--- a/src/seeder/bitcoin.cpp
+++ b/src/seeder/bitcoin.cpp
@@ -1,310 +1,310 @@
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <seeder/bitcoin.h>
#include <chainparams.h>
#include <clientversion.h>
#include <hash.h>
#include <netbase.h>
#include <primitives/blockhash.h>
#include <seeder/db.h>
#include <seeder/messagewriter.h>
#include <serialize.h>
#include <uint256.h>
#include <util/sock.h>
#include <util/time.h>
#include <algorithm>
#define BITCOIN_SEED_NONCE 0x0539a019ca550825ULL
// Flush as much of the pending outgoing buffer as the socket will accept.
// Any send failure (including a closed socket) drops the connection.
void CSeederNode::Send() {
    if (!sock || vSend.empty()) {
        return;
    }
    const int nSent = sock->Send(&vSend[0], vSend.size(), 0);
    if (nSent <= 0) {
        // Error or peer gone: give up on this connection.
        sock.reset();
        return;
    }
    // Drop the bytes that went out; keep the unsent remainder queued.
    vSend.erase(vSend.begin(), vSend.begin() + nSent);
}
// Handle one fully-framed message from the peer. Only VERSION, VERACK and
// ADDR are acted upon; every other command is ignored. Returns Finished
// once enough addresses have been collected, otherwise AwaitingMessages.
PeerMessagingState CSeederNode::ProcessMessage(std::string strCommand,
                                               CDataStream &recv) {
    // tfm::format(std::cout, "%s: RECV %s\n", ToString(you),
    // strCommand);
    if (strCommand == NetMsgType::VERSION) {
        int64_t nTime;
        CService addrMe;
        uint64_t nNonce = 1;
        uint64_t nServiceInt;
        recv >> nVersion >> nServiceInt >> nTime;
        yourServices = ServiceFlags(nServiceInt);
        // Ignore the addrMe service bits sent by the peer
        recv.ignore(8);
        recv >> addrMe;
        // The version message includes information about the sending node
        // which we don't use:
        // - 8 bytes (service bits)
        // - 16 bytes (ipv6 address)
        // - 2 bytes (port)
        recv.ignore(26);
        recv >> nNonce;
        recv >> strSubVer;
        recv >> nStartingHeight;
        // Negotiate the outgoing protocol version and acknowledge.
        vSend.SetVersion(std::min(nVersion, PROTOCOL_VERSION));
        MessageWriter::WriteMessage(vSend, NetMsgType::VERACK);
        return PeerMessagingState::AwaitingMessages;
    }
    if (strCommand == NetMsgType::VERACK) {
        vRecv.SetVersion(std::min(nVersion, PROTOCOL_VERSION));
        // tfm::format(std::cout, "\n%s: version %i\n", ToString(you),
        // nVersion);
        if (vAddr) {
            // Collecting addresses: request the peer's address book, probe
            // its chain with a getheaders from the last checkpoint, and
            // allow it GetTimeout() to respond.
            MessageWriter::WriteMessage(vSend, NetMsgType::GETADDR);
            std::vector<BlockHash> locatorHash(
                1, Params().Checkpoints().mapCheckpoints.rbegin()->second);
            MessageWriter::WriteMessage(vSend, NetMsgType::GETHEADERS,
                                        CBlockLocator(locatorHash), uint256());
            doneAfter = Now<NodeSeconds>() + GetTimeout();
        } else {
            // Not collecting addresses: wrap up the session shortly.
            doneAfter = Now<NodeSeconds>() + 1s;
        }
        return PeerMessagingState::AwaitingMessages;
    }
    if (strCommand == NetMsgType::ADDR && vAddr) {
        std::vector<CAddress> vAddrNew;
        recv >> vAddrNew;
        // tfm::format(std::cout, "%s: got %i addresses\n",
        // ToString(you),
        //             (int)vAddrNew.size());
        auto now = Now<NodeSeconds>();
        std::vector<CAddress>::iterator it = vAddrNew.begin();
        if (vAddrNew.size() > 1) {
            // A real batch arrived: move the deadline up so we disconnect
            // soon (a deadline of 0 means none was set yet).
            if (TicksSinceEpoch<std::chrono::seconds>(doneAfter) == 0 ||
                doneAfter > now + 1s) {
                doneAfter = now + 1s;
            }
        }
        while (it != vAddrNew.end()) {
            CAddress &addr = *it;
            // tfm::format(std::cout, "%s: got address %s\n",
            // ToString(you),
            //             addr.ToString(), (int)(vAddr->size()));
            it++;
            // Clamp clearly bogus timestamps (ancient, or more than 10
            // minutes in the future) to "5 days ago".
            if (addr.nTime <= NodeSeconds{100000000s} ||
                addr.nTime > now + 10min) {
                addr.nTime = now - 5 * 24h;
            }
            // Only keep addresses seen within the last week.
            if (addr.nTime > now - 7 * 24h) {
                vAddr->push_back(addr);
            }
            // tfm::format(std::cout, "%s: added address %s (#%i)\n",
            // ToString(you),
            //             addr.ToString(), (int)(vAddr->size()));
            if (vAddr->size() > ADDR_SOFT_CAP) {
                // Collected enough from this peer; finish immediately.
                doneAfter = NodeSeconds{1s};
                return PeerMessagingState::Finished;
            }
        }
        return PeerMessagingState::AwaitingMessages;
    }
    return PeerMessagingState::AwaitingMessages;
}
bool CSeederNode::ProcessMessages() {
if (vRecv.empty()) {
return false;
}
const CMessageHeader::MessageMagic netMagic = Params().NetMagic();
do {
CDataStream::iterator pstart = std::search(
vRecv.begin(), vRecv.end(), BEGIN(netMagic), END(netMagic));
uint32_t nHeaderSize =
GetSerializeSize(CMessageHeader(netMagic), vRecv.GetVersion());
if (vRecv.end() - pstart < nHeaderSize) {
if (vRecv.size() > nHeaderSize) {
vRecv.erase(vRecv.begin(), vRecv.end() - nHeaderSize);
}
break;
}
vRecv.erase(vRecv.begin(), pstart);
- std::vector<uint8_t> vHeaderSave(vRecv.begin(),
- vRecv.begin() + nHeaderSize);
+ std::vector<std::byte> vHeaderSave(vRecv.begin(),
+ vRecv.begin() + nHeaderSize);
CMessageHeader hdr(netMagic);
vRecv >> hdr;
if (!hdr.IsValidWithoutConfig(netMagic)) {
// tfm::format(std::cout, "%s: BAD (invalid header)\n",
// ToString(you));
ban = 100000;
return true;
}
std::string strCommand = hdr.GetCommand();
unsigned int nMessageSize = hdr.nMessageSize;
if (nMessageSize > MAX_SIZE) {
// tfm::format(std::cout, "%s: BAD (message too large)\n",
// ToString(you));
ban = 100000;
return true;
}
if (nMessageSize > vRecv.size()) {
vRecv.insert(vRecv.begin(), vHeaderSave.begin(), vHeaderSave.end());
break;
}
if (vRecv.GetVersion() >= 209) {
uint256 hash = Hash(Span{vRecv}.first(nMessageSize));
if (memcmp(hash.begin(), hdr.pchChecksum,
CMessageHeader::CHECKSUM_SIZE) != 0) {
continue;
}
}
- std::vector<uint8_t> vec{vRecv.begin(), vRecv.begin() + nMessageSize};
+ std::vector<std::byte> vec{vRecv.begin(), vRecv.begin() + nMessageSize};
CDataStream vMsg(MakeUCharSpan(vec), vRecv.GetType(),
vRecv.GetVersion());
vRecv.ignore(nMessageSize);
if (ProcessMessage(strCommand, vMsg) == PeerMessagingState::Finished) {
return true;
}
// tfm::format(std::cout, "%s: done processing %s\n",
// ToString(you),
// strCommand);
} while (1);
return false;
}
// Set up per-peer state for probing `ip`; harvested addresses are appended
// to *vAddrIn when it is non-null.
CSeederNode::CSeederNode(const CService &ip, std::vector<CAddress> *vAddrIn)
    : vSend(SER_NETWORK, 0), vRecv(SER_NETWORK, 0), nHeaderStart(-1),
      nMessageStart(-1), nVersion(0), vAddr(vAddrIn), ban(0),
      doneAfter(NodeSeconds{0s}), you(ip),
      yourServices(ServiceFlags(NODE_NETWORK)) {
    // 2012-02-20 (unix time); past this date peers are assumed to speak
    // protocol >= 209, which adds the header checksum (see the >= 209
    // check in ProcessMessages).
    constexpr int64_t nChecksummedHeadersTime{1329696000};
    if (GetTime() > nChecksummedHeadersTime) {
        vSend.SetVersion(209);
        vRecv.SetVersion(209);
    }
}
// Connect to the peer (optionally through a configured proxy), perform the
// version handshake, then pump messages until done, banned, timed out, or
// disconnected. Returns true only if the session completed without a ban
// or a connection error.
bool CSeederNode::Run() {
    // FIXME: This logic is duplicated with CConnman::ConnectNode for no
    // good reason.
    bool connected = false;
    proxyType proxy;
    if (you.IsValid()) {
        bool proxyConnectionFailed = false;
        if (GetProxy(you.GetNetwork(), proxy)) {
            sock = CreateSock(proxy.proxy);
            if (!sock) {
                return false;
            }
            connected = ConnectThroughProxy(
                proxy, you.ToStringIP(), you.GetPort(), *sock, nConnectTimeout,
                proxyConnectionFailed);
        } else {
            // no proxy needed (none set for target network)
            sock = CreateSock(you);
            if (!sock) {
                return false;
            }
            // no proxy needed (none set for target network)
            connected =
                ConnectSocketDirectly(you, *sock, nConnectTimeout, false);
        }
    }
    if (!connected) {
        // tfm::format(std::cout, "Cannot connect to %s\n",
        // ToString(you));
        sock.reset();
        return false;
    }
    // Write version message
    // Don't include the time in CAddress serialization. See D14753.
    uint64_t nLocalServices = 0;
    uint64_t nLocalNonce = BITCOIN_SEED_NONCE;
    uint64_t your_services{yourServices};
    uint64_t my_services{ServiceFlags(NODE_NETWORK)};
    uint8_t fRelayTxs = 0;
    const std::string clientName = gArgs.GetArg("-uaclientname", CLIENT_NAME);
    const std::string clientVersion =
        gArgs.GetArg("-uaclientversion", FormatVersion(CLIENT_VERSION));
    const std::string userAgent =
        FormatUserAgent(clientName, clientVersion, {"seeder"});
    MessageWriter::WriteMessage(vSend, NetMsgType::VERSION, PROTOCOL_VERSION,
                                nLocalServices, GetTime(), your_services, you,
                                my_services, CService(), nLocalNonce, userAgent,
                                GetRequireHeight(), fRelayTxs);
    Send();
    bool res = true;
    NodeSeconds now;
    // Receive loop: keep going while not banned, the deadline (doneAfter;
    // 0 means "not set yet") has not passed, and the socket is alive.
    while (now = Now<NodeSeconds>(),
           ban == 0 &&
               (TicksSinceEpoch<std::chrono::seconds>(doneAfter) == 0 ||
                doneAfter > now) &&
               sock) {
        char pchBuf[0x10000];
        fd_set fdsetRecv;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetError);
        FD_SET(sock->Get(), &fdsetRecv);
        FD_SET(sock->Get(), &fdsetError);
        // Block in select() until data/error, or until the deadline (or
        // the default timeout when no deadline is set) expires.
        struct timeval wa;
        if (TicksSinceEpoch<std::chrono::seconds>(doneAfter) != 0) {
            wa.tv_sec = (doneAfter - now).count();
            wa.tv_usec = 0;
        } else {
            wa.tv_sec = GetTimeout().count();
            wa.tv_usec = 0;
        }
        int ret =
            select(sock->Get() + 1, &fdsetRecv, nullptr, &fdsetError, &wa);
        if (ret != 1) {
            // Timeout or select error: only counts as failure if we never
            // progressed far enough to set a deadline.
            if (TicksSinceEpoch<std::chrono::seconds>(doneAfter) == 0) {
                res = false;
            }
            break;
        }
        int nBytes = sock->Recv(pchBuf, sizeof(pchBuf), 0);
        int nPos = vRecv.size();
        if (nBytes > 0) {
            // Append received bytes to the input buffer.
            vRecv.resize(nPos + nBytes);
            memcpy(&vRecv[nPos], pchBuf, nBytes);
        } else if (nBytes == 0) {
            // tfm::format(std::cout, "%s: BAD (connection closed
            // prematurely)\n",
            // ToString(you));
            res = false;
            break;
        } else {
            // tfm::format(std::cout, "%s: BAD (connection error)\n",
            // ToString(you));
            res = false;
            break;
        }
        ProcessMessages();
        Send();
    }
    if (!sock) {
        res = false;
    }
    sock.reset();
    return (ban == 0) && res;
}
diff --git a/src/seeder/messagewriter.h b/src/seeder/messagewriter.h
index fbe423188..1d926c5f2 100644
--- a/src/seeder/messagewriter.h
+++ b/src/seeder/messagewriter.h
@@ -1,37 +1,34 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SEEDER_MESSAGEWRITER_H
#define BITCOIN_SEEDER_MESSAGEWRITER_H
#include <config.h>
#include <net.h>
#include <netmessagemaker.h>
namespace MessageWriter {
template <typename... Args>
static void WriteMessage(CDataStream &stream, std::string command,
Args &&...args) {
CSerializedNetMsg payload = CNetMsgMaker(stream.GetVersion())
.Make(command, std::forward<Args>(args)...);
- size_t nMessageSize = payload.data.size();
// Serialize header
std::vector<uint8_t> serializedHeader;
V1TransportSerializer serializer = V1TransportSerializer();
serializer.prepareForTransport(GetConfig(), payload, serializedHeader);
// Write message header + payload to outgoing stream
- stream.write(reinterpret_cast<const char *>(serializedHeader.data()),
- serializedHeader.size());
- if (nMessageSize) {
- stream.write(reinterpret_cast<const char *>(payload.data.data()),
- nMessageSize);
+ stream.write(MakeByteSpan(serializedHeader));
+ if (payload.data.size()) {
+ stream.write(MakeByteSpan(payload.data));
}
}
} // namespace MessageWriter
#endif // BITCOIN_SEEDER_MESSAGEWRITER_H
diff --git a/src/seeder/test/message_writer_tests.cpp b/src/seeder/test/message_writer_tests.cpp
index a6f2c3228..961f58073 100644
--- a/src/seeder/test/message_writer_tests.cpp
+++ b/src/seeder/test/message_writer_tests.cpp
@@ -1,92 +1,92 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <hash.h>
#include <primitives/block.h>
#include <protocol.h>
#include <seeder/messagewriter.h>
#include <streams.h>
#include <version.h>
#include <boost/test/unit_test.hpp>
#include <string>
#include <vector>
BOOST_AUTO_TEST_SUITE(message_writer_tests)
template <typename... Args>
static void CheckMessage(CDataStream &expectedMessage, std::string command,
Args &&...args) {
CDataStream message(SER_NETWORK, PROTOCOL_VERSION);
MessageWriter::WriteMessage(message, command, std::forward<Args>(args)...);
BOOST_CHECK_EQUAL(message.size(), expectedMessage.size());
for (size_t i = 0; i < message.size(); i++) {
- BOOST_CHECK_EQUAL(message[i], expectedMessage[i]);
+ BOOST_CHECK_EQUAL(uint8_t(message[i]), uint8_t(expectedMessage[i]));
}
}
// WriteMessage must produce header + payload for a VERSION message that is
// byte-identical to a manually assembled header (including the checksum
// computed over the payload).
BOOST_AUTO_TEST_CASE(simple_header_and_payload_message_writer_test) {
    SelectParams(CBaseChainParams::MAIN);
    int64_t now = GetTime();
    uint64_t nonce = 0;
    uint64_t serviceFlags = uint64_t(ServiceFlags(NODE_NETWORK));
    CService service;
    CAddress addrTo(service, ServiceFlags(NODE_NETWORK));
    CAddress addrFrom(service, ServiceFlags(NODE_NETWORK));
    std::string user_agent = "/Bitcoin ABC:0.0.0(seeder)/";
    int start_height = 1;
    // Build the expected payload exactly as the version message serializes.
    CDataStream versionPayload(SER_NETWORK, PROTOCOL_VERSION);
    versionPayload << PROTOCOL_VERSION << serviceFlags << now << addrTo
                   << addrFrom << nonce << user_agent << start_height;
    CMessageHeader versionhdr(Params().NetMagic(), NetMsgType::VERSION,
                              versionPayload.size());
    // The header carries the first bytes of the payload's double-SHA256.
    uint256 hash = Hash(versionPayload);
    memcpy(versionhdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
    CDataStream expectedVersion(SER_NETWORK, PROTOCOL_VERSION);
    expectedVersion << versionhdr << versionPayload;
    CheckMessage(expectedVersion, NetMsgType::VERSION, PROTOCOL_VERSION,
                 serviceFlags, now, addrTo, addrFrom, nonce, user_agent,
                 start_height);
}
// A command with no payload (VERACK) still gets a full header whose
// checksum is the hash of the empty byte string.
BOOST_AUTO_TEST_CASE(header_empty_payload_message_writer_test) {
    SelectParams(CBaseChainParams::MAIN);
    CMessageHeader verackHeader(Params().NetMagic(), NetMsgType::VERACK, 0);
    CDataStream expectedVerack(SER_NETWORK, PROTOCOL_VERSION);
    // This is an empty payload, but is still necessary for the checksum
    std::vector<uint8_t> payload;
    uint256 hash = Hash(payload);
    memcpy(verackHeader.pchChecksum, hash.begin(),
           CMessageHeader::CHECKSUM_SIZE);
    expectedVerack << verackHeader;
    CheckMessage(expectedVerack, NetMsgType::VERACK);
}
// A GETHEADERS message with a one-entry block locator and a null stop hash
// must serialize to the expected header + payload bytes.
BOOST_AUTO_TEST_CASE(write_getheaders_message_test) {
    SelectParams(CBaseChainParams::MAIN);
    CDataStream payload(SER_NETWORK, PROTOCOL_VERSION);
    BlockHash bhash(uint256S(
        "0000000099f5509b5f36b1926bcf82b21d936ebeadee811030dfbbb7fae915d7"));
    std::vector<BlockHash> vlocator(1, bhash);
    CBlockLocator locatorhash(vlocator);
    payload << locatorhash << uint256();
    uint256 hash = Hash(payload);
    CMessageHeader msgHeader(Params().NetMagic(), NetMsgType::GETHEADERS,
                             payload.size());
    memcpy(msgHeader.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
    CDataStream expectedMsg(SER_NETWORK, PROTOCOL_VERSION);
    expectedMsg << msgHeader << payload;
    CheckMessage(expectedMsg, NetMsgType::GETHEADERS, locatorhash, uint256());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/seeder/util.h b/src/seeder/util.h
index 41cb661b5..47dc5e751 100644
--- a/src/seeder/util.h
+++ b/src/seeder/util.h
@@ -1,11 +1,13 @@
// Copyright (c) 2017-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SEEDER_UTIL_H
#define BITCOIN_SEEDER_UTIL_H
-#define BEGIN(a) ((uint8_t *)&(a))
-#define END(a) ((uint8_t *)&((&(a))[1]))
+#include <span.h>
+
+#define BEGIN(a) BytePtr(&(a))
+#define END(a) BytePtr(&((&(a))[1]))
#endif // BITCOIN_SEEDER_UTIL_H
diff --git a/src/serialize.h b/src/serialize.h
index 315e994dc..907ce8fc6 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,1287 +1,1273 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SERIALIZE_H
#define BITCOIN_SERIALIZE_H
#include <compat/endian.h>
#include <prevector.h>
#include <rcu.h>
#include <span.h>
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <ios>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
/**
* The maximum size of a serialized object in bytes or number of elements
* (for eg vectors) when the size is encoded as CompactSize.
*/
static constexpr uint64_t MAX_SIZE = 0x02000000;
/**
* Maximum amount of memory (in bytes) to allocate at once when deserializing
* vectors.
*/
static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
/**
* Dummy data type to identify deserializing constructors.
*
* By convention, a constructor of a type T with signature
*
* template <typename Stream> T::T(deserialize_type, Stream& s)
*
* is a deserializing constructor, which builds the type by deserializing it
* from s. If T contains const fields, this is likely the only way to do so.
*/
struct deserialize_type {};
constexpr deserialize_type deserialize{};
-//! Safely convert odd char pointer types to standard ones.
-inline char *CharCast(char *c) {
- return c;
-}
-inline char *CharCast(uint8_t *c) {
- return (char *)c;
-}
-inline const char *CharCast(const char *c) {
- return c;
-}
-inline const char *CharCast(const uint8_t *c) {
- return (const char *)c;
-}
-
/**
* Lowest-level serialization and conversion.
- * @note Sizes of these types are verified in the tests
*/
template <typename Stream> inline void ser_writedata8(Stream &s, uint8_t obj) {
- s.write((char *)&obj, 1);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream>
inline void ser_writedata16(Stream &s, uint16_t obj) {
obj = htole16(obj);
- s.write((char *)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream>
inline void ser_writedata16be(Stream &s, uint16_t obj) {
obj = htobe16(obj);
- s.write((char *)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream>
inline void ser_writedata32(Stream &s, uint32_t obj) {
obj = htole32(obj);
- s.write((char *)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream>
inline void ser_writedata32be(Stream &s, uint32_t obj) {
obj = htobe32(obj);
- s.write((char *)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream>
inline void ser_writedata64(Stream &s, uint64_t obj) {
obj = htole64(obj);
- s.write((char *)&obj, 8);
+ s.write(AsBytes(Span{&obj, 1}));
}
template <typename Stream> inline uint8_t ser_readdata8(Stream &s) {
uint8_t obj;
- s.read((char *)&obj, 1);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return obj;
}
template <typename Stream> inline uint16_t ser_readdata16(Stream &s) {
uint16_t obj;
- s.read((char *)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le16toh(obj);
}
template <typename Stream> inline uint16_t ser_readdata16be(Stream &s) {
uint16_t obj;
- s.read((char *)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be16toh(obj);
}
template <typename Stream> inline uint32_t ser_readdata32(Stream &s) {
uint32_t obj;
- s.read((char *)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le32toh(obj);
}
template <typename Stream> inline uint32_t ser_readdata32be(Stream &s) {
uint32_t obj;
- s.read((char *)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be32toh(obj);
}
template <typename Stream> inline uint64_t ser_readdata64(Stream &s) {
uint64_t obj;
- s.read((char *)&obj, 8);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le64toh(obj);
}
// Bit-copy an IEEE-754 double into a uint64_t (object representation, not
// a numeric conversion).
inline uint64_t ser_double_to_uint64(double x) {
    static_assert(sizeof(uint64_t) == sizeof(double),
                  "double and uint64_t assumed to have the same size");
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    return bits;
}
// Bit-copy an IEEE-754 float into a uint32_t (object representation, not a
// numeric conversion).
inline uint32_t ser_float_to_uint32(float x) {
    static_assert(sizeof(uint32_t) == sizeof(float),
                  "float and uint32_t assumed to have the same size");
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    return bits;
}
// Reconstruct a double from the uint64_t bit pattern produced by
// ser_double_to_uint64.
inline double ser_uint64_to_double(uint64_t y) {
    static_assert(sizeof(double) == sizeof(uint64_t),
                  "double and uint64_t assumed to have the same size");
    double value;
    std::memcpy(&value, &y, sizeof(value));
    return value;
}
// Reconstruct a float from the uint32_t bit pattern produced by
// ser_float_to_uint32.
inline float ser_uint32_to_float(uint32_t y) {
    static_assert(sizeof(float) == sizeof(uint32_t),
                  "float and uint32_t assumed to have the same size");
    float value;
    std::memcpy(&value, &y, sizeof(value));
    return value;
}
/////////////////////////////////////////////////////////////////
//
// Templates for serializing to anything that looks like a stream,
-// i.e. anything that supports .read(char*, size_t) and .write(char*, size_t)
+// i.e. anything that supports .read(Span<std::byte>) and .write(Span<const
+// std::byte>)
//
class CSizeComputer;
enum {
// primary actions
SER_NETWORK = (1 << 0),
SER_DISK = (1 << 1),
SER_GETHASH = (1 << 2),
};
//! Convert the reference base type to X, without changing constness or
//! reference type.
template <typename X> X &ReadWriteAsHelper(X &x) {
return x;
}
template <typename X> const X &ReadWriteAsHelper(const X &x) {
return x;
}
#define READWRITE(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__))
#define READWRITEAS(type, obj) \
(::SerReadWriteMany(s, ser_action, ReadWriteAsHelper<type>(obj)))
#define SER_READ(obj, code) \
::SerRead( \
s, ser_action, obj, \
[&](Stream &s, typename std::remove_const<Type>::type &obj) { code; })
#define SER_WRITE(obj, code) \
::SerWrite(s, ser_action, obj, [&](Stream &s, const Type &obj) { code; })
/**
* Implement the Ser and Unser methods needed for implementing a formatter
* (see Using below).
*
* Both Ser and Unser are delegated to a single static method SerializationOps,
* which is polymorphic in the serialized/deserialized type (allowing it to be
* const when serializing, and non-const when deserializing).
*
* Example use:
* struct FooFormatter {
* FORMATTER_METHODS(Class, obj) { READWRITE(obj.val1, VARINT(obj.val2)); }
* }
* would define a class FooFormatter that defines a serialization of Class
* objects consisting of serializing its val1 member using the default
* serialization, and its val2 member using VARINT serialization. That
* FooFormatter can then be used in statements like
* READWRITE(Using<FooFormatter>(obj.bla)).
*/
#define FORMATTER_METHODS(cls, obj) \
template <typename Stream> static void Ser(Stream &s, const cls &obj) { \
SerializationOps(obj, s, CSerActionSerialize()); \
} \
template <typename Stream> static void Unser(Stream &s, cls &obj) { \
SerializationOps(obj, s, CSerActionUnserialize()); \
} \
template <typename Stream, typename Type, typename Operation> \
static inline void SerializationOps(Type &obj, Stream &s, \
Operation ser_action)
/**
* Implement the Serialize and Unserialize methods by delegating to a
* single templated static method that takes the to-be-(de)serialized
* object as a parameter. This approach has the advantage that the
* constness of the object becomes a template parameter, and thus
* allows a single implementation that sees the object as const for
* serializing and non-const for deserializing, without casts.
*/
#define SERIALIZE_METHODS(cls, obj) \
template <typename Stream> void Serialize(Stream &s) const { \
static_assert(std::is_same<const cls &, decltype(*this)>::value, \
"Serialize type mismatch"); \
Ser(s, *this); \
} \
template <typename Stream> void Unserialize(Stream &s) { \
static_assert(std::is_same<cls &, decltype(*this)>::value, \
"Unserialize type mismatch"); \
Unser(s, *this); \
} \
FORMATTER_METHODS(cls, obj)
#ifndef CHAR_EQUALS_INT8
// TODO Get rid of bare char
template <typename Stream> inline void Serialize(Stream &s, char a) {
    ser_writedata8(s, a);
}
#endif
// Serialize overloads for primitive scalar types. Integers are written in
// little-endian byte order via the ser_writedata* helpers; float/double
// are bit-cast to the same-width unsigned integer before writing.
template <typename Stream> inline void Serialize(Stream &s, int8_t a) {
    ser_writedata8(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, uint8_t a) {
    ser_writedata8(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, int16_t a) {
    ser_writedata16(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, uint16_t a) {
    ser_writedata16(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, int32_t a) {
    ser_writedata32(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, uint32_t a) {
    ser_writedata32(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, int64_t a) {
    ser_writedata64(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, uint64_t a) {
    ser_writedata64(s, a);
}
template <typename Stream> inline void Serialize(Stream &s, float a) {
    ser_writedata32(s, ser_float_to_uint32(a));
}
template <typename Stream> inline void Serialize(Stream &s, double a) {
    ser_writedata64(s, ser_double_to_uint64(a));
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const int8_t (&a)[N]) {
s.write(a, N);
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const uint8_t (&a)[N]) {
- s.write(CharCast(a), N);
+ s.write(MakeByteSpan(a));
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const std::array<int8_t, N> &a) {
s.write(a.data(), N);
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const std::array<uint8_t, N> &a) {
- s.write(CharCast(a.data()), N);
+ s.write(MakeByteSpan(a));
}
#ifndef CHAR_EQUALS_INT8
// TODO Get rid of bare char
template <typename Stream> inline void Unserialize(Stream &s, char &a) {
a = ser_readdata8(s);
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const char (&a)[N]) {
- s.write(a, N);
+ s.write(MakeByteSpan(a));
}
template <typename Stream, size_t N>
inline void Serialize(Stream &s, const std::array<char, N> &a) {
- s.write(a.data(), N);
+ s.write(MakeByteSpan(a));
}
#endif
template <typename Stream>
inline void Serialize(Stream &s, const Span<const uint8_t> &span) {
- s.write(CharCast(span.data()), span.size());
+ s.write(AsBytes(span));
}
template <typename Stream>
inline void Serialize(Stream &s, const Span<uint8_t> &span) {
- s.write(CharCast(span.data()), span.size());
+ s.write(AsBytes(span));
}
// Unserialize overloads for primitive scalar types, mirroring the
// corresponding Serialize overloads: little-endian decode via the
// ser_readdata* helpers, with float/double reconstructed from their
// same-width unsigned bit pattern.
template <typename Stream> inline void Unserialize(Stream &s, int8_t &a) {
    a = ser_readdata8(s);
}
template <typename Stream> inline void Unserialize(Stream &s, uint8_t &a) {
    a = ser_readdata8(s);
}
template <typename Stream> inline void Unserialize(Stream &s, int16_t &a) {
    a = ser_readdata16(s);
}
template <typename Stream> inline void Unserialize(Stream &s, uint16_t &a) {
    a = ser_readdata16(s);
}
template <typename Stream> inline void Unserialize(Stream &s, int32_t &a) {
    a = ser_readdata32(s);
}
template <typename Stream> inline void Unserialize(Stream &s, uint32_t &a) {
    a = ser_readdata32(s);
}
template <typename Stream> inline void Unserialize(Stream &s, int64_t &a) {
    a = ser_readdata64(s);
}
template <typename Stream> inline void Unserialize(Stream &s, uint64_t &a) {
    a = ser_readdata64(s);
}
template <typename Stream> inline void Unserialize(Stream &s, float &a) {
    a = ser_uint32_to_float(ser_readdata32(s));
}
template <typename Stream> inline void Unserialize(Stream &s, double &a) {
    a = ser_uint64_to_double(ser_readdata64(s));
}
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, int8_t (&a)[N]) {
- s.read(a, N);
+ s.read(MakeWritableByteSpan(a));
}
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, uint8_t (&a)[N]) {
- s.read(CharCast(a), N);
+ s.read(MakeWritableByteSpan(a));
}
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, std::array<int8_t, N> &a) {
s.read(a.data(), N);
}
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, std::array<uint8_t, N> &a) {
- s.read(CharCast(a.data()), N);
+ s.read(MakeWritableByteSpan(a));
}
#ifndef CHAR_EQUALS_INT8
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, char (&a)[N]) {
- s.read(CharCast(a), N);
+ s.read(MakeWritableByteSpan(a));
}
template <typename Stream, size_t N>
inline void Unserialize(Stream &s, std::array<char, N> &a) {
- s.read(CharCast(a.data()), N);
+ s.read(MakeWritableByteSpan(a));
}
#endif
// Serialize a bool as a single byte: 0x01 for true, 0x00 for false.
template <typename Stream> inline void Serialize(Stream &s, bool a) {
    ser_writedata8(s, a ? 1 : 0);
}
// Deserialize a bool from a single byte; any nonzero byte reads as true.
template <typename Stream> inline void Unserialize(Stream &s, bool &a) {
    a = (ser_readdata8(s) != 0);
}
template <typename Stream>
inline void Unserialize(Stream &s, Span<uint8_t> &span) {
- s.read(CharCast(span.data()), span.size());
+ s.read(AsWritableBytes(span));
}
/**
 * Compact Size
 * size < 253 -- 1 byte
 * size <= USHRT_MAX -- 3 bytes (253 + 2 bytes)
 * size <= UINT_MAX -- 5 bytes (254 + 4 bytes)
 * size > UINT_MAX -- 9 bytes (255 + 8 bytes)
 *
 * Returns the number of bytes the CompactSize encoding of nSize occupies.
 */
inline uint32_t GetSizeOfCompactSize(uint64_t nSize) {
    if (nSize > std::numeric_limits<uint32_t>::max()) {
        return sizeof(uint8_t) + sizeof(uint64_t);
    }
    if (nSize > std::numeric_limits<uint16_t>::max()) {
        return sizeof(uint8_t) + sizeof(uint32_t);
    }
    return nSize < 253 ? sizeof(uint8_t) : sizeof(uint8_t) + sizeof(uint16_t);
}
inline void WriteCompactSize(CSizeComputer &os, uint64_t nSize);
/**
 * Write nSize in CompactSize format: values below 253 as one byte,
 * otherwise a marker byte (253/254/255) followed by the value as a
 * little-endian uint16/uint32/uint64.
 */
template <typename Stream> void WriteCompactSize(Stream &os, uint64_t nSize) {
    if (nSize < 253) {
        ser_writedata8(os, nSize);
        return;
    }
    if (nSize <= std::numeric_limits<uint16_t>::max()) {
        ser_writedata8(os, 253);
        ser_writedata16(os, nSize);
        return;
    }
    if (nSize <= std::numeric_limits<uint32_t>::max()) {
        ser_writedata8(os, 254);
        ser_writedata32(os, nSize);
        return;
    }
    ser_writedata8(os, 255);
    ser_writedata64(os, nSize);
}
/**
 * Decode a CompactSize-encoded variable-length integer.
 *
 * Rejects non-canonical encodings (a value written with more bytes than
 * required). As these are primarily used to encode the size of vector-like
 * serializations, by default a range check against MAX_SIZE is performed;
 * when used as a generic number encoding, range_check should be false.
 */
template <typename Stream>
uint64_t ReadCompactSize(Stream &is, bool range_check = true) {
    const uint8_t prefix = ser_readdata8(is);
    uint64_t nValue = 0;
    switch (prefix) {
        case 253:
            nValue = ser_readdata16(is);
            if (nValue < 253) {
                throw std::ios_base::failure("non-canonical ReadCompactSize()");
            }
            break;
        case 254:
            nValue = ser_readdata32(is);
            if (nValue < 0x10000u) {
                throw std::ios_base::failure("non-canonical ReadCompactSize()");
            }
            break;
        case 255:
            nValue = ser_readdata64(is);
            if (nValue < 0x100000000ULL) {
                throw std::ios_base::failure("non-canonical ReadCompactSize()");
            }
            break;
        default:
            // Values below 253 are encoded directly in the prefix byte.
            nValue = prefix;
    }
    if (range_check && nValue > MAX_SIZE) {
        throw std::ios_base::failure("ReadCompactSize(): size too large");
    }
    return nValue;
}
/**
* Variable-length integers: bytes are a MSB base-128 encoding of the number.
* The high bit in each byte signifies whether another digit follows. To make
* sure the encoding is one-to-one, one is subtracted from all but the last
* digit. Thus, the byte sequence a[] with length len, where all but the last
* byte has bit 128 set, encodes the number:
*
* (a[len-1] & 0x7F) + sum(i=1..len-1, 128^i*((a[len-i-1] & 0x7F)+1))
*
* Properties:
* * Very small (0-127: 1 byte, 128-16511: 2 bytes, 16512-2113663: 3 bytes)
* * Every integer has exactly one encoding
* * Encoding does not depend on size of original integer type
* * No redundancy: every (infinite) byte sequence corresponds to a list
* of encoded integers.
*
* 0: [0x00] 256: [0x81 0x00]
* 1: [0x01] 16383: [0xFE 0x7F]
* 127: [0x7F] 16384: [0xFF 0x00]
* 128: [0x80 0x00] 16511: [0xFF 0x7F]
* 255: [0x80 0x7F] 65535: [0x82 0xFE 0x7F]
* 2^32: [0x8E 0xFE 0xFE 0xFF 0x00]
*/
/**
* Mode for encoding VarInts.
*
* Currently there is no support for signed encodings. The default mode will not
* compile with signed values, and the legacy "nonnegative signed" mode will
* accept signed values, but improperly encode and decode them if they are
* negative. In the future, the DEFAULT mode could be extended to support
* negative numbers in a backwards compatible way, and additional modes could be
* added to support different varint formats (e.g. zigzag encoding).
*/
enum class VarIntMode { DEFAULT, NONNEGATIVE_SIGNED };
/**
 * Compile-time guard: instantiating this type asserts that integer type I
 * is compatible with the chosen VarIntMode (unsigned for DEFAULT, signed
 * for NONNEGATIVE_SIGNED).
 */
template <VarIntMode Mode, typename I> struct CheckVarIntMode {
    constexpr CheckVarIntMode() {
        static_assert(Mode != VarIntMode::DEFAULT || std::is_unsigned<I>::value,
                      "Unsigned type required with mode DEFAULT.");
        static_assert(Mode != VarIntMode::NONNEGATIVE_SIGNED ||
                          std::is_signed<I>::value,
                      "Signed type required with mode NONNEGATIVE_SIGNED.");
    }
};
/**
 * Return the number of bytes n occupies in VarInt encoding.
 * Each encoded byte carries 7 payload bits; the "- 1" mirrors the bijective
 * encoding used by WriteVarInt below.
 */
template <VarIntMode Mode, typename I>
inline unsigned int GetSizeOfVarInt(I n) {
    CheckVarIntMode<Mode, I>();
    unsigned int nBytes = 1;
    while (n > 0x7F) {
        n = (n >> 7) - 1;
        ++nBytes;
    }
    return nBytes;
}
// Forward declaration of the CSizeComputer overload (defined near the end of
// this file), which only accounts for the encoded size instead of writing.
template <typename I> inline void WriteVarInt(CSizeComputer &os, I n);
/**
 * Serialize n to stream os in MSB base-128 VarInt format (see the format
 * description above). The encoding is most-significant-group first, so bytes
 * are staged in tmp[] least-significant-first and then emitted in reverse.
 */
template <typename Stream, VarIntMode Mode, typename I>
void WriteVarInt(Stream &os, I n) {
    CheckVarIntMode<Mode, I>();
    // Worst case: one output byte per 7 payload bits.
    uint8_t tmp[(sizeof(n) * 8 + 6) / 7];
    int len = 0;
    while (true) {
        // The high bit marks "another byte follows" on all but the first
        // byte staged here (which is emitted last).
        tmp[len] = (n & 0x7F) | (len ? 0x80 : 0x00);
        if (n <= 0x7F) {
            break;
        }
        // The "- 1" makes the encoding one-to-one (see comment above).
        n = (n >> 7) - 1;
        len++;
    }
    // Emit staged bytes in reverse: most significant group first.
    do {
        ser_writedata8(os, tmp[len]);
    } while (len--);
}
/**
 * Deserialize a VarInt (MSB base-128, see format comment above) of type I
 * from stream is. Throws std::ios_base::failure if the encoded value would
 * not fit in I.
 */
template <typename Stream, VarIntMode Mode, typename I>
I ReadVarInt(Stream &is) {
    CheckVarIntMode<Mode, I>();
    I n = 0;
    while (true) {
        uint8_t chData = ser_readdata8(is);
        // The shift below must not discard already-accumulated bits.
        if (n > (std::numeric_limits<I>::max() >> 7)) {
            throw std::ios_base::failure("ReadVarInt(): size too large");
        }
        n = (n << 7) | (chData & 0x7F);
        if ((chData & 0x80) == 0) {
            return n;
        }
        // A continuation byte implies "n + 1" (bijective encoding); guard
        // that increment against overflow before applying it.
        if (n == std::numeric_limits<I>::max()) {
            throw std::ios_base::failure("ReadVarInt(): size too large");
        }
        n++;
    }
}
/**
* Simple wrapper class to serialize objects using a formatter; used by
* Using().
*/
/**
 * Binds an lvalue reference to an object together with a Formatter that
 * knows how to (de)serialize it; instances are produced by Using().
 */
template <typename Formatter, typename T> class Wrapper {
    static_assert(std::is_lvalue_reference<T>::value,
                  "Wrapper needs an lvalue reference type T");
protected:
    T m_object;
public:
    explicit Wrapper(T obj) : m_object(obj) {}
    //! Serialize the referenced object through a fresh Formatter.
    template <typename Stream> void Serialize(Stream &s) const {
        Formatter{}.Ser(s, m_object);
    }
    //! Deserialize into the referenced object through a fresh Formatter.
    template <typename Stream> void Unserialize(Stream &s) {
        Formatter{}.Unser(s, m_object);
    }
};
/**
* Cause serialization/deserialization of an object to be done using a
* specified formatter class.
*
* To use this, you need a class Formatter that has public functions Ser(stream,
* const object&) for serialization, and Unser(stream, object&) for
* deserialization. Serialization routines (inside READWRITE, or directly with
* << and >> operators), can then use Using<Formatter>(object).
*
* This works by constructing a Wrapper<Formatter, T>-wrapped version of object,
* where T is const during serialization, and non-const during deserialization,
* which maintains const correctness.
*/
template <typename Formatter, typename T>
static inline Wrapper<Formatter, T &> Using(T &&t) {
    // Binding T& preserves const-ness: const during serialization,
    // mutable during deserialization (see comment above).
    return Wrapper<Formatter, T &>(t);
}
// Convenience macros pairing an object with its matching formatter.
#define VARINT_MODE(obj, mode) Using<VarIntFormatter<mode>>(obj)
#define VARINT(obj) Using<VarIntFormatter<VarIntMode::DEFAULT>>(obj)
#define COMPACTSIZE(obj) Using<CompactSizeFormatter<true>>(obj)
#define LIMITED_STRING(obj, n) Using<LimitedStringFormatter<n>>(obj)
/**
* Serialization wrapper class for integers in VarInt format.
*/
/**
 * Serialization wrapper class for integers in VarInt format.
 */
template <VarIntMode Mode> struct VarIntFormatter {
    //! Write v in VarInt format.
    template <typename Stream, typename I> void Ser(Stream &s, I v) {
        WriteVarInt<Stream, Mode, std::remove_cv_t<I>>(s, v);
    }
    //! Read a VarInt-encoded value into v.
    template <typename Stream, typename I> void Unser(Stream &s, I &v) {
        v = ReadVarInt<Stream, Mode, std::remove_cv_t<I>>(s);
    }
};
/**
* Serialization wrapper class for custom integers and enums.
*
* It permits specifying the serialized size (1 to 8 bytes) and endianness.
*
* Use the big endian mode for values that are stored in memory in native
* byte order, but serialized in big endian notation. This is only intended
* to implement serializers that are compatible with existing formats, and
* its use is not recommended for new data structures.
*/
template <int Bytes, bool BigEndian = false> struct CustomUintFormatter {
static_assert(Bytes > 0 && Bytes <= 8,
"CustomUintFormatter Bytes out of range");
static constexpr uint64_t MAX = 0xffffffffffffffff >> (8 * (8 - Bytes));
template <typename Stream, typename I> void Ser(Stream &s, I v) {
if (v < 0 || v > MAX) {
throw std::ios_base::failure(
"CustomUintFormatter value out of range");
}
if (BigEndian) {
uint64_t raw = htobe64(v);
- s.write(((const char *)&raw) + 8 - Bytes, Bytes);
+ s.write({BytePtr(&raw) + 8 - Bytes, Bytes});
} else {
uint64_t raw = htole64(v);
- s.write((const char *)&raw, Bytes);
+ s.write({BytePtr(&raw), Bytes});
}
}
template <typename Stream, typename I> void Unser(Stream &s, I &v) {
using U = typename std::conditional<std::is_enum<I>::value,
std::underlying_type<I>,
std::common_type<I>>::type::type;
static_assert(std::numeric_limits<U>::max() >= MAX &&
std::numeric_limits<U>::min() <= 0,
"Assigned type too small");
uint64_t raw = 0;
if (BigEndian) {
- s.read(((char *)&raw) + 8 - Bytes, Bytes);
+ s.read({BytePtr(&raw) + 8 - Bytes, Bytes});
v = static_cast<I>(be64toh(raw));
} else {
- s.read((char *)&raw, Bytes);
+ s.read({BytePtr(&raw), Bytes});
v = static_cast<I>(le64toh(raw));
}
}
};
template <int Bytes>
using BigEndianFormatter = CustomUintFormatter<Bytes, true>;
/** Formatter for integers in CompactSize format. */
/** Formatter for integers in CompactSize format. */
template <bool RangeCheck> struct CompactSizeFormatter {
    //! Read a CompactSize and check that it fits the target type I.
    template <typename Stream, typename I> void Unser(Stream &s, I &v) {
        const uint64_t n = ReadCompactSize<Stream>(s, RangeCheck);
        if (n < std::numeric_limits<I>::min() ||
            n > std::numeric_limits<I>::max()) {
            throw std::ios_base::failure("CompactSize exceeds limit of type");
        }
        v = n;
    }
    //! Write v as a CompactSize; only unsigned, at-most-64-bit types.
    template <typename Stream, typename I> void Ser(Stream &s, I v) {
        static_assert(std::is_unsigned<I>::value,
                      "CompactSize only supported for unsigned integers");
        static_assert(std::numeric_limits<I>::max() <=
                          std::numeric_limits<uint64_t>::max(),
                      "CompactSize only supports 64-bit integers and below");
        WriteCompactSize<Stream>(s, v);
    }
};
/**
 * Formatter for std::chrono time points, (de)serialized as an integer of
 * type U counting duration ticks since the epoch.
 */
template <typename U, bool LOSSY = false> struct ChronoFormatter {
    template <typename Stream, typename Tp> void Unser(Stream &s, Tp &tp) {
        U u;
        s >> u;
        // Lossy deserialization does not make sense, so force Wnarrowing
        tp = Tp{typename Tp::duration{typename Tp::duration::rep{u}}};
    }
    template <typename Stream, typename Tp> void Ser(Stream &s, Tp tp) {
        if constexpr (LOSSY) {
            // Functional-style cast: narrowing to U is permitted here.
            s << U(tp.time_since_epoch().count());
        } else {
            // Brace-init: narrowing to U is a compile-time error.
            s << U{tp.time_since_epoch().count()};
        }
    }
};
//! Variant that allows narrowing on serialization.
template <typename U> using LossyChronoFormatter = ChronoFormatter<U, true>;
template <size_t Limit> struct LimitedStringFormatter {
template <typename Stream> void Unser(Stream &s, std::string &v) {
size_t size = ReadCompactSize(s);
if (size > Limit) {
throw std::ios_base::failure("String length limit exceeded");
}
v.resize(size);
if (size != 0) {
- s.read((char *)v.data(), size);
+ s.read(MakeWritableByteSpan(v));
}
}
template <typename Stream> void Ser(Stream &s, const std::string &v) {
s << v;
}
};
/**
* Formatter to serialize/deserialize vector elements using another formatter
*
* Example:
* struct X {
* std::vector<uint64_t> v;
* SERIALIZE_METHODS(X, obj) {
* READWRITE(Using<VectorFormatter<VarInt>>(obj.v));
* }
* };
* will define a struct that contains a vector of uint64_t, which is serialized
* as a vector of VarInt-encoded integers.
*
* V is not required to be an std::vector type. It works for any class that
* exposes a value_type, size, reserve, emplace_back, back, and const iterators.
*/
template <class Formatter> struct VectorFormatter {
    //! Write v.size() as a CompactSize, then each element via Formatter.
    template <typename Stream, typename V> void Ser(Stream &s, const V &v) {
        Formatter formatter;
        WriteCompactSize(s, v.size());
        for (const typename V::value_type &elem : v) {
            formatter.Ser(s, elem);
        }
    }
    //! Read a CompactSize count, then that many elements via Formatter.
    template <typename Stream, typename V> void Unser(Stream &s, V &v) {
        Formatter formatter;
        v.clear();
        size_t size = ReadCompactSize(s);
        size_t allocated = 0;
        while (allocated < size) {
            // For DoS prevention, do not blindly allocate as much as the stream
            // claims to contain. Instead, allocate in 5MiB batches, so that an
            // attacker actually needs to provide X MiB of data to make us
            // allocate X+5 Mib.
            static_assert(sizeof(typename V::value_type) <= MAX_VECTOR_ALLOCATE,
                          "Vector element size too large");
            allocated =
                std::min(size, allocated + MAX_VECTOR_ALLOCATE /
                                               sizeof(typename V::value_type));
            v.reserve(allocated);
            while (v.size() < allocated) {
                v.emplace_back();
                formatter.Unser(s, v.back());
            }
        }
        // NOTE: no trailing semicolon here; the previous stray ";" after this
        // brace was a redundant empty member declaration.
    }
};
/**
* Helper for differentially encoded Compact Size integers in lists.
*
* Instead of using raw indexes, the number encoded is the difference between
* the current index and the previous index, minus one. For example, a first
* index of 0 implies a real index of 0, a second index of 0 thereafter refers
* to a real index of 1, etc.
*
* To be used with a VectorFormatter.
*/
class DifferenceFormatter {
    // Smallest value the next serialized index may take (previous index + 1).
    uint64_t m_shift = 0;
public:
    template <typename Stream, typename I> void Ser(Stream &s, I v) {
        // Indexes must be strictly increasing; uint64 max is rejected so the
        // decoder's "m_shift >= max" check below stays symmetrical.
        if (v < m_shift || v >= std::numeric_limits<uint64_t>::max()) {
            throw std::ios_base::failure("differential value overflow");
        }
        WriteCompactSize(s, v - m_shift);
        m_shift = uint64_t(v) + 1;
    }
    template <typename Stream, typename I> void Unser(Stream &s, I &v) {
        uint64_t n = ReadCompactSize(s);
        m_shift += n;
        // "m_shift < n" detects uint64 wraparound of the addition above; the
        // remaining comparisons reject values that do not fit in I.
        if (m_shift < n || m_shift >= std::numeric_limits<uint64_t>::max() ||
            m_shift < std::numeric_limits<I>::min() ||
            m_shift > std::numeric_limits<I>::max()) {
            throw std::ios_base::failure("differential value overflow");
        }
        v = I(m_shift++);
    }
};
/**
* Helper for a list of items containing a differentially encoded index as their
* first member. See DifferenceFormatter for info about the index encoding.
*
* The index should be a public member of the object.
* SerData()/UnserData() methods must be implemented to serialize/deserialize
* the remaining item data.
*
* To be used with a VectorFormatter.
*/
struct DifferentialIndexedItemFormatter : public DifferenceFormatter {
    //! Write the (differential) index first, then the item payload.
    template <typename Stream, typename T> void Ser(Stream &s, T v) {
        DifferenceFormatter::Ser(s, v.index);
        v.SerData(s);
    }
    //! Read the (differential) index first, then the item payload.
    template <typename Stream, typename T> void Unser(Stream &s, T &v) {
        DifferenceFormatter::Unser(s, v.index);
        v.UnserData(s);
    }
};
/**
* Forward declarations
*/
/**
* string
*/
template <typename Stream, typename C>
void Serialize(Stream &os, const std::basic_string<C> &str);
template <typename Stream, typename C>
void Unserialize(Stream &is, std::basic_string<C> &str);
/**
* prevector
* prevectors of uint8_t are a special case and are intended to be serialized as
* a single opaque blob.
*/
template <typename Stream, unsigned int N, typename T>
void Serialize_impl(Stream &os, const prevector<N, T> &v, const uint8_t &);
template <typename Stream, unsigned int N, typename T, typename V>
void Serialize_impl(Stream &os, const prevector<N, T> &v, const V &);
template <typename Stream, unsigned int N, typename T>
inline void Serialize(Stream &os, const prevector<N, T> &v);
template <typename Stream, unsigned int N, typename T>
void Unserialize_impl(Stream &is, prevector<N, T> &v, const uint8_t &);
template <typename Stream, unsigned int N, typename T, typename V>
void Unserialize_impl(Stream &is, prevector<N, T> &v, const V &);
template <typename Stream, unsigned int N, typename T>
inline void Unserialize(Stream &is, prevector<N, T> &v);
/**
* vector
* vectors of uint8_t are a special case and are intended to be serialized as a
* single opaque blob.
*/
template <typename Stream, typename T, typename A>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const uint8_t &);
template <typename Stream, typename T, typename A>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const bool &);
template <typename Stream, typename T, typename A, typename V>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const V &);
template <typename Stream, typename T, typename A>
inline void Serialize(Stream &os, const std::vector<T, A> &v);
template <typename Stream, typename T, typename A>
void Unserialize_impl(Stream &is, std::vector<T, A> &v, const uint8_t &);
template <typename Stream, typename T, typename A, typename V>
void Unserialize_impl(Stream &is, std::vector<T, A> &v, const V &);
template <typename Stream, typename T, typename A>
inline void Unserialize(Stream &is, std::vector<T, A> &v);
/**
* pair
*/
template <typename Stream, typename K, typename T>
void Serialize(Stream &os, const std::pair<K, T> &item);
template <typename Stream, typename K, typename T>
void Unserialize(Stream &is, std::pair<K, T> &item);
/**
* map
*/
template <typename Stream, typename K, typename T, typename Pred, typename A>
void Serialize(Stream &os, const std::map<K, T, Pred, A> &m);
template <typename Stream, typename K, typename T, typename Pred, typename A>
void Unserialize(Stream &is, std::map<K, T, Pred, A> &m);
/**
* set
*/
template <typename Stream, typename K, typename Pred, typename A>
void Serialize(Stream &os, const std::set<K, Pred, A> &m);
template <typename Stream, typename K, typename Pred, typename A>
void Unserialize(Stream &is, std::set<K, Pred, A> &m);
/**
* shared_ptr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const std::shared_ptr<const T> &p);
template <typename Stream, typename T>
void Unserialize(Stream &os, std::shared_ptr<const T> &p);
/**
* unique_ptr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const std::unique_ptr<const T> &p);
template <typename Stream, typename T>
void Unserialize(Stream &os, std::unique_ptr<const T> &p);
/**
* RCUPtr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const RCUPtr<const T> &p);
template <typename Stream, typename T>
void Unserialize(Stream &os, RCUPtr<const T> &p);
/**
* If none of the specialized versions above matched, default to calling member
* function.
*/
// Fallback: delegate to the object's own Serialize member function.
template <typename Stream, typename T>
inline void Serialize(Stream &os, const T &obj) {
    obj.Serialize(os);
}
// Fallback: delegate to the object's own Unserialize member function. Takes
// a forwarding reference so rvalue wrappers (e.g. produced by Using()) can
// be deserialized into as well.
template <typename Stream, typename T>
inline void Unserialize(Stream &is, T &&obj) {
    obj.Unserialize(is);
}
/**
* Default formatter. Serializes objects as themselves.
*
* The vector/prevector serialization code passes this to VectorFormatter
* to enable reusing that logic. It shouldn't be needed elsewhere.
*/
struct DefaultFormatter {
    //! Forward to the free Serialize() overload for T.
    template <typename Stream, typename T>
    static void Ser(Stream &s, const T &t) {
        Serialize(s, t);
    }
    //! Forward to the free Unserialize() overload for T.
    template <typename Stream, typename T> static void Unser(Stream &s, T &t) {
        Unserialize(s, t);
    }
};
/**
* string
*/
template <typename Stream, typename C>
void Serialize(Stream &os, const std::basic_string<C> &str) {
WriteCompactSize(os, str.size());
if (!str.empty()) {
- os.write((char *)str.data(), str.size() * sizeof(C));
+ os.write(MakeByteSpan(str));
}
}
template <typename Stream, typename C>
void Unserialize(Stream &is, std::basic_string<C> &str) {
size_t nSize = ReadCompactSize(is);
str.resize(nSize);
if (nSize != 0) {
- is.read((char *)str.data(), nSize * sizeof(C));
+ is.read(MakeWritableByteSpan(str));
}
}
/**
* prevector
*/
template <typename Stream, unsigned int N, typename T>
void Serialize_impl(Stream &os, const prevector<N, T> &v, const uint8_t &) {
WriteCompactSize(os, v.size());
if (!v.empty()) {
- os.write((char *)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
}
// Non-byte element types: serialize element-wise through VectorFormatter.
template <typename Stream, unsigned int N, typename T, typename V>
void Serialize_impl(Stream &os, const prevector<N, T> &v, const V &) {
    Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
// Tag-dispatch on T: prevectors of uint8_t are written as one opaque blob
// (overload above this one, in the hunk), everything else element-wise.
template <typename Stream, unsigned int N, typename T>
inline void Serialize(Stream &os, const prevector<N, T> &v) {
    Serialize_impl(os, v, T());
}
template <typename Stream, unsigned int N, typename T>
void Unserialize_impl(Stream &is, prevector<N, T> &v, const uint8_t &) {
// Limit size per read so bogus size value won't cause out of memory
v.clear();
size_t nSize = ReadCompactSize(is);
size_t i = 0;
while (i < nSize) {
size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T)));
v.resize_uninitialized(i + blk);
- is.read((char *)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
// Non-byte element types: deserialize element-wise through VectorFormatter.
template <typename Stream, unsigned int N, typename T, typename V>
void Unserialize_impl(Stream &is, prevector<N, T> &v, const V &) {
    Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
// Tag-dispatch on T: prevectors of uint8_t are read as one opaque blob,
// everything else element-wise.
template <typename Stream, unsigned int N, typename T>
inline void Unserialize(Stream &is, prevector<N, T> &v) {
    Unserialize_impl(is, v, T());
}
/**
* vector
*/
template <typename Stream, typename T, typename A>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const uint8_t &) {
WriteCompactSize(os, v.size());
if (!v.empty()) {
- os.write((char *)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
}
template <typename Stream, typename T, typename A>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const bool &) {
    // A special case for std::vector<bool>, as dereferencing
    // std::vector<bool>::const_iterator does not result in a const bool&
    // due to std::vector's special casing for bool arguments.
    WriteCompactSize(os, v.size());
    for (bool elem : v) {
        ::Serialize(os, elem);
    }
}
// Generic element types: serialize element-wise through VectorFormatter.
template <typename Stream, typename T, typename A, typename V>
void Serialize_impl(Stream &os, const std::vector<T, A> &v, const V &) {
    Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
// Tag-dispatch on T: vectors of uint8_t are written as one opaque blob,
// vector<bool> and everything else element-wise.
template <typename Stream, typename T, typename A>
inline void Serialize(Stream &os, const std::vector<T, A> &v) {
    Serialize_impl(os, v, T());
}
template <typename Stream, typename T, typename A>
void Unserialize_impl(Stream &is, std::vector<T, A> &v, const uint8_t &) {
// Limit size per read so bogus size value won't cause out of memory
v.clear();
size_t nSize = ReadCompactSize(is);
size_t i = 0;
while (i < nSize) {
size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T)));
v.resize(i + blk);
- is.read((char *)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
// Generic element types: deserialize element-wise through VectorFormatter.
template <typename Stream, typename T, typename A, typename V>
void Unserialize_impl(Stream &is, std::vector<T, A> &v, const V &) {
    Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
// Tag-dispatch on T: vectors of uint8_t are read as one opaque blob,
// everything else element-wise.
template <typename Stream, typename T, typename A>
inline void Unserialize(Stream &is, std::vector<T, A> &v) {
    Unserialize_impl(is, v, T());
}
/**
* pair
*/
// Pairs: both members back to back, no length prefix.
template <typename Stream, typename K, typename T>
void Serialize(Stream &os, const std::pair<K, T> &item) {
    Serialize(os, item.first);
    Serialize(os, item.second);
}
template <typename Stream, typename K, typename T>
void Unserialize(Stream &is, std::pair<K, T> &item) {
    Unserialize(is, item.first);
    Unserialize(is, item.second);
}
/**
* map
*/
// Maps: element count as CompactSize, then each (key, value) pair in the
// map's iteration (key) order.
template <typename Stream, typename K, typename T, typename Pred, typename A>
void Serialize(Stream &os, const std::map<K, T, Pred, A> &m) {
    WriteCompactSize(os, m.size());
    for (const auto &entry : m) {
        Serialize(os, entry);
    }
}
template <typename Stream, typename K, typename T, typename Pred, typename A>
void Unserialize(Stream &is, std::map<K, T, Pred, A> &m) {
    m.clear();
    size_t nSize = ReadCompactSize(is);
    // Hinted insert: amortized constant time when the stream is already in
    // key order (the order Serialize above produces).
    typename std::map<K, T, Pred, A>::iterator mi = m.begin();
    for (size_t i = 0; i < nSize; i++) {
        std::pair<K, T> item;
        Unserialize(is, item);
        mi = m.insert(mi, item);
    }
}
/**
* set
*/
// Sets: element count as CompactSize, then each key in the set's sort order.
template <typename Stream, typename K, typename Pred, typename A>
void Serialize(Stream &os, const std::set<K, Pred, A> &m) {
    WriteCompactSize(os, m.size());
    for (const K &key : m) {
        Serialize(os, key);
    }
}
template <typename Stream, typename K, typename Pred, typename A>
void Unserialize(Stream &is, std::set<K, Pred, A> &m) {
    m.clear();
    const size_t count = ReadCompactSize(is);
    // Hinted insert: amortized constant time when the stream is already
    // sorted (the order Serialize above produces).
    typename std::set<K, Pred, A>::iterator hint = m.begin();
    for (size_t n = 0; n < count; n++) {
        K key;
        Unserialize(is, key);
        hint = m.insert(hint, key);
    }
}
/**
* unique_ptr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const std::unique_ptr<const T> &p) {
    // NOTE(review): p is dereferenced unconditionally — callers must not
    // pass a null pointer.
    Serialize(os, *p);
}
template <typename Stream, typename T>
void Unserialize(Stream &is, std::unique_ptr<const T> &p) {
    // Construct a fresh T directly from the stream via its
    // (deserialize, stream) constructor.
    p.reset(new T(deserialize, is));
}
/**
* shared_ptr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const std::shared_ptr<const T> &p) {
    // NOTE(review): p is dereferenced unconditionally — callers must not
    // pass a null pointer.
    Serialize(os, *p);
}
template <typename Stream, typename T>
void Unserialize(Stream &is, std::shared_ptr<const T> &p) {
    // Replace p with a fresh T constructed from the stream via its
    // (deserialize, stream) constructor.
    p = std::make_shared<const T>(deserialize, is);
}
/**
* RCUPtr
*/
template <typename Stream, typename T>
void Serialize(Stream &os, const RCUPtr<const T> &p) {
    // NOTE(review): p is dereferenced unconditionally — callers must not
    // pass a null pointer.
    Serialize(os, *p);
}
template <typename Stream, typename T>
void Unserialize(Stream &is, RCUPtr<const T> &p) {
    // Replace p with a fresh T constructed from the stream via its
    // (deserialize, stream) constructor.
    p = RCUPtr<const T>::make(deserialize, is);
}
/**
* Support for SERIALIZE_METHODS and READWRITE macro.
*/
//! Action tag: the SERIALIZE_METHODS body is being used to write out.
struct CSerActionSerialize {
    constexpr bool ForRead() const { return false; }
};
//! Action tag: the SERIALIZE_METHODS body is being used to read in.
struct CSerActionUnserialize {
    constexpr bool ForRead() const { return true; }
};
/**
* ::GetSerializeSize implementations
*
* Computing the serialized size of objects is done through a special stream
* object of type CSizeComputer, which only records the number of bytes written
* to it.
*
* If your Serialize or SerializationOp method has non-trivial overhead for
* serialization, it may be worthwhile to implement a specialized version for
* CSizeComputer, which uses the s.seek() method to record bytes that would
* be written instead.
*/
class CSizeComputer {
protected:
size_t nSize;
const int nVersion;
public:
explicit CSizeComputer(int nVersionIn) : nSize(0), nVersion(nVersionIn) {}
- void write(const char *psz, size_t _nSize) { this->nSize += _nSize; }
+ void write(Span<const std::byte> src) { this->nSize += src.size(); }
/** Pretend _nSize bytes are written, without specifying them. */
void seek(size_t _nSize) { this->nSize += _nSize; }
template <typename T> CSizeComputer &operator<<(const T &obj) {
::Serialize(*this, obj);
return (*this);
}
size_t size() const { return nSize; }
int GetVersion() const { return nVersion; }
};
// Recursion base case: nothing left to serialize.
template <typename Stream> void SerializeMany(Stream &s) {}
// Serialize each argument to s, left to right.
template <typename Stream, typename Arg, typename... Args>
void SerializeMany(Stream &s, const Arg &arg, const Args &...args) {
    ::Serialize(s, arg);
    ::SerializeMany(s, args...);
}
// Recursion base case: nothing left to deserialize.
template <typename Stream> inline void UnserializeMany(Stream &s) {}
// Unserialize each argument from s, left to right.
template <typename Stream, typename Arg, typename... Args>
inline void UnserializeMany(Stream &s, Arg &&arg, Args &&...args) {
    ::Unserialize(s, arg);
    ::UnserializeMany(s, args...);
}
// READWRITE dispatch: in serialize mode, write all arguments.
template <typename Stream, typename... Args>
inline void SerReadWriteMany(Stream &s, CSerActionSerialize ser_action,
                             const Args &...args) {
    ::SerializeMany(s, args...);
}
// READWRITE dispatch: in unserialize mode, read all arguments.
template <typename Stream, typename... Args>
inline void SerReadWriteMany(Stream &s, CSerActionUnserialize ser_action,
                             Args &&...args) {
    ::UnserializeMany(s, args...);
}
// SerRead: no-op when the action is serialize.
template <typename Stream, typename Type, typename Fn>
inline void SerRead(Stream &s, CSerActionSerialize ser_action, Type &&, Fn &&) {
}
// SerRead: when deserializing, apply fn to the object being read.
template <typename Stream, typename Type, typename Fn>
inline void SerRead(Stream &s, CSerActionUnserialize ser_action, Type &&obj,
                    Fn &&fn) {
    fn(s, std::forward<Type>(obj));
}
// SerWrite: when serializing, apply fn to the object being written.
template <typename Stream, typename Type, typename Fn>
inline void SerWrite(Stream &s, CSerActionSerialize ser_action, Type &&obj,
                     Fn &&fn) {
    fn(s, std::forward<Type>(obj));
}
// SerWrite: no-op when the action is unserialize.
template <typename Stream, typename Type, typename Fn>
inline void SerWrite(Stream &s, CSerActionUnserialize ser_action, Type &&,
                     Fn &&) {}
/**
 * CSizeComputer fast path: advance the byte counter by the encoded length
 * instead of actually encoding n.
 *
 * Bugfix: GetSizeOfVarInt's first template parameter is the non-type
 * VarIntMode (with no default), so the previous "GetSizeOfVarInt<I>(n)"
 * passed a type where a VarIntMode value is required and could never be
 * instantiated. Spell out the DEFAULT mode explicitly (the only mode valid
 * for the unsigned types DEFAULT accepts).
 */
template <typename I> inline void WriteVarInt(CSizeComputer &s, I n) {
    s.seek(GetSizeOfVarInt<VarIntMode::DEFAULT, I>(n));
}
// CSizeComputer fast path: advance the byte counter by the encoded length
// of the CompactSize instead of actually encoding it.
inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize) {
    s.seek(GetSizeOfCompactSize(nSize));
}
// Compute the serialized size of t without producing any bytes.
template <typename T> size_t GetSerializeSize(const T &t, int nVersion = 0) {
    return (CSizeComputer(nVersion) << t).size();
}
// Compute the total serialized size of all arguments combined.
template <typename... T>
size_t GetSerializeSizeMany(int nVersion, const T &...t) {
    CSizeComputer sc(nVersion);
    SerializeMany(sc, t...);
    return sc.size();
}
#endif // BITCOIN_SERIALIZE_H
diff --git a/src/streams.h b/src/streams.h
index 91f519487..ca9a0674f 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -1,815 +1,816 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_STREAMS_H
#define BITCOIN_STREAMS_H
#include <serialize.h>
#include <span.h>
#include <support/allocators/zeroafterfree.h>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ios>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <vector>
template <typename Stream> class OverrideStream {
Stream *stream;
const int nType;
const int nVersion;
public:
OverrideStream(Stream *stream_, int nType_, int nVersion_)
: stream(stream_), nType(nType_), nVersion(nVersion_) {}
template <typename T> OverrideStream<Stream> &operator<<(const T &obj) {
// Serialize to this stream
::Serialize(*this, obj);
return (*this);
}
template <typename T> OverrideStream<Stream> &operator>>(T &&obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
}
- void write(const char *pch, size_t nSize) { stream->write(pch, nSize); }
+ void write(Span<const std::byte> src) { stream->write(src); }
- void read(char *pch, size_t nSize) { stream->read(pch, nSize); }
+ void read(Span<std::byte> dst) { stream->read(dst); }
int GetVersion() const { return nVersion; }
int GetType() const { return nType; }
void ignore(size_t size) { return stream->ignore(size); }
};
// Wrap s in an OverrideStream whose version is s's version with
// nVersionFlag OR'd in; the type is passed through unchanged.
template <typename S> OverrideStream<S> WithOrVersion(S *s, int nVersionFlag) {
    return OverrideStream<S>(s, s->GetType(), s->GetVersion() | nVersionFlag);
}
/**
* Minimal stream for overwriting and/or appending to an existing byte vector.
*
* The referenced vector will grow as necessary.
*/
class CVectorWriter {
public:
/**
* @param[in] nTypeIn Serialization Type
* @param[in] nVersionIn Serialization Version (including any flags)
* @param[in] vchDataIn Referenced byte vector to overwrite/append
* @param[in] nPosIn Starting position. Vector index where writes should
* start. The vector will initially grow as necessary to max(nPosIn,
* vec.size()). So to append, use vec.size().
*/
CVectorWriter(int nTypeIn, int nVersionIn, std::vector<uint8_t> &vchDataIn,
size_t nPosIn)
: nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn),
nPos(nPosIn) {
if (nPos > vchData.size()) {
vchData.resize(nPos);
}
}
/**
* (other params same as above)
* @param[in] args A list of items to serialize starting at nPosIn.
*/
template <typename... Args>
CVectorWriter(int nTypeIn, int nVersionIn, std::vector<uint8_t> &vchDataIn,
size_t nPosIn, Args &&...args)
: CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn) {
::SerializeMany(*this, std::forward<Args>(args)...);
}
- void write(const char *pch, size_t nSize) {
+ void write(Span<const std::byte> src) {
assert(nPos <= vchData.size());
- size_t nOverwrite = std::min(nSize, vchData.size() - nPos);
+ size_t nOverwrite = std::min(src.size(), vchData.size() - nPos);
if (nOverwrite) {
- memcpy(vchData.data() + nPos,
- reinterpret_cast<const uint8_t *>(pch), nOverwrite);
+ memcpy(vchData.data() + nPos, src.data(), nOverwrite);
}
- if (nOverwrite < nSize) {
- vchData.insert(vchData.end(),
- reinterpret_cast<const uint8_t *>(pch) + nOverwrite,
- reinterpret_cast<const uint8_t *>(pch) + nSize);
+ if (nOverwrite < src.size()) {
+ vchData.insert(vchData.end(), UCharCast(src.data()) + nOverwrite,
+ UCharCast(src.end()));
}
- nPos += nSize;
+ nPos += src.size();
}
template <typename T> CVectorWriter &operator<<(const T &obj) {
// Serialize to this stream
::Serialize(*this, obj);
return (*this);
}
int GetVersion() const { return nVersion; }
int GetType() const { return nType; }
void seek(size_t nSize) {
nPos += nSize;
if (nPos > vchData.size()) {
vchData.resize(nPos);
}
}
private:
const int nType;
const int nVersion;
std::vector<uint8_t> &vchData;
size_t nPos;
};
/**
* Minimal stream for reading from an existing byte array by Span.
*/
class SpanReader {
private:
const int m_type;
const int m_version;
Span<const uint8_t> m_data;
public:
/**
* @param[in] type Serialization Type
* @param[in] version Serialization Version (including any flags)
* @param[in] data Referenced byte vector to overwrite/append
*/
SpanReader(int type, int version, Span<const uint8_t> data)
: m_type(type), m_version(version), m_data(data) {}
template <typename T> SpanReader &operator>>(T &obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
}
int GetVersion() const { return m_version; }
int GetType() const { return m_type; }
size_t size() const { return m_data.size(); }
bool empty() const { return m_data.empty(); }
- void read(char *dst, size_t n) {
- if (n == 0) {
+ void read(Span<std::byte> dst) {
+ if (dst.size() == 0) {
return;
}
// Read from the beginning of the buffer
- if (n > m_data.size()) {
+ if (dst.size() > m_data.size()) {
throw std::ios_base::failure("SpanReader::read(): end of data");
}
- memcpy(dst, m_data.data(), n);
- m_data = m_data.subspan(n);
+ memcpy(dst.data(), m_data.data(), dst.size());
+ m_data = m_data.subspan(dst.size());
}
};
/**
* Double ended buffer combining vector and stream-like interfaces.
*
* >> and << read and write unformatted data using the above serialization
* templates. Fills with data in linear time; some stringstream implementations
* take N^2 time.
*/
class CDataStream {
protected:
using vector_type = SerializeData;
vector_type vch;
unsigned int nReadPos{0};
int nType;
int nVersion;
public:
typedef vector_type::allocator_type allocator_type;
typedef vector_type::size_type size_type;
typedef vector_type::difference_type difference_type;
typedef vector_type::reference reference;
typedef vector_type::const_reference const_reference;
typedef vector_type::value_type value_type;
typedef vector_type::iterator iterator;
typedef vector_type::const_iterator const_iterator;
typedef vector_type::reverse_iterator reverse_iterator;
explicit CDataStream(int nTypeIn, int nVersionIn)
: nType{nTypeIn}, nVersion{nVersionIn} {}
+ explicit CDataStream(Span<const uint8_t> sp, int type, int version)
+ : CDataStream{AsBytes(sp), type, version} {}
explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
: vch(sp.data(), sp.data() + sp.size()), nType{nTypeIn},
nVersion{nVersionIn} {}
template <typename... Args>
CDataStream(int nTypeIn, int nVersionIn, Args &&...args)
: nType{nTypeIn}, nVersion{nVersionIn} {
::SerializeMany(*this, std::forward<Args>(args)...);
}
- std::string str() const { return (std::string(begin(), end())); }
+ std::string str() const {
+ return std::string{UCharCast(data()), UCharCast(data() + size())};
+ }
//
// Vector subset
//
const_iterator begin() const { return vch.begin() + nReadPos; }
iterator begin() { return vch.begin() + nReadPos; }
const_iterator end() const { return vch.end(); }
iterator end() { return vch.end(); }
size_type size() const { return vch.size() - nReadPos; }
bool empty() const { return vch.size() == nReadPos; }
void resize(size_type n, value_type c = value_type{}) {
vch.resize(n + nReadPos, c);
}
void reserve(size_type n) { vch.reserve(n + nReadPos); }
const_reference operator[](size_type pos) const {
return vch[pos + nReadPos];
}
reference operator[](size_type pos) { return vch[pos + nReadPos]; }
void clear() {
vch.clear();
nReadPos = 0;
}
iterator insert(iterator it, const value_type x) {
return vch.insert(it, x);
}
void insert(iterator it, size_type n, const value_type x) {
vch.insert(it, n, x);
}
value_type *data() { return vch.data() + nReadPos; }
const value_type *data() const { return vch.data() + nReadPos; }
void insert(iterator it, std::vector<value_type>::const_iterator first,
std::vector<value_type>::const_iterator last) {
if (last == first) {
return;
}
assert(last - first > 0);
if (it == vch.begin() + nReadPos &&
(unsigned int)(last - first) <= nReadPos) {
// special case for inserting at the front when there's room
nReadPos -= (last - first);
memcpy(&vch[nReadPos], &first[0], last - first);
} else {
vch.insert(it, first, last);
}
}
// This was added to have full compat with the std::vector interface but is
// unused (except in a Bitcoin ABC specific test in stream_tests)
- void insert(iterator it, const char *first, const char *last) {
+ void insert(iterator it, const value_type *first, const value_type *last) {
if (last == first) {
return;
}
assert(last - first > 0);
if (it == vch.begin() + nReadPos &&
(unsigned int)(last - first) <= nReadPos) {
// special case for inserting at the front when there's room
nReadPos -= (last - first);
memcpy(&vch[nReadPos], &first[0], last - first);
} else {
vch.insert(it, first, last);
}
}
iterator erase(iterator it) {
if (it == vch.begin() + nReadPos) {
// special case for erasing from the front
if (++nReadPos >= vch.size()) {
// whenever we reach the end, we take the opportunity to clear
// the buffer
nReadPos = 0;
return vch.erase(vch.begin(), vch.end());
}
return vch.begin() + nReadPos;
} else {
return vch.erase(it);
}
}
iterator erase(iterator first, iterator last) {
if (first == vch.begin() + nReadPos) {
// special case for erasing from the front
if (last == vch.end()) {
nReadPos = 0;
return vch.erase(vch.begin(), vch.end());
} else {
nReadPos = (last - vch.begin());
return last;
}
} else
return vch.erase(first, last);
}
inline void Compact() {
vch.erase(vch.begin(), vch.begin() + nReadPos);
nReadPos = 0;
}
bool Rewind(std::optional<size_type> n = std::nullopt) {
// Total rewind if no size is passed
if (!n) {
nReadPos = 0;
return true;
}
// Rewind by n characters if the buffer hasn't been compacted yet
if (*n > nReadPos) {
return false;
}
nReadPos -= *n;
return true;
}
//
// Stream subset
//
bool eof() const { return size() == 0; }
CDataStream *rdbuf() { return this; }
int in_avail() const { return size(); }
void SetType(int n) { nType = n; }
int GetType() const { return nType; }
void SetVersion(int n) { nVersion = n; }
int GetVersion() const { return nVersion; }
- void read(char *pch, size_t nSize) {
- if (nSize == 0) {
+ void read(Span<value_type> dst) {
+ if (dst.size() == 0) {
return;
}
// Read from the beginning of the buffer
- unsigned int nReadPosNext = nReadPos + nSize;
+ unsigned int nReadPosNext = nReadPos + dst.size();
if (nReadPosNext > vch.size()) {
throw std::ios_base::failure("CDataStream::read(): end of data");
}
- memcpy(pch, &vch[nReadPos], nSize);
+ memcpy(dst.data(), &vch[nReadPos], dst.size());
if (nReadPosNext == vch.size()) {
nReadPos = 0;
vch.clear();
return;
}
nReadPos = nReadPosNext;
}
void ignore(int nSize) {
// Ignore from the beginning of the buffer
if (nSize < 0) {
throw std::ios_base::failure(
"CDataStream::ignore(): nSize negative");
}
unsigned int nReadPosNext = nReadPos + nSize;
if (nReadPosNext >= vch.size()) {
if (nReadPosNext > vch.size()) {
throw std::ios_base::failure(
"CDataStream::ignore(): end of data");
}
nReadPos = 0;
vch.clear();
return;
}
nReadPos = nReadPosNext;
}
- void write(const char *pch, size_t nSize) {
+ void write(Span<const value_type> src) {
// Write to the end of the buffer
- vch.insert(vch.end(), pch, pch + nSize);
+ vch.insert(vch.end(), src.begin(), src.end());
}
template <typename Stream> void Serialize(Stream &s) const {
// Special case: stream << stream concatenates like stream += stream
if (!vch.empty()) {
- s.write((char *)vch.data(), vch.size() * sizeof(value_type));
+ s.write(MakeByteSpan(vch));
}
}
template <typename T> CDataStream &operator<<(const T &obj) {
// Serialize to this stream
::Serialize(*this, obj);
return (*this);
}
template <typename T> CDataStream &operator>>(T &&obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
}
/**
* XOR the contents of this stream with a certain key.
*
* @param[in] key The key used to XOR the data in this stream.
*/
void Xor(const std::vector<uint8_t> &key) {
if (key.size() == 0) {
return;
}
for (size_type i = 0, j = 0; i != size(); i++) {
- vch[i] ^= key[j++];
+ vch[i] ^= std::byte{key[j++]};
// This potentially acts on very many bytes of data, so it's
// important that we calculate `j`, i.e. the `key` index in this way
// instead of doing a %, which would effectively be a division for
// each byte Xor'd -- much slower than need be.
if (j == key.size()) j = 0;
}
}
};
template <typename IStream> class BitStreamReader {
private:
IStream &m_istream;
/// Buffered byte read in from the input stream. A new byte is read into the
/// buffer when m_offset reaches 8.
uint8_t m_buffer{0};
/// Number of high order bits in m_buffer already returned by previous
/// Read() calls. The next bit to be returned is at this offset from the
/// most significant bit position.
int m_offset{8};
public:
explicit BitStreamReader(IStream &istream) : m_istream(istream) {}
/**
* Read the specified number of bits from the stream. The data is returned
* in the nbits least significant bits of a 64-bit uint.
*/
uint64_t Read(int nbits) {
if (nbits < 0 || nbits > 64) {
throw std::out_of_range("nbits must be between 0 and 64");
}
uint64_t data = 0;
while (nbits > 0) {
if (m_offset == 8) {
m_istream >> m_buffer;
m_offset = 0;
}
int bits = std::min(8 - m_offset, nbits);
data <<= bits;
data |= static_cast<uint8_t>(m_buffer << m_offset) >> (8 - bits);
m_offset += bits;
nbits -= bits;
}
return data;
}
};
template <typename OStream> class BitStreamWriter {
private:
OStream &m_ostream;
/// Buffered byte waiting to be written to the output stream. The byte is
/// written buffer when m_offset reaches 8 or Flush() is called.
uint8_t m_buffer{0};
/// Number of high order bits in m_buffer already written by previous
/// Write() calls and not yet flushed to the stream. The next bit to be
/// written to is at this offset from the most significant bit position.
int m_offset{0};
public:
explicit BitStreamWriter(OStream &ostream) : m_ostream(ostream) {}
~BitStreamWriter() { Flush(); }
/**
* Write the nbits least significant bits of a 64-bit int to the output
* stream. Data is buffered until it completes an octet.
*/
void Write(uint64_t data, int nbits) {
if (nbits < 0 || nbits > 64) {
throw std::out_of_range("nbits must be between 0 and 64");
}
while (nbits > 0) {
int bits = std::min(8 - m_offset, nbits);
m_buffer |= (data << (64 - nbits)) >> (64 - 8 + m_offset);
m_offset += bits;
nbits -= bits;
if (m_offset == 8) {
Flush();
}
}
}
/**
* Flush any unwritten bits to the output stream, padding with 0's to the
* next byte boundary.
*/
void Flush() {
if (m_offset == 0) {
return;
}
m_ostream << m_buffer;
m_buffer = 0;
m_offset = 0;
}
};
/**
* Non-refcounted RAII wrapper for FILE*
*
* Will automatically close the file when it goes out of scope if not null. If
* you're returning the file pointer, return file.release(). If you need to
* close the file early, use file.fclose() instead of fclose(file).
*/
class AutoFile {
protected:
FILE *file;
public:
explicit AutoFile(FILE *filenew) : file{filenew} {}
~AutoFile() { fclose(); }
// Disallow copies
AutoFile(const AutoFile &) = delete;
AutoFile &operator=(const AutoFile &) = delete;
int fclose() {
int retval{0};
if (file) {
retval = ::fclose(file);
file = nullptr;
}
return retval;
}
/**
* Get wrapped FILE* with transfer of ownership.
* @note This will invalidate the AutoFile object, and makes it the
* responsibility of the caller of this function to clean up the returned
* FILE*.
*/
FILE *release() {
FILE *ret = file;
file = nullptr;
return ret;
}
/**
* Get wrapped FILE* without transfer of ownership.
* @note Ownership of the FILE* will remain with this class. Use this only
* if the scope of the AutoFile outlives use of the passed pointer.
*/
FILE *Get() const { return file; }
/** Return true if the wrapped FILE* is nullptr, false otherwise. */
bool IsNull() const { return (file == nullptr); }
//
// Stream subset
//
- void read(char *pch, size_t nSize) {
+ void read(Span<std::byte> dst) {
if (!file) {
throw std::ios_base::failure(
"AutoFile::read: file handle is nullptr");
}
- if (fread(pch, 1, nSize, file) != nSize) {
+ if (fread(dst.data(), 1, dst.size(), file) != dst.size()) {
throw std::ios_base::failure(feof(file)
? "AutoFile::read: end of file"
: "AutoFile::read: fread failed");
}
}
void ignore(size_t nSize) {
if (!file) {
throw std::ios_base::failure(
"AutoFile::ignore: file handle is nullptr");
}
uint8_t data[4096];
while (nSize > 0) {
size_t nNow = std::min<size_t>(nSize, sizeof(data));
if (fread(data, 1, nNow, file) != nNow) {
throw std::ios_base::failure(
feof(file) ? "AutoFile::ignore: end of file"
: "AutoFile::read: fread failed");
}
nSize -= nNow;
}
}
- void write(const char *pch, size_t nSize) {
+ void write(Span<const std::byte> src) {
if (!file) {
throw std::ios_base::failure(
"AutoFile::write: file handle is nullptr");
}
- if (fwrite(pch, 1, nSize, file) != nSize) {
+ if (fwrite(src.data(), 1, src.size(), file) != src.size()) {
throw std::ios_base::failure("AutoFile::write: write failed");
}
}
template <typename T> AutoFile &operator<<(const T &obj) {
if (!file)
throw std::ios_base::failure(
"AutoFile::operator<<: file handle is nullptr");
::Serialize(*this, obj);
return *this;
}
template <typename T> AutoFile &operator>>(T &&obj) {
if (!file)
throw std::ios_base::failure(
"AutoFile::operator>>: file handle is nullptr");
::Unserialize(*this, obj);
return *this;
}
};
class CAutoFile : public AutoFile {
private:
const int nType;
const int nVersion;
public:
CAutoFile(FILE *filenew, int nTypeIn, int nVersionIn)
: AutoFile{filenew}, nType(nTypeIn), nVersion(nVersionIn) {}
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
template <typename T> CAutoFile &operator<<(const T &obj) {
// Serialize to this stream
if (!file) {
throw std::ios_base::failure(
"CAutoFile::operator<<: file handle is nullptr");
}
::Serialize(*this, obj);
return (*this);
}
template <typename T> CAutoFile &operator>>(T &&obj) {
// Unserialize from this stream
if (!file) {
throw std::ios_base::failure(
"CAutoFile::operator>>: file handle is nullptr");
}
::Unserialize(*this, obj);
return (*this);
}
};
/**
* Non-refcounted RAII wrapper around a FILE* that implements a ring buffer to
* deserialize from. It guarantees the ability to rewind a given number of
* bytes.
*
* Will automatically close the file when it goes out of scope if not null. If
* you need to close the file early, use file.fclose() instead of fclose(file).
*/
class CBufferedFile {
private:
const int nType;
const int nVersion;
//! source file
FILE *src;
//! how many bytes have been read from source
uint64_t nSrcPos;
//! how many bytes have been read from this
uint64_t nReadPos;
//! up to which position we're allowed to read
uint64_t nReadLimit;
//! how many bytes we guarantee to rewind
uint64_t nRewind;
//! the buffer
- std::vector<char> vchBuf;
+ std::vector<std::byte> vchBuf;
protected:
//! read data from the source to fill the buffer
bool Fill() {
unsigned int pos = nSrcPos % vchBuf.size();
unsigned int readNow = vchBuf.size() - pos;
unsigned int nAvail = vchBuf.size() - (nSrcPos - nReadPos) - nRewind;
if (nAvail < readNow) {
readNow = nAvail;
}
if (readNow == 0) {
return false;
}
size_t nBytes = fread((void *)&vchBuf[pos], 1, readNow, src);
if (nBytes == 0) {
throw std::ios_base::failure(
feof(src) ? "CBufferedFile::Fill: end of file"
: "CBufferedFile::Fill: fread failed");
}
nSrcPos += nBytes;
return true;
}
public:
CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn,
int nTypeIn, int nVersionIn)
: nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0),
nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn),
- vchBuf(nBufSize, 0) {
+ vchBuf(nBufSize, std::byte{0}) {
if (nRewindIn >= nBufSize) {
throw std::ios_base::failure(
"Rewind limit must be less than buffer size");
}
src = fileIn;
}
~CBufferedFile() { fclose(); }
// Disallow copies
CBufferedFile(const CBufferedFile &) = delete;
CBufferedFile &operator=(const CBufferedFile &) = delete;
int GetVersion() const { return nVersion; }
int GetType() const { return nType; }
void fclose() {
if (src) {
::fclose(src);
src = nullptr;
}
}
//! check whether we're at the end of the source file
bool eof() const { return nReadPos == nSrcPos && feof(src); }
//! read a number of bytes
- void read(char *pch, size_t nSize) {
- if (nSize + nReadPos > nReadLimit) {
+ void read(Span<std::byte> dst) {
+ if (dst.size() + nReadPos > nReadLimit) {
throw std::ios_base::failure("Read attempted past buffer limit");
}
- while (nSize > 0) {
+ while (dst.size() > 0) {
if (nReadPos == nSrcPos) {
Fill();
}
unsigned int pos = nReadPos % vchBuf.size();
- size_t nNow = nSize;
+ size_t nNow = dst.size();
if (nNow + pos > vchBuf.size()) {
nNow = vchBuf.size() - pos;
}
if (nNow + nReadPos > nSrcPos) {
nNow = nSrcPos - nReadPos;
}
- memcpy(pch, &vchBuf[pos], nNow);
+ memcpy(dst.data(), &vchBuf[pos], nNow);
nReadPos += nNow;
- pch += nNow;
- nSize -= nNow;
+ dst = dst.subspan(nNow);
}
}
//! return the current reading position
uint64_t GetPos() const { return nReadPos; }
//! rewind to a given reading position
bool SetPos(uint64_t nPos) {
size_t bufsize = vchBuf.size();
if (nPos + bufsize < nSrcPos) {
// rewinding too far, rewind as far as possible
nReadPos = nSrcPos - bufsize;
return false;
}
if (nPos > nSrcPos) {
// can't go this far forward, go as far as possible
nReadPos = nSrcPos;
return false;
}
nReadPos = nPos;
return true;
}
//! Prevent reading beyond a certain position. No argument removes the
//! limit.
bool SetLimit(uint64_t nPos = std::numeric_limits<uint64_t>::max()) {
if (nPos < nReadPos) {
return false;
}
nReadLimit = nPos;
return true;
}
template <typename T> CBufferedFile &operator>>(T &&obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
}
//! search for a given byte in the stream, and remain positioned on it
- void FindByte(char ch) {
+ void FindByte(uint8_t ch) {
while (true) {
if (nReadPos == nSrcPos) {
Fill();
}
- if (vchBuf[nReadPos % vchBuf.size()] == ch) {
+ if (vchBuf[nReadPos % vchBuf.size()] == std::byte{ch}) {
break;
}
nReadPos++;
}
}
};
#endif // BITCOIN_STREAMS_H
diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h
index fa29a3d23..aa370c944 100644
--- a/src/support/allocators/zeroafterfree.h
+++ b/src/support/allocators/zeroafterfree.h
@@ -1,43 +1,44 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H
#define BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H
#include <support/cleanse.h>
#include <memory>
#include <vector>
template <typename T>
struct zero_after_free_allocator : public std::allocator<T> {
using base = std::allocator<T>;
using traits = std::allocator_traits<base>;
using size_type = typename traits::size_type;
using difference_type = typename traits::difference_type;
using pointer = typename traits::pointer;
using const_pointer = typename traits::const_pointer;
using value_type = typename traits::value_type;
zero_after_free_allocator() noexcept {}
zero_after_free_allocator(const zero_after_free_allocator &a) noexcept
: base(a) {}
template <typename U>
zero_after_free_allocator(const zero_after_free_allocator<U> &a) noexcept
: base(a) {}
~zero_after_free_allocator() noexcept {}
template <typename _Other> struct rebind {
typedef zero_after_free_allocator<_Other> other;
};
void deallocate(T *p, std::size_t n) {
if (p != nullptr) memory_cleanse(p, sizeof(T) * n);
std::allocator<T>::deallocate(p, n);
}
};
/** Byte-vector that clears its contents before deletion. */
-using SerializeData = std::vector<uint8_t, zero_after_free_allocator<uint8_t>>;
+using SerializeData =
+ std::vector<std::byte, zero_after_free_allocator<std::byte>>;
#endif // BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 9b02426e7..e9689e488 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -1,1151 +1,1155 @@
// Copyright (c) 2012-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <common/bloom.h>
#include <clientversion.h>
#include <consensus/merkle.h>
#include <key.h>
#include <key_io.h>
#include <merkleblock.h>
#include <primitives/block.h>
#include <random.h>
#include <serialize.h>
#include <streams.h>
#include <uint256.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <algorithm>
#include <vector>
BOOST_FIXTURE_TEST_SUITE(bloom_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize) {
CBloomFilter filter(3, 0.01, 0, BLOOM_UPDATE_ALL);
BOOST_CHECK_MESSAGE(
!filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter should be empty!");
filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter doesn't contain just-inserted object!");
// One bit different in first byte
BOOST_CHECK_MESSAGE(
!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter contains something it shouldn't!");
filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")),
"Bloom filter doesn't contain just-inserted object (2)!");
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")),
"Bloom filter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << filter;
std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(),
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(),
expected.begin(), expected.end());
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter doesn't contain just-inserted object!");
}
BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak) {
// Same test as bloom_create_insert_serialize, but we add a nTweak of 100
CBloomFilter filter(3, 0.01, 2147483649UL, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter doesn't contain just-inserted object!");
// One bit different in first byte
BOOST_CHECK_MESSAGE(
!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")),
"Bloom filter contains something it shouldn't!");
filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")),
"Bloom filter doesn't contain just-inserted object (2)!");
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(
filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")),
"Bloom filter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << filter;
std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(),
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(),
expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_create_insert_key) {
std::string strSecret =
std::string("5Kg1gnAjaLfKiwhhPpGS3QfRg2m6awQvaj98JCZBZQ5SuS2F15C");
CKey key = DecodeSecret(strSecret);
CPubKey pubkey = key.GetPubKey();
std::vector<uint8_t> vchPubKey(pubkey.begin(), pubkey.end());
CBloomFilter filter(2, 0.001, 0, BLOOM_UPDATE_ALL);
filter.insert(vchPubKey);
uint160 hash = pubkey.GetID();
filter.insert(hash);
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << filter;
std::vector<uint8_t> expected = ParseHex("038fc16b080000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(),
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(),
expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_match) {
// Random real transaction
// (b4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b)
CDataStream stream(
ParseHex("01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e"
"88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7c"
"3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d"
"8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee51b"
"0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef8"
"7e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339ffff"
"ffff021bff3d11000000001976a91404943fdd508053c75000106d3bc6e27"
"54dbcff1988ac2f15de00000000001976a914a266436d2965547608b9e15d"
"9032a7b9d64fa43188ac00000000"),
SER_DISK, CLIENT_VERSION);
CTransaction tx(deserialize, stream);
// and one which spends it
// (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436)
uint8_t ch[] = {
0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65,
0xef, 0x40, 0x6d, 0xd5, 0xd6, 0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8,
0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f, 0x74,
0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00,
0xda, 0x0d, 0xc6, 0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77,
0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0, 0x3f,
0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2,
0x5b, 0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e,
0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4,
0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6, 0xc2,
0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a,
0x6c, 0xf9, 0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34,
0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a, 0x2e, 0xe1, 0x97,
0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b,
0x06, 0xa9, 0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f,
0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b, 0xa7, 0x94, 0x0e, 0x00,
0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef,
0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39,
0x97, 0x2d, 0x7b, 0x88, 0xac, 0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00,
0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93,
0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43,
0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00};
std::vector<uint8_t> vch(ch, ch + sizeof(ch) - 1);
CDataStream spendStream(vch, SER_DISK, CLIENT_VERSION);
CTransaction spendingTx(deserialize, spendStream);
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(uint256S(
"0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b"));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match tx hash");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
// byte-reversed tx hash
filter.insert(ParseHex(
"6bff7fcd4f8565ef406dd5d63d4ff94f318fe82027fd4dc451b04474019f74b4"));
BOOST_CHECK_MESSAGE(
filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match manually serialized tx hash");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("30450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e"
"1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba7"
"1e5aef6405b8643ac4cb7cb3c462aced7f14711a01"));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match input signature");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("046d11fee51b0e60666d5049a9101a72741df480b96ee26488a"
"4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c54"
"7957b7700ff4dfbdefe76036c339"));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match input pub key");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("04943fdd508053c75000106d3bc6e2754dbcff19"));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match output address");
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(spendingTx),
"Simple Bloom filter didn't add output");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("04943fdd508053c75000106d3bc6e2754dbcff19"));
BOOST_CHECK_MESSAGE(filter.MatchAndInsertOutputs(tx),
"Simple Bloom filter didn't match output address");
BOOST_CHECK_MESSAGE(!filter.MatchAndInsertOutputs(spendingTx),
"Simple Bloom filter matched unrelated output");
BOOST_CHECK_MESSAGE(filter.MatchInputs(spendingTx),
"Simple Bloom filter didn't add output");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("a266436d2965547608b9e15d9032a7b9d64fa431"));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match output address");
const TxId txid(uint256S(
"0x90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"));
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(COutPoint(txid, 0));
BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match COutPoint");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
COutPoint prevOutPoint(txid, 0);
{
std::vector<uint8_t> data(32 + sizeof(uint32_t));
memcpy(data.data(), prevOutPoint.GetTxId().begin(), 32);
uint32_t n = prevOutPoint.GetN();
memcpy(data.data() + 32, &n, sizeof(uint32_t));
filter.insert(data);
}
BOOST_CHECK_MESSAGE(
filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter didn't match manually serialized COutPoint");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(uint256S(
"00000009e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436"));
BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter matched random tx hash");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("0000006d2965547608b9e15d9032a7b9d64fa431"));
BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter matched random address");
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(COutPoint(txid, 1));
BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter matched COutPoint for an output "
"we didn't care about");
const TxId lowtxid(uint256S(
"0x000000d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"));
filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(COutPoint(lowtxid, 0));
BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx),
"Simple Bloom filter matched COutPoint for an output "
"we didn't care about");
}
BOOST_AUTO_TEST_CASE(merkle_block_1) {
CBlock block = getBlock13b8a();
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
// Match the last transaction
filter.insert(uint256S(
"0x74d681e0e03bafa802c8aa084379aa98d9fcd632ddc2ed9782b586ec87451f20"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK_EQUAL(merkleBlock.header.GetHash().GetHex(),
block.GetHash().GetHex());
BOOST_CHECK_EQUAL(merkleBlock.vMatchedTxn.size(), 1U);
std::pair<size_t, uint256> pair = merkleBlock.vMatchedTxn[0];
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0x74d681e0e03bafa802c8aa084379aa98d9fcd632ddc2ed9782b"
"586ec87451f20"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 8);
std::vector<uint256> vMatched;
std::vector<size_t> vIndex;
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
// Also match the 8th transaction
filter.insert(uint256S(
"0xdd1fd2a6fc16404faf339881a90adbde7f4f728691ac62e8f168809cdfae1053"));
merkleBlock = CMerkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 2);
BOOST_CHECK(merkleBlock.vMatchedTxn[1] == pair);
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0xdd1fd2a6fc16404faf339881a90adbde7f4f728691ac62e8f16"
"8809cdfae1053"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 7);
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
}
BOOST_AUTO_TEST_CASE(merkle_block_2) {
// Random real block
// (000000005a4ded781e667e06ceefafb71410b511fe0d5adc3e5a27ecbec34ae6)
// With 4 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000075616236cc2126035fadb38deb65b9102cc2c41c09cdf29fc051906800"
"000000fe7d5e12ef0ff901f6050211249919b1c0653771832b3a80c66cea42847f"
"0ae1d4d26e49ffff001d00f0a44104010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff0804ffff001d029105ff"
"ffffff0100f2052a010000004341046d8709a041d34357697dfcb30a9d05900a62"
"94078012bf3bb09c6f9b525f1d16d5503d7905db1ada9501446ea00728668fc571"
"9aa80be2fdfc8a858a4dbdd4fbac00000000010000000255605dc6f5c3dc148b6d"
"a58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100"
"aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702"
"205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df15"
"01ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c"
"268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e"
"3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8b"
"be9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a07"
"65b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a"
"68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d"
"8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad7"
"69f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cf"
"c617c0ea45afac0000000001000000025f9a06d3acdceb56be1bfeaa3e8a25e62d"
"182fa24fefe899d1c17f1dad4c2028000000004847304402205d6058484157235b"
"06028c30736c15613a28bdb768ee628094ca8b0030d4d6eb0220328789c9a2ec27"
"ddaec0ad5ef58efded42e6ea17c2e1ce838f3d6913f5e95db601ffffffff5f9a06"
"d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c202801000000"
"4a493046022100c45af050d3cea806cedd0ab22520c53ebe63b987b8954146cdca"
"42487b84bdd6022100b9b027716a6b59e640da50a864d6dd8a0ef24c76ce62391f"
"a3eabaf4d2886d2d01ffffffff0200e1f505000000004341046a0765b5865641ce"
"08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d484"
"8b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f0000000043"
"41046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef1"
"70e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0c"
"ac000000000100000002e2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f1"
"19b5046ca5738a0f6b0000000048473044022016e7a727a061ea2254a6c358376a"
"aa617ac537eb836c77d646ebda4c748aac8b0220192ce28bf9f2c06a6467e6531e"
"27648d2b3e2e2bae85159c9242939840295ba501ffffffffe2274e5fea1bf29d96"
"3914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b010000004a4930460221"
"00b7a1a755588d4190118936e15cd217d133b0e4a53c3c15924010d5648d8925c9"
"022100aaef031874db2114f2d869ac2de4ae53908fbfea5b2b1862e181626bb900"
"5c9f01ffffffff0200e1f505000000004341044a656f065871a353f216ca26cef8"
"dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8d"
"d2c875a390f67c1f6c94cfc617c0ea45afac00180d8f000000004341046a0765b5"
"865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68ae"
"e3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
// Match the first transaction
filter.insert(uint256S(
"0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1);
std::pair<size_t, uint256> pair = merkleBlock.vMatchedTxn[0];
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df"
"5b47aecb93b70"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0);
std::vector<uint256> vMatched;
std::vector<size_t> vIndex;
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
// Match an output from the second transaction (the pubkey for address
// 1DZTzaBHUDM7T3QvUKBz4qXMRpkg8jsfB5)
// This should match the third transaction because it spends the output
// matched
// It also matches the fourth transaction, which spends to the pubkey again
filter.insert(ParseHex("044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad"
"769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875"
"a390f67c1f6c94cfc617c0ea45af"));
merkleBlock = CMerkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 4);
BOOST_CHECK(pair == merkleBlock.vMatchedTxn[0]);
BOOST_CHECK(merkleBlock.vMatchedTxn[1].second ==
uint256S("0x28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56e"
"bdcacd3069a5f"));
BOOST_CHECK(merkleBlock.vMatchedTxn[1].first == 1);
BOOST_CHECK(merkleBlock.vMatchedTxn[2].second ==
uint256S("0x6b0f8a73a56c04b519f1883e8aafda643ba61a30bd1439969df"
"21bea5f4e27e2"));
BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 2);
BOOST_CHECK(merkleBlock.vMatchedTxn[3].second ==
uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d70076"
"63ace63cddb23"));
BOOST_CHECK(merkleBlock.vMatchedTxn[3].first == 3);
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
}
BOOST_AUTO_TEST_CASE(merkle_block_2_reversed) {
// Like merkle_block_2 except this block gets its transactions reversed in
// order to check non-topological processing.
// Random real block
// (000000005a4ded781e667e06ceefafb71410b511fe0d5adc3e5a27ecbec34ae6)
// With 4 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000075616236cc2126035fadb38deb65b9102cc2c41c09cdf29fc051906800"
"000000fe7d5e12ef0ff901f6050211249919b1c0653771832b3a80c66cea42847f"
"0ae1d4d26e49ffff001d00f0a44104010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff0804ffff001d029105ff"
"ffffff0100f2052a010000004341046d8709a041d34357697dfcb30a9d05900a62"
"94078012bf3bb09c6f9b525f1d16d5503d7905db1ada9501446ea00728668fc571"
"9aa80be2fdfc8a858a4dbdd4fbac00000000010000000255605dc6f5c3dc148b6d"
"a58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100"
"aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702"
"205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df15"
"01ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c"
"268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e"
"3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8b"
"be9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a07"
"65b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a"
"68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d"
"8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad7"
"69f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cf"
"c617c0ea45afac0000000001000000025f9a06d3acdceb56be1bfeaa3e8a25e62d"
"182fa24fefe899d1c17f1dad4c2028000000004847304402205d6058484157235b"
"06028c30736c15613a28bdb768ee628094ca8b0030d4d6eb0220328789c9a2ec27"
"ddaec0ad5ef58efded42e6ea17c2e1ce838f3d6913f5e95db601ffffffff5f9a06"
"d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c202801000000"
"4a493046022100c45af050d3cea806cedd0ab22520c53ebe63b987b8954146cdca"
"42487b84bdd6022100b9b027716a6b59e640da50a864d6dd8a0ef24c76ce62391f"
"a3eabaf4d2886d2d01ffffffff0200e1f505000000004341046a0765b5865641ce"
"08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d484"
"8b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f0000000043"
"41046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef1"
"70e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0c"
"ac000000000100000002e2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f1"
"19b5046ca5738a0f6b0000000048473044022016e7a727a061ea2254a6c358376a"
"aa617ac537eb836c77d646ebda4c748aac8b0220192ce28bf9f2c06a6467e6531e"
"27648d2b3e2e2bae85159c9242939840295ba501ffffffffe2274e5fea1bf29d96"
"3914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b010000004a4930460221"
"00b7a1a755588d4190118936e15cd217d133b0e4a53c3c15924010d5648d8925c9"
"022100aaef031874db2114f2d869ac2de4ae53908fbfea5b2b1862e181626bb900"
"5c9f01ffffffff0200e1f505000000004341044a656f065871a353f216ca26cef8"
"dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8d"
"d2c875a390f67c1f6c94cfc617c0ea45afac00180d8f000000004341046a0765b5"
"865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68ae"
"e3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
// Reverse the transactions and recalculate merkle root. The remainder of
// this test is the same as merkle_block_2 above except the transaction
// indices get reversed too.
std::reverse(block.vtx.begin(), block.vtx.end());
block.hashMerkleRoot = BlockMerkleRoot(block);
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
// Match the fourth (was first) transaction
filter.insert(uint256S(
"0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1);
// Remember the single (index, txid) match; after the filter is widened
// below it must reappear as the last entry (index 3).
std::pair<size_t, uint256> pair = merkleBlock.vMatchedTxn[0];
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df"
"5b47aecb93b70"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 3);
std::vector<uint256> vMatched;
std::vector<size_t> vIndex;
// The partial merkle tree must reproduce the recomputed merkle root, and
// the extracted matches must agree with vMatchedTxn.
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
// Match an output from the third (was second) transaction (the pubkey for
// address 1DZTzaBHUDM7T3QvUKBz4qXMRpkg8jsfB5) This should match the second
// (was third) transaction because it spends the output matched
// It also matches the first (was fourth) transaction, which spends to the
// pubkey again
filter.insert(ParseHex("044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad"
"769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875"
"a390f67c1f6c94cfc617c0ea45af"));
merkleBlock = CMerkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
// All four transactions should now match, in (reversed) block order.
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 4);
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d70076"
"63ace63cddb23"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0);
BOOST_CHECK(merkleBlock.vMatchedTxn[1].second ==
uint256S("0x6b0f8a73a56c04b519f1883e8aafda643ba61a30bd1439969df"
"21bea5f4e27e2"));
BOOST_CHECK(merkleBlock.vMatchedTxn[1].first == 1);
BOOST_CHECK(merkleBlock.vMatchedTxn[2].second ==
uint256S("0x28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56e"
"bdcacd3069a5f"));
BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 2);
BOOST_CHECK(pair == merkleBlock.vMatchedTxn[3]);
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
}
BOOST_AUTO_TEST_CASE(merkle_block_2_with_update_none) {
// Random real block
// (000000005a4ded781e667e06ceefafb71410b511fe0d5adc3e5a27ecbec34ae6)
// With 4 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000075616236cc2126035fadb38deb65b9102cc2c41c09cdf29fc051906800"
"000000fe7d5e12ef0ff901f6050211249919b1c0653771832b3a80c66cea42847f"
"0ae1d4d26e49ffff001d00f0a44104010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff0804ffff001d029105ff"
"ffffff0100f2052a010000004341046d8709a041d34357697dfcb30a9d05900a62"
"94078012bf3bb09c6f9b525f1d16d5503d7905db1ada9501446ea00728668fc571"
"9aa80be2fdfc8a858a4dbdd4fbac00000000010000000255605dc6f5c3dc148b6d"
"a58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100"
"aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702"
"205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df15"
"01ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c"
"268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e"
"3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8b"
"be9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a07"
"65b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a"
"68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d"
"8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad7"
"69f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cf"
"c617c0ea45afac0000000001000000025f9a06d3acdceb56be1bfeaa3e8a25e62d"
"182fa24fefe899d1c17f1dad4c2028000000004847304402205d6058484157235b"
"06028c30736c15613a28bdb768ee628094ca8b0030d4d6eb0220328789c9a2ec27"
"ddaec0ad5ef58efded42e6ea17c2e1ce838f3d6913f5e95db601ffffffff5f9a06"
"d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c202801000000"
"4a493046022100c45af050d3cea806cedd0ab22520c53ebe63b987b8954146cdca"
"42487b84bdd6022100b9b027716a6b59e640da50a864d6dd8a0ef24c76ce62391f"
"a3eabaf4d2886d2d01ffffffff0200e1f505000000004341046a0765b5865641ce"
"08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d484"
"8b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f0000000043"
"41046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef1"
"70e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0c"
"ac000000000100000002e2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f1"
"19b5046ca5738a0f6b0000000048473044022016e7a727a061ea2254a6c358376a"
"aa617ac537eb836c77d646ebda4c748aac8b0220192ce28bf9f2c06a6467e6531e"
"27648d2b3e2e2bae85159c9242939840295ba501ffffffffe2274e5fea1bf29d96"
"3914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b010000004a4930460221"
"00b7a1a755588d4190118936e15cd217d133b0e4a53c3c15924010d5648d8925c9"
"022100aaef031874db2114f2d869ac2de4ae53908fbfea5b2b1862e181626bb900"
"5c9f01ffffffff0200e1f505000000004341044a656f065871a353f216ca26cef8"
"dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8d"
"d2c875a390f67c1f6c94cfc617c0ea45afac00180d8f000000004341046a0765b5"
"865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68ae"
"e3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
// Same block as merkle_block_2, but with BLOOM_UPDATE_NONE: matched
// outputs are not added back into the filter, so spends of a matched
// output do not themselves match (checked below).
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_NONE);
// Match the first transaction
filter.insert(uint256S(
"0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1);
// Remember the single (index, txid) match; it must remain entry 0 after
// the filter is widened below.
std::pair<size_t, uint256> pair = merkleBlock.vMatchedTxn[0];
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df"
"5b47aecb93b70"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0);
std::vector<uint256> vMatched;
std::vector<size_t> vIndex;
// The partial merkle tree must reproduce the block's merkle root.
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
// Match an output from the second transaction (the pubkey for address
// 1DZTzaBHUDM7T3QvUKBz4qXMRpkg8jsfB5)
// This should not match the third transaction though it spends the output
// matched
// It will match the fourth transaction, which has another pay-to-pubkey
// output to the same address
filter.insert(ParseHex("044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad"
"769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875"
"a390f67c1f6c94cfc617c0ea45af"));
merkleBlock = CMerkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
// Only 3 matches (tx 0, 1 and 3): tx 2 is excluded because the filter was
// never updated with tx 1's outpoint.
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 3);
BOOST_CHECK(pair == merkleBlock.vMatchedTxn[0]);
BOOST_CHECK(merkleBlock.vMatchedTxn[1].second ==
uint256S("0x28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56e"
"bdcacd3069a5f"));
BOOST_CHECK(merkleBlock.vMatchedTxn[1].first == 1);
BOOST_CHECK(merkleBlock.vMatchedTxn[2].second ==
uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d70076"
"63ace63cddb23"));
BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 3);
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
}
BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize) {
    // Random real block
    // (000000000000dab0130bbcc991d3d7ae6b81aa6f50a798888dfe62337458dc45)
    // With one tx
    CBlock block;
    CDataStream stream(
        ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b00"
                 "00000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3f"
                 "f60abe184f196367291b4d4c86041b8fa45d6301010000000100000000000"
                 "00000000000000000000000000000000000000000000000000000ffffffff"
                 "08044c86041b020a02ffffffff0100f2052a01000000434104ecd3229b057"
                 "1c3be876feaac0442a9f13c5a572742927af1dc623353ecf8c202225f6486"
                 "8137a18cdd85cbbb4c74fbccfd4f49639cf1bdc94a5672bb15ad5d4cac000"
                 "00000"),
        SER_NETWORK, PROTOCOL_VERSION);
    stream >> block;
    CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    // Match the only transaction
    filter.insert(uint256S(
        "0x63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5"));
    CMerkleBlock merkleBlock(block, filter);
    BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
    BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1);
    BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
                uint256S("0x63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee"
                         "3a3d669c00cb5"));
    BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0);
    std::vector<uint256> vMatched;
    std::vector<size_t> vIndex;
    // The partial merkle tree must reproduce the block's merkle root and the
    // extracted matches must agree with vMatchedTxn.
    BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
                block.hashMerkleRoot);
    BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
    for (size_t i = 0; i < vMatched.size(); i++) {
        BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
    }
    // Round-trip the CMerkleBlock through network serialization and compare
    // against the known-good encoding.
    CDataStream merkleStream(SER_NETWORK, PROTOCOL_VERSION);
    merkleStream << merkleBlock;
    std::vector<uint8_t> expected =
        ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b00"
                 "00000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3f"
                 "f60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e"
                 "33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101");
    // The stream stores std::byte; view it as unsigned char so the element
    // types match the expected uint8_t vector.
    auto result{MakeUCharSpan(merkleStream)};
    BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(),
                                  result.begin(), result.end());
}
BOOST_AUTO_TEST_CASE(merkle_block_4) {
// Random real block
// (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4)
// With 7 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc88067010000"
"0000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9"
"728776381b4d4c86041b554b852907010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff07044c86041b0136ffff"
"ffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1"
"f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad"
"1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08"
"989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e"
"834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea1022100"
"9253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901"
"ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643"
"207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58cc"
"b3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9d"
"ee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca"
"0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8"
"acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf4"
"9e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fc"
"ad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff30"
"9e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af830000"
"00004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b"
"51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb542"
"8f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6"
"d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c8"
"10ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d"
"31f1187779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff01007144"
"60030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000"
"000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72"
"a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6"
"f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423"
"746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6"
"fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd62"
"38f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d225"
"3d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a47304402207812"
"4c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e93022069"
"1d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a360141"
"0462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab8"
"44c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ff"
"fffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4daf"
"daa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab0"
"23abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7"
"fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561"
"f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d8"
"7270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9"
"e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e0084714"
"7cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b"
"2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd7014104"
"62bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844"
"c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffff"
"ffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c38"
"5d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758d"
"f616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11e"
"eb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a"
"0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f"
"464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e"
"88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1aaffffffff0280d7e636"
"030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c"
"00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac0000"
"0000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d"
"850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1a"
"c1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90"
"f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e521fa7946d12edbb1d1"
"e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c"
"4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f000000000019"
"76a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f0000000000"
"1976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000"
"000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3"
"f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e2"
"80007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a"
"690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2"
"b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a3"
"3ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf041"
"76b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b"
"903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb"
"87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf8"
"0125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06"
"820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff01"
"00093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888"
"ac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
// Match the last transaction
filter.insert(uint256S(
"0x0a2a92f0bda4727d0a13eaddf4dd9ac6b5c61a1429e6b2b818f19b15df0ac154"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1);
// Remember the (index 6, txid) match; it should reappear as the second
// entry once the filter is extended below.
std::pair<size_t, uint256> pair = merkleBlock.vMatchedTxn[0];
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0x0a2a92f0bda4727d0a13eaddf4dd9ac6b5c61a1429e6b2b818f"
"19b15df0ac154"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 6);
std::vector<uint256> vMatched;
std::vector<size_t> vIndex;
// The partial merkle tree must reproduce the block's merkle root.
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
// Also match the 4th transaction
filter.insert(uint256S(
"0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"));
merkleBlock = CMerkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
// Now both txids match, reported in block order (index 3, then 6).
BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 2);
BOOST_CHECK(merkleBlock.vMatchedTxn[0].second ==
uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de673"
"26471df5bc041"));
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 3);
BOOST_CHECK(merkleBlock.vMatchedTxn[1] == pair);
BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) ==
block.hashMerkleRoot);
BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size());
for (size_t i = 0; i < vMatched.size(); i++) {
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
}
}
BOOST_AUTO_TEST_CASE(merkle_block_4_test_p2pubkey_only) {
// Random real block
// (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4)
// With 7 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc88067010000"
"0000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9"
"728776381b4d4c86041b554b852907010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff07044c86041b0136ffff"
"ffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1"
"f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad"
"1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08"
"989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e"
"834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea1022100"
"9253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901"
"ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643"
"207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58cc"
"b3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9d"
"ee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca"
"0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8"
"acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf4"
"9e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fc"
"ad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff30"
"9e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af830000"
"00004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b"
"51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb542"
"8f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6"
"d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c8"
"10ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d"
"31f1187779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff01007144"
"60030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000"
"000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72"
"a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6"
"f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423"
"746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6"
"fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd62"
"38f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d225"
"3d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a47304402207812"
"4c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e93022069"
"1d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a360141"
"0462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab8"
"44c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ff"
"fffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4daf"
"daa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab0"
"23abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7"
"fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561"
"f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d8"
"7270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9"
"e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e0084714"
"7cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b"
"2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd7014104"
"62bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844"
"c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffff"
"ffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c38"
"5d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758d"
"f616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11e"
"eb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a"
"0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f"
"464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e"
"88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1aaffffffff0280d7e636"
"030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c"
"00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac0000"
"0000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d"
"850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1a"
"c1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90"
"f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e521fa7946d12edbb1d1"
"e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c"
"4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f000000000019"
"76a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f0000000000"
"1976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000"
"000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3"
"f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e2"
"80007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a"
"690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2"
"b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a3"
"3ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf041"
"76b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b"
"903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb"
"87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf8"
"0125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06"
"820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff01"
"00093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888"
"ac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
// BLOOM_UPDATE_P2PUBKEY_ONLY: outpoints are only added to the filter for
// matched pay-to-pubkey style outputs (checked via filter.contains below).
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_P2PUBKEY_ONLY);
// Match the generation pubkey
filter.insert(ParseHex("04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f"
"134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce"
"13ad1357231a2252247d97a46a91"));
// ...and the output address of the 4th transaction
filter.insert(ParseHex("b6efd80d99179f4f4ff6f4dd0a007d018c385d21"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
// We should match the generation outpoint
const TxId txid1(uint256S(
"0x147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"));
BOOST_CHECK(filter.contains(COutPoint(txid1, 0)));
// ... but not the 4th transaction's output (its not pay-2-pubkey)
const TxId txid2(uint256S(
"0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"));
BOOST_CHECK(!filter.contains(COutPoint(txid2, 0)));
}
BOOST_AUTO_TEST_CASE(merkle_block_4_test_update_none) {
// Random real block
// (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4)
// With 7 txes
CBlock block;
CDataStream stream(
ParseHex(
"0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc88067010000"
"0000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9"
"728776381b4d4c86041b554b852907010000000100000000000000000000000000"
"00000000000000000000000000000000000000ffffffff07044c86041b0136ffff"
"ffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1"
"f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad"
"1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08"
"989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e"
"834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea1022100"
"9253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901"
"ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643"
"207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58cc"
"b3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9d"
"ee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca"
"0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8"
"acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf4"
"9e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fc"
"ad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff30"
"9e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af830000"
"00004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b"
"51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb542"
"8f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6"
"d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c8"
"10ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d"
"31f1187779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff01007144"
"60030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000"
"000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72"
"a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6"
"f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423"
"746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6"
"fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd62"
"38f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d225"
"3d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a47304402207812"
"4c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e93022069"
"1d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a360141"
"0462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab8"
"44c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ff"
"fffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4daf"
"daa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab0"
"23abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7"
"fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561"
"f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d8"
"7270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9"
"e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e0084714"
"7cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b"
"2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd7014104"
"62bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844"
"c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffff"
"ffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c38"
"5d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758d"
"f616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11e"
"eb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a"
"0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f"
"464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e"
"88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1aaffffffff0280d7e636"
"030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c"
"00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac0000"
"0000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d"
"850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1a"
"c1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90"
"f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e521fa7946d12edbb1d1"
"e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c"
"4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f000000000019"
"76a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f0000000000"
"1976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000"
"000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3"
"f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e2"
"80007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a"
"690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2"
"b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a3"
"3ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf041"
"76b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b"
"903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb"
"87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf8"
"0125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06"
"820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff01"
"00093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888"
"ac00000000"),
SER_NETWORK, PROTOCOL_VERSION);
stream >> block;
// Same block and filter contents as merkle_block_4_test_p2pubkey_only,
// but with BLOOM_UPDATE_NONE no outpoints get added to the filter at all.
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_NONE);
// Match the generation pubkey
filter.insert(ParseHex("04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f"
"134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce"
"13ad1357231a2252247d97a46a91"));
// ...and the output address of the 4th transaction
filter.insert(ParseHex("b6efd80d99179f4f4ff6f4dd0a007d018c385d21"));
CMerkleBlock merkleBlock(block, filter);
BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash());
// We shouldn't match any outpoints (UPDATE_NONE)
const TxId txid1(uint256S(
"0x147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"));
BOOST_CHECK(!filter.contains(COutPoint(txid1, 0)));
const TxId txid2(uint256S(
"0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"));
BOOST_CHECK(!filter.contains(COutPoint(txid2, 0)));
}
static std::vector<uint8_t> RandomData() {
uint256 r = InsecureRand256();
return std::vector<uint8_t>(r.begin(), r.end());
}
BOOST_AUTO_TEST_CASE(rolling_bloom) {
SeedInsecureRand(SeedRand::ZEROS);
g_mock_deterministic_tests = true;
// last-100-entry, 1% false positive:
CRollingBloomFilter rb1(100, 0.01);
// Overfill:
static const int DATASIZE = 399;
std::vector<uint8_t> data[DATASIZE];
for (int i = 0; i < DATASIZE; i++) {
data[i] = RandomData();
rb1.insert(data[i]);
}
// Last 100 guaranteed to be remembered:
for (int i = 299; i < DATASIZE; i++) {
BOOST_CHECK(rb1.contains(data[i]));
}
// false positive rate is 1%, so we should get about 100 hits if
// testing 10,000 random keys. We get worst-case false positive
// behavior when the filter is as full as possible, which is
// when we've inserted one minus an integer multiple of nElement*2.
unsigned int nHits = 0;
for (int i = 0; i < 10000; i++) {
if (rb1.contains(RandomData())) {
++nHits;
}
}
// Expect about 100 hits
BOOST_CHECK_EQUAL(nHits, 75U);
BOOST_CHECK(rb1.contains(data[DATASIZE - 1]));
rb1.reset();
BOOST_CHECK(!rb1.contains(data[DATASIZE - 1]));
// Now roll through data, make sure last 100 entries
// are always remembered:
for (int i = 0; i < DATASIZE; i++) {
if (i >= 100) {
BOOST_CHECK(rb1.contains(data[i - 100]));
}
rb1.insert(data[i]);
BOOST_CHECK(rb1.contains(data[i]));
}
// Insert 999 more random entries:
for (int i = 0; i < 999; i++) {
std::vector<uint8_t> d = RandomData();
rb1.insert(d);
BOOST_CHECK(rb1.contains(d));
}
// Sanity check to make sure the filter isn't just filling up:
nHits = 0;
for (int i = 0; i < DATASIZE; i++) {
if (rb1.contains(data[i])) {
++nHits;
}
}
// Expect about 5 false positives
BOOST_CHECK_EQUAL(nHits, 6U);
// last-1000-entry, 0.01% false positive:
CRollingBloomFilter rb2(1000, 0.001);
for (int i = 0; i < DATASIZE; i++) {
rb2.insert(data[i]);
}
// ... room for all of them:
for (int i = 0; i < DATASIZE; i++) {
BOOST_CHECK(rb2.contains(data[i]));
}
g_mock_deterministic_tests = false;
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp
index 51d9a8643..e9da1a5a6 100644
--- a/src/test/fuzz/autofile.cpp
+++ b/src/test/fuzz/autofile.cpp
@@ -1,65 +1,65 @@
// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <streams.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <array>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
FUZZ_TARGET(autofile) {
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
FuzzedAutoFileProvider fuzzed_auto_file_provider =
ConsumeAutoFile(fuzzed_data_provider);
AutoFile auto_file{fuzzed_auto_file_provider.open()};
while (fuzzed_data_provider.ConsumeBool()) {
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
auto_file.read(
- (char *)arr.data(),
- fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
- 0, 4096));
+ {arr.data(),
+ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
+ 0, 4096)});
} catch (const std::ios_base::failure &) {
}
},
[&] {
- const std::array<uint8_t, 4096> arr{};
+ const std::array<std::byte, 4096> arr{};
try {
auto_file.write(
- (const char *)arr.data(),
- fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
- 0, 4096));
+ {arr.data(),
+ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
+ 0, 4096)});
} catch (const std::ios_base::failure &) {
}
},
[&] {
try {
auto_file.ignore(
fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
0, 4096));
} catch (const std::ios_base::failure &) {
}
},
[&] { auto_file.fclose(); },
[&] { ReadFromStream(fuzzed_data_provider, auto_file); },
[&] { WriteToStream(fuzzed_data_provider, auto_file); });
}
(void)auto_file.Get();
(void)auto_file.IsNull();
if (fuzzed_data_provider.ConsumeBool()) {
FILE *f = auto_file.release();
if (f != nullptr) {
fclose(f);
}
}
}
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp
index 9da8745fc..f36b6e0e1 100644
--- a/src/test/fuzz/buffered_file.cpp
+++ b/src/test/fuzz/buffered_file.cpp
@@ -1,82 +1,82 @@
// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <streams.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <array>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>
FUZZ_TARGET(buffered_file) {
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider);
std::optional<CBufferedFile> opt_buffered_file;
FILE *fuzzed_file = fuzzed_file_provider.open();
try {
opt_buffered_file.emplace(
fuzzed_file,
fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096),
fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096),
fuzzed_data_provider.ConsumeIntegral<int>(),
fuzzed_data_provider.ConsumeIntegral<int>());
} catch (const std::ios_base::failure &) {
if (fuzzed_file != nullptr) {
fclose(fuzzed_file);
}
}
if (opt_buffered_file && fuzzed_file != nullptr) {
bool setpos_fail = false;
while (fuzzed_data_provider.ConsumeBool()) {
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
opt_buffered_file->read(
- (char *)arr.data(),
- fuzzed_data_provider.ConsumeIntegralInRange<size_t>(
- 0, 4096));
+ {arr.data(),
+ fuzzed_data_provider
+ .ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure &) {
}
},
[&] {
opt_buffered_file->SetLimit(
fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(
0, 4096));
},
[&] {
if (!opt_buffered_file->SetPos(
fuzzed_data_provider
.ConsumeIntegralInRange<uint64_t>(0, 4096))) {
setpos_fail = true;
}
},
[&] {
if (setpos_fail) {
// Calling FindByte(...) after a failed SetPos(...) call
// may result in an infinite loop.
return;
}
try {
opt_buffered_file->FindByte(
- fuzzed_data_provider.ConsumeIntegral<char>());
+ fuzzed_data_provider.ConsumeIntegral<uint8_t>());
} catch (const std::ios_base::failure &) {
}
},
[&] {
ReadFromStream(fuzzed_data_provider, *opt_buffered_file);
});
}
opt_buffered_file->GetPos();
opt_buffered_file->GetType();
opt_buffered_file->GetVersion();
}
}
diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp
index d68548509..46a9baa55 100644
--- a/src/test/hash_tests.cpp
+++ b/src/test/hash_tests.cpp
@@ -1,224 +1,226 @@
// Copyright (c) 2013-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <crypto/siphash.h>
#include <hash.h>
#include <clientversion.h>
#include <streams.h>
#include <util/strencodings.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(hash_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(murmurhash3) {
#define T(expected, seed, data) \
BOOST_CHECK_EQUAL(MurmurHash3(seed, ParseHex(data)), expected)
// Test MurmurHash3 with various inputs. Of course this is retested in the
// bloom filter tests - they would fail if MurmurHash3() had any problems -
// but is useful for those trying to implement Bitcoin libraries as a
// source of test data for their MurmurHash3() primitive during
// development.
//
// The magic number 0xFBA4C795 comes from CBloomFilter::Hash()
T(0x00000000U, 0x00000000, "");
T(0x6a396f08U, 0xFBA4C795, "");
T(0x81f16f39U, 0xffffffff, "");
T(0x514e28b7U, 0x00000000, "00");
T(0xea3f0b17U, 0xFBA4C795, "00");
T(0xfd6cf10dU, 0x00000000, "ff");
T(0x16c6b7abU, 0x00000000, "0011");
T(0x8eb51c3dU, 0x00000000, "001122");
T(0xb4471bf8U, 0x00000000, "00112233");
T(0xe2301fa8U, 0x00000000, "0011223344");
T(0xfc2e4a15U, 0x00000000, "001122334455");
T(0xb074502cU, 0x00000000, "00112233445566");
T(0x8034d2a0U, 0x00000000, "0011223344556677");
T(0xb4698defU, 0x00000000, "001122334455667788");
#undef T
}
/**
* SipHash-2-4 output with
* k = 00 01 02 ...
* and
* in = (empty string)
* in = 00 (1 byte)
* in = 00 01 (2 bytes)
* in = 00 01 02 (3 bytes)
* ...
* in = 00 01 02 ... 3e (63 bytes)
*
* from: https://131002.net/siphash/siphash24.c
*/
uint64_t siphash_4_2_testvec[] = {
0x726fdb47dd0e0e31, 0x74f839c593dc67fd, 0x0d6c8009d9a94f5a,
0x85676696d7fb7e2d, 0xcf2794e0277187b7, 0x18765564cd99a68d,
0xcbc9466e58fee3ce, 0xab0200f58b01d137, 0x93f5f5799a932462,
0x9e0082df0ba9e4b0, 0x7a5dbbc594ddb9f3, 0xf4b32f46226bada7,
0x751e8fbc860ee5fb, 0x14ea5627c0843d90, 0xf723ca908e7af2ee,
0xa129ca6149be45e5, 0x3f2acc7f57c29bdb, 0x699ae9f52cbe4794,
0x4bc1b3f0968dd39c, 0xbb6dc91da77961bd, 0xbed65cf21aa2ee98,
0xd0f2cbb02e3b67c7, 0x93536795e3a33e88, 0xa80c038ccd5ccec8,
0xb8ad50c6f649af94, 0xbce192de8a85b8ea, 0x17d835b85bbb15f3,
0x2f2e6163076bcfad, 0xde4daaaca71dc9a5, 0xa6a2506687956571,
0xad87a3535c49ef28, 0x32d892fad841c342, 0x7127512f72f27cce,
0xa7f32346f95978e3, 0x12e0b01abb051238, 0x15e034d40fa197ae,
0x314dffbe0815a3b4, 0x027990f029623981, 0xcadcd4e59ef40c4d,
0x9abfd8766a33735c, 0x0e3ea96b5304a7d0, 0xad0c42d6fc585992,
0x187306c89bc215a9, 0xd4a60abcf3792b95, 0xf935451de4f21df2,
0xa9538f0419755787, 0xdb9acddff56ca510, 0xd06c98cd5c0975eb,
0xe612a3cb9ecba951, 0xc766e62cfcadaf96, 0xee64435a9752fe72,
0xa192d576b245165a, 0x0a8787bf8ecb74b2, 0x81b3e73d20b49b6f,
0x7fa8220ba3b2ecea, 0x245731c13ca42499, 0xb78dbfaf3a8d83bd,
0xea1ad565322a1a0b, 0x60e61c23a3795013, 0x6606d7e446282b93,
0x6ca4ecb15c5f91e1, 0x9f626da15c9625f3, 0xe51b38608ef25f57,
0x958a324ceb064572};
BOOST_AUTO_TEST_CASE(siphash) {
CSipHasher hasher(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x726fdb47dd0e0e31ull);
static const uint8_t t0[1] = {0};
hasher.Write(t0, 1);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x74f839c593dc67fdull);
static const uint8_t t1[7] = {1, 2, 3, 4, 5, 6, 7};
hasher.Write(t1, 7);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x93f5f5799a932462ull);
hasher.Write(0x0F0E0D0C0B0A0908ULL);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x3f2acc7f57c29bdbull);
static const uint8_t t2[2] = {16, 17};
hasher.Write(t2, 2);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x4bc1b3f0968dd39cull);
static const uint8_t t3[9] = {18, 19, 20, 21, 22, 23, 24, 25, 26};
hasher.Write(t3, 9);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x2f2e6163076bcfadull);
static const uint8_t t4[5] = {27, 28, 29, 30, 31};
hasher.Write(t4, 5);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x7127512f72f27cceull);
hasher.Write(0x2726252423222120ULL);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0x0e3ea96b5304a7d0ull);
hasher.Write(0x2F2E2D2C2B2A2928ULL);
BOOST_CHECK_EQUAL(hasher.Finalize(), 0xe612a3cb9ecba951ull);
BOOST_CHECK_EQUAL(
SipHashUint256(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL,
uint256S("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09"
"080706050403020100")),
0x7127512f72f27cceull);
// Check test vectors from spec, one byte at a time
CSipHasher hasher2(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL);
for (uint8_t x = 0; x < std::size(siphash_4_2_testvec); ++x) {
BOOST_CHECK_EQUAL(hasher2.Finalize(), siphash_4_2_testvec[x]);
hasher2.Write(&x, 1);
}
// Check test vectors from spec, eight bytes at a time
CSipHasher hasher3(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL);
for (uint8_t x = 0; x < std::size(siphash_4_2_testvec); x += 8) {
BOOST_CHECK_EQUAL(hasher3.Finalize(), siphash_4_2_testvec[x]);
hasher3.Write(uint64_t(x) | (uint64_t(x + 1) << 8) |
(uint64_t(x + 2) << 16) | (uint64_t(x + 3) << 24) |
(uint64_t(x + 4) << 32) | (uint64_t(x + 5) << 40) |
(uint64_t(x + 6) << 48) | (uint64_t(x + 7) << 56));
}
CHashWriter ss(SER_DISK, CLIENT_VERSION);
CMutableTransaction tx;
// Note these tests were originally written with tx.nVersion=1
// and the test would be affected by default tx version bumps if not fixed.
tx.nVersion = 1;
ss << tx;
BOOST_CHECK_EQUAL(SipHashUint256(1, 2, ss.GetHash()),
0x79751e980c2a0a35ULL);
// Check consistency between CSipHasher and SipHashUint256[Extra].
FastRandomContext ctx;
for (int i = 0; i < 16; ++i) {
uint64_t k1 = ctx.rand64();
uint64_t k2 = ctx.rand64();
uint256 x = InsecureRand256();
uint32_t n = ctx.rand32();
uint8_t nb[4];
WriteLE32(nb, n);
CSipHasher sip256(k1, k2);
sip256.Write(x.begin(), 32);
CSipHasher sip288 = sip256;
sip288.Write(nb, 4);
BOOST_CHECK_EQUAL(SipHashUint256(k1, k2, x), sip256.Finalize());
BOOST_CHECK_EQUAL(SipHashUint256Extra(k1, k2, x, n), sip288.Finalize());
}
}
namespace {
class CDummyObject {
uint32_t value;
public:
CDummyObject() : value(0) {}
uint32_t GetValue() { return value; }
template <typename Stream> void Serialize(Stream &s) const {
unsigned int nVersionDummy = 0;
::Serialize(s, VARINT(nVersionDummy));
::Serialize(s, VARINT(value));
}
template <typename Stream> void Unserialize(Stream &s) {
unsigned int nVersionDummy = 0;
::Unserialize(s, VARINT(nVersionDummy));
::Unserialize(s, VARINT(value));
}
};
} // namespace
BOOST_AUTO_TEST_CASE(hashverifier_tests) {
std::vector<uint8_t> data = ParseHex("4223");
CDataStream ss(data, SER_DISK, CLIENT_VERSION);
CHashVerifier<CDataStream> verifier(&ss);
CDummyObject dummy;
verifier >> dummy;
uint256 checksum = verifier.GetHash();
BOOST_CHECK_EQUAL(dummy.GetValue(), 0x23);
CHashWriter h0(SER_DISK, CLIENT_VERSION);
h0 << CDataStream(data, SER_DISK, CLIENT_VERSION);
BOOST_CHECK(h0.GetHash() == checksum);
CHashWriter h1(SER_DISK, CLIENT_VERSION);
h1 << dummy;
BOOST_CHECK(h1.GetHash() != checksum);
}
BOOST_AUTO_TEST_CASE(sh256_tests) {
CHashWriter h0(SER_DISK, CLIENT_VERSION);
- h0.write("abc", 3);
+ h0.write(MakeByteSpan("abc").first(3));
BOOST_CHECK_EQUAL(
h0.GetSHA256().GetHex(),
"ad1500f261ff10b49c7a1796a36103b02322ae5dde404141eacf018fbf1678ba");
CHashWriter h1(SER_DISK, CLIENT_VERSION);
- h1.write("", 0);
+ h1.write(MakeByteSpan("").first(0));
BOOST_CHECK_EQUAL(
h1.GetSHA256().GetHex(),
"55b852781b9995a44c939b64e441ae2724b96f99c8f4fb9a141cfc9842c4b0e3");
CHashWriter h2(SER_DISK, CLIENT_VERSION);
- h2.write("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56);
+ h2.write(
+ MakeByteSpan("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq")
+ .first(56));
BOOST_CHECK_EQUAL(
h2.GetSHA256().GetHex(),
"c106db19d4edecf66721ff6459e43ca339603e0c9326c0e5b83806d2616a8d24");
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index c30b5bde0..29441bd5f 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -1,3357 +1,3357 @@
// Copyright (c) 2011-2019 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <script/script.h>
#include <script/script_error.h>
#include <script/sighashtype.h>
#include <script/sign.h>
#include <script/signingprovider.h>
#include <core_io.h>
#include <key.h>
#include <rpc/util.h>
#include <streams.h>
#include <util/strencodings.h>
#include <util/system.h>
#if defined(HAVE_CONSENSUS_LIB)
#include <script/bitcoinconsensus.h>
#endif
#include <test/data/script_tests.json.h>
#include <test/jsonutil.h>
#include <test/scriptflags.h>
#include <test/sigutil.h>
#include <test/util/setup_common.h>
#include <test/util/transaction_utils.h>
#include <boost/test/unit_test.hpp>
#include <univalue.h>
#include <cstdint>
#include <string>
#include <vector>
// Uncomment if you want to output updated JSON tests.
// #define UPDATE_JSON_TESTS
static const uint32_t gFlags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC;
struct ScriptErrorDesc {
ScriptError err;
const char *name;
};
static ScriptErrorDesc script_errors[] = {
{ScriptError::OK, "OK"},
{ScriptError::UNKNOWN, "UNKNOWN_ERROR"},
{ScriptError::EVAL_FALSE, "EVAL_FALSE"},
{ScriptError::OP_RETURN, "OP_RETURN"},
{ScriptError::SCRIPT_SIZE, "SCRIPT_SIZE"},
{ScriptError::PUSH_SIZE, "PUSH_SIZE"},
{ScriptError::OP_COUNT, "OP_COUNT"},
{ScriptError::STACK_SIZE, "STACK_SIZE"},
{ScriptError::SIG_COUNT, "SIG_COUNT"},
{ScriptError::PUBKEY_COUNT, "PUBKEY_COUNT"},
{ScriptError::INPUT_SIGCHECKS, "INPUT_SIGCHECKS"},
{ScriptError::INVALID_OPERAND_SIZE, "OPERAND_SIZE"},
{ScriptError::INVALID_NUMBER_RANGE, "INVALID_NUMBER_RANGE"},
{ScriptError::IMPOSSIBLE_ENCODING, "IMPOSSIBLE_ENCODING"},
{ScriptError::INVALID_SPLIT_RANGE, "SPLIT_RANGE"},
{ScriptError::INVALID_BIT_COUNT, "INVALID_BIT_COUNT"},
{ScriptError::VERIFY, "VERIFY"},
{ScriptError::EQUALVERIFY, "EQUALVERIFY"},
{ScriptError::CHECKMULTISIGVERIFY, "CHECKMULTISIGVERIFY"},
{ScriptError::CHECKSIGVERIFY, "CHECKSIGVERIFY"},
{ScriptError::CHECKDATASIGVERIFY, "CHECKDATASIGVERIFY"},
{ScriptError::NUMEQUALVERIFY, "NUMEQUALVERIFY"},
{ScriptError::BAD_OPCODE, "BAD_OPCODE"},
{ScriptError::DISABLED_OPCODE, "DISABLED_OPCODE"},
{ScriptError::INVALID_STACK_OPERATION, "INVALID_STACK_OPERATION"},
{ScriptError::INVALID_ALTSTACK_OPERATION, "INVALID_ALTSTACK_OPERATION"},
{ScriptError::UNBALANCED_CONDITIONAL, "UNBALANCED_CONDITIONAL"},
{ScriptError::NEGATIVE_LOCKTIME, "NEGATIVE_LOCKTIME"},
{ScriptError::UNSATISFIED_LOCKTIME, "UNSATISFIED_LOCKTIME"},
{ScriptError::SIG_HASHTYPE, "SIG_HASHTYPE"},
{ScriptError::SIG_DER, "SIG_DER"},
{ScriptError::MINIMALDATA, "MINIMALDATA"},
{ScriptError::SIG_PUSHONLY, "SIG_PUSHONLY"},
{ScriptError::SIG_HIGH_S, "SIG_HIGH_S"},
{ScriptError::PUBKEYTYPE, "PUBKEYTYPE"},
{ScriptError::CLEANSTACK, "CLEANSTACK"},
{ScriptError::MINIMALIF, "MINIMALIF"},
{ScriptError::SIG_NULLFAIL, "NULLFAIL"},
{ScriptError::SIG_BADLENGTH, "SIG_BADLENGTH"},
{ScriptError::SIG_NONSCHNORR, "SIG_NONSCHNORR"},
{ScriptError::DISCOURAGE_UPGRADABLE_NOPS, "DISCOURAGE_UPGRADABLE_NOPS"},
{ScriptError::ILLEGAL_FORKID, "ILLEGAL_FORKID"},
{ScriptError::MUST_USE_FORKID, "MISSING_FORKID"},
{ScriptError::DIV_BY_ZERO, "DIV_BY_ZERO"},
{ScriptError::MOD_BY_ZERO, "MOD_BY_ZERO"},
{ScriptError::INVALID_BITFIELD_SIZE, "BITFIELD_SIZE"},
{ScriptError::INVALID_BIT_RANGE, "BIT_RANGE"},
};
static std::string FormatScriptError(ScriptError err) {
for (const auto &se : script_errors) {
if (se.err == err) {
return se.name;
}
}
BOOST_ERROR("Unknown scripterror enumeration value, update script_errors "
"in script_tests.cpp.");
return "";
}
static ScriptError ParseScriptError(const std::string &name) {
for (const auto &se : script_errors) {
if (se.name == name) {
return se.err;
}
}
BOOST_ERROR("Unknown scripterror \"" << name << "\" in test description");
return ScriptError::UNKNOWN;
}
BOOST_FIXTURE_TEST_SUITE(script_tests, BasicTestingSetup)
static void DoTest(const CScript &scriptPubKey, const CScript &scriptSig,
uint32_t flags, const std::string &message,
ScriptError scriptError, const Amount nValue) {
bool expect = (scriptError == ScriptError::OK);
if (flags & SCRIPT_VERIFY_CLEANSTACK) {
flags |= SCRIPT_VERIFY_P2SH;
}
ScriptError err;
const CTransaction txCredit{
BuildCreditingTransaction(scriptPubKey, nValue)};
CMutableTransaction tx = BuildSpendingTransaction(scriptSig, txCredit);
CMutableTransaction tx2 = tx;
BOOST_CHECK_MESSAGE(VerifyScript(scriptSig, scriptPubKey, flags,
MutableTransactionSignatureChecker(
&tx, 0, txCredit.vout[0].nValue),
&err) == expect,
message);
BOOST_CHECK_MESSAGE(err == scriptError, FormatScriptError(err) + " where " +
FormatScriptError(scriptError) +
" expected: " + message);
// Verify that removing flags from a passing test or adding flags to a
// failing test does not change the result, except for some special flags.
for (int i = 0; i < 16; ++i) {
uint32_t extra_flags = InsecureRandBits(32);
// Some flags are not purely-restrictive and thus we can't assume
// anything about what happens when they are flipped. Keep them as-is.
extra_flags &=
~(SCRIPT_ENABLE_SIGHASH_FORKID | SCRIPT_ENABLE_REPLAY_PROTECTION |
SCRIPT_ENABLE_SCHNORR_MULTISIG);
uint32_t combined_flags =
expect ? (flags & ~extra_flags) : (flags | extra_flags);
// Weed out invalid flag combinations.
if (combined_flags & SCRIPT_VERIFY_CLEANSTACK) {
combined_flags |= SCRIPT_VERIFY_P2SH;
}
BOOST_CHECK_MESSAGE(VerifyScript(scriptSig, scriptPubKey,
combined_flags,
MutableTransactionSignatureChecker(
&tx, 0, txCredit.vout[0].nValue),
&err) == expect,
message + strprintf(" (with %s flags %08x)",
expect ? "removed" : "added",
combined_flags ^ flags));
}
#if defined(HAVE_CONSENSUS_LIB)
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << tx2;
uint32_t libconsensus_flags =
flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL;
if (libconsensus_flags == flags) {
if (flags & bitcoinconsensus_SCRIPT_ENABLE_SIGHASH_FORKID) {
BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(
scriptPubKey.data(), scriptPubKey.size(),
txCredit.vout[0].nValue / SATOSHI,
- stream.data(), stream.size(), 0,
+ UCharCast(stream.data()), stream.size(), 0,
libconsensus_flags, nullptr) == expect,
message);
} else {
BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(
scriptPubKey.data(), scriptPubKey.size(), 0,
- stream.data(), stream.size(), 0,
+ UCharCast(stream.data()), stream.size(), 0,
libconsensus_flags, nullptr) == expect,
message);
BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(
scriptPubKey.data(), scriptPubKey.size(),
- stream.data(), stream.size(), 0,
+ UCharCast(stream.data()), stream.size(), 0,
libconsensus_flags, nullptr) == expect,
message);
}
}
#endif
}
namespace {
const uint8_t vchKey0[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
const uint8_t vchKey1[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0};
const uint8_t vchKey2[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0};
struct KeyData {
CKey key0, key0C, key1, key1C, key2, key2C;
CPubKey pubkey0, pubkey0C, pubkey0H;
CPubKey pubkey1, pubkey1C;
CPubKey pubkey2, pubkey2C;
KeyData() {
key0.Set(vchKey0, vchKey0 + 32, false);
key0C.Set(vchKey0, vchKey0 + 32, true);
pubkey0 = key0.GetPubKey();
pubkey0H = key0.GetPubKey();
pubkey0C = key0C.GetPubKey();
*const_cast<uint8_t *>(pubkey0H.data()) = 0x06 | (pubkey0H[64] & 1);
key1.Set(vchKey1, vchKey1 + 32, false);
key1C.Set(vchKey1, vchKey1 + 32, true);
pubkey1 = key1.GetPubKey();
pubkey1C = key1C.GetPubKey();
key2.Set(vchKey2, vchKey2 + 32, false);
key2C.Set(vchKey2, vchKey2 + 32, true);
pubkey2 = key2.GetPubKey();
pubkey2C = key2C.GetPubKey();
}
};
class TestBuilder {
private:
//! Actually executed script
CScript script;
//! The P2SH redeemscript
CScript redeemscript;
CTransactionRef creditTx;
CMutableTransaction spendTx;
bool havePush;
std::vector<uint8_t> push;
std::string comment;
uint32_t flags;
ScriptError scriptError;
Amount nValue;
void DoPush() {
if (havePush) {
spendTx.vin[0].scriptSig << push;
havePush = false;
}
}
void DoPush(const std::vector<uint8_t> &data) {
DoPush();
push = data;
havePush = true;
}
std::vector<uint8_t> DoSignECDSA(const CKey &key, const uint256 &hash,
unsigned int lenR = 32,
unsigned int lenS = 32) const {
std::vector<uint8_t> vchSig, r, s;
uint32_t iter = 0;
do {
key.SignECDSA(hash, vchSig, false, iter++);
if ((lenS == 33) != (vchSig[5 + vchSig[3]] == 33)) {
NegateSignatureS(vchSig);
}
r = std::vector<uint8_t>(vchSig.begin() + 4,
vchSig.begin() + 4 + vchSig[3]);
s = std::vector<uint8_t>(vchSig.begin() + 6 + vchSig[3],
vchSig.begin() + 6 + vchSig[3] +
vchSig[5 + vchSig[3]]);
} while (lenR != r.size() || lenS != s.size());
return vchSig;
}
std::vector<uint8_t> DoSignSchnorr(const CKey &key,
const uint256 &hash) const {
std::vector<uint8_t> vchSig;
// no need to iterate for size; schnorrs are always same size.
key.SignSchnorr(hash, vchSig);
return vchSig;
}
public:
TestBuilder(const CScript &script_, const std::string &comment_,
uint32_t flags_, bool P2SH = false,
Amount nValue_ = Amount::zero())
: script(script_), havePush(false), comment(comment_), flags(flags_),
scriptError(ScriptError::OK), nValue(nValue_) {
CScript scriptPubKey = script;
if (P2SH) {
redeemscript = scriptPubKey;
scriptPubKey = CScript()
<< OP_HASH160
<< ToByteVector(CScriptID(redeemscript)) << OP_EQUAL;
}
creditTx =
MakeTransactionRef(BuildCreditingTransaction(scriptPubKey, nValue));
spendTx = BuildSpendingTransaction(CScript(), *creditTx);
}
TestBuilder &SetScriptError(ScriptError err) {
scriptError = err;
return *this;
}
TestBuilder &Opcode(const opcodetype &_op) {
DoPush();
spendTx.vin[0].scriptSig << _op;
return *this;
}
TestBuilder &Num(int num) {
DoPush();
spendTx.vin[0].scriptSig << num;
return *this;
}
TestBuilder &Push(const std::string &hex) {
DoPush(ParseHex(hex));
return *this;
}
TestBuilder &Push(const uint256 &hash) {
DoPush(ToByteVector(hash));
return *this;
}
TestBuilder &Push(const CScript &_script) {
DoPush(std::vector<uint8_t>(_script.begin(), _script.end()));
return *this;
}
TestBuilder &
PushSigECDSA(const CKey &key, SigHashType sigHashType = SigHashType(),
unsigned int lenR = 32, unsigned int lenS = 32,
Amount amount = Amount::zero(),
uint32_t sigFlags = SCRIPT_ENABLE_SIGHASH_FORKID) {
uint256 hash = SignatureHash(script, CTransaction(spendTx), 0,
sigHashType, amount, nullptr, sigFlags);
std::vector<uint8_t> vchSig = DoSignECDSA(key, hash, lenR, lenS);
vchSig.push_back(static_cast<uint8_t>(sigHashType.getRawSigHashType()));
DoPush(vchSig);
return *this;
}
TestBuilder &
PushSigSchnorr(const CKey &key, SigHashType sigHashType = SigHashType(),
Amount amount = Amount::zero(),
uint32_t sigFlags = SCRIPT_ENABLE_SIGHASH_FORKID) {
uint256 hash = SignatureHash(script, CTransaction(spendTx), 0,
sigHashType, amount, nullptr, sigFlags);
std::vector<uint8_t> vchSig = DoSignSchnorr(key, hash);
vchSig.push_back(static_cast<uint8_t>(sigHashType.getRawSigHashType()));
DoPush(vchSig);
return *this;
}
TestBuilder &PushDataSigECDSA(const CKey &key,
const std::vector<uint8_t> &data,
unsigned int lenR = 32,
unsigned int lenS = 32) {
std::vector<uint8_t> vchHash(32);
CSHA256().Write(data.data(), data.size()).Finalize(vchHash.data());
DoPush(DoSignECDSA(key, uint256(vchHash), lenR, lenS));
return *this;
}
TestBuilder &PushDataSigSchnorr(const CKey &key,
const std::vector<uint8_t> &data) {
std::vector<uint8_t> vchHash(32);
CSHA256().Write(data.data(), data.size()).Finalize(vchHash.data());
DoPush(DoSignSchnorr(key, uint256(vchHash)));
return *this;
}
TestBuilder &PushECDSARecoveredPubKey(
const std::vector<uint8_t> &rdata, const std::vector<uint8_t> &sdata,
SigHashType sigHashType = SigHashType(), Amount amount = Amount::zero(),
uint32_t sigFlags = SCRIPT_ENABLE_SIGHASH_FORKID) {
// This calculates a pubkey to verify with a given ECDSA transaction
// signature.
uint256 hash = SignatureHash(script, CTransaction(spendTx), 0,
sigHashType, amount, nullptr, sigFlags);
assert(rdata.size() <= 32);
assert(sdata.size() <= 32);
// Our strategy: make a 'key recovery' signature, and just try all the
// recovery IDs. If none of them work then this means the 'r' value
// doesn't have any corresponding point, and the caller should pick a
// different r.
std::vector<uint8_t> vchSig(65, 0);
std::copy(rdata.begin(), rdata.end(),
vchSig.begin() + (33 - rdata.size()));
std::copy(sdata.begin(), sdata.end(),
vchSig.begin() + (65 - sdata.size()));
CPubKey key;
for (uint8_t recid : {0, 1, 2, 3}) {
vchSig[0] = 31 + recid;
if (key.RecoverCompact(hash, vchSig)) {
// found a match
break;
}
}
if (!key.IsValid()) {
throw std::runtime_error(
std::string("Could not generate pubkey for ") + HexStr(rdata));
}
std::vector<uint8_t> vchKey(key.begin(), key.end());
DoPush(vchKey);
return *this;
}
TestBuilder &
PushECDSASigFromParts(const std::vector<uint8_t> &rdata,
const std::vector<uint8_t> &sdata,
SigHashType sigHashType = SigHashType()) {
// Constructs a DER signature out of variable-length r and s arrays &
// adds hashtype byte.
assert(rdata.size() <= 32);
assert(sdata.size() <= 32);
assert(rdata.size() > 0);
assert(sdata.size() > 0);
assert(rdata[0] != 0);
assert(sdata[0] != 0);
std::vector<uint8_t> vchSig{0x30, 0x00, 0x02};
if (rdata[0] & 0x80) {
vchSig.push_back(rdata.size() + 1);
vchSig.push_back(0);
vchSig.insert(vchSig.end(), rdata.begin(), rdata.end());
} else {
vchSig.push_back(rdata.size());
vchSig.insert(vchSig.end(), rdata.begin(), rdata.end());
}
vchSig.push_back(0x02);
if (sdata[0] & 0x80) {
vchSig.push_back(sdata.size() + 1);
vchSig.push_back(0);
vchSig.insert(vchSig.end(), sdata.begin(), sdata.end());
} else {
vchSig.push_back(sdata.size());
vchSig.insert(vchSig.end(), sdata.begin(), sdata.end());
}
vchSig[1] = vchSig.size() - 2;
vchSig.push_back(static_cast<uint8_t>(sigHashType.getRawSigHashType()));
DoPush(vchSig);
return *this;
}
TestBuilder &Push(const CPubKey &pubkey) {
DoPush(std::vector<uint8_t>(pubkey.begin(), pubkey.end()));
return *this;
}
TestBuilder &PushRedeem() {
DoPush(std::vector<uint8_t>(redeemscript.begin(), redeemscript.end()));
return *this;
}
TestBuilder &EditPush(unsigned int pos, const std::string &hexin,
const std::string &hexout) {
assert(havePush);
std::vector<uint8_t> datain = ParseHex(hexin);
std::vector<uint8_t> dataout = ParseHex(hexout);
assert(pos + datain.size() <= push.size());
BOOST_CHECK_MESSAGE(
std::vector<uint8_t>(push.begin() + pos,
push.begin() + pos + datain.size()) == datain,
comment);
push.erase(push.begin() + pos, push.begin() + pos + datain.size());
push.insert(push.begin() + pos, dataout.begin(), dataout.end());
return *this;
}
TestBuilder &DamagePush(unsigned int pos) {
assert(havePush);
assert(pos < push.size());
push[pos] ^= 1;
return *this;
}
TestBuilder &Test() {
// Make a copy so we can rollback the push.
TestBuilder copy = *this;
DoPush();
DoTest(creditTx->vout[0].scriptPubKey, spendTx.vin[0].scriptSig, flags,
comment, scriptError, nValue);
*this = copy;
return *this;
}
UniValue GetJSON() {
DoPush();
UniValue array(UniValue::VARR);
if (nValue != Amount::zero()) {
UniValue amount(UniValue::VARR);
amount.push_back(nValue);
array.push_back(amount);
}
array.push_back(FormatScript(spendTx.vin[0].scriptSig));
array.push_back(FormatScript(creditTx->vout[0].scriptPubKey));
array.push_back(FormatScriptFlags(flags));
array.push_back(FormatScriptError(scriptError));
array.push_back(comment);
return array;
}
std::string GetComment() const { return comment; }
};
std::string JSONPrettyPrint(const UniValue &univalue) {
std::string ret = univalue.write(4);
// Workaround for libunivalue pretty printer, which puts a space between
// commas and newlines
size_t pos = 0;
while ((pos = ret.find(" \n", pos)) != std::string::npos) {
ret.replace(pos, 2, "\n");
pos++;
}
return ret;
}
} // namespace
BOOST_AUTO_TEST_CASE(script_build) {
const KeyData keys;
std::vector<TestBuilder> tests;
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK", 0)
.PushSigECDSA(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK, bad sig", 0)
.PushSigECDSA(keys.key0)
.DamagePush(10)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey1C.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH", 0)
.PushSigECDSA(keys.key1)
.Push(keys.pubkey1C));
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey2C.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH, bad pubkey", 0)
.PushSigECDSA(keys.key2)
.Push(keys.pubkey2C)
.DamagePush(5)
.SetScriptError(ScriptError::EQUALVERIFY));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay", 0)
.PushSigECDSA(keys.key1, SigHashType().withAnyoneCanPay()));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay marked with normal hashtype", 0)
.PushSigECDSA(keys.key1, SigHashType().withAnyoneCanPay())
.EditPush(70, "81", "01")
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK)", SCRIPT_VERIFY_P2SH, true)
.PushSigECDSA(keys.key0)
.PushRedeem());
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK), bad redeemscript", SCRIPT_VERIFY_P2SH, true)
.PushSigECDSA(keys.key0)
.PushRedeem()
.DamagePush(10)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey0.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH)", SCRIPT_VERIFY_P2SH, true)
.PushSigECDSA(keys.key0)
.Push(keys.pubkey0)
.PushRedeem());
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey1.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig but no VERIFY_P2SH", 0,
true)
.PushSigECDSA(keys.key0)
.DamagePush(10)
.PushRedeem());
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey1.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig", SCRIPT_VERIFY_P2SH,
true)
.PushSigECDSA(keys.key0)
.DamagePush(10)
.PushRedeem()
.SetScriptError(ScriptError::EQUALVERIFY));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"3-of-3", 0)
.Num(0)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.PushSigECDSA(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"3-of-3, 2 sigs", 0)
.Num(0)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.Num(0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"P2SH(2-of-3)", SCRIPT_VERIFY_P2SH, true)
.Num(0)
.PushSigECDSA(keys.key1)
.PushSigECDSA(keys.key2)
.PushRedeem());
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"P2SH(2-of-3), 1 sig", SCRIPT_VERIFY_P2SH, true)
.Num(0)
.PushSigECDSA(keys.key1)
.Num(0)
.PushRedeem()
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding but no DERSIG", 0)
.PushSigECDSA(keys.key1, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000"));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding", SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key1, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding but no DERSIG", 0)
.PushSigECDSA(keys.key1)
.EditPush(1, "44", "45")
.EditPush(37, "20", "2100"));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding", SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key1)
.EditPush(1, "44", "45")
.EditPush(37, "20", "2100")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding but no DERSIG", 0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220"));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding", SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding but no DERSIG", 0)
.PushSigECDSA(keys.key2, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000")
.DamagePush(10));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C)
<< OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding",
SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key2, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000")
.DamagePush(10)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript()
<< ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding but no DERSIG", 0)
.PushSigECDSA(keys.key2, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000")
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C)
<< OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding",
SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key2, SigHashType(), 31, 32)
.EditPush(1, "43021F", "44022000")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 1, without DERSIG", 0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220"));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 1, with DERSIG", SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 2, without DERSIG", 0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 2, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 3, without DERSIG", 0)
.Num(0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 3, with DERSIG", SCRIPT_VERIFY_DERSIG)
.Num(0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 4, without DERSIG", 0)
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 4, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG << OP_NOT,
"BIP66 example 4, with DERSIG, non-null DER-compliant signature",
SCRIPT_VERIFY_DERSIG)
.Push("300602010102010101"));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 4, with DERSIG and NULLFAIL",
SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_NULLFAIL)
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 4, with DERSIG and NULLFAIL, "
"non-null DER-compliant signature",
SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_NULLFAIL)
.Push("300602010102010101")
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 5, without DERSIG", 0)
.Num(1)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"BIP66 example 5, with DERSIG", SCRIPT_VERIFY_DERSIG)
.Num(1)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 6, without DERSIG", 0)
.Num(1));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKSIG << OP_NOT,
"BIP66 example 6, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(1)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 7, without DERSIG", 0)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.PushSigECDSA(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 7, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.PushSigECDSA(keys.key2)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 8, without DERSIG", 0)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.PushSigECDSA(keys.key2)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 8, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.PushSigECDSA(keys.key2)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 9, without DERSIG", 0)
.Num(0)
.Num(0)
.PushSigECDSA(keys.key2, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 9, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.Num(0)
.PushSigECDSA(keys.key2, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 10, without DERSIG", 0)
.Num(0)
.Num(0)
.PushSigECDSA(keys.key2, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220"));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 10, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.Num(0)
.PushSigECDSA(keys.key2, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 11, without DERSIG", 0)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG,
"BIP66 example 11, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 12, without DERSIG", 0)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_2
<< OP_CHECKMULTISIG << OP_NOT,
"BIP66 example 12, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.Num(0)
.PushSigECDSA(keys.key1, SigHashType(), 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with multi-byte hashtype, without DERSIG", 0)
.PushSigECDSA(keys.key2)
.EditPush(70, "01", "0101"));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with multi-byte hashtype, with DERSIG",
SCRIPT_VERIFY_DERSIG)
.PushSigECDSA(keys.key2)
.EditPush(70, "01", "0101")
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S but no LOW_S", 0)
.PushSigECDSA(keys.key2, SigHashType(), 32, 33));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S", SCRIPT_VERIFY_LOW_S)
.PushSigECDSA(keys.key2, SigHashType(), 32, 33)
.SetScriptError(ScriptError::SIG_HIGH_S));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey but no STRICTENC", 0)
.PushSigECDSA(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey", SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key0, SigHashType())
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey but no STRICTENC",
0)
.PushSigECDSA(keys.key0)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey",
SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key0)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript()
<< ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey but no STRICTENC", 0)
.PushSigECDSA(keys.key0)
.DamagePush(10));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey",
SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key0)
.DamagePush(10)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0H)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"1-of-2 with the second 1 hybrid pubkey and no STRICTENC",
0)
.Num(0)
.PushSigECDSA(keys.key1));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0H)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"1-of-2 with the second 1 hybrid pubkey",
SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushSigECDSA(keys.key1));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey0H) << OP_2
<< OP_CHECKMULTISIG,
"1-of-2 with the first 1 hybrid pubkey",
SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushSigECDSA(keys.key1)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype but no STRICTENC", 0)
.PushSigECDSA(keys.key1, SigHashType(5)));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype", SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key1, SigHashType(5))
.SetScriptError(ScriptError::SIG_HASHTYPE));
// Generate P2PKH tests for invalid SigHashType
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey0.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH with invalid sighashtype", 0)
.PushSigECDSA(keys.key0, SigHashType(0x21), 32, 32,
Amount::zero(), 0)
.Push(keys.pubkey0));
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey0.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH with invalid sighashtype and STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key0, SigHashType(0x21), 32, 32,
Amount::zero(), SCRIPT_VERIFY_STRICTENC)
.Push(keys.pubkey0)
// Should fail for STRICTENC
.SetScriptError(ScriptError::SIG_HASHTYPE));
// Generate P2SH tests for invalid SigHashType
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2SH(P2PK) with invalid sighashtype", SCRIPT_VERIFY_P2SH,
true)
.PushSigECDSA(keys.key1, SigHashType(0x21))
.PushRedeem());
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2SH(P2PK) with invalid sighashtype and STRICTENC",
SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC, true)
.PushSigECDSA(keys.key1, SigHashType(0x21))
.PushRedeem()
// Should fail for STRICTENC
.SetScriptError(ScriptError::SIG_HASHTYPE));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype but no STRICTENC",
0)
.PushSigECDSA(keys.key1, SigHashType(5))
.DamagePush(10));
tests.push_back(
TestBuilder(CScript()
<< ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype",
SCRIPT_VERIFY_STRICTENC)
.PushSigECDSA(keys.key1, SigHashType(5))
.DamagePush(10)
.SetScriptError(ScriptError::SIG_HASHTYPE));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"3-of-3 with nonzero dummy", 0)
.Num(1)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.PushSigECDSA(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG << OP_NOT,
"3-of-3 NOT with invalid sig and nonzero dummy",
0)
.Num(1)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.PushSigECDSA(keys.key2)
.DamagePush(10));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"2-of-2 with two identical keys and sigs "
"pushed using OP_DUP but no SIGPUSHONLY",
0)
.Num(0)
.PushSigECDSA(keys.key1)
.Opcode(OP_DUP));
tests.push_back(
TestBuilder(
CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"2-of-2 with two identical keys and sigs pushed using OP_DUP",
SCRIPT_VERIFY_SIGPUSHONLY)
.Num(0)
.PushSigECDSA(keys.key1)
.Opcode(OP_DUP)
.SetScriptError(ScriptError::SIG_PUSHONLY));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2SH(P2PK) with non-push scriptSig but no P2SH or SIGPUSHONLY", 0,
true)
.PushSigECDSA(keys.key2)
.Opcode(OP_NOP8)
.PushRedeem());
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with non-push scriptSig but with P2SH validation", 0)
.PushSigECDSA(keys.key2)
.Opcode(OP_NOP8));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2SH(P2PK) with non-push scriptSig but no SIGPUSHONLY",
SCRIPT_VERIFY_P2SH, true)
.PushSigECDSA(keys.key2)
.Opcode(OP_NOP8)
.PushRedeem()
.SetScriptError(ScriptError::SIG_PUSHONLY));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2SH(P2PK) with non-push scriptSig but not P2SH",
SCRIPT_VERIFY_SIGPUSHONLY, true)
.PushSigECDSA(keys.key2)
.Opcode(OP_NOP8)
.PushRedeem()
.SetScriptError(ScriptError::SIG_PUSHONLY));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"2-of-2 with two identical keys and sigs pushed",
SCRIPT_VERIFY_SIGPUSHONLY)
.Num(0)
.PushSigECDSA(keys.key1)
.PushSigECDSA(keys.key1));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK with unnecessary input but no CLEANSTACK",
SCRIPT_VERIFY_P2SH)
.Num(11)
.PushSigECDSA(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK with unnecessary input",
SCRIPT_VERIFY_CLEANSTACK | SCRIPT_VERIFY_P2SH)
.Num(11)
.PushSigECDSA(keys.key0)
.SetScriptError(ScriptError::CLEANSTACK));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2SH with unnecessary input but no CLEANSTACK",
SCRIPT_VERIFY_P2SH, true)
.Num(11)
.PushSigECDSA(keys.key0)
.PushRedeem());
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2SH with unnecessary input",
SCRIPT_VERIFY_CLEANSTACK | SCRIPT_VERIFY_P2SH, true)
.Num(11)
.PushSigECDSA(keys.key0)
.PushRedeem()
.SetScriptError(ScriptError::CLEANSTACK));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2SH with CLEANSTACK",
SCRIPT_VERIFY_CLEANSTACK | SCRIPT_VERIFY_P2SH, true)
.PushSigECDSA(keys.key0)
.PushRedeem());
static const Amount TEST_AMOUNT(int64_t(12345000000000) * SATOSHI);
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK FORKID", SCRIPT_ENABLE_SIGHASH_FORKID, false,
TEST_AMOUNT)
.PushSigECDSA(keys.key0, SigHashType().withForkId(), 32, 32,
TEST_AMOUNT));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK INVALID AMOUNT", SCRIPT_ENABLE_SIGHASH_FORKID, false,
TEST_AMOUNT)
.PushSigECDSA(keys.key0, SigHashType().withForkId(), 32, 32,
TEST_AMOUNT + SATOSHI)
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK INVALID FORKID", SCRIPT_VERIFY_STRICTENC, false,
TEST_AMOUNT)
.PushSigECDSA(keys.key0, SigHashType().withForkId(), 32, 32,
TEST_AMOUNT)
.SetScriptError(ScriptError::ILLEGAL_FORKID));
// Test replay protection
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK REPLAY PROTECTED",
SCRIPT_ENABLE_SIGHASH_FORKID |
SCRIPT_ENABLE_REPLAY_PROTECTION,
false, TEST_AMOUNT)
.PushSigECDSA(keys.key0, SigHashType().withForkId(), 32, 32,
TEST_AMOUNT,
SCRIPT_ENABLE_SIGHASH_FORKID |
SCRIPT_ENABLE_REPLAY_PROTECTION));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK REPLAY PROTECTED",
SCRIPT_ENABLE_SIGHASH_FORKID |
SCRIPT_ENABLE_REPLAY_PROTECTION,
false, TEST_AMOUNT)
.PushSigECDSA(keys.key0, SigHashType().withForkId(), 32, 32,
TEST_AMOUNT, SCRIPT_ENABLE_SIGHASH_FORKID)
.SetScriptError(ScriptError::EVAL_FALSE));
// Test OP_CHECKDATASIG
const uint32_t checkdatasigflags =
SCRIPT_VERIFY_STRICTENC | SCRIPT_VERIFY_NULLFAIL;
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIG,
"Standard CHECKDATASIG", checkdatasigflags)
.PushDataSigECDSA(keys.key1, {})
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIG << OP_NOT,
"CHECKDATASIG with NULLFAIL flags",
checkdatasigflags)
.PushDataSigECDSA(keys.key1, {})
.Num(1)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIG << OP_NOT,
"CHECKDATASIG without NULLFAIL flags",
checkdatasigflags & ~SCRIPT_VERIFY_NULLFAIL)
.PushDataSigECDSA(keys.key1, {})
.Num(1));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIG << OP_NOT,
"CHECKDATASIG empty signature",
checkdatasigflags)
.Num(0)
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIG,
"CHECKDATASIG with High S but no Low S", checkdatasigflags)
.PushDataSigECDSA(keys.key1, {}, 32, 33)
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIG,
"CHECKDATASIG with High S",
checkdatasigflags | SCRIPT_VERIFY_LOW_S)
.PushDataSigECDSA(keys.key1, {}, 32, 33)
.Num(0)
.SetScriptError(ScriptError::SIG_HIGH_S));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIG,
"CHECKDATASIG with too little R padding but no DERSIG",
checkdatasigflags & ~SCRIPT_VERIFY_STRICTENC)
.PushDataSigECDSA(keys.key1, {}, 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIG,
"CHECKDATASIG with too little R padding", checkdatasigflags)
.PushDataSigECDSA(keys.key1, {}, 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKDATASIG,
"CHECKDATASIG with hybrid pubkey but no STRICTENC",
checkdatasigflags & ~SCRIPT_VERIFY_STRICTENC)
.PushDataSigECDSA(keys.key0, {})
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKDATASIG,
"CHECKDATASIG with hybrid pubkey", checkdatasigflags)
.PushDataSigECDSA(keys.key0, {})
.Num(0)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKDATASIG
<< OP_NOT,
"CHECKDATASIG with invalid hybrid pubkey but no STRICTENC",
0)
.PushDataSigECDSA(keys.key0, {})
.DamagePush(10)
.Num(0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKDATASIG,
"CHECKDATASIG with invalid hybrid pubkey",
checkdatasigflags)
.PushDataSigECDSA(keys.key0, {})
.DamagePush(10)
.Num(0)
.SetScriptError(ScriptError::PUBKEYTYPE));
// Test OP_CHECKDATASIGVERIFY
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"Standard CHECKDATASIGVERIFY",
checkdatasigflags)
.PushDataSigECDSA(keys.key1, {})
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY with NULLFAIL flags",
checkdatasigflags)
.PushDataSigECDSA(keys.key1, {})
.Num(1)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY without NULLFAIL flags",
checkdatasigflags & ~SCRIPT_VERIFY_NULLFAIL)
.PushDataSigECDSA(keys.key1, {})
.Num(1)
.SetScriptError(ScriptError::CHECKDATASIGVERIFY));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY empty signature",
checkdatasigflags)
.Num(0)
.Num(0)
.SetScriptError(ScriptError::CHECKDATASIGVERIFY));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIG with High S but no Low S",
checkdatasigflags)
.PushDataSigECDSA(keys.key1, {}, 32, 33)
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIG with High S",
checkdatasigflags | SCRIPT_VERIFY_LOW_S)
.PushDataSigECDSA(keys.key1, {}, 32, 33)
.Num(0)
.SetScriptError(ScriptError::SIG_HIGH_S));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKDATASIGVERIFY
<< OP_TRUE,
"CHECKDATASIGVERIFY with too little R padding but no DERSIG",
checkdatasigflags & ~SCRIPT_VERIFY_STRICTENC)
.PushDataSigECDSA(keys.key1, {}, 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY with too little R padding",
checkdatasigflags)
.PushDataSigECDSA(keys.key1, {}, 33, 32)
.EditPush(1, "45022100", "440220")
.Num(0)
.SetScriptError(ScriptError::SIG_DER));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY with hybrid pubkey but no STRICTENC",
checkdatasigflags & ~SCRIPT_VERIFY_STRICTENC)
.PushDataSigECDSA(keys.key0, {})
.Num(0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY with hybrid pubkey",
checkdatasigflags)
.PushDataSigECDSA(keys.key0, {})
.Num(0)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKDATASIGVERIFY
<< OP_TRUE,
"CHECKDATASIGVERIFY with invalid hybrid pubkey but no STRICTENC", 0)
.PushDataSigECDSA(keys.key0, {})
.DamagePush(10)
.Num(0)
.SetScriptError(ScriptError::CHECKDATASIGVERIFY));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H)
<< OP_CHECKDATASIGVERIFY << OP_TRUE,
"CHECKDATASIGVERIFY with invalid hybrid pubkey",
checkdatasigflags)
.PushDataSigECDSA(keys.key0, {})
.DamagePush(10)
.Num(0)
.SetScriptError(ScriptError::PUBKEYTYPE));
// Test all six CHECK*SIG* opcodes with Schnorr signatures.
// - STRICTENC flag on/off.
// - test with different key / mismatching key
// CHECKSIG and Schnorr
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"CHECKSIG Schnorr", 0)
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"CHECKSIG Schnorr w/ STRICTENC", SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"CHECKSIG Schnorr other key", SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key1));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIG << OP_NOT,
"CHECKSIG Schnorr mismatched key",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key1));
// CHECKSIGVERIFY and Schnorr
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIGVERIFY << OP_1,
"CHECKSIGVERIFY Schnorr", 0)
.PushSigSchnorr(keys.key0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIGVERIFY << OP_1,
"CHECKSIGVERIFY Schnorr w/ STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1)
<< OP_CHECKSIGVERIFY << OP_1,
"CHECKSIGVERIFY Schnorr other key",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key1));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIGVERIFY << OP_1,
"CHECKSIGVERIFY Schnorr mismatched key",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::CHECKSIGVERIFY));
// CHECKDATASIG and Schnorr
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIG,
"CHECKDATASIG Schnorr", 0)
.PushDataSigSchnorr(keys.key0, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIG,
"CHECKDATASIG Schnorr w/ STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key0, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIG,
"CHECKDATASIG Schnorr other key",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIG << OP_NOT,
"CHECKDATASIG Schnorr mismatched key",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {}));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIG,
"CHECKDATASIG Schnorr other message",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {1}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIG << OP_NOT,
"CHECKDATASIG Schnorr wrong message",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {1}));
// CHECKDATASIGVERIFY and Schnorr
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr", 0)
.PushDataSigSchnorr(keys.key0, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr w/ STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key0, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr other key",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey0)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr mismatched key",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {})
.SetScriptError(ScriptError::CHECKDATASIGVERIFY));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr other message",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {1}));
tests.push_back(TestBuilder(CScript() << OP_0 << ToByteVector(keys.pubkey1)
<< OP_CHECKDATASIGVERIFY << OP_1,
"CHECKDATASIGVERIFY Schnorr wrong message",
SCRIPT_VERIFY_STRICTENC)
.PushDataSigSchnorr(keys.key1, {1})
.SetScriptError(ScriptError::CHECKDATASIGVERIFY));
// CHECKMULTISIG 1-of-1 and Schnorr
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIG,
"CHECKMULTISIG Schnorr w/ no STRICTENC", 0)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIG,
"CHECKMULTISIG Schnorr w/ STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
// Test multisig with multiple Schnorr signatures
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"Schnorr 3-of-3", 0)
.Num(0)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"Schnorr-ECDSA-mixed 3-of-3", 0)
.Num(0)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_BADLENGTH));
// CHECKMULTISIGVERIFY 1-of-1 and Schnorr
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0) << OP_1
<< OP_CHECKMULTISIGVERIFY << OP_1,
"CHECKMULTISIGVERIFY Schnorr w/ no STRICTENC", 0)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(TestBuilder(CScript()
<< OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIGVERIFY << OP_1,
"CHECKMULTISIGVERIFY Schnorr w/ STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
// Test damaged Schnorr signatures
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIG << OP_NOT,
"Schnorr P2PK, bad sig", 0)
.PushSigSchnorr(keys.key0)
.DamagePush(10));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIG << OP_NOT,
"Schnorr P2PK, bad sig STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0)
.DamagePush(10));
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0)
<< OP_CHECKSIG << OP_NOT,
"Schnorr P2PK, bad sig NULLFAIL",
SCRIPT_VERIFY_NULLFAIL)
.PushSigSchnorr(keys.key0)
.DamagePush(10)
.SetScriptError(ScriptError::SIG_NULLFAIL));
// Make sure P2PKH works with Schnorr
tests.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey1C.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"Schnorr P2PKH", 0)
.PushSigSchnorr(keys.key1)
.Push(keys.pubkey1C));
// Test of different pubkey encodings with Schnorr
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"Schnorr P2PK with compressed pubkey",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0, SigHashType()));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"Schnorr P2PK with uncompressed pubkey",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0, SigHashType()));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"Schnorr P2PK with hybrid pubkey", SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0, SigHashType())
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"Schnorr P2PK with hybrid pubkey but no STRICTENC", 0)
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(
CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"Schnorr P2PK NOT with damaged hybrid pubkey but no STRICTENC", 0)
.PushSigSchnorr(keys.key0)
.DamagePush(10));
// Ensure sighash types get checked with schnorr
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK with undefined basehashtype and STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key1, SigHashType(5))
.SetScriptError(ScriptError::SIG_HASHTYPE));
tests.push_back(
TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey0.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"Schnorr P2PKH with invalid sighashtype but no STRICTENC",
0)
.PushSigSchnorr(keys.key0, SigHashType(0x21), Amount::zero(), 0)
.Push(keys.pubkey0));
tests.push_back(
TestBuilder(CScript() << OP_DUP << OP_HASH160
<< ToByteVector(keys.pubkey0.GetID())
<< OP_EQUALVERIFY << OP_CHECKSIG,
"Schnorr P2PKH with invalid sighashtype and STRICTENC",
SCRIPT_VERIFY_STRICTENC)
.PushSigSchnorr(keys.key0, SigHashType(0x21), Amount::zero(),
SCRIPT_VERIFY_STRICTENC)
.Push(keys.pubkey0)
.SetScriptError(ScriptError::SIG_HASHTYPE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK anyonecanpay", 0)
.PushSigSchnorr(keys.key1, SigHashType().withAnyoneCanPay()));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK anyonecanpay marked with normal hashtype", 0)
.PushSigSchnorr(keys.key1, SigHashType().withAnyoneCanPay())
.EditPush(64, "81", "01")
.SetScriptError(ScriptError::EVAL_FALSE));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK with forkID",
SCRIPT_VERIFY_STRICTENC | SCRIPT_ENABLE_SIGHASH_FORKID)
.PushSigSchnorr(keys.key1, SigHashType().withForkId()));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK with non-forkID sig",
SCRIPT_VERIFY_STRICTENC | SCRIPT_ENABLE_SIGHASH_FORKID)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::MUST_USE_FORKID));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"Schnorr P2PK with cheater forkID bit",
SCRIPT_VERIFY_STRICTENC | SCRIPT_ENABLE_SIGHASH_FORKID)
.PushSigSchnorr(keys.key1)
.EditPush(64, "01", "41")
.SetScriptError(ScriptError::EVAL_FALSE));
{
// There is a point with x = 7 + order but not x = 7.
// Since r = x mod order, this can have valid signatures, as
// demonstrated here.
std::vector<uint8_t> rdata{7};
std::vector<uint8_t> sdata{7};
tests.push_back(TestBuilder(CScript() << OP_CHECKSIG,
"recovered-pubkey CHECKSIG 7,7 (wrapped r)",
SCRIPT_VERIFY_STRICTENC)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata));
}
{
// Arbitrary r value that is 29 bytes long, to give room for varying
// the length of s:
std::vector<uint8_t> rdata = ParseHex(
"776879206d757374207765207375666665722077697468206563647361");
std::vector<uint8_t> sdata(58 - rdata.size() - 1, 33);
tests.push_back(
TestBuilder(CScript() << OP_CHECKSIG,
"recovered-pubkey CHECKSIG with 63-byte DER",
SCRIPT_VERIFY_STRICTENC)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata));
}
{
// 64-byte ECDSA sig does not work.
std::vector<uint8_t> rdata = ParseHex(
"776879206d757374207765207375666665722077697468206563647361");
std::vector<uint8_t> sdata(58 - rdata.size(), 33);
tests.push_back(
TestBuilder(CScript() << OP_CHECKSIG,
"recovered-pubkey CHECKSIG with 64-byte DER",
SCRIPT_VERIFY_STRICTENC)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata)
.SetScriptError(ScriptError::EVAL_FALSE));
}
{
std::vector<uint8_t> rdata = ParseHex(
"776879206d757374207765207375666665722077697468206563647361");
std::vector<uint8_t> sdata(58 - rdata.size() + 1, 33);
tests.push_back(
TestBuilder(CScript() << OP_CHECKSIG,
"recovered-pubkey CHECKSIG with 65-byte DER",
SCRIPT_VERIFY_STRICTENC)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata));
}
{
// Try 64-byte ECDSA sig again, in multisig.
std::vector<uint8_t> rdata = ParseHex(
"776879206d757374207765207375666665722077697468206563647361");
std::vector<uint8_t> sdata(58 - rdata.size(), 33);
tests.push_back(
TestBuilder(CScript()
<< OP_1 << OP_SWAP << OP_1 << OP_CHECKMULTISIG,
"recovered-pubkey CHECKMULTISIG with 64-byte DER",
SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata)
.SetScriptError(ScriptError::SIG_BADLENGTH));
}
// New-multisig tests follow. New multisig will activate with a bunch of
// related flags active from other upgrades, so we do tests with this group
// of flags turned on:
uint32_t newmultisigflags =
SCRIPT_ENABLE_SCHNORR_MULTISIG | SCRIPT_VERIFY_NULLFAIL |
SCRIPT_VERIFY_MINIMALDATA | SCRIPT_VERIFY_STRICTENC;
// Tests of the legacy multisig (null dummy element), but with the
// SCRIPT_ENABLE_SCHNORR_MULTISIG flag turned on. These show the desired
// legacy behaviour that should be retained.
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0H)
<< ToByteVector(keys.pubkey1C) << OP_2
<< OP_CHECKMULTISIG,
"1-of-2 with unchecked hybrid pubkey with SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushSigECDSA(keys.key1));
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey0H) << OP_2
<< OP_CHECKMULTISIG,
"1-of-2 with checked hybrid pubkey with SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushSigECDSA(keys.key1)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(
CScript() << OP_1 << ToByteVector(keys.pubkey0) << OP_1
<< OP_CHECKMULTISIG,
"Legacy 1-of-1 Schnorr w/ SCHNORR_MULTISIG but no STRICTENC",
newmultisigflags & ~SCRIPT_VERIFY_STRICTENC)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIG,
"Legacy 1-of-1 Schnorr w/ SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"Legacy 3-of-3 Schnorr w/ SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(
TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"Legacy 3-of-3 mixed Schnorr-ECDSA w/ SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushSigECDSA(keys.key0)
.PushSigECDSA(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_BADLENGTH));
{
// Try valid 64-byte ECDSA sig in multisig.
std::vector<uint8_t> rdata = ParseHex(
"776879206d757374207765207375666665722077697468206563647361");
std::vector<uint8_t> sdata(58 - rdata.size(), 33);
tests.push_back(TestBuilder(CScript() << OP_1 << OP_SWAP << OP_1
<< OP_CHECKMULTISIG,
"recovered-pubkey CHECKMULTISIG with "
"64-byte DER w/ SCHNORR_MULTISIG",
newmultisigflags)
.Num(0)
.PushECDSASigFromParts(rdata, sdata)
.PushECDSARecoveredPubKey(rdata, sdata)
.SetScriptError(ScriptError::SIG_BADLENGTH));
}
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG << OP_NOT,
"CHECKMULTISIG 2-of-3 w/ SCHNORR_MULTISIG "
"(return-false still valid via legacy mode)",
newmultisigflags)
.Num(0)
.Num(0)
.Num(0));
tests.push_back(TestBuilder(CScript() << OP_0 << OP_0 << OP_CHECKMULTISIG,
"CHECKMULTISIG 0-of-0 w/ SCHNORR_MULTISIG",
newmultisigflags)
.Num(0));
tests.push_back(
TestBuilder(CScript() << OP_0 << ToByteVector(ParseHex("BEEF")) << OP_1
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 0-of-1 w/ SCHNORR_MULTISIG, null dummy",
newmultisigflags)
.Num(0));
// Tests of schnorr checkmultisig actually turned on (flag on & dummy
// element is not null).
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-1 Schnorr",
newmultisigflags)
.Num(0b1)
.PushSigSchnorr(keys.key0));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0)
<< OP_1 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-1 Schnorr, nonminimal bits",
newmultisigflags)
.Push("0100")
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::INVALID_BITFIELD_SIZE));
tests.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 3-of-3 Schnorr",
newmultisigflags)
.Num(0b111)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_4 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 4-of-3 Schnorr",
newmultisigflags)
.Num(0b1111)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_COUNT));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (110) Schnorr",
newmultisigflags)
.Num(0b110)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (101) Schnorr",
newmultisigflags)
.Num(0b101)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key2));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (011) Schnorr",
newmultisigflags)
.Num(0b011)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, mismatched bits Schnorr",
newmultisigflags)
.Num(0b011)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, all bits set",
newmultisigflags)
.Num(0b111)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::INVALID_BIT_COUNT));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, extra high bit set",
newmultisigflags)
.Num(0b1110)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::INVALID_BIT_RANGE));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, too high bit set",
newmultisigflags)
.Num(0b1010)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::INVALID_BIT_RANGE));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, too few bits set",
newmultisigflags)
.Num(0b010)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::INVALID_BIT_COUNT));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, with no bits set "
"(attempt to malleate return-false)",
newmultisigflags)
.Push("00")
.Num(0)
.Num(0)
.SetScriptError(ScriptError::INVALID_BIT_COUNT));
tests.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG null dummy with schnorr sigs "
"(with SCHNORR_MULTISIG on)",
newmultisigflags)
.Num(0)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::SIG_BADLENGTH));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 Schnorr, misordered signatures",
newmultisigflags)
.Num(0b011)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(
TestBuilder(
CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C) << OP_DUP << OP_2DUP
<< OP_2DUP << ToByteVector(keys.pubkey2C) << OP_8
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-8 Schnorr, right way to represent 0b10000001",
newmultisigflags)
.Num(-1)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key2));
tests.push_back(
TestBuilder(
CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C) << OP_DUP << OP_2DUP
<< OP_2DUP << ToByteVector(keys.pubkey2C) << OP_8
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-8 Schnorr, wrong way to represent 0b10000001",
newmultisigflags)
.Num(0b10000001)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::INVALID_BITFIELD_SIZE));
tests.push_back(
TestBuilder(CScript() << OP_1 << -1 << -1 << -1 << -1 << -1
<< ToByteVector(keys.pubkey0C) << -1 << 7
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-7 Schnorr, second-to-last key",
newmultisigflags)
.Push("20")
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << OP_1 << -1 << -1 << -1 << -1 << -1 << -1 << -1
<< -1 << -1 << -1 << ToByteVector(keys.pubkey0C)
<< -1 << -1 << 13 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-13 Schnorr, third-to-last key",
newmultisigflags)
.Push("0004")
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript()
<< OP_OVER << OP_DUP << OP_DUP << OP_2DUP << OP_3DUP
<< OP_3DUP << OP_3DUP << OP_3DUP << 20
<< ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_OVER << OP_DUP
<< OP_DUP << OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 20 << OP_CHECKMULTISIG,
"CHECKMULTISIG 20-of-20 Schnorr", newmultisigflags)
.Push("ffff0f")
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2));
tests.push_back(
TestBuilder(
CScript() << OP_OVER << OP_DUP << OP_DUP << OP_2DUP << OP_3DUP
<< OP_3DUP << OP_3DUP << OP_3DUP << 20
<< ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_OVER << OP_DUP
<< OP_DUP << OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 20 << OP_CHECKMULTISIG,
"CHECKMULTISIG 20-of-20 Schnorr, checkbits +1", newmultisigflags)
.Push("000010")
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::INVALID_BIT_RANGE));
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0C) << OP_DUP
<< ToByteVector(keys.pubkey1C) << OP_3DUP
<< OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 21 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-21 Schnorr", newmultisigflags)
.Push("000010")
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::PUBKEY_COUNT));
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< OP_DUP << OP_2DUP << OP_3DUP
<< OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 20 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, first key",
newmultisigflags)
.Push("010000")
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(
CScript() << OP_1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C) << OP_DUP << OP_2DUP
<< OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< 20 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, first key, wrong endianness",
newmultisigflags)
.Push("000001")
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(
TestBuilder(
CScript() << OP_1 << ToByteVector(keys.pubkey0C) << OP_2DUP
<< OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 20 << OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, truncating zeros not allowed",
newmultisigflags)
.Num(1)
.PushSigSchnorr(keys.key0)
.SetScriptError(ScriptError::INVALID_BITFIELD_SIZE));
tests.push_back(
TestBuilder(CScript()
<< OP_1 << ToByteVector(keys.pubkey0C) << OP_DUP
<< OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << ToByteVector(keys.pubkey1C) << 20
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, last key", newmultisigflags)
.Push("000008")
.PushSigSchnorr(keys.key1));
tests.push_back(
TestBuilder(CScript()
<< OP_1 << ToByteVector(keys.pubkey0C) << OP_DUP
<< OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << ToByteVector(keys.pubkey1C) << 20
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, last key, wrong endianness",
newmultisigflags)
.Push("080000")
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::SIG_NULLFAIL));
tests.push_back(TestBuilder(CScript()
<< OP_1 << ToByteVector(keys.pubkey0C)
<< OP_DUP << OP_2DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << OP_3DUP << OP_3DUP
<< ToByteVector(keys.pubkey1C) << 20
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-20 Schnorr, last key, "
"truncating zeros not allowed",
newmultisigflags)
.Push("0800")
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::INVALID_BITFIELD_SIZE));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(ParseHex("BEEF"))
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (110) Schnorr, first key garbage",
newmultisigflags)
.Num(0b110)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(ParseHex("BEEF"))
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (011) Schnorr, first key garbage",
newmultisigflags)
.Num(0b011)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(ParseHex("BEEF")) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (011) Schnorr, last key garbage",
newmultisigflags)
.Num(0b011)
.PushSigSchnorr(keys.key0)
.PushSigSchnorr(keys.key1));
tests.push_back(
TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(ParseHex("BEEF")) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 2-of-3 (110) Schnorr, last key garbage",
newmultisigflags)
.Num(0b110)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::PUBKEYTYPE));
tests.push_back(
TestBuilder(
CScript() << OP_0 << OP_0 << OP_CHECKMULTISIG,
"CHECKMULTISIG 0-of-0 with SCHNORR_MULTISIG, dummy must be null",
newmultisigflags)
.Push("00")
.SetScriptError(ScriptError::INVALID_BITFIELD_SIZE));
tests.push_back(TestBuilder(CScript()
<< OP_0 << ToByteVector(ParseHex("BEEF"))
<< OP_1 << OP_CHECKMULTISIG,
"CHECKMULTISIG 0-of-1 with SCHNORR_MULTISIG, "
"dummy need not be null",
newmultisigflags)
.Push("00"));
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0) << OP_1
<< OP_CHECKMULTISIGVERIFY << OP_1,
"OP_CHECKMULTISIGVERIFY Schnorr", newmultisigflags)
.Num(0b1)
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey0) << OP_1
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 1-of-1 ECDSA signature in Schnorr mode",
newmultisigflags)
.Num(0b1)
.PushSigECDSA(keys.key0)
.SetScriptError(ScriptError::SIG_NONSCHNORR));
tests.push_back(
TestBuilder(
CScript() << OP_3 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << OP_3
<< OP_CHECKMULTISIG,
"CHECKMULTISIG 3-of-3 Schnorr with mixed-in ECDSA signature",
newmultisigflags)
.Num(0b111)
.PushSigECDSA(keys.key0)
.PushSigSchnorr(keys.key1)
.PushSigSchnorr(keys.key2)
.SetScriptError(ScriptError::SIG_NONSCHNORR));
// SigChecks tests follow. We want to primarily focus on behaviour with
// the modern set of (relevant) flags.
uint32_t sigchecksflags =
SCRIPT_ENABLE_SCHNORR_MULTISIG | SCRIPT_VERIFY_NULLFAIL |
SCRIPT_VERIFY_MINIMALDATA | SCRIPT_VERIFY_STRICTENC |
SCRIPT_VERIFY_INPUT_SIGCHECKS | SCRIPT_VERIFY_P2SH;
// First, try some important use cases that we want to make sure are
// supported but that have high density of sigchecks.
tests.push_back(TestBuilder(CScript() << 1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << 3
<< OP_CHECKMULTISIG,
"SigChecks on bare CHECKMULTISIG 1-of-3 ECDSA",
sigchecksflags)
.Num(0)
.PushSigECDSA(keys.key0));
tests.push_back(
TestBuilder(CScript() << 1 << ToByteVector(keys.pubkey0C) << -1 << -1
<< -1 << -1 << -1 << -1 << -1 << -1 << -1 << -1
<< -1 << -1 << -1 << -1 << -1 << -1 << -1 << -1
<< -1 << 20 << OP_CHECKMULTISIG,
"SigChecks on bare CHECKMULTISIG 1-of-20 Schnorr",
sigchecksflags)
.Push("010000")
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"SigChecks on P2PK Schnorr", sigchecksflags)
.PushSigSchnorr(keys.key0));
tests.push_back(
TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"SigChecks on P2PK ECDSA", sigchecksflags)
.PushSigECDSA(keys.key0));
tests.push_back(
TestBuilder(
CScript()
<< 1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C)
<< 15 << OP_CHECKMULTISIG,
"SigChecks on P2SH CHECKMULTISIG 1-of-15 ECDSA with compressed "
"keys",
sigchecksflags, true)
.Num(0)
.PushSigECDSA(keys.key0)
.PushRedeem());
tests.push_back(
TestBuilder(CScript()
<< ToByteVector(keys.pubkey0C) << 0 << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_OVER
<< OP_CHECKSIG << OP_OVER << OP_CHECKSIG << OP_DROP,
"Null signatures make no SigChecks (CHECKSIG)",
sigchecksflags, true)
.PushRedeem());
tests.push_back(
TestBuilder(CScript()
<< 0 << ToByteVector(keys.pubkey0C) << 0 << 0
<< OP_2OVER << OP_CHECKDATASIG << OP_2OVER
<< OP_CHECKDATASIG << OP_2OVER << OP_CHECKDATASIG
<< OP_2OVER << OP_CHECKDATASIG << OP_2OVER
<< OP_CHECKDATASIG << OP_2OVER << OP_CHECKDATASIG
<< OP_2OVER << OP_CHECKDATASIG << OP_2OVER
<< OP_CHECKDATASIG << OP_2OVER << OP_CHECKDATASIG
<< OP_2OVER << OP_CHECKDATASIG << OP_2OVER
<< OP_CHECKDATASIG << OP_2OVER << OP_CHECKDATASIG
<< OP_2OVER << OP_CHECKDATASIG << OP_2OVER
<< OP_CHECKDATASIG << OP_2OVER << OP_CHECKDATASIG
<< OP_2OVER << OP_CHECKDATASIG << OP_2DROP << OP_NIP,
"Null signatures make no SigChecks (CHECKDATASIG)",
sigchecksflags, true)
.PushRedeem());
// Note that the following test case is "legacy-only", there is no schnorr
// counterpart since schnorr mode does not permit any null signatures nor
// an incorrect popcount in checkbits.
tests.push_back(
TestBuilder(CScript()
<< OP_DUP << OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 16 << ToByteVector(keys.pubkey0C)
<< OP_DUP << OP_2DUP << OP_3DUP << OP_3DUP << OP_3DUP
<< OP_3DUP << 16 << OP_CHECKMULTISIG << OP_NOT,
"Null signatures make no SigChecks (CHECKMULTISIG)",
sigchecksflags, true)
.Num(0)
.Num(0)
.PushRedeem());
// Now some unusual use cases (some are unsupported behaviour)
tests.push_back(TestBuilder(CScript() << 1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C)
<< OP_DUP << 4 << OP_CHECKMULTISIG,
"SigChecks on bare CHECKMULTISIG 1-of-4 ECDSA",
sigchecksflags)
.Num(0)
.PushSigECDSA(keys.key0)
.SetScriptError(ScriptError::INPUT_SIGCHECKS));
tests.push_back(
TestBuilder(
CScript()
<< 1 << -1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << 15 << OP_CHECKMULTISIG,
"SigChecks on P2SH CHECKMULTISIG 1-of-15 ECDSA with a runt key",
sigchecksflags, true)
.Num(0)
.PushSigECDSA(keys.key0)
.PushRedeem()
.SetScriptError(ScriptError::INPUT_SIGCHECKS));
tests.push_back(
TestBuilder(
CScript()
<< 1 << -1 << ToByteVector(keys.pubkey0C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << ToByteVector(keys.pubkey1C)
<< ToByteVector(keys.pubkey2C) << 15 << OP_CHECKMULTISIG,
"SigChecks on P2SH CHECKMULTISIG 1-of-15 Schnorr with a runt key",
sigchecksflags, true)
.Push("0200")
.PushSigSchnorr(keys.key0)
.PushRedeem());
tests.push_back(TestBuilder(CScript() << 0 << -1 << -1 << -1 << -1 << -1
<< -1 << -1 << -1 << -1 << -1 << 10
<< OP_CHECKMULTISIG,
"Very short P2SH multisig 0-of-10, spent with "
"legacy mode (0 sigchecks)",
sigchecksflags, true)
.Num(0)
.PushRedeem());
tests.push_back(TestBuilder(CScript() << 0 << -1 << -1 << -1 << -1 << -1
<< -1 << -1 << -1 << -1 << -1 << 10
<< OP_CHECKMULTISIG,
"Very short P2SH multisig 0-of-10, spent with "
"schnorr mode (0 sigchecks)",
sigchecksflags, true)
.Push("0000")
.PushRedeem());
std::set<std::string> tests_set;
{
UniValue json_tests = read_json(std::string(
json_tests::script_tests,
json_tests::script_tests + sizeof(json_tests::script_tests)));
for (unsigned int idx = 0; idx < json_tests.size(); idx++) {
const UniValue &tv = json_tests[idx];
tests_set.insert(JSONPrettyPrint(tv.get_array()));
}
}
#ifdef UPDATE_JSON_TESTS
std::string strGen;
#endif
for (TestBuilder &test : tests) {
test.Test();
std::string str = JSONPrettyPrint(test.GetJSON());
#ifdef UPDATE_JSON_TESTS
strGen += str + ",\n";
#else
if (tests_set.count(str) == 0) {
BOOST_CHECK_MESSAGE(false, "Missing auto script_valid test: " +
test.GetComment());
}
#endif
}
#ifdef UPDATE_JSON_TESTS
FILE *file = fsbridge::fopen("script_tests.json.gen", "w");
fputs(strGen.c_str(), file);
fclose(file);
#endif
}
BOOST_AUTO_TEST_CASE(script_json_test) {
    // Read tests from test/data/script_tests.json
    // Format is an array of arrays
    // Inner arrays are [ ["wit"..., nValue]?, "scriptSig", "scriptPubKey",
    // "flags", "expected_scripterror" ]
    // ... where scriptSig and scriptPubKey are stringified
    // scripts.
    UniValue jsonTests = read_json(std::string(
        json_tests::script_tests,
        json_tests::script_tests + sizeof(json_tests::script_tests)));

    for (unsigned int i = 0; i < jsonTests.size(); i++) {
        UniValue test = jsonTests[i];
        const std::string strTest = test.write();

        // An optional leading array carries the amount of the spent output.
        Amount amount = Amount::zero();
        unsigned int pos = 0;
        if (test.size() > 0 && test[pos].isArray()) {
            amount = AmountFromValue(test[pos][0]);
            ++pos;
        }

        // Allow size > 3; extra stuff ignored (useful for comments)
        if (test.size() < 4 + pos) {
            // A one-element entry is a comment; anything else is malformed.
            if (test.size() != 1) {
                BOOST_ERROR("Bad test: " << strTest);
            }
            continue;
        }

        const std::string scriptSigString = test[pos++].get_str();
        const std::string scriptPubKeyString = test[pos++].get_str();
        try {
            CScript scriptSig = ParseScript(scriptSigString);
            CScript scriptPubKey = ParseScript(scriptPubKeyString);
            unsigned int scriptflags = ParseScriptFlags(test[pos++].get_str());
            ScriptError scriptError = ParseScriptError(test[pos++].get_str());
            DoTest(scriptPubKey, scriptSig, scriptflags, strTest, scriptError,
                   amount);
        } catch (std::runtime_error &e) {
            // Report which vector failed before re-raising so the suite stops.
            BOOST_TEST_MESSAGE("Script test failed. scriptSig: "
                               << scriptSigString
                               << " scriptPubKey: " << scriptPubKeyString);
            BOOST_TEST_MESSAGE("Exception: " << e.what());
            throw;
        }
    }
}
BOOST_AUTO_TEST_CASE(script_PushData) {
    // Check that PUSHDATA1, PUSHDATA2, and PUSHDATA4 create the same value on
    // the stack as the 1-75 opcodes do.
    static const uint8_t direct[] = {1, 0x5a};
    static const uint8_t pushdata1[] = {OP_PUSHDATA1, 1, 0x5a};
    static const uint8_t pushdata2[] = {OP_PUSHDATA2, 1, 0, 0x5a};
    static const uint8_t pushdata4[] = {OP_PUSHDATA4, 1, 0, 0, 0, 0x5a};

    ScriptError err;

    // Evaluate a raw byte script, check it succeeds, and return the stack.
    const auto evalStack = [&err](const uint8_t *begin, const uint8_t *end) {
        std::vector<std::vector<uint8_t>> stack;
        BOOST_CHECK(EvalScript(stack, CScript(begin, end), SCRIPT_VERIFY_P2SH,
                               BaseSignatureChecker(), &err));
        BOOST_CHECK_MESSAGE(err == ScriptError::OK, ScriptErrorString(err));
        return stack;
    };

    const auto directStack = evalStack(direct, direct + sizeof(direct));
    BOOST_CHECK(evalStack(pushdata1, pushdata1 + sizeof(pushdata1)) ==
                directStack);
    BOOST_CHECK(evalStack(pushdata2, pushdata2 + sizeof(pushdata2)) ==
                directStack);
    BOOST_CHECK(evalStack(pushdata4, pushdata4 + sizeof(pushdata4)) ==
                directStack);

    // A PUSHDATA opcode whose payload is cut short must fail with BAD_OPCODE.
    const auto checkTruncatedPushFails =
        [&err](const std::vector<uint8_t> &raw) {
            std::vector<std::vector<uint8_t>> ignored;
            BOOST_CHECK(!EvalScript(ignored, CScript(raw.begin(), raw.end()),
                                    SCRIPT_VERIFY_P2SH, BaseSignatureChecker(),
                                    &err));
            BOOST_CHECK_EQUAL(err, ScriptError::BAD_OPCODE);
        };
    checkTruncatedPushFails({OP_PUSHDATA1, 1});
    checkTruncatedPushFails({OP_PUSHDATA2, 1, 0});
    checkTruncatedPushFails({OP_PUSHDATA4, 1, 0, 0, 0});
}
BOOST_AUTO_TEST_CASE(script_cltv_truncated) {
    // OP_CHECKLOCKTIMEVERIFY with an empty stack must fail cleanly with
    // INVALID_STACK_OPERATION rather than crash.
    const auto truncatedCltv = CScript() << OP_CHECKLOCKTIMEVERIFY;
    std::vector<std::vector<uint8_t>> stack;
    ScriptError serror;
    BOOST_CHECK(!EvalScript(stack, truncatedCltv,
                            SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY,
                            BaseSignatureChecker(), &serror));
    BOOST_CHECK_EQUAL(serror, ScriptError::INVALID_STACK_OPERATION);
}
// Build a raw-multisig scriptSig: OP_0 followed by one ECDSA/SIGHASH_ALL
// signature per key, all committing to input 0 of `transaction`.
static CScript sign_multisig(const CScript &scriptPubKey,
                             const std::vector<CKey> &keys,
                             const CTransaction &transaction) {
    const uint256 sighash = SignatureHash(scriptPubKey, transaction, 0,
                                          SigHashType(), Amount::zero());
    //
    // NOTE: CHECKMULTISIG has an unfortunate bug; it requires one extra item on
    // the stack, before the signatures. Putting OP_0 on the stack is the
    // workaround; fixing the bug would mean splitting the block chain (old
    // clients would not accept new CHECKMULTISIG transactions, and vice-versa)
    //
    CScript scriptSig;
    scriptSig << OP_0;
    for (const CKey &key : keys) {
        std::vector<uint8_t> vchSig;
        BOOST_CHECK(key.SignECDSA(sighash, vchSig));
        vchSig.push_back(uint8_t(SIGHASH_ALL));
        scriptSig << vchSig;
    }
    return scriptSig;
}
// Convenience overload for the single-signer case.
static CScript sign_multisig(const CScript &scriptPubKey, const CKey &key,
                             const CTransaction &transaction) {
    return sign_multisig(scriptPubKey, std::vector<CKey>{key}, transaction);
}
BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG12) {
    ScriptError err;
    CKey key1, key2, key3;
    key1.MakeNewKey(true);
    key2.MakeNewKey(false);
    key3.MakeNewKey(true);

    // 1-of-2 bare multisig over key1 and key2.
    CScript scriptPubKey12;
    scriptPubKey12 << OP_1 << ToByteVector(key1.GetPubKey())
                   << ToByteVector(key2.GetPubKey()) << OP_2
                   << OP_CHECKMULTISIG;

    const CTransaction txFrom12{
        BuildCreditingTransaction(scriptPubKey12, Amount::zero())};
    CMutableTransaction txTo12 = BuildSpendingTransaction(CScript(), txFrom12);

    // Verify a scriptSig against the current state of txTo12.
    const auto verify12 = [&](const CScript &scriptSig) {
        return VerifyScript(scriptSig, scriptPubKey12, gFlags,
                            MutableTransactionSignatureChecker(
                                &txTo12, 0, txFrom12.vout[0].nValue),
                            &err);
    };

    // key1 alone satisfies the 1-of-2 policy.
    CScript goodsig1 =
        sign_multisig(scriptPubKey12, key1, CTransaction(txTo12));
    BOOST_CHECK(verify12(goodsig1));
    BOOST_CHECK_MESSAGE(err == ScriptError::OK, ScriptErrorString(err));

    // Mutating the spending tx changes the sighash, so the old signature
    // must no longer verify.
    txTo12.vout[0].nValue = 2 * SATOSHI;
    BOOST_CHECK(!verify12(goodsig1));
    BOOST_CHECK_MESSAGE(err == ScriptError::EVAL_FALSE, ScriptErrorString(err));

    // key2 alone also satisfies the policy (signed over the mutated tx).
    CScript goodsig2 =
        sign_multisig(scriptPubKey12, key2, CTransaction(txTo12));
    BOOST_CHECK(verify12(goodsig2));
    BOOST_CHECK_MESSAGE(err == ScriptError::OK, ScriptErrorString(err));

    // key3 is not one of the listed pubkeys, so its signature must fail.
    CScript badsig1 = sign_multisig(scriptPubKey12, key3, CTransaction(txTo12));
    BOOST_CHECK(!verify12(badsig1));
    BOOST_CHECK_MESSAGE(err == ScriptError::EVAL_FALSE, ScriptErrorString(err));
}
BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG23) {
    CKey key1, key2, key3, key4;
    key1.MakeNewKey(true);
    key2.MakeNewKey(false);
    key3.MakeNewKey(true);
    key4.MakeNewKey(false);

    // 2-of-3 bare multisig over key1, key2 and key3 (key4 is an outsider).
    CScript scriptPubKey23;
    scriptPubKey23 << OP_2 << ToByteVector(key1.GetPubKey())
                   << ToByteVector(key2.GetPubKey())
                   << ToByteVector(key3.GetPubKey()) << OP_3
                   << OP_CHECKMULTISIG;

    const CTransaction txFrom23{
        BuildCreditingTransaction(scriptPubKey23, Amount::zero())};
    CMutableTransaction mutableTxTo23 =
        BuildSpendingTransaction(CScript(), txFrom23);
    // after it has been set up, mutableTxTo23 does not change in this test, so
    // we can convert it to readonly transaction and use
    // TransactionSignatureChecker instead of MutableTransactionSignatureChecker
    const CTransaction txTo23(mutableTxTo23);

    // Sign with the given keys (in the given order) and verify, checking both
    // the verification outcome and the resulting script error.
    ScriptError err;
    const auto checkMultisig = [&](const std::vector<CKey> &signers,
                                   bool expectOk, ScriptError expectedErr) {
        CScript scriptSig = sign_multisig(scriptPubKey23, signers, txTo23);
        BOOST_CHECK_EQUAL(
            VerifyScript(scriptSig, scriptPubKey23, gFlags,
                         TransactionSignatureChecker(&txTo23, 0,
                                                     txFrom23.vout[0].nValue),
                         &err),
            expectOk);
        BOOST_CHECK_MESSAGE(err == expectedErr, ScriptErrorString(err));
    };

    // Every properly ordered pair of distinct listed keys succeeds.
    checkMultisig({key1, key2}, true, ScriptError::OK);
    checkMultisig({key1, key3}, true, ScriptError::OK);
    checkMultisig({key2, key3}, true, ScriptError::OK);
    // Can't re-use sig
    checkMultisig({key2, key2}, false, ScriptError::EVAL_FALSE);
    // sigs must be in correct order
    checkMultisig({key2, key1}, false, ScriptError::EVAL_FALSE);
    checkMultisig({key3, key2}, false, ScriptError::EVAL_FALSE);
    // sigs must match pubkeys
    checkMultisig({key4, key2}, false, ScriptError::EVAL_FALSE);
    checkMultisig({key1, key4}, false, ScriptError::EVAL_FALSE);
    // Must have signatures
    checkMultisig({}, false, ScriptError::INVALID_STACK_OPERATION);
}
/* Wrapper around ProduceSignature to combine two scriptsigs */
SignatureData CombineSignatures(const CTxOut &txout,
                                const CMutableTransaction &tx,
                                const SignatureData &scriptSig1,
                                const SignatureData &scriptSig2) {
    // Merge both inputs' data, then re-run the signer over the merged set to
    // synthesize the final scriptSig.
    SignatureData merged;
    merged.MergeSignatureData(scriptSig1);
    merged.MergeSignatureData(scriptSig2);
    ProduceSignature(DUMMY_SIGNING_PROVIDER,
                     MutableTransactionSignatureCreator(&tx, 0, txout.nValue),
                     txout.scriptPubKey, merged);
    return merged;
}
BOOST_AUTO_TEST_CASE(script_combineSigs) {
    // Test the ProduceSignature's ability to combine signatures function
    FillableSigningProvider keystore;
    std::vector<CKey> keys;
    std::vector<CPubKey> pubkeys;
    // Three fresh keys (alternating uncompressed/compressed), all loaded into
    // the keystore so SignSignature can use them.
    for (int i = 0; i < 3; i++) {
        CKey key;
        key.MakeNewKey(i % 2 == 1);
        keys.push_back(key);
        pubkeys.push_back(key.GetPubKey());
        BOOST_CHECK(keystore.AddKey(key));
    }
    CMutableTransaction txFrom = BuildCreditingTransaction(
        GetScriptForDestination(PKHash(keys[0].GetPubKey())), Amount::zero());
    CMutableTransaction txTo =
        BuildSpendingTransaction(CScript(), CTransaction(txFrom));
    // NOTE: scriptPubKey aliases txFrom.vout[0].scriptPubKey; assigning to it
    // below switches the output type being spent for each scenario.
    CScript &scriptPubKey = txFrom.vout[0].scriptPubKey;
    SignatureData scriptSig;
    SignatureData empty;
    // Combining two empty inputs yields an empty scriptSig.
    SignatureData combined =
        CombineSignatures(txFrom.vout[0], txTo, empty, empty);
    BOOST_CHECK(combined.scriptSig.empty());
    // Single signature case:
    BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0,
                              SigHashType().withForkId()));
    scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
    // Combining with empty in either order returns the signature unchanged.
    combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    combined = CombineSignatures(txFrom.vout[0], txTo, empty, scriptSig);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    SignatureData scriptSigCopy = scriptSig;
    // Signing again will give a different, valid signature:
    BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0,
                              SigHashType().withForkId()));
    scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
    // Combining two distinct valid signatures must pick one of them.
    combined =
        CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
    BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig ||
                combined.scriptSig == scriptSig.scriptSig);
    // P2SH, single-signature case:
    CScript pkSingle;
    pkSingle << ToByteVector(keys[0].GetPubKey()) << OP_CHECKSIG;
    BOOST_CHECK(keystore.AddCScript(pkSingle));
    scriptPubKey = GetScriptForDestination(ScriptHash(pkSingle));
    BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0,
                              SigHashType().withForkId()));
    scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
    combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    combined = CombineSignatures(txFrom.vout[0], txTo, empty, scriptSig);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    scriptSigCopy = scriptSig;
    BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0,
                              SigHashType().withForkId()));
    scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
    BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig ||
                combined.scriptSig == scriptSig.scriptSig);
    // Hardest case: Multisig 2-of-3
    scriptPubKey = GetScriptForMultisig(2, pubkeys);
    BOOST_CHECK(keystore.AddCScript(scriptPubKey));
    BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0,
                              SigHashType().withForkId()));
    scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
    combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    combined = CombineSignatures(txFrom.vout[0], txTo, empty, scriptSig);
    BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
    // A couple of partially-signed versions:
    // sig1: keys[0], SIGHASH_ALL|FORKID
    std::vector<uint8_t> sig1;
    uint256 hash1 = SignatureHash(scriptPubKey, CTransaction(txTo), 0,
                                  SigHashType().withForkId(), Amount::zero());
    BOOST_CHECK(keys[0].SignECDSA(hash1, sig1));
    sig1.push_back(SIGHASH_ALL | SIGHASH_FORKID);
    // sig2: keys[1], SIGHASH_NONE|FORKID
    std::vector<uint8_t> sig2;
    uint256 hash2 = SignatureHash(
        scriptPubKey, CTransaction(txTo), 0,
        SigHashType().withBaseType(BaseSigHashType::NONE).withForkId(),
        Amount::zero());
    BOOST_CHECK(keys[1].SignECDSA(hash2, sig2));
    sig2.push_back(SIGHASH_NONE | SIGHASH_FORKID);
    // sig3: keys[2], SIGHASH_SINGLE|FORKID
    std::vector<uint8_t> sig3;
    uint256 hash3 = SignatureHash(
        scriptPubKey, CTransaction(txTo), 0,
        SigHashType().withBaseType(BaseSigHashType::SINGLE).withForkId(),
        Amount::zero());
    BOOST_CHECK(keys[2].SignECDSA(hash3, sig3));
    sig3.push_back(SIGHASH_SINGLE | SIGHASH_FORKID);
    // Not fussy about order (or even existence) of placeholders or signatures:
    // NOTE(review): partial1b/2a/2b/3a/3b below are never referenced by the
    // checks that follow -- presumably leftovers from an older CombineSignatures
    // API; confirm before removing.
    CScript partial1a = CScript() << OP_0 << sig1 << OP_0;
    CScript partial1b = CScript() << OP_0 << OP_0 << sig1;
    CScript partial2a = CScript() << OP_0 << sig2;
    CScript partial2b = CScript() << sig2 << OP_0;
    CScript partial3a = CScript() << sig3;
    CScript partial3b = CScript() << OP_0 << OP_0 << sig3;
    CScript partial3c = CScript() << OP_0 << sig3 << OP_0;
    CScript complete12 = CScript() << OP_0 << sig1 << sig2;
    CScript complete13 = CScript() << OP_0 << sig1 << sig3;
    CScript complete23 = CScript() << OP_0 << sig2 << sig3;
    SignatureData partial1_sigs;
    partial1_sigs.signatures.emplace(keys[0].GetPubKey().GetID(),
                                     SigPair(keys[0].GetPubKey(), sig1));
    SignatureData partial2_sigs;
    partial2_sigs.signatures.emplace(keys[1].GetPubKey().GetID(),
                                     SigPair(keys[1].GetPubKey(), sig2));
    SignatureData partial3_sigs;
    partial3_sigs.signatures.emplace(keys[2].GetPubKey().GetID(),
                                     SigPair(keys[2].GetPubKey(), sig3));
    // Combining a partial set with itself just keeps the one signature.
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial1_sigs, partial1_sigs);
    BOOST_CHECK(combined.scriptSig == partial1a);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial1_sigs, partial2_sigs);
    BOOST_CHECK(combined.scriptSig == complete12);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial2_sigs, partial1_sigs);
    BOOST_CHECK(combined.scriptSig == complete12);
    // NOTE(review): this repeats the partial1+partial2 combination checked two
    // steps above; possibly intended to be partial1_sigs + partial3_sigs --
    // confirm against upstream.
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial1_sigs, partial2_sigs);
    BOOST_CHECK(combined.scriptSig == complete12);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial3_sigs, partial1_sigs);
    BOOST_CHECK(combined.scriptSig == complete13);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial2_sigs, partial3_sigs);
    BOOST_CHECK(combined.scriptSig == complete23);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial3_sigs, partial2_sigs);
    BOOST_CHECK(combined.scriptSig == complete23);
    combined =
        CombineSignatures(txFrom.vout[0], txTo, partial3_sigs, partial3_sigs);
    BOOST_CHECK(combined.scriptSig == partial3c);
}
BOOST_AUTO_TEST_CASE(script_standard_push) {
    ScriptError serror;
    // Every small integer must serialize as a minimal, push-only script.
    for (int value = 0; value < 67000; value++) {
        CScript script;
        script << value;
        BOOST_CHECK_MESSAGE(script.IsPushOnly(),
                            "Number " << value << " is not pure push.");
        BOOST_CHECK_MESSAGE(VerifyScript(script, CScript() << OP_1,
                                         SCRIPT_VERIFY_MINIMALDATA,
                                         BaseSignatureChecker(), &serror),
                            "Number " << value
                                      << " push is not minimal data.");
        BOOST_CHECK_MESSAGE(serror == ScriptError::OK,
                            ScriptErrorString(serror));
    }
    // Same for data blobs of every length up to the element-size limit.
    for (unsigned int len = 0; len <= MAX_SCRIPT_ELEMENT_SIZE; len++) {
        std::vector<uint8_t> payload(len, '\111');
        CScript script;
        script << payload;
        BOOST_CHECK_MESSAGE(script.IsPushOnly(),
                            "Length " << len << " is not pure push.");
        BOOST_CHECK_MESSAGE(VerifyScript(script, CScript() << OP_1,
                                         SCRIPT_VERIFY_MINIMALDATA,
                                         BaseSignatureChecker(), &serror),
                            "Length " << len << " push is not minimal data.");
        BOOST_CHECK_MESSAGE(serror == ScriptError::OK,
                            ScriptErrorString(serror));
    }
}
BOOST_AUTO_TEST_CASE(script_IsPushOnly_on_invalid_scripts) {
    // IsPushOnly returns false when given a script containing only pushes that
    // are invalid due to truncation. IsPushOnly() is consensus critical because
    // P2SH evaluation uses it, although this specific behavior should not be
    // consensus critical as the P2SH evaluation would fail first due to the
    // invalid push. Still, it doesn't hurt to test it explicitly.
    static const uint8_t truncatedPush[] = {1};
    const CScript script(truncatedPush, truncatedPush + sizeof(truncatedPush));
    BOOST_CHECK(!script.IsPushOnly());
}
BOOST_AUTO_TEST_CASE(script_GetScriptAsm) {
    // OP_NOP2 and OP_CHECKLOCKTIMEVERIFY are aliases; both must decode to the
    // CLTV name, with or without attempted sighash decoding.
    BOOST_CHECK_EQUAL("OP_CHECKLOCKTIMEVERIFY",
                      ScriptToAsmStr(CScript() << OP_NOP2, true));
    BOOST_CHECK_EQUAL(
        "OP_CHECKLOCKTIMEVERIFY",
        ScriptToAsmStr(CScript() << OP_CHECKLOCKTIMEVERIFY, true));
    BOOST_CHECK_EQUAL("OP_CHECKLOCKTIMEVERIFY",
                      ScriptToAsmStr(CScript() << OP_NOP2));
    BOOST_CHECK_EQUAL("OP_CHECKLOCKTIMEVERIFY",
                      ScriptToAsmStr(CScript() << OP_CHECKLOCKTIMEVERIFY));

    const std::string derSig(
        "304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e"
        "3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38"
        "d782e53023ee313d741ad0cfbc0c5090");
    const std::string pubKey(
        "03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2");
    const std::vector<uint8_t> vchPubKey = ToByteVector(ParseHex(pubKey));

    // Render <derSig+suffix> <pubKey> with sighash decoding enabled.
    const auto asmWithSigHashDecode = [&](const std::string &suffix) {
        return ScriptToAsmStr(CScript()
                                  << ToByteVector(ParseHex(derSig + suffix))
                                  << vchPubKey,
                              true);
    };
    // Same, but with sighash decoding disabled (the default).
    const auto asmPlain = [&](const std::string &suffix) {
        return ScriptToAsmStr(CScript()
                                  << ToByteVector(ParseHex(derSig + suffix))
                                  << vchPubKey);
    };

    // Invalid sighash bytes stay as raw hex even when decoding is requested.
    BOOST_CHECK_EQUAL(derSig + "00 " + pubKey, asmWithSigHashDecode("00"));
    BOOST_CHECK_EQUAL(derSig + "80 " + pubKey, asmWithSigHashDecode("80"));
    // Valid sighash bytes decode to their symbolic names.
    BOOST_CHECK_EQUAL(derSig + "[ALL] " + pubKey, asmWithSigHashDecode("01"));
    BOOST_CHECK_EQUAL(derSig + "[ALL|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("81"));
    BOOST_CHECK_EQUAL(derSig + "[ALL|FORKID] " + pubKey,
                      asmWithSigHashDecode("41"));
    BOOST_CHECK_EQUAL(derSig + "[ALL|FORKID|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("c1"));
    BOOST_CHECK_EQUAL(derSig + "[NONE] " + pubKey, asmWithSigHashDecode("02"));
    BOOST_CHECK_EQUAL(derSig + "[NONE|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("82"));
    BOOST_CHECK_EQUAL(derSig + "[NONE|FORKID] " + pubKey,
                      asmWithSigHashDecode("42"));
    BOOST_CHECK_EQUAL(derSig + "[NONE|FORKID|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("c2"));
    BOOST_CHECK_EQUAL(derSig + "[SINGLE] " + pubKey,
                      asmWithSigHashDecode("03"));
    BOOST_CHECK_EQUAL(derSig + "[SINGLE|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("83"));
    BOOST_CHECK_EQUAL(derSig + "[SINGLE|FORKID] " + pubKey,
                      asmWithSigHashDecode("43"));
    BOOST_CHECK_EQUAL(derSig + "[SINGLE|FORKID|ANYONECANPAY] " + pubKey,
                      asmWithSigHashDecode("c3"));

    // Without decoding, every suffix is rendered verbatim as hex.
    for (const char *suffix :
         {"00", "80", "01", "02", "03", "81", "82", "83"}) {
        BOOST_CHECK_EQUAL(derSig + suffix + " " + pubKey, asmPlain(suffix));
    }
}
// Decode a hex string into raw bytes and wrap them in a script.
static CScript ScriptFromHex(const char *hex) {
    const std::vector<uint8_t> bytes = ParseHex(hex);
    return CScript(bytes.begin(), bytes.end());
}
BOOST_AUTO_TEST_CASE(script_FindAndDelete) {
    // Exercise the FindAndDelete functionality
    // Run FindAndDelete(s, d) on a copy of s and verify both the number of
    // deletions and the resulting script.
    const auto checkFindAndDelete = [](CScript s, const CScript &d,
                                       const CScript &expect,
                                       int expectedDeletions) {
        BOOST_CHECK_EQUAL(FindAndDelete(s, d), expectedDeletions);
        BOOST_CHECK(s == expect);
    };

    // delete nothing should be a no-op
    checkFindAndDelete(CScript() << OP_1 << OP_2, CScript(),
                       CScript() << OP_1 << OP_2, 0);
    checkFindAndDelete(CScript() << OP_1 << OP_2 << OP_3, CScript() << OP_2,
                       CScript() << OP_1 << OP_3, 1);
    checkFindAndDelete(CScript()
                           << OP_3 << OP_1 << OP_3 << OP_3 << OP_4 << OP_3,
                       CScript() << OP_3, CScript() << OP_1 << OP_4, 4);
    // PUSH 0x02ff03 onto stack
    checkFindAndDelete(ScriptFromHex("0302ff03"), ScriptFromHex("0302ff03"),
                       CScript(), 1);
    // PUSH 0x2ff03 PUSH 0x2ff03
    checkFindAndDelete(ScriptFromHex("0302ff030302ff03"),
                       ScriptFromHex("0302ff03"), CScript(), 2);
    // FindAndDelete matches entire opcodes, so partial byte sequences are
    // left alone.
    checkFindAndDelete(ScriptFromHex("0302ff030302ff03"), ScriptFromHex("02"),
                       ScriptFromHex("0302ff030302ff03"), 0);
    checkFindAndDelete(ScriptFromHex("0302ff030302ff03"), ScriptFromHex("ff"),
                       ScriptFromHex("0302ff030302ff03"), 0);
    // This is an odd edge case: strip of the push-three-bytes prefix, leaving
    // 02ff03 which is push-two-bytes:
    checkFindAndDelete(ScriptFromHex("0302ff030302ff03"), ScriptFromHex("03"),
                       CScript() << ParseHex("ff03") << ParseHex("ff03"), 2);
    // Byte sequence that spans multiple opcodes:
    // PUSH(0xfeed) OP_1 OP_VERIFY -- doesn't match 'inside' opcodes
    checkFindAndDelete(ScriptFromHex("02feed5169"), ScriptFromHex("feed51"),
                       ScriptFromHex("02feed5169"), 0);
    // PUSH(0xfeed) OP_1 OP_VERIFY
    checkFindAndDelete(ScriptFromHex("02feed5169"), ScriptFromHex("02feed51"),
                       ScriptFromHex("69"), 1);
    checkFindAndDelete(ScriptFromHex("516902feed5169"),
                       ScriptFromHex("feed51"),
                       ScriptFromHex("516902feed5169"), 0);
    checkFindAndDelete(ScriptFromHex("516902feed5169"),
                       ScriptFromHex("02feed51"), ScriptFromHex("516969"), 1);
    // FindAndDelete is single-pass
    checkFindAndDelete(CScript() << OP_0 << OP_0 << OP_1 << OP_1,
                       CScript() << OP_0 << OP_1, CScript() << OP_0 << OP_1,
                       1);
    // FindAndDelete is single-pass
    checkFindAndDelete(CScript()
                           << OP_0 << OP_0 << OP_1 << OP_0 << OP_1 << OP_1,
                       CScript() << OP_0 << OP_1, CScript() << OP_0 << OP_1,
                       2);
    // Another weird edge case:
    // End with invalid push (not enough data)...
    // ... can remove the invalid push
    checkFindAndDelete(ScriptFromHex("0003feed"), ScriptFromHex("03feed"),
                       ScriptFromHex("00"), 1);
    checkFindAndDelete(ScriptFromHex("0003feed"), ScriptFromHex("00"),
                       ScriptFromHex("03feed"), 1);
}
BOOST_AUTO_TEST_CASE(IsWitnessProgram) {
    // Valid version: [0,16]
    // Valid program_len: [2,40]
    // Sweep one step past each boundary in both dimensions.
    for (int version = -1; version <= 17; version++) {
        for (unsigned int programLen = 1; programLen <= 41; programLen++) {
            const std::vector<uint8_t> program(programLen, '\42');
            CScript script;
            script << version << program;
            int parsedVersion;
            std::vector<uint8_t> parsedProgram;
            const bool result =
                script.IsWitnessProgram(parsedVersion, parsedProgram);
            const bool expected = version >= 0 && version <= 16 &&
                                  programLen >= 2 && programLen <= 40;
            BOOST_CHECK_EQUAL(result, expected);
            // On success the parsed version/program must round-trip.
            if (result) {
                BOOST_CHECK_EQUAL(version, parsedVersion);
                BOOST_CHECK(program == parsedProgram);
            }
        }
    }
    // Tests with 1 and 3 stack elements
    {
        CScript oneElement;
        oneElement << OP_0;
        BOOST_CHECK_MESSAGE(
            !oneElement.IsWitnessProgram(),
            "Failed IsWitnessProgram check with 1 stack element");
    }
    {
        CScript threeElements;
        threeElements << OP_0 << std::vector<uint8_t>(20, '\42') << OP_1;
        BOOST_CHECK_MESSAGE(
            !threeElements.IsWitnessProgram(),
            "Failed IsWitnessProgram check with 3 stack elements");
    }
}
BOOST_AUTO_TEST_CASE(script_HasValidOps) {
    // Exercise the HasValidOps functionality
    // Normal scripts
    BOOST_CHECK(
        ScriptFromHex("76a9141234567890abcdefa1a2a3a4a5a6a7a8a9a0aaab88ac")
            .HasValidOps());
    BOOST_CHECK(
        ScriptFromHex("76a914ff34567890abcdefa1a2a3a4a5a6a7a8a9a0aaab88ac")
            .HasValidOps());
    // Script with OP_INVALIDOPCODE explicit
    BOOST_CHECK(!ScriptFromHex("ff88ac").HasValidOps());
    // Script with undefined opcode
    BOOST_CHECK(!ScriptFromHex("88acc0").HasValidOps());
    // Check all non push opcodes.
    for (uint8_t opcode = OP_1NEGATE; opcode < FIRST_UNDEFINED_OP_VALUE;
         opcode++) {
        BOOST_CHECK((CScript() << opcode).HasValidOps());
    }
    // The first undefined opcode makes the script invalid.
    BOOST_CHECK(!(CScript() << FIRST_UNDEFINED_OP_VALUE).HasValidOps());
}
#if defined(HAVE_CONSENSUS_LIB)
/* Test simple (successful) usage of bitcoinconsensus_verify_script */
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true) {
    // An anyone-can-spend output (scriptPubKey = OP_1) with an empty scriptSig
    // must verify successfully through the libconsensus C API.
    unsigned int libconsensus_flags = 0;
    int nIn = 0;
    CScript scriptPubKey;
    CScript scriptSig;
    scriptPubKey << OP_1;
    const CTransaction creditTx(
        BuildCreditingTransaction(scriptPubKey, SATOSHI));
    const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
    // Serialize the spending transaction the way a C-API caller would.
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << spendTx;
    bitcoinconsensus_error err;
    int result = bitcoinconsensus_verify_script(
-        scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(),
-        nIn, libconsensus_flags, &err);
+        scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
+        stream.size(), nIn, libconsensus_flags, &err);
    BOOST_CHECK_EQUAL(result, 1);
    BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_OK);
}
/* Test bitcoinconsensus_verify_script returns invalid tx index err*/
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_index_err) {
    // nIn = 3 is out of range for a transaction with a single input, so the
    // API must report ERR_TX_INDEX.
    unsigned int libconsensus_flags = 0;
    int nIn = 3;
    CScript scriptPubKey;
    CScript scriptSig;
    scriptPubKey << OP_EQUAL;
    const CTransaction creditTx(
        BuildCreditingTransaction(scriptPubKey, SATOSHI));
    const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << spendTx;
    bitcoinconsensus_error err;
    int result = bitcoinconsensus_verify_script(
-        scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(),
-        nIn, libconsensus_flags, &err);
+        scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
+        stream.size(), nIn, libconsensus_flags, &err);
    BOOST_CHECK_EQUAL(result, 0);
    BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_INDEX);
}
/* Test bitcoinconsensus_verify_script returns tx size mismatch err*/
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_size) {
    // Passing a txToLen that disagrees with the actual serialized size
    // (stream.size() * 2 below) must be rejected with ERR_TX_SIZE_MISMATCH.
    unsigned int libconsensus_flags = 0;
    int nIn = 0;
    CScript scriptPubKey;
    CScript scriptSig;
    scriptPubKey << OP_EQUAL;
    const CTransaction creditTx(
        BuildCreditingTransaction(scriptPubKey, SATOSHI));
    const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << spendTx;
    bitcoinconsensus_error err;
    int result = bitcoinconsensus_verify_script(
-        scriptPubKey.data(), scriptPubKey.size(), stream.data(),
+        scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
        stream.size() * 2, nIn, libconsensus_flags, &err);
    BOOST_CHECK_EQUAL(result, 0);
    BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
}
/* Test bitcoinconsensus_verify_script returns invalid tx serialization error */
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_serialization) {
    // Feed the API bytes that are not a valid transaction serialization
    // (a bare 0xffffffff) and expect ERR_TX_DESERIALIZE.
    unsigned int libconsensus_flags = 0;
    int nIn = 0;
    CScript scriptPubKey;
    CScript scriptSig;
    scriptPubKey << OP_EQUAL;
    const CTransaction creditTx(
        BuildCreditingTransaction(scriptPubKey, SATOSHI));
    const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << 0xffffffff;
    bitcoinconsensus_error err;
    int result = bitcoinconsensus_verify_script(
-        scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(),
-        nIn, libconsensus_flags, &err);
+        scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
+        stream.size(), nIn, libconsensus_flags, &err);
    BOOST_CHECK_EQUAL(result, 0);
    BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_DESERIALIZE);
}
/* Test bitcoinconsensus_verify_script returns amount required error */
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_amount_required_err) {
    // With SIGHASH_FORKID enabled, the amount-less entry point is not allowed
    // and must report ERR_AMOUNT_REQUIRED.
    unsigned int libconsensus_flags =
        bitcoinconsensus_SCRIPT_ENABLE_SIGHASH_FORKID;
    int nIn = 0;
    CScript scriptPubKey;
    CScript scriptSig;
    scriptPubKey << OP_EQUAL;
    const CTransaction creditTx(
        BuildCreditingTransaction(scriptPubKey, SATOSHI));
    const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << spendTx;
    bitcoinconsensus_error err;
    int result = bitcoinconsensus_verify_script(
-        scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(),
-        nIn, libconsensus_flags, &err);
+        scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
+        stream.size(), nIn, libconsensus_flags, &err);
    BOOST_CHECK_EQUAL(result, 0);
    BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_AMOUNT_REQUIRED);
}
/* Test bitcoinconsensus_verify_script returns invalid flags err */
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_invalid_flags) {
unsigned int libconsensus_flags = 1 << 3;
int nIn = 0;
CScript scriptPubKey;
CScript scriptSig;
scriptPubKey << OP_EQUAL;
const CTransaction creditTx(
BuildCreditingTransaction(scriptPubKey, SATOSHI));
const CTransaction spendTx(BuildSpendingTransaction(scriptSig, creditTx));
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << spendTx;
bitcoinconsensus_error err;
int result = bitcoinconsensus_verify_script(
- scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(),
- nIn, libconsensus_flags, &err);
+ scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()),
+ stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_INVALID_FLAGS);
}
#endif
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index 82887bf91..76e57d88c 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -1,586 +1,588 @@
// Copyright (c) 2012-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <serialize.h>
#include <avalanche/proof.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/test/util.h>
#include <hash.h>
#include <streams.h>
#include <util/strencodings.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <cstdint>
#include <limits>
BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup)
class CSerializeMethodsTestSingle {
protected:
int intval;
bool boolval;
std::string stringval;
char charstrval[16];
CTransactionRef txval;
avalanche::ProofRef proofval;
public:
CSerializeMethodsTestSingle() = default;
CSerializeMethodsTestSingle(int intvalin, bool boolvalin,
std::string stringvalin,
const uint8_t *charstrvalin,
const CTransactionRef &txvalin,
const avalanche::ProofRef &proofvalin)
: intval(intvalin), boolval(boolvalin),
stringval(std::move(stringvalin)), txval(txvalin),
proofval(proofvalin) {
memcpy(charstrval, charstrvalin, sizeof(charstrval));
}
SERIALIZE_METHODS(CSerializeMethodsTestSingle, obj) {
READWRITE(obj.intval);
READWRITE(obj.boolval);
READWRITE(obj.stringval);
READWRITE(obj.charstrval);
READWRITE(obj.txval);
READWRITE(obj.proofval);
}
bool operator==(const CSerializeMethodsTestSingle &rhs) {
return intval == rhs.intval && boolval == rhs.boolval &&
stringval == rhs.stringval &&
strcmp(charstrval, rhs.charstrval) == 0 &&
*txval == *rhs.txval &&
proofval->getId() == rhs.proofval->getId();
}
};
class CSerializeMethodsTestMany : public CSerializeMethodsTestSingle {
public:
using CSerializeMethodsTestSingle::CSerializeMethodsTestSingle;
SERIALIZE_METHODS(CSerializeMethodsTestMany, obj) {
READWRITE(obj.intval, obj.boolval, obj.stringval, obj.charstrval,
obj.txval, obj.proofval);
}
};
BOOST_AUTO_TEST_CASE(sizes) {
BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0)));
BOOST_CHECK_EQUAL(sizeof(int8_t), GetSerializeSize(int8_t(0)));
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(uint8_t(0)));
BOOST_CHECK_EQUAL(sizeof(int16_t), GetSerializeSize(int16_t(0)));
BOOST_CHECK_EQUAL(sizeof(uint16_t), GetSerializeSize(uint16_t(0)));
BOOST_CHECK_EQUAL(sizeof(int32_t), GetSerializeSize(int32_t(0)));
BOOST_CHECK_EQUAL(sizeof(uint32_t), GetSerializeSize(uint32_t(0)));
BOOST_CHECK_EQUAL(sizeof(int64_t), GetSerializeSize(int64_t(0)));
BOOST_CHECK_EQUAL(sizeof(uint64_t), GetSerializeSize(uint64_t(0)));
BOOST_CHECK_EQUAL(sizeof(float), GetSerializeSize(float(0)));
BOOST_CHECK_EQUAL(sizeof(double), GetSerializeSize(double(0)));
// Bool is serialized as uint8_t
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(bool(0)));
// Sanity-check GetSerializeSize and c++ type matching
BOOST_CHECK_EQUAL(GetSerializeSize(char(0)), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int8_t(0)), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint8_t(0)), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int16_t(0)), 2U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint16_t(0)), 2U);
BOOST_CHECK_EQUAL(GetSerializeSize(int32_t(0)), 4U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint32_t(0)), 4U);
BOOST_CHECK_EQUAL(GetSerializeSize(int64_t(0)), 8U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint64_t(0)), 8U);
BOOST_CHECK_EQUAL(GetSerializeSize(float(0)), 4U);
BOOST_CHECK_EQUAL(GetSerializeSize(double(0)), 8U);
BOOST_CHECK_EQUAL(GetSerializeSize(bool(0)), 1U);
}
BOOST_AUTO_TEST_CASE(floats_conversion) {
// Choose values that map unambiguously to binary floating point to avoid
// rounding issues at the compiler side.
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x00000000), 0.0F);
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x3f000000), 0.5F);
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x3f800000), 1.0F);
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x40000000), 2.0F);
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x40800000), 4.0F);
BOOST_CHECK_EQUAL(ser_uint32_to_float(0x44444444), 785.066650390625F);
BOOST_CHECK_EQUAL(ser_float_to_uint32(0.0F), 0x00000000U);
BOOST_CHECK_EQUAL(ser_float_to_uint32(0.5F), 0x3f000000U);
BOOST_CHECK_EQUAL(ser_float_to_uint32(1.0F), 0x3f800000U);
BOOST_CHECK_EQUAL(ser_float_to_uint32(2.0F), 0x40000000U);
BOOST_CHECK_EQUAL(ser_float_to_uint32(4.0F), 0x40800000U);
BOOST_CHECK_EQUAL(ser_float_to_uint32(785.066650390625F), 0x44444444U);
}
BOOST_AUTO_TEST_CASE(doubles_conversion) {
// Choose values that map unambiguously to binary floating point to avoid
// rounding issues at the compiler side.
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x0000000000000000ULL), 0.0);
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x3fe0000000000000ULL), 0.5);
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x3ff0000000000000ULL), 1.0);
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4000000000000000ULL), 2.0);
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4010000000000000ULL), 4.0);
BOOST_CHECK_EQUAL(ser_uint64_to_double(0x4088888880000000ULL),
785.066650390625);
BOOST_CHECK_EQUAL(ser_double_to_uint64(0.0), 0x0000000000000000ULL);
BOOST_CHECK_EQUAL(ser_double_to_uint64(0.5), 0x3fe0000000000000ULL);
BOOST_CHECK_EQUAL(ser_double_to_uint64(1.0), 0x3ff0000000000000ULL);
BOOST_CHECK_EQUAL(ser_double_to_uint64(2.0), 0x4000000000000000ULL);
BOOST_CHECK_EQUAL(ser_double_to_uint64(4.0), 0x4010000000000000ULL);
BOOST_CHECK_EQUAL(ser_double_to_uint64(785.066650390625),
0x4088888880000000ULL);
}
/*
Python code to generate the below hashes:
def reversed_hex(x):
return b''.join(reversed(x)).hex().encode()
def dsha256(x):
return hashlib.sha256(hashlib.sha256(x).digest()).digest()
reversed_hex(dsha256(b''.join(struct.pack('<f', x) for x in range(0,1000))))
== '8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c'
reversed_hex(dsha256(b''.join(struct.pack('<d', x) for x in range(0,1000))))
== '43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96'
*/
BOOST_AUTO_TEST_CASE(floats) {
CDataStream ss(SER_DISK, 0);
// encode
for (int i = 0; i < 1000; i++) {
ss << float(i);
}
BOOST_CHECK(Hash(ss) ==
uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85"
"099d7f3fe0c"));
// decode
for (int i = 0; i < 1000; i++) {
float j;
ss >> j;
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
}
BOOST_AUTO_TEST_CASE(doubles) {
CDataStream ss(SER_DISK, 0);
// encode
for (int i = 0; i < 1000; i++) {
ss << double(i);
}
BOOST_CHECK(Hash(ss) ==
uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0"
"d7878b23f96"));
// decode
for (int i = 0; i < 1000; i++) {
double j;
ss >> j;
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
}
BOOST_AUTO_TEST_CASE(varints) {
// encode
CDataStream ss(SER_DISK, 0);
CDataStream::size_type size = 0;
for (int i = 0; i < 100000; i++) {
ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED);
size +=
::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED));
BOOST_CHECK(size == ss.size());
}
for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) {
ss << VARINT(i);
size += ::GetSerializeSize(VARINT(i));
BOOST_CHECK(size == ss.size());
}
// decode
for (int i = 0; i < 100000; i++) {
int j = -1;
ss >> VARINT_MODE(j, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) {
uint64_t j = std::numeric_limits<uint64_t>::max();
ss >> VARINT(j);
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
}
BOOST_AUTO_TEST_CASE(varints_bitpatterns) {
CDataStream ss(SER_DISK, 0);
ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "00");
ss.clear();
ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "7f");
ss.clear();
ss << VARINT_MODE((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "7f");
ss.clear();
ss << VARINT_MODE(0x80, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "8000");
ss.clear();
ss << VARINT((uint8_t)0x80);
BOOST_CHECK_EQUAL(HexStr(ss), "8000");
ss.clear();
ss << VARINT_MODE(0x1234, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "a334");
ss.clear();
ss << VARINT_MODE((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "a334");
ss.clear();
ss << VARINT_MODE(0xffff, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f");
ss.clear();
ss << VARINT((uint16_t)0xffff);
BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f");
ss.clear();
ss << VARINT_MODE(0x123456, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "c7e756");
ss.clear();
ss << VARINT_MODE((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "c7e756");
ss.clear();
ss << VARINT(0x80123456U);
BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756");
ss.clear();
ss << VARINT((uint32_t)0x80123456U);
BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756");
ss.clear();
ss << VARINT(0xffffffff);
BOOST_CHECK_EQUAL(HexStr(ss), "8efefefe7f");
ss.clear();
ss << VARINT_MODE(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f");
ss.clear();
ss << VARINT(0xffffffffffffffffULL);
BOOST_CHECK_EQUAL(HexStr(ss), "80fefefefefefefefe7f");
ss.clear();
}
static bool isTooLargeException(const std::ios_base::failure &ex) {
std::ios_base::failure expectedException(
"ReadCompactSize(): size too large");
// The string returned by what() can be different for different platforms.
// Instead of directly comparing the ex.what() with an expected string,
// create an instance of exception to see if ex.what() matches the expected
// explanatory string returned by the exception instance.
return strcmp(expectedException.what(), ex.what()) == 0;
}
BOOST_AUTO_TEST_CASE(compactsize) {
CDataStream ss(SER_DISK, 0);
std::vector<char>::size_type i, j;
for (i = 1; i <= MAX_SIZE; i *= 2) {
WriteCompactSize(ss, i - 1);
WriteCompactSize(ss, i);
}
for (i = 1; i <= MAX_SIZE; i *= 2) {
j = ReadCompactSize(ss);
BOOST_CHECK_MESSAGE((i - 1) == j,
"decoded:" << j << " expected:" << (i - 1));
j = ReadCompactSize(ss);
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
WriteCompactSize(ss, MAX_SIZE);
BOOST_CHECK_EQUAL(ReadCompactSize(ss), MAX_SIZE);
WriteCompactSize(ss, MAX_SIZE + 1);
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isTooLargeException);
WriteCompactSize(ss, std::numeric_limits<int64_t>::max());
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isTooLargeException);
WriteCompactSize(ss, std::numeric_limits<uint64_t>::max());
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isTooLargeException);
}
static bool isCanonicalException(const std::ios_base::failure &ex) {
std::ios_base::failure expectedException("non-canonical ReadCompactSize()");
// The string returned by what() can be different for different platforms.
// Instead of directly comparing the ex.what() with an expected string,
// create an instance of exception to see if ex.what() matches the expected
// explanatory string returned by the exception instance.
return strcmp(expectedException.what(), ex.what()) == 0;
}
BOOST_AUTO_TEST_CASE(vector_bool) {
std::vector<uint8_t> vec1{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1,
1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1};
std::vector<bool> vec2{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1,
1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1};
BOOST_CHECK(vec1 == std::vector<uint8_t>(vec2.begin(), vec2.end()));
BOOST_CHECK(SerializeHash(vec1) == SerializeHash(vec2));
}
BOOST_AUTO_TEST_CASE(noncanonical) {
// Write some non-canonical CompactSize encodings, and make sure an
// exception is thrown when read back.
CDataStream ss(SER_DISK, 0);
std::vector<char>::size_type n;
// zero encoded with three bytes:
- ss.write("\xfd\x00\x00", 3);
+ ss.write(MakeByteSpan("\xfd\x00\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
// 0xfc encoded with three bytes:
- ss.write("\xfd\xfc\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfc\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
// 0xfd encoded with three bytes is OK:
- ss.write("\xfd\xfd\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfd\x00").first(3));
n = ReadCompactSize(ss);
BOOST_CHECK(n == 0xfd);
// zero encoded with five bytes:
- ss.write("\xfe\x00\x00\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\x00\x00\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
// 0xffff encoded with five bytes:
- ss.write("\xfe\xff\xff\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\xff\xff\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
// zero encoded with nine bytes:
- ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\x00\x00\x00\x00\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
// 0x01ffffff encoded with nine bytes:
- ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\xff\xff\xff\x01\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure,
isCanonicalException);
}
BOOST_AUTO_TEST_CASE(insert_delete) {
+ constexpr auto B2I{[](std::byte b) { return std::to_integer<uint8_t>(b); }};
+
// Test inserting/deleting bytes.
CDataStream ss(SER_DISK, 0);
BOOST_CHECK_EQUAL(ss.size(), 0U);
- ss.write("\x00\x01\x02\xff", 4);
+ ss.write(MakeByteSpan("\x00\x01\x02\xff").first(4));
BOOST_CHECK_EQUAL(ss.size(), 4U);
- char c = (char)11;
+ uint8_t c{11};
// Inserting at beginning/end/middle:
- ss.insert(ss.begin(), c);
+ ss.insert(ss.begin(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[0], c);
- BOOST_CHECK_EQUAL(ss[1], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), c);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 0);
- ss.insert(ss.end(), c);
+ ss.insert(ss.end(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
- BOOST_CHECK_EQUAL(ss[5], c);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[5]), c);
- ss.insert(ss.begin() + 2, c);
+ ss.insert(ss.begin() + 2, std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 7U);
- BOOST_CHECK_EQUAL(ss[2], c);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), c);
// Delete at beginning/end/middle
ss.erase(ss.begin());
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[0], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
ss.erase(ss.begin() + ss.size() - 1);
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
ss.erase(ss.begin() + 1);
BOOST_CHECK_EQUAL(ss.size(), 4U);
- BOOST_CHECK_EQUAL(ss[0], 0);
- BOOST_CHECK_EQUAL(ss[1], 1);
- BOOST_CHECK_EQUAL(ss[2], 2);
- BOOST_CHECK_EQUAL(ss[3], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 1);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), 2);
+ BOOST_CHECK_EQUAL(B2I(ss[3]), 0xff);
}
BOOST_AUTO_TEST_CASE(class_methods) {
int intval(100);
bool boolval(true);
std::string stringval("testing");
const uint8_t charstrval[16]{"testing charstr"};
CMutableTransaction txval;
CTransactionRef tx_ref{MakeTransactionRef(txval)};
avalanche::ProofBuilder pb(0, 0, CKey::MakeCompressedKey(),
avalanche::UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
avalanche::ProofRef proofval = pb.build();
CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval,
charstrval, tx_ref, proofval);
CSerializeMethodsTestMany methodtest2(intval, boolval, stringval,
charstrval, tx_ref, proofval);
CSerializeMethodsTestSingle methodtest3;
CSerializeMethodsTestMany methodtest4;
CDataStream ss(SER_DISK, PROTOCOL_VERSION);
BOOST_CHECK(methodtest1 == methodtest2);
ss << methodtest1;
ss >> methodtest4;
ss << methodtest2;
ss >> methodtest3;
BOOST_CHECK(methodtest1 == methodtest2);
BOOST_CHECK(methodtest2 == methodtest3);
BOOST_CHECK(methodtest3 == methodtest4);
CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval,
charstrval, txval, proofval);
ss2 >> methodtest3;
BOOST_CHECK(methodtest3 == methodtest4);
}
namespace {
struct DifferentialIndexedItem {
uint32_t index;
std::string text;
template <typename Stream> void SerData(Stream &s) { s << text; }
template <typename Stream> void UnserData(Stream &s) { s >> text; }
bool operator==(const DifferentialIndexedItem &other) const {
return index == other.index && text == other.text;
}
bool operator!=(const DifferentialIndexedItem &other) const {
return !(*this == other);
}
// Make boost happy
friend std::ostream &operator<<(std::ostream &os,
const DifferentialIndexedItem &item) {
os << "index: " << item.index << ", text: " << item.text;
return os;
}
DifferentialIndexedItem() {}
DifferentialIndexedItem(uint32_t indexIn)
: index(indexIn), text(ToString(index)) {}
};
template <typename Formatter, typename T>
static void checkDifferentialEncodingRoundtrip() {
Formatter formatter;
const std::vector<T> indicesIn{0, 1, 2, 5, 10, 20, 50, 100};
std::vector<T> indicesOut;
CDataStream ss(SER_DISK, PROTOCOL_VERSION);
formatter.Ser(ss, indicesIn);
formatter.Unser(ss, indicesOut);
BOOST_CHECK_EQUAL_COLLECTIONS(indicesIn.begin(), indicesIn.end(),
indicesOut.begin(), indicesOut.end());
}
template <typename Formatter, typename T>
static void checkDifferentialEncodingOverflow() {
Formatter formatter;
{
const std::vector<T> indicesIn{1, 0};
CDataStream ss(SER_DISK, PROTOCOL_VERSION);
BOOST_CHECK_EXCEPTION(formatter.Ser(ss, indicesIn),
std::ios_base::failure,
HasReason("differential value overflow"));
}
}
} // namespace
BOOST_AUTO_TEST_CASE(difference_formatter) {
{
// Roundtrip with internals check
VectorFormatter<DifferenceFormatter> formatter;
std::vector<uint32_t> indicesIn{0, 1, 2, 5, 10, 20, 50, 100};
std::vector<uint32_t> indicesOut;
CDataStream ss(SER_DISK, PROTOCOL_VERSION);
formatter.Ser(ss, indicesIn);
// Check the stream is differentially encoded. Don't care about the
// prefixes and vector length here (assumed to be < 253).
const std::string streamStr = ss.str();
const std::string differences =
HexStr(streamStr.substr(streamStr.size() - indicesIn.size()));
BOOST_CHECK_EQUAL(differences, "0000000204091d31");
formatter.Unser(ss, indicesOut);
BOOST_CHECK_EQUAL_COLLECTIONS(indicesIn.begin(), indicesIn.end(),
indicesOut.begin(), indicesOut.end());
}
checkDifferentialEncodingRoundtrip<VectorFormatter<DifferenceFormatter>,
uint32_t>();
checkDifferentialEncodingRoundtrip<
VectorFormatter<DifferentialIndexedItemFormatter>,
DifferentialIndexedItem>();
{
// Checking 32 bits overflow requires to manually create the serialized
// stream, so only do it with uint32_t
std::vector<uint32_t> indicesOut;
// Compute the number of MAX_SIZE increment we need to cause an overflow
const uint64_t overflow =
uint64_t(std::numeric_limits<uint32_t>::max()) + 1;
// Due to differential encoding, a value of MAX_SIZE bumps the index by
// MAX_SIZE + 1
BOOST_CHECK_GE(overflow, MAX_SIZE + 1);
const uint64_t overflowIter = overflow / (MAX_SIZE + 1);
// Make sure the iteration fits in an uint32_t and is <= MAX_SIZE
BOOST_CHECK_LE(overflowIter, std::numeric_limits<uint32_t>::max());
BOOST_CHECK_LE(overflowIter, MAX_SIZE);
uint32_t remainder =
uint32_t(overflow - ((MAX_SIZE + 1) * overflowIter));
auto buildStream = [&](uint32_t lastItemDifference) {
CDataStream ss(SER_DISK, PROTOCOL_VERSION);
WriteCompactSize(ss, overflowIter + 1);
for (uint32_t i = 0; i < overflowIter; i++) {
WriteCompactSize(ss, MAX_SIZE);
}
// This will cause an overflow if lastItemDifference >= remainder
WriteCompactSize(ss, lastItemDifference);
return ss;
};
VectorFormatter<DifferenceFormatter> formatter;
auto noThrowStream = buildStream(remainder - 1);
BOOST_CHECK_NO_THROW(formatter.Unser(noThrowStream, indicesOut));
auto overflowStream = buildStream(remainder);
BOOST_CHECK_EXCEPTION(formatter.Unser(overflowStream, indicesOut),
std::ios_base::failure,
HasReason("differential value overflow"));
}
checkDifferentialEncodingOverflow<VectorFormatter<DifferenceFormatter>,
uint32_t>();
checkDifferentialEncodingOverflow<
VectorFormatter<DifferentialIndexedItemFormatter>,
DifferentialIndexedItem>();
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index de8b6964d..b2bcdae56 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -1,471 +1,474 @@
// Copyright (c) 2012-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <fs.h>
#include <random.h>
#include <streams.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(streams_vector_writer) {
uint8_t a(1);
uint8_t b(2);
uint8_t bytes[] = {3, 4, 5, 6};
std::vector<uint8_t> vch;
// Each test runs twice. Serializing a second time at the same starting
// point should yield the same results, even if the first test grew the
// vector.
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{1, 2}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{1, 2}}));
vch.clear();
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 1, 2}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 1, 2}}));
vch.clear();
vch.resize(5, 0);
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 1, 2, 0}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 1, 2, 0}}));
vch.clear();
vch.resize(4, 0);
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 0, 1, 2}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 0, 1, 2}}));
vch.clear();
vch.resize(4, 0);
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 0, 0, 1, 2}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{0, 0, 0, 0, 1, 2}}));
vch.clear();
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes);
BOOST_CHECK((vch == std::vector<uint8_t>{{3, 4, 5, 6}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes);
BOOST_CHECK((vch == std::vector<uint8_t>{{3, 4, 5, 6}}));
vch.clear();
vch.resize(4, 8);
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{8, 8, 1, 3, 4, 5, 6, 2}}));
CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b);
BOOST_CHECK((vch == std::vector<uint8_t>{{8, 8, 1, 3, 4, 5, 6, 2}}));
vch.clear();
}
BOOST_AUTO_TEST_CASE(streams_vector_reader) {
std::vector<uint8_t> vch = {1, 255, 3, 4, 5, 6};
SpanReader reader{SER_NETWORK, INIT_PROTO_VERSION, vch};
BOOST_CHECK_EQUAL(reader.size(), 6U);
BOOST_CHECK(!reader.empty());
// Read a single byte as an uint8_t.
uint8_t a;
reader >> a;
BOOST_CHECK_EQUAL(a, 1);
BOOST_CHECK_EQUAL(reader.size(), 5U);
BOOST_CHECK(!reader.empty());
// Read a single byte as a (signed) int8_t.
int8_t b;
reader >> b;
BOOST_CHECK_EQUAL(b, -1);
BOOST_CHECK_EQUAL(reader.size(), 4U);
BOOST_CHECK(!reader.empty());
// Read a 4 bytes as an unsigned uint32_t.
uint32_t c;
reader >> c;
// 100992003 = 3,4,5,6 in little-endian base-256
BOOST_CHECK_EQUAL(c, 100992003);
BOOST_CHECK_EQUAL(reader.size(), 0U);
BOOST_CHECK(reader.empty());
// Reading after end of byte vector throws an error.
int32_t d;
BOOST_CHECK_THROW(reader >> d, std::ios_base::failure);
// Read a 4 bytes as a (signed) int32_t from the beginning of the buffer.
SpanReader new_reader{SER_NETWORK, INIT_PROTO_VERSION, vch};
new_reader >> d;
// 67370753 = 1,255,3,4 in little-endian base-256
BOOST_CHECK_EQUAL(d, 67370753);
BOOST_CHECK_EQUAL(new_reader.size(), 2U);
BOOST_CHECK(!new_reader.empty());
// Reading after end of byte vector throws an error even if the reader is
// not totally empty.
BOOST_CHECK_THROW(new_reader >> d, std::ios_base::failure);
}
BOOST_AUTO_TEST_CASE(bitstream_reader_writer) {
CDataStream data(SER_NETWORK, INIT_PROTO_VERSION);
BitStreamWriter<CDataStream> bit_writer(data);
bit_writer.Write(0, 1);
bit_writer.Write(2, 2);
bit_writer.Write(6, 3);
bit_writer.Write(11, 4);
bit_writer.Write(1, 5);
bit_writer.Write(32, 6);
bit_writer.Write(7, 7);
bit_writer.Write(30497, 16);
bit_writer.Flush();
CDataStream data_copy(data);
uint32_t serialized_int1;
data >> serialized_int1;
// NOTE: Serialized as LE
BOOST_CHECK_EQUAL(serialized_int1, (uint32_t)0x7700C35A);
uint16_t serialized_int2;
data >> serialized_int2;
// NOTE: Serialized as LE
BOOST_CHECK_EQUAL(serialized_int2, (uint16_t)0x1072);
BitStreamReader<CDataStream> bit_reader(data_copy);
BOOST_CHECK_EQUAL(bit_reader.Read(1), 0U);
BOOST_CHECK_EQUAL(bit_reader.Read(2), 2U);
BOOST_CHECK_EQUAL(bit_reader.Read(3), 6U);
BOOST_CHECK_EQUAL(bit_reader.Read(4), 11U);
BOOST_CHECK_EQUAL(bit_reader.Read(5), 1U);
BOOST_CHECK_EQUAL(bit_reader.Read(6), 32U);
BOOST_CHECK_EQUAL(bit_reader.Read(7), 7U);
BOOST_CHECK_EQUAL(bit_reader.Read(16), 30497U);
BOOST_CHECK_THROW(bit_reader.Read(8), std::ios_base::failure);
}
BOOST_AUTO_TEST_CASE(streams_serializedata_xor) {
- std::vector<uint8_t> in;
+ std::vector<std::byte> in;
std::vector<char> expected_xor;
std::vector<uint8_t> key;
CDataStream ds(in, 0, 0);
// Degenerate case
key.push_back('\x00');
key.push_back('\x00');
ds.Xor(key);
BOOST_CHECK_EQUAL(std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
- in.push_back('\x0f');
- in.push_back('\xf0');
+ in.push_back(std::byte{0x0f});
+ in.push_back(std::byte{0xf0});
expected_xor.push_back('\xf0');
expected_xor.push_back('\x0f');
// Single character key
ds.clear();
ds.insert(ds.begin(), in.begin(), in.end());
key.clear();
key.push_back('\xff');
ds.Xor(key);
BOOST_CHECK_EQUAL(std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
// Multi character key
in.clear();
expected_xor.clear();
- in.push_back('\xf0');
- in.push_back('\x0f');
+ in.push_back(std::byte{0xf0});
+ in.push_back(std::byte{0x0f});
expected_xor.push_back('\x0f');
expected_xor.push_back('\x00');
ds.clear();
ds.insert(ds.begin(), in.begin(), in.end());
key.clear();
key.push_back('\xff');
key.push_back('\x0f');
ds.Xor(key);
BOOST_CHECK_EQUAL(std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
}
BOOST_AUTO_TEST_CASE(streams_empty_vector) {
std::vector<uint8_t> in;
CDataStream ds(in, 0, 0);
// read 0 bytes used to cause a segfault on some older systems.
- BOOST_CHECK_NO_THROW(ds.read(nullptr, 0));
+ BOOST_CHECK_NO_THROW(ds.read({}));
// Same goes for writing 0 bytes from a vector ...
- const std::vector<uint8_t> vdata{'f', 'o', 'o', 'b', 'a', 'r'};
+ const std::vector<std::byte> vdata{std::byte{'f'}, std::byte{'o'},
+ std::byte{'o'}, std::byte{'b'},
+ std::byte{'a'}, std::byte{'r'}};
BOOST_CHECK_NO_THROW(ds.insert(ds.begin(), vdata.begin(), vdata.begin()));
BOOST_CHECK_NO_THROW(ds.insert(ds.begin(), vdata.begin(), vdata.end()));
// ... or an array.
- const char adata[6] = {'f', 'o', 'o', 'b', 'a', 'r'};
+ const std::byte adata[6] = {std::byte{'f'}, std::byte{'o'}, std::byte{'o'},
+ std::byte{'b'}, std::byte{'a'}, std::byte{'r'}};
BOOST_CHECK_NO_THROW(ds.insert(ds.begin(), &adata[0], &adata[0]));
BOOST_CHECK_NO_THROW(ds.insert(ds.begin(), &adata[0], &adata[6]));
}
BOOST_AUTO_TEST_CASE(streams_buffered_file) {
FILE *file = fsbridge::fopen("streams_test_tmp", "w+b");
// The value at each offset is the offset.
for (uint8_t j = 0; j < 40; ++j) {
fwrite(&j, 1, 1, file);
}
rewind(file);
// The buffer size (second arg) must be greater than the rewind
// amount (third arg).
try {
CBufferedFile bfbad(file, 25, 25, 222, 333);
BOOST_CHECK(false);
} catch (const std::exception &e) {
BOOST_CHECK(
strstr(e.what(), "Rewind limit must be less than buffer size") !=
nullptr);
}
// The buffer is 25 bytes, allow rewinding 10 bytes.
CBufferedFile bf(file, 25, 10, 222, 333);
BOOST_CHECK(!bf.eof());
// These two members have no functional effect.
BOOST_CHECK_EQUAL(bf.GetType(), 222);
BOOST_CHECK_EQUAL(bf.GetVersion(), 333);
uint8_t i;
bf >> i;
BOOST_CHECK_EQUAL(i, 0);
bf >> i;
BOOST_CHECK_EQUAL(i, 1);
// After reading bytes 0 and 1, we're positioned at 2.
BOOST_CHECK_EQUAL(bf.GetPos(), 2U);
// Rewind to offset 0, ok (within the 10 byte window).
BOOST_CHECK(bf.SetPos(0));
bf >> i;
BOOST_CHECK_EQUAL(i, 0);
// We can go forward to where we've been, but beyond may fail.
BOOST_CHECK(bf.SetPos(2));
bf >> i;
BOOST_CHECK_EQUAL(i, 2);
// If you know the maximum number of bytes that should be
// read to deserialize the variable, you can limit the read
// extent. The current file offset is 3, so the following
// SetLimit() allows zero bytes to be read.
BOOST_CHECK(bf.SetLimit(3));
try {
bf >> i;
BOOST_CHECK(false);
} catch (const std::exception &e) {
BOOST_CHECK(strstr(e.what(), "Read attempted past buffer limit") !=
nullptr);
}
// The default argument removes the limit completely.
BOOST_CHECK(bf.SetLimit());
// The read position should still be at 3 (no change).
BOOST_CHECK_EQUAL(bf.GetPos(), 3U);
// Read from current offset, 3, forward until position 10.
for (uint8_t j = 3; j < 10; ++j) {
bf >> i;
BOOST_CHECK_EQUAL(i, j);
}
BOOST_CHECK_EQUAL(bf.GetPos(), 10U);
// We're guaranteed (just barely) to be able to rewind to zero.
BOOST_CHECK(bf.SetPos(0));
BOOST_CHECK_EQUAL(bf.GetPos(), 0U);
bf >> i;
BOOST_CHECK_EQUAL(i, 0);
// We can set the position forward again up to the farthest
// into the stream we've been, but no farther. (Attempting
// to go farther may succeed, but it's not guaranteed.)
BOOST_CHECK(bf.SetPos(10));
bf >> i;
BOOST_CHECK_EQUAL(i, 10);
BOOST_CHECK_EQUAL(bf.GetPos(), 11U);
// Now it's only guaranteed that we can rewind to offset 1
// (current read position, 11, minus rewind amount, 10).
BOOST_CHECK(bf.SetPos(1));
BOOST_CHECK_EQUAL(bf.GetPos(), 1U);
bf >> i;
BOOST_CHECK_EQUAL(i, 1);
// We can stream into large variables, even larger than
// the buffer size.
BOOST_CHECK(bf.SetPos(11));
{
uint8_t a[40 - 11];
bf >> a;
for (uint8_t j = 0; j < sizeof(a); ++j) {
BOOST_CHECK_EQUAL(a[j], 11 + j);
}
}
BOOST_CHECK_EQUAL(bf.GetPos(), 40U);
// We've read the entire file, the next read should throw.
try {
bf >> i;
BOOST_CHECK(false);
} catch (const std::exception &e) {
BOOST_CHECK(strstr(e.what(), "CBufferedFile::Fill: end of file") !=
nullptr);
}
// Attempting to read beyond the end sets the EOF indicator.
BOOST_CHECK(bf.eof());
// Still at offset 40, we can go back 10, to 30.
BOOST_CHECK_EQUAL(bf.GetPos(), 40U);
BOOST_CHECK(bf.SetPos(30));
bf >> i;
BOOST_CHECK_EQUAL(i, 30);
BOOST_CHECK_EQUAL(bf.GetPos(), 31U);
// We're too far to rewind to position zero.
BOOST_CHECK(!bf.SetPos(0));
// But we should now be positioned at least as far back as allowed
// by the rewind window (relative to our farthest read position, 40).
BOOST_CHECK(bf.GetPos() <= 30);
// We can explicitly close the file, or the destructor will do it.
bf.fclose();
fs::remove("streams_test_tmp");
}
BOOST_AUTO_TEST_CASE(streams_buffered_file_rand) {
    // Randomized stress test for CBufferedFile: write a temp file whose byte
    // at offset i has value i, then perform a random sequence of fixed-size
    // reads, FindByte() scans and SetPos() repositionings, checking after
    // each operation that the bytes read equal their file offsets.
    // Make this test deterministic.
    SeedInsecureRand(SeedRand::ZEROS);
    for (int rep = 0; rep < 50; ++rep) {
        FILE *file = fsbridge::fopen("streams_test_tmp", "w+b");
        size_t fileSize = InsecureRandRange(256);
        for (uint8_t i = 0; i < fileSize; ++i) {
            fwrite(&i, 1, 1, file);
        }
        rewind(file);

        // Random buffer size; the rewind window is always strictly smaller
        // than the buffer.
        size_t bufSize = InsecureRandRange(300) + 1;
        size_t rewindSize = InsecureRandRange(bufSize);
        // NOTE(review): 222 and 333 appear to be arbitrary stream
        // type/version values forwarded to the CBufferedFile constructor —
        // confirm against its signature.
        CBufferedFile bf(file, bufSize, rewindSize, 222, 333);
        // currentPos mirrors where we believe the read cursor is; maxPos is
        // the farthest offset read so far (seeking forward past it is not
        // guaranteed to succeed).
        size_t currentPos = 0;
        size_t maxPos = 0;
        for (int step = 0; step < 100; ++step) {
            if (currentPos >= fileSize) {
                break;
            }

            // We haven't read to the end of the file yet.
            BOOST_CHECK(!bf.eof());
            BOOST_CHECK_EQUAL(bf.GetPos(), currentPos);

            // Pretend the file consists of a series of objects of varying
            // sizes; the boundaries of the objects can interact arbitrarily
            // with the CBufferFile's internal buffer. These first three
            // cases simulate objects of various sizes (1, 2, 5 bytes).
            switch (InsecureRandRange(5)) {
                case 0: {
                    uint8_t a[1];
                    if (currentPos + 1 > fileSize) {
                        continue;
                    }
                    bf.SetLimit(currentPos + 1);
                    bf >> a;
                    for (uint8_t i = 0; i < 1; ++i) {
                        BOOST_CHECK_EQUAL(a[i], currentPos);
                        currentPos++;
                    }
                    break;
                }
                case 1: {
                    uint8_t a[2];
                    if (currentPos + 2 > fileSize) {
                        continue;
                    }
                    bf.SetLimit(currentPos + 2);
                    bf >> a;
                    for (uint8_t i = 0; i < 2; ++i) {
                        BOOST_CHECK_EQUAL(a[i], currentPos);
                        currentPos++;
                    }
                    break;
                }
                case 2: {
                    uint8_t a[5];
                    if (currentPos + 5 > fileSize) {
                        continue;
                    }
                    bf.SetLimit(currentPos + 5);
                    bf >> a;
                    for (uint8_t i = 0; i < 5; ++i) {
                        BOOST_CHECK_EQUAL(a[i], currentPos);
                        currentPos++;
                    }
                    break;
                }
                case 3: {
                    // Find a byte value (that is at or ahead of the current
                    // position).
                    size_t find = currentPos + InsecureRandRange(8);
                    if (find >= fileSize) {
                        find = fileSize - 1;
                    }
                    bf.FindByte(static_cast<char>(find));
                    // The value at each offset is the offset.
                    BOOST_CHECK_EQUAL(bf.GetPos(), find);
                    currentPos = find;

                    bf.SetLimit(currentPos + 1);
                    uint8_t i;
                    bf >> i;
                    BOOST_CHECK_EQUAL(i, currentPos);
                    currentPos++;
                    break;
                }
                case 4: {
                    size_t requestPos = InsecureRandRange(maxPos + 4);
                    bool okay = bf.SetPos(requestPos);
                    // The new position may differ from the requested position
                    // because we may not be able to rewind beyond the rewind
                    // window, and we may not be able to move forward beyond the
                    // farthest position we've reached so far.
                    currentPos = bf.GetPos();
                    BOOST_CHECK_EQUAL(okay, currentPos == requestPos);
                    // Check that we can position within the rewind window.
                    if (requestPos <= maxPos && maxPos > rewindSize &&
                        requestPos >= maxPos - rewindSize) {
                        // We requested a position within the rewind window.
                        BOOST_CHECK(okay);
                    }
                    break;
                }
            }
            if (maxPos < currentPos) {
                maxPos = currentPos;
            }
        }
    }
    fs::remove("streams_test_tmp");
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/uint256.h b/src/uint256.h
index 0732096e1..5221bd9b6 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -1,170 +1,172 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_UINT256_H
#define BITCOIN_UINT256_H
+#include <span.h>
+
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
/** Template base class for fixed-sized opaque blobs. */
template <unsigned int BITS> class base_blob {
protected:
    // Size of the blob in bytes.
    static constexpr int WIDTH = BITS / 8;
    uint8_t m_data[WIDTH];

public:
    /* construct 0 value by default */
    constexpr base_blob() : m_data() {}

    /* constructor for constants between 1 and 255 */
    constexpr explicit base_blob(uint8_t v) : m_data{v} {}

    explicit base_blob(const std::vector<uint8_t> &vch);

    // True iff every byte of the blob is zero.
    bool IsNull() const {
        for (int i = 0; i < WIDTH; i++) {
            if (m_data[i] != 0) {
                return false;
            }
        }
        return true;
    }

    void SetNull() { memset(m_data, 0, sizeof(m_data)); }

    // Three-way comparison (memcmp-style -1/0/1) that walks the bytes from
    // the highest index down, i.e. the last byte is most significant.
    inline int Compare(const base_blob &other) const {
        for (size_t i = 0; i < sizeof(m_data); i++) {
            uint8_t a = m_data[sizeof(m_data) - 1 - i];
            uint8_t b = other.m_data[sizeof(m_data) - 1 - i];
            if (a > b) {
                return 1;
            }
            if (a < b) {
                return -1;
            }
        }
        return 0;
    }

    friend inline bool operator==(const base_blob &a, const base_blob &b) {
        return a.Compare(b) == 0;
    }
    friend inline bool operator!=(const base_blob &a, const base_blob &b) {
        return a.Compare(b) != 0;
    }
    friend inline bool operator<(const base_blob &a, const base_blob &b) {
        return a.Compare(b) < 0;
    }
    friend inline bool operator<=(const base_blob &a, const base_blob &b) {
        return a.Compare(b) <= 0;
    }
    friend inline bool operator>(const base_blob &a, const base_blob &b) {
        return a.Compare(b) > 0;
    }
    friend inline bool operator>=(const base_blob &a, const base_blob &b) {
        return a.Compare(b) >= 0;
    }

    std::string GetHex() const;
    void SetHex(const char *psz);
    void SetHex(const std::string &str);
    std::string ToString() const { return GetHex(); }

    const uint8_t *data() const { return m_data; }
    uint8_t *data() { return m_data; }

    uint8_t *begin() { return &m_data[0]; }
    uint8_t *end() { return &m_data[WIDTH]; }
    const uint8_t *begin() const { return &m_data[0]; }
    const uint8_t *end() const { return &m_data[WIDTH]; }
    unsigned int size() const { return sizeof(m_data); }

    // Read the 8 bytes starting at byte offset pos * 8 as a little-endian
    // 64-bit integer.
    uint64_t GetUint64(int pos) const {
        const uint8_t *ptr = m_data + pos * 8;
        return uint64_t(ptr[0]) | (uint64_t(ptr[1]) << 8) |
               (uint64_t(ptr[2]) << 16) | (uint64_t(ptr[3]) << 24) |
               (uint64_t(ptr[4]) << 32) | (uint64_t(ptr[5]) << 40) |
               (uint64_t(ptr[6]) << 48) | (uint64_t(ptr[7]) << 56);
    }

    // NOTE(review): the '+'/'-' prefixed lines below are unresolved diff
    // markers from the patch this file renders; they are left untouched.
    template <typename Stream> void Serialize(Stream &s) const {
- s.write((char *)m_data, sizeof(m_data));
+ s.write(MakeByteSpan(m_data));
    }

    template <typename Stream> void Unserialize(Stream &s) {
- s.read((char *)m_data, sizeof(m_data));
+ s.read(MakeWritableByteSpan(m_data));
    }
};
/**
 * 160-bit opaque blob.
 * @note This type is called uint160 for historical reasons only. It is an
 * opaque blob of 160 bits and has no integer operations.
 */
class uint160 : public base_blob<160> {
public:
    // All-zero value by default.
    constexpr uint160() {}
    // Construct from a byte vector; see base_blob's vector constructor.
    explicit uint160(const std::vector<uint8_t> &vch) : base_blob<160>(vch) {}
};
/**
 * 256-bit opaque blob.
 * @note This type is called uint256 for historical reasons only. It is an
 * opaque blob of 256 bits and has no integer operations. Use arith_uint256 if
 * those are required.
 */
class uint256 : public base_blob<256> {
public:
    // All-zero value by default.
    constexpr uint256() {}
    // Constant value in [1, 255]; see base_blob(uint8_t).
    constexpr explicit uint256(uint8_t v) : base_blob<256>(v) {}
    // Construct from a byte vector; see base_blob's vector constructor.
    explicit uint256(const std::vector<uint8_t> &vch) : base_blob<256>(vch) {}

    // Commonly used constant values.
    static const uint256 ZERO;
    static const uint256 ONE;
};
/**
 * Parse a uint256 from a hex C-string.
 * Deliberately a free function rather than a uint256(const char *)
 * constructor, which could dangerously catch uint256(0).
 */
inline uint256 uint256S(const char *str) {
    uint256 parsed;
    parsed.SetHex(str);
    return parsed;
}
/**
 * Parse a uint256 from a hex std::string.
 * Deliberately a free function rather than a uint256(const std::string &)
 * constructor, which could dangerously catch uint256(0) via
 * std::string(const char *).
 */
inline uint256 uint256S(const std::string &str) {
    uint256 parsed;
    parsed.SetHex(str);
    return parsed;
}
/** Parse a uint160 from a hex C-string. */
inline uint160 uint160S(const char *str) {
    uint160 parsed;
    parsed.SetHex(str);
    return parsed;
}
/** Parse a uint160 from a hex std::string. */
inline uint160 uint160S(const std::string &str) {
    uint160 parsed;
    parsed.SetHex(str);
    return parsed;
}
#endif // BITCOIN_UINT256_H
diff --git a/src/validation.cpp b/src/validation.cpp
index b5a1167ee..53d4a2e1d 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,6562 +1,6562 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <validation.h>
#include <kernel/disconnected_transactions.h>
#include <kernel/mempool_entry.h>
#include <kernel/mempool_persist.h>
#include <arith_uint256.h>
#include <avalanche/avalanche.h>
#include <avalanche/processor.h>
#include <blockvalidity.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <checkqueue.h>
#include <config.h>
#include <consensus/activation.h>
#include <consensus/amount.h>
#include <consensus/merkle.h>
#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <fs.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <logging.h>
#include <logging/timer.h>
#include <minerfund.h>
#include <node/blockstorage.h>
#include <node/coinstats.h>
#include <node/ui_interface.h>
#include <node/utxo_snapshot.h>
#include <policy/block/minerfund.h>
#include <policy/block/preconsensus.h>
#include <policy/block/stakingrewards.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <pow/pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/scriptcache.h>
#include <script/sigcache.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <txdb.h>
#include <txmempool.h>
#include <undo.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/string.h>
#include <util/system.h>
#include <util/time.h>
#include <util/trace.h>
#include <util/translation.h>
#include <validationinterface.h>
#include <warnings.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <deque>
#include <numeric>
#include <optional>
#include <string>
#include <thread>
using kernel::LoadMempool;
using fsbridge::FopenFn;
using node::BLOCKFILE_CHUNK_SIZE;
using node::BlockManager;
using node::BlockMap;
using node::CCoinsStats;
using node::CoinStatsHashType;
using node::ComputeUTXOStats;
using node::fReindex;
using node::nPruneTarget;
using node::OpenBlockFile;
using node::ReadBlockFromDisk;
using node::SnapshotMetadata;
using node::UNDOFILE_CHUNK_SIZE;
using node::UndoReadFromDisk;
using node::UnlinkPrunedFiles;
#define MICRO 0.000001
#define MILLI 0.001
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
const std::vector<std::string> CHECKLEVEL_DOC{
"level 0 reads the blocks from disk",
"level 1 verifies block validity",
"level 2 verifies undo data",
"level 3 checks disconnection of tip blocks",
"level 4 tries to reconnect the blocks",
"each level includes the checks of the previous levels",
};
GlobalMutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
// Default validation options: cap block size at the configured excessive
// block size and enable both proof-of-work and merkle-root checks.
BlockValidationOptions::BlockValidationOptions(const Config &config)
    : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
      checkMerkleRoot(true) {}
/**
 * Walk the locator (sorted descending by height) and return the most recent
 * block it shares with the active chain; fall back to the genesis block when
 * nothing matches.
 */
const CBlockIndex *
Chainstate::FindForkInGlobalIndex(const CBlockLocator &locator) const {
    AssertLockHeld(cs_main);

    for (const BlockHash &hash : locator.vHave) {
        const CBlockIndex *entry = m_blockman.LookupBlockIndex(hash);
        if (!entry) {
            // Unknown block hash; try the next locator entry.
            continue;
        }
        if (m_chain.Contains(entry)) {
            return entry;
        }
        if (entry->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
            return m_chain.Tip();
        }
    }
    return m_chain.Genesis();
}
static uint32_t GetNextBlockScriptFlags(const CBlockIndex *pindex,
const ChainstateManager &chainman);
/**
 * Check whether a transaction's sequence locks would be satisfied if it were
 * included in the block following `tip`.
 *
 * @param tip        Chain tip to evaluate against; must be non-null.
 * @param coins_view View used to look up the confirmation heights of the
 *                   transaction's inputs.
 * @param tx         Transaction to evaluate.
 * @param lp         Optional lock points. Read when useExistingLockPoints is
 *                   true; otherwise filled in with the computed lock pair.
 * @param useExistingLockPoints Reuse lp instead of recomputing from inputs.
 * @return true if the sequence locks pass for the next block.
 */
bool CheckSequenceLocksAtTip(CBlockIndex *tip, const CCoinsView &coins_view,
                             const CTransaction &tx, LockPoints *lp,
                             bool useExistingLockPoints) {
    assert(tip != nullptr);

    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to
    // evaluate height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being* evaluated is what is
    // used. Thus if we want to know if a transaction can be part of the *next*
    // block, we need to use one more than active_chainstate.m_chain.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    } else {
        // Gather the height at which each input was created; inputs still in
        // the mempool are assumed to confirm in the very next block.
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn &txin = tx.vin[txinIndex];
            Coin coin;
            if (!coins_view.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.GetHeight() == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.GetHeight();
            }
        }
        lockPair = CalculateSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS,
                                          prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
// Command-line argument "-replayprotectionactivationtime=<timestamp>" makes
// the node switch to the replay protected SigHash ForkID value once the
// median timestamp of the previous 11 blocks reaches <timestamp>; when the
// argument is absent, the pre-defined activation timestamp is used.
static bool IsReplayProtectionEnabled(const Consensus::Params &params,
                                      int64_t nMedianTimePast) {
    const int64_t activationTime = gArgs.GetIntArg(
        "-replayprotectionactivationtime", params.augustoActivationTime);
    return nMedianTimePast >= activationTime;
}
// Overload taking the previous block index: without a previous block there is
// no median time past to compare, so replay protection is considered off.
static bool IsReplayProtectionEnabled(const Consensus::Params &params,
                                      const CBlockIndex *pindexPrev) {
    return pindexPrev != nullptr &&
           IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
}
/**
 * Checks to avoid mempool polluting consensus critical paths since cached
 * signature and script validity results will be reused if we validate this
 * transaction again during block validation.
 *
 * Verifies that every input coin seen during mempool validation matches
 * either a mempool parent's output or the chain tip's UTXO set, then runs
 * CheckInputScripts() with caching enabled. nSigChecksOut receives the
 * consensus sigcheck count.
 */
static bool CheckInputsFromMempoolAndCache(
    const CTransaction &tx, TxValidationState &state,
    const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
    PrecomputedTransactionData &txdata, int &nSigChecksOut,
    CCoinsViewCache &coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) {
    AssertLockHeld(cs_main);
    AssertLockHeld(pool.cs);

    assert(!tx.IsCoinBase());
    for (const CTxIn &txin : tx.vin) {
        const Coin &coin = view.AccessCoin(txin.prevout);

        // This coin was checked in PreChecks and MemPoolAccept
        // has been holding cs_main since then.
        Assume(!coin.IsSpent());
        if (coin.IsSpent()) {
            return false;
        }

        // If the Coin is available, there are 2 possibilities:
        // it is available in our current ChainstateActive UTXO set,
        // or it's a UTXO provided by a transaction in our mempool.
        // Ensure the scriptPubKeys in Coins from CoinsView are correct.
        const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
        if (txFrom) {
            assert(txFrom->GetId() == txin.prevout.GetTxId());
            assert(txFrom->vout.size() > txin.prevout.GetN());
            assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
        } else {
            const Coin &coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
            assert(!coinFromUTXOSet.IsSpent());
            assert(coinFromUTXOSet.GetTxOut() == coin.GetTxOut());
        }
    }

    // Call CheckInputScripts() to cache signature and script validity against
    // current tip consensus rules.
    return CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/true,
                             /*scriptCacheStore=*/true, txdata, nSigChecksOut);
}
namespace {
/**
 * Encapsulates the state and helper routines used to validate one or more
 * transactions for mempool acceptance. Construct one per acceptance attempt
 * and drive it through the Accept*() entry points with an ATMPArgs bundle.
 */
class MemPoolAccept {
public:
    MemPoolAccept(CTxMemPool &mempool, Chainstate &active_chainstate)
        : m_pool(mempool), m_view(&m_dummy),
          m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
          m_active_chainstate(active_chainstate) {}

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const Config &m_config;
        const int64_t m_accept_time;
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint> &m_coins_to_uncache;
        const bool m_test_accept;
        const unsigned int m_heightOverride;
        /**
         * When true, the mempool will not be trimmed when individual
         * transactions are submitted in Finalize(). Instead, limits should be
         * enforced at the end to ensure the package is not partially submitted.
         */
        const bool m_package_submission;

        /** Parameters for single transaction mempool validation. */
        static ATMPArgs SingleAccept(const Config &config, int64_t accept_time,
                                     bool bypass_limits,
                                     std::vector<COutPoint> &coins_to_uncache,
                                     bool test_accept,
                                     unsigned int heightOverride) {
            return ATMPArgs{config,
                            accept_time,
                            bypass_limits,
                            coins_to_uncache,
                            test_accept,
                            heightOverride,
                            /*package_submission=*/false};
        }

        /**
         * Parameters for test package mempool validation through
         * testmempoolaccept.
         */
        static ATMPArgs
        PackageTestAccept(const Config &config, int64_t accept_time,
                          std::vector<COutPoint> &coins_to_uncache) {
            return ATMPArgs{config, accept_time,
                            /*bypass_limits=*/false, coins_to_uncache,
                            /*test_accept=*/true,
                            /*height_override=*/0,
                            // not submitting to mempool
                            /*package_submission=*/false};
        }

        /** Parameters for child-with-unconfirmed-parents package validation. */
        static ATMPArgs
        PackageChildWithParents(const Config &config, int64_t accept_time,
                                std::vector<COutPoint> &coins_to_uncache) {
            return ATMPArgs{config,
                            accept_time,
                            /*bypass_limits=*/false,
                            coins_to_uncache,
                            /*test_accept=*/false,
                            /*height_override=*/0,
                            /*package_submission=*/true};
        }

    private:
        // Private ctor to avoid exposing details to clients and allowing the
        // possibility of mixing up the order of the arguments. Use static
        // functions above instead.
        ATMPArgs(const Config &config, int64_t accept_time, bool bypass_limits,
                 std::vector<COutPoint> &coins_to_uncache, bool test_accept,
                 unsigned int height_override, bool package_submission)
            : m_config{config}, m_accept_time{accept_time},
              m_bypass_limits{bypass_limits},
              m_coins_to_uncache{coins_to_uncache}, m_test_accept{test_accept},
              m_heightOverride{height_override}, m_package_submission{
                                                     package_submission} {}
    };

    // Single transaction acceptance
    MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef &ptx,
                                                ATMPArgs &args)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Multiple transaction acceptance. Transactions may or may not be
     * interdependent, but must not conflict with each other, and the
     * transactions cannot already be in the mempool. Parents must come
     * before children if any dependencies exist.
     */
    PackageMempoolAcceptResult
    AcceptMultipleTransactions(const std::vector<CTransactionRef> &txns,
                               ATMPArgs &args)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Package (more specific than just multiple transactions) acceptance.
     * Package must be a child with all of its unconfirmed parents, and
     * topologically sorted.
     */
    PackageMempoolAcceptResult AcceptPackage(const Package &package,
                                             ATMPArgs &args)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        Workspace(const CTransactionRef &ptx,
                  const uint32_t next_block_script_verify_flags)
            : m_ptx(ptx),
              m_next_block_script_verify_flags(next_block_script_verify_flags) {
        }

        /**
         * Mempool entry constructed for this transaction.
         * Constructed in PreChecks() but not inserted into the mempool until
         * Finalize().
         */
        std::unique_ptr<CTxMemPoolEntry> m_entry;

        /**
         * Virtual size of the transaction as used by the mempool, calculated
         * using serialized size of the transaction and sigchecks.
         */
        int64_t m_vsize;

        /**
         * Fees paid by this transaction: total input amounts subtracted by
         * total output amounts.
         */
        Amount m_base_fees;

        /**
         * Base fees + any fee delta set by the user with
         * prioritisetransaction.
         */
        Amount m_modified_fees;

        const CTransactionRef &m_ptx;
        // Validation result/reject reason for this transaction.
        TxValidationState m_state;

        /**
         * A temporary cache containing serialized transaction data for
         * signature verification.
         * Reused across PreChecks and ConsensusScriptChecks.
         */
        PrecomputedTransactionData m_precomputed_txdata;

        // ABC specific flags that are used in both PreChecks and
        // ConsensusScriptChecks
        const uint32_t m_next_block_script_verify_flags;
        // Sigcheck count computed under standard flags in PreChecks().
        int m_sig_checks_standard;
    };

    // Run the policy checks on a given transaction, excluding any script
    // checks. Looks up inputs, calculates feerate, considers replacement,
    // evaluates package limits, etc. As this function can be invoked for "free"
    // by a peer, only tests that are fast should be done here (to avoid CPU
    // DoS).
    bool PreChecks(ATMPArgs &args, Workspace &ws)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(const ATMPArgs &args, Workspace &ws)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Submit all transactions to the mempool and call ConsensusScriptChecks to
    // add to the script cache - should only be called after successful
    // validation of all transactions in the package.
    // The package may end up partially-submitted after size limiting;
    // returns true if all transactions are successfully added to the mempool,
    // false otherwise.
    bool SubmitPackage(const ATMPArgs &args, std::vector<Workspace> &workspaces,
                       PackageValidationState &package_state,
                       std::map<const TxId, const MempoolAcceptResult> &results)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

private:
    CTxMemPool &m_pool;
    // Coins view used during validation. Its backend is switched to
    // m_viewmempool while inputs are fetched, then back to m_dummy so no
    // further backend reads can occur (see PreChecks()).
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    // Backend that provides no coins; the default backend of m_view.
    CCoinsView m_dummy;

    Chainstate &m_active_chainstate;
};
/**
 * Fast, non-consensus-script checks for a single transaction: standardness,
 * finality, duplicate/conflict detection, input availability, sequence
 * locks, fee minimums and standard-flag script validation. On success fills
 * in ws (base/modified fees, sigchecks, mempool entry, vsize); on failure
 * ws.m_state carries the reject reason.
 */
bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
    AssertLockHeld(cs_main);
    AssertLockHeld(m_pool.cs);
    const CTransactionRef &ptx = ws.m_ptx;
    const CTransaction &tx = *ws.m_ptx;
    const TxId &txid = ws.m_ptx->GetId();

    // Copy/alias what we need out of args
    const int64_t nAcceptTime = args.m_accept_time;
    const bool bypass_limits = args.m_bypass_limits;
    std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
    const unsigned int heightOverride = args.m_heightOverride;

    // Alias what we need out of ws
    TxValidationState &state = ws.m_state;

    // Coinbase is only valid in a block, not as a loose transaction.
    if (!CheckRegularTransaction(tx, state)) {
        // state filled in by CheckRegularTransaction.
        return false;
    }

    // Rather not work on nonstandard transactions (unless -testnet)
    std::string reason;
    if (m_pool.m_require_standard &&
        !IsStandardTx(tx, m_pool.m_max_datacarrier_bytes,
                      m_pool.m_permit_bare_multisig,
                      m_pool.m_dust_relay_feerate, reason)) {
        return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
    }

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    TxValidationState ctxState;
    if (!ContextualCheckTransactionForCurrentBlock(
            m_active_chainstate.m_chain.Tip(),
            args.m_config.GetChainParams().GetConsensus(), tx, ctxState)) {
        // We copy the state from a dummy to ensure we don't increase the
        // ban score of peer for transaction that could be valid in the future.
        return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
                             ctxState.GetRejectReason(),
                             ctxState.GetDebugMessage());
    }

    // Is it already in the memory pool?
    if (m_pool.exists(txid)) {
        return state.Invalid(TxValidationResult::TX_DUPLICATE,
                             "txn-already-in-mempool");
    }

    // Check for conflicts with in-memory transactions
    for (const CTxIn &txin : tx.vin) {
        const CTransaction *ptxConflicting = m_pool.GetConflictTx(txin.prevout);
        if (ptxConflicting) {
            // Disable replacement feature for good
            return state.Invalid(TxValidationResult::TX_CONFLICT,
                                 "txn-mempool-conflict");
        }
    }

    LockPoints lp;
    m_view.SetBackend(m_viewmempool);

    const CCoinsViewCache &coins_cache = m_active_chainstate.CoinsTip();
    // Do all inputs exist?
    for (const CTxIn &txin : tx.vin) {
        if (!coins_cache.HaveCoinInCache(txin.prevout)) {
            coins_to_uncache.push_back(txin.prevout);
        }

        // Note: this call may add txin.prevout to the coins cache
        // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
        // removed later (via coins_to_uncache) if this tx turns out to be
        // invalid.
        if (!m_view.HaveCoin(txin.prevout)) {
            // Are inputs missing because we already have the tx?
            for (size_t out = 0; out < tx.vout.size(); out++) {
                // Optimistically just do efficient check of cache for
                // outputs.
                if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
                    return state.Invalid(TxValidationResult::TX_DUPLICATE,
                                         "txn-already-known");
                }
            }

            // Otherwise assume this might be an orphan tx for which we just
            // haven't seen parents yet.
            return state.Invalid(TxValidationResult::TX_MISSING_INPUTS,
                                 "bad-txns-inputs-missingorspent");
        }
    }

    // Are the actual inputs available?
    if (!m_view.HaveInputs(tx)) {
        return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                             "bad-txns-inputs-spent");
    }

    // Bring the best block into scope.
    m_view.GetBestBlock();

    // we have all inputs cached now, so switch back to dummy (to protect
    // against bugs where we pull more inputs from disk that miss being
    // added to coins_to_uncache)
    m_view.SetBackend(m_dummy);

    assert(m_active_chainstate.m_blockman.LookupBlockIndex(
               m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());

    // Only accept BIP68 sequence locked transactions that can be mined in
    // the next block; we don't want our mempool filled up with transactions
    // that can't be mined yet.
    // Pass in m_view which has all of the relevant inputs cached. Note that,
    // since m_view's backend was removed, it no longer pulls coins from the
    // mempool.
    if (!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx,
                                 &lp)) {
        return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
                             "non-BIP68-final");
    }

    // The mempool holds txs for the next block, so pass height+1 to
    // CheckTxInputs
    if (!Consensus::CheckTxInputs(tx, state, m_view,
                                  m_active_chainstate.m_chain.Height() + 1,
                                  ws.m_base_fees)) {
        // state filled in by CheckTxInputs
        return false;
    }

    // Check for non-standard pay-to-script-hash in inputs
    if (m_pool.m_require_standard &&
        !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
        return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD,
                             "bad-txns-nonstandard-inputs");
    }

    // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
    ws.m_modified_fees = ws.m_base_fees;
    m_pool.ApplyDelta(txid, ws.m_modified_fees);

    unsigned int nSize = tx.GetTotalSize();

    // No transactions are allowed below the min relay feerate except from
    // disconnected blocks.
    // Do not change this to use virtualsize without coordinating a network
    // policy upgrade.
    if (!bypass_limits &&
        ws.m_modified_fees < m_pool.m_min_relay_feerate.GetFee(nSize)) {
        return state.Invalid(
            TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
            strprintf("%d < %d", ws.m_modified_fees,
                      m_pool.m_min_relay_feerate.GetFee(nSize)));
    }

    // Validate input scripts against standard script flags.
    const uint32_t scriptVerifyFlags =
        ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
    ws.m_precomputed_txdata = PrecomputedTransactionData{tx};
    if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
                           ws.m_precomputed_txdata, ws.m_sig_checks_standard)) {
        // State filled in by CheckInputScripts
        return false;
    }

    // Build the mempool entry now; it is inserted later, in Finalize().
    ws.m_entry = std::make_unique<CTxMemPoolEntry>(
        ptx, ws.m_base_fees, nAcceptTime,
        heightOverride ? heightOverride : m_active_chainstate.m_chain.Height(),
        ws.m_sig_checks_standard, lp);

    ws.m_vsize = ws.m_entry->GetTxVirtualSize();

    Amount mempoolRejectFee = m_pool.GetMinFee().GetFee(ws.m_vsize);
    if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
        ws.m_modified_fees < mempoolRejectFee) {
        return state.Invalid(
            TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
            strprintf("%d < %d", ws.m_modified_fees, mempoolRejectFee));
    }

    return true;
}
/**
 * Re-validate input scripts under the next block's consensus flags (caching
 * the results) and verify the consensus sigcheck count matches the count
 * obtained under standard flags in PreChecks().
 */
bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws) {
    AssertLockHeld(cs_main);
    AssertLockHeld(m_pool.cs);
    const CTransaction &tx = *ws.m_ptx;
    const TxId &txid = tx.GetId();
    TxValidationState &state = ws.m_state;

    // Check again against the next block's script verification flags
    // to cache our script execution flags.
    //
    // This is also useful in case of bugs in the standard flags that cause
    // transactions to pass as valid when they're actually invalid. For
    // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
    // NOT scripts to pass, even though they were invalid.
    //
    // There is a similar check in CreateNewBlock() to prevent creating
    // invalid blocks (using TestBlockValidity), however allowing such
    // transactions into the mempool can be exploited as a DoS attack.
    int nSigChecksConsensus;
    if (!CheckInputsFromMempoolAndCache(
            tx, state, m_view, m_pool, ws.m_next_block_script_verify_flags,
            ws.m_precomputed_txdata, nSigChecksConsensus,
            m_active_chainstate.CoinsTip())) {
        // This can occur under some circumstances, if the node receives an
        // unrequested tx which is invalid due to new consensus rules not
        // being activated yet (during IBD).
        LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against "
                  "latest-block but not STANDARD flags %s, %s\n",
                  txid.ToString(), state.ToString());
        return Assume(false);
    }

    if (ws.m_sig_checks_standard != nSigChecksConsensus) {
        // We can't accept this transaction as we've used the standard count
        // for the mempool/mining, but the consensus count will be enforced
        // in validation (we don't want to produce bad block templates).
        return error(
            "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
            "standard and consensus flags in %s",
            __func__, txid.ToString());
    }
    return true;
}
/**
 * Insert the entry built in PreChecks() into the mempool and, unless this is
 * a package submission or limits are bypassed, trim the mempool to its size
 * limits and verify the new transaction survived the trim.
 */
bool MemPoolAccept::Finalize(const ATMPArgs &args, Workspace &ws) {
    AssertLockHeld(cs_main);
    AssertLockHeld(m_pool.cs);
    const TxId &txid = ws.m_ptx->GetId();
    TxValidationState &state = ws.m_state;
    const bool bypass_limits = args.m_bypass_limits;

    // Store transaction in memory.
    // NOTE(review): release() hands the raw entry to
    // CTxMemPoolEntryRef::acquire, which presumably assumes ownership —
    // confirm against CTxMemPoolEntryRef's contract.
    CTxMemPoolEntry *pentry = ws.m_entry.release();
    auto entry = CTxMemPoolEntryRef::acquire(pentry);
    m_pool.addUnchecked(entry);

    // Trim mempool and check if tx was trimmed.
    // If we are validating a package, don't trim here because we could evict a
    // previous transaction in the package. LimitMempoolSize() should be called
    // at the very end to make sure the mempool is still within limits and
    // package submission happens atomically.
    if (!args.m_package_submission && !bypass_limits) {
        m_pool.LimitSize(m_active_chainstate.CoinsTip());
        if (!m_pool.exists(txid)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                 "mempool full");
        }
    }
    return true;
}
// Get the coins spent by ptx from the coins_view. Assumes coins are present.
static std::vector<Coin> getSpentCoins(const CTransactionRef &ptx,
const CCoinsViewCache &coins_view) {
std::vector<Coin> spent_coins;
spent_coins.reserve(ptx->vin.size());
for (const CTxIn &input : ptx->vin) {
Coin coin;
const bool coinFound = coins_view.GetCoin(input.prevout, coin);
Assume(coinFound);
spent_coins.push_back(std::move(coin));
}
return spent_coins;
}
/**
 * Submit every transaction in `workspaces` to the mempool, after all of them
 * have passed PreChecks. A per-transaction result (success or failure) is
 * recorded in `results`. Returns true only if all transactions made it into
 * the mempool and survived the final trim.
 */
bool MemPoolAccept::SubmitPackage(
    const ATMPArgs &args, std::vector<Workspace> &workspaces,
    PackageValidationState &package_state,
    std::map<const TxId, const MempoolAcceptResult> &results) {
    AssertLockHeld(cs_main);
    AssertLockHeld(m_pool.cs);
    // Sanity check: none of the transactions should be in the mempool.
    assert(std::all_of(
        workspaces.cbegin(), workspaces.cend(),
        [this](const auto &ws) { return !m_pool.exists(ws.m_ptx->GetId()); }));

    bool all_submitted = true;
    // ConsensusScriptChecks adds to the script cache and is therefore
    // consensus-critical; CheckInputsFromMempoolAndCache asserts that
    // transactions only spend coins available from the mempool or UTXO set.
    // Submit each transaction to the mempool immediately after calling
    // ConsensusScriptChecks to make the outputs available for subsequent
    // transactions.
    for (Workspace &ws : workspaces) {
        if (!ConsensusScriptChecks(args, ws)) {
            results.emplace(ws.m_ptx->GetId(),
                            MempoolAcceptResult::Failure(ws.m_state));
            // Since PreChecks() passed, this should never fail.
            all_submitted = Assume(false);
        }

        // If we call LimitMempoolSize() for each individual Finalize(), the
        // mempool will not take the transaction's descendant feerate into
        // account because it hasn't seen them yet. Also, we risk evicting a
        // transaction that a subsequent package transaction depends on.
        // Instead, allow the mempool to temporarily bypass limits (by up to
        // the maximum package size) while submitting transactions
        // individually, and then trim at the very end.
        if (!Finalize(args, ws)) {
            results.emplace(ws.m_ptx->GetId(),
                            MempoolAcceptResult::Failure(ws.m_state));
            // Since LimitMempoolSize() won't be called, this should never fail.
            all_submitted = Assume(false);
        }
    }

    // It may or may not be the case that all the transactions made it into the
    // mempool. Regardless, make sure we haven't exceeded max mempool size.
    m_pool.LimitSize(m_active_chainstate.CoinsTip());
    if (!all_submitted) {
        return false;
    }

    // Find the txids of the transactions that made it into the mempool. Allow
    // partial submission, but don't report success unless they all made it into
    // the mempool.
    for (Workspace &ws : workspaces) {
        if (m_pool.exists(ws.m_ptx->GetId())) {
            results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
                                                   ws.m_vsize, ws.m_base_fees));
            GetMainSignals().TransactionAddedToMempool(
                ws.m_ptx,
                std::make_shared<const std::vector<Coin>>(
                    getSpentCoins(ws.m_ptx, m_view)),
                m_pool.GetAndIncrementSequence());
        } else {
            // The transaction was evicted by the final trim above.
            all_submitted = false;
            ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                               "mempool full");
            results.emplace(ws.m_ptx->GetId(),
                            MempoolAcceptResult::Failure(ws.m_state));
        }
    }
    return all_submitted;
}
/**
 * Validate a single transaction against the current tip and, unless
 * m_test_accept is set, add it to the mempool and fire the
 * TransactionAddedToMempool signal.
 */
MempoolAcceptResult
MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
                                       ATMPArgs &args) {
    AssertLockHeld(cs_main);
    // mempool "read lock" (held through
    // GetMainSignals().TransactionAddedToMempool())
    LOCK(m_pool.cs);

    const CBlockIndex *tip = m_active_chainstate.m_chain.Tip();

    // Validate with the script flags of the *next* block, since that is the
    // block this transaction could be mined in.
    Workspace ws(ptx,
                 GetNextBlockScriptFlags(tip, m_active_chainstate.m_chainman));

    // Perform the inexpensive checks first and avoid hashing and signature
    // verification unless those checks pass, to mitigate CPU exhaustion
    // denial-of-service attacks.
    if (!PreChecks(args, ws)) {
        return MempoolAcceptResult::Failure(ws.m_state);
    }

    if (!ConsensusScriptChecks(args, ws)) {
        return MempoolAcceptResult::Failure(ws.m_state);
    }

    const TxId txid = ptx->GetId();

    // Mempool sanity check -- in our new mempool no tx can be added if its
    // outputs are already spent in the mempool (that is, no children before
    // parents allowed; the mempool must be consistent at all times).
    //
    // This means that on reorg, the disconnectpool *must* always import
    // the existing mempool tx's, clear the mempool, and then re-add
    // remaining tx's in topological order via this function. Our new mempool
    // has fast adds, so this is ok.
    if (auto it = m_pool.mapNextTx.lower_bound(COutPoint{txid, 0});
        it != m_pool.mapNextTx.end() && it->first->GetTxId() == txid) {
        LogPrintf("%s: BUG! PLEASE REPORT THIS! Attempt to add txid %s, but "
                  "its outputs are already spent in the "
                  "mempool\n",
                  __func__, txid.ToString());
        ws.m_state.Invalid(TxValidationResult::TX_CHILD_BEFORE_PARENT,
                           "txn-child-before-parent");
        return MempoolAcceptResult::Failure(ws.m_state);
    }

    // Tx was accepted, but not added
    if (args.m_test_accept) {
        return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
    }

    if (!Finalize(args, ws)) {
        return MempoolAcceptResult::Failure(ws.m_state);
    }

    GetMainSignals().TransactionAddedToMempool(
        ptx,
        std::make_shared<const std::vector<Coin>>(getSpentCoins(ptx, m_view)),
        m_pool.GetAndIncrementSequence());

    return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
}
/**
 * Validate the transactions in `txns` as a package and, unless m_test_accept
 * is set, submit them to the mempool via SubmitPackage(). Fails fast on the
 * first transaction that fails PreChecks; the remaining transactions are left
 * without results.
 */
PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(
    const std::vector<CTransactionRef> &txns, ATMPArgs &args) {
    AssertLockHeld(cs_main);

    // These context-free package limits can be done before taking the mempool
    // lock.
    PackageValidationState package_state;
    if (!CheckPackage(txns, package_state)) {
        return PackageMempoolAcceptResult(package_state, {});
    }

    // One workspace per transaction, all validated with the next block's
    // script flags.
    std::vector<Workspace> workspaces{};
    workspaces.reserve(txns.size());
    std::transform(
        txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
        [this](const auto &tx) {
            return Workspace(
                tx, GetNextBlockScriptFlags(m_active_chainstate.m_chain.Tip(),
                                            m_active_chainstate.m_chainman));
        });
    std::map<const TxId, const MempoolAcceptResult> results;

    LOCK(m_pool.cs);

    // Do all PreChecks first and fail fast to avoid running expensive script
    // checks when unnecessary.
    for (Workspace &ws : workspaces) {
        if (!PreChecks(args, ws)) {
            package_state.Invalid(PackageValidationResult::PCKG_TX,
                                  "transaction failed");
            // Exit early to avoid doing pointless work. Update the failed tx
            // result; the rest are unfinished.
            results.emplace(ws.m_ptx->GetId(),
                            MempoolAcceptResult::Failure(ws.m_state));
            return PackageMempoolAcceptResult(package_state,
                                              std::move(results));
        }

        // Make the coins created by this transaction available for subsequent
        // transactions in the package to spend.
        m_viewmempool.PackageAddTransaction(ws.m_ptx);
        if (args.m_test_accept) {
            // When test_accept=true, transactions that pass PreChecks
            // are valid because there are no further mempool checks (passing
            // PreChecks implies passing ConsensusScriptChecks).
            results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
                                                   ws.m_vsize, ws.m_base_fees));
        }
    }

    if (args.m_test_accept) {
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }

    if (!SubmitPackage(args, workspaces, package_state, results)) {
        package_state.Invalid(PackageValidationResult::PCKG_TX,
                              "submission failed");
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }

    return PackageMempoolAcceptResult(package_state, std::move(results));
}
/**
 * Accept a child-with-unconfirmed-parents package: enforce the package
 * topology, de-duplicate transactions that are already in the mempool, and
 * hand the remainder to AcceptMultipleTransactions().
 */
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package &package,
                                                        ATMPArgs &args) {
    AssertLockHeld(cs_main);
    PackageValidationState package_state;

    // Check that the package is well-formed. If it isn't, we won't try to
    // validate any of the transactions and thus won't return any
    // MempoolAcceptResults, just a package-wide error.

    // Context-free package checks.
    if (!CheckPackage(package, package_state)) {
        return PackageMempoolAcceptResult(package_state, {});
    }

    // All transactions in the package must be a parent of the last transaction.
    // This is just an opportunity for us to fail fast on a context-free check
    // without taking the mempool lock.
    if (!IsChildWithParents(package)) {
        package_state.Invalid(PackageValidationResult::PCKG_POLICY,
                              "package-not-child-with-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }

    // IsChildWithParents() guarantees the package is > 1 transactions.
    assert(package.size() > 1);
    // The package must be 1 child with all of its unconfirmed parents. The
    // package is expected to be sorted, so the last transaction is the child.
    const auto &child = package.back();
    std::unordered_set<TxId, SaltedTxIdHasher> unconfirmed_parent_txids;
    std::transform(
        package.cbegin(), package.cend() - 1,
        std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
        [](const auto &tx) { return tx->GetId(); });

    // All child inputs must refer to a preceding package transaction or a
    // confirmed UTXO. The only way to verify this is to look up the child's
    // inputs in our current coins view (not including mempool), and enforce
    // that all parents not present in the package be available at chain tip.
    // Since this check can bring new coins into the coins cache, keep track of
    // these coins and uncache them if we don't end up submitting this package
    // to the mempool.
    const CCoinsViewCache &coins_tip_cache = m_active_chainstate.CoinsTip();
    for (const auto &input : child->vin) {
        if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
            args.m_coins_to_uncache.push_back(input.prevout);
        }
    }
    // Using the MemPoolAccept m_view cache allows us to look up these same
    // coins faster later. This should be connecting directly to CoinsTip, not
    // to m_viewmempool, because we specifically require inputs to be confirmed
    // if they aren't in the package.
    m_view.SetBackend(m_active_chainstate.CoinsTip());
    const auto package_or_confirmed = [this, &unconfirmed_parent_txids](
                                          const auto &input) {
        return unconfirmed_parent_txids.count(input.prevout.GetTxId()) > 0 ||
               m_view.HaveCoin(input.prevout);
    };
    if (!std::all_of(child->vin.cbegin(), child->vin.cend(),
                     package_or_confirmed)) {
        package_state.Invalid(PackageValidationResult::PCKG_POLICY,
                              "package-not-child-with-unconfirmed-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }
    // Protect against bugs where we pull more inputs from disk that would then
    // miss being added to coins_to_uncache. The backend will be connected
    // again when needed in PreChecks.
    m_view.SetBackend(m_dummy);

    LOCK(m_pool.cs);
    std::map<const TxId, const MempoolAcceptResult> results;
    // Node operators are free to set their mempool policies however they
    // please, nodes may receive transactions in different orders, and malicious
    // counterparties may try to take advantage of policy differences to pin or
    // delay propagation of transactions. As such, it's possible for some
    // package transaction(s) to already be in the mempool, and we don't want to
    // reject the entire package in that case (as that could be a censorship
    // vector). De-duplicate the transactions that are already in the mempool,
    // and only call AcceptMultipleTransactions() with the new transactions.
    // This ensures we don't double-count transaction counts and sizes when
    // checking ancestor/descendant limits, or double-count transaction fees for
    // fee-related policy.
    std::vector<CTransactionRef> txns_new;
    for (const auto &tx : package) {
        const auto &txid = tx->GetId();
        // An already confirmed tx is treated as one not in mempool, because all
        // we know is that the inputs aren't available.
        if (m_pool.exists(txid)) {
            // Exact transaction already exists in the mempool.
            auto iter = m_pool.GetIter(txid);
            assert(iter != std::nullopt);
            results.emplace(txid, MempoolAcceptResult::MempoolTx(
                                      (*iter.value())->GetTxSize(),
                                      (*iter.value())->GetFee()));
        } else {
            // Transaction does not already exist in the mempool.
            txns_new.push_back(tx);
        }
    }

    // Nothing to do if the entire package has already been submitted.
    if (txns_new.empty()) {
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }
    // Validate the (deduplicated) transactions as a package.
    auto submission_result = AcceptMultipleTransactions(txns_new, args);
    // Include already-in-mempool transaction results in the final result.
    for (const auto &[txid, mempoolaccept_res] : results) {
        submission_result.m_tx_results.emplace(txid, mempoolaccept_res);
    }
    return submission_result;
}
} // namespace
/**
 * Try to add a single transaction to the active chainstate's mempool.
 * On failure, coins pulled into the cache during validation are evicted
 * again to bound memory usage.
 */
MempoolAcceptResult AcceptToMemoryPool(Chainstate &active_chainstate,
                                       const CTransactionRef &tx,
                                       int64_t accept_time, bool bypass_limits,
                                       bool test_accept,
                                       unsigned int heightOverride) {
    AssertLockHeld(::cs_main);
    assert(active_chainstate.GetMempool() != nullptr);
    CTxMemPool &pool{*active_chainstate.GetMempool()};

    std::vector<COutPoint> coins_to_uncache;
    auto args = MemPoolAccept::ATMPArgs::SingleAccept(
        active_chainstate.m_chainman.GetConfig(), accept_time, bypass_limits,
        coins_to_uncache, test_accept, heightOverride);
    const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate)
                                           .AcceptSingleTransaction(tx, args);
    if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
        // Remove coins that were not present in the coins cache before calling
        // ATMPW; this is to prevent memory DoS in case we receive a large
        // number of invalid transactions that attempt to overrun the in-memory
        // coins cache
        // (`CCoinsViewCache::cacheCoins`).
        for (const COutPoint &outpoint : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(outpoint);
        }
    }

    // After we've (potentially) uncached entries, ensure our coins cache is
    // still within its size limits
    BlockValidationState stateDummy;
    active_chainstate.FlushStateToDisk(stateDummy, FlushStateMode::PERIODIC);
    return result;
}
/**
 * Validate (and, unless test_accept, submit) a package of transactions.
 * Dispatches to AcceptMultipleTransactions() for test-accept and to
 * AcceptPackage() for real submission; cleans up cached coins on failure.
 */
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate,
                                             CTxMemPool &pool,
                                             const Package &package,
                                             bool test_accept) {
    AssertLockHeld(cs_main);
    assert(!package.empty());
    assert(std::all_of(package.cbegin(), package.cend(),
                       [](const auto &tx) { return tx != nullptr; }));

    const Config &config = active_chainstate.m_chainman.GetConfig();

    std::vector<COutPoint> coins_to_uncache;
    const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
        AssertLockHeld(cs_main);
        if (test_accept) {
            auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptMultipleTransactions(package, args);
        } else {
            auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptPackage(package, args);
        }
    }();

    // Uncache coins pertaining to transactions that were not submitted to the
    // mempool.
    if (test_accept || result.m_state.IsInvalid()) {
        for (const COutPoint &hashTx : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(hashTx);
        }
    }
    // Ensure the coins cache is still within limits.
    BlockValidationState state_dummy;
    active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
    return result;
}
/**
 * Compute the block subsidy at a given height. The subsidy is cut in half
 * every nSubsidyHalvingInterval blocks (210,000 on mainnet, roughly every
 * four years).
 */
Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
    const int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // A right shift of 64 or more is undefined behavior, so force the
    // subsidy to zero from that point on.
    if (halvings >= 64) {
        return Amount::zero();
    }
    const Amount baseSubsidy = 50 * COIN;
    return ((baseSubsidy / SATOSHI) >> halvings) * SATOSHI;
}
/**
 * Construct the on-disk coins database (in the data directory under
 * `ldb_name`) and the error-catching view wrapping it. The in-memory cache
 * is created separately via InitCache().
 */
CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
                       bool in_memory, bool should_wipe)
    : m_dbview(gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory,
               should_wipe),
      m_catcherview(&m_dbview) {}
/** Create the in-memory coins cache, layered on the error-catching view. */
void CoinsViews::InitCache() {
    AssertLockHeld(::cs_main);
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
/**
 * Chainstate constructor: stores the (possibly null) mempool pointer, the
 * block manager, the owning ChainstateManager, and — for snapshot-based
 * chainstates — the base block hash of the snapshot.
 */
Chainstate::Chainstate(CTxMemPool *mempool, BlockManager &blockman,
                       ChainstateManager &chainman,
                       std::optional<BlockHash> from_snapshot_blockhash)
    : m_mempool(mempool), m_blockman(blockman), m_chainman(chainman),
      m_from_snapshot_blockhash(from_snapshot_blockhash) {}
/**
 * Create the on-disk coins views for this chainstate. Snapshot-based
 * chainstates get a distinct leveldb directory (suffix appended) so they
 * don't collide with the validated chainstate's database.
 */
void Chainstate::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
                             bool should_wipe, std::string leveldb_name) {
    if (m_from_snapshot_blockhash) {
        leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX;
    }

    m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
                                                 in_memory, should_wipe);
}
/**
 * Create the in-memory coins cache and record its size budget. Must be
 * called after InitCoinsDB().
 */
void Chainstate::InitCoinsCache(size_t cache_size_bytes) {
    AssertLockHeld(::cs_main);
    assert(m_coins_views != nullptr);
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
// Note that though this is marked const, we may end up modifying
// `m_cached_finished_ibd`, which is a performance-related implementation
// detail. This function must be marked `const` so that `CValidationInterface`
// clients (which are given a `const Chainstate*`) can call it.
//
// Once this latches to false it never returns to true for the lifetime of
// the object.
bool Chainstate::IsInitialBlockDownload() const {
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }

    LOCK(cs_main);
    // Re-check under the lock in case another thread latched meanwhile.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }
    // Still importing or reindexing blocks from disk.
    if (m_chainman.m_blockman.LoadingBlocks()) {
        return true;
    }
    // No chain tip yet.
    if (m_chain.Tip() == nullptr) {
        return true;
    }
    // Tip has less cumulative work than the hard-coded minimum chain work.
    if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) {
        return true;
    }
    // Tip is older than the configured maximum tip age.
    if (m_chain.Tip()->Time() <
        Now<NodeSeconds>() - m_chainman.m_options.max_tip_age) {
        return true;
    }
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
static void AlertNotify(const std::string &strMessage) {
uiInterface.NotifyAlertChanged();
#if defined(HAVE_SYSTEM)
std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) {
return;
}
// Alert text should be plain ascii coming from a trusted source, but to be
// safe we first strip anything not in safeChars, then add single quotes
// around the whole string before passing it to the shell:
std::string singleQuote("'");
std::string safeStatus = SanitizeString(strMessage);
safeStatus = singleQuote + safeStatus + singleQuote;
ReplaceAll(strCmd, "%s", safeStatus);
std::thread t(runCommand, strCmd);
// thread runs free
t.detach();
#endif
}
/**
 * Check whether a large-work fork (or an invalid chain with more work than
 * ours) exists and update the corresponding warning flags, notifying via
 * -alertnotify when a warning first triggers.
 */
void Chainstate::CheckForkWarningConditions() {
    AssertLockHeld(cs_main);

    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial
    // sync)
    if (IsInitialBlockDownload()) {
        return;
    }

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
    // mines it) of our head, or if it is back on the active chain, drop it
    if (m_best_fork_tip && (m_chain.Height() - m_best_fork_tip->nHeight >= 72 ||
                            m_chain.Contains(m_best_fork_tip))) {
        m_best_fork_tip = nullptr;
    }

    // Warn either for a tracked fork tip, or for a known-invalid chain with
    // at least ~6 blocks more work than our tip.
    if (m_best_fork_tip ||
        (m_chainman.m_best_invalid &&
         m_chainman.m_best_invalid->nChainWork >
             m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6))) {
        if (!GetfLargeWorkForkFound() && m_best_fork_base) {
            std::string warning =
                std::string("'Warning: Large-work fork detected, forking after "
                            "block ") +
                m_best_fork_base->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }

        if (m_best_fork_tip && m_best_fork_base) {
            LogPrintf("%s: Warning: Large fork found\n  forking the "
                      "chain at height %d (%s)\n  lasting to height %d "
                      "(%s).\nChain state database corruption likely.\n",
                      __func__, m_best_fork_base->nHeight,
                      m_best_fork_base->phashBlock->ToString(),
                      m_best_fork_tip->nHeight,
                      m_best_fork_tip->phashBlock->ToString());
            SetfLargeWorkForkFound(true);
        } else {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
                      "longer than our best chain.\nChain state database "
                      "corruption likely.\n",
                      __func__);
            SetfLargeWorkInvalidChainFound(true);
        }
    } else {
        SetfLargeWorkForkFound(false);
        SetfLargeWorkInvalidChainFound(false);
    }
}
/**
 * Track `pindexNewForkTip` as the best candidate fork tip if it qualifies
 * (at least 7 blocks of work off the main chain and within 72 blocks of our
 * tip), then re-evaluate the fork warning conditions.
 */
void Chainstate::CheckForkWarningConditionsOnNewFork(
    CBlockIndex *pindexNewForkTip) {
    AssertLockHeld(cs_main);

    // If we are on a fork that is sufficiently large, set a warning flag.
    const CBlockIndex *pfork = m_chain.FindFork(pindexNewForkTip);

    // We define a condition where we should warn the user about as a fork of at
    // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
    // it) of ours. We use 7 blocks rather arbitrarily as it represents just
    // under 10% of sustained network hash rate operating on the fork, or a
    // chain that is entirely longer than ours and invalid (note that this
    // should be detected by both). We define it this way because it allows us
    // to only store the highest fork tip (+ base) which meets the 7-block
    // condition and from this always have the most-likely-to-cause-warning fork
    if (pfork &&
        (!m_best_fork_tip ||
         pindexNewForkTip->nHeight > m_best_fork_tip->nHeight) &&
        pindexNewForkTip->nChainWork - pfork->nChainWork >
            (GetBlockProof(*pfork) * 7) &&
        m_chain.Height() - pindexNewForkTip->nHeight < 72) {
        m_best_fork_tip = pindexNewForkTip;
        m_best_fork_base = pfork;
    }

    CheckForkWarningConditions();
}
// Called both upon regular invalid block discovery *and* InvalidateBlock
/**
 * Record that the chain ending in `pindexNew` is invalid: track the
 * most-work invalid tip, demote m_best_header if it descended from the
 * invalid block, move the avalanche finalization point back if needed, and
 * log both the invalid block and the current best chain.
 */
void Chainstate::InvalidChainFound(CBlockIndex *pindexNew) {
    AssertLockHeld(cs_main);
    if (!m_chainman.m_best_invalid ||
        pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
        m_chainman.m_best_invalid = pindexNew;
    }
    // If our best header descends from the newly-invalid block, fall back to
    // the active tip as best header.
    if (m_chainman.m_best_header != nullptr &&
        m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) ==
            pindexNew) {
        m_chainman.m_best_header = m_chain.Tip();
    }

    // If the invalid chain found is supposed to be finalized, we need to move
    // back the finalization point.
    if (IsBlockAvalancheFinalized(pindexNew)) {
        LOCK(cs_avalancheFinalizedBlockIndex);
        m_avalancheFinalizedBlockIndex = pindexNew->pprev;
    }

    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%f  date=%s\n",
              __func__, pindexNew->GetBlockHash().ToString(),
              pindexNew->nHeight,
              log(pindexNew->nChainWork.getdouble()) / log(2.0),
              FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = m_chain.Tip();
    assert(tip);
    LogPrintf("%s: current best=%s  height=%d  log2_work=%f  date=%s\n",
              __func__, tip->GetBlockHash().ToString(), m_chain.Height(),
              log(tip->nChainWork.getdouble()) / log(2.0),
              FormatISO8601DateTime(tip->GetBlockTime()));
}
// Same as InvalidChainFound, above, except not called directly from
// InvalidateBlock, which does its own setBlockIndexCandidates management.
void Chainstate::InvalidBlockFound(CBlockIndex *pindex,
                                   const BlockValidationState &state) {
    AssertLockHeld(cs_main);
    // A mutated block may be a valid block under a different serialization,
    // so only mark the index as failed for non-mutation failures.
    if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
        pindex->nStatus = pindex->nStatus.withFailed();
        m_chainman.m_failed_blocks.insert(pindex);
        m_blockman.m_dirty_blockindex.insert(pindex);
        InvalidChainFound(pindex);
    }
}
/**
 * Spend the inputs of `tx` in `view`, recording each spent coin in `txundo`
 * so the spend can be reverted on disconnect. Coinbase transactions have no
 * previous outputs to spend.
 * Note: `nHeight` is unused here (kept for the UpdateCoins signature).
 */
void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                int nHeight) {
    // Mark inputs spent.
    if (tx.IsCoinBase()) {
        return;
    }
    txundo.vprevout.reserve(tx.vin.size());
    for (const CTxIn &txin : tx.vin) {
        txundo.vprevout.emplace_back();
        bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
        assert(is_spent);
    }
}
/**
 * Apply `tx` to the UTXO set in `view`: spend its inputs (recording undo
 * data) and add its outputs at height `nHeight`.
 */
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight) {
    SpendCoins(view, tx, txundo, nHeight);
    AddCoins(view, tx, nHeight);
}
/**
 * Execute this script check: verify input nIn's scriptSig against the spent
 * output's scriptPubKey, then charge the measured sigchecks against the
 * per-transaction and per-block limiters when they are set. On failure,
 * `error` records the reason.
 */
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
                      CachingTransactionSignatureChecker(
                          ptxTo, nIn, m_tx_out.nValue, cacheStore, txdata),
                      metrics, &error)) {
        return false;
    }
    if ((pTxLimitSigChecks &&
         !pTxLimitSigChecks->consume_and_check(metrics.nSigChecks)) ||
        (pBlockLimitSigChecks &&
         !pBlockLimitSigChecks->consume_and_check(metrics.nSigChecks))) {
        // we can't assign a meaningful script error (since the script
        // succeeded), but remove the ScriptError::OK which could be
        // misinterpreted.
        error = ScriptError::SIGCHECKS_LIMIT_EXCEEDED;
        return false;
    }
    return true;
}
/**
 * Check whether all input scripts of `tx` are valid under `flags` (e.g. with
 * P2SH enabled), consulting and populating the script execution cache. The
 * total number of sigchecks is returned in `nSigChecksOut` and charged
 * against the given limiters. If `pvChecks` is non-null, script checks are
 * pushed onto it for the caller to execute (e.g. in parallel) instead of
 * being run here.
 *
 * This does NOT check that the coins being spent exist; the caller must
 * ensure all prevouts are unspent in `inputs`.
 */
bool CheckInputScripts(const CTransaction &tx, TxValidationState &state,
                       const CCoinsViewCache &inputs, const uint32_t flags,
                       bool sigCacheStore, bool scriptCacheStore,
                       const PrecomputedTransactionData &txdata,
                       int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
                       CheckInputsLimiter *pBlockLimitSigChecks,
                       std::vector<CScriptCheck> *pvChecks) {
    AssertLockHeld(cs_main);
    assert(!tx.IsCoinBase());

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same flags.
    // Note that this assumes that the inputs provided are correct (ie that the
    // transaction hash which is in tx's prevouts properly commits to the
    // scriptPubKey in the inputs view of that transaction).
    ScriptCacheKey hashCacheEntry(tx, flags);
    if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
        // Cache hit: the scripts are valid, but we must still charge the
        // cached sigcheck count against the limiters.
        if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
            (pBlockLimitSigChecks &&
             !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
            return state.Invalid(TxValidationResult::TX_CONSENSUS,
                                 "too-many-sigchecks");
        }
        return true;
    }

    int nSigChecksTotal = 0;

    for (size_t i = 0; i < tx.vin.size(); i++) {
        const COutPoint &prevout = tx.vin[i].prevout;
        const Coin &coin = inputs.AccessCoin(prevout);
        assert(!coin.IsSpent());

        // We very carefully only pass in things to CScriptCheck which are
        // clearly committed to by tx's hash. This provides a sanity
        // check that our caching is not introducing consensus failures through
        // additional data in, eg, the coins being spent being checked as a part
        // of CScriptCheck.

        // Verify signature
        CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
                           &txLimitSigChecks, pBlockLimitSigChecks);

        // If pvChecks is not null, defer the check execution to the caller.
        if (pvChecks) {
            pvChecks->push_back(std::move(check));
            continue;
        }

        if (!check()) {
            ScriptError scriptError = check.GetScriptError();
            // Compute flags without the optional standardness flags.
            // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
            // additional upgrade flags (see AcceptToMemoryPoolWorker variable
            // extraFlags).
            uint32_t mandatoryFlags =
                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS;
            if (flags != mandatoryFlags) {
                // Check whether the failure was caused by a non-mandatory
                // script verification check. If so, ensure we return
                // NOT_STANDARD instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and non-upgraded nodes
                // by banning CONSENSUS-failing data providers.
                CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
                                    sigCacheStore, txdata);
                if (check2()) {
                    return state.Invalid(
                        TxValidationResult::TX_NOT_STANDARD,
                        strprintf("non-mandatory-script-verify-flag (%s)",
                                  ScriptErrorString(scriptError)));
                }
                // update the error message to reflect the mandatory violation.
                scriptError = check2.GetScriptError();
            }

            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
            // the most serious case of validation failures, we may need to
            // consider using RECENT_CONSENSUS_CHANGE for any script failure
            // that could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this depends on the
            // details of how net_processing handles such errors).
            return state.Invalid(
                TxValidationResult::TX_CONSENSUS,
                strprintf("mandatory-script-verify-flag-failed (%s)",
                          ScriptErrorString(scriptError)));
        }

        nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
    }

    nSigChecksOut = nSigChecksTotal;

    if (scriptCacheStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to cache the
        // result. Do so now.
        AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
    }

    return true;
}
/**
 * Abort the node (delegates to AbortNode(strMessage, userMessage)) and mark
 * `state` with an internal error. Returns the value of state.Error() so
 * callers can write `return AbortNode(...)` in error paths.
 */
bool AbortNode(BlockValidationState &state, const std::string &strMessage,
               const bilingual_str &userMessage) {
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
/**
 * Restore the UTXO in a Coin at a given COutPoint.
 * Returns OK on a clean restore, UNCLEAN if an unspent coin was overwritten,
 * and FAILED when legacy undo data lacks the metadata needed to reconstruct
 * the coin.
 */
DisconnectResult UndoCoinSpend(Coin &&undo, CCoinsViewCache &view,
                               const COutPoint &out) {
    bool fClean = true;
    if (view.HaveCoin(out)) {
        // Overwriting transaction output.
        fClean = false;
    }

    if (undo.GetHeight() == 0) {
        // Missing undo metadata (height and coinbase). Older versions included
        // this information only in undo records for the last spend of a
        // transactions' outputs. This implies that it must be present for some
        // other output of the same tx.
        const Coin &alternate = AccessByTxid(view, out.GetTxId());
        if (alternate.IsSpent()) {
            // Adding output for transaction without known metadata
            return DisconnectResult::FAILED;
        }

        // This is somewhat ugly, but hopefully utility is limited. This is only
        // useful when working from legacy on-disk data. In any case, putting
        // the correct information in there doesn't hurt.
        const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                        alternate.IsCoinBase());
    }

    // If the coin already exists as an unspent coin in the cache, then the
    // possible_overwrite parameter to AddCoin must be set to true. We have
    // already checked whether an unspent coin exists above using HaveCoin, so
    // we don't need to guess. When fClean is false, an unspent coin already
    // existed and it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

    return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
}
/**
 * Undo the effects of this block (with given index) on the UTXO set
 * represented by coins. When FAILED is returned, view is left in an
 * indeterminate state.
 *
 * Reads the block's undo data from disk and delegates to ApplyBlockUndo().
 */
DisconnectResult Chainstate::DisconnectBlock(const CBlock &block,
                                             const CBlockIndex *pindex,
                                             CCoinsViewCache &view) {
    AssertLockHeld(::cs_main);
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DisconnectResult::FAILED;
    }

    return ApplyBlockUndo(std::move(blockUndo), block, pindex, view);
}
/**
 * Revert a block's effects on the UTXO set in `view` using its undo data:
 * restore all spent inputs, then remove all outputs the block created.
 * Returns OK, UNCLEAN (inconsistencies tolerated and recorded), or FAILED
 * (block and undo data don't match).
 */
DisconnectResult ApplyBlockUndo(CBlockUndo &&blockUndo, const CBlock &block,
                                const CBlockIndex *pindex,
                                CCoinsViewCache &view) {
    bool fClean = true;

    // One CTxUndo per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DisconnectResult::FAILED;
    }

    // First, restore inputs.
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction &tx = *(block.vtx[i]);
        CTxUndo &txundo = blockUndo.vtxundo[i - 1];
        if (txundo.vprevout.size() != tx.vin.size()) {
            error("DisconnectBlock(): transaction and undo data inconsistent");
            return DisconnectResult::FAILED;
        }

        for (size_t j = 0; j < tx.vin.size(); j++) {
            const COutPoint &out = tx.vin[j].prevout;
            DisconnectResult res =
                UndoCoinSpend(std::move(txundo.vprevout[j]), view, out);
            if (res == DisconnectResult::FAILED) {
                return DisconnectResult::FAILED;
            }
            fClean = fClean && res != DisconnectResult::UNCLEAN;
        }
        // At this point, all of txundo.vprevout should have been moved out.
    }

    // Second, revert created outputs.
    for (const auto &ptx : block.vtx) {
        const CTransaction &tx = *ptx;
        const TxId &txid = tx.GetId();
        const bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the
        // block itself exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (tx.vout[o].scriptPubKey.IsUnspendable()) {
                continue;
            }

            COutPoint out(txid, o);
            Coin coin;
            bool is_spent = view.SpendCoin(out, &coin);
            if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
                uint32_t(pindex->nHeight) != coin.GetHeight() ||
                is_coinbase != coin.IsCoinBase()) {
                // transaction output mismatch
                fClean = false;
            }
        }
    }

    // Move best block pointer to previous block.
    view.SetBestBlock(block.hashPrevBlock);

    return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
}
/** Queue for parallel script verification checks, shared by worker threads. */
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

/** Start the script-verification worker threads. */
void StartScriptCheckWorkerThreads(int threads_num) {
    scriptcheckqueue.StartWorkerThreads(threads_num);
}

/** Stop the script-verification worker threads. */
void StopScriptCheckWorkerThreads() {
    scriptcheckqueue.StopWorkerThreads();
}
// Returns the script flags which should be checked for the block after
// the given block.
static uint32_t GetNextBlockScriptFlags(const CBlockIndex *pindex,
                                        const ChainstateManager &chainman) {
    const Consensus::Params &consensusparams = chainman.GetConsensus();

    // Helper: yields flagBits when the given deployment is active for the
    // block following pindex, and no flags otherwise.
    const auto flagsIfActive = [&](auto deployment, uint32_t flagBits) {
        return DeploymentActiveAfter(pindex, chainman, deployment) ? flagBits
                                                                   : 0u;
    };

    uint32_t nextFlags = SCRIPT_VERIFY_NONE;

    // Enforce P2SH (BIP16).
    nextFlags |= flagsIfActive(Consensus::DEPLOYMENT_P2SH, SCRIPT_VERIFY_P2SH);
    // Enforce the DERSIG (BIP66) rule.
    nextFlags |=
        flagsIfActive(Consensus::DEPLOYMENT_DERSIG, SCRIPT_VERIFY_DERSIG);
    // Enforce CHECKLOCKTIMEVERIFY (BIP65).
    nextFlags |= flagsIfActive(Consensus::DEPLOYMENT_CLTV,
                               SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY);
    // Enforce CSV (BIP68, BIP112 and BIP113).
    nextFlags |= flagsIfActive(Consensus::DEPLOYMENT_CSV,
                               SCRIPT_VERIFY_CHECKSEQUENCEVERIFY);

    // Once the UAHF is enabled, we start accepting replay protected txns.
    if (IsUAHFenabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_VERIFY_STRICTENC | SCRIPT_ENABLE_SIGHASH_FORKID;
    }

    // Once the DAA HF is enabled, we start rejecting transactions that use a
    // high s in their signature, and require signatures that are supposed to
    // fail (for instance in multisig or other forms of smart contracts) to be
    // null.
    if (IsDAAEnabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_NULLFAIL;
    }

    // With the magnetic anomaly fork, transactions using the OP_CHECKDATASIG
    // opcode and its verify alternative are accepted, and push-only
    // signatures plus clean stack are enforced.
    if (IsMagneticAnomalyEnabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_VERIFY_SIGPUSHONLY | SCRIPT_VERIFY_CLEANSTACK;
    }

    if (IsGravitonEnabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_ENABLE_SCHNORR_MULTISIG | SCRIPT_VERIFY_MINIMALDATA;
    }

    if (IsPhononEnabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_ENFORCE_SIGCHECKS;
    }

    // We make sure this node will have replay protection during the next hard
    // fork.
    if (IsReplayProtectionEnabled(consensusparams, pindex)) {
        nextFlags |= SCRIPT_ENABLE_REPLAY_PROTECTION;
    }

    return nextFlags;
}
// Benchmark accumulators (microseconds) for the ConnectBlock phases; reported
// through the LogPrint(BCLog::BENCH, ...) lines inside ConnectBlock.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeTotal = 0;
// Number of ConnectBlock invocations, used to compute the per-block averages
// in the bench log lines.
static int64_t nBlocksTotal = 0;
/**
 * Apply the effects of this block (with given index) on the UTXO set
 * represented by coins. Validity checks that depend on the UTXO set are also
 * done; ConnectBlock() can fail if those validity checks fail (among other
 * reasons).
 *
 * @param[out] blockFees If non-null, receives the sum of all transaction fees
 *     in the block (only meaningful when the function returns true).
 * @param fJustCheck When true, validate only: the PoW/merkle-root re-check is
 *     skipped, no undo data is written, and the view's best block pointer is
 *     left untouched (the function returns before those steps).
 */
bool Chainstate::ConnectBlock(const CBlock &block, BlockValidationState &state,
                              CBlockIndex *pindex, CCoinsViewCache &view,
                              BlockValidationOptions options, Amount *blockFees,
                              bool fJustCheck) {
    AssertLockHeld(cs_main);
    assert(pindex);
    const BlockHash block_hash{block.GetHash()};
    assert(*pindex->phashBlock == block_hash);

    int64_t nTimeStart = GetTimeMicros();

    const CChainParams &params{m_chainman.GetParams()};
    const Consensus::Params &consensusParams = params.GetConsensus();

    // Check it again in case a previous version let a bad block in
    // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
    // ContextualCheckBlockHeader() here. This means that if we add a new
    // consensus rule that is enforced in one of those two functions, then we
    // may have let in a block that violates the rule prior to updating the
    // software, and we would NOT be enforcing the rule here. Fully solving
    // upgrade from one software version to the next after a consensus rule
    // change is potentially tricky and issue-specific.
    // Also, currently the rule against blocks more than 2 hours in the future
    // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
    // re-enforce that rule here (at least until we make it impossible for
    // m_adjusted_time_callback() to go backward).
    if (!CheckBlock(block, state, consensusParams,
                    options.withCheckPoW(!fJustCheck)
                        .withCheckMerkleRoot(!fJustCheck))) {
        if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
            // We don't write down blocks to disk if they may have been
            // corrupted, so this should be impossible unless we're having
            // hardware problems.
            return AbortNode(state, "Corrupt block found indicating potential "
                                    "hardware failure; shutting down");
        }
        return error("%s: Consensus::CheckBlock: %s", __func__,
                     state.ToString());
    }

    // Verify that the view's current state corresponds to the previous block
    BlockHash hashPrevBlock =
        pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    nBlocksTotal++;

    // Special case for the genesis block, skipping connection of its
    // transactions (its coinbase is unspendable)
    if (block_hash == consensusParams.hashGenesisBlock) {
        if (!fJustCheck) {
            view.SetBestBlock(pindex->GetBlockHash());
        }
        return true;
    }

    bool fScriptChecks = true;
    if (!m_chainman.AssumedValidBlock().IsNull()) {
        // We've been configured with the hash of a block which has been
        // externally verified to have a valid history. A suitable default value
        // is included with the software and updated from time to time. Because
        // validity relative to a piece of software is an objective fact these
        // defaults can be easily reviewed. This setting doesn't force the
        // selection of any particular chain but makes validating some faster by
        // effectively caching the result of part of the verification.
        BlockMap::const_iterator it{
            m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())};
        if (it != m_blockman.m_block_index.end()) {
            if (it->second.GetAncestor(pindex->nHeight) == pindex &&
                m_chainman.m_best_header->GetAncestor(pindex->nHeight) ==
                    pindex &&
                m_chainman.m_best_header->nChainWork >=
                    m_chainman.MinimumChainWork()) {
                // This block is a member of the assumed verified chain and an
                // ancestor of the best header.
                // Script verification is skipped when connecting blocks under
                // the assumevalid block. Assuming the assumevalid block is
                // valid this is safe because block merkle hashes are still
                // computed and checked, Of course, if an assumed valid block is
                // invalid due to false scriptSigs this optimization would allow
                // an invalid chain to be accepted.
                // The equivalent time check discourages hash power from
                // extorting the network via DOS attack into accepting an
                // invalid block through telling users they must manually set
                // assumevalid. Requiring a software change or burying the
                // invalid block, regardless of the setting, makes it hard to
                // hide the implication of the demand. This also avoids having
                // release candidates that are hardly doing any signature
                // verification at all in testing without having to artificially
                // set the default assumed verified block further back. The test
                // against the minimum chain work prevents the skipping when
                // denied access to any chain at least as good as the expected
                // chain.
                // Skip script checks if the tip is within ~2 weeks (in
                // proof-equivalent time) of this block.
                fScriptChecks = (GetBlockProofEquivalentTime(
                                     *m_chainman.m_best_header, *pindex,
                                     *m_chainman.m_best_header,
                                     consensusParams) <= 60 * 60 * 24 * 7 * 2);
            }
        }
    }

    int64_t nTime1 = GetTimeMicros();
    nTimeCheck += nTime1 - nTimeStart;
    LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
             MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
             nTimeCheck * MILLI / nBlocksTotal);

    // Do not allow blocks that contain transactions which 'overwrite' older
    // transactions, unless those are already completely spent. If such
    // overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance --
    // even after being sent to another address.
    // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
    // for more information. This rule was originally applied to all blocks
    // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
    // chain is irreversibly beyond that time it is applied to all blocks
    // except the two in the chain that violate it. This prevents exploiting
    // the issue against nodes during their initial block download.
    bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
                            pindex->GetBlockHash() ==
                                uint256S("0x00000000000a4d0a398161ffc163c503763"
                                         "b1f4360639393e0e4c8e300e0caec")) ||
                           (pindex->nHeight == 91880 &&
                            pindex->GetBlockHash() ==
                                uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
                                         "610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate
    // coinbases and thus other than starting with the 2 existing duplicate
    // coinbase pairs, not possible to create overwriting txs. But by the time
    // BIP34 activated, in each of the existing pairs the duplicate coinbase had
    // overwritten the first before the first had been spent. Since those
    // coinbases are sufficiently buried it's no longer possible to create
    // further duplicate transactions descending from the known pairs either. If
    // we're on the known chain at height greater than where BIP34 activated, we
    // can save the db accesses needed for the BIP30 check.

    // BIP34 requires that a block at height X (block X) has its coinbase
    // scriptSig start with a CScriptNum of X (indicated height X). The above
    // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
    // case that there is a block X before the BIP34 height of 227,931 which has
    // an indicated height Y where Y is greater than X. The coinbase for block
    // X would also be a valid coinbase for block Y, which could be a BIP30
    // violation. An exhaustive search of all mainnet coinbases before the
    // BIP34 height which have an indicated height greater than the block height
    // reveals many occurrences. The 3 lowest indicated heights found are
    // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
    // heights would be the first opportunity for BIP30 to be violated.

    // The search reveals a great many blocks which have an indicated height
    // greater than 1,983,702, so we simply remove the optimization to skip
    // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
    // that block in another 25 years or so, we should take advantage of a
    // future consensus change to do a new and improved version of BIP34 that
    // will actually prevent ever creating any duplicate coinbases in the
    // future.
    static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;

    // There is no potential to create a duplicate coinbase at block 209,921
    // because this is still before the BIP34 height and so explicit BIP30
    // checking is still active.

    // The final case is block 176,684 which has an indicated height of
    // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
    // before block 490,897 so there was not much opportunity to address this
    // case other than to carefully analyze it and determine it would not be a
    // problem. Block 490,897 was, in fact, mined with a different coinbase than
    // block 176,684, but it is important to note that even if it hadn't been or
    // is remined on an alternate fork with a duplicate coinbase, we would still
    // not run into a BIP30 violation. This is because the coinbase for 176,684
    // is spent in block 185,956 in transaction
    // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
    // spending transaction can't be duplicated because it also spends coinbase
    // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
    // coinbase has an indicated height of over 4.2 billion, and wouldn't be
    // duplicatable until that height, and it's currently impossible to create a
    // chain that long. Nevertheless we may wish to consider a future soft fork
    // which retroactively prevents block 490,897 from creating a duplicate
    // coinbase. The two historical BIP30 violations often provide a confusing
    // edge case when manipulating the UTXO and it would be simpler not to have
    // another edge case to deal with.

    // testnet3 has no blocks before the BIP34 height with indicated heights
    // post BIP34 before approximately height 486,000,000 and presumably will
    // be reset before it reaches block 1,983,702 and starts doing unnecessary
    // BIP30 checking again.
    assert(pindex->pprev);
    CBlockIndex *pindexBIP34height =
        pindex->pprev->GetAncestor(consensusParams.BIP34Height);
    // Only continue to enforce if we're below BIP34 activation height or the
    // block hash at that height doesn't correspond.
    fEnforceBIP30 =
        fEnforceBIP30 &&
        (!pindexBIP34height ||
         !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));

    // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
    // a consensus change that ensures coinbases at those heights can not
    // duplicate earlier coinbases.
    if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
        for (const auto &tx : block.vtx) {
            for (size_t o = 0; o < tx->vout.size(); o++) {
                if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
                    LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
                              "transaction\n");
                    return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                         "bad-txns-BIP30");
                }
            }
        }
    }

    // Enforce BIP68 (sequence locks).
    int nLockTimeFlags = 0;
    if (DeploymentActiveAt(*pindex, consensusParams,
                           Consensus::DEPLOYMENT_CSV)) {
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Script flags are those of the block FOLLOWING pindex->pprev, i.e. the
    // flags in force for this block.
    const uint32_t flags = GetNextBlockScriptFlags(pindex->pprev, m_chainman);

    int64_t nTime2 = GetTimeMicros();
    nTimeForks += nTime2 - nTime1;
    LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
             MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
             nTimeForks * MILLI / nBlocksTotal);

    std::vector<int> prevheights;
    Amount nFees = Amount::zero();
    int nInputs = 0;

    // Limit the total executed signature operations in the block, a consensus
    // rule. Tracking during the CPU-consuming part (validation of uncached
    // inputs) is per-input atomic and validation in each thread stops very
    // quickly after the limit is exceeded, so an adversary cannot cause us to
    // exceed the limit by much at all.
    CheckInputsLimiter nSigChecksBlockLimiter(
        GetMaxBlockSigChecksCount(options.getExcessiveBlockSize()));

    // One per-tx sigcheck limiter and one undo entry per non-coinbase
    // transaction (hence vtx.size() - 1); indexed by txIndex below.
    std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
    nSigChecksTxLimiters.resize(block.vtx.size() - 1);

    CBlockUndo blockundo;
    blockundo.vtxundo.resize(block.vtx.size() - 1);

    // When script checks are skipped (assumevalid), pass a null queue so
    // checks are not dispatched at all.
    CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
                                                           : nullptr);

    // Add all outputs
    try {
        for (const auto &ptx : block.vtx) {
            AddCoins(view, *ptx, pindex->nHeight);
        }
    } catch (const std::logic_error &e) {
        // This error will be thrown from AddCoin if we try to connect a block
        // containing duplicate transactions. Such a thing should normally be
        // caught early nowadays (due to ContextualCheckBlock's CTOR
        // enforcement) however some edge cases can escape that:
        // - ContextualCheckBlock does not get re-run after saving the block to
        // disk, and older versions may have saved a weird block.
        // - its checks are not applied to pre-CTOR chains, which we might visit
        // with checkpointing off.
        LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "tx-duplicate");
    }

    size_t txIndex = 0;
    // nSigChecksRet may be accurate (found in cache) or 0 (checks were
    // deferred into vChecks).
    int nSigChecksRet;
    for (const auto &ptx : block.vtx) {
        const CTransaction &tx = *ptx;
        const bool isCoinBase = tx.IsCoinBase();
        nInputs += tx.vin.size();

        {
            Amount txfee = Amount::zero();
            TxValidationState tx_state;
            if (!isCoinBase &&
                !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
                                          txfee)) {
                // Any transaction validation failure in ConnectBlock is a block
                // consensus failure.
                state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              tx_state.GetRejectReason(),
                              tx_state.GetDebugMessage());
                return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
                             tx.GetId().ToString(), state.ToString());
            }
            nFees += txfee;
        }

        if (!MoneyRange(nFees)) {
            LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
                      __func__);
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                 "bad-txns-accumulated-fee-outofrange");
        }

        // The following checks do not apply to the coinbase.
        if (isCoinBase) {
            continue;
        }

        // Check that transaction is BIP68 final BIP68 lock checks (as
        // opposed to nLockTime checks) must be in ConnectBlock because they
        // require the UTXO set.
        prevheights.resize(tx.vin.size());
        for (size_t j = 0; j < tx.vin.size(); j++) {
            prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
        }

        if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
            LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
                      __func__);
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                 "bad-txns-nonfinal");
        }

        // Don't cache results if we're actually connecting blocks (still
        // consult the cache, though).
        bool fCacheResults = fJustCheck;

        const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
        if (!fEnforceSigCheck) {
            // Historically, there has been transactions with a very high
            // sigcheck count, so we need to disable this check for such
            // transactions.
            nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
        }

        std::vector<CScriptCheck> vChecks;
        TxValidationState tx_state;
        if (fScriptChecks &&
            !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
                               fCacheResults, PrecomputedTransactionData(tx),
                               nSigChecksRet, nSigChecksTxLimiters[txIndex],
                               &nSigChecksBlockLimiter, &vChecks)) {
            // Any transaction validation failure in ConnectBlock is a block
            // consensus failure
            state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                          tx_state.GetRejectReason(),
                          tx_state.GetDebugMessage());
            return error(
                "ConnectBlock(): CheckInputScripts on %s failed with %s",
                tx.GetId().ToString(), state.ToString());
        }

        control.Add(vChecks);

        // Note: this must execute in the same iteration as CheckTxInputs (not
        // in a separate loop) in order to detect double spends. However,
        // this does not prevent double-spending by duplicated transaction
        // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
        // done in CheckBlock (CheckRegularTransaction).
        SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
        // txIndex only advances for non-coinbase transactions, matching the
        // sizing of vtxundo / nSigChecksTxLimiters above.
        txIndex++;
    }

    int64_t nTime3 = GetTimeMicros();
    nTimeConnect += nTime3 - nTime2;
    LogPrint(BCLog::BENCH,
             " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
             "[%.2fs (%.2fms/blk)]\n",
             (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
             MILLI * (nTime3 - nTime2) / block.vtx.size(),
             nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
             nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);

    const Amount blockReward =
        nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
    if (block.vtx[0]->GetValueOut() > blockReward) {
        LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
                  "limit=%d)\n",
                  block.vtx[0]->GetValueOut(), blockReward);
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "bad-cb-amount");
    }

    // Report total fees to the caller (used e.g. by ConnectTip's block
    // policy checks).
    if (blockFees) {
        *blockFees = nFees;
    }

    // Wait for the parallel script checks queued above to complete.
    if (!control.Wait()) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "blk-bad-inputs", "parallel script check failed");
    }

    int64_t nTime4 = GetTimeMicros();
    nTimeVerify += nTime4 - nTime2;
    LogPrint(
        BCLog::BENCH,
        " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
        nInputs - 1, MILLI * (nTime4 - nTime2),
        nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
        nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);

    if (fJustCheck) {
        return true;
    }

    if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, params)) {
        return false;
    }

    if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
        pindex->RaiseValidity(BlockValidity::SCRIPTS);
        m_blockman.m_dirty_blockindex.insert(pindex);
    }

    assert(pindex->phashBlock);
    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros();
    nTimeIndex += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
             MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
             nTimeIndex * MILLI / nBlocksTotal);

    TRACE6(validation, block_connected, block_hash.data(), pindex->nHeight,
           block.vtx.size(), nInputs, nSigChecksRet,
           // in microseconds (µs)
           nTime5 - nTimeStart);

    return true;
}
// Convenience overload: evaluate the coins cache size state against the
// configured coinstip cache limit and the mempool's configured maximum (or 0
// when there is no mempool).
CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() {
    AssertLockHeld(::cs_main);
    const size_t mempool_max_bytes =
        m_mempool ? m_mempool->m_max_size_bytes : 0;
    return GetCoinsCacheSizeState(m_coinstip_cache_size_bytes,
                                  mempool_max_bytes);
}
// Classify the in-memory coins cache usage relative to the available budget:
// the cache limit plus whatever headroom the mempool is not currently using.
CoinsCacheSizeState
Chainstate::GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
                                   size_t max_mempool_size_bytes) {
    AssertLockHeld(::cs_main);
    const int64_t mempool_usage =
        m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
    const int64_t cache_usage = CoinsTip().DynamicMemoryUsage();
    // Unused mempool budget counts toward the coins cache budget.
    const int64_t mempool_headroom =
        std::max<int64_t>(int64_t(max_mempool_size_bytes) - mempool_usage, 0);
    const int64_t total_space = max_coins_cache_size_bytes + mempool_headroom;

    //! No need to periodic flush if at least this much space still available.
    static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
        10 * 1024 * 1024; // 10MB
    const int64_t large_threshold = std::max(
        (9 * total_space) / 10, total_space - MAX_BLOCK_COINSDB_USAGE_BYTES);

    if (cache_usage > total_space) {
        LogPrintf("Cache size (%s) exceeds total space (%s)\n", cache_usage,
                  total_space);
        return CoinsCacheSizeState::CRITICAL;
    }
    if (cache_usage > large_threshold) {
        return CoinsCacheSizeState::LARGE;
    }
    return CoinsCacheSizeState::OK;
}
/**
 * Update the on-disk chain state: flush block/undo files and the block index,
 * unlink pruned block files when pruning is requested, and — on a full flush —
 * write the coins cache to the coin database and fire ChainStateFlushed.
 *
 * @param mode Controls how aggressively to flush (ALWAYS / PERIODIC /
 *     IF_NEEDED / NONE); see the fDoFullFlush computation below.
 * @param nManualPruneHeight When > 0, prune block files up to this height
 *     (manual prune path); otherwise the automatic pruning rules apply.
 * @return false only on disk failures (via AbortNode); a call that decides
 *     nothing needs flushing still returns true.
 */
bool Chainstate::FlushStateToDisk(BlockValidationState &state,
                                  FlushStateMode mode, int nManualPruneHeight) {
    LOCK(cs_main);
    assert(this->CanFlushToDisk());
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;

    // Snapshot cache stats up front so the bench/trace output reflects the
    // pre-flush state.
    const size_t coins_count = CoinsTip().GetCacheSize();
    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();

    try {
        {
            bool fFlushForPrune = false;
            bool fDoFullFlush = false;
            CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
            LOCK(m_blockman.cs_LastBlockFile);
            if (m_blockman.IsPruneMode() &&
                (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) &&
                !fReindex) {
                // Make sure we don't prune above the blockfilterindexes
                // bestblocks. Pruning is height-based.
                int last_prune = m_chain.Height();
                ForEachBlockFilterIndex([&](BlockFilterIndex &index) {
                    last_prune = std::max(
                        1, std::min(last_prune,
                                    index.GetSummary().best_block_height));
                });

                if (nManualPruneHeight > 0) {
                    LOG_TIME_MILLIS_WITH_CATEGORY(
                        "find files to prune (manual)", BCLog::BENCH);
                    m_blockman.FindFilesToPruneManual(
                        setFilesToPrune,
                        std::min(last_prune, nManualPruneHeight),
                        m_chain.Height());
                } else {
                    LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
                                                  BCLog::BENCH);
                    m_blockman.FindFilesToPrune(
                        setFilesToPrune,
                        m_chainman.GetParams().PruneAfterHeight(),
                        m_chain.Height(), last_prune, IsInitialBlockDownload());
                    m_blockman.m_check_for_pruning = false;
                }

                if (!setFilesToPrune.empty()) {
                    fFlushForPrune = true;
                    if (!m_blockman.m_have_pruned) {
                        // Record on first prune that this node has pruned
                        // block files.
                        m_blockman.m_block_tree_db->WriteFlag(
                            "prunedblockfiles", true);
                        m_blockman.m_have_pruned = true;
                    }
                }
            }
            const auto nNow = GetTime<std::chrono::microseconds>();
            // Avoid writing/flushing immediately after startup.
            if (m_last_write.count() == 0) {
                m_last_write = nNow;
            }
            if (m_last_flush.count() == 0) {
                m_last_flush = nNow;
            }
            // The cache is large and we're within 10% and 10 MiB of the limit,
            // but we have time now (not in the middle of a block processing).
            bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
                               cache_state >= CoinsCacheSizeState::LARGE;
            // The cache is over the limit, we have to write now.
            bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
                                  cache_state >= CoinsCacheSizeState::CRITICAL;
            // It's been a while since we wrote the block index to disk. Do this
            // frequently, so we don't need to redownload after a crash.
            bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
                                  nNow > m_last_write + DATABASE_WRITE_INTERVAL;
            // It's been very long since we flushed the cache. Do this
            // infrequently, to optimize cache usage.
            bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
                                  nNow > m_last_flush + DATABASE_FLUSH_INTERVAL;
            // Combine all conditions that result in a full cache flush.
            fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
                           fCacheCritical || fPeriodicFlush || fFlushForPrune;
            // Write blocks and block index to disk.
            if (fDoFullFlush || fPeriodicWrite) {
                // Ensure we can write block index
                if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
                    return AbortNode(state, "Disk space is too low!",
                                     _("Disk space is too low!"));
                }

                {
                    LOG_TIME_MILLIS_WITH_CATEGORY(
                        "write block and undo data to disk", BCLog::BENCH);

                    // First make sure all block and undo data is flushed to
                    // disk.
                    m_blockman.FlushBlockFile();
                }
                // Then update all block file information (which may refer to
                // block and undo files).
                {
                    LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
                                                  BCLog::BENCH);

                    if (!m_blockman.WriteBlockIndexDB()) {
                        return AbortNode(
                            state, "Failed to write to block index database");
                    }
                }

                // Finally remove any pruned files
                if (fFlushForPrune) {
                    LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
                                                  BCLog::BENCH);

                    UnlinkPrunedFiles(setFilesToPrune);
                }
                m_last_write = nNow;
            }
            // Flush best chain related state. This can only be done if the
            // blocks / block index write was also done.
            if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
                LOG_TIME_MILLIS_WITH_CATEGORY(
                    strprintf("write coins cache to disk (%d coins, %.2fkB)",
                              coins_count, coins_mem_usage / 1000),
                    BCLog::BENCH);

                // Typical Coin structures on disk are around 48 bytes in size.
                // Pushing a new one to the database can cause it to be written
                // twice (once in the log, and once in the tables). This is
                // already an overestimation, as most will delete an existing
                // entry or overwrite one. Still, use a conservative safety
                // factor of 2.
                if (!CheckDiskSpace(gArgs.GetDataDirNet(),
                                    48 * 2 * 2 * CoinsTip().GetCacheSize())) {
                    return AbortNode(state, "Disk space is too low!",
                                     _("Disk space is too low!"));
                }

                // Flush the chainstate (which may refer to block index
                // entries).
                if (!CoinsTip().Flush()) {
                    return AbortNode(state, "Failed to write to coin database");
                }
                m_last_flush = nNow;
                full_flush_completed = true;
            }

            TRACE5(utxocache, flush,
                   // in microseconds (µs)
                   GetTimeMicros() - nNow.count(), uint32_t(mode), coins_count,
                   uint64_t(coins_mem_usage), fFlushForPrune);
        }

        if (full_flush_completed) {
            // Update best block in wallet (so we can detect restored wallets).
            GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
        }
    } catch (const std::runtime_error &e) {
        return AbortNode(state, std::string("System error while flushing: ") +
                                    e.what());
    }
    return true;
}
// Unconditionally flush the chain state to disk, logging on failure.
void Chainstate::ForceFlushStateToDisk() {
    BlockValidationState state;
    const bool flushed = this->FlushStateToDisk(state, FlushStateMode::ALWAYS);
    if (!flushed) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__,
                  state.ToString());
    }
}
// Request pruning on the next flush pass, then trigger a flush; failures are
// logged rather than propagated.
void Chainstate::PruneAndFlush() {
    BlockValidationState state;
    // Signal the flush logic that pruning should be performed.
    m_blockman.m_check_for_pruning = true;
    const bool flushed = this->FlushStateToDisk(state, FlushStateMode::NONE);
    if (!flushed) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__,
                  state.ToString());
    }
}
/**
 * Write the standard "new best tip" summary line to the log (hash, height,
 * version, log2 chain work, tx count, time, verification progress, and coins
 * cache usage).
 *
 * @param coins_tip Coins cache of the chainstate whose tip changed; its
 *     memory usage and entry count are reported.
 * @param tip The new tip block index; must be non-null (it is dereferenced).
 * @param prefix Prepended to the log line; used to tag background-validation
 *     chainstate updates.
 */
static void UpdateTipLog(const CCoinsViewCache &coins_tip,
                         const CBlockIndex *tip, const CChainParams &params,
                         const std::string &func_name,
                         const std::string &prefix)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
    AssertLockHeld(::cs_main);
    LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
              "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
              prefix, func_name, tip->GetBlockHash().ToString(), tip->nHeight,
              tip->nVersion, log(tip->nChainWork.getdouble()) / log(2.0),
              tip->GetChainTxCount(),
              FormatISO8601DateTime(tip->GetBlockTime()),
              GuessVerificationProgress(params.TxData(), tip),
              coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
              coins_tip.GetCacheSize());
}
/**
 * Bookkeeping and notifications after m_chain's tip has been set to
 * pindexNew: bump the mempool's transactions-updated counter, publish the new
 * tip hash through g_best_block (waking g_best_block_cv waiters) and log a
 * tip summary. For chainstates other than the active one (background
 * validation) only a periodic log line is emitted.
 */
void Chainstate::UpdateTip(const CBlockIndex *pindexNew) {
    AssertLockHeld(::cs_main);
    const auto &coins_tip = CoinsTip();

    const CChainParams &params{m_chainman.GetParams()};

    // The remainder of the function isn't relevant if we are not acting on
    // the active chainstate, so return if need be.
    if (this != &m_chainman.ActiveChainstate()) {
        // Only log every so often so that we don't bury log messages at the
        // tip.
        constexpr int BACKGROUND_LOG_INTERVAL = 2000;
        if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
            UpdateTipLog(coins_tip, pindexNew, params, __func__,
                         "[background validation] ");
        }
        return;
    }

    // New best block
    if (m_mempool) {
        m_mempool->AddTransactionsUpdated(1);
    }

    {
        LOCK(g_best_block_mutex);
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }

    UpdateTipLog(coins_tip, pindexNew, params, __func__, "");
}
/**
 * Disconnect m_chain's tip.
 * After calling, the mempool will be in an inconsistent state, with
 * transactions from disconnected blocks being added to disconnectpool. You
 * should make the mempool consistent again by calling updateMempoolForReorg.
 * with cs_main held.
 *
 * If disconnectpool is nullptr, then no disconnected transactions are added to
 * disconnectpool (note that the caller is responsible for mempool consistency
 * in any case).
 *
 * @return true on success; false if the block could not be read, the
 *     disconnect failed, or the chain state could not be flushed.
 */
bool Chainstate::DisconnectTip(BlockValidationState &state,
                               DisconnectedBlockTransactions *disconnectpool) {
    AssertLockHeld(cs_main);
    if (m_mempool) {
        AssertLockHeld(m_mempool->cs);
    }

    CBlockIndex *pindexDelete = m_chain.Tip();
    const Consensus::Params &consensusParams = m_chainman.GetConsensus();

    assert(pindexDelete);

    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock &block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
        return error("DisconnectTip(): Failed to read block");
    }

    // Apply the block atomically to the chain state: the view is only flushed
    // into the coins tip once DisconnectBlock has fully succeeded.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(&CoinsTip());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) !=
            DisconnectResult::OK) {
            return error("DisconnectTip(): DisconnectBlock %s failed",
                         pindexDelete->GetBlockHash().ToString());
        }

        bool flushed = view.Flush();
        assert(flushed);
    }

    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
             (GetTimeMicros() - nStart) * MILLI);

    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
        return false;
    }

    if (m_mempool) {
        // If this block is deactivating a fork, we move all mempool
        // transactions in front of disconnectpool for reprocessing in a future
        // updateMempoolForReorg call
        if (pindexDelete->pprev != nullptr &&
            GetNextBlockScriptFlags(pindexDelete, m_chainman) !=
                GetNextBlockScriptFlags(pindexDelete->pprev, m_chainman)) {
            LogPrint(BCLog::MEMPOOL,
                     "Disconnecting mempool due to rewind of upgrade block\n");
            if (disconnectpool) {
                disconnectpool->importMempool(*m_mempool);
            }
            m_mempool->clear();
        }

        if (disconnectpool) {
            disconnectpool->addForBlock(block.vtx, *m_mempool);
        }
    }

    m_chain.SetTip(pindexDelete->pprev);

    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock, pindexDelete);
    return true;
}
// Benchmark accumulators (microseconds) for the tip connection pipeline;
// reported through the LogPrint(BCLog::BENCH, ...) lines in ConnectTip.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
 * One (block index, block) pair recorded for a block connected during a
 * single ActivateBestChainStep call (see ConnectTrace below). pblock keeps
 * the block data alive for observers of the trace.
 */
struct PerBlockConnectTrace {
    CBlockIndex *pindex = nullptr;
    std::shared_ptr<const CBlock> pblock;
    // = default instead of a user-provided empty body: same semantics (pindex
    // is covered by its member initializer) and the idiomatic spelling
    // (clang-tidy modernize-use-equals-default).
    PerBlockConnectTrace() = default;
};
/**
 * Used to track blocks whose transactions were applied to the UTXO state as a
 * part of a single ActivateBestChainStep call.
 *
 * This class is single-use, once you call GetBlocksConnected() you have to
 * throw it away and make a new one.
 */
class ConnectTrace {
private:
    // Invariant: the last element is always an empty "sentinel" entry that the
    // next BlockConnected() call fills in (see GetBlocksConnected()).
    std::vector<PerBlockConnectTrace> blocksConnected;

public:
    // Start with one empty sentinel entry.
    explicit ConnectTrace() : blocksConnected(1) {}

    /**
     * Record a connected block. Both pindex and pblock must be non-null, and
     * GetBlocksConnected() must not have been called yet (the sentinel entry
     * must still be empty).
     */
    void BlockConnected(CBlockIndex *pindex,
                        std::shared_ptr<const CBlock> pblock) {
        assert(!blocksConnected.back().pindex);
        assert(pindex);
        assert(pblock);
        blocksConnected.back().pindex = pindex;
        blocksConnected.back().pblock = std::move(pblock);
        blocksConnected.emplace_back();
    }

    std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
        // We always keep one extra block at the end of our list because blocks
        // are added after all the conflicted transactions have been filled in.
        // Thus, the last entry should always be an empty one waiting for the
        // transactions from the next block. We pop the last entry here to make
        // sure the list we return is sane.
        assert(!blocksConnected.back().pindex);
        blocksConnected.pop_back();
        return blocksConnected;
    }
};
/**
 * Connect a new block to m_chain. pblock is either nullptr or a pointer to
 * a CBlock corresponding to pindexNew, to bypass loading it again from disk.
 *
 * The block is added to connectTrace if connection succeeds.
 *
 * @param state            Consensus validation outcome for the block.
 * @param blockPolicyState Outcome of the (non-consensus) block policy checks;
 *                         set when the block is parked for a policy violation.
 * @param pindexNew        Index entry of the block to connect; must extend
 *                         the current tip.
 * @param pblock           Optional in-memory block data (avoids a disk read).
 * @param connectTrace     Receives the connected block on success.
 * @param disconnectpool   Mempool transactions from disconnected blocks,
 *                         updated as this block's transactions are confirmed.
 * @return true on success; false on consensus failure, policy parking, or a
 *         flush failure (AbortNode on unreadable block data).
 */
bool Chainstate::ConnectTip(BlockValidationState &state,
                            BlockPolicyValidationState &blockPolicyState,
                            CBlockIndex *pindexNew,
                            const std::shared_ptr<const CBlock> &pblock,
                            ConnectTrace &connectTrace,
                            DisconnectedBlockTransactions &disconnectpool) {
    AssertLockHeld(cs_main);
    // m_mempool may be null (e.g. for a background chainstate); its lock is
    // only required when present.
    if (m_mempool) {
        AssertLockHeld(m_mempool->cs);
    }

    const Consensus::Params &consensusParams = m_chainman.GetConsensus();

    // ConnectTip only ever extends the current tip by exactly one block.
    assert(pindexNew->pprev == m_chain.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        // No in-memory copy supplied by the caller: load from disk.
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
            return AbortNode(state, "Failed to read block");
        }
        pthisBlock = pblockNew;
    } else {
        pthisBlock = pblock;
    }

    const CBlock &blockConnecting = *pthisBlock;

    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros();
    nTimeReadFromDisk += nTime2 - nTime1;
    // Declared outside the scope below because it is read again after the
    // scope closes (flush timing).
    int64_t nTime3;
    LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
             (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
    {
        Amount blockFees{Amount::zero()};
        // All UTXO changes go through this cache and are only flushed to the
        // tip view once the block has fully validated.
        CCoinsViewCache view(&CoinsTip());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view,
                               BlockValidationOptions(m_chainman.GetConfig()),
                               &blockFees);
        // Notify listeners of the validation result whether it passed or not.
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid()) {
                InvalidBlockFound(pindexNew, state);
            }

            return error("%s: ConnectBlock %s failed, %s", __func__,
                         pindexNew->GetBlockHash().ToString(),
                         state.ToString());
        }

        /**
         * The block is valid by consensus rules so now we check if the block
         * passes all block policy checks. If not, then park the block and bail.
         *
         * We check block parking policies before flushing changes to the UTXO
         * set. This allows us to avoid rewinding everything immediately after.
         *
         * Only check block parking policies the first time the block is
         * connected. Avalanche voting can override the parking decision made by
         * these policies.
         */
        const BlockHash blockhash = pindexNew->GetBlockHash();
        if (!IsInitialBlockDownload() &&
            !m_filterParkingPoliciesApplied.contains(blockhash)) {
            m_filterParkingPoliciesApplied.insert(blockhash);

            const Amount blockReward =
                blockFees +
                GetBlockSubsidy(pindexNew->nHeight, consensusParams);

            std::vector<std::unique_ptr<ParkingPolicy>> parkingPolicies;
            parkingPolicies.emplace_back(std::make_unique<MinerFundPolicy>(
                consensusParams, *pindexNew, blockConnecting, blockReward));

            // Avalanche-specific policies only apply when avalanche is active.
            if (g_avalanche) {
                parkingPolicies.emplace_back(
                    std::make_unique<StakingRewardsPolicy>(
                        consensusParams, *pindexNew, blockConnecting,
                        blockReward));

                if (m_mempool) {
                    parkingPolicies.emplace_back(
                        std::make_unique<PreConsensusPolicy>(
                            *pindexNew, blockConnecting, m_mempool));
                }
            }

            // If any block policy is violated, bail on the first one found
            if (std::find_if_not(parkingPolicies.begin(), parkingPolicies.end(),
                                 [&](const auto &policy) {
                                     bool ret = (*policy)(blockPolicyState);
                                     if (!ret) {
                                         LogPrintf(
                                             "Park block because it "
                                             "violated a block policy: %s\n",
                                             blockPolicyState.ToString());
                                     }
                                     return ret;
                                 }) != parkingPolicies.end()) {
                // Park rather than invalidate: avalanche may later vote the
                // block back in.
                pindexNew->nStatus = pindexNew->nStatus.withParked();
                m_blockman.m_dirty_blockindex.insert(pindexNew);
                return false;
            }
        }

        nTime3 = GetTimeMicros();
        nTimeConnectTotal += nTime3 - nTime2;
        assert(nBlocksTotal > 0);
        LogPrint(BCLog::BENCH,
                 " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
                 (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
                 nTimeConnectTotal * MILLI / nBlocksTotal);
        // Commit the cached UTXO changes to the tip view.
        bool flushed = view.Flush();
        assert(flushed);
    }

    int64_t nTime4 = GetTimeMicros();
    nTimeFlush += nTime4 - nTime3;
    LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
             (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
             nTimeFlush * MILLI / nBlocksTotal);

    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
        return false;
    }

    int64_t nTime5 = GetTimeMicros();
    nTimeChainState += nTime5 - nTime4;
    LogPrint(BCLog::BENCH,
             " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
             (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
             nTimeChainState * MILLI / nBlocksTotal);

    // Remove conflicting transactions from the mempool;
    if (m_mempool) {
        disconnectpool.removeForBlock(blockConnecting.vtx, *m_mempool);

        // If this block is activating a fork, we move all mempool transactions
        // in front of disconnectpool for reprocessing in a future
        // updateMempoolForReorg call
        if (pindexNew->pprev != nullptr &&
            GetNextBlockScriptFlags(pindexNew, m_chainman) !=
                GetNextBlockScriptFlags(pindexNew->pprev, m_chainman)) {
            LogPrint(
                BCLog::MEMPOOL,
                "Disconnecting mempool due to acceptance of upgrade block\n");
            disconnectpool.importMempool(*m_mempool);
        }
    }

    // Update m_chain & related variables.
    m_chain.SetTip(pindexNew);
    UpdateTip(pindexNew);

    int64_t nTime6 = GetTimeMicros();
    nTimePostConnect += nTime6 - nTime5;
    nTimeTotal += nTime6 - nTime1;
    LogPrint(BCLog::BENCH,
             " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
             (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
             nTimePostConnect * MILLI / nBlocksTotal);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
             (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
             nTimeTotal * MILLI / nBlocksTotal);

    // If we are the background validation chainstate, check to see if we are
    // done validating the snapshot (i.e. our tip has reached the snapshot's
    // base block).
    if (this != &m_chainman.ActiveChainstate()) {
        // This call may set `m_disabled`, which is referenced immediately
        // afterwards in ActivateBestChain, so that we stop connecting blocks
        // past the snapshot base.
        m_chainman.MaybeCompleteSnapshotValidation();
    }

    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
/**
 * Return the tip of the chain with the most work in it, that isn't known to be
 * invalid (it's however far from certain to be valid).
 *
 * @param blocksToReconcile Out-parameter: candidate tips to submit for
 *        avalanche reconciliation when avalanche is enabled.
 * @return The best usable candidate tip, or nullptr when no candidate exists.
 */
CBlockIndex *Chainstate::FindMostWorkChain(
    std::vector<const CBlockIndex *> &blocksToReconcile) {
    AssertLockHeld(::cs_main);
    do {
        CBlockIndex *pindexNew = nullptr;

        // Find the best candidate header.
        {
            // setBlockIndexCandidates is ordered by work, so the highest-work
            // candidate is at the reverse beginning.
            std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
                it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend()) {
                return nullptr;
            }
            pindexNew = *it;
        }

        // If this block will cause an avalanche finalized block to be reorged,
        // then we park it.
        {
            LOCK(cs_avalancheFinalizedBlockIndex);
            if (m_avalancheFinalizedBlockIndex &&
                !AreOnTheSameFork(pindexNew, m_avalancheFinalizedBlockIndex)) {
                LogPrintf("Park block %s because it forks prior to the "
                          "avalanche finalized chaintip.\n",
                          pindexNew->GetBlockHash().ToString());
                pindexNew->nStatus = pindexNew->nStatus.withParked();
                m_blockman.m_dirty_blockindex.insert(pindexNew);
            }
        }

        const bool fAvalancheEnabled = isAvalancheEnabled(gArgs);
        // By default, automatic unparking is on unless avalanche handles
        // parking decisions.
        const bool fAutoUnpark =
            gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);

        const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);

        // Check whether all blocks on the path between the currently active
        // chain and the candidate are valid. Just going until the active chain
        // is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool hasValidAncestor = true;
        while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);

            // If this is a parked chain, but it has enough PoW, clear the park
            // state.
            bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
            if (fAutoUnpark && fParkedChain) {
                const CBlockIndex *pindexTip = m_chain.Tip();

                // During initialization, pindexTip and/or pindexFork may be
                // null. In this case, we just ignore the fact that the chain is
                // parked.
                if (!pindexTip || !pindexFork) {
                    UnparkBlock(pindexTest);
                    continue;
                }

                // A parked chain can be unparked if it has twice as much PoW
                // accumulated as the main chain has since the fork block.
                CBlockIndex const *pindexExtraPow = pindexTip;
                arith_uint256 requiredWork = pindexTip->nChainWork;
                switch (pindexTip->nHeight - pindexFork->nHeight) {
                    // Limit the penality for depth 1, 2 and 3 to half a block
                    // worth of work to ensure we don't fork accidentally.
                    case 3:
                    case 2:
                        pindexExtraPow = pindexExtraPow->pprev;
                    // FALLTHROUGH
                    case 1: {
                        const arith_uint256 deltaWork =
                            pindexExtraPow->nChainWork - pindexFork->nChainWork;
                        requiredWork += (deltaWork >> 1);
                        break;
                    }
                    default:
                        requiredWork +=
                            pindexExtraPow->nChainWork - pindexFork->nChainWork;
                        break;
                }

                if (pindexNew->nChainWork > requiredWork) {
                    // We have enough, clear the parked state.
                    LogPrintf("Unpark chain up to block %s as it has "
                              "accumulated enough PoW.\n",
                              pindexNew->GetBlockHash().ToString());
                    fParkedChain = false;
                    UnparkBlock(pindexTest);
                }
            }

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fInvalidChain = pindexTest->nStatus.isInvalid();
            bool fMissingData = !pindexTest->nStatus.hasData();
            if (!(fInvalidChain || fParkedChain || fMissingData)) {
                // The current block is acceptable, move to the parent, up to
                // the fork point.
                pindexTest = pindexTest->pprev;
                continue;
            }

            // Candidate chain is not usable (either invalid or parked or
            // missing data)
            hasValidAncestor = false;
            setBlockIndexCandidates.erase(pindexTest);

            // Keep track of the best invalid/parked tips seen, for fork
            // warning purposes.
            if (fInvalidChain && (m_chainman.m_best_invalid == nullptr ||
                                  pindexNew->nChainWork >
                                      m_chainman.m_best_invalid->nChainWork)) {
                m_chainman.m_best_invalid = pindexNew;
            }

            if (fParkedChain && (m_chainman.m_best_parked == nullptr ||
                                 pindexNew->nChainWork >
                                     m_chainman.m_best_parked->nChainWork)) {
                m_chainman.m_best_parked = pindexNew;
            }

            LogPrintf("Considered switching to better tip %s but that chain "
                      "contains a%s%s%s block.\n",
                      pindexNew->GetBlockHash().ToString(),
                      fInvalidChain ? "n invalid" : "",
                      fParkedChain ? " parked" : "",
                      fMissingData ? " missing-data" : "");

            CBlockIndex *pindexFailed = pindexNew;
            // Remove the entire chain from the set.
            while (pindexTest != pindexFailed) {
                if (fInvalidChain || fParkedChain) {
                    // Propagate the failure down to the unusable ancestor.
                    pindexFailed->nStatus =
                        pindexFailed->nStatus.withFailedParent(fInvalidChain)
                            .withParkedParent(fParkedChain);
                } else if (fMissingData) {
                    // If we're missing data, then add back to
                    // m_blocks_unlinked, so that if the block arrives in the
                    // future we can try adding to setBlockIndexCandidates
                    // again.
                    m_blockman.m_blocks_unlinked.insert(
                        std::make_pair(pindexFailed->pprev, pindexFailed));
                }
                setBlockIndexCandidates.erase(pindexFailed);
                pindexFailed = pindexFailed->pprev;
            }

            if (fInvalidChain || fParkedChain) {
                // We discovered a new chain tip that is either parked or
                // invalid, we may want to warn.
                CheckForkWarningConditionsOnNewFork(pindexNew);
            }
        }

        if (fAvalancheEnabled && g_avalanche) {
            blocksToReconcile.push_back(pindexNew);
        }

        // We found a candidate that has valid ancestors. This is our guy.
        if (hasValidAncestor) {
            return pindexNew;
        }
    } while (true);
}
/**
 * Delete all entries in setBlockIndexCandidates that are worse than the current
 * tip.
 */
void Chainstate::PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to
    // return to it later in case a reorganization to a better block fails.
    for (auto it = setBlockIndexCandidates.begin();
         it != setBlockIndexCandidates.end() &&
         setBlockIndexCandidates.value_comp()(*it, m_chain.Tip());) {
        // erase() returns the iterator following the removed element.
        it = setBlockIndexCandidates.erase(it);
    }

    // Either the current tip or a successor of it we're working towards is left
    // in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either nullptr or a pointer to a CBlock corresponding to
 * pindexMostWork.
 *
 * @param state          Validation state for any block (dis)connection failure.
 * @param pindexMostWork Target tip, as computed by FindMostWorkChain.
 * @param pblock         Optional in-memory data for pindexMostWork itself.
 * @param fInvalidFound  Out-parameter: set when an invalid block was found on
 *                       the path towards pindexMostWork.
 * @param connectTrace   Accumulates the blocks connected by this step.
 *
 * @returns true unless a system error occurred
 */
bool Chainstate::ActivateBestChainStep(
    BlockValidationState &state, CBlockIndex *pindexMostWork,
    const std::shared_ptr<const CBlock> &pblock, bool &fInvalidFound,
    ConnectTrace &connectTrace) {
    AssertLockHeld(cs_main);
    if (m_mempool) {
        AssertLockHeld(m_mempool->cs);
    }

    const CBlockIndex *pindexOldTip = m_chain.Tip();
    const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
        // Fix: guard the mempool dereference. m_mempool may be null, and
        // every other access in this function is null-checked.
        if (!fBlocksDisconnected && m_mempool) {
            // Import and clear mempool; we must do this to preserve
            // topological ordering in the mempool index. This is ok since
            // inserts into the mempool are very fast now in our new
            // implementation.
            disconnectpool.importMempool(*m_mempool);
        }

        if (!DisconnectTip(state, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            if (m_mempool) {
                disconnectpool.updateMempoolForReorg(*this, false, *m_mempool);
            }

            // If we're unable to disconnect a block during normal operation,
            // then that is a failure of our local system -- we should abort
            // rather than stay on a less work chain.
            AbortNode(state,
                      "Failed to disconnect block; see debug.log for details");
            return false;
        }

        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex *> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the
        // best tip, as we likely only need a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }

        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
            BlockPolicyValidationState blockPolicyState;
            // Only pass the block data when connecting the target tip itself;
            // intermediate blocks are read from disk by ConnectTip.
            if (!ConnectTip(state, blockPolicyState, pindexConnect,
                            pindexConnect == pindexMostWork
                                ? pblock
                                : std::shared_ptr<const CBlock>(),
                            connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (state.GetResult() !=
                        BlockValidationResult::BLOCK_MUTATED) {
                        InvalidChainFound(vpindexToConnect.back());
                    }
                    state = BlockValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                }

                if (blockPolicyState.IsInvalid()) {
                    // The block violates a policy rule.
                    fContinue = false;
                    break;
                }

                // A system error occurred (disk space, database error, ...).
                // Make the mempool consistent with the current tip, just in
                // case any observers try to use it before shutdown.
                if (m_mempool) {
                    disconnectpool.updateMempoolForReorg(*this, false,
                                                         *m_mempool);
                }
                return false;
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip ||
                    m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return
                    // temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (m_mempool) {
        if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
            // If any blocks were disconnected, we need to update the mempool
            // even if disconnectpool is empty. The disconnectpool may also be
            // non-empty if the mempool was imported due to new validation rules
            // being in effect.
            LogPrint(BCLog::MEMPOOL,
                     "Updating mempool due to reorganization or "
                     "rules upgrade/downgrade\n");
            disconnectpool.updateMempoolForReorg(*this, true, *m_mempool);
        }

        m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
    }

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound) {
        CheckForkWarningConditionsOnNewFork(pindexMostWork);
    } else {
        CheckForkWarningConditions();
    }

    return true;
}
// Map the "still in initial download" flag (plus the global reindex flag) to
// the synchronization state reported to notification listeners.
static SynchronizationState GetSynchronizationState(bool init) {
    if (init) {
        // While initializing, distinguish a reindex from a regular initial
        // block download.
        return ::fReindex ? SynchronizationState::INIT_REINDEX
                          : SynchronizationState::INIT_DOWNLOAD;
    }
    return SynchronizationState::POST_INIT;
}
// Fire a header-tip notification if the best known header changed since the
// last call. Returns whether a notification was sent.
// NOTE(review): pindexHeaderOld is a function-local static with no explicit
// synchronization beyond the cs_main section — presumably callers are
// serialized; confirm before calling from multiple threads.
static bool NotifyHeaderTip(Chainstate &chainstate) LOCKS_EXCLUDED(cs_main) {
    bool fNotify = false;
    bool fInitialBlockDownload = false;
    // Remembers the header tip announced by the previous call.
    static CBlockIndex *pindexHeaderOld = nullptr;
    CBlockIndex *pindexHeader = nullptr;
    {
        LOCK(cs_main);
        pindexHeader = chainstate.m_chainman.m_best_header;

        if (pindexHeader != pindexHeaderOld) {
            fNotify = true;
            fInitialBlockDownload = chainstate.IsInitialBlockDownload();
            pindexHeaderOld = pindexHeader;
        }
    }

    // Send block tip changed notifications without cs_main
    if (fNotify) {
        uiInterface.NotifyHeaderTip(
            GetSynchronizationState(fInitialBlockDownload),
            pindexHeader->nHeight, pindexHeader->nTime, false);
    }
    return fNotify;
}
// Keep the validation interface callback queue from growing without bound:
// once more than 10 callbacks are pending, block until the queue drains.
static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
    AssertLockNotHeld(cs_main);

    const auto pendingCallbacks = GetMainSignals().CallbacksPending();
    if (pendingCallbacks > 10) {
        SyncWithValidationInterfaceQueue();
    }
}
/**
 * Make the best chain active, in multiple steps. Repeatedly calls
 * ActivateBestChainStep, periodically releasing cs_main between iterations so
 * notifications can drain and other threads can make progress.
 *
 * @param state                Validation state for any failure.
 * @param pblock               Optional in-memory data for the new tip block.
 * @param skip_checkblockindex Skip the (expensive) CheckBlockIndex sanity
 *                             check after each step.
 * @return false on a system error or when this (background) chainstate has
 *         been disabled; true otherwise.
 */
bool Chainstate::ActivateBestChain(BlockValidationState &state,
                                   std::shared_ptr<const CBlock> pblock,
                                   bool skip_checkblockindex) {
    AssertLockNotHeld(m_chainstate_mutex);

    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(::cs_main);

    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not
    // lock up other threads for too long during large connects - and to allow
    // for e.g. the callback queue to drain we use m_chainstate_mutex to enforce
    // mutual exclusion so that only one caller may execute this function at a
    // time
    LOCK(m_chainstate_mutex);

    // Belt-and-suspenders check that we aren't attempting to advance the
    // background chainstate past the snapshot base block.
    if (WITH_LOCK(::cs_main, return m_disabled)) {
        LogPrintf("m_disabled is set - this chainstate should not be in "
                  "operation. Please report this as a bug. %s\n",
                  PACKAGE_BUGREPORT);
        return false;
    }

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.
        LimitValidationInterfaceQueue();

        std::vector<const CBlockIndex *> blocksToReconcile;
        bool blocks_connected = false;

        {
            LOCK(cs_main);
            // Lock transaction pool for at least as long as it takes for
            // connectTrace to be consumed
            LOCK(MempoolMutex());
            CBlockIndex *starting_tip = m_chain.Tip();
            do {
                // We absolutely may not unlock cs_main until we've made forward
                // progress (with the exception of shutdown due to hardware
                // issues, low disk space, etc).

                // Destructed before cs_main is unlocked
                ConnectTrace connectTrace;

                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain(blocksToReconcile);
                }

                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr ||
                    pindexMostWork == m_chain.Tip()) {
                    break;
                }

                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only forward pblock when it actually is the most-work block.
                if (!ActivateBestChainStep(
                        state, pindexMostWork,
                        pblock && pblock->GetHash() ==
                                      pindexMostWork->GetBlockHash()
                            ? pblock
                            : nullBlockPtr,
                        fInvalidFound, connectTrace)) {
                    // A system error occurred
                    return false;
                }
                blocks_connected = true;

                if (fInvalidFound ||
                    (pindexMostWork && pindexMostWork->nStatus.isParked())) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }

                pindexNewTip = m_chain.Tip();
                for (const PerBlockConnectTrace &trace :
                     connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
                }

                // This will have been toggled in
                // ActivateBestChainStep -> ConnectTip ->
                // MaybeCompleteSnapshotValidation, if at all, so we should
                // catch it here.
                //
                // Break this do-while to ensure we don't advance past the base
                // snapshot.
                if (m_disabled) {
                    break;
                }
            } while (!m_chain.Tip() ||
                     (starting_tip && CBlockIndexWorkComparator()(
                                          m_chain.Tip(), starting_tip)));

            // Check the index once we're done with the above loop, since
            // we're going to release cs_main soon. If the index is in a bad
            // state now, then it's better to know immediately rather than
            // randomly have it cause a problem in a race.
            if (!skip_checkblockindex) {
                CheckBlockIndex();
            }

            if (blocks_connected) {
                const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
                bool fInitialDownload = IsInitialBlockDownload();

                // Notify external listeners about the new tip.
                // Enqueue while holding cs_main to ensure that UpdatedBlockTip
                // is called in the order in which blocks are connected
                if (pindexFork != pindexNewTip) {
                    // Notify ValidationInterface subscribers
                    GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
                                                     fInitialDownload);

                    // Always notify the UI if a new block tip was connected
                    uiInterface.NotifyBlockTip(
                        GetSynchronizationState(fInitialDownload),
                        pindexNewTip);
                }
            }
        }
        // When we reach this point, we switched to a new tip (stored in
        // pindexNewTip).

        if (g_avalanche) {
            for (const CBlockIndex *pindex : blocksToReconcile) {
                g_avalanche->addToReconcile(pindex);
                g_avalanche->computeStakingReward(pindex);
            }
        }

        if (!blocks_connected) {
            return true;
        }

        if (nStopAtHeight && pindexNewTip &&
            pindexNewTip->nHeight >= nStopAtHeight) {
            StartShutdown();
        }

        if (WITH_LOCK(::cs_main, return m_disabled)) {
            // Background chainstate has reached the snapshot base block, so
            // exit.
            break;
        }

        // We check shutdown only after giving ActivateBestChainStep a chance to
        // run once so that we never shutdown before connecting the genesis
        // block during LoadChainTip(). Previously this caused an assert()
        // failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested()) {
            break;
        }
    } while (pindexNewTip != pindexMostWork);

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FlushStateMode::PERIODIC)) {
        return false;
    }

    return true;
}
/**
 * Mark a block as precious: give it a lower (more preferred) sequence id than
 * any other tip with equal work, then re-run chain activation so it can
 * become the active tip.
 *
 * @param state  Validation state for the subsequent ActivateBestChain call.
 * @param pindex Block to prefer; must not be null.
 * @return The result of ActivateBestChain, or true when the block has less
 *         work than the current tip (nothing to do).
 */
bool Chainstate::PreciousBlock(BlockValidationState &state,
                               CBlockIndex *pindex) {
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);
    {
        LOCK(cs_main);
        if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }

        if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the
            // counter.
            nBlockReverseSequenceId = -1;
        }

        nLastPreciousChainwork = m_chain.Tip()->nChainWork;
        // Re-insert with the new (negative) sequence id so the work
        // comparator prefers this block among equal-work candidates.
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }

        // In case this was parked, unpark it.
        UnparkBlock(pindex);

        // Make sure it is added to the candidate list if appropriate.
        if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
            pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state);
}
namespace {
/**
 * Leverage RAII to run a functor at scope end.
 *
 * The functor runs exactly once, from the destructor. Copying and moving are
 * deleted so the deferred action cannot accidentally be duplicated or run
 * twice.
 */
template <typename Func> struct Defer {
    Func func;
    explicit Defer(Func &&f) : func(std::move(f)) {}
    Defer(const Defer &) = delete;
    Defer &operator=(const Defer &) = delete;
    ~Defer() { func(); }
};
} // namespace
/**
 * Disconnect the active chain back past pindex and mark pindex (and its
 * disconnected descendants) as failed (invalidate=true) or parked
 * (invalidate=false), keeping setBlockIndexCandidates and the mempool
 * consistent along the way.
 *
 * @param state      Validation state for any disconnection failure.
 * @param pindex     Block to invalidate/park; must not be the genesis block.
 * @param invalidate true to mark blocks failed, false to mark them parked.
 * @return false on genesis, on a disconnection failure, or if the block
 *         remains in the active chain; true otherwise.
 */
bool Chainstate::UnwindBlock(BlockValidationState &state, CBlockIndex *pindex,
                             bool invalidate) {
    // Genesis block can't be invalidated or parked
    assert(pindex);
    if (pindex->nHeight == 0) {
        return false;
    }

    CBlockIndex *to_mark_failed_or_parked = pindex;
    bool pindex_was_in_chain = false;
    int disconnected = 0;

    // We do not allow ActivateBestChain() to run while UnwindBlock() is
    // running, as that could cause the tip to change while we disconnect
    // blocks. (Note for backport of Core PR16849: we acquire
    // LOCK(m_chainstate_mutex) in the Park, Invalidate and FinalizeBlock
    // functions due to differences in our code)
    AssertLockHeld(m_chainstate_mutex);

    // We'll be acquiring and releasing cs_main below, to allow the validation
    // callbacks to run. However, we should keep the block index in a
    // consistent state as we disconnect blocks -- in particular we need to
    // add equal-work blocks to setBlockIndexCandidates as we disconnect.
    // To avoid walking the block index repeatedly in search of candidates,
    // build a map once so that we can look up candidate blocks by chain
    // work as we go.
    std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;

    {
        LOCK(cs_main);
        for (auto &entry : m_blockman.m_block_index) {
            CBlockIndex *candidate = &entry.second;
            // We don't need to put anything in our active chain into the
            // multimap, because those candidates will be found and considered
            // as we disconnect.
            // Instead, consider only non-active-chain blocks that have at
            // least as much work as where we expect the new tip to end up.
            if (!m_chain.Contains(candidate) &&
                !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                candidate->IsValid(BlockValidity::TRANSACTIONS) &&
                candidate->HaveTxsDownloaded()) {
                candidate_blocks_by_work.insert(
                    std::make_pair(candidate->nChainWork, candidate));
            }
        }
    }

    {
        LOCK(cs_main);
        // Lock for as long as disconnectpool is in scope to make sure
        // UpdateMempoolForReorg is called after DisconnectTip without unlocking
        // in between
        LOCK(MempoolMutex());

        constexpr int maxDisconnectPoolBlocks = 10;
        bool ret = false;
        DisconnectedBlockTransactions disconnectpool;
        // After 10 blocks this becomes nullptr, so that DisconnectTip will
        // stop giving us unwound block txs if we are doing a deep unwind.
        DisconnectedBlockTransactions *optDisconnectPool = &disconnectpool;

        // Disable thread safety analysis because we can't require m_mempool->cs
        // as m_mempool can be null. We keep the runtime analysis though.
        Defer deferred([&]() NO_THREAD_SAFETY_ANALYSIS {
            AssertLockHeld(cs_main);
            if (m_mempool && !disconnectpool.isEmpty()) {
                AssertLockHeld(m_mempool->cs);
                // DisconnectTip will add transactions to disconnectpool.
                // When all unwinding is done and we are on a new tip, we must
                // add all transactions back to the mempool against the new tip.
                disconnectpool.updateMempoolForReorg(*this,
                                                     /* fAddToMempool = */ ret,
                                                     *m_mempool);
            }
        });

        // Disconnect (descendants of) pindex, and mark them invalid.
        while (true) {
            if (ShutdownRequested()) {
                break;
            }

            // Make sure the queue of validation callbacks doesn't grow
            // unboundedly.
            // FIXME this commented code is a regression and could cause OOM if
            // a very old block is invalidated via the invalidateblock RPC.
            // This can be uncommented if the main signals are moved away from
            // cs_main or this code is refactored so that cs_main can be
            // released at this point.
            //
            // LimitValidationInterfaceQueue();

            if (!m_chain.Contains(pindex)) {
                break;
            }

            if (m_mempool && disconnected == 0) {
                // On first iteration, we grab all the mempool txs to preserve
                // topological ordering. This has the side-effect of temporarily
                // clearing the mempool, but we will re-add later in
                // updateMempoolForReorg() (above). This technique guarantees
                // mempool consistency as well as ensures that our topological
                // entry_id index is always correct.
                disconnectpool.importMempool(*m_mempool);
            }

            pindex_was_in_chain = true;
            CBlockIndex *invalid_walk_tip = m_chain.Tip();

            // ActivateBestChain considers blocks already in m_chain
            // unconditionally valid already, so force disconnect away from it.
            ret = DisconnectTip(state, optDisconnectPool);
            ++disconnected;

            if (optDisconnectPool && disconnected > maxDisconnectPoolBlocks) {
                // Stop using the disconnect pool after 10 blocks. After 10
                // blocks we no longer add block tx's to the disconnectpool.
                // However, when this scope ends we will reconcile what's
                // in the pool with the new tip (in the deferred d'tor above).
                optDisconnectPool = nullptr;
            }

            if (!ret) {
                return false;
            }

            assert(invalid_walk_tip->pprev == m_chain.Tip());

            // We immediately mark the disconnected blocks as invalid.
            // This prevents a case where pruned nodes may fail to
            // invalidateblock and be left unable to start as they have no tip
            // candidates (as there are no blocks that meet the "have data and
            // are not invalid per nStatus" criteria for inclusion in
            // setBlockIndexCandidates).
            invalid_walk_tip->nStatus =
                invalidate ? invalid_walk_tip->nStatus.withFailed()
                           : invalid_walk_tip->nStatus.withParked();

            m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
            setBlockIndexCandidates.insert(invalid_walk_tip->pprev);

            if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
                (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
                            : to_mark_failed_or_parked->nStatus.isParked())) {
                // We only want to mark the last disconnected block as
                // Failed (or Parked); its children need to be FailedParent (or
                // ParkedParent) instead.
                to_mark_failed_or_parked->nStatus =
                    (invalidate
                         ? to_mark_failed_or_parked->nStatus.withFailed(false)
                               .withFailedParent()
                         : to_mark_failed_or_parked->nStatus.withParked(false)
                               .withParkedParent());

                m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
            }

            // Add any equal or more work headers to setBlockIndexCandidates
            auto candidate_it = candidate_blocks_by_work.lower_bound(
                invalid_walk_tip->pprev->nChainWork);
            while (candidate_it != candidate_blocks_by_work.end()) {
                if (!CBlockIndexWorkComparator()(candidate_it->second,
                                                 invalid_walk_tip->pprev)) {
                    setBlockIndexCandidates.insert(candidate_it->second);
                    candidate_it = candidate_blocks_by_work.erase(candidate_it);
                } else {
                    ++candidate_it;
                }
            }

            // Track the last disconnected block, so we can correct its
            // FailedParent (or ParkedParent) status in future iterations, or,
            // if it's the last one, call InvalidChainFound on it.
            to_mark_failed_or_parked = invalid_walk_tip;
        }
    }

    CheckBlockIndex();

    {
        LOCK(cs_main);
        if (m_chain.Contains(to_mark_failed_or_parked)) {
            // If the to-be-marked invalid block is in the active chain,
            // something is interfering and we can't proceed.
            return false;
        }

        // Mark pindex (or the last disconnected block) as invalid (or parked),
        // even when it never was in the main chain.
        to_mark_failed_or_parked->nStatus =
            invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
                       : to_mark_failed_or_parked->nStatus.withParked();
        m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
        if (invalidate) {
            m_chainman.m_failed_blocks.insert(to_mark_failed_or_parked);
        }

        // If any new blocks somehow arrived while we were disconnecting
        // (above), then the pre-calculation of what should go into
        // setBlockIndexCandidates may have missed entries. This would
        // technically be an inconsistency in the block index, but if we clean
        // it up here, this should be an essentially unobservable error.
        // Loop back over all block index entries and add any missing entries
        // to setBlockIndexCandidates.
        for (auto &[_, block_index] : m_blockman.m_block_index) {
            if (block_index.IsValid(BlockValidity::TRANSACTIONS) &&
                block_index.HaveTxsDownloaded() &&
                !setBlockIndexCandidates.value_comp()(&block_index,
                                                      m_chain.Tip())) {
                setBlockIndexCandidates.insert(&block_index);
            }
        }

        if (invalidate) {
            InvalidChainFound(to_mark_failed_or_parked);
        }
    }

    // Only notify about a new block tip if the active chain was modified.
    if (pindex_was_in_chain) {
        uiInterface.NotifyBlockTip(
            GetSynchronizationState(IsInitialBlockDownload()),
            to_mark_failed_or_parked->pprev);
    }
    return true;
}
// Mark a block (and its descendants) as invalid, disconnecting it from the
// active chain if needed. Thin wrapper around UnwindBlock(invalidate=true).
bool Chainstate::InvalidateBlock(BlockValidationState &state,
                                 CBlockIndex *pindex) {
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);

    // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
    LOCK(m_chainstate_mutex);

    constexpr bool invalidate = true;
    return UnwindBlock(state, pindex, invalidate);
}
// Park a block (and its descendants), disconnecting it from the active chain
// if needed. Thin wrapper around UnwindBlock(invalidate=false).
bool Chainstate::ParkBlock(BlockValidationState &state, CBlockIndex *pindex) {
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);

    // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
    LOCK(m_chainstate_mutex);

    constexpr bool invalidate = false;
    return UnwindBlock(state, pindex, invalidate);
}
/**
 * Apply the status transformation f to pindex's status, but only when the
 * result differs and pindex is pindexBase or one of its descendants (a null
 * pindexBase matches any block). Keeps the dirty-index set, the failed-block
 * set, and setBlockIndexCandidates consistent with the new status.
 *
 * @return true when the block's status was changed.
 */
template <typename F>
bool Chainstate::UpdateFlagsForBlock(CBlockIndex *pindexBase,
                                     CBlockIndex *pindex, F f) {
    BlockStatus newStatus = f(pindex->nStatus);
    if (pindex->nStatus != newStatus &&
        (!pindexBase ||
         pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
        pindex->nStatus = newStatus;
        m_blockman.m_dirty_blockindex.insert(pindex);
        if (newStatus.isValid()) {
            // The block is no longer considered failed.
            m_chainman.m_failed_blocks.erase(pindex);
        }

        // Re-add as a tip candidate if it qualifies and beats the current tip
        // per the work comparator.
        if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
            pindex->HaveTxsDownloaded() &&
            setBlockIndexCandidates.value_comp()(m_chain.Tip(), pindex)) {
            setBlockIndexCandidates.insert(pindex);
        }
        return true;
    }
    return false;
}
/**
 * Apply a status transformation to a block, its ancestors and related blocks.
 *
 * @param pindexReset          Cached "best" pointer (e.g. best invalid or best
 *                             parked block); cleared when one of its ancestors
 *                             was modified so callers can recompute it.
 * @param f                    Transformation applied to pindex and each of its
 *                             ancestors.
 * @param fChild               Transformation applied to blocks descending from
 *                             pindex (pindex itself included by the ancestor
 *                             test in UpdateFlagsForBlock).
 * @param fAncestorWasChanged  Transformation applied to blocks descending from
 *                             the deepest ancestor actually changed by f.
 */
template <typename F, typename C, typename AC>
void Chainstate::UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset,
                             F f, C fChild, AC fAncestorWasChanged) {
    AssertLockHeld(cs_main);

    // Update the current block and ancestors; while we're doing this, identify
    // which was the deepest ancestor we changed.
    CBlockIndex *pindexDeepestChanged = pindex;
    for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
         pindexAncestor = pindexAncestor->pprev) {
        if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
            pindexDeepestChanged = pindexAncestor;
        }
    }

    if (pindexReset &&
        pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
            pindexDeepestChanged) {
        // reset pindexReset if it had a modified ancestor.
        pindexReset = nullptr;
    }

    // Update all blocks under modified blocks. This walks the entire block
    // index, relying on the base-block filter inside UpdateFlagsForBlock to
    // select the relevant descendants.
    for (auto &[_, block_index] : m_blockman.m_block_index) {
        UpdateFlagsForBlock(pindex, &block_index, fChild);
        UpdateFlagsForBlock(pindexDeepestChanged, &block_index,
                            fAncestorWasChanged);
    }
}
// Remove invalidity status from a block: the block and its ancestors get
// their failure flags fully cleared, while descendants of any changed
// ancestor only lose their "failed parent" marker. Also resets the cached
// best-invalid pointer when appropriate.
void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    const auto clearFailureFlags = [](const BlockStatus status) {
        return status.withClearedFailureFlags();
    };
    const auto clearFailedParent = [](const BlockStatus status) {
        return status.withFailedParent(false);
    };

    // The same full clearing applies to the block itself, its ancestors and
    // its direct descendants.
    UpdateFlags(pindex, m_chainman.m_best_invalid, clearFailureFlags,
                clearFailureFlags, clearFailedParent);
}
// Remove parked status from a block and its ancestors. Descendants either get
// fully unparked (fClearChildren) or merely lose their "parked parent"
// marker. Also resets the cached best-parked pointer when appropriate.
void Chainstate::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
    AssertLockHeld(cs_main);

    const auto clearAllParkedFlags = [](const BlockStatus status) {
        return status.withClearedParkedFlags();
    };
    const auto clearParkedParent = [](const BlockStatus status) {
        return status.withParkedParent(false);
    };
    // Children are fully unparked only on request; otherwise they just lose
    // the inherited "parked parent" bit.
    const auto childTransform = [fClearChildren](const BlockStatus status) {
        return fClearChildren ? status.withClearedParkedFlags()
                              : status.withParkedParent(false);
    };

    UpdateFlags(pindex, m_chainman.m_best_parked, clearAllParkedFlags,
                childTransform, clearParkedParent);
}
// Unpark a block together with all of its descendants.
void Chainstate::UnparkBlockAndChildren(CBlockIndex *pindex) {
    UnparkBlockImpl(pindex, /*fClearChildren=*/true);
}
// Unpark a single block; descendants only lose their "parked parent" marker.
void Chainstate::UnparkBlock(CBlockIndex *pindex) {
    UnparkBlockImpl(pindex, /*fClearChildren=*/false);
}
// Record pindex as the avalanche finalization point for this chainstate and
// fire the BlockFinalized signal. Returns false when pindex is null or not
// on the active chain; returns true without re-signaling when the block was
// already finalized.
bool Chainstate::AvalancheFinalizeBlock(CBlockIndex *pindex) {
    if (!pindex) {
        return false;
    }

    if (!m_chain.Contains(pindex)) {
        LogPrint(BCLog::AVALANCHE,
                 "The block to mark finalized by avalanche is not on the "
                 "active chain: %s\n",
                 pindex->GetBlockHash().ToString());
        return false;
    }

    // NOTE(review): staking-rewards cleanup runs even when the block turns
    // out to be already finalized below — confirm this is intended.
    if (g_avalanche) {
        g_avalanche->cleanupStakingRewards(pindex->nHeight);
    }

    if (IsBlockAvalancheFinalized(pindex)) {
        return true;
    }

    {
        // Keep the lock scope minimal; BlockFinalized is emitted without
        // holding cs_avalancheFinalizedBlockIndex.
        LOCK(cs_avalancheFinalizedBlockIndex);
        m_avalancheFinalizedBlockIndex = pindex;
    }

    GetMainSignals().BlockFinalized(pindex);

    return true;
}
// Forget the current avalanche finalization point, so no block is considered
// avalanche-finalized until AvalancheFinalizeBlock is called again.
void Chainstate::ClearAvalancheFinalizedBlock() {
    LOCK(cs_avalancheFinalizedBlockIndex);
    m_avalancheFinalizedBlockIndex = nullptr;
}
// A block counts as avalanche-finalized when it is the recorded finalization
// point or one of its ancestors.
bool Chainstate::IsBlockAvalancheFinalized(const CBlockIndex *pindex) const {
    LOCK(cs_avalancheFinalizedBlockIndex);
    if (!pindex || !m_avalancheFinalizedBlockIndex) {
        return false;
    }
    return m_avalancheFinalizedBlockIndex->GetAncestor(pindex->nHeight) ==
           pindex;
}
/**
 * Mark a block as having its data received and checked (up to
 * BLOCK_VALID_TRANSACTIONS).
 *
 * Records the block's on-disk position and, once the block is linkable
 * (genesis, or all parents already have their transactions), walks forward
 * through previously-unlinked descendants to register new tip candidates.
 */
void Chainstate::ReceivedBlockTransactions(const CBlock &block,
                                           CBlockIndex *pindexNew,
                                           const FlatFilePos &pos) {
    // Record transaction count, serialized size and on-disk location, and
    // mark the index entry as having data available.
    pindexNew->nTx = block.vtx.size();
    pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus = pindexNew->nStatus.withData();
    pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS);
    m_blockman.m_dirty_blockindex.insert(pindexNew);

    if (pindexNew->UpdateChainStats()) {
        // If pindexNew is the genesis block or all parents are
        // BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex *> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to
        // be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->UpdateChainStats();
            if (pindex->nSequenceId == 0) {
                // We assign a sequence id when transactions are received to
                // prevent a miner from being able to broadcast a block but not
                // its content. However, a sequence id may have been set
                // manually, for instance via PreciousBlock, in which case, we
                // don't need to assign one.
                pindex->nSequenceId = nBlockSequenceId++;
            }

            // Candidate for best tip if it is not worse than the current tip.
            if (m_chain.Tip() == nullptr ||
                !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }

            // Move blocks that were waiting for this parent from the
            // "unlinked" multimap onto the processing queue.
            std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                      std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                range = m_blockman.m_blocks_unlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
                    range.first;
                queue.push_back(it->second);
                range.first++;
                m_blockman.m_blocks_unlinked.erase(it);
            }
        }
    } else if (pindexNew->pprev &&
               pindexNew->pprev->IsValid(BlockValidity::TREE)) {
        // The parent's transactions aren't available yet: remember this block
        // so it can be linked once the parent becomes connectable.
        m_blockman.m_blocks_unlinked.insert(
            std::make_pair(pindexNew->pprev, pindexNew));
    }
}
/**
 * Return true if the provided block header is valid.
 * Only verify PoW if blockValidationOptions is configured to do so.
 * This allows validation of headers on which the PoW hasn't been done,
 * e.g. a block template handed to mining software.
 * Do not call this for any check that depends on the context.
 * For context-dependent calls, see ContextualCheckBlockHeader.
 */
static bool CheckBlockHeader(const CBlockHeader &block,
                             BlockValidationState &state,
                             const Consensus::Params &params,
                             BlockValidationOptions validationOptions) {
    if (!validationOptions.shouldValidatePoW()) {
        // PoW validation explicitly disabled: nothing else to check here.
        return true;
    }

    // Check proof of work matches claimed amount.
    if (CheckProofOfWork(block.GetHash(), block.nBits, params)) {
        return true;
    }

    return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                         "high-hash", "proof of work failed");
}
// Context-free block checks: header (PoW), merkle root, size limits and
// per-transaction sanity. The result is cached on the block via fChecked,
// but only when both PoW and merkle root were fully validated.
bool CheckBlock(const CBlock &block, BlockValidationState &state,
                const Consensus::Params &params,
                BlockValidationOptions validationOptions) {
    // These are checks that are independent of context.
    if (block.fChecked) {
        return true;
    }

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, params, validationOptions)) {
        return false;
    }

    // Check the merkle root.
    if (validationOptions.shouldValidateMerkleRoot()) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2) {
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
                                 "bad-txnmrklroot", "hashMerkleRoot mismatch");
        }

        // Check for merkle tree malleability (CVE-2012-2459): repeating
        // sequences of transactions in a block without affecting the merkle
        // root of a block, while still invalidating it.
        if (mutated) {
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
                                 "bad-txns-duplicate", "duplicate transaction");
        }
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.

    // First transaction must be coinbase.
    if (block.vtx.empty()) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "bad-cb-missing", "first tx is not coinbase");
    }

    // Size limits.
    auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();

    // Bail early if there is no way this block is of reasonable size: the
    // transaction count alone already exceeds the size limit.
    if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "bad-blk-length", "size limits failed");
    }

    auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
    if (currentBlockSize > nMaxBlockSize) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "bad-blk-length", "size limits failed");
    }

    // And a valid coinbase.
    TxValidationState tx_state;
    if (!CheckCoinbase(*block.vtx[0], tx_state)) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             tx_state.GetRejectReason(),
                             strprintf("Coinbase check failed (txid %s) %s",
                                       block.vtx[0]->GetId().ToString(),
                                       tx_state.GetDebugMessage()));
    }

    // Check transactions for regularity, skipping the first. Note that this
    // is the first time we check that all after the first are !IsCoinBase.
    for (size_t i = 1; i < block.vtx.size(); i++) {
        auto *tx = block.vtx[i].get();
        if (!CheckRegularTransaction(*tx, tx_state)) {
            return state.Invalid(
                BlockValidationResult::BLOCK_CONSENSUS,
                tx_state.GetRejectReason(),
                strprintf("Transaction check failed (txid %s) %s",
                          tx->GetId().ToString(), tx_state.GetDebugMessage()));
        }
    }

    // Only cache success when both PoW and merkle root were validated;
    // a partial check must not short-circuit future full checks.
    if (validationOptions.shouldValidatePoW() &&
        validationOptions.shouldValidateMerkleRoot()) {
        block.fChecked = true;
    }

    return true;
}
// Check that every header in the vector carries a valid proof of work.
// An empty vector is trivially valid.
bool HasValidProofOfWork(const std::vector<CBlockHeader> &headers,
                         const Consensus::Params &consensusParams) {
    for (const CBlockHeader &header : headers) {
        if (!CheckProofOfWork(header.GetHash(), header.nBits,
                              consensusParams)) {
            return false;
        }
    }
    return true;
}
// Sum the proof-of-work of the given headers. A temporary index entry is
// built for each header because GetBlockProof operates on CBlockIndex.
arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader> &headers) {
    arith_uint256 accumulated{0};
    for (const auto &header : headers) {
        const CBlockIndex indexEntry{header};
        accumulated += GetBlockProof(indexEntry);
    }
    return accumulated;
}
/**
 * Context-dependent validity checks.
 * By "context", we mean only the previous block headers, but not the UTXO
 * set; UTXO-related validity checks are done in ConnectBlock().
 * NOTE: This function is not currently invoked by ConnectBlock(), so we
 * should consider upgrade issues if we change which consensus rules are
 * enforced in this function (eg by adding a new consensus rule). See comment
 * in ConnectBlock().
 * Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlockHeader(
    const CBlockHeader &block, BlockValidationState &state,
    BlockManager &blockman, ChainstateManager &chainman,
    const CBlockIndex *pindexPrev, NodeClock::time_point now,
    const std::optional<CCheckpointData> &test_checkpoints = std::nullopt)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
    AssertLockHeld(::cs_main);
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;

    const CChainParams &params = chainman.GetParams();

    // Check proof of work (difficulty target, not the hash itself — that is
    // covered by CheckBlockHeader).
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
        LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                             "bad-diffbits", "incorrect proof of work");
    }

    // Check against checkpoints
    if (chainman.m_options.checkpoints_enabled) {
        // test_checkpoints lets tests override the chain's checkpoint data.
        const CCheckpointData &checkpoints =
            test_checkpoints ? test_checkpoints.value() : params.Checkpoints();

        // Check that the block chain matches the known block chain up to a
        // checkpoint.
        if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
            LogPrint(BCLog::VALIDATION,
                     "ERROR: %s: rejected by checkpoint lock-in at %d\n",
                     __func__, nHeight);
            return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
                                 "checkpoint mismatch");
        }

        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
        // in our BlockIndex().
        const CBlockIndex *pcheckpoint =
            blockman.GetLastCheckpoint(checkpoints);
        if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
            LogPrint(BCLog::VALIDATION,
                     "ERROR: %s: forked chain older than last checkpoint "
                     "(height %d)\n",
                     __func__, nHeight);
            return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
                                 "bad-fork-prior-to-checkpoint");
        }
    }

    // Check timestamp against prev (must be strictly after the median time
    // of the previous 11 blocks).
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                             "time-too-old", "block's timestamp is too early");
    }

    // Check timestamp against the caller-supplied current time.
    if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
        return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE,
                             "time-too-new",
                             "block timestamp too far in the future");
    }

    // Reject blocks with outdated version once the corresponding soft fork
    // (BIP34 / BIP66 / BIP65) is active.
    if ((block.nVersion < 2 &&
         DeploymentActiveAfter(pindexPrev, chainman,
                               Consensus::DEPLOYMENT_HEIGHTINCB)) ||
        (block.nVersion < 3 &&
         DeploymentActiveAfter(pindexPrev, chainman,
                               Consensus::DEPLOYMENT_DERSIG)) ||
        (block.nVersion < 4 &&
         DeploymentActiveAfter(pindexPrev, chainman,
                               Consensus::DEPLOYMENT_CLTV))) {
        return state.Invalid(
            BlockValidationResult::BLOCK_INVALID_HEADER,
            strprintf("bad-version(0x%08x)", block.nVersion),
            strprintf("rejected nVersion=0x%08x block", block.nVersion));
    }

    return true;
}
// Evaluate a transaction as if it were included in the block that would
// follow the current tip. nLockTime is checked against the *next* block
// height — the same height AcceptBlock()/IsFinalTx() would use — and against
// the tip's median time past (BIP113), which is what the next block's
// previous-block MTP will be.
bool ContextualCheckTransactionForCurrentBlock(
    const CBlockIndex *active_chain_tip, const Consensus::Params &params,
    const CTransaction &tx, TxValidationState &state) {
    AssertLockHeld(cs_main);
    // TODO: Make active_chain_tip a reference
    assert(active_chain_tip);

    // Median time of the current tip: also usable for consensus upgrade
    // activation checks.
    const int64_t median_time_past{active_chain_tip->GetMedianTimePast()};

    // Height the transaction would be mined at.
    const int next_block_height = active_chain_tip->nHeight + 1;

    return ContextualCheckTransaction(params, tx, state, next_block_height,
                                      median_time_past);
}
/**
 * Context-dependent checks on a block's transactions: canonical ordering,
 * finality (lock times) and the BIP34 coinbase height commitment.
 * NOTE: This function is not currently invoked by ConnectBlock(), so we
 * should consider upgrade issues if we change which consensus rules are
 * enforced in this function (eg by adding a new consensus rule). See comment
 * in ConnectBlock().
 * Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlock(const CBlock &block,
                                 BlockValidationState &state,
                                 const ChainstateManager &chainman,
                                 const CBlockIndex *pindexPrev) {
    // A null pindexPrev means the block being validated is the genesis block.
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Enforce BIP113 (Median Time Past).
    bool enforce_locktime_median_time_past{false};
    if (DeploymentActiveAfter(pindexPrev, chainman,
                              Consensus::DEPLOYMENT_CSV)) {
        assert(pindexPrev != nullptr);
        enforce_locktime_median_time_past = true;
    }

    const int64_t nMedianTimePast =
        pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();

    // Pre-BIP113 the lock time cutoff is the block's own timestamp.
    const int64_t nLockTimeCutoff{enforce_locktime_median_time_past
                                      ? nMedianTimePast
                                      : block.GetBlockTime()};

    const Consensus::Params params = chainman.GetConsensus();
    const bool fIsMagneticAnomalyEnabled =
        IsMagneticAnomalyEnabled(params, pindexPrev);

    // Check transactions:
    // - canonical ordering
    // - ensure they are finalized
    // - check they have the minimum size
    const CTransaction *prevTx = nullptr;
    for (const auto &ptx : block.vtx) {
        const CTransaction &tx = *ptx;
        if (fIsMagneticAnomalyEnabled) {
            // After Magnetic Anomaly, non-coinbase transactions must appear
            // in strictly increasing txid order.
            if (prevTx && (tx.GetId() <= prevTx->GetId())) {
                if (tx.GetId() == prevTx->GetId()) {
                    return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                         "tx-duplicate",
                                         strprintf("Duplicated transaction %s",
                                                   tx.GetId().ToString()));
                }

                return state.Invalid(
                    BlockValidationResult::BLOCK_CONSENSUS, "tx-ordering",
                    strprintf("Transaction order is invalid (%s < %s)",
                              tx.GetId().ToString(),
                              prevTx->GetId().ToString()));
            }

            // The coinbase is excluded from the ordering comparison.
            if (prevTx || !tx.IsCoinBase()) {
                prevTx = &tx;
            }
        }

        TxValidationState tx_state;
        if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
                                        nLockTimeCutoff)) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                 tx_state.GetRejectReason(),
                                 tx_state.GetDebugMessage());
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    // (BIP34): the scriptSig must begin with the expected height push.
    if (DeploymentActiveAfter(pindexPrev, chainman,
                              Consensus::DEPLOYMENT_HEIGHTINCB)) {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(),
                        block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                 "bad-cb-height",
                                 "block height mismatch in coinbase");
        }
    }

    return true;
}
/**
 * If the provided block header is valid, add it to the block index.
 *
 * Performs duplicate detection, context-free and contextual header checks,
 * and propagates failure flags from known-invalid ancestors.
 *
 * Returns true if the block is successfully added to the block index.
 */
bool ChainstateManager::AcceptBlockHeader(
    const CBlockHeader &block, BlockValidationState &state,
    CBlockIndex **ppindex, bool min_pow_checked,
    const std::optional<CCheckpointData> &test_checkpoints) {
    AssertLockHeld(cs_main);
    const Config &config = this->GetConfig();
    const CChainParams &chainparams = config.GetChainParams();

    // Check for duplicate
    BlockHash hash = block.GetHash();
    BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
    // The genesis block header skips all checks below and goes straight to
    // the min_pow_checked gate and AddToBlockIndex.
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != m_blockman.m_block_index.end()) {
            // Block header is already known.
            CBlockIndex *pindex = &(miSelf->second);
            if (ppindex) {
                *ppindex = pindex;
            }

            if (pindex->nStatus.isInvalid()) {
                LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n",
                         __func__, hash.ToString());
                return state.Invalid(
                    BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
            }

            return true;
        }

        // Context-free header check (PoW).
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
                              BlockValidationOptions(config))) {
            LogPrint(BCLog::VALIDATION,
                     "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
                     hash.ToString(), state.ToString());
            return false;
        }

        // Get prev block index
        BlockMap::iterator mi{
            m_blockman.m_block_index.find(block.hashPrevBlock)};
        if (mi == m_blockman.m_block_index.end()) {
            LogPrint(BCLog::VALIDATION,
                     "header %s has prev block not found: %s\n",
                     hash.ToString(), block.hashPrevBlock.ToString());
            return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV,
                                 "prev-blk-not-found");
        }

        CBlockIndex *pindexPrev = &((*mi).second);
        assert(pindexPrev);
        if (pindexPrev->nStatus.isInvalid()) {
            LogPrint(BCLog::VALIDATION,
                     "header %s has prev block invalid: %s\n", hash.ToString(),
                     block.hashPrevBlock.ToString());
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV,
                                 "bad-prevblk");
        }

        if (!ContextualCheckBlockHeader(
                block, state, m_blockman, *this, pindexPrev,
                m_options.adjusted_time_callback(), test_checkpoints)) {
            LogPrint(BCLog::VALIDATION,
                     "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n",
                     __func__, hash.ToString(), state.ToString());
            return false;
        }

        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks
         * between them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous
            // block hasn't been validated up to BlockValidity::SCRIPTS. This is
            // a performance optimization, in the common case of adding a new
            // block to the tip, we don't need to iterate over the failed blocks
            // list.
            for (const CBlockIndex *failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus.hasFailed());
                    // Walk from pindexPrev back to the failed ancestor,
                    // tagging each intermediate block as failed-parent.
                    CBlockIndex *invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus =
                            invalid_walk->nStatus.withFailedParent();
                        m_blockman.m_dirty_blockindex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    LogPrint(BCLog::VALIDATION,
                             "header %s has prev block invalid: %s\n",
                             hash.ToString(), block.hashPrevBlock.ToString());
                    return state.Invalid(
                        BlockValidationResult::BLOCK_INVALID_PREV,
                        "bad-prevblk");
                }
            }
        }
    }
    // Anti-DoS gate: the caller vouches for sufficient chain work.
    if (!min_pow_checked) {
        LogPrint(BCLog::VALIDATION,
                 "%s: not adding new block header %s, missing anti-dos "
                 "proof-of-work validation\n",
                 __func__, hash.ToString());
        return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK,
                             "too-little-chainwork");
    }
    CBlockIndex *pindex{m_blockman.AddToBlockIndex(block, m_best_header)};

    if (ppindex) {
        *ppindex = pindex;
    }

    return true;
}
// Exposed wrapper for AcceptBlockHeader: accepts a batch of headers under a
// single cs_main lock, optionally reports the last accepted header through
// ppindex, and notifies/logs header-tip progress afterwards.
bool ChainstateManager::ProcessNewBlockHeaders(
    const std::vector<CBlockHeader> &headers, bool min_pow_checked,
    BlockValidationState &state, const CBlockIndex **ppindex,
    const std::optional<CCheckpointData> &test_checkpoints) {
    AssertLockNotHeld(cs_main);
    {
        LOCK(cs_main);
        for (const CBlockHeader &header : headers) {
            // Use a temp pindex instead of ppindex to avoid a const_cast
            CBlockIndex *pindex = nullptr;
            bool accepted = AcceptBlockHeader(
                header, state, &pindex, min_pow_checked, test_checkpoints);
            ActiveChainstate().CheckBlockIndex();

            if (!accepted) {
                return false;
            }

            if (ppindex) {
                *ppindex = pindex;
            }
        }
    }

    if (NotifyHeaderTip(ActiveChainstate())) {
        if (ActiveChainstate().IsInitialBlockDownload() && ppindex &&
            *ppindex) {
            // Estimate remaining headers from the last accepted header's
            // timestamp and the target block spacing, for progress logging.
            const CBlockIndex &last_accepted{**ppindex};
            const int64_t blocks_left{
                (GetTime() - last_accepted.GetBlockTime()) /
                this->GetConsensus().nPowTargetSpacing};
            const double progress{100.0 * last_accepted.nHeight /
                                  (last_accepted.nHeight + blocks_left)};
            LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
                      last_accepted.nHeight, progress);
        }
    }
    return true;
}
// Report headers pre-synchronization progress to the UI and log, rate-limited
// and suppressed once a sufficient-work header chain is already known.
// NOTE(review): the `work` parameter is not used in this body — confirm
// whether it is reserved for callers/overrides or can be dropped.
void ChainstateManager::ReportHeadersPresync(const arith_uint256 &work,
                                             int64_t height,
                                             int64_t timestamp) {
    AssertLockNotHeld(cs_main);
    const auto &chainstate = ActiveChainstate();
    {
        LOCK(cs_main);
        // Don't report headers presync progress if we already have a
        // post-minchainwork header chain.
        // This means we lose reporting for potentially legitimate, but
        // unlikely, deep reorgs, but prevent attackers that spam low-work
        // headers from filling our logs.
        if (m_best_header->nChainWork >=
            UintToArith256(GetConsensus().nMinimumChainWork)) {
            return;
        }
        // Rate limit headers presync updates to 4 per second, as these are not
        // subject to DoS protection.
        auto now = Now<SteadyMilliseconds>();
        if (now < m_last_presync_update + 250ms) {
            return;
        }
        m_last_presync_update = now;
    }
    bool initial_download = chainstate.IsInitialBlockDownload();
    // presync=true lets the UI distinguish this phase from regular sync.
    uiInterface.NotifyHeaderTip(GetSynchronizationState(initial_download),
                                height, timestamp, /*presync=*/true);
    if (initial_download) {
        // Estimate remaining blocks from the header timestamp and the target
        // block spacing.
        const int64_t blocks_left{(GetTime() - timestamp) /
                                  GetConsensus().nPowTargetSpacing};
        const double progress{100.0 * height / (height + blocks_left)};
        LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n",
                  height, progress);
    }
}
/**
 * Store a block on disk.
 *
 * Accepts the header, decides whether the full block should be processed
 * (requested blocks always; unrequested ones only when new, with enough
 * work, and not too far ahead), runs the context-free and contextual block
 * checks, optionally parks deep-reorg blocks, relays, and persists the block.
 *
 * @param[in,out] pblock The block we want to accept.
 * @param[in] fRequested A boolean to indicate if this block was requested
 *                       from our peers.
 * @param[in] dbp If non-null, the disk position of the block.
 * @param[in,out] fNewBlock True if block was first received via this call.
 * @param[in] min_pow_checked True if proof-of-work anti-DoS checks have
 *                            been done by caller for headers chain
 * @return True if the block is accepted as a valid block and written to disk.
 */
bool Chainstate::AcceptBlock(const std::shared_ptr<const CBlock> &pblock,
                             BlockValidationState &state, bool fRequested,
                             const FlatFilePos *dbp, bool *fNewBlock,
                             bool min_pow_checked) {
    AssertLockHeld(cs_main);

    const CBlock &block = *pblock;
    if (fNewBlock) {
        *fNewBlock = false;
    }

    CBlockIndex *pindex = nullptr;

    bool accepted_header{
        m_chainman.AcceptBlockHeader(block, state, &pindex, min_pow_checked)};
    CheckBlockIndex();

    if (!accepted_header) {
        return false;
    }

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus.hasData();

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) {
        return true;
    }

    // Compare block header timestamps and received times of the block and the
    // chaintip. If they have the same chain height, use these diffs as a
    // tie-breaker, attempting to pick the more honestly-mined block.
    int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
    int64_t chainTipTimeDiff =
        m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;

    // NOTE(review): "same height" is detected via equal chain work here, not
    // equal nHeight; the diffs above are currently only logged.
    bool isSameHeight =
        m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
    if (isSameHeight) {
        LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
                  "diff=%d\n",
                  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
        LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
                  "diff=%d\n",
                  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
    }

    bool fHasMoreOrSameWork =
        (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
                       : true);

    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead{pindex->nHeight >
                      m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};

    // TODO: Decouple this function from the block download logic by removing
    // fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.
    // Note that this would break the getblockfrompeer RPC

    // If we didn't ask for it:
    if (!fRequested) {
        // This is a previously-processed block that was pruned.
        if (pindex->nTx != 0) {
            return true;
        }

        // Don't process less-work chains.
        if (!fHasMoreOrSameWork) {
            return true;
        }

        // Block height is too high.
        if (fTooFarAhead) {
            return true;
        }

        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < m_chainman.MinimumChainWork()) {
            return true;
        }
    }

    const CChainParams &params{m_chainman.GetParams()};
    const Consensus::Params &consensusParams = params.GetConsensus();

    if (!CheckBlock(block, state, consensusParams,
                    BlockValidationOptions(m_chainman.GetConfig())) ||
        !ContextualCheckBlock(block, state, m_chainman, pindex->pprev)) {
        // BLOCK_MUTATED means we may have received the wrong transactions for
        // this header, so don't poison the header's index entry.
        if (state.IsInvalid() &&
            state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
            pindex->nStatus = pindex->nStatus.withFailed();
            m_blockman.m_dirty_blockindex.insert(pindex);
        }

        return error("%s: %s (block %s)", __func__, state.ToString(),
                     block.GetHash().ToString());
    }

    // If connecting the new block would require rewinding more than one block
    // from the active chain (i.e., a "deep reorg"), then mark the new block as
    // parked. If it has enough work then it will be automatically unparked
    // later, during FindMostWorkChain. We mark the block as parked at the very
    // last minute so we can make sure everything is ready to be reorged if
    // needed.
    if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
        const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
        if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
            LogPrintf("Park block %s as it would cause a deep reorg.\n",
                      pindex->GetBlockHash().ToString());
            pindex->nStatus = pindex->nStatus.withParked();
            m_blockman.m_dirty_blockindex.insert(pindex);
        }
    }

    // Header is valid/has work and the merkle tree is good.
    // Relay now, but if it does not build on our best tip, let the
    // SendMessages loop relay it.
    if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
        GetMainSignals().NewPoWValidBlock(pindex, pblock);
    }

    // Write block to history file
    if (fNewBlock) {
        *fNewBlock = true;
    }
    try {
        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight,
                                                        m_chain, params, dbp)};
        if (blockPos.IsNull()) {
            state.Error(strprintf(
                "%s: Failed to find position to write new block to disk",
                __func__));
            return false;
        }
        ReceivedBlockTransactions(block, pindex, blockPos);
    } catch (const std::runtime_error &e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    FlushStateToDisk(state, FlushStateMode::NONE);

    CheckBlockIndex();

    return true;
}
// Process a full block received from the network or submitted locally:
// check it, store it via AcceptBlock, then try to make it the new tip via
// ActivateBestChain.
bool ChainstateManager::ProcessNewBlock(
    const std::shared_ptr<const CBlock> &block, bool force_processing,
    bool min_pow_checked, bool *new_block) {
    AssertLockNotHeld(cs_main);

    {
        if (new_block) {
            *new_block = false;
        }

        BlockValidationState state;

        // CheckBlock() does not support multi-threaded block validation
        // because CBlock::fChecked can cause data race.
        // Therefore, the following critical section must include the
        // CheckBlock() call as well.
        LOCK(cs_main);

        // Skipping AcceptBlock() for CheckBlock() failures means that we will
        // never mark a block as invalid if CheckBlock() fails. This is
        // protective against consensus failure if there are any unknown forms
        // of block malleability that cause CheckBlock() to fail; see e.g.
        // CVE-2012-2459 and
        // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html.
        // Because CheckBlock() is not very expensive, the anti-DoS benefits of
        // caching failure (of a definitely-invalid block) are not substantial.
        bool ret = CheckBlock(*block, state, this->GetConsensus(),
                              BlockValidationOptions(this->GetConfig()));
        if (ret) {
            // Store to disk
            ret = ActiveChainstate().AcceptBlock(block, state, force_processing,
                                                 nullptr, new_block,
                                                 min_pow_checked);
        }

        if (!ret) {
            GetMainSignals().BlockChecked(*block, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__,
                         state.ToString());
        }
    }

    NotifyHeaderTip(ActiveChainstate());

    // Only used to report errors, not invalidity - ignore it
    BlockValidationState state;
    if (!ActiveChainstate().ActivateBestChain(state, block)) {
        return error("%s: ActivateBestChain failed (%s)", __func__,
                     state.ToString());
    }

    return true;
}
// Submit a transaction to the active chainstate's mempool (or just evaluate
// it when test_accept is set). Fails with TX_NO_MEMPOOL when no mempool is
// attached.
MempoolAcceptResult
ChainstateManager::ProcessTransaction(const CTransactionRef &tx,
                                      bool test_accept) {
    AssertLockHeld(cs_main);
    Chainstate &active_chainstate = ActiveChainstate();

    auto *mempool = active_chainstate.GetMempool();
    if (!mempool) {
        TxValidationState state;
        state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
        return MempoolAcceptResult::Failure(state);
    }

    auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(),
                                     /*bypass_limits=*/false, test_accept);
    // Sanity-check mempool consistency against the UTXO set after submission.
    mempool->check(active_chainstate.CoinsTip(),
                   active_chainstate.m_chain.Height() + 1);
    return result;
}
// Dry-run full validation of a block built on the current tip (used e.g. for
// mining block templates): header, context-free, contextual and connect
// checks run against a throwaway coins view and dummy index entry, so no
// persistent state is modified.
bool TestBlockValidity(
    BlockValidationState &state, const CChainParams &params,
    Chainstate &chainstate, const CBlock &block, CBlockIndex *pindexPrev,
    const std::function<NodeClock::time_point()> &adjusted_time_callback,
    BlockValidationOptions validationOptions) {
    AssertLockHeld(cs_main);
    // Only supported for blocks extending the current tip.
    assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
    CCoinsViewCache viewNew(&chainstate.CoinsTip());
    BlockHash block_hash(block.GetHash());
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    indexDummy.phashBlock = &block_hash;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman,
                                    chainstate.m_chainman, pindexPrev,
                                    adjusted_time_callback())) {
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
                     state.ToString());
    }

    if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
        return error("%s: Consensus::CheckBlock: %s", __func__,
                     state.ToString());
    }

    if (!ContextualCheckBlock(block, state, chainstate.m_chainman,
                              pindexPrev)) {
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
                     state.ToString());
    }

    // Final `true` presumably selects a check-only mode so viewNew is never
    // flushed — confirm against ConnectBlock's signature.
    if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew,
                                 validationOptions, nullptr, true)) {
        return false;
    }

    assert(state.IsValid());

    return true;
}
/* This function is called from the RPC code for pruneblockchain */
/**
 * Flush chain state to disk with a manual prune height, causing block files
 * up to that height to become eligible for pruning.
 *
 * @param active_chainstate The chainstate whose state is flushed.
 * @param nManualPruneHeight Blocks at or below this height may be pruned.
 *
 * BUG FIX: the failure log was previously emitted when FlushStateToDisk()
 * returned true (success) and skipped on failure; the condition must be
 * negated so we log only when the flush actually fails.
 */
void PruneBlockFilesManual(Chainstate &active_chainstate,
                           int nManualPruneHeight) {
    BlockValidationState state;
    if (!active_chainstate.FlushStateToDisk(state, FlushStateMode::NONE,
                                            nManualPruneHeight)) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__,
                  state.ToString());
    }
}
/**
 * Load persisted mempool contents from `load_path` into this chainstate's
 * mempool, then mark the mempool load as attempted.
 *
 * @param load_path Path of the serialized mempool file.
 * @param mockable_fopen_function fopen replacement, injectable for tests.
 */
void Chainstate::LoadMempool(const fs::path &load_path,
                             FopenFn mockable_fopen_function) {
    // Chainstates configured without a mempool have nothing to load.
    if (m_mempool) {
        // Delegate the actual deserialization to the free function.
        ::LoadMempool(*m_mempool, load_path, *this, mockable_fopen_function);
        // Record a completed load attempt unless we are shutting down.
        m_mempool->SetLoadTried(!ShutdownRequested());
    }
}
/**
 * Point m_chain at the block index entry matching the coins database's best
 * block, pruning tip candidates accordingly.
 *
 * @return false if the coins DB's best block is unknown to the block index;
 *         true otherwise (including when the tip was already in sync).
 *
 * Requires cs_main; must not be called with an empty coins view (asserted).
 */
bool Chainstate::LoadChainTip() {
    AssertLockHeld(cs_main);
    const CCoinsViewCache &coins_cache = CoinsTip();
    // Never called when the coins view is empty
    assert(!coins_cache.GetBestBlock().IsNull());
    const CBlockIndex *tip = m_chain.Tip();
    if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
        // Chain tip already matches the coins DB: nothing to do.
        return true;
    }
    // Load pointer to end of best chain
    CBlockIndex *pindex =
        m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
    if (!pindex) {
        // Coins DB references a block we have no index entry for.
        return false;
    }
    m_chain.SetTip(pindex);
    PruneBlockIndexCandidates();
    tip = m_chain.Tip();
    LogPrintf(
        "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
        tip->GetBlockHash().ToString(), m_chain.Height(),
        FormatISO8601DateTime(tip->GetBlockTime()),
        GuessVerificationProgress(m_chainman.GetParams().TxData(), tip));
    return true;
}
// RAII progress reporter: constructor opens the "Verifying blocks..." UI
// progress indicator at 0%.
CVerifyDB::CVerifyDB() {
    uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
}
// Destructor closes the progress indicator (100% with empty title),
// regardless of how VerifyDB() exited.
CVerifyDB::~CVerifyDB() {
    uiInterface.ShowProgress("", 100, false);
}
/**
 * Verify the consistency of the block and coin databases by walking back
 * `nCheckDepth` blocks from the tip and re-checking them at increasing
 * levels of thoroughness:
 *   level 0: blocks can be read back from disk;
 *   level 1: blocks pass CheckBlock();
 *   level 2: undo data can be read back;
 *   level 3: tip blocks can be disconnected in memory without UTXO
 *            inconsistencies (skipped if the coins cache is too small);
 *   level 4: the disconnected blocks can be reconnected.
 *
 * @param nCheckLevel Clamped to [0, 4].
 * @param nCheckDepth Number of blocks to verify; <=0 or larger than the
 *                    chain height means the whole chain.
 * @return SUCCESS, a SKIPPED_* partial result, INTERRUPTED on shutdown, or
 *         CORRUPTED_BLOCK_DB on any verification failure.
 */
VerifyDBResult CVerifyDB::VerifyDB(Chainstate &chainstate,
                                   CCoinsView &coinsview, int nCheckLevel,
                                   int nCheckDepth) {
    AssertLockHeld(cs_main);
    const Config &config = chainstate.m_chainman.GetConfig();
    const CChainParams &params = config.GetChainParams();
    const Consensus::Params &consensusParams = params.GetConsensus();
    // Nothing to verify with fewer than two blocks.
    if (chainstate.m_chain.Tip() == nullptr ||
        chainstate.m_chain.Tip()->pprev == nullptr) {
        return VerifyDBResult::SUCCESS;
    }
    // Verify blocks in the best chain
    if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
        nCheckDepth = chainstate.m_chain.Height();
    }
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
              nCheckLevel);
    // Scratch coins cache layered over the on-disk view; level-3 disconnects
    // are applied here only.
    CCoinsViewCache coins(&coinsview);
    CBlockIndex *pindex;
    CBlockIndex *pindexFailure = nullptr;
    int nGoodTransactions = 0;
    BlockValidationState state;
    int reportDone = 0;
    bool skipped_no_block_data{false};
    bool skipped_l3_checks{false};
    LogPrintf("Verification progress: 0%%\n");
    // NOTE(review): despite the name, this is true when the chainstate is
    // NOT based on a snapshot (the optional is empty) — confirm intent.
    const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
    for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev;
         pindex = pindex->pprev) {
        // At level 4 the backward pass accounts for only half the work, so
        // scale its progress to 0-50%.
        const int percentageDone = std::max(
            1, std::min(99, (int)(((double)(chainstate.m_chain.Height() -
                                            pindex->nHeight)) /
                                  (double)nCheckDepth *
                                  (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone / 10) {
            // report every 10% step
            LogPrintf("Verification progress: %d%%\n", percentageDone);
            reportDone = percentageDone / 10;
        }
        uiInterface.ShowProgress(_("Verifying blocks...").translated,
                                 percentageDone, false);
        if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
            break;
        }
        if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) &&
            !pindex->nStatus.hasData()) {
            // If pruning or running under an assumeutxo snapshot, only go
            // back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d "
                      "(no data). This could be due to pruning or use of an "
                      "assumeutxo snapshot.\n",
                      pindex->nHeight);
            skipped_no_block_data = true;
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
            LogPrintf(
                "Verification error: ReadBlockFromDisk failed at %d, hash=%s\n",
                pindex->nHeight, pindex->GetBlockHash().ToString());
            return VerifyDBResult::CORRUPTED_BLOCK_DB;
        }
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
                                            BlockValidationOptions(config))) {
            LogPrintf(
                "Verification error: found bad block at %d, hash=%s (%s)\n",
                pindex->nHeight, pindex->GetBlockHash().ToString(),
                state.ToString());
            return VerifyDBResult::CORRUPTED_BLOCK_DB;
        }
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            if (!pindex->GetUndoPos().IsNull()) {
                if (!UndoReadFromDisk(undo, pindex)) {
                    LogPrintf("Verification error: found bad undo data at %d, "
                              "hash=%s\n",
                              pindex->nHeight,
                              pindex->GetBlockHash().ToString());
                    return VerifyDBResult::CORRUPTED_BLOCK_DB;
                }
            }
        }
        // check level 3: check for inconsistencies during memory-only
        // disconnect of tip blocks
        size_t curr_coins_usage = coins.DynamicMemoryUsage() +
                                  chainstate.CoinsTip().DynamicMemoryUsage();
        if (nCheckLevel >= 3) {
            if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
                assert(coins.GetBestBlock() == pindex->GetBlockHash());
                DisconnectResult res =
                    chainstate.DisconnectBlock(block, pindex, coins);
                if (res == DisconnectResult::FAILED) {
                    LogPrintf("Verification error: irrecoverable inconsistency "
                              "in block data at %d, hash=%s\n",
                              pindex->nHeight,
                              pindex->GetBlockHash().ToString());
                    return VerifyDBResult::CORRUPTED_BLOCK_DB;
                }
                if (res == DisconnectResult::UNCLEAN) {
                    // Unclean disconnect: remember the deepest failure and
                    // reset the good-transaction counter.
                    nGoodTransactions = 0;
                    pindexFailure = pindex;
                } else {
                    nGoodTransactions += block.vtx.size();
                }
            } else {
                // Cache too small to disconnect further without flushing.
                skipped_l3_checks = true;
            }
        }
        if (ShutdownRequested()) {
            return VerifyDBResult::INTERRUPTED;
        }
    }
    if (pindexFailure) {
        LogPrintf("Verification error: coin database inconsistencies found "
                  "(last %i blocks, %i good transactions before that)\n",
                  chainstate.m_chain.Height() - pindexFailure->nHeight + 1,
                  nGoodTransactions);
        return VerifyDBResult::CORRUPTED_BLOCK_DB;
    }
    if (skipped_l3_checks) {
        LogPrintf("Skipped verification of level >=3 (insufficient database "
                  "cache size). Consider increasing -dbcache.\n");
    }
    // store block count as we move pindex at check level >= 4
    int block_count = chainstate.m_chain.Height() - pindex->nHeight;
    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4 && !skipped_l3_checks) {
        while (pindex != chainstate.m_chain.Tip()) {
            // Forward pass covers the remaining 50-100% of progress.
            const int percentageDone = std::max(
                1, std::min(99, 100 - int(double(chainstate.m_chain.Height() -
                                                 pindex->nHeight) /
                                          double(nCheckDepth) * 50)));
            if (reportDone < percentageDone / 10) {
                // report every 10% step
                LogPrintf("Verification progress: %d%%\n", percentageDone);
                reportDone = percentageDone / 10;
            }
            uiInterface.ShowProgress(_("Verifying blocks...").translated,
                                     percentageDone, false);
            pindex = chainstate.m_chain.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
                LogPrintf("Verification error: ReadBlockFromDisk failed at %d, "
                          "hash=%s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                return VerifyDBResult::CORRUPTED_BLOCK_DB;
            }
            if (!chainstate.ConnectBlock(block, state, pindex, coins,
                                         BlockValidationOptions(config))) {
                LogPrintf("Verification error: found unconnectable block at "
                          "%d, hash=%s (%s)\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString(),
                          state.ToString());
                return VerifyDBResult::CORRUPTED_BLOCK_DB;
            }
            if (ShutdownRequested()) {
                return VerifyDBResult::INTERRUPTED;
            }
        }
    }
    LogPrintf("Verification: No coin database inconsistencies in last %i "
              "blocks (%i transactions)\n",
              block_count, nGoodTransactions);
    if (skipped_l3_checks) {
        return VerifyDBResult::SKIPPED_L3_CHECKS;
    }
    if (skipped_no_block_data) {
        return VerifyDBResult::SKIPPED_MISSING_BLOCKS;
    }
    return VerifyDBResult::SUCCESS;
}
/**
* Apply the effects of a block on the utxo cache, ignoring that it may already
* have been applied.
*/
bool Chainstate::RollforwardBlock(const CBlockIndex *pindex,
CCoinsViewCache &view) {
AssertLockHeld(cs_main);
// TODO: merge with ConnectBlock
CBlock block;
if (!ReadBlockFromDisk(block, pindex, m_chainman.GetConsensus())) {
return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
for (const CTransactionRef &tx : block.vtx) {
// Pass check = true as every addition may be an overwrite.
AddCoins(view, *tx, pindex->nHeight, true);
}
for (const CTransactionRef &tx : block.vtx) {
if (tx->IsCoinBase()) {
continue;
}
for (const CTxIn &txin : tx->vin) {
view.SpendCoin(txin.prevout);
}
}
return true;
}
/**
 * Recover from an interrupted coins-DB flush by replaying the partially
 * applied reorg recorded in the DB's head-block markers.
 *
 * GetHeadBlocks() returns {new tip, old tip} when a flush was interrupted
 * mid-reorg (empty when the DB is consistent). Blocks along the old branch
 * are disconnected down to the fork point, then the new branch is rolled
 * forward via RollforwardBlock().
 *
 * @return true on success or when no replay was needed; false on any error.
 */
bool Chainstate::ReplayBlocks() {
    LOCK(cs_main);
    CCoinsView &db = this->CoinsDB();
    CCoinsViewCache cache(&db);
    std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
    if (hashHeads.empty()) {
        // We're already in a consistent state.
        return true;
    }
    if (hashHeads.size() != 2) {
        return error("ReplayBlocks(): unknown inconsistent state");
    }
    uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
    LogPrintf("Replaying blocks\n");
    // Old tip during the interrupted flush.
    const CBlockIndex *pindexOld = nullptr;
    // New tip during the interrupted flush.
    const CBlockIndex *pindexNew;
    // Latest block common to both the old and the new tip.
    const CBlockIndex *pindexFork = nullptr;
    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
        return error(
            "ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
    if (!hashHeads[1].IsNull()) {
        // The old tip is allowed to be 0, indicating it's the first flush.
        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
            return error(
                "ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }
    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) {
            // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld,
                                   m_chainman.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at "
                             "%d, hash=%s",
                             pindexOld->nHeight,
                             pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n",
                      pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DisconnectResult::FAILED) {
                return error(
                    "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
                    pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DisconnectResult::UNCLEAN is returned, it means a non-existing
            // UTXO was deleted, or an existing UTXO was overwritten. It
            // corresponds to cases where the block-to-be-disconnect never had
            // all its operations applied to the UTXO set. However, as both
            // writing a UTXO and deleting a UTXO are idempotent operations, the
            // result is still a version of the UTXO set with the effects of
            // that block undone.
        }
        pindexOld = pindexOld->pprev;
    }
    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
         ++nHeight) {
        const CBlockIndex &pindex{*Assert(pindexNew->GetAncestor(nHeight))};
        LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(),
                  nHeight);
        uiInterface.ShowProgress(_("Replaying blocks...").translated,
                                 (int)((nHeight - nForkHeight) * 100.0 /
                                       (pindexNew->nHeight - nForkHeight)),
                                 false);
        if (!RollforwardBlock(&pindex, cache)) {
            return false;
        }
    }
    // Mark the DB consistent at the new tip and persist.
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
// May NOT be used after any connections are up as much of the peer-processing
// logic assumes a consistent block index state
/**
 * Reset this chainstate's in-memory block-index-derived state: the tip
 * candidate set, the best-fork tracking pointers, and the block sequence
 * counter.
 */
void Chainstate::UnloadBlockIndex() {
    AssertLockHeld(::cs_main);
    // Drop all tip candidates first.
    setBlockIndexCandidates.clear();
    // Forget fork-warning tracking state.
    m_best_fork_tip = nullptr;
    m_best_fork_base = nullptr;
    // Restart block sequence numbering.
    nBlockSequenceId = 1;
}
/**
 * Load the block index from the database (unless reindexing) and rebuild the
 * derived in-memory state: per-chainstate tip candidate sets, best
 * invalid/parked blocks, and the best header.
 *
 * Assumed-valid handling: blocks at or above the first assumed-valid height
 * are added as tip candidates only for chainstates that are allowed to rely
 * on assumed-valid blocks, so the background validation chainstate still
 * fully validates them.
 *
 * @return false on DB load failure or shutdown request; true otherwise.
 */
bool ChainstateManager::LoadBlockIndex() {
    AssertLockHeld(cs_main);
    // Load block index from databases
    bool needs_init = fReindex;
    if (!fReindex) {
        bool ret = m_blockman.LoadBlockIndexDB(GetConsensus());
        if (!ret) {
            return false;
        }
        std::vector<CBlockIndex *> vSortedByHeight{
            m_blockman.GetAllBlockIndices()};
        std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
                  CBlockIndexHeightOnlyComparator());
        // Find start of assumed-valid region.
        int first_assumed_valid_height = std::numeric_limits<int>::max();
        for (const CBlockIndex *block : vSortedByHeight) {
            if (block->IsAssumedValid()) {
                auto chainstates = GetAll();
                // If we encounter an assumed-valid block index entry, ensure
                // that we have one chainstate that tolerates assumed-valid
                // entries and another that does not (i.e. the background
                // validation chainstate), since assumed-valid entries should
                // always be pending validation by a fully-validated chainstate.
                auto any_chain = [&](auto fnc) {
                    return std::any_of(chainstates.cbegin(), chainstates.cend(),
                                       fnc);
                };
                assert(any_chain([](auto chainstate) {
                    return chainstate->reliesOnAssumedValid();
                }));
                assert(any_chain([](auto chainstate) {
                    return !chainstate->reliesOnAssumedValid();
                }));
                first_assumed_valid_height = block->nHeight;
                LogPrintf("Saw first assumedvalid block at height %d (%s)\n",
                          first_assumed_valid_height, block->ToString());
                break;
            }
        }
        for (CBlockIndex *pindex : vSortedByHeight) {
            if (ShutdownRequested()) {
                return false;
            }
            if (pindex->IsAssumedValid() ||
                (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
                 (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
                // Fill each chainstate's block candidate set. Only add
                // assumed-valid blocks to the tip candidate set if the
                // chainstate is allowed to rely on assumed-valid blocks.
                //
                // If all setBlockIndexCandidates contained the assumed-valid
                // blocks, the background chainstate's ActivateBestChain() call
                // would add assumed-valid blocks to the chain (based on how
                // FindMostWorkChain() works). Obviously we don't want this
                // since the purpose of the background validation chain is to
                // validate assumed-valid blocks.
                //
                // Note: This is considering all blocks whose height is greater
                // or equal to the first assumed-valid block to be assumed-valid
                // blocks, and excluding them from the background chainstate's
                // setBlockIndexCandidates set. This does mean that some blocks
                // which are not technically assumed-valid (later blocks on a
                // fork beginning before the first assumed-valid block) might
                // not get added to the background chainstate, but this is ok,
                // because they will still be attached to the active chainstate
                // if they actually contain more work.
                //
                // Instead of this height-based approach, an earlier attempt was
                // made at detecting "holistically" whether the block index
                // under consideration relied on an assumed-valid ancestor, but
                // this proved to be too slow to be practical.
                for (Chainstate *chainstate : GetAll()) {
                    if (chainstate->reliesOnAssumedValid() ||
                        pindex->nHeight < first_assumed_valid_height) {
                        chainstate->setBlockIndexCandidates.insert(pindex);
                    }
                }
            }
            // Track the most-work invalid block seen so far.
            if (pindex->nStatus.isInvalid() &&
                (!m_best_invalid ||
                 pindex->nChainWork > m_best_invalid->nChainWork)) {
                m_best_invalid = pindex;
            }
            // Track the most-work parked block seen so far.
            if (pindex->nStatus.isOnParkedChain() &&
                (!m_best_parked ||
                 pindex->nChainWork > m_best_parked->nChainWork)) {
                m_best_parked = pindex;
            }
            // Track the best header by chainwork.
            if (pindex->IsValid(BlockValidity::TREE) &&
                (m_best_header == nullptr ||
                 CBlockIndexWorkComparator()(m_best_header, pindex))) {
                m_best_header = pindex;
            }
        }
        needs_init = m_blockman.m_block_index.empty();
    }
    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.
        LogPrintf("Initializing databases...\n");
    }
    return true;
}
/**
 * Ensure the genesis block is written to disk and present in the block
 * index. Safe to call repeatedly: returns early if genesis is already
 * indexed.
 *
 * @return true if genesis is (now) indexed; false if writing it to disk
 *         failed.
 */
bool Chainstate::LoadGenesisBlock() {
    LOCK(cs_main);
    const CChainParams &params{m_chainman.GetParams()};
    // Check whether we're already initialized by checking for genesis in
    // m_blockman.m_block_index. Note that we can't use m_chain here, since it
    // is set based on the coins db, not the block index db, which is the only
    // thing loaded at this point.
    if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash())) {
        return true;
    }
    try {
        const CBlock &block = params.GenesisBlock();
        // Persist genesis at height 0; nullptr: no known position yet.
        FlatFilePos blockPos{
            m_blockman.SaveBlockToDisk(block, 0, m_chain, params, nullptr)};
        if (blockPos.IsNull()) {
            return error("%s: writing genesis block to disk failed", __func__);
        }
        CBlockIndex *pindex =
            m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
        ReceivedBlockTransactions(block, pindex, blockPos);
    } catch (const std::runtime_error &e) {
        return error("%s: failed to write genesis block: %s", __func__,
                     e.what());
    }
    return true;
}
void Chainstate::LoadExternalBlockFile(
FILE *fileIn, FlatFilePos *dbp,
std::multimap<BlockHash, FlatFilePos> *blocks_with_unknown_parent) {
AssertLockNotHeld(m_chainstate_mutex);
// Either both should be specified (-reindex), or neither (-loadblock).
assert(!dbp == !blocks_with_unknown_parent);
int64_t nStart = GetTimeMillis();
const CChainParams &params{m_chainman.GetParams()};
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile
// destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
// so any transaction can fit in the buffer.
CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
if (ShutdownRequested()) {
return;
}
blkdat.SetPos(nRewind);
// Start one byte further next time, in case of failure.
nRewind++;
// Remove former limit.
blkdat.SetLimit();
unsigned int nSize = 0;
try {
// Locate a header.
uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
- blkdat.FindByte(char(params.DiskMagic()[0]));
+ blkdat.FindByte(params.DiskMagic()[0]);
nRewind = blkdat.GetPos() + 1;
blkdat >> buf;
if (memcmp(buf, params.DiskMagic().data(),
CMessageHeader::MESSAGE_START_SIZE)) {
continue;
}
// Read size.
blkdat >> nSize;
if (nSize < 80) {
continue;
}
} catch (const std::exception &) {
// No valid block header found; don't complain.
break;
}
try {
// read block
uint64_t nBlockPos = blkdat.GetPos();
if (dbp) {
dbp->nPos = nBlockPos;
}
blkdat.SetLimit(nBlockPos + nSize);
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock &block = *pblock;
blkdat >> block;
nRewind = blkdat.GetPos();
const BlockHash hash = block.GetHash();
{
LOCK(cs_main);
// detect out of order blocks, and store them for later
if (hash != params.GetConsensus().hashGenesisBlock &&
!m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
LogPrint(
BCLog::REINDEX,
"%s: Out of order block %s, parent %s not known\n",
__func__, hash.ToString(),
block.hashPrevBlock.ToString());
if (dbp && blocks_with_unknown_parent) {
blocks_with_unknown_parent->emplace(
block.hashPrevBlock, *dbp);
}
continue;
}
// process in case the block isn't known yet
const CBlockIndex *pindex =
m_blockman.LookupBlockIndex(hash);
if (!pindex || !pindex->nStatus.hasData()) {
BlockValidationState state;
if (AcceptBlock(pblock, state, true, dbp, nullptr,
true)) {
nLoaded++;
}
if (state.IsError()) {
break;
}
} else if (hash != params.GetConsensus().hashGenesisBlock &&
pindex->nHeight % 1000 == 0) {
LogPrint(
BCLog::REINDEX,
"Block Import: already had block %s at height %d\n",
hash.ToString(), pindex->nHeight);
}
}
// Activate the genesis block so normal node progress can
// continue
if (hash == params.GetConsensus().hashGenesisBlock) {
BlockValidationState state;
if (!ActivateBestChain(state, nullptr)) {
break;
}
}
if (m_blockman.IsPruneMode() && !fReindex && pblock) {
// Must update the tip for pruning to work while importing
// with -loadblock. This is a tradeoff to conserve disk
// space at the expense of time spent updating the tip to be
// able to prune. Otherwise, ActivateBestChain won't be
// called by the import process until after all of the block
// files are loaded. ActivateBestChain can be called by
// concurrent network message processing, but that is not
// reliable for the purpose of pruning while importing.
BlockValidationState state;
if (!ActivateBestChain(state, pblock)) {
LogPrint(BCLog::REINDEX,
"failed to activate chain (%s)\n",
state.ToString());
break;
}
}
NotifyHeaderTip(*this);
if (!blocks_with_unknown_parent) {
continue;
}
// Recursively process earlier encountered successors of this
// block
std::deque<BlockHash> queue;
queue.push_back(hash);
while (!queue.empty()) {
BlockHash head = queue.front();
queue.pop_front();
auto range = blocks_with_unknown_parent->equal_range(head);
while (range.first != range.second) {
std::multimap<BlockHash, FlatFilePos>::iterator it =
range.first;
std::shared_ptr<CBlock> pblockrecursive =
std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second,
params.GetConsensus())) {
LogPrint(
BCLog::REINDEX,
"%s: Processing out of order child %s of %s\n",
__func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
BlockValidationState dummy;
if (AcceptBlock(pblockrecursive, dummy, true,
&it->second, nullptr, true)) {
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
}
}
range.first++;
blocks_with_unknown_parent->erase(it);
NotifyHeaderTip(*this);
}
}
} catch (const std::exception &e) {
// Historical bugs added extra data to the block files that does
// not deserialize cleanly. Commonly this data is between
// readable blocks, but it does not really matter. Such data is
// not fatal to the import process. The code that reads the
// block files deals with invalid data by simply ignoring it. It
// continues to search for the next {4 byte magic message start
// bytes + 4 byte length + block} that does deserialize cleanly
// and passes all of the other block validation checks dealing
// with POW and the merkle root, etc... We merely note with this
// informational log message when unexpected data is
// encountered. We could also be experiencing a storage system
// read error, or a read of a previous bad write. These are
// possible, but less likely scenarios. We don't have enough
// information to tell a difference here. The reindex process is
// not the place to attempt to clean and/or compact the block
// files. If so desired, a studious node operator may use
// knowledge of the fact that the block files are not entirely
// pristine in order to prepare a set of pristine, and perhaps
// ordered, block files for later reindexing.
LogPrint(BCLog::REINDEX,
"%s: unexpected data at file offset 0x%x - %s. "
"continuing\n",
__func__, (nRewind - 1), e.what());
}
}
} catch (const std::runtime_error &e) {
AbortNode(std::string("System error: ") + e.what());
}
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
GetTimeMillis() - nStart);
}
void Chainstate::CheckBlockIndex() {
if (!m_chainman.ShouldCheckBlockIndex()) {
return;
}
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex
// before ActivateBestChain, so we have the genesis block in
// m_blockman.m_block_index but no active chain. (A few of the tests when
// iterating the block tree require that m_chain has been initialized.)
if (m_chain.Height() < 0) {
assert(m_blockman.m_block_index.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex *, CBlockIndex *> forward;
for (auto &[_, block_index] : m_blockman.m_block_index) {
forward.emplace(block_index.pprev, &block_index);
}
assert(forward.size() == m_blockman.m_block_index.size());
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangeGenesis = forward.equal_range(nullptr);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
// There is only one index entry with parent nullptr.
assert(rangeGenesis.first == rangeGenesis.second);
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from genesis
// block being explored which are the first to have certain properties.
size_t nNodes = 0;
int nHeight = 0;
// Oldest ancestor of pindex which is invalid.
CBlockIndex *pindexFirstInvalid = nullptr;
// Oldest ancestor of pindex which is parked.
CBlockIndex *pindexFirstParked = nullptr;
// Oldest ancestor of pindex which does not have data available.
CBlockIndex *pindexFirstMissing = nullptr;
// Oldest ancestor of pindex for which nTx == 0.
CBlockIndex *pindexFirstNeverProcessed = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTreeValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotChainValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotScriptsValid = nullptr;
while (pindex != nullptr) {
nNodes++;
if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
pindexFirstInvalid = pindex;
}
if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
pindexFirstParked = pindex;
}
// Assumed-valid index entries will not have data since we haven't
// downloaded the full block yet.
if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData() &&
!pindex->IsAssumedValid()) {
pindexFirstMissing = pindex;
}
if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
pindexFirstNeverProcessed = pindex;
}
if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TREE) {
pindexFirstNotTreeValid = pindex;
}
if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
if (pindexFirstNotTransactionsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
pindexFirstNotTransactionsValid = pindex;
}
if (pindexFirstNotChainValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
pindexFirstNotChainValid = pindex;
}
if (pindexFirstNotScriptsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
pindexFirstNotScriptsValid = pindex;
}
}
// Begin: actual consistency checks.
if (pindex->pprev == nullptr) {
// Genesis block checks.
// Genesis block's hash must match.
assert(pindex->GetBlockHash() ==
m_chainman.GetConsensus().hashGenesisBlock);
// The current active chain's genesis block must be this block.
assert(pindex == m_chain.Genesis());
}
if (!pindex->HaveTxsDownloaded()) {
// nSequenceId can't be set positive for blocks that aren't linked
// (negative is used for preciousblock)
assert(pindex->nSequenceId <= 0);
}
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
// not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
// (or VALID_TRANSACTIONS) if no pruning has occurred.
// Unless these indexes are assumed valid and pending block download on
// a background chainstate.
if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx
// > 0
assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else if (pindex->nStatus.hasData()) {
// If we have pruned, then we can only say that HAVE_DATA implies
// nTx > 0
assert(pindex->nTx > 0);
}
if (pindex->nStatus.hasUndo()) {
assert(pindex->nStatus.hasData());
}
if (pindex->IsAssumedValid()) {
// Assumed-valid blocks should have some nTx value.
assert(pindex->nTx > 0);
// Assumed-valid blocks should connect to the main chain.
assert(pindex->nStatus.getValidity() >= BlockValidity::TREE);
} else {
// Otherwise there should only be an nTx value if we have
// actually seen a block's transactions.
// This is pruning-independent.
assert((pindex->nStatus.getValidity() >=
BlockValidity::TRANSACTIONS) == (pindex->nTx > 0));
}
// All parents having had data (at some point) is equivalent to all
// parents being VALID_TRANSACTIONS, which is equivalent to
// HaveTxsDownloaded(). All parents having had data (at some point) is
// equivalent to all parents being VALID_TRANSACTIONS, which is
// equivalent to HaveTxsDownloaded().
assert((pindexFirstNeverProcessed == nullptr) ==
(pindex->HaveTxsDownloaded()));
assert((pindexFirstNotTransactionsValid == nullptr) ==
(pindex->HaveTxsDownloaded()));
// nHeight must be consistent.
assert(pindex->nHeight == nHeight);
// For every block except the genesis block, the chainwork must be
// larger than the parent's.
assert(pindex->pprev == nullptr ||
pindex->nChainWork >= pindex->pprev->nChainWork);
// The pskip pointer must point back for all but the first 2 blocks.
assert(nHeight < 2 ||
(pindex->pskip && (pindex->pskip->nHeight < nHeight)));
// All m_blockman.m_block_index entries must at least be TREE valid
assert(pindexFirstNotTreeValid == nullptr);
if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
// TREE valid implies all parents are TREE valid
assert(pindexFirstNotTreeValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
// CHAIN valid implies all parents are CHAIN valid
assert(pindexFirstNotChainValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
// SCRIPTS valid implies all parents are SCRIPTS valid
assert(pindexFirstNotScriptsValid == nullptr);
}
if (pindexFirstInvalid == nullptr) {
// Checks for not-invalid blocks.
// The failed mask cannot be set for blocks without invalid parents.
assert(!pindex->nStatus.isInvalid());
}
if (pindexFirstParked == nullptr) {
// Checks for not-parked blocks.
// The parked mask cannot be set for blocks without parked parents.
// (i.e., hasParkedParent only if an ancestor is properly parked).
assert(!pindex->nStatus.isOnParkedChain());
}
if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
pindexFirstNeverProcessed == nullptr) {
if (pindexFirstInvalid == nullptr) {
// Don't perform this check for the background chainstate since
// its setBlockIndexCandidates shouldn't have some entries (i.e.
// those past the snapshot block) which do exist in the block
// index for the active chainstate.
if (this == &m_chainman.ActiveChainstate()) {
// If this block sorts at least as good as the current tip
// and is valid and we have all data for its parents, it
// must be in setBlockIndexCandidates or be parked.
if (pindexFirstMissing == nullptr) {
assert(pindex->nStatus.isOnParkedChain() ||
setBlockIndexCandidates.count(pindex));
}
// m_chain.Tip() must also be there even if some data has
// been pruned.
if (pindex == m_chain.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
}
// If some parent is missing, then it could be that this block
// was in setBlockIndexCandidates but had to be removed because
// of the missing data. In this case it must be in
// m_blocks_unlinked -- see test below.
}
} else {
// If this block sorts worse than the current tip or some ancestor's
// block has never been seen, it cannot be in
// setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in m_blocks_unlinked.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangeUnlinked =
m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
if (rangeUnlinked.first->second == pindex) {
foundInUnlinked = true;
break;
}
rangeUnlinked.first++;
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed != nullptr &&
pindexFirstInvalid == nullptr) {
// If this block has block data available, some parent was never
// received, and has no invalid parents, it must be in
// m_blocks_unlinked.
assert(foundInUnlinked);
}
if (!pindex->nStatus.hasData()) {
// Can't be in m_blocks_unlinked if we don't HAVE_DATA
assert(!foundInUnlinked);
}
if (pindexFirstMissing == nullptr) {
// We aren't missing data for any parent -- cannot be in
// m_blocks_unlinked.
assert(!foundInUnlinked);
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed == nullptr &&
pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents
// at some point, but we're currently missing data for some parent.
// We must have pruned.
assert(m_blockman.m_have_pruned);
// This block may have entered m_blocks_unlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between m_chain and the
// tip.
// So if this block is itself better than m_chain.Tip() and it
// wasn't in
// setBlockIndexCandidates, then it must be in m_blocks_unlinked.
if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
}
}
// Perhaps too slow
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
// End: actual consistency checks.
// Try descending into the first subnode.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
pindex = range.first->second;
nHeight++;
continue;
}
// This is a leaf node. Move upwards until we reach a node of which we
// have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the
// corresponding variable.
if (pindex == pindexFirstInvalid) {
pindexFirstInvalid = nullptr;
}
if (pindex == pindexFirstParked) {
pindexFirstParked = nullptr;
}
if (pindex == pindexFirstMissing) {
pindexFirstMissing = nullptr;
}
if (pindex == pindexFirstNeverProcessed) {
pindexFirstNeverProcessed = nullptr;
}
if (pindex == pindexFirstNotTreeValid) {
pindexFirstNotTreeValid = nullptr;
}
if (pindex == pindexFirstNotTransactionsValid) {
pindexFirstNotTransactionsValid = nullptr;
}
if (pindex == pindexFirstNotChainValid) {
pindexFirstNotChainValid = nullptr;
}
if (pindex == pindexFirstNotScriptsValid) {
pindexFirstNotScriptsValid = nullptr;
}
// Find our parent.
CBlockIndex *pindexPar = pindex->pprev;
// Find which child we just visited.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangePar = forward.equal_range(pindexPar);
while (rangePar.first->second != pindex) {
// Our parent must have at least the node we're coming from as
// child.
assert(rangePar.first != rangePar.second);
rangePar.first++;
}
// Proceed to the next one.
rangePar.first++;
if (rangePar.first != rangePar.second) {
// Move to the sibling.
pindex = rangePar.first->second;
break;
} else {
// Move up further.
pindex = pindexPar;
nHeight--;
continue;
}
}
}
// Check that we actually traversed the entire map.
assert(nNodes == forward.size());
}
std::string Chainstate::ToString() {
AssertLockHeld(::cs_main);
CBlockIndex *tip = m_chain.Tip();
return strprintf("Chainstate [%s] @ height %d (%s)",
m_from_snapshot_blockhash ? "snapshot" : "ibd",
tip ? tip->nHeight : -1,
tip ? tip->GetBlockHash().ToString() : "null");
}
bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) {
    AssertLockHeld(::cs_main);
    // Nothing to do when both requested sizes already match the current ones.
    if (coinstip_size == m_coinstip_cache_size_bytes &&
        coinsdb_size == m_coinsdb_cache_size_bytes) {
        return true;
    }
    const size_t old_coinstip_size = m_coinstip_cache_size_bytes;
    m_coinstip_cache_size_bytes = coinstip_size;
    m_coinsdb_cache_size_bytes = coinsdb_size;
    CoinsDB().ResizeCache(coinsdb_size);
    LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n", this->ToString(),
              coinsdb_size * (1.0 / 1024 / 1024));
    LogPrintf("[%s] resized coinstip cache to %.1f MiB\n", this->ToString(),
              coinstip_size * (1.0 / 1024 / 1024));
    BlockValidationState state;
    if (coinstip_size > old_coinstip_size) {
        // The in-memory cache grew, so flushing is only required if one was
        // already due.
        return FlushStateToDisk(state, FlushStateMode::IF_NEEDED);
    }
    // The in-memory cache shrank: force everything out to disk, then release
    // the excess memory held by the coins map.
    const bool flushed = FlushStateToDisk(state, FlushStateMode::ALWAYS);
    CoinsTip().ReallocateCache();
    return flushed;
}
//! Guess how far we are in the verification process at the given block index
//! require cs_main if pindex has not been validated yet (because the chain's
//! transaction count might be unset) This conditional lock requirement might be
//! confusing, see: https://github.com/bitcoin/bitcoin/issues/15994
double GuessVerificationProgress(const ChainTxData &data,
                                 const CBlockIndex *pindex) {
    // No block yet: no progress at all.
    if (pindex == nullptr) {
        return 0.0;
    }
    const int64_t now = time(nullptr);
    // Estimate the chain-wide transaction total as of "now". If the block is
    // at or below the hard-coded checkpoint, extrapolate from the checkpoint
    // data; otherwise extrapolate from the block itself.
    double estimated_total_tx;
    if (pindex->GetChainTxCount() <= data.nTxCount) {
        estimated_total_tx = data.nTxCount + (now - data.nTime) * data.dTxRate;
    } else {
        estimated_total_tx = pindex->GetChainTxCount() +
                             (now - pindex->GetBlockTime()) * data.dTxRate;
    }
    // Fraction of the estimated total seen so far, capped at 1.0.
    return std::min<double>(pindex->GetChainTxCount() / estimated_total_tx,
                            1.0);
}
std::optional<BlockHash> ChainstateManager::SnapshotBlockhash() const {
    LOCK(::cs_main);
    // A snapshot chainstate, when one exists, is always the active
    // chainstate; report the block hash it was built from, else nullopt.
    if (!m_active_chainstate ||
        !m_active_chainstate->m_from_snapshot_blockhash) {
        return std::nullopt;
    }
    return m_active_chainstate->m_from_snapshot_blockhash;
}
std::vector<Chainstate *> ChainstateManager::GetAll() {
LOCK(::cs_main);
std::vector<Chainstate *> out;
for (Chainstate *pchainstate :
{m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) {
if (this->IsUsable(pchainstate)) {
out.push_back(pchainstate);
}
}
return out;
}
Chainstate &ChainstateManager::InitializeChainstate(CTxMemPool *mempool) {
    AssertLockHeld(::cs_main);
    // Must only ever run once: no chainstate may exist yet.
    assert(!m_ibd_chainstate);
    assert(!m_active_chainstate);
    // Create the IBD chainstate and make it the active one.
    auto chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this);
    m_active_chainstate = chainstate.get();
    m_ibd_chainstate = std::move(chainstate);
    return *m_active_chainstate;
}
const AssumeutxoData *ExpectedAssumeutxo(const int height,
                                         const CChainParams &chainparams) {
    // Look up the hard-coded assumeutxo entry for this height; nullptr when
    // no snapshot is expected at that height.
    const MapAssumeutxo &known_snapshots = chainparams.Assumeutxo();
    const auto it = known_snapshots.find(height);
    return it == known_snapshots.end() ? nullptr : &it->second;
}
/**
 * Remove a chainstate's leveldb coins database directory from disk.
 *
 * For snapshot chainstates the base-blockhash marker file is removed first,
 * since leveldb refuses to destroy a non-empty directory.
 *
 * @return true iff the directory was destroyed and no longer exists on disk.
 */
static bool DeleteCoinsDBFromDisk(const fs::path &db_path, bool is_snapshot)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
    AssertLockHeld(::cs_main);
    if (is_snapshot) {
        fs::path base_blockhash_path =
            db_path / node::SNAPSHOT_BLOCKHASH_FILENAME;
        try {
            const bool existed{fs::remove(base_blockhash_path)};
            if (!existed) {
                LogPrintf("[snapshot] snapshot chainstate dir being removed "
                          "lacks %s file\n",
                          fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME));
            }
        } catch (const fs::filesystem_error &e) {
            // Best effort only: a failure here is logged and surfaces below
            // as a false return, since leveldb then can't remove the dir.
            LogPrintf("[snapshot] failed to remove file %s: %s\n",
                      fs::PathToString(base_blockhash_path),
                      fsbridge::get_filesystem_error_message(e));
        }
    }
    std::string path_str = fs::PathToString(db_path);
    LogPrintf("Removing leveldb dir at %s\n", path_str);
    // We have to destruct the leveldb::DB before this call in order to
    // release the db lock, otherwise `DestroyDB` will fail. See
    // `leveldb::~DBImpl()`.
    const bool destroyed = dbwrapper::DestroyDB(path_str, {}).ok();
    if (!destroyed) {
        LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);
    }
    // Datadir should be removed from filesystem; otherwise initialization may
    // detect it on subsequent startups and get confused.
    //
    // If the base_blockhash_path removal above fails in the case of snapshot
    // chainstates, this will return false since leveldb won't remove a
    // non-empty directory.
    return destroyed && !fs::exists(db_path);
}
/**
 * Construct and activate a snapshot-based chainstate.
 *
 * Reads serialized coins from `coins_file` into a newly created chainstate,
 * validates the resulting UTXO set against the hard-coded assumeutxo data
 * (via PopulateAndValidateSnapshot), and on success makes the snapshot
 * chainstate the active one. On failure, the on-disk snapshot chainstate
 * directory (if created) is removed and the caches are rebalanced.
 *
 * @param coins_file  Serialized UTXO snapshot to load.
 * @param metadata    Snapshot metadata (base block hash, coin count).
 * @param in_memory   Keep the snapshot coins DB in memory (used by tests).
 * @return true iff the snapshot was loaded, validated, and activated.
 */
bool ChainstateManager::ActivateSnapshot(AutoFile &coins_file,
                                         const SnapshotMetadata &metadata,
                                         bool in_memory) {
    BlockHash base_blockhash = metadata.m_base_blockhash;
    // Only one snapshot activation per process lifetime is supported.
    if (this->SnapshotBlockhash()) {
        LogPrintf("[snapshot] can't activate a snapshot-based chainstate more "
                  "than once\n");
        return false;
    }
    int64_t current_coinsdb_cache_size{0};
    int64_t current_coinstip_cache_size{0};
    // Cache percentages to allocate to each chainstate.
    //
    // These particular percentages don't matter so much since they will only be
    // relevant during snapshot activation; caches are rebalanced at the
    // conclusion of this function. We want to give (essentially) all available
    // cache capacity to the snapshot to aid the bulk load later in this
    // function.
    static constexpr double IBD_CACHE_PERC = 0.01;
    static constexpr double SNAPSHOT_CACHE_PERC = 0.99;
    {
        LOCK(::cs_main);
        // Resize the coins caches to ensure we're not exceeding memory limits.
        //
        // Allocate the majority of the cache to the incoming snapshot
        // chainstate, since (optimistically) getting to its tip will be the top
        // priority. We'll need to call `MaybeRebalanceCaches()` once we're done
        // with this function to ensure the right allocation (including the
        // possibility that no snapshot was activated and that we should restore
        // the active chainstate caches to their original size).
        //
        current_coinsdb_cache_size =
            this->ActiveChainstate().m_coinsdb_cache_size_bytes;
        current_coinstip_cache_size =
            this->ActiveChainstate().m_coinstip_cache_size_bytes;
        // Temporarily resize the active coins cache to make room for the
        // newly-created snapshot chain.
        this->ActiveChainstate().ResizeCoinsCaches(
            static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
            static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
    }
    auto snapshot_chainstate =
        WITH_LOCK(::cs_main, return std::make_unique<Chainstate>(
                                 /* mempool */ nullptr, m_blockman, *this,
                                 base_blockhash));
    {
        LOCK(::cs_main);
        snapshot_chainstate->InitCoinsDB(
            static_cast<size_t>(current_coinsdb_cache_size *
                                SNAPSHOT_CACHE_PERC),
            in_memory, false, "chainstate");
        snapshot_chainstate->InitCoinsCache(static_cast<size_t>(
            current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
    }
    bool snapshot_ok = this->PopulateAndValidateSnapshot(*snapshot_chainstate,
                                                         coins_file, metadata);
    // If not in-memory, persist the base blockhash for use during subsequent
    // initialization.
    if (!in_memory) {
        LOCK(::cs_main);
        if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) {
            snapshot_ok = false;
        }
    }
    if (!snapshot_ok) {
        LOCK(::cs_main);
        this->MaybeRebalanceCaches();
        // PopulateAndValidateSnapshot can return (in error) before the leveldb
        // datadir has been created, so only attempt removal if we got that far.
        if (auto snapshot_datadir = node::FindSnapshotChainstateDir()) {
            // We have to destruct leveldb::DB in order to release the db lock,
            // otherwise DestroyDB() (in DeleteCoinsDBFromDisk()) will fail. See
            // `leveldb::~DBImpl()`. Destructing the chainstate (and so
            // resetting the coinsviews object) does this.
            snapshot_chainstate.reset();
            bool removed =
                DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true);
            if (!removed) {
                AbortNode(
                    strprintf("Failed to remove snapshot chainstate dir (%s). "
                              "Manually remove it before restarting.\n",
                              fs::PathToString(*snapshot_datadir)));
            }
        }
        return false;
    }
    {
        LOCK(::cs_main);
        assert(!m_snapshot_chainstate);
        // Hand ownership of the fully-loaded snapshot chainstate over to the
        // manager and make it the active chainstate.
        m_snapshot_chainstate.swap(snapshot_chainstate);
        const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
        assert(chaintip_loaded);
        m_active_chainstate = m_snapshot_chainstate.get();
        LogPrintf("[snapshot] successfully activated snapshot %s\n",
                  base_blockhash.ToString());
        LogPrintf("[snapshot] (%.2f MB)\n",
                  m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() /
                      (1000 * 1000));
        this->MaybeRebalanceCaches();
    }
    return true;
}
static void FlushSnapshotToDisk(CCoinsViewCache &coins_cache,
                                bool snapshot_loaded) {
    // Pick a log message that matches the phase we are in, then time the
    // flush of the coins cache to disk.
    const char *action = snapshot_loaded ? "saving snapshot chainstate"
                                         : "flushing coins cache";
    LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE(
        strprintf("%s (%.2f MB)", action,
                  coins_cache.DynamicMemoryUsage() / (1000 * 1000)),
        BCLog::LogFlags::ALL);
    coins_cache.Flush();
}
/**
 * Thrown by SnapshotUTXOHashBreakpoint() to abort a long-running
 * ComputeUTXOStats call once a shutdown has been requested.
 */
struct StopHashingException : public std::exception {
    // `noexcept` replaces the deprecated dynamic exception specification
    // `throw()` (deprecated since C++11, removed in C++20); semantics are
    // identical and it matches std::exception::what()'s own specification.
    const char *what() const noexcept override {
        return "ComputeUTXOStats interrupted by shutdown.";
    }
};
// Interrupt callback passed into ComputeUTXOStats: aborts the hashing loop
// by throwing StopHashingException once a shutdown has been requested.
static void SnapshotUTXOHashBreakpoint() {
    if (ShutdownRequested()) {
        throw StopHashingException();
    }
}
/**
 * Bulk-load a serialized UTXO snapshot into `snapshot_chainstate` and
 * validate it against the hard-coded assumeutxo data.
 *
 * Steps: look up the snapshot base block header; check the height against
 * the expected assumeutxo entry; deserialize and load all coins (flushing
 * periodically if the cache becomes critical); verify there is no trailing
 * data; compute the UTXO set hash and compare it to the expected value;
 * finally fake nTx/nChainTx and set the assumed-valid flag on block index
 * entries up to the base block.
 *
 * @return true iff the snapshot content was fully loaded and its UTXO set
 *         hash matched the expected assumeutxo hash.
 */
bool ChainstateManager::PopulateAndValidateSnapshot(
    Chainstate &snapshot_chainstate, AutoFile &coins_file,
    const SnapshotMetadata &metadata) {
    // It's okay to release cs_main before we're done using `coins_cache`
    // because we know that nothing else will be referencing the newly created
    // snapshot_chainstate yet.
    CCoinsViewCache &coins_cache =
        *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());
    BlockHash base_blockhash = metadata.m_base_blockhash;
    CBlockIndex *snapshot_start_block = WITH_LOCK(
        ::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));
    if (!snapshot_start_block) {
        // Needed for ComputeUTXOStats and ExpectedAssumeutxo to determine the
        // height and to avoid a crash when base_blockhash.IsNull()
        LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
                  base_blockhash.ToString());
        return false;
    }
    int base_height = snapshot_start_block->nHeight;
    auto maybe_au_data = ExpectedAssumeutxo(base_height, GetParams());
    if (!maybe_au_data) {
        LogPrintf("[snapshot] assumeutxo height in snapshot metadata not "
                  "recognized (%d) - refusing to load snapshot\n",
                  base_height);
        return false;
    }
    const AssumeutxoData &au_data = *maybe_au_data;
    COutPoint outpoint;
    Coin coin;
    const uint64_t coins_count = metadata.m_coins_count;
    uint64_t coins_left = metadata.m_coins_count;
    LogPrintf("[snapshot] loading coins from snapshot %s\n",
              base_blockhash.ToString());
    int64_t coins_processed{0};
    // Main bulk-load loop: deserialize exactly `coins_count` coins.
    while (coins_left > 0) {
        try {
            coins_file >> outpoint;
            coins_file >> coin;
        } catch (const std::ios_base::failure &) {
            LogPrintf("[snapshot] bad snapshot format or truncated snapshot "
                      "after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }
        if (coin.GetHeight() > uint32_t(base_height) ||
            // Avoid integer wrap-around in coinstats.cpp:ApplyHash
            outpoint.GetN() >=
                std::numeric_limits<decltype(outpoint.GetN())>::max()) {
            LogPrintf(
                "[snapshot] bad snapshot data after deserializing %d coins\n",
                coins_count - coins_left);
            return false;
        }
        coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint),
                                              std::move(coin));
        --coins_left;
        ++coins_processed;
        // Progress logging once per million coins.
        if (coins_processed % 1000000 == 0) {
            LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
                      coins_processed,
                      static_cast<float>(coins_processed) * 100 /
                          static_cast<float>(coins_count),
                      coins_cache.DynamicMemoryUsage() / (1000 * 1000));
        }
        // Batch write and flush (if we need to) every so often.
        //
        // If our average Coin size is roughly 41 bytes, checking every 120,000
        // coins means <5MB of memory imprecision.
        if (coins_processed % 120000 == 0) {
            if (ShutdownRequested()) {
                return false;
            }
            const auto snapshot_cache_state = WITH_LOCK(
                ::cs_main, return snapshot_chainstate.GetCoinsCacheSizeState());
            if (snapshot_cache_state >= CoinsCacheSizeState::CRITICAL) {
                // This is a hack - we don't know what the actual best block is,
                // but that doesn't matter for the purposes of flushing the
                // cache here. We'll set this to its correct value
                // (`base_blockhash`) below after the coins are loaded.
                coins_cache.SetBestBlock(BlockHash{GetRandHash()});
                // No need to acquire cs_main since this chainstate isn't being
                // used yet.
                FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/false);
            }
        }
    }
    // Important that we set this. This and the coins_cache accesses above are
    // sort of a layer violation, but either we reach into the innards of
    // CCoinsViewCache here or we have to invert some of the Chainstate to
    // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
    // method.
    coins_cache.SetBestBlock(base_blockhash);
    bool out_of_coins{false};
    try {
        coins_file >> outpoint;
    } catch (const std::ios_base::failure &) {
        // We expect an exception since we should be out of coins.
        out_of_coins = true;
    }
    if (!out_of_coins) {
        // Extra data after the declared coin count means the snapshot is
        // malformed.
        LogPrintf("[snapshot] bad snapshot - coins left over after "
                  "deserializing %d coins\n",
                  coins_count);
        return false;
    }
    LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
              coins_count, coins_cache.DynamicMemoryUsage() / (1000 * 1000),
              base_blockhash.ToString());
    // No need to acquire cs_main since this chainstate isn't being used yet.
    FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/true);
    assert(coins_cache.GetBestBlock() == base_blockhash);
    // As above, okay to immediately release cs_main here since no other context
    // knows about the snapshot_chainstate.
    CCoinsViewDB *snapshot_coinsdb =
        WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());
    std::optional<CCoinsStats> maybe_stats;
    try {
        maybe_stats = ComputeUTXOStats(CoinStatsHashType::HASH_SERIALIZED,
                                       snapshot_coinsdb, m_blockman,
                                       SnapshotUTXOHashBreakpoint);
    } catch (StopHashingException const &) {
        // Shutdown was requested mid-hash; abort quietly.
        return false;
    }
    if (!maybe_stats.has_value()) {
        LogPrintf("[snapshot] failed to generate coins stats\n");
        return false;
    }
    // Assert that the deserialized chainstate contents match the expected
    // assumeutxo value.
    if (AssumeutxoHash{maybe_stats->hashSerialized} !=
        au_data.hash_serialized) {
        LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
                  au_data.hash_serialized.ToString(),
                  maybe_stats->hashSerialized.ToString());
        return false;
    }
    snapshot_chainstate.m_chain.SetTip(snapshot_start_block);
    // The remainder of this function requires modifying data protected by
    // cs_main.
    LOCK(::cs_main);
    // Fake various pieces of CBlockIndex state:
    CBlockIndex *index = nullptr;
    // Don't make any modifications to the genesis block.
    // This is especially important because we don't want to erroneously
    // apply ASSUMED_VALID_FLAG to genesis, which would happen if we didn't
    // skip it here (since it apparently isn't BlockValidity::SCRIPTS).
    constexpr int AFTER_GENESIS_START{1};
    for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height();
         ++i) {
        index = snapshot_chainstate.m_chain[i];
        // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
        // entries (among other things)
        if (!index->nTx) {
            index->nTx = 1;
        }
        // Fake nChainTx so that GuessVerificationProgress reports accurately
        index->nChainTx = index->pprev->nChainTx + index->nTx;
        // Mark unvalidated block index entries beneath the snapshot base block
        // as assumed-valid.
        if (!index->IsValid(BlockValidity::SCRIPTS)) {
            // This flag will be removed once the block is fully validated by a
            // background chainstate.
            index->nStatus = index->nStatus.withAssumedValid();
        }
        m_blockman.m_dirty_blockindex.insert(index);
        // Changes to the block index will be flushed to disk after this call
        // returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is
        // called, since we've added a snapshot chainstate and therefore will
        // have to downsize the IBD chainstate, which will result in a call to
        // `FlushStateToDisk(ALWAYS)`.
    }
    assert(index);
    // The base block gets the exact chain-tx count from the assumeutxo data.
    index->nChainTx = au_data.nChainTx;
    snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
    LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
              coins_cache.DynamicMemoryUsage() / (1000 * 1000));
    return true;
}
// Currently, this function holds cs_main for its duration, which could be for
// multiple minutes due to the ComputeUTXOStats call. This hold is necessary
// because we need to avoid advancing the background validation chainstate
// farther than the snapshot base block - and this function is also invoked
// from within ConnectTip, i.e. from within ActivateBestChain, so cs_main is
// held anyway.
//
// Eventually (TODO), we could somehow separate this function's runtime from
// maintenance of the active chain, but that will either require
//
// (i) setting `m_disabled` immediately and ensuring all chainstate accesses go
// through IsUsable() checks, or
//
// (ii) giving each chainstate its own lock instead of using cs_main for
// everything.
SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation(
    std::function<void(bilingual_str)> shutdown_fnc) {
    AssertLockHeld(cs_main);
    // Only proceed when a usable background (IBD) chainstate exists alongside
    // a usable, active snapshot chainstate.
    if (m_ibd_chainstate.get() == &this->ActiveChainstate() ||
        !this->IsUsable(m_snapshot_chainstate.get()) ||
        !this->IsUsable(m_ibd_chainstate.get()) ||
        !m_ibd_chainstate->m_chain.Tip()) {
        // Nothing to do - this function only applies to the background
        // validation chainstate.
        return SnapshotCompletionResult::SKIPPED;
    }
    const int snapshot_tip_height = this->ActiveHeight();
    const int snapshot_base_height = *Assert(this->GetSnapshotBaseHeight());
    const CBlockIndex &index_new = *Assert(m_ibd_chainstate->m_chain.Tip());
    if (index_new.nHeight < snapshot_base_height) {
        // Background IBD not complete yet.
        return SnapshotCompletionResult::SKIPPED;
    }
    assert(SnapshotBlockhash());
    BlockHash snapshot_blockhash = *Assert(SnapshotBlockhash());
    // Invoked on any validation failure below: log the error, demote the
    // snapshot chainstate, mark its on-disk data invalid, and request
    // shutdown via the supplied callback.
    auto handle_invalid_snapshot = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        bilingual_str user_error = strprintf(
            _("%s failed to validate the -assumeutxo snapshot state. "
              "This indicates a hardware problem, or a bug in the software, or "
              "a bad software modification that allowed an invalid snapshot to "
              "be loaded. As a result of this, the node will shut down and "
              "stop using any state that was built on the snapshot, resetting "
              "the chain height from %d to %d. On the next restart, the node "
              "will resume syncing from %d without using any snapshot data. "
              "Please report this incident to %s, including how you obtained "
              "the snapshot. The invalid snapshot chainstate has been left on "
              "disk in case it is helpful in diagnosing the issue that caused "
              "this error."),
            PACKAGE_NAME, snapshot_tip_height, snapshot_base_height,
            snapshot_base_height, PACKAGE_BUGREPORT);
        LogPrintf("[snapshot] !!! %s\n", user_error.original);
        LogPrintf("[snapshot] deleting snapshot, reverting to validated chain, "
                  "and stopping node\n");
        m_active_chainstate = m_ibd_chainstate.get();
        m_snapshot_chainstate->m_disabled = true;
        assert(!this->IsUsable(m_snapshot_chainstate.get()));
        assert(this->IsUsable(m_ibd_chainstate.get()));
        m_snapshot_chainstate->InvalidateCoinsDBOnDisk();
        shutdown_fnc(user_error);
    };
    if (index_new.GetBlockHash() != snapshot_blockhash) {
        LogPrintf(
            "[snapshot] supposed base block %s does not match the "
            "snapshot base block %s (height %d). Snapshot is not valid.\n",
            index_new.ToString(), snapshot_blockhash.ToString(),
            snapshot_base_height);
        handle_invalid_snapshot();
        return SnapshotCompletionResult::BASE_BLOCKHASH_MISMATCH;
    }
    assert(index_new.nHeight == snapshot_base_height);
    int curr_height = m_ibd_chainstate->m_chain.Height();
    assert(snapshot_base_height == curr_height);
    assert(snapshot_base_height == index_new.nHeight);
    assert(this->IsUsable(m_snapshot_chainstate.get()));
    assert(this->GetAll().size() == 2);
    CCoinsViewDB &ibd_coins_db = m_ibd_chainstate->CoinsDB();
    // Flush so the on-disk coins DB reflects the full background chain
    // before hashing it.
    m_ibd_chainstate->ForceFlushStateToDisk();
    auto maybe_au_data = ExpectedAssumeutxo(curr_height, GetParams());
    if (!maybe_au_data) {
        LogPrintf("[snapshot] assumeutxo data not found for height "
                  "(%d) - refusing to validate snapshot\n",
                  curr_height);
        handle_invalid_snapshot();
        return SnapshotCompletionResult::MISSING_CHAINPARAMS;
    }
    const AssumeutxoData &au_data = *maybe_au_data;
    std::optional<CCoinsStats> maybe_ibd_stats;
    LogPrintf(
        "[snapshot] computing UTXO stats for background chainstate to validate "
        "snapshot - this could take a few minutes\n");
    try {
        maybe_ibd_stats =
            ComputeUTXOStats(CoinStatsHashType::HASH_SERIALIZED, &ibd_coins_db,
                             m_blockman, SnapshotUTXOHashBreakpoint);
    } catch (StopHashingException const &) {
        // Shutdown was requested mid-hash; report a stats failure.
        return SnapshotCompletionResult::STATS_FAILED;
    }
    if (!maybe_ibd_stats) {
        LogPrintf(
            "[snapshot] failed to generate stats for validation coins db\n");
        // While this isn't a problem with the snapshot per se, this condition
        // prevents us from validating the snapshot, so we should shut down and
        // let the user handle the issue manually.
        handle_invalid_snapshot();
        return SnapshotCompletionResult::STATS_FAILED;
    }
    const auto &ibd_stats = *maybe_ibd_stats;
    // Compare the background validation chainstate's UTXO set hash against the
    // hard-coded assumeutxo hash we expect.
    //
    // TODO: For belt-and-suspenders, we could cache the UTXO set
    // hash for the snapshot when it's loaded in its chainstate's leveldb. We
    // could then reference that here for an additional check.
    if (AssumeutxoHash{ibd_stats.hashSerialized} != au_data.hash_serialized) {
        LogPrintf("[snapshot] hash mismatch: actual=%s, expected=%s\n",
                  ibd_stats.hashSerialized.ToString(),
                  au_data.hash_serialized.ToString());
        handle_invalid_snapshot();
        return SnapshotCompletionResult::HASH_MISMATCH;
    }
    LogPrintf("[snapshot] snapshot beginning at %s has been fully validated\n",
              snapshot_blockhash.ToString());
    // Validation complete: the background chainstate is no longer needed.
    m_ibd_chainstate->m_disabled = true;
    this->MaybeRebalanceCaches();
    return SnapshotCompletionResult::SUCCESS;
}
Chainstate &ChainstateManager::ActiveChainstate() const {
    LOCK(::cs_main);
    // InitializeChainstate() (or snapshot activation) must already have set
    // the active chainstate.
    Chainstate *active = m_active_chainstate;
    assert(active);
    return *active;
}
bool ChainstateManager::IsSnapshotActive() const {
    LOCK(::cs_main);
    // True only when a snapshot chainstate exists AND it is the active one.
    if (!m_snapshot_chainstate) {
        return false;
    }
    return m_active_chainstate == m_snapshot_chainstate.get();
}
/**
 * Redistribute the total coinstip/coinsdb cache budget between the IBD and
 * snapshot chainstates, depending on which of them are usable and whether
 * the snapshot chainstate is still in initial block download.
 */
void ChainstateManager::MaybeRebalanceCaches() {
    AssertLockHeld(::cs_main);
    bool ibd_usable = this->IsUsable(m_ibd_chainstate.get());
    bool snapshot_usable = this->IsUsable(m_snapshot_chainstate.get());
    // At least one chainstate must be usable.
    assert(ibd_usable || snapshot_usable);
    if (ibd_usable && !snapshot_usable) {
        LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
        // Allocate everything to the IBD chainstate.
        m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache,
                                            m_total_coinsdb_cache);
    } else if (snapshot_usable && !ibd_usable) {
        // If background validation has completed and snapshot is our active
        // chain...
        LogPrintf(
            "[snapshot] allocating all cache to the snapshot chainstate\n");
        // Allocate everything to the snapshot chainstate.
        m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache,
                                                 m_total_coinsdb_cache);
    } else if (ibd_usable && snapshot_usable) {
        // If both chainstates exist, determine who needs more cache based on
        // IBD status.
        //
        // Note: shrink caches first so that we don't inadvertently overwhelm
        // available memory.
        if (m_snapshot_chainstate->IsInitialBlockDownload()) {
            m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache * 0.05,
                                                m_total_coinsdb_cache * 0.05);
            m_snapshot_chainstate->ResizeCoinsCaches(
                m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
        } else {
            m_snapshot_chainstate->ResizeCoinsCaches(
                m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
            m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache * 0.95,
                                                m_total_coinsdb_cache * 0.95);
        }
    }
}
void ChainstateManager::ResetChainstates() {
    // Clear the non-owning active pointer first, then destroy both owned
    // chainstates.
    m_active_chainstate = nullptr;
    m_ibd_chainstate.reset();
    m_snapshot_chainstate.reset();
}
/**
 * Apply default chain params to nullopt members.
 * This helps to avoid coding errors around the accidental use of the compare
 * operators that accept nullopt, thus ignoring the intended default value.
 */
static ChainstateManager::Options &&Flatten(ChainstateManager::Options &&opts) {
    const CChainParams &chainparams = opts.config.GetChainParams();
    // Fill each unset optional with its chain-param-derived default.
    if (!opts.check_block_index) {
        opts.check_block_index = chainparams.DefaultConsistencyChecks();
    }
    if (!opts.minimum_chain_work) {
        opts.minimum_chain_work =
            UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
    }
    if (!opts.assumed_valid_block) {
        opts.assumed_valid_block =
            chainparams.GetConsensus().defaultAssumeValid;
    }
    // The time callback is mandatory; there is no sensible default.
    Assert(opts.adjusted_time_callback);
    return std::move(opts);
}
// Normalize the provided options via Flatten() (filling nullopt members with
// chain-param defaults) before storing them.
ChainstateManager::ChainstateManager(Options options)
    : m_options{Flatten(std::move(options))} {}
bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool *mempool) {
    assert(!m_snapshot_chainstate);
    // A previously-activated snapshot chainstate is detected by the presence
    // of its datadir on disk...
    const std::optional<fs::path> snapshot_dir =
        node::FindSnapshotChainstateDir();
    if (!snapshot_dir) {
        return false;
    }
    // ...containing a readable base-blockhash marker file.
    const std::optional<BlockHash> base_blockhash =
        node::ReadSnapshotBaseBlockhash(*snapshot_dir);
    if (!base_blockhash) {
        return false;
    }
    LogPrintf("[snapshot] detected active snapshot chainstate (%s) - loading\n",
              fs::PathToString(*snapshot_dir));
    this->ActivateExistingSnapshot(mempool, *base_blockhash);
    return true;
}
Chainstate &
ChainstateManager::ActivateExistingSnapshot(CTxMemPool *mempool,
                                            BlockHash base_blockhash) {
    assert(!m_snapshot_chainstate);
    // Build the snapshot chainstate from the persisted base blockhash and
    // immediately promote it to the active chainstate.
    auto chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this,
                                                   base_blockhash);
    m_snapshot_chainstate = std::move(chainstate);
    LogPrintf("[snapshot] switching active chainstate to %s\n",
              m_snapshot_chainstate->ToString());
    m_active_chainstate = m_snapshot_chainstate.get();
    return *m_snapshot_chainstate;
}
/**
 * Mark this (snapshot) chainstate's on-disk coins DB as invalid by renaming
 * its datadir with an "_INVALID" suffix. The data is moved rather than
 * deleted so it remains available for later forensics; on rename failure the
 * node aborts with instructions for the user.
 */
void Chainstate::InvalidateCoinsDBOnDisk() {
    AssertLockHeld(::cs_main);
    // Should never be called on a non-snapshot chainstate.
    assert(m_from_snapshot_blockhash);
    auto storage_path_maybe = this->CoinsDB().StoragePath();
    // Should never be called with a non-existent storage path.
    assert(storage_path_maybe);
    fs::path snapshot_datadir = *storage_path_maybe;
    // Coins views no longer usable.
    m_coins_views.reset();
    auto invalid_path = snapshot_datadir + "_INVALID";
    std::string dbpath = fs::PathToString(snapshot_datadir);
    std::string target = fs::PathToString(invalid_path);
    LogPrintf("[snapshot] renaming snapshot datadir %s to %s\n", dbpath,
              target);
    // The invalid snapshot datadir is simply moved and not deleted because we
    // may want to do forensics later during issue investigation. The user is
    // instructed accordingly in MaybeCompleteSnapshotValidation().
    try {
        fs::rename(snapshot_datadir, invalid_path);
    } catch (const fs::filesystem_error &e) {
        auto src_str = fs::PathToString(snapshot_datadir);
        auto dest_str = fs::PathToString(invalid_path);
        LogPrintf("%s: error renaming file '%s' -> '%s': %s\n", __func__,
                  src_str, dest_str, e.what());
        AbortNode(strprintf("Rename of '%s' -> '%s' failed. "
                            "You should resolve this by manually moving or "
                            "deleting the invalid "
                            "snapshot directory %s, otherwise you will "
                            "encounter the same error again "
                            "on the next startup.",
                            src_str, dest_str, src_str));
    }
}
const CBlockIndex *ChainstateManager::GetSnapshotBaseBlock() const {
    // Without an active snapshot there is no base block.
    const std::optional<BlockHash> snapshot_hash = this->SnapshotBlockhash();
    if (!snapshot_hash) {
        return nullptr;
    }
    // Whenever a snapshot is active its base header must be in the block
    // index; Assert() enforces that invariant.
    return Assert(m_blockman.LookupBlockIndex(*snapshot_hash));
}
std::optional<int> ChainstateManager::GetSnapshotBaseHeight() const {
const CBlockIndex *base = this->GetSnapshotBaseBlock();
return base ? std::make_optional(base->nHeight) : std::nullopt;
}
/**
 * After the snapshot chainstate has been fully validated against the
 * background (IBD) chainstate, delete the now-redundant background leveldb
 * directory and move the snapshot chainstate's data into the default
 * chainstate location.
 *
 * Destroys all chainstates (the caller is responsible for reinitializing
 * them if it wants to continue operating).
 *
 * @return true iff on-disk cleanup was performed.
 */
bool ChainstateManager::ValidatedSnapshotCleanup() {
    AssertLockHeld(::cs_main);
    // Storage path of a chainstate, or nullopt when it is absent or
    // in-memory.
    auto get_storage_path = [](auto &chainstate) EXCLUSIVE_LOCKS_REQUIRED(
                                ::cs_main) -> std::optional<fs::path> {
        if (!(chainstate && chainstate->HasCoinsViews())) {
            return {};
        }
        return chainstate->CoinsDB().StoragePath();
    };
    std::optional<fs::path> ibd_chainstate_path_maybe =
        get_storage_path(m_ibd_chainstate);
    std::optional<fs::path> snapshot_chainstate_path_maybe =
        get_storage_path(m_snapshot_chainstate);
    if (!this->IsSnapshotValidated()) {
        // No need to clean up.
        return false;
    }
    // If either path doesn't exist, that means at least one of the chainstates
    // is in-memory, in which case we can't do on-disk cleanup. You'd better be
    // in a unittest!
    if (!ibd_chainstate_path_maybe || !snapshot_chainstate_path_maybe) {
        LogPrintf("[snapshot] snapshot chainstate cleanup cannot happen with "
                  "in-memory chainstates. You are testing, right?\n");
        return false;
    }
    const auto &snapshot_chainstate_path = *snapshot_chainstate_path_maybe;
    const auto &ibd_chainstate_path = *ibd_chainstate_path_maybe;
    // Since we're going to be moving around the underlying leveldb filesystem
    // content for each chainstate, make sure that the chainstates (and their
    // constituent CoinsViews members) have been destructed first.
    //
    // The caller of this method will be responsible for reinitializing
    // chainstates if they want to continue operation.
    this->ResetChainstates();
    // No chainstates should be considered usable.
    assert(this->GetAll().size() == 0);
    LogPrintf("[snapshot] deleting background chainstate directory (now "
              "unnecessary) (%s)\n",
              fs::PathToString(ibd_chainstate_path));
    // Stash the old background data under a temporary name so the snapshot
    // data can take its place before the old data is destroyed.
    fs::path tmp_old{ibd_chainstate_path + "_todelete"};
    auto rename_failed_abort = [](fs::path p_old, fs::path p_new,
                                  const fs::filesystem_error &err) {
        LogPrintf("Error renaming file (%s): %s\n", fs::PathToString(p_old),
                  err.what());
        AbortNode(strprintf(
            "Rename of '%s' -> '%s' failed. "
            "Cannot clean up the background chainstate leveldb directory.",
            fs::PathToString(p_old), fs::PathToString(p_new)));
    };
    try {
        fs::rename(ibd_chainstate_path, tmp_old);
    } catch (const fs::filesystem_error &e) {
        rename_failed_abort(ibd_chainstate_path, tmp_old, e);
        throw;
    }
    LogPrintf("[snapshot] moving snapshot chainstate (%s) to "
              "default chainstate directory (%s)\n",
              fs::PathToString(snapshot_chainstate_path),
              fs::PathToString(ibd_chainstate_path));
    try {
        fs::rename(snapshot_chainstate_path, ibd_chainstate_path);
    } catch (const fs::filesystem_error &e) {
        rename_failed_abort(snapshot_chainstate_path, ibd_chainstate_path, e);
        throw;
    }
    if (!DeleteCoinsDBFromDisk(tmp_old, /*is_snapshot=*/false)) {
        // No need to AbortNode because once the unneeded bg chainstate data is
        // moved, it will not interfere with subsequent initialization.
        LogPrintf("Deletion of %s failed. Please remove it manually, as the "
                  "directory is now unnecessary.\n",
                  fs::PathToString(tmp_old));
    } else {
        LogPrintf("[snapshot] deleted background chainstate directory (%s)\n",
                  fs::PathToString(ibd_chainstate_path));
    }
    return true;
}
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 2379400df..9ca28c272 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -1,930 +1,930 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <fs.h>
#include <wallet/bdb.h>
#include <wallet/db.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <cstdint>
#include <cstring>
#include <memory>
#ifndef WIN32
#include <sys/stat.h>
#endif
namespace {
//! Make sure database has a unique fileid within the environment. If it
//! doesn't, throw an error. BDB caches do not work properly when more than one
//! open database has the same fileid (values written to one database may show
//! up in reads to other databases).
//!
//! BerkeleyDB generates unique fileids by default
//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
//! so bitcoin should never create different databases with the same fileid, but
//! this error can be triggered if users manually copy database files.
void CheckUniqueFileid(const BerkeleyEnvironment &env,
                       const std::string &filename, Db &db,
                       WalletDatabaseFileId &fileid) {
    // Mock (in-memory) environments have no backing files, hence no fileids.
    if (env.IsMock()) {
        return;
    }
    // Query BDB for this database's fileid, storing it into the caller's
    // slot (which typically lives inside env.m_fileids).
    int ret = db.get_mpf()->get_fileid(fileid.value);
    if (ret != 0) {
        throw std::runtime_error(
            strprintf("BerkeleyDatabase: Can't open database %s (get_fileid "
                      "failed with %d)",
                      filename, ret));
    }
    // Compare against every other registered fileid; the address check skips
    // the entry we just wrote ourselves.
    for (const auto &item : env.m_fileids) {
        if (fileid == item.second && &fileid != &item.second) {
            throw std::runtime_error(
                strprintf("BerkeleyDatabase: Can't open database %s "
                          "(duplicates fileid %s "
                          "from %s)",
                          filename, HexStr(item.second.value), item.first));
        }
    }
}
// Guards g_dbenvs and all per-environment refcount/handle state.
RecursiveMutex cs_db;
//! Map from directory name to db environment.
std::map<std::string, std::weak_ptr<BerkeleyEnvironment>>
    g_dbenvs GUARDED_BY(cs_db);
} // namespace
bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId &rhs) const {
    // Two fileids match when their raw byte arrays compare equal.
    // (rhs.value decays to the same address as &rhs.value.)
    return memcmp(value, rhs.value, sizeof(value)) == 0;
}
/**
* @param[in] wallet_path Path to wallet directory. Or (for backwards
* compatibility only) a path to a berkeley btree data file inside a wallet
* directory.
* @param[out] database_filename Filename of berkeley btree data file inside the
* wallet directory.
* @return A shared pointer to the BerkeleyEnvironment object for the wallet
* directory, never empty because ~BerkeleyEnvironment erases the weak pointer
* from the g_dbenvs map.
* @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the
* directory path key was not already in the map.
*/
std::shared_ptr<BerkeleyEnvironment>
GetWalletEnv(const fs::path &wallet_path, std::string &database_filename) {
    fs::path env_directory;
    SplitWalletPath(wallet_path, env_directory, database_filename);
    LOCK(cs_db);
    // Try to register an (initially empty) weak pointer for this directory.
    auto inserted = g_dbenvs.emplace(fs::PathToString(env_directory),
                                     std::weak_ptr<BerkeleyEnvironment>());
    if (inserted.second) {
        // First use of this directory: create the environment and publish a
        // weak reference to it in the map.
        auto env = std::make_shared<BerkeleyEnvironment>(env_directory);
        inserted.first->second = env;
        return env;
    }
    // Reuse the existing environment. The weak_ptr cannot be expired here:
    // ~BerkeleyEnvironment erases its map entry under cs_db, which we hold.
    return inserted.first->second.lock();
}
//
// BerkeleyBatch
//
// Close all databases in this environment, then the environment itself,
// remove the (non-mock) environment files and release the directory lock.
// Idempotent: a second call is a no-op once fDbEnvInit is cleared.
void BerkeleyEnvironment::Close() {
    if (!fDbEnvInit) {
        return;
    }
    fDbEnvInit = false;
    // All Db handles must be closed before DbEnv::close; no database may
    // still be referenced by a batch at this point.
    for (auto &db : m_databases) {
        BerkeleyDatabase &database = db.second.get();
        assert(database.m_refcount <= 0);
        if (database.m_db) {
            database.m_db->close(0);
            database.m_db.reset();
        }
    }
    // Grab the error-log FILE* before close so we can fclose it afterwards.
    FILE *error_file = nullptr;
    dbenv->get_errfile(&error_file);
    int ret = dbenv->close(0);
    if (ret != 0) {
        LogPrintf("BerkeleyEnvironment::Close: Error %d closing database "
                  "environment: %s\n",
                  ret, DbEnv::strerror(ret));
    }
    if (!fMockDb) {
        // Remove environment region files; requires a fresh DbEnv handle.
        DbEnv(uint32_t(0)).remove(strPath.c_str(), 0);
    }
    if (error_file) {
        fclose(error_file);
    }
    UnlockDirectory(fs::PathFromString(strPath), ".walletlock");
}
void BerkeleyEnvironment::Reset() {
dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS));
fDbEnvInit = false;
fMockDb = false;
}
// Construct an (unopened) environment rooted at dir_path; Open() must be
// called before use.
BerkeleyEnvironment::BerkeleyEnvironment(const fs::path &dir_path)
    : strPath(fs::PathToString(dir_path)) {
    Reset();
}
BerkeleyEnvironment::~BerkeleyEnvironment() {
    // Erase our g_dbenvs entry first (under cs_db) so GetWalletEnv never
    // hands out an expired weak_ptr, then tear the environment down.
    LOCK(cs_db);
    g_dbenvs.erase(strPath);
    Close();
}
// Open (or re-open) the BDB environment on disk. Returns true on success;
// on failure fills `err` with a user-facing message and leaves the object
// in the Reset() state. Idempotent once fDbEnvInit is set.
bool BerkeleyEnvironment::Open(bilingual_str &err) {
    if (fDbEnvInit) {
        return true;
    }
    fs::path pathIn = fs::PathFromString(strPath);
    TryCreateDirectories(pathIn);
    // One environment per directory: a lock file guards against concurrent
    // bitcoind instances corrupting the wallet.
    if (!LockDirectory(pathIn, ".walletlock")) {
        LogPrintf("Cannot obtain a lock on wallet directory %s. Another "
                  "instance of bitcoin may be using it.\n",
                  strPath);
        err = strprintf(_("Error initializing wallet database environment %s!"),
                        fs::quoted(fs::PathToString(Directory())));
        return false;
    }
    fs::path pathLogDir = pathIn / "database";
    TryCreateDirectories(pathLogDir);
    fs::path pathErrorFile = pathIn / "db.log";
    LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n",
              fs::PathToString(pathLogDir), fs::PathToString(pathErrorFile));
    unsigned int nEnvFlags = 0;
    if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB)) {
        nEnvFlags |= DB_PRIVATE;
    }
    dbenv->set_lg_dir(fs::PathToString(pathLogDir).c_str());
    // 1 MiB should be enough for just the wallet
    dbenv->set_cachesize(0, 0x100000, 1);
    dbenv->set_lg_bsize(0x10000);
    dbenv->set_lg_max(1048576);
    dbenv->set_lk_max_locks(40000);
    dbenv->set_lk_max_objects(40000);
    /// debug
    dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a"));
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
    dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
    // DB_RECOVER: run normal recovery on open in case of a prior unclean
    // shutdown.
    int ret =
        dbenv->open(strPath.c_str(),
                    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
                        DB_INIT_TXN | DB_THREAD | DB_RECOVER | nEnvFlags,
                    S_IRUSR | S_IWUSR);
    if (ret != 0) {
        LogPrintf("BerkeleyEnvironment::Open: Error %d opening database "
                  "environment: %s\n",
                  ret, DbEnv::strerror(ret));
        // Close the half-opened handle and restore a clean state before
        // reporting failure.
        int ret2 = dbenv->close(0);
        if (ret2 != 0) {
            LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed "
                      "database environment: %s\n",
                      ret2, DbEnv::strerror(ret2));
        }
        Reset();
        err = strprintf(_("Error initializing wallet database environment %s!"),
                        fs::quoted(fs::PathToString(Directory())));
        if (ret == DB_RUNRECOVERY) {
            err += Untranslated(" ") +
                   _("This error could occur if this wallet was not shutdown "
                     "cleanly and was last loaded using a build with a newer "
                     "version of Berkeley DB. If so, please use the software "
                     "that last loaded this wallet");
        }
        return false;
    }
    fDbEnvInit = true;
    fMockDb = false;
    return true;
}
//! Construct an in-memory mock Berkeley environment for testing
BerkeleyEnvironment::BerkeleyEnvironment() {
    Reset();
    LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::MakeMock\n");
    dbenv->set_cachesize(1, 0, 1);
    dbenv->set_lg_bsize(10485760 * 4);
    dbenv->set_lg_max(10485760);
    dbenv->set_lk_max_locks(10000);
    dbenv->set_lk_max_objects(10000);
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    // Keep the log entirely in memory: nothing touches disk in mock mode.
    dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
    // nullptr home dir + DB_PRIVATE: a process-private, file-less env.
    int ret =
        dbenv->open(nullptr,
                    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
                        DB_INIT_TXN | DB_THREAD | DB_PRIVATE,
                    S_IRUSR | S_IWUSR);
    if (ret > 0) {
        throw std::runtime_error(
            strprintf("BerkeleyEnvironment::MakeMock: Error %d opening "
                      "database environment.",
                      ret));
    }
    fDbEnvInit = true;
    fMockDb = true;
}
// Output Dbt: DB_DBT_MALLOC asks BDB to allocate the result buffer, which
// ~SafeDbt then cleanses and frees.
BerkeleyBatch::SafeDbt::SafeDbt() {
    m_dbt.set_flags(DB_DBT_MALLOC);
}
// Input Dbt wrapping caller-owned memory; no DB_DBT_MALLOC flag is set, so
// the destructor cleanses but does not free it.
BerkeleyBatch::SafeDbt::SafeDbt(void *data, size_t size) : m_dbt(data, size) {}
BerkeleyBatch::SafeDbt::~SafeDbt() {
    if (m_dbt.get_data() != nullptr) {
        // Clear memory, e.g. in case it was a private key
        memory_cleanse(m_dbt.get_data(), m_dbt.get_size());
        // under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
        // freed by the caller.
        // https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
        if (m_dbt.get_flags() & DB_DBT_MALLOC) {
            free(m_dbt.get_data());
        }
    }
}
// Read-only view of the wrapped buffer (nullptr if unset).
const void *BerkeleyBatch::SafeDbt::get_data() const {
    return m_dbt.get_data();
}
// Size in bytes of the wrapped buffer.
uint32_t BerkeleyBatch::SafeDbt::get_size() const {
    return m_dbt.get_size();
}
// Implicit conversion so a SafeDbt can be passed straight to BDB APIs
// taking Dbt*.
BerkeleyBatch::SafeDbt::operator Dbt *() {
    return &m_dbt;
}
// Run BDB's integrity verification on the wallet data file. Returns true
// when the file verifies cleanly or does not exist yet; false (with
// errorStr set) when the environment fails to open or the file is corrupt.
bool BerkeleyDatabase::Verify(bilingual_str &errorStr) {
    fs::path walletDir = env->Directory();
    fs::path file_path = walletDir / strFile;
    LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
    LogPrintf("Using wallet %s\n", fs::PathToString(file_path));
    if (!env->Open(errorStr)) {
        return false;
    }
    if (fs::exists(file_path)) {
        // Verification requires exclusive access: no open batches allowed.
        assert(m_refcount == 0);
        Db db(env->dbenv.get(), 0);
        int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
        if (result != 0) {
            errorStr =
                strprintf(_("%s corrupt. Try using the wallet tool "
                            "bitcoin-wallet to salvage or restoring a backup."),
                          fs::quoted(fs::PathToString(file_path)));
            return false;
        }
    }
    // also return true if files does not exists
    return true;
}
void BerkeleyEnvironment::CheckpointLSN(const std::string &strFile) {
    // Force a transaction checkpoint so pending log data reaches the data
    // file.
    dbenv->txn_checkpoint(0, 0, 0);
    // Mock (in-memory) environments have no on-disk file whose LSNs could
    // be reset.
    if (!fMockDb) {
        dbenv->lsn_reset(strFile.c_str(), 0);
    }
}
BerkeleyDatabase::~BerkeleyDatabase() {
    // Unregister from the (shared) environment: close our Db handle and
    // drop the m_databases / m_fileids entries created at construction.
    if (env) {
        LOCK(cs_db);
        env->CloseDb(strFile);
        assert(!m_db);
        size_t erased = env->m_databases.erase(strFile);
        assert(erased == 1);
        env->m_fileids.erase(strFile);
    }
}
// Open a batch (read/write session) over `database`, incrementing its
// refcount for the batch's lifetime. Writes a "version" record on first
// use of a fresh database, even when opened read-only.
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase &database, const bool read_only,
                             bool fFlushOnCloseIn)
    : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr),
      m_database(database) {
    database.AddRef();
    database.Open();
    fReadOnly = read_only;
    fFlushOnClose = fFlushOnCloseIn;
    env = database.env.get();
    pdb = database.m_db.get();
    strFile = database.strFile;
    if (!Exists(std::string("version"))) {
        // Temporarily lift read-only so the version record can be seeded.
        bool fTmp = fReadOnly;
        fReadOnly = false;
        Write(std::string("version"), CLIENT_VERSION);
        fReadOnly = fTmp;
    }
}
// Ensure the underlying Db handle (m_db) is open, opening the environment
// first if needed. Throws std::runtime_error on any failure. No-op when
// m_db is already open.
void BerkeleyDatabase::Open() {
    unsigned int nFlags = DB_THREAD | DB_CREATE;
    {
        LOCK(cs_db);
        bilingual_str open_err;
        if (!env->Open(open_err)) {
            throw std::runtime_error(
                "BerkeleyDatabase: Failed to open database environment.");
        }
        if (m_db == nullptr) {
            int ret;
            // Open into a temporary handle first; only commit to m_db once
            // the fileid uniqueness check below has passed.
            std::unique_ptr<Db> pdb_temp =
                std::make_unique<Db>(env->dbenv.get(), 0);
            bool fMockDb = env->IsMock();
            if (fMockDb) {
                // Mock DBs must not create a temp backing file on disk.
                DbMpoolFile *mpf = pdb_temp->get_mpf();
                ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
                if (ret != 0) {
                    throw std::runtime_error(strprintf(
                        "BerkeleyDatabase: Failed to configure for no "
                        "temp file backing for database %s",
                        strFile));
                }
            }
            ret = pdb_temp->open(
                nullptr,                             // Txn pointer
                fMockDb ? nullptr : strFile.c_str(), // Filename
                fMockDb ? strFile.c_str() : "main",  // Logical db name
                DB_BTREE,                            // Database type
                nFlags,                              // Flags
                0);
            if (ret != 0) {
                throw std::runtime_error(strprintf(
                    "BerkeleyDatabase: Error %d, can't open database %s", ret,
                    strFile));
            }
            // Call CheckUniqueFileid on the containing BDB environment to
            // avoid BDB data consistency bugs that happen when different data
            // files in the same environment have the same fileid.
            CheckUniqueFileid(*env, strFile, *pdb_temp,
                              this->env->m_fileids[strFile]);
            m_db.reset(pdb_temp.release());
        }
    }
}
void BerkeleyBatch::Flush() {
    // Never checkpoint while a transaction is in progress.
    if (activeTxn) {
        return;
    }
    // env is nullptr for dummy databases (i.e. in tests). Don't actually
    // flush if env is nullptr so we don't segfault.
    if (env == nullptr) {
        return;
    }
    // Read-only batches may defer the checkpoint by up to one minute and a
    // configurable amount of log data; writers checkpoint immediately.
    const unsigned int nMinutes = fReadOnly ? 1 : 0;
    env->dbenv->txn_checkpoint(
        nMinutes
            ? gArgs.GetIntArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024
            : 0,
        nMinutes, 0);
}
// Record that a write happened; consulted by the periodic wallet flusher.
void BerkeleyDatabase::IncrementUpdateCounter() {
    ++nUpdateCounter;
}
BerkeleyBatch::~BerkeleyBatch() {
    // Close first (aborts any open txn, optionally flushes), then release
    // the refcount taken in the constructor.
    Close();
    m_database.RemoveRef();
}
// End this batch: abort any uncommitted transaction, drop handles, close
// the cursor, and (when requested at construction) flush to disk.
// Idempotent once pdb is cleared.
void BerkeleyBatch::Close() {
    if (!pdb) {
        return;
    }
    // An uncommitted transaction at close time is discarded, not committed.
    if (activeTxn) {
        activeTxn->abort();
    }
    activeTxn = nullptr;
    pdb = nullptr;
    CloseCursor();
    if (fFlushOnClose) {
        Flush();
    }
}
// Close (and reset) the Db handle for `strFile`, if one is open. The
// database must be registered in m_databases.
void BerkeleyEnvironment::CloseDb(const std::string &strFile) {
    LOCK(cs_db);
    auto it = m_databases.find(strFile);
    assert(it != m_databases.end());
    BerkeleyDatabase &database = it->second.get();
    if (database.m_db) {
        // Close the database handle
        database.m_db->close(0);
        database.m_db.reset();
    }
}
// Tear down and re-open the whole environment. Blocks until no batches
// hold a reference to any database, then closes every Db, flushes and
// closes the env, and opens it again (open errors are ignored here).
void BerkeleyEnvironment::ReloadDbEnv() {
    // Make sure that no Db's are in use
    AssertLockNotHeld(cs_db);
    std::unique_lock<RecursiveMutex> lock(cs_db);
    // Wait (releasing cs_db) until every database's refcount drops to zero;
    // RemoveRef() notifies m_db_in_use.
    m_db_in_use.wait(lock, [this]() {
        for (auto &db : m_databases) {
            if (db.second.get().m_refcount > 0) {
                return false;
            }
        }
        return true;
    });
    // Snapshot the filenames first: CloseDb touches m_databases entries.
    std::vector<std::string> filenames;
    for (auto it : m_databases) {
        filenames.push_back(it.first);
    }
    // Close the individual Db's
    for (const std::string &filename : filenames) {
        CloseDb(filename);
    }
    // Reset the environment
    // This will flush and close the environment
    Flush(true);
    Reset();
    bilingual_str open_err;
    Open(open_err);
}
// Compact/rewrite the wallet data file by copying every record into a
// fresh "<strFile>.rewrite" database, then atomically swapping it in via
// BDB remove+rename. Records whose key starts with `pszSkip` (if given)
// are dropped; the "\x07version" record is refreshed to CLIENT_VERSION.
// Spins (100ms sleeps) until no batches reference this database.
bool BerkeleyDatabase::Rewrite(const char *pszSkip) {
    while (true) {
        {
            LOCK(cs_db);
            if (m_refcount <= 0) {
                // Flush log data to the dat file
                env->CloseDb(strFile);
                env->CheckpointLSN(strFile);
                // -1 marks the database as flushed/idle.
                m_refcount = -1;
                bool fSuccess = true;
                LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
                std::string strFileRes = strFile + ".rewrite";
                { // surround usage of db with extra {}
                    BerkeleyBatch db(*this, true);
                    std::unique_ptr<Db> pdbCopy =
                        std::make_unique<Db>(env->dbenv.get(), 0);
                    int ret = pdbCopy->open(nullptr,            // Txn pointer
                                            strFileRes.c_str(), // Filename
                                            "main",    // Logical db name
                                            DB_BTREE,  // Database type
                                            DB_CREATE, // Flags
                                            0);
                    if (ret > 0) {
                        LogPrintf("BerkeleyBatch::Rewrite: Can't create "
                                  "database file %s\n",
                                  strFileRes);
                        fSuccess = false;
                    }
                    if (db.StartCursor()) {
                        // Copy records one at a time until the cursor is
                        // exhausted or an error occurs.
                        while (fSuccess) {
                            CDataStream ssKey(SER_DISK, CLIENT_VERSION);
                            CDataStream ssValue(SER_DISK, CLIENT_VERSION);
                            bool complete;
                            bool ret1 =
                                db.ReadAtCursor(ssKey, ssValue, complete);
                            if (complete) {
                                break;
                            }
                            if (!ret1) {
                                fSuccess = false;
                                break;
                            }
                            if (pszSkip &&
                                strncmp((const char *)ssKey.data(), pszSkip,
                                        std::min(ssKey.size(),
                                                 strlen(pszSkip))) == 0) {
                                continue;
                            }
                            if (strncmp((const char *)ssKey.data(),
                                        "\x07version", 8) == 0) {
                                // Update version:
                                ssValue.clear();
                                ssValue << CLIENT_VERSION;
                            }
                            Dbt datKey(ssKey.data(), ssKey.size());
                            Dbt datValue(ssValue.data(), ssValue.size());
                            int ret2 = pdbCopy->put(nullptr, &datKey, &datValue,
                                                    DB_NOOVERWRITE);
                            if (ret2 > 0) {
                                fSuccess = false;
                            }
                        }
                        db.CloseCursor();
                    }
                    if (fSuccess) {
                        db.Close();
                        env->CloseDb(strFile);
                        if (pdbCopy->close(0)) {
                            fSuccess = false;
                        }
                    } else {
                        pdbCopy->close(0);
                    }
                }
                if (fSuccess) {
                    // Replace the original file with the rewritten copy.
                    Db dbA(env->dbenv.get(), 0);
                    if (dbA.remove(strFile.c_str(), nullptr, 0)) {
                        fSuccess = false;
                    }
                    Db dbB(env->dbenv.get(), 0);
                    if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(),
                                   0)) {
                        fSuccess = false;
                    }
                }
                if (!fSuccess) {
                    LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite "
                              "database file %s\n",
                              strFileRes);
                }
                return fSuccess;
            }
        }
        UninterruptibleSleep(std::chrono::milliseconds{100});
    }
}
// Flush log data to the data files of every database that is not in use
// (refcount == 0). With fShutdown, additionally removes archived logs and
// closes the environment when no database was in use at all.
void BerkeleyEnvironment::Flush(bool fShutdown) {
    int64_t nStart = GetTimeMillis();
    // Flush log data to the actual data file on all files that are not in use
    LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n",
             strPath, fShutdown ? "true" : "false",
             fDbEnvInit ? "" : " database not started");
    if (!fDbEnvInit) {
        return;
    }
    {
        LOCK(cs_db);
        bool no_dbs_accessed = true;
        for (auto &db_it : m_databases) {
            std::string strFile = db_it.first;
            int nRefCount = db_it.second.get().m_refcount;
            // refcount < 0 means already flushed/idle — nothing to do.
            if (nRefCount < 0) {
                continue;
            }
            LogPrint(
                BCLog::WALLETDB,
                "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n",
                strFile, nRefCount);
            if (nRefCount == 0) {
                // Move log data to the dat file
                CloseDb(strFile);
                LogPrint(BCLog::WALLETDB,
                         "BerkeleyEnvironment::Flush: %s checkpoint\n",
                         strFile);
                dbenv->txn_checkpoint(0, 0, 0);
                LogPrint(BCLog::WALLETDB,
                         "BerkeleyEnvironment::Flush: %s detach\n", strFile);
                if (!fMockDb) {
                    dbenv->lsn_reset(strFile.c_str(), 0);
                }
                LogPrint(BCLog::WALLETDB,
                         "BerkeleyEnvironment::Flush: %s closed\n", strFile);
                nRefCount = -1;
            } else {
                // At least one batch still holds this database open.
                no_dbs_accessed = false;
            }
        }
        LogPrint(BCLog::WALLETDB,
                 "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n",
                 fShutdown ? "true" : "false",
                 fDbEnvInit ? "" : " database not started",
                 GetTimeMillis() - nStart);
        if (fShutdown) {
            char **listp;
            if (no_dbs_accessed) {
                // Remove now-unneeded log files and tear down the env.
                dbenv->log_archive(&listp, DB_ARCH_REMOVE);
                Close();
                if (!fMockDb) {
                    fs::remove_all(fs::PathFromString(strPath) / "database");
                }
            }
        }
    }
}
// Best-effort background flush of this database. Returns false (without
// blocking) when the lock is contended, any database in the environment is
// in use, or there is nothing to flush; true after a successful flush.
bool BerkeleyDatabase::PeriodicFlush() {
    // Don't flush if we can't acquire the lock.
    TRY_LOCK(cs_db, lockDb);
    if (!lockDb) {
        return false;
    }
    // Don't flush if any databases are in use
    for (auto &it : env->m_databases) {
        if (it.second.get().m_refcount > 0) {
            return false;
        }
    }
    // Don't flush if there haven't been any batch writes for this database.
    if (m_refcount < 0) {
        return false;
    }
    LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
    int64_t nStart = GetTimeMillis();
    // Flush wallet file so it's self contained
    env->CloseDb(strFile);
    env->CheckpointLSN(strFile);
    // Mark as flushed/idle until the next batch is opened.
    m_refcount = -1;
    LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile,
             GetTimeMillis() - nStart);
    return true;
}
// Copy the wallet data file to `strDest` (a file path, or a directory into
// which strFile is placed). Flushes first, refuses to overwrite the source
// file itself, and spins (100ms sleeps) until no batches are open.
bool BerkeleyDatabase::Backup(const std::string &strDest) const {
    while (true) {
        {
            LOCK(cs_db);
            if (m_refcount <= 0) {
                // Flush log data to the dat file
                env->CloseDb(strFile);
                env->CheckpointLSN(strFile);
                // Copy wallet file.
                fs::path pathSrc = env->Directory() / strFile;
                fs::path pathDest(fs::PathFromString(strDest));
                if (fs::is_directory(pathDest)) {
                    pathDest /= fs::PathFromString(strFile);
                }
                try {
                    // Guard against clobbering the live wallet file.
                    if (fs::exists(pathDest) &&
                        fs::equivalent(pathSrc, pathDest)) {
                        LogPrintf("cannot backup to wallet source file %s\n",
                                  fs::PathToString(pathDest));
                        return false;
                    }
                    fs::copy_file(pathSrc, pathDest,
                                  fs::copy_options::overwrite_existing);
                    LogPrintf("copied %s to %s\n", strFile,
                              fs::PathToString(pathDest));
                    return true;
                } catch (const fs::filesystem_error &e) {
                    LogPrintf("error copying %s to %s - %s\n", strFile,
                              fs::PathToString(pathDest),
                              fsbridge::get_filesystem_error_message(e));
                    return false;
                }
            }
        }
        UninterruptibleSleep(std::chrono::milliseconds{100});
    }
}
// Flush idle databases in the shared environment without shutting it down.
void BerkeleyDatabase::Flush() {
    env->Flush(false);
}
// Flush in shutdown mode; may close the whole environment if nothing else
// is using it.
void BerkeleyDatabase::Close() {
    env->Flush(true);
}
// Delegate a full environment teardown/re-open to the shared environment.
void BerkeleyDatabase::ReloadDbEnv() {
    env->ReloadDbEnv();
}
bool BerkeleyBatch::StartCursor() {
    // Only one cursor may be open per batch at a time.
    assert(!m_cursor);
    // A cursor can only be opened over a live database handle; Db::cursor
    // returns 0 on success.
    return pdb != nullptr && pdb->cursor(nullptr, &m_cursor, 0) == 0;
}
bool BerkeleyBatch::ReadAtCursor(CDataStream &ssKey, CDataStream &ssValue,
bool &complete) {
complete = false;
if (m_cursor == nullptr) {
return false;
}
// Read at cursor
SafeDbt datKey;
SafeDbt datValue;
int ret = m_cursor->get(datKey, datValue, DB_NEXT);
if (ret == DB_NOTFOUND) {
complete = true;
}
if (ret != 0) {
return false;
} else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr) {
return false;
}
// Convert to streams
ssKey.SetType(SER_DISK);
ssKey.clear();
- ssKey.write((char *)datKey.get_data(), datKey.get_size());
+ ssKey.write({BytePtr(datKey.get_data()), datKey.get_size()});
ssValue.SetType(SER_DISK);
ssValue.clear();
- ssValue.write((char *)datValue.get_data(), datValue.get_size());
+ ssValue.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
void BerkeleyBatch::CloseCursor() {
    // Closing is a no-op when no cursor is open.
    if (m_cursor != nullptr) {
        m_cursor->close();
        m_cursor = nullptr;
    }
}
// Begin a BDB transaction for this batch. Fails when the database is
// closed or a transaction is already active (no nesting).
bool BerkeleyBatch::TxnBegin() {
    if (!pdb || activeTxn) {
        return false;
    }
    DbTxn *ptxn = env->TxnBegin();
    if (!ptxn) {
        return false;
    }
    activeTxn = ptxn;
    return true;
}
bool BerkeleyBatch::TxnCommit() {
if (!pdb || !activeTxn) {
return false;
}
int ret = activeTxn->commit(0);
activeTxn = nullptr;
return (ret == 0);
}
bool BerkeleyBatch::TxnAbort() {
if (!pdb || !activeTxn) {
return false;
}
int ret = activeTxn->abort();
activeTxn = nullptr;
return (ret == 0);
}
std::string BerkeleyDatabaseVersion() {
    // Query the linked BDB library for its human-readable version string;
    // the numeric major/minor/patch out-parameters are not needed.
    const char *version_string = DbEnv::version(nullptr, nullptr, nullptr);
    return version_string;
}
bool BerkeleyBatch::ReadKey(CDataStream &&key, CDataStream &value) {
if (!pdb) {
return false;
}
SafeDbt datKey(key.data(), key.size());
SafeDbt datValue;
int ret = pdb->get(activeTxn, datKey, datValue, 0);
if (ret == 0 && datValue.get_data() != nullptr) {
- value.write((char *)datValue.get_data(), datValue.get_size());
+ value.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
return false;
}
// Store key -> value. With overwrite=false, DB_NOOVERWRITE makes the put
// fail if the key already exists. Asserts (programmer error) on read-only
// batches. Returns true on success.
bool BerkeleyBatch::WriteKey(CDataStream &&key, CDataStream &&value,
                             bool overwrite) {
    if (!pdb) {
        return false;
    }
    if (fReadOnly) {
        assert(!"Write called on database in read-only mode");
    }
    SafeDbt datKey(key.data(), key.size());
    SafeDbt datValue(value.data(), value.size());
    int ret =
        pdb->put(activeTxn, datKey, datValue, (overwrite ? 0 : DB_NOOVERWRITE));
    return (ret == 0);
}
// Delete a record. Erasing an absent key (DB_NOTFOUND) counts as success.
// Asserts (programmer error) on read-only batches.
bool BerkeleyBatch::EraseKey(CDataStream &&key) {
    if (!pdb) {
        return false;
    }
    if (fReadOnly) {
        assert(!"Erase called on database in read-only mode");
    }
    SafeDbt datKey(key.data(), key.size());
    int ret = pdb->del(activeTxn, datKey, 0);
    return (ret == 0 || ret == DB_NOTFOUND);
}
bool BerkeleyBatch::HasKey(CDataStream &&key) {
    // Without an open handle no key can exist.
    if (pdb == nullptr) {
        return false;
    }
    SafeDbt datKey(key.data(), key.size());
    // Db::exists returns 0 exactly when the key is present.
    return pdb->exists(activeTxn, datKey, 0) == 0;
}
void BerkeleyDatabase::AddRef() {
    LOCK(cs_db);
    // A negative refcount means "flushed/idle": the first new user resets
    // the count to exactly one instead of incrementing from the sentinel.
    m_refcount = (m_refcount < 0) ? 1 : m_refcount + 1;
}
void BerkeleyDatabase::RemoveRef() {
    LOCK(cs_db);
    --m_refcount;
    // Wake anyone (e.g. ReloadDbEnv) waiting for all batches to finish.
    if (env != nullptr) {
        env->m_db_in_use.notify_all();
    }
}
// Factory for a read-write batch over this database.
std::unique_ptr<DatabaseBatch>
BerkeleyDatabase::MakeBatch(bool flush_on_close) {
    return std::make_unique<BerkeleyBatch>(*this, false, flush_on_close);
}
bool ExistsBerkeleyDatabase(const fs::path &path) {
    // Resolve the wallet path into its environment directory and data
    // filename, then check whether that file looks like a Berkeley btree.
    fs::path dir;
    std::string filename;
    SplitWalletPath(path, dir, filename);
    return IsBerkeleyBtree(dir / filename);
}
// Create a BerkeleyDatabase for `path`, refusing to double-load a data
// file already registered in its environment, and optionally verifying
// the file. On failure returns nullptr with `status`/`error` set.
std::unique_ptr<BerkeleyDatabase>
MakeBerkeleyDatabase(const fs::path &path, const DatabaseOptions &options,
                     DatabaseStatus &status, bilingual_str &error) {
    std::unique_ptr<BerkeleyDatabase> db;
    {
        // Lock env.m_databases until insert in BerkeleyDatabase constructor
        LOCK(cs_db);
        std::string data_filename;
        std::shared_ptr<BerkeleyEnvironment> env =
            GetWalletEnv(path, data_filename);
        if (env->m_databases.count(data_filename)) {
            error = Untranslated(strprintf(
                "Refusing to load database. Data file '%s' is already loaded.",
                fs::PathToString(env->Directory() / data_filename)));
            status = DatabaseStatus::FAILED_ALREADY_LOADED;
            return nullptr;
        }
        db = std::make_unique<BerkeleyDatabase>(std::move(env),
                                                std::move(data_filename));
    }
    // Verification happens outside the lock: it opens the environment and
    // may take a while.
    if (options.verify && !db->Verify(error)) {
        status = DatabaseStatus::FAILED_VERIFY;
        return nullptr;
    }
    status = DatabaseStatus::SUCCESS;
    return db;
}
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 08a3eaf42..27c8bc336 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -1,629 +1,626 @@
// Copyright (c) 2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <wallet/sqlite.h>
#include <logging.h>
#include <sync.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/translation.h>
#include <wallet/db.h>
#include <cstdint>
#include <sqlite3.h>
static const char *const DATABASE_FILENAME = "wallet.dat";
static GlobalMutex g_sqlite_mutex;
static int g_sqlite_count GUARDED_BY(g_sqlite_mutex) = 0;
// Forward SQLite internal errors into the node's log.
static void ErrorLogCallback(void *arg, int code, const char *msg) {
    // From sqlite3_config() documentation for the SQLITE_CONFIG_LOG option:
    // "The void pointer that is the second argument to SQLITE_CONFIG_LOG is
    // passed through as the first parameter to the application-defined logger
    // function whenever that function is invoked."
    // Assert that this is the case:
    assert(arg == nullptr);
    LogPrintf("SQLite Error. Code: %d. Message: %s\n", code, msg);
}
// Construct and open a SQLite-backed wallet database. The first instance
// process-wide installs the error-log callback and initializes the SQLite
// library (refcounted via g_sqlite_count; Cleanup() undoes it). Throws on
// any setup failure, cleaning up after itself first.
SQLiteDatabase::SQLiteDatabase(const fs::path &dir_path,
                               const fs::path &file_path, bool mock)
    : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)),
      m_file_path(fs::PathToString(file_path)) {
    {
        LOCK(g_sqlite_mutex);
        LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion());
        LogPrintf("Using wallet %s\n", m_dir_path);
        if (++g_sqlite_count == 1) {
            // Setup logging
            int ret =
                sqlite3_config(SQLITE_CONFIG_LOG, ErrorLogCallback, nullptr);
            if (ret != SQLITE_OK) {
                throw std::runtime_error(
                    strprintf("SQLiteDatabase: Failed to setup error log: %s\n",
                              sqlite3_errstr(ret)));
            }
        }
        // This is a no-op if sqlite3 is already initialized
        int ret = sqlite3_initialize();
        if (ret != SQLITE_OK) {
            throw std::runtime_error(
                strprintf("SQLiteDatabase: Failed to initialize SQLite: %s\n",
                          sqlite3_errstr(ret)));
        }
    }
    try {
        Open();
    } catch (const std::runtime_error &) {
        // If open fails, cleanup this object and rethrow the exception
        Cleanup();
        throw;
    }
}
void SQLiteBatch::SetupSQLStatements() {
int res;
if (!m_read_stmt) {
if ((res = sqlite3_prepare_v2(
m_database.m_db, "SELECT value FROM main WHERE key = ?", -1,
&m_read_stmt, nullptr)) != SQLITE_OK) {
throw std::runtime_error(strprintf(
"SQLiteDatabase: Failed to setup SQL statements: %s\n",
sqlite3_errstr(res)));
}
}
if (!m_insert_stmt) {
if ((res = sqlite3_prepare_v2(m_database.m_db,
"INSERT INTO main VALUES(?, ?)", -1,
&m_insert_stmt, nullptr)) != SQLITE_OK) {
throw std::runtime_error(strprintf(
"SQLiteDatabase: Failed to setup SQL statements: %s\n",
sqlite3_errstr(res)));
}
}
if (!m_overwrite_stmt) {
if ((res = sqlite3_prepare_v2(
m_database.m_db, "INSERT or REPLACE into main values(?, ?)",
-1, &m_overwrite_stmt, nullptr)) != SQLITE_OK) {
throw std::runtime_error(strprintf(
"SQLiteDatabase: Failed to setup SQL statements: %s\n",
sqlite3_errstr(res)));
}
}
if (!m_delete_stmt) {
if ((res = sqlite3_prepare_v2(m_database.m_db,
"DELETE FROM main WHERE key = ?", -1,
&m_delete_stmt, nullptr)) != SQLITE_OK) {
throw std::runtime_error(strprintf(
"SQLiteDatabase: Failed to setup SQL statements: %s\n",
sqlite3_errstr(res)));
}
}
if (!m_cursor_stmt) {
if ((res = sqlite3_prepare_v2(m_database.m_db,
"SELECT key, value FROM main", -1,
&m_cursor_stmt, nullptr)) != SQLITE_OK) {
throw std::runtime_error(strprintf(
"SQLiteDatabase: Failed to setup SQL statements : %s\n",
sqlite3_errstr(res)));
}
}
}
SQLiteDatabase::~SQLiteDatabase() {
    // Close the connection and release our share of the library refcount.
    Cleanup();
}
// Close this connection; the last instance process-wide also shuts the
// SQLite library down (mirrors the init in the constructor).
void SQLiteDatabase::Cleanup() noexcept {
    Close();
    LOCK(g_sqlite_mutex);
    if (--g_sqlite_count == 0) {
        int ret = sqlite3_shutdown();
        if (ret != SQLITE_OK) {
            LogPrintf("SQLiteDatabase: Failed to shutdown SQLite: %s\n",
                      sqlite3_errstr(ret));
        }
    }
}
// Run "PRAGMA integrity_check" over the open database. Returns true when
// every result row is "ok"; otherwise accumulates all reported problems
// into `error` and returns false.
bool SQLiteDatabase::Verify(bilingual_str &error) {
    assert(m_db);
    sqlite3_stmt *stmt{nullptr};
    int ret =
        sqlite3_prepare_v2(m_db, "PRAGMA integrity_check", -1, &stmt, nullptr);
    if (ret != SQLITE_OK) {
        // finalize(nullptr) is a harmless no-op if prepare failed early.
        sqlite3_finalize(stmt);
        error = strprintf(_("SQLiteDatabase: Failed to prepare statement to "
                            "verify database: %s"),
                          sqlite3_errstr(ret));
        return false;
    }
    // The pragma yields one text row per finding; "ok" means no problem.
    while (true) {
        ret = sqlite3_step(stmt);
        if (ret == SQLITE_DONE) {
            break;
        }
        if (ret != SQLITE_ROW) {
            error = strprintf(_("SQLiteDatabase: Failed to execute statement "
                                "to verify database: %s"),
                              sqlite3_errstr(ret));
            break;
        }
        const char *msg = (const char *)sqlite3_column_text(stmt, 0);
        if (!msg) {
            error = strprintf(_("SQLiteDatabase: Failed to read database "
                                "verification error: %s"),
                              sqlite3_errstr(ret));
            break;
        }
        std::string str_msg(msg);
        if (str_msg == "ok") {
            continue;
        }
        if (error.empty()) {
            error = _("Failed to verify database") + Untranslated("\n");
        }
        error += Untranslated(strprintf("%s\n", str_msg));
    }
    sqlite3_finalize(stmt);
    return error.empty();
}
// Open (or finish configuring an already-open) SQLite connection: create
// the file if needed, take an exclusive lock, enable fullfsync, and create
// the key-value "main" table on first use. Throws std::runtime_error on
// any failure.
void SQLiteDatabase::Open() {
    int flags =
        SQLITE_OPEN_FULLMUTEX | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
    if (m_mock) {
        // In memory database for mock db
        flags |= SQLITE_OPEN_MEMORY;
    }
    if (m_db == nullptr) {
        TryCreateDirectories(fs::PathFromString(m_dir_path));
        int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr);
        if (ret != SQLITE_OK) {
            throw std::runtime_error(
                strprintf("SQLiteDatabase: Failed to open database: %s\n",
                          sqlite3_errstr(ret)));
        }
    }
    if (sqlite3_db_readonly(m_db, "main") != 0) {
        throw std::runtime_error("SQLiteDatabase: Database opened in readonly "
                                 "mode but read-write permissions are needed");
    }
    // Acquire an exclusive lock on the database
    // First change the locking mode to exclusive
    int ret = sqlite3_exec(m_db, "PRAGMA locking_mode = exclusive", nullptr,
                           nullptr, nullptr);
    if (ret != SQLITE_OK) {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Unable to change database locking mode "
                      "to exclusive: %s\n",
                      sqlite3_errstr(ret)));
    }
    // Now begin a transaction to acquire the exclusive lock. This lock won't be
    // released until we close because of the exclusive locking mode.
    ret = sqlite3_exec(m_db, "BEGIN EXCLUSIVE TRANSACTION", nullptr, nullptr,
                       nullptr);
    if (ret != SQLITE_OK) {
        throw std::runtime_error(
            "SQLiteDatabase: Unable to obtain an exclusive lock on the "
            "database, is it being used by another bitcoind?\n");
    }
    ret = sqlite3_exec(m_db, "COMMIT", nullptr, nullptr, nullptr);
    if (ret != SQLITE_OK) {
        throw std::runtime_error(strprintf(
            "SQLiteDatabase: Unable to end exclusive lock transaction: %s\n",
            sqlite3_errstr(ret)));
    }
    // Enable fullfsync for the platforms that use it
    ret = sqlite3_exec(m_db, "PRAGMA fullfsync = true", nullptr, nullptr,
                       nullptr);
    if (ret != SQLITE_OK) {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Failed to enable fullfsync: %s\n",
                      sqlite3_errstr(ret)));
    }
    // Make the table for our key-value pairs
    // First check that the main table exists
    sqlite3_stmt *check_main_stmt{nullptr};
    ret = sqlite3_prepare_v2(
        m_db,
        "SELECT name FROM sqlite_master WHERE type='table' AND name='main'", -1,
        &check_main_stmt, nullptr);
    if (ret != SQLITE_OK) {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Failed to prepare statement to check "
                      "table existence: %s\n",
                      sqlite3_errstr(ret)));
    }
    ret = sqlite3_step(check_main_stmt);
    if (sqlite3_finalize(check_main_stmt) != SQLITE_OK) {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Failed to finalize statement checking "
                      "table existence: %s\n",
                      sqlite3_errstr(ret)));
    }
    bool table_exists;
    // SQLITE_ROW means the name query matched; SQLITE_DONE means no table.
    if (ret == SQLITE_DONE) {
        table_exists = false;
    } else if (ret == SQLITE_ROW) {
        table_exists = true;
    } else {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Failed to execute statement to check "
                      "table existence: %s\n",
                      sqlite3_errstr(ret)));
    }
    // Do the db setup things because the table doesn't exist only when we are
    // creating a new wallet
    if (!table_exists) {
        ret = sqlite3_exec(m_db,
                           "CREATE TABLE main(key BLOB PRIMARY KEY NOT NULL, "
                           "value BLOB NOT NULL)",
                           nullptr, nullptr, nullptr);
        if (ret != SQLITE_OK) {
            throw std::runtime_error(
                strprintf("SQLiteDatabase: Failed to create new database: %s\n",
                          sqlite3_errstr(ret)));
        }
    }
}
bool SQLiteDatabase::Rewrite(const char *skip) {
    // Rebuild the whole database file via the VACUUM command:
    // https://sqlite.org/lang_vacuum.html
    // The `skip` argument is not used by the SQLite backend.
    return sqlite3_exec(m_db, "VACUUM", nullptr, nullptr, nullptr) ==
           SQLITE_OK;
}
bool SQLiteDatabase::Backup(const std::string &dest) const {
sqlite3 *db_copy;
int res = sqlite3_open(dest.c_str(), &db_copy);
if (res != SQLITE_OK) {
sqlite3_close(db_copy);
return false;
}
sqlite3_backup *backup = sqlite3_backup_init(db_copy, "main", m_db, "main");
if (!backup) {
LogPrintf("%s: Unable to begin backup: %s\n", __func__,
sqlite3_errmsg(m_db));
sqlite3_close(db_copy);
return false;
}
// Specifying -1 will copy all of the pages
res = sqlite3_backup_step(backup, -1);
if (res != SQLITE_DONE) {
LogPrintf("%s: Unable to backup: %s\n", __func__, sqlite3_errstr(res));
sqlite3_backup_finish(backup);
sqlite3_close(db_copy);
return false;
}
res = sqlite3_backup_finish(backup);
sqlite3_close(db_copy);
return res == SQLITE_OK;
}
void SQLiteDatabase::Close() {
    // Close the connection and drop the handle. sqlite3_close fails (e.g.
    // SQLITE_BUSY) when statements are still unfinalized; treat that as fatal.
    const int res{sqlite3_close(m_db)};
    if (res != SQLITE_OK) {
        throw std::runtime_error(
            strprintf("SQLiteDatabase: Failed to close database: %s\n",
                      sqlite3_errstr(res)));
    }
    m_db = nullptr;
}
std::unique_ptr<DatabaseBatch> SQLiteDatabase::MakeBatch(bool flush_on_close) {
    // flush_on_close is irrelevant for this backend: SQLite manages its own
    // flushing and we never flush manually.
    return std::make_unique<SQLiteBatch>(*this);
}
// Construct a batch bound to an open database and prepare its statements.
SQLiteBatch::SQLiteBatch(SQLiteDatabase &database) : m_database(database) {
    // Make sure we have a db handle
    assert(m_database.m_db);
    SetupSQLStatements();
}
void SQLiteBatch::Close() {
// If m_db is in a transaction (i.e. not in autocommit mode), then abort the
// transaction in progress
if (m_database.m_db && sqlite3_get_autocommit(m_database.m_db) == 0) {
if (TxnAbort()) {
LogPrintf("SQLiteBatch: Batch closed unexpectedly without the "
"transaction being explicitly committed or aborted\n");
} else {
LogPrintf(
"SQLiteBatch: Batch closed and failed to abort transaction\n");
}
}
// Free all of the prepared statements
int ret = sqlite3_finalize(m_read_stmt);
if (ret != SQLITE_OK) {
LogPrintf("SQLiteBatch: Batch closed but could not finalize read "
"statement: %s\n",
sqlite3_errstr(ret));
}
ret = sqlite3_finalize(m_insert_stmt);
if (ret != SQLITE_OK) {
LogPrintf("SQLiteBatch: Batch closed but could not finalize insert "
"statement: %s\n",
sqlite3_errstr(ret));
}
ret = sqlite3_finalize(m_overwrite_stmt);
if (ret != SQLITE_OK) {
LogPrintf("SQLiteBatch: Batch closed but could not finalize overwrite "
"statement: %s\n",
sqlite3_errstr(ret));
}
ret = sqlite3_finalize(m_delete_stmt);
if (ret != SQLITE_OK) {
LogPrintf("SQLiteBatch: Batch closed but could not finalize delete "
"statement: %s\n",
sqlite3_errstr(ret));
}
ret = sqlite3_finalize(m_cursor_stmt);
if (ret != SQLITE_OK) {
LogPrintf("SQLiteBatch: Batch closed but could not finalize cursor "
"statement: %s\n",
sqlite3_errstr(ret));
}
m_read_stmt = nullptr;
m_insert_stmt = nullptr;
m_overwrite_stmt = nullptr;
m_delete_stmt = nullptr;
m_cursor_stmt = nullptr;
}
// Read the value stored under `key` into `value`.
// Returns false when the key is absent or any sqlite call fails; only real
// errors are logged (a missing key is normal).
// NOTE: this block contained unresolved diff +/- markers; resolved here to the
// new (std::byte / Span-based write) side.
bool SQLiteBatch::ReadKey(CDataStream &&key, CDataStream &value) {
    if (!m_database.m_db) {
        return false;
    }
    assert(m_read_stmt);
    // Bind: leftmost parameter in statement is index 1
    int res = sqlite3_bind_blob(m_read_stmt, 1, key.data(), key.size(),
                                SQLITE_STATIC);
    if (res != SQLITE_OK) {
        LogPrintf("%s: Unable to bind statement: %s\n", __func__,
                  sqlite3_errstr(res));
        sqlite3_clear_bindings(m_read_stmt);
        sqlite3_reset(m_read_stmt);
        return false;
    }
    res = sqlite3_step(m_read_stmt);
    if (res != SQLITE_ROW) {
        if (res != SQLITE_DONE) {
            // SQLITE_DONE means "not found", don't log an error in that case.
            LogPrintf("%s: Unable to execute statement: %s\n", __func__,
                      sqlite3_errstr(res));
        }
        sqlite3_clear_bindings(m_read_stmt);
        sqlite3_reset(m_read_stmt);
        return false;
    }
    // Leftmost column in result is index 0
    const std::byte *data{BytePtr(sqlite3_column_blob(m_read_stmt, 0))};
    size_t data_size(sqlite3_column_bytes(m_read_stmt, 0));
    value.write({data, data_size});
    sqlite3_clear_bindings(m_read_stmt);
    sqlite3_reset(m_read_stmt);
    return true;
}
// Store `value` under `key`, using the overwrite statement when `overwrite`
// is set and the plain insert statement otherwise. Returns true on success.
bool SQLiteBatch::WriteKey(CDataStream &&key, CDataStream &&value,
                           bool overwrite) {
    if (!m_database.m_db) {
        return false;
    }
    assert(m_insert_stmt && m_overwrite_stmt);
    // Select the prepared statement matching the requested write semantics.
    sqlite3_stmt *stmt = overwrite ? m_overwrite_stmt : m_insert_stmt;
    // Bind: leftmost parameter in statement is index 1 (key), 2 is value.
    int res = sqlite3_bind_blob(stmt, 1, key.data(), key.size(), SQLITE_STATIC);
    if (res != SQLITE_OK) {
        LogPrintf("%s: Unable to bind key to statement: %s\n", __func__,
                  sqlite3_errstr(res));
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        return false;
    }
    res = sqlite3_bind_blob(stmt, 2, value.data(), value.size(), SQLITE_STATIC);
    if (res != SQLITE_OK) {
        LogPrintf("%s: Unable to bind value to statement: %s\n", __func__,
                  sqlite3_errstr(res));
        sqlite3_clear_bindings(stmt);
        sqlite3_reset(stmt);
        return false;
    }
    // Execute, then always rewind the statement so it can be reused.
    res = sqlite3_step(stmt);
    sqlite3_clear_bindings(stmt);
    sqlite3_reset(stmt);
    if (res != SQLITE_DONE) {
        LogPrintf("%s: Unable to execute statement: %s\n", __func__,
                  sqlite3_errstr(res));
        return false;
    }
    return true;
}
// Delete the row stored under `key`. Returns true when the delete statement
// executed cleanly (including when no such row existed).
bool SQLiteBatch::EraseKey(CDataStream &&key) {
    if (!m_database.m_db) {
        return false;
    }
    assert(m_delete_stmt);
    // Bind: leftmost parameter in statement is index 1.
    const int bind_res = sqlite3_bind_blob(m_delete_stmt, 1, key.data(),
                                           key.size(), SQLITE_STATIC);
    if (bind_res != SQLITE_OK) {
        LogPrintf("%s: Unable to bind statement: %s\n", __func__,
                  sqlite3_errstr(bind_res));
        sqlite3_clear_bindings(m_delete_stmt);
        sqlite3_reset(m_delete_stmt);
        return false;
    }
    // Execute, then rewind the statement for the next call.
    const int step_res = sqlite3_step(m_delete_stmt);
    sqlite3_clear_bindings(m_delete_stmt);
    sqlite3_reset(m_delete_stmt);
    if (step_res != SQLITE_DONE) {
        LogPrintf("%s: Unable to execute statement: %s\n", __func__,
                  sqlite3_errstr(step_res));
        return false;
    }
    return true;
}
// Returns true iff a row exists for `key`.
// Uses the shared read statement; the statement is always cleared and reset
// before returning so ReadKey can reuse it.
bool SQLiteBatch::HasKey(CDataStream &&key) {
    if (!m_database.m_db) {
        return false;
    }
    assert(m_read_stmt);
    // Bind: leftmost parameter in statement is index 1
    bool ret = false;
    int res = sqlite3_bind_blob(m_read_stmt, 1, key.data(), key.size(),
                                SQLITE_STATIC);
    if (res == SQLITE_OK) {
        // SQLITE_ROW means a matching row exists; SQLITE_DONE means "not
        // found" and is not an error.
        res = sqlite3_step(m_read_stmt);
        if (res == SQLITE_ROW) {
            ret = true;
        }
    } else {
        // Log bind failures for consistency with ReadKey/EraseKey, which
        // report them; previously they were silently swallowed here.
        LogPrintf("%s: Unable to bind statement: %s\n", __func__,
                  sqlite3_errstr(res));
    }
    sqlite3_clear_bindings(m_read_stmt);
    sqlite3_reset(m_read_stmt);
    return ret;
}
// Mark the batch's single cursor as active. Fails (returns false) when there
// is no db handle; asserts if a cursor is already in progress.
bool SQLiteBatch::StartCursor() {
    assert(!m_cursor_init);
    // No handle, nothing to iterate over.
    m_cursor_init = m_database.m_db != nullptr;
    return m_cursor_init;
}
// Fetch the next (key, value) row from the active cursor.
// Sets `complete` and returns true at end of iteration; returns false on
// error or when no cursor is active.
// NOTE: this block contained unresolved diff +/- markers; resolved here to the
// new (std::byte / Span-based write) side.
bool SQLiteBatch::ReadAtCursor(CDataStream &key, CDataStream &value,
                               bool &complete) {
    complete = false;
    if (!m_cursor_init) {
        return false;
    }
    int res = sqlite3_step(m_cursor_stmt);
    if (res == SQLITE_DONE) {
        // End of the result set; this is the normal termination path.
        complete = true;
        return true;
    }
    if (res != SQLITE_ROW) {
        LogPrintf(
            "SQLiteBatch::ReadAtCursor: Unable to execute cursor step: %s\n",
            sqlite3_errstr(res));
        return false;
    }
    // Leftmost column in result is index 0 (key); column 1 is the value.
    const std::byte *key_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 0))};
    size_t key_data_size(sqlite3_column_bytes(m_cursor_stmt, 0));
    key.write({key_data, key_data_size});
    const std::byte *value_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 1))};
    size_t value_data_size(sqlite3_column_bytes(m_cursor_stmt, 1));
    value.write({value_data, value_data_size});
    return true;
}
// End the current iteration: rewind the shared cursor statement so a later
// StartCursor begins from the first row again.
void SQLiteBatch::CloseCursor() {
    sqlite3_reset(m_cursor_stmt);
    m_cursor_init = false;
}
// Open a transaction on the shared connection. Refuses when there is no db
// handle or a transaction is already active (autocommit == 0).
bool SQLiteBatch::TxnBegin() {
    if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) == 0) {
        return false;
    }
    const int res{sqlite3_exec(m_database.m_db, "BEGIN TRANSACTION", nullptr,
                               nullptr, nullptr)};
    if (res != SQLITE_OK) {
        LogPrintf("SQLiteBatch: Failed to begin the transaction\n");
        return false;
    }
    return true;
}
// Commit the transaction in progress. Refuses when there is no db handle or
// no transaction is active (autocommit != 0).
bool SQLiteBatch::TxnCommit() {
    if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) {
        return false;
    }
    const int res{sqlite3_exec(m_database.m_db, "COMMIT TRANSACTION", nullptr,
                               nullptr, nullptr)};
    if (res != SQLITE_OK) {
        LogPrintf("SQLiteBatch: Failed to commit the transaction\n");
        return false;
    }
    return true;
}
// Roll back the transaction in progress. Refuses when there is no db handle
// or no transaction is active (autocommit != 0).
bool SQLiteBatch::TxnAbort() {
    if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) {
        return false;
    }
    const int res{sqlite3_exec(m_database.m_db, "ROLLBACK TRANSACTION", nullptr,
                               nullptr, nullptr)};
    if (res != SQLITE_OK) {
        LogPrintf("SQLiteBatch: Failed to abort the transaction\n");
        return false;
    }
    return true;
}
// Stub: always reports that no SQLite database exists at `path`.
// NOTE(review): detection is not implemented here — confirm callers do not
// rely on this to discover existing sqlite wallets.
bool ExistsSQLiteDatabase(const fs::path &path) {
    return false;
}
// Factory: open (or create) the SQLite database under `path`, optionally
// verifying it. On failure, sets `status`/`error` and returns nullptr.
std::unique_ptr<SQLiteDatabase>
MakeSQLiteDatabase(const fs::path &path, const DatabaseOptions &options,
                   DatabaseStatus &status, bilingual_str &error) {
    const fs::path file = path / DATABASE_FILENAME;
    try {
        auto db = std::make_unique<SQLiteDatabase>(path, file);
        // Hand the database back unless verification was requested and failed.
        if (!options.verify || db->Verify(error)) {
            return db;
        }
        status = DatabaseStatus::FAILED_VERIFY;
        return nullptr;
    } catch (const std::runtime_error &e) {
        // The SQLiteDatabase constructor throws on open/setup failure.
        status = DatabaseStatus::FAILED_LOAD;
        error.original = e.what();
        return nullptr;
    }
}
// Version string of the sqlite3 library we are linked against.
std::string SQLiteDatabaseVersion() {
    return sqlite3_libversion();
}

File Metadata

Mime Type
text/x-diff
Expires
Sun, Mar 2, 08:59 (1 d, 1 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
5187148
Default Alt Text
(965 KB)

Event Timeline