diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index fba9c7f7f..edaf610b9 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -1,139 +1,141 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_CONSENSUS_VALIDATION_H
#define BITCOIN_CONSENSUS_VALIDATION_H
#include <cassert>
#include <string>
/**
* A "reason" why a transaction was invalid, suitable for determining whether
* the provider of the transaction should be banned/ignored/disconnected/etc.
*/
enum class TxValidationResult {
//! initial value. Tx has not yet been rejected
TX_RESULT_UNSET = 0,
//! invalid by consensus rules
TX_CONSENSUS,
//! inputs failed policy rules
TX_INPUTS_NOT_STANDARD,
//! otherwise didn't meet our local policy rules
TX_NOT_STANDARD,
//! transaction was missing some of its inputs
TX_MISSING_INPUTS,
//! transaction spends a coinbase too early, or violates locktime/sequence
//! locks
TX_PREMATURE_SPEND,
/** Tx already in mempool or in the chain. */
TX_DUPLICATE,
/**
- * Tx conflicts with another mempool tx, i.e. spends the same coin.
+ * Tx conflicts with a finalized tx, i.e. spends the same coin.
*/
TX_CONFLICT,
/**
* This tx's outputs are already spent in the mempool. This should never
* happen and is a symptom of a mempool bug/corruption.
*/
TX_CHILD_BEFORE_PARENT,
//! violated mempool's fee/size/descendant/etc limits
TX_MEMPOOL_POLICY,
//! this node does not have a mempool so can't validate the transaction
TX_NO_MEMPOOL,
//! fails some policy, but might be acceptable if submitted in a (different)
//! package
TX_PACKAGE_RECONSIDERABLE,
+ //! fails some policy, but might be reconsidered by avalanche voting
+ TX_AVALANCHE_RECONSIDERABLE,
//! transaction was not validated because package failed
TX_UNKNOWN,
};
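// A minimal sketch (not part of the diff; the helper name is hypothetical):
// calling code can branch on the result to separate the two "reconsiderable"
// rejection classes above from hard failures.
static bool ExampleCanBeReconsidered(TxValidationResult result) {
    switch (result) {
        // Might be accepted if resubmitted as part of a (different) package.
        case TxValidationResult::TX_PACKAGE_RECONSIDERABLE:
        // Might be accepted later if avalanche voting reconsiders it.
        case TxValidationResult::TX_AVALANCHE_RECONSIDERABLE:
            return true;
        default:
            return false;
    }
}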
/**
* A "reason" why a block was invalid, suitable for determining whether the
* provider of the block should be banned/ignored/disconnected/etc.
* These are much more granular than the rejection codes, which may be more
* useful for some other use-cases.
*/
enum class BlockValidationResult {
//! initial value. Block has not yet been rejected
BLOCK_RESULT_UNSET = 0,
//! invalid by consensus rules (excluding any below reasons)
BLOCK_CONSENSUS,
//! this block was cached as being invalid and we didn't store the reason
//! why
BLOCK_CACHED_INVALID,
//! invalid proof of work or time too old
BLOCK_INVALID_HEADER,
//! the block's data didn't match the data committed to by the PoW
BLOCK_MUTATED,
//! We don't have the previous block the checked one is built on
BLOCK_MISSING_PREV,
//! A block this one builds on is invalid
BLOCK_INVALID_PREV,
//! block timestamp was > 2 hours in the future (or our clock is bad)
BLOCK_TIME_FUTURE,
//! the block failed to meet one of our checkpoints
BLOCK_CHECKPOINT,
//! the block header may be on a too-little-work chain
BLOCK_HEADER_LOW_WORK
};
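// A minimal sketch (hypothetical helper; the actual policy lives in
// net_processing's MaybePunishNodeForBlock): some rejection reasons clearly
// implicate the peer that provided the block, while others can happen to an
// honest peer with a different view of the network.
static bool ExampleImplicatesProvider(BlockValidationResult result) {
    switch (result) {
        case BlockValidationResult::BLOCK_CONSENSUS:
        case BlockValidationResult::BLOCK_MUTATED:
        case BlockValidationResult::BLOCK_INVALID_HEADER:
        case BlockValidationResult::BLOCK_CHECKPOINT:
            return true;
        default:
            // e.g. BLOCK_MISSING_PREV or BLOCK_TIME_FUTURE are not proof of
            // misbehavior by themselves.
            return false;
    }
}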
/**
* Template for capturing information about block/transaction validation.
* This is instantiated by TxValidationState and BlockValidationState for
* validation information on transactions and blocks respectively.
*/
template <typename Result> class ValidationState {
private:
enum class ModeState {
M_VALID, //!< everything ok
M_INVALID, //!< network rule violation (DoS value may be set)
M_ERROR, //!< run-time error
} m_mode{ModeState::M_VALID};
Result m_result{};
std::string m_reject_reason;
std::string m_debug_message;
public:
bool Invalid(Result result, const std::string &reject_reason = "",
const std::string &debug_message = "") {
m_result = result;
m_reject_reason = reject_reason;
m_debug_message = debug_message;
if (m_mode != ModeState::M_ERROR) {
m_mode = ModeState::M_INVALID;
}
return false;
}
bool Error(const std::string &reject_reason) {
if (m_mode == ModeState::M_VALID) {
m_reject_reason = reject_reason;
}
m_mode = ModeState::M_ERROR;
return false;
}
bool IsValid() const { return m_mode == ModeState::M_VALID; }
bool IsInvalid() const { return m_mode == ModeState::M_INVALID; }
bool IsError() const { return m_mode == ModeState::M_ERROR; }
Result GetResult() const { return m_result; }
std::string GetRejectReason() const { return m_reject_reason; }
std::string GetDebugMessage() const { return m_debug_message; }
std::string ToString() const {
if (IsValid()) {
return "Valid";
}
if (!m_debug_message.empty()) {
return m_reject_reason + ", " + m_debug_message;
}
return m_reject_reason;
}
};
class TxValidationState : public ValidationState<TxValidationResult> {};
class BlockValidationState : public ValidationState<BlockValidationResult> {};
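// Usage sketch (illustrative only; the reject strings are invented): a
// validation routine reports failure through the state object and returns
// false, so callers can propagate the result directly.
static bool ExampleCheckTx(TxValidationState &state, bool inputs_available) {
    if (!inputs_available) {
        return state.Invalid(TxValidationResult::TX_MISSING_INPUTS,
                             "bad-txns-inputs-missing",
                             "inputs not found in UTXO set or mempool");
    }
    return true; // state stays in its default M_VALID mode
}
// After a failed call, state.GetResult() yields TX_MISSING_INPUTS and
// state.ToString() yields "bad-txns-inputs-missing, inputs not found ...".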
#endif // BITCOIN_CONSENSUS_VALIDATION_H
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1997d0280..41b98217d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,8815 +1,8817 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <net_processing.h>
#include <addrman.h>
#include <avalanche/compactproofs.h>
#include <avalanche/peermanager.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <avalanche/statistics.h>
#include <avalanche/validation.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <blockvalidity.h>
#include <chain.h>
#include <chainparams.h>
#include <config.h>
#include <consensus/amount.h>
#include <consensus/validation.h>
#include <hash.h>
#include <headerssync.h>
#include <index/blockfilterindex.h>
#include <invrequest.h>
#include <kernel/mempool_entry.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <node/blockstorage.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/trace.h>
#include <validation.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <functional>
#include <future>
#include <memory>
#include <numeric>
#include <typeinfo>
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/**
* How long a transaction has to be in the mempool before it can
* unconditionally be relayed (even when not in mapRelay).
*/
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/**
* Headers download timeout.
* Timeout = base + per_header * (expected number of headers)
*/
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
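// Worked example (assuming the expected header count is estimated from the
// span being synced): with the constants above, syncing 800,000 headers is
// allowed 15min + 800,000 * 1ms = 15min + 800s, about 28.3 minutes.
static constexpr auto ExampleHeadersTimeout(int64_t expected_headers) {
    return HEADERS_DOWNLOAD_TIMEOUT_BASE +
           expected_headers * HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER;
}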
/** How long to wait for a peer to respond to a getheaders request */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/**
* Protect at least this many outbound peers from disconnection due to
* slow/behind headers chain.
*/
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check for stale tips */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for extra outbound peers and disconnect. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/**
* Minimum time an outbound-peer-eviction candidate must be connected for, in
* order to evict
*/
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/**
* Time between pings automatically sent out for latency probing and keepalive.
*/
static constexpr auto PING_INTERVAL{2min};
/** The maximum number of entries in a locator */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message */
static const unsigned int MAX_INV_SZ = 50000;
static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
"Max protocol message length must be greater than largest "
"possible INV message");
/** Minimum time between 2 successive getavaaddr messages from the same peer */
static constexpr auto GETAVAADDR_INTERVAL{2min};
/**
* If no proof was requested from a compact proof message after this timeout
* expired, the proof radix tree can be cleaned up.
*/
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};
struct DataRequestParameters {
/**
* Maximum number of in-flight data requests from a peer. It is not a hard
* limit, but the threshold at which point the overloaded_peer_delay kicks
* in.
*/
const size_t max_peer_request_in_flight;
/**
* Maximum number of inventories to consider for requesting, per peer. It
* provides a reasonable DoS limit to per-peer memory usage spent on
* announcements, while covering peers continuously sending INVs at the
* maximum rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for
* several minutes, while not receiving the actual data (from any peer) in
* response to requests for them.
*/
const size_t max_peer_announcements;
/** How long to delay requesting data from non-preferred peers */
const std::chrono::seconds nonpref_peer_delay;
/**
* How long to delay requesting data from overloaded peers (see
* max_peer_request_in_flight).
*/
const std::chrono::seconds overloaded_peer_delay;
/**
* How long to wait (in microseconds) before a data request from an
* additional peer.
*/
const std::chrono::microseconds getdata_interval;
/**
* Permission flags a peer requires to bypass the request limit tracking
* and delay penalty.
*/
const NetPermissionFlags bypass_request_limits_permissions;
};
static constexpr DataRequestParameters TX_REQUEST_PARAMS{
100, // max_peer_request_in_flight
5000, // max_peer_announcements
std::chrono::seconds(2), // nonpref_peer_delay
std::chrono::seconds(2), // overloaded_peer_delay
std::chrono::seconds(60), // getdata_interval
NetPermissionFlags::Relay, // bypass_request_limits_permissions
};
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
100, // max_peer_request_in_flight
5000, // max_peer_announcements
std::chrono::seconds(2), // nonpref_peer_delay
std::chrono::seconds(2), // overloaded_peer_delay
std::chrono::seconds(60), // getdata_interval
NetPermissionFlags::
BypassProofRequestLimits, // bypass_request_limits_permissions
};
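// A minimal sketch (hypothetical helper; the real scheduling lives in
// InvRequestTracker): how the parameters above combine into a delay before
// requesting announced data from a given peer.
static std::chrono::microseconds
ExampleRequestDelay(const DataRequestParameters &params, bool preferred,
                    size_t peer_requests_in_flight) {
    std::chrono::microseconds delay{0};
    if (!preferred) {
        // Non-preferred peers wait, so preferred peers get asked first.
        delay += params.nonpref_peer_delay;
    }
    if (peer_requests_in_flight >= params.max_peer_request_in_flight) {
        // Soft limit: the peer is considered overloaded, not banned.
        delay += params.overloaded_peer_delay;
    }
    return delay;
}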
/**
* Limit to avoid sending big packets. Not used in processing incoming GETDATA
* for compatibility.
*/
static const unsigned int MAX_GETDATA_SZ = 1000;
/**
* Number of blocks that can be requested at any given time from a single peer.
*/
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/**
* Default time during which a peer must stall block download progress before
* being disconnected. The actual timeout is increased temporarily if peers are
* disconnected for hitting the timeout.
*/
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum timeout for stalling block download. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/**
* Maximum depth of blocks we're willing to serve as compact blocks to peers
* when requested. For older blocks, a regular BLOCK response will be sent.
*/
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/**
* Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests
* for.
*/
static const int MAX_BLOCKTXN_DEPTH = 10;
/**
* Size of the "block download window": how far ahead of our current height do
* we fetch? Larger windows tolerate larger download speed differences between
* peers, but increase the potential degree of disordering of blocks on disk
* (which makes reindexing and pruning harder). We'll probably
* want to make this a per-peer adaptive value at some point.
*/
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/**
* Block download timeout base, expressed in multiples of the block interval
* (i.e. 10 min)
*/
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/**
* Additional block download timeout per parallel downloading peer (i.e. 5 min)
*/
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/**
* Maximum number of headers to announce when relaying blocks with headers
* message.
*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/**
* Average delay between local address broadcasts.
*/
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/**
* Average delay between peer address broadcasts.
*/
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** Delay between rotating the peers we relay a particular address to */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/**
* Average delay between trickled inventory transmissions for inbound peers.
* Blocks and peers with NetPermissionFlags::NoBan permission bypass this.
*/
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/**
* Maximum rate of inventory items to send per second.
* Limits the impact of low-fee transaction floods.
*/
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
INVENTORY_BROADCAST_PER_SECOND *
count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/**
* Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything
* typically relayed before unconditional relay from the mempool kicks in. This
* is only a lower bound, and it should be larger to account for higher inv rate
* to outbound peers, and random variations in the broadcast mechanism.
*/
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND *
UNCONDITIONAL_RELAY_DELAY /
std::chrono::seconds{1},
"INVENTORY_RELAY_MAX too low");
/**
* Average delay between feefilter broadcasts
*/
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/**
* Maximum feefilter broadcast delay after significant change.
*/
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/**
* Maximum number of compact filters that may be requested with one
* getcfilters. See BIP 157.
*/
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/**
* Maximum number of cf hashes that may be requested with one getcfheaders. See
* BIP 157.
*/
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/**
* The maximum percentage of addresses from our addrman to return in response
* to a getaddr message.
*/
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/**
* The maximum rate of address records we're willing to process on average. Can
* be bypassed using the NetPermissionFlags::Addr permission.
*/
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/**
* The soft limit of the address processing token bucket (the regular
* MAX_ADDR_RATE_PER_SECOND based increments won't go above this, but the
* MAX_ADDR_TO_SEND increment following GETADDR is exempt from this limit).
*/
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
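// Worked example of the rate limit above: at 0.1 tokens per second a peer is
// limited to roughly 8,640 addresses per day via plain ADDR messages, while
// the reply to our GETADDR is exempt up to MAX_ADDR_TO_SEND.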
/** The compactblocks version we support. See BIP 152. */
static constexpr uint64_t CMPCTBLOCKS_VERSION{1};
// Internal stuff
namespace {
/**
* Blocks that are in flight, and that are in the queue to be downloaded.
*/
struct QueuedBlock {
/**
* BlockIndex. We must have this since we only request blocks when we've
* already validated the header.
*/
const CBlockIndex *pindex;
/** Optional, used for CMPCTBLOCK downloads */
std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
/**
* Data structure for an individual peer. This struct is not protected by
* cs_main since it does not contain validation-critical data.
*
* Memory is owned by shared pointers and this object is destructed when
* the refcount drops to zero.
*
* Mutexes inside this struct must not be held when locking m_peer_mutex.
*
* TODO: move most members from CNodeState to this structure.
* TODO: move remaining application-layer data members from CNode to this
* structure.
*/
struct Peer {
/** Same id as the CNode object for this peer */
const NodeId m_id{0};
/**
* Services we offered to this peer.
*
* This is supplied by CConnman during peer initialization. It's const
* because there is no protocol defined for renegotiating services
* initially offered to a peer. The set of local services we offer should
* not change after initialization.
*
* An interesting example of this is NODE_NETWORK and initial block
* download: a node which starts up from scratch doesn't have any blocks
* to serve, but still advertises NODE_NETWORK because it will eventually
* fulfill this role after IBD completes. P2P code is written in such a
* way that it can gracefully handle peers who don't make good on their
* service advertisements.
*/
const ServiceFlags m_our_services;
/** Services this peer offered to us. */
std::atomic<ServiceFlags> m_their_services{NODE_NONE};
/** Protects misbehavior data members */
Mutex m_misbehavior_mutex;
/** Accumulated misbehavior score for this peer */
int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
/** Whether this peer should be disconnected and marked as discouraged
* (unless it has NetPermissionFlags::NoBan permission). */
bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
/** Protects block inventory data members */
Mutex m_block_inv_mutex;
/**
* List of blocks that we'll announce via an `inv` message.
* There is no final sorting before sending, as they are always sent
* immediately and in the order requested.
*/
std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
/**
* Unfiltered list of blocks that we'd like to announce via a `headers`
* message. If we can't announce via a `headers` message, we'll fall back to
* announcing via `inv`.
*/
std::vector<BlockHash>
m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
/**
* The final block hash that we sent in an `inv` message to this peer.
* When the peer requests this block, we send an `inv` message to trigger
* the peer to request the next sequence of block hashes.
* Most peers use headers-first syncing, which doesn't use this mechanism.
*/
BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};
/** This peer's reported block height when we connected */
std::atomic<int> m_starting_height{-1};
/** The pong reply we're expecting, or 0 if no pong expected. */
std::atomic<uint64_t> m_ping_nonce_sent{0};
/** When the last ping was sent, or 0 if no ping was ever sent */
std::atomic<std::chrono::microseconds> m_ping_start{0us};
/** Whether a ping has been requested by the user */
std::atomic<bool> m_ping_queued{false};
/**
* The feerate in the most recent BIP133 `feefilter` message sent to the
* peer.
* It is *not* a p2p protocol violation for the peer to send us
* transactions with a lower fee rate than this. See BIP133.
*/
Amount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
Amount::zero()};
std::chrono::microseconds m_next_send_feefilter
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
struct TxRelay {
mutable RecursiveMutex m_bloom_filter_mutex;
/**
* Whether the peer wishes to receive transaction announcements.
*
* This is initially set based on the fRelay flag in the received
* `version` message. If initially set to false, it can only be flipped
* to true if we have offered the peer NODE_BLOOM services and it sends
* us a `filterload` or `filterclear` message. See BIP37.
*/
bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
/**
* A bloom filter for which transactions to announce to the peer.
* See BIP37.
*/
std::unique_ptr<CBloomFilter>
m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
GUARDED_BY(m_bloom_filter_mutex){nullptr};
/** A rolling bloom filter of all announced tx CInvs to this peer. */
CRollingBloomFilter m_recently_announced_invs GUARDED_BY(
NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
0.000001};
mutable RecursiveMutex m_tx_inventory_mutex;
/**
* A filter of all the txids that the peer has announced to us or we
* have announced to the peer. We use this to avoid announcing
* the same txid to a peer that already has the transaction.
*/
CRollingBloomFilter m_tx_inventory_known_filter
GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
/**
* Set of transaction ids we still have to announce. We use the
* mempool to sort transactions in dependency order before relay, so
* this does not have to be sorted.
*/
std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
/**
* Whether the peer has requested us to send our complete mempool. Only
* permitted if the peer has NetPermissionFlags::Mempool.
* See BIP35.
*/
bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
/** The last time a BIP35 `mempool` request was serviced. */
std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
/**
* The next time after which we will send an `inv` message containing
* transaction announcements to this peer.
*/
std::chrono::microseconds m_next_inv_send_time
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
/**
* Minimum fee rate with which to filter transaction announcements to
* this node. See BIP133.
*/
std::atomic<Amount> m_fee_filter_received{Amount::zero()};
};
/**
* Initializes a TxRelay struct for this peer. Can be called at most once
* for a peer.
*/
TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
LOCK(m_tx_relay_mutex);
Assume(!m_tx_relay);
m_tx_relay = std::make_unique<Peer::TxRelay>();
return m_tx_relay.get();
};
TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
};
const TxRelay *GetTxRelay() const
EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
};
struct ProofRelay {
mutable RecursiveMutex m_proof_inventory_mutex;
std::set<avalanche::ProofId>
m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
// Prevent sending proof invs if the peer already knows about them
CRollingBloomFilter m_proof_inventory_known_filter
GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
/**
* A rolling bloom filter of all announced Proofs CInvs to this peer.
*/
CRollingBloomFilter m_recently_announced_proofs GUARDED_BY(
NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
0.000001};
std::chrono::microseconds m_next_inv_send_time{0};
RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
sharedProofs;
std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
std::atomic<bool> compactproofs_requested{false};
};
/**
* Proof relay data. Will be a nullptr if we're not relaying
* proofs with this peer
*/
const std::unique_ptr<ProofRelay> m_proof_relay;
/**
* A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND.
*/
std::vector<CAddress>
m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
/**
* Probabilistic filter to track recent addr messages relayed with this
* peer. Used to avoid relaying redundant addresses to this peer.
*
* We initialize this filter for outbound peers (other than
* block-relay-only connections) or when an inbound peer sends us an
* address related message (ADDR, ADDRV2, GETADDR).
*
* Presence of this filter must correlate with m_addr_relay_enabled.
**/
std::unique_ptr<CRollingBloomFilter>
m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
/**
* Whether we are participating in address relay with this connection.
*
* We set this bool to true for outbound peers (other than
* block-relay-only connections), or when an inbound peer sends us an
* address related message (ADDR, ADDRV2, GETADDR).
*
* We use this bool to decide whether a peer is eligible for gossiping
* addr messages. This avoids relaying to peers that are unlikely to
* forward them, effectively blackholing self announcements. Reasons
* peers might not support addr relay on the link include that they
* connected to us as a block-relay-only peer or that they are a light
* client.
*
* This field must correlate with whether m_addr_known has been
* initialized.
*/
std::atomic_bool m_addr_relay_enabled{false};
/** Whether a getaddr request to this peer is outstanding. */
bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
/** Guards address sending timers. */
mutable Mutex m_addr_send_times_mutex;
/** Time point to send the next ADDR message to this peer. */
std::chrono::microseconds
m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/** Time point to possibly re-announce our local address to this peer. */
std::chrono::microseconds
m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/**
* Whether the peer has signaled support for receiving ADDRv2 (BIP155)
* messages, indicating a preference to receive ADDRv2 instead of ADDR ones.
*/
std::atomic_bool m_wants_addrv2{false};
/** Whether this peer has already sent us a getaddr message. */
bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
/** Guards m_addr_token_bucket */
mutable Mutex m_addr_token_bucket_mutex;
/**
* Number of addresses that can be processed from this peer. Start at 1
* to permit self-announcement.
*/
double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
/** When m_addr_token_bucket was last updated */
std::chrono::microseconds
m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
GetTime<std::chrono::microseconds>()};
/** Total number of addresses that were dropped due to rate limiting. */
std::atomic<uint64_t> m_addr_rate_limited{0};
/**
* Total number of addresses that were processed (excludes rate-limited
* ones).
*/
std::atomic<uint64_t> m_addr_processed{0};
/**
* Whether we've sent this peer a getheaders in response to an inv prior to
* initial-headers-sync completing
*/
bool m_inv_triggered_getheaders_before_sync
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
/** Protects m_getdata_requests **/
Mutex m_getdata_requests_mutex;
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
/** Time of the last getheaders message to this peer */
NodeClock::time_point m_last_getheaders_timestamp
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};
/** Protects m_headers_sync **/
Mutex m_headers_sync_mutex;
/**
* Headers-sync state for this peer (eg for initial sync, or syncing large
* reorgs)
**/
std::unique_ptr<HeadersSyncState>
m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex)
GUARDED_BY(m_headers_sync_mutex){};
/** Whether we've sent our peer a sendheaders message. **/
std::atomic<bool> m_sent_sendheaders{false};
/** Length of current-streak of unconnecting headers announcements */
int m_num_unconnecting_headers_msgs
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
/** When to potentially disconnect peer for stalling headers download */
std::chrono::microseconds m_headers_sync_timeout
GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
/**
* Whether this peer wants invs or headers (when possible) for block
* announcements
*/
bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
false};
explicit Peer(NodeId id, ServiceFlags our_services, bool fRelayProofs)
: m_id(id), m_our_services{our_services},
m_proof_relay(fRelayProofs ? std::make_unique<ProofRelay>()
: nullptr) {}
private:
mutable Mutex m_tx_relay_mutex;
/**
* Transaction relay data. Will be a nullptr if we're not relaying
* transactions with this peer (e.g. if it's a block-relay-only peer or
* the peer has sent us fRelay=false with bloom filters disabled).
*/
std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
using PeerRef = std::shared_ptr<Peer>;
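// Lifetime sketch (illustrative; locking is omitted here although the real
// map is guarded by m_peer_mutex): because PeerRef is a shared_ptr, a
// message handler can keep using a Peer after another thread has erased it
// from the map; the struct is destroyed when the last reference drops.
static PeerRef ExampleLookupPeer(const std::map<NodeId, PeerRef> &peer_map,
                                 NodeId id) {
    auto it = peer_map.find(id);
    // Copying the shared_ptr is what keeps the Peer alive for the caller.
    return it == peer_map.end() ? nullptr : it->second;
}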
/**
* Maintain validation-specific state about nodes, protected by cs_main rather
* than by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The best known block we know this peer has announced.
const CBlockIndex *pindexBestKnownBlock{nullptr};
//! The hash of the last unknown block this peer has announced.
BlockHash hashLastUnknownBlock{};
//! The last full block we both have.
const CBlockIndex *pindexLastCommonBlock{nullptr};
//! The best header we have sent our peer.
const CBlockIndex *pindexBestHeaderSent{nullptr};
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted{false};
//! Since when we're stalling block download progress (in microseconds), or
//! 0.
std::chrono::microseconds m_stalling_since{0us};
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care
//! when vBlocksInFlight is empty.
std::chrono::microseconds m_downloading_since{0us};
int nBlocksInFlight{0};
//! Whether we consider this a preferred download peer.
bool fPreferredDownload{false};
/**
* Whether this peer wants invs or cmpctblocks (when possible) for block
* announcements.
*/
bool m_requested_hb_cmpctblocks{false};
/** Whether this peer will send us cmpctblocks if we request them. */
bool m_provides_cmpctblocks{false};
/**
* State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL
* logic.
*
* Both are only in effect for outbound, non-manual, non-protected
* connections. Any peer protected (m_protect = true) is not chosen for
* eviction. A peer is marked as protected if all of these are true:
* - its connection type is IsBlockOnlyConn() == false
* - it gave us a valid connecting header
* - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
* - it has a better chain than we have
*
* CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our
* tip, set a timeout CHAIN_SYNC_TIMEOUT in the future:
* - If at timeout their best known block now has more work than our tip
* when the timeout was set, then either reset the timeout or clear it
* (after comparing against our current tip's work)
* - If at timeout their best known block still has less work than our tip
* did when the timeout was set, then send a getheaders message, and set a
* shorter timeout, HEADERS_RESPONSE_TIME seconds in future. If their best
* known block is still behind when that new timeout is reached, disconnect.
*
* EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many
* outbound peers, drop the outbound one that least recently announced us a
* new block.
*/
struct ChainSyncTimeoutState {
//! A timeout used for checking whether our peer has sufficiently
//! synced.
std::chrono::seconds m_timeout{0s};
//! A header with the work we require on our peer's chain.
const CBlockIndex *m_work_header{nullptr};
//! After timeout is reached, set to true after sending getheaders.
bool m_sent_getheaders{false};
//! Whether this peer is protected from disconnection due to a bad/slow
//! chain.
bool m_protect{false};
};
ChainSyncTimeoutState m_chain_sync;
//! Time of last new block announcement
int64_t m_last_block_announcement{0};
//! Whether this peer is an inbound connection
const bool m_is_inbound;
CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
class PeerManagerImpl final : public PeerManager {
public:
PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
ChainstateManager &chainman, CTxMemPool &pool,
avalanche::Processor *const avalanche, Options opts);
/** Overridden from CValidationInterface. */
void BlockConnected(const std::shared_ptr<const CBlock> &pblock,
const CBlockIndex *pindexConnected) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
void UpdatedBlockTip(const CBlockIndex *pindexNew,
const CBlockIndex *pindexFork,
bool fInitialDownload) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void BlockChecked(const CBlock &block,
const BlockValidationState &state) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void NewPoWValidBlock(const CBlockIndex *pindex,
const std::shared_ptr<const CBlock> &pblock) override
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
/** Implement NetEventsInterface */
void InitializeNode(const Config &config, CNode &node,
ServiceFlags our_services) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void FinalizeNode(const Config &config, const CNode &node) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest,
!m_headers_presync_mutex);
bool ProcessMessages(const Config &config, CNode *pfrom,
std::atomic<bool> &interrupt) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
!m_recent_confirmed_transactions_mutex,
!m_most_recent_block_mutex, !cs_proofrequest,
!m_headers_presync_mutex, g_msgproc_mutex);
bool SendMessages(const Config &config, CNode *pto) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
!m_recent_confirmed_transactions_mutex,
!m_most_recent_block_mutex, !cs_proofrequest,
g_msgproc_mutex);
/** Implement PeerManager */
void StartScheduledTasks(CScheduler &scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
std::optional<std::string>
FetchBlock(const Config &config, NodeId peer_id,
const CBlockIndex &block_index) override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const TxId &txid) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayProof(const avalanche::ProofId &proofid) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SetBestHeight(int height) override { m_best_height = height; };
void UnitTestMisbehaving(NodeId peer_id, const int howmuch) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, "");
}
void ProcessMessage(const Config &config, CNode &pfrom,
const std::string &msg_type, CDataStream &vRecv,
const std::chrono::microseconds time_received,
const std::atomic<bool> &interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
!m_recent_confirmed_transactions_mutex,
!m_most_recent_block_mutex, !cs_proofrequest,
!m_headers_presync_mutex, g_msgproc_mutex);
void UpdateLastBlockAnnounceTime(NodeId node,
int64_t time_in_seconds) override;
private:
/**
* Consider evicting an outbound peer based on the amount of time they've
* been behind our tip.
*/
void ConsiderEviction(CNode &pto, Peer &peer,
std::chrono::seconds time_in_seconds)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
/**
* If we have extra outbound peers, try to disconnect the one with the
* oldest block announcement.
*/
void EvictExtraOutboundPeers(std::chrono::seconds now)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Retrieve unbroadcast transactions from the mempool and reattempt
* sending to peers
*/
void ReattemptInitialBroadcast(CScheduler &scheduler)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Update the avalanche statistics for all the nodes
*/
void UpdateAvalancheStatistics() const;
/**
* Process periodic avalanche network messaging and cleanups.
*/
void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
/**
* Get a shared pointer to the Peer object.
* May return an empty shared_ptr if the Peer object can't be found.
*/
PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Get a shared pointer to the Peer object and remove it from m_peer_map.
* May return an empty shared_ptr if the Peer object can't be found.
*/
PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Increment peer's misbehavior score. If the new value >=
* DISCOURAGEMENT_THRESHOLD, mark the node to be discouraged, meaning the
* peer might be disconnected and added to the discouragement filter.
*/
void Misbehaving(Peer &peer, int howmuch, const std::string &message);
/**
* Potentially mark a node discouraged based on the contents of a
* BlockValidationState object
*
* @param[in] via_compact_block this bool is passed in because
* net_processing should punish peers differently depending on whether the
* data was provided in a compact block message or not. If the compact block
* had a valid header, but contained invalid txs, the peer should not be
* punished. See BIP 152.
*
* @return Returns true if the peer was punished (probably disconnected)
*/
bool MaybePunishNodeForBlock(NodeId nodeid,
const BlockValidationState &state,
bool via_compact_block,
const std::string &message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially disconnect and discourage a node based on the contents of a
* TxValidationState object
*
* @return Returns true if the peer was punished (probably disconnected)
*/
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
const std::string &message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Maybe disconnect a peer and discourage future connections from its
* address.
*
* @param[in] pnode The node to check.
* @param[in] peer The peer object to check.
* @return True if the peer was marked for disconnection in
* this function
*/
bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);
/**
* Handle a transaction whose result was not
* MempoolAcceptResult::ResultType::VALID.
*
* @param[in] maybe_add_extra_compact_tx Whether this tx should be added to
* vExtraTxnForCompact. Set to false
* if the tx has already been rejected
* before, e.g. is an orphan, to avoid
* adding duplicate entries.
*
* Updates m_txrequest, m_recent_rejects,
* m_recent_rejects_package_reconsiderable, m_orphanage and
* vExtraTxnForCompact.
*/
void ProcessInvalidTx(NodeId nodeid, const CTransactionRef &tx,
const TxValidationState &result,
bool maybe_add_extra_compact_tx)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
struct PackageToValidate {
const Package m_txns;
const std::vector<NodeId> m_senders;
/** Construct a 1-parent-1-child package. */
explicit PackageToValidate(const CTransactionRef &parent,
const CTransactionRef &child,
NodeId parent_sender, NodeId child_sender)
: m_txns{parent, child}, m_senders{parent_sender, child_sender} {}
std::string ToString() const {
Assume(m_txns.size() == 2);
return strprintf(
"parent %s (sender=%d) + child %s (sender=%d)",
m_txns.front()->GetId().ToString(), m_senders.front(),
m_txns.back()->GetId().ToString(), m_senders.back());
}
};
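// Usage sketch (illustrative; in the real code this type is private to
// PeerManagerImpl and its inputs come from orphanage lookups):
static std::string ExampleDescribePackage(const CTransactionRef &parent,
                                          const CTransactionRef &child) {
    const PackageToValidate package(parent, child, /*parent_sender=*/3,
                                    /*child_sender=*/5);
    // Yields e.g. "parent <txid> (sender=3) + child <txid> (sender=5)".
    return package.ToString();
}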
/**
* Handle the results of package validation: calls ProcessValidTx and
* ProcessInvalidTx for individual transactions, and caches rejection for
* the package as a group.
*/
void ProcessPackageResult(const PackageToValidate &package_to_validate,
const PackageMempoolAcceptResult &package_result)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
/**
* Look for a child of this transaction in the orphanage to form a
* 1-parent-1-child package, skipping any combinations that have already
* been tried. Return the resulting package along with the senders of its
* respective transactions, or std::nullopt if no package is found.
*/
std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
NodeId nodeid)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
/**
* Handle a transaction whose result was
* MempoolAcceptResult::ResultType::VALID. Updates m_txrequest and
* m_orphanage. Also queues the tx for relay.
*/
void ProcessValidTx(NodeId nodeid, const CTransactionRef &tx)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
/**
* Reconsider orphan transactions after a parent has been accepted to the
* mempool.
*
* @param[in] peer The peer whose orphan transactions we will
* reconsider. Generally only one orphan will be
* reconsidered on each call of this function. If an
* accepted orphan has orphaned children, those will
* need to be reconsidered, creating more work, possibly
* for other peers.
* @return True if meaningful work was done (an orphan was
* accepted/rejected).
* If no meaningful work was done, then the work set for
* this peer will be empty.
*/
bool ProcessOrphanTx(const Config &config, Peer &peer)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
/**
* Process a single headers message from a peer.
*
* @param[in] pfrom CNode of the peer
* @param[in] peer The peer sending us the headers
* @param[in] headers The headers received. Note that this may be
* modified within ProcessHeadersMessage.
* @param[in] via_compact_block Whether this header came in via compact
* block handling.
*/
void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
std::vector<CBlockHeader> &&headers,
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex,
g_msgproc_mutex);
// Various helpers for headers processing, invoked by
// ProcessHeadersMessage()
/**
* Return true if headers are continuous and have valid proof-of-work
* (DoS points assigned on failure)
*/
bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
const Consensus::Params &consensusParams, Peer &peer);
/** Calculate an anti-DoS work threshold for headers chains */
arith_uint256 GetAntiDoSWorkThreshold();
/**
* Deal with state tracking and headers sync for peers that send the
* occasional non-connecting header (this can happen due to BIP 130 headers
* announcements for blocks interacting with the 2hr
* (MAX_FUTURE_BLOCK_TIME) rule).
*/
void HandleFewUnconnectingHeaders(CNode &pfrom, Peer &peer,
const std::vector<CBlockHeader> &headers)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/** Return true if the headers connect to each other, false otherwise */
bool
CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
/**
* Try to continue a low-work headers sync that has already begun.
* Assumes the caller has already verified the headers connect, and has
* checked that each header satisfies the proof-of-work target included in
* the header.
* @param[in] peer The peer we're syncing with.
* @param[in] pfrom CNode of the peer
* @param[in,out] headers The headers to be processed.
* @return True if the passed in headers were successfully processed
* as the continuation of a low-work headers sync in progress;
* false otherwise.
* If false, the passed in headers will be returned back to
* the caller.
* If true, the returned headers may be empty, indicating
* there is no more work for the caller to do; or the headers
* may be populated with entries that have passed anti-DoS
* checks (and therefore may be validated for block index
* acceptance by the caller).
*/
bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
std::vector<CBlockHeader> &headers)
EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex,
!m_headers_presync_mutex, g_msgproc_mutex);
/**
* Check work on a headers chain to be processed, and if insufficient,
* initiate our anti-DoS headers sync mechanism.
*
* @param[in] peer The peer whose headers we're processing.
* @param[in] pfrom CNode of the peer
* @param[in] chain_start_header Where these headers connect in our
* index.
* @param[in,out] headers The headers to be processed.
*
* @return True if chain was low work and a headers sync was
* initiated (and headers will be empty after calling); false
* otherwise.
*/
bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
const CBlockIndex *chain_start_header,
std::vector<CBlockHeader> &headers)
EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex,
!m_headers_presync_mutex, g_msgproc_mutex);
/**
* Return true if the given header is an ancestor of
* m_chainman.m_best_header or our current tip
*/
bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Request further headers from this peer with a given locator.
* We don't issue a getheaders message if we have a recent one outstanding.
* This returns true if a getheaders is actually sent, and false otherwise.
*/
bool MaybeSendGetHeaders(CNode &pfrom, const CBlockLocator &locator,
Peer &peer)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/**
* Potentially fetch blocks from this peer upon receipt of new headers tip
*/
void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
const CBlockIndex *pindexLast);
/** Update peer state based on received headers message */
void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
const CBlockIndex *pindexLast,
bool received_new_header,
bool may_have_more_headers)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
const BlockTransactionsRequest &req);
/**
* Register with InvRequestTracker that a TX INV has been received from a
* peer. The announcement parameters are decided in PeerManager and then
* passed to InvRequestTracker.
*/
void AddTxAnnouncement(const CNode &node, const TxId &txid,
std::chrono::microseconds current_time)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**
* Register with InvRequestTracker that a PROOF INV has been received from a
* peer. The announcement parameters are decided in PeerManager and then
* passed to InvRequestTracker.
*/
void
AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
std::chrono::microseconds current_time, bool preferred)
EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);
/** Send a version message to a peer */
void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
/**
* Send a ping message every PING_INTERVAL or if requested via RPC. May mark
* the peer to be disconnected if a ping has timed out.
* We use mockable time for ping timeouts, so setmocktime may cause pings
* to time out.
*/
void MaybeSendPing(CNode &node_to, Peer &peer,
std::chrono::microseconds now);
/** Send `addr` messages on a regular schedule. */
void MaybeSendAddr(CNode &node, Peer &peer,
std::chrono::microseconds current_time)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/**
* Send a single `sendheaders` message, after we have completed headers
* sync with a peer.
*/
void MaybeSendSendHeaders(CNode &node, Peer &peer)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/** Send `feefilter` message. */
void MaybeSendFeefilter(CNode &node, Peer &peer,
std::chrono::microseconds current_time)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/**
* Relay (gossip) an address to a few randomly chosen nodes.
*
* @param[in] originator The id of the peer that sent us the address. We
* don't want to relay it back.
* @param[in] addr Address to relay.
* @param[in] fReachable Whether the address' network is reachable. We
* relay unreachable addresses less.
*/
void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
FeeFilterRounder
m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
const CChainParams &m_chainparams;
CConnman &m_connman;
AddrMan &m_addrman;
/**
* Pointer to this node's banman. May be nullptr - check existence before
* dereferencing.
*/
BanMan *const m_banman;
ChainstateManager &m_chainman;
CTxMemPool &m_mempool;
avalanche::Processor *const m_avalanche;
InvRequestTracker<TxId> m_txrequest GUARDED_BY(::cs_main);
Mutex cs_proofrequest;
InvRequestTracker<avalanche::ProofId>
m_proofrequest GUARDED_BY(cs_proofrequest);
/** The height of the best chain */
std::atomic<int> m_best_height{-1};
/** Next time to check for stale tip */
std::chrono::seconds m_stale_tip_check_time{0s};
const Options m_opts;
/**
* Whether we've completed initial sync yet, for determining when to turn
* on extra block-relay-only peers.
*/
bool m_initial_sync_finished{false};
/**
* Protects m_peer_map. This mutex must not be locked while holding a lock
* on any of the mutexes inside a Peer object.
*/
mutable Mutex m_peer_mutex;
/**
* Map of all Peer objects, keyed by peer id. This map is protected
* by the m_peer_mutex. Once a shared pointer reference is
* taken, the lock may be released. Individual fields are protected by
* their own locks.
*/
std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
/** Map maintaining per-node state. */
std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
/**
* Get a pointer to a const CNodeState, used when not mutating the
* CNodeState object.
*/
const CNodeState *State(NodeId pnode) const
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Get a pointer to a mutable CNodeState. */
CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
/** Number of nodes with fSyncStarted. */
int nSyncStarted GUARDED_BY(cs_main) = 0;
/** Hash of the last block we received via INV */
BlockHash
m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
/**
* Sources of received blocks, saved to be able to punish them when
* processing happens afterwards.
* Set mapBlockSource[hash].second to false if the node should not be
* punished if the block is invalid.
*/
std::map<BlockHash, std::pair<NodeId, bool>>
mapBlockSource GUARDED_BY(cs_main);
/** Number of outbound peers with m_chain_sync.m_protect. */
int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
/** Number of preferable block download peers. */
int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
/** Stalling timeout for blocks in IBD */
std::atomic<std::chrono::seconds> m_block_stalling_timeout{
BLOCK_STALLING_TIMEOUT_DEFAULT};
/**
* Check whether we already have this txid in:
* - mempool
* - orphanage
* - m_recent_rejects
* - m_recent_rejects_package_reconsiderable (if
* include_reconsiderable = true)
* - m_recent_confirmed_transactions
* Also responsible for resetting m_recent_rejects and
* m_recent_rejects_package_reconsiderable if the chain tip has changed.
*/
bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
EXCLUSIVE_LOCKS_REQUIRED(cs_main,
!m_recent_confirmed_transactions_mutex);
/**
* Filter for transactions that were recently rejected by the mempool.
* These are not rerequested until the chain tip changes, at which point
* the entire filter is reset.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
* filter using minimum-sized, 60-byte transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.3 MB
*/
CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
0.000'001};
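// Worked sizing from the rationale above: 1,000 tx/s * 120 s = 120,000
// entries; together with the one-in-a-million false-positive rate this is
// what puts the filter at roughly 1.3 MB.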
/**
* Block hash of chain tip the last time we reset m_recent_rejects and
* m_recent_rejects_package_reconsiderable.
* FIXME: should be of BlockHash type
*/
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
/**
* Filter for:
* (1) txids of transactions that were recently rejected by the mempool but
* are eligible for reconsideration if submitted with other transactions.
* (2) packages (see GetPackageHash) we have already rejected before and
* should not retry.
*
* Similar to m_recent_rejects, this filter is used to save bandwidth when
* e.g. all of our peers have larger mempools and thus lower minimum
* feerates than us.
*
* When a transaction's error is
* TxValidationResult::TX_PACKAGE_RECONSIDERABLE (in a package or by
* itself), add its txid to this filter. When a package fails for any
* reason, add the combined hash to this filter.
*
* Upon receiving an announcement for a transaction, if it exists in this
* filter, do not download the txdata. When considering packages, if it
* exists in this filter, drop it.
*
* Reset this filter when the chain tip changes.
*
* Parameters are picked to be the same as m_recent_rejects, with the same
* rationale.
*/
CRollingBloomFilter m_recent_rejects_package_reconsiderable
GUARDED_BY(::cs_main){120'000, 0.000'001};
/**
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
* confirmed.
*/
mutable Mutex m_recent_confirmed_transactions_mutex;
CRollingBloomFilter m_recent_confirmed_transactions
GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
/**
* For sending `inv`s to inbound peers, we use a single (exponentially
* distributed) timer for all peers. If we used a separate timer for each
* peer, a spy node could make multiple inbound connections to us to
* accurately determine when we received the transaction (and potentially
* determine the transaction's origin).
*/
std::chrono::microseconds
NextInvToInbounds(std::chrono::microseconds now,
std::chrono::seconds average_interval);
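// A minimal sketch of the shared exponential timer (hypothetical helper
// built on <random>; the real implementation draws from the node's own
// randomness source):
static std::chrono::microseconds
ExampleNextInvSend(std::chrono::microseconds now,
                   std::chrono::seconds average_interval) {
    static std::mt19937_64 rng{0xb10c5eed};
    // The mean of an exponential distribution is 1/lambda.
    std::exponential_distribution<double> dist(
        1.0 / std::chrono::duration<double>(average_interval).count());
    return now + std::chrono::duration_cast<std::chrono::microseconds>(
                     std::chrono::duration<double>{dist(rng)});
}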
// All of the following cache a recent block, and are protected by
// m_most_recent_block_mutex
mutable Mutex m_most_recent_block_mutex;
std::shared_ptr<const CBlock>
m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
std::shared_ptr<const CBlockHeaderAndShortTxIDs>
m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
// Data about the low-work headers synchronization, aggregated from all
// peers' HeadersSyncStates.
/** Mutex guarding the other m_headers_presync_* variables. */
Mutex m_headers_presync_mutex;
/**
* A type to represent statistics about a peer's low-work headers sync.
*
* - The first field is the total verified amount of work in that
* synchronization.
* - The second is:
* - nullopt: the sync is in REDOWNLOAD phase (phase 2).
* - {height, timestamp}: the sync has the specified tip height and block
* timestamp (phase 1).
*/
using HeadersPresyncStats =
std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
/** Statistics for all peers in low-work headers sync. */
std::map<NodeId, HeadersPresyncStats>
m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
/** The peer with the most-work entry in m_headers_presync_stats. */
NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex){-1};
/** The m_headers_presync_stats improved, and needs signalling. */
std::atomic_bool m_headers_presync_should_signal{false};
/**
* Height of the highest block announced using BIP 152 high-bandwidth mode.
*/
int m_highest_fast_announce GUARDED_BY(::cs_main){0};
/** Have we requested this block from a peer */
bool IsBlockRequested(const BlockHash &hash)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Remove this block from our tracked requested blocks. Called if:
* - the block has been received from a peer
* - the request for the block has timed out
*/
void RemoveBlockRequest(const BlockHash &hash)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Mark a block as in flight.
* Returns false, still setting pit, if the block was already in flight from
* the same peer. pit will only be valid as long as the same cs_main lock is
* being held.
*/
bool BlockRequested(const Config &config, NodeId nodeid,
const CBlockIndex &block,
std::list<QueuedBlock>::iterator **pit = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Update pindexLastCommonBlock and add not-in-flight missing successors to
* vBlocks, until it has at most count entries.
*/
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
std::vector<const CBlockIndex *> &vBlocks,
NodeId &nodeStaller)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
mapBlocksInFlight GUARDED_BY(cs_main);
/** When our tip was last updated. */
std::atomic<std::chrono::seconds> m_last_tip_update{0s};
/**
* Determine whether or not a peer can request a transaction, and return it
* (or nullptr if not found or not allowed).
*/
CTransactionRef FindTxForGetData(const Peer &peer, const TxId &txid,
const std::chrono::seconds mempool_req,
const std::chrono::seconds now)
LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
const std::atomic<bool> &interruptMsgProc)
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
peer.m_getdata_requests_mutex,
NetEventsInterface::g_msgproc_mutex)
LOCKS_EXCLUDED(cs_main);
/** Process a new block. Perform any post-processing housekeeping */
void ProcessBlock(const Config &config, CNode &node,
const std::shared_ptr<const CBlock> &block,
bool force_processing, bool min_pow_checked);
/** Relay map. */
typedef std::map<TxId, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/**
* Expiration-time ordered list of (expire time, relay map entry) pairs,
* protected by cs_main.
*/
std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
g_relay_expiration GUARDED_BY(cs_main);
/**
* When a peer sends us a valid block, instruct it to announce blocks to us
* using CMPCTBLOCK if possible by adding its nodeid to the end of
* lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size
* by removing the first element if necessary.
*/
void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
/** Number of peers from which we're downloading blocks. */
int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
void AddToCompactExtraTransactions(const CTransactionRef &tx)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/**
* Orphan/conflicted/etc transactions that are kept for compact block
* reconstruction.
* The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
* of these are kept in a ring buffer.
*/
std::vector<std::pair<TxHash, CTransactionRef>>
vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
/** Offset into vExtraTxnForCompact to insert the next tx */
size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
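// Illustrative sketch (comment only, not part of the original): together
// these two members form a fixed-capacity ring buffer. With capacity
// m_opts.max_extra_txs, insertion wraps around:
//   vExtraTxnForCompact[vExtraTxnForCompactIt] = {tx->GetHash(), tx};
//   vExtraTxnForCompactIt =
//       (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
// keeping only the most recently seen extra transactions.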
/**
* Check whether the last unknown block a peer advertised is not yet known.
*/
void ProcessBlockAvailability(NodeId nodeid)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Update tracking information about which blocks a peer is assumed to have.
*/
void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* To prevent fingerprinting attacks, only send blocks/headers outside of
* the active chain if they are no more than a month older (both in time,
* and in best equivalent proof of work) than the best header chain we know
* about and we fully-validated them at some point.
*/
bool BlockRequestAllowed(const CBlockIndex *pindex)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool AlreadyHaveBlock(const BlockHash &block_hash)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool AlreadyHaveProof(const avalanche::ProofId &proofid);
void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
const CInv &inv)
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
/**
* Validation logic for compact filters request handling.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] node The node that we received the request from
* @param[in] peer The peer that we received the request from
* @param[in] filter_type The filter type the request is for. Must be
* basic filters.
* @param[in] start_height The start height for the request
* @param[in] stop_hash The stop_hash for the request
* @param[in] max_height_diff The maximum number of items permitted to
* request, as specified in BIP 157
* @param[out] stop_index The CBlockIndex for the stop_hash block, if
* the request can be serviced.
* @param[out] filter_index The filter index, if the request can be
* serviced.
* @return True if the request can be serviced.
*/
bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
BlockFilterType filter_type,
uint32_t start_height,
const BlockHash &stop_hash,
uint32_t max_height_diff,
const CBlockIndex *&stop_index,
BlockFilterIndex *&filter_index);
/**
* Handle a cfilters request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] node The node that we received the request from
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFilters(CNode &node, Peer &peer, CDataStream &vRecv);
/**
* Handle a cfheaders request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] node The node that we received the request from
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFHeaders(CNode &node, Peer &peer, CDataStream &vRecv);
/**
* Handle a getcfcheckpt request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] node The node that we received the request from
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFCheckPt(CNode &node, Peer &peer, CDataStream &vRecv);
/**
* Decide a response for an Avalanche poll about the given block.
*
* @param[in] hash The hash of the block being polled for
* @return Our current vote for the block
*/
uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Decide a response for an Avalanche poll about the given transaction.
*
* @param[in] id The id of the transaction being polled for
* @return Our current vote for the transaction
*/
uint32_t GetAvalancheVoteForTx(const TxId &id) const
EXCLUSIVE_LOCKS_REQUIRED(cs_main,
!m_recent_confirmed_transactions_mutex);
/**
* Checks if address relay is permitted with peer. If needed, initializes
* the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
*
* @return True if address relay is enabled with peer
* False if address relay is disallowed
*/
bool SetupAddressRelay(const CNode &node, Peer &peer)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
void AddAddressKnown(Peer &peer, const CAddress &addr)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
void PushAddress(Peer &peer, const CAddress &addr)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/**
* Manage reception of an avalanche proof.
*
* @return False if the peer is misbehaving, true otherwise
*/
bool ReceivedAvalancheProof(CNode &node, Peer &peer,
const avalanche::ProofRef &proof)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);
avalanche::ProofRef FindProofForGetData(const Peer &peer,
const avalanche::ProofId &proofid,
const std::chrono::seconds now)
EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
bool isPreferredDownloadPeer(const CNode &pfrom);
};
const CNodeState *PeerManagerImpl::State(NodeId pnode) const
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
if (it == m_node_states.end()) {
return nullptr;
}
return &it->second;
}
CNodeState *PeerManagerImpl::State(NodeId pnode)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
}
/**
* Whether the peer supports the address. For example, a peer that does not
* implement BIP155 cannot receive Tor v3 addresses because it requires
* ADDRv2 (BIP155) encoding.
*/
static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}
void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
assert(peer.m_addr_known);
peer.m_addr_known->insert(addr.GetKey());
}
void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
// Known checking here is only to save space from duplicates.
// Before sending, we'll filter it again for known addresses that were
// added after addresses were pushed.
assert(peer.m_addr_known);
if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
IsAddrCompatible(peer, addr)) {
if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
addr;
} else {
peer.m_addrs_to_send.push_back(addr);
}
}
}
static void AddKnownTx(Peer &peer, const TxId &txid) {
auto tx_relay = peer.GetTxRelay();
if (!tx_relay) {
return;
}
LOCK(tx_relay->m_tx_inventory_mutex);
tx_relay->m_tx_inventory_known_filter.insert(txid);
}
static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
if (peer.m_proof_relay != nullptr) {
LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
}
}
bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
LOCK(cs_main);
const CNodeState *state = State(pfrom.GetId());
return state && state->fPreferredDownload;
}
/** Whether this peer can serve us blocks. */
static bool CanServeBlocks(const Peer &peer) {
return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
}
/**
* Whether this peer can only serve limited recent blocks (e.g. because
* it prunes old blocks)
*/
static bool IsLimitedPeer(const Peer &peer) {
return (!(peer.m_their_services & NODE_NETWORK) &&
(peer.m_their_services & NODE_NETWORK_LIMITED));
}
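// Illustrative truth table (comment only, not part of the original):
//   m_their_services              CanServeBlocks  IsLimitedPeer
//   NODE_NETWORK                  true            false
//   NODE_NETWORK_LIMITED only     true            true
//   neither flag                  false           false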
std::chrono::microseconds
PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
std::chrono::seconds average_interval) {
if (m_next_inv_to_inbounds.load() < now) {
// If this function were called from multiple threads simultaneously
// it would be possible that both update the next send variable, and return
// a different result to their caller. This is not possible in practice
// as only the net processing thread invokes this function.
m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
}
return m_next_inv_to_inbounds;
}
bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
}
void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash) {
auto it = mapBlocksInFlight.find(hash);
if (it == mapBlocksInFlight.end()) {
// Block was not requested
return;
}
auto [node_id, list_it] = it->second;
CNodeState *state = State(node_id);
assert(state != nullptr);
if (state->vBlocksInFlight.begin() == list_it) {
// First block on the queue was received, update the start download time
// for the next one
state->m_downloading_since = std::max(
state->m_downloading_since, GetTime<std::chrono::microseconds>());
}
state->vBlocksInFlight.erase(list_it);
state->nBlocksInFlight--;
if (state->nBlocksInFlight == 0) {
// Last validated block on the queue was received.
m_peers_downloading_from--;
}
state->m_stalling_since = 0us;
mapBlocksInFlight.erase(it);
}
bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
const CBlockIndex &block,
std::list<QueuedBlock>::iterator **pit) {
const BlockHash &hash{block.GetBlockHash()};
CNodeState *state = State(nodeid);
assert(state != nullptr);
// Short-circuit most stuff in case it is from the same node.
std::map<BlockHash,
std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end() &&
itInFlight->second.first == nodeid) {
if (pit) {
*pit = &itInFlight->second.second;
}
return false;
}
// Make sure it's not listed somewhere already.
RemoveBlockRequest(hash);
std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
state->vBlocksInFlight.end(),
{&block, std::unique_ptr<PartiallyDownloadedBlock>(
pit ? new PartiallyDownloadedBlock(config, &m_mempool)
: nullptr)});
state->nBlocksInFlight++;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
state->m_downloading_since = GetTime<std::chrono::microseconds>();
m_peers_downloading_from++;
}
itInFlight = mapBlocksInFlight
.insert(std::make_pair(hash, std::make_pair(nodeid, it)))
.first;
if (pit) {
*pit = &itInFlight->second.second;
}
return true;
}
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
AssertLockHeld(cs_main);
// Never request high-bandwidth mode from peers if we're blocks-only. Our
// mempool will not contain the transactions necessary to reconstruct the
// compact block.
if (m_opts.ignore_incoming_txs) {
return;
}
CNodeState *nodestate = State(nodeid);
if (!nodestate) {
LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
return;
}
if (!nodestate->m_provides_cmpctblocks) {
return;
}
int num_outbound_hb_peers = 0;
for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
if (*it == nodeid) {
lNodesAnnouncingHeaderAndIDs.erase(it);
lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
return;
}
CNodeState *state = State(*it);
if (state != nullptr && !state->m_is_inbound) {
++num_outbound_hb_peers;
}
}
if (nodestate->m_is_inbound) {
// If we're adding an inbound HB peer, make sure we're not removing
// our last outbound HB peer in the process.
if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
num_outbound_hb_peers == 1) {
CNodeState *remove_node =
State(lNodesAnnouncingHeaderAndIDs.front());
if (remove_node != nullptr && !remove_node->m_is_inbound) {
// Put the HB outbound peer in the second slot, so that it
// doesn't get removed.
std::swap(lNodesAnnouncingHeaderAndIDs.front(),
*std::next(lNodesAnnouncingHeaderAndIDs.begin()));
}
}
}
m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
::cs_main) {
AssertLockHeld(::cs_main);
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
// As per BIP152, we only get 3 of our peers to announce
// blocks using compact encodings.
m_connman.ForNode(
lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
m_connman.PushMessage(
pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
.Make(NetMsgType::SENDCMPCT,
/*high_bandwidth=*/false,
/*version=*/CMPCTBLOCKS_VERSION));
// save BIP152 bandwidth state: we select peer to be
// low-bandwidth
pnodeStop->m_bip152_highbandwidth_to = false;
return true;
});
lNodesAnnouncingHeaderAndIDs.pop_front();
}
m_connman.PushMessage(pfrom,
CNetMsgMaker(pfrom->GetCommonVersion())
.Make(NetMsgType::SENDCMPCT,
/*high_bandwidth=*/true,
/*version=*/CMPCTBLOCKS_VERSION));
// save BIP152 bandwidth state: we select peer to be high-bandwidth
pfrom->m_bip152_highbandwidth_to = true;
lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
return true;
});
}
bool PeerManagerImpl::TipMayBeStale() {
AssertLockHeld(cs_main);
const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
if (m_last_tip_update.load() == 0s) {
m_last_tip_update = GetTime<std::chrono::seconds>();
}
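// Illustrative arithmetic (comment only, not part of the original): with
// a hypothetical 600s nPowTargetSpacing, the tip is considered
// potentially stale once no update has been seen for 3 * 600s = 30
// minutes while no block download is in flight.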
return m_last_tip_update.load() <
GetTime<std::chrono::seconds>() -
std::chrono::seconds{consensusParams.nPowTargetSpacing *
3} &&
mapBlocksInFlight.empty();
}
bool PeerManagerImpl::CanDirectFetch() {
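// Illustrative arithmetic (comment only, not part of the original): with
// a hypothetical 600s target spacing, direct fetching stays allowed while
// the tip's timestamp is within 20 * 600s = 200 minutes of adjusted time.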
return m_chainman.ActiveChain().Tip()->Time() >
GetAdjustedTime() -
m_chainparams.GetConsensus().PowTargetSpacing() * 20;
}
static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
if (state->pindexBestKnownBlock &&
pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
return true;
}
if (state->pindexBestHeaderSent &&
pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
return true;
}
return false;
}
void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
if (!state->hashLastUnknownBlock.IsNull()) {
const CBlockIndex *pindex =
m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
if (pindex && pindex->nChainWork > 0) {
if (state->pindexBestKnownBlock == nullptr ||
pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
state->pindexBestKnownBlock = pindex;
}
state->hashLastUnknownBlock.SetNull();
}
}
}
void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
const BlockHash &hash) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
ProcessBlockAvailability(nodeid);
const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
if (pindex && pindex->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == nullptr ||
pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
state->pindexBestKnownBlock = pindex;
}
} else {
// An unknown block was announced; just assume that the latest one is
// the best one.
state->hashLastUnknownBlock = hash;
}
}
void PeerManagerImpl::FindNextBlocksToDownload(
NodeId nodeid, unsigned int count,
std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
if (count == 0) {
return;
}
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
assert(state != nullptr);
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
if (state->pindexBestKnownBlock == nullptr ||
state->pindexBestKnownBlock->nChainWork <
m_chainman.ActiveChain().Tip()->nChainWork ||
state->pindexBestKnownBlock->nChainWork <
m_chainman.MinimumChainWork()) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == nullptr) {
// Bootstrap quickly by guessing a parent of our best tip is the forking
// point. Guessing wrong in either direction is not a problem.
state->pindexLastCommonBlock =
m_chainman
.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
m_chainman.ActiveChain().Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an
// ancestor of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(
state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
return;
}
std::vector<const CBlockIndex *> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more
// than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
// common with this peer. The +1 is so we can detect stalling, namely if we
// would be able to download that next block if the window were 1 larger.
int nWindowEnd =
state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
int nMaxHeight =
std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
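// Illustrative arithmetic (comment only, not part of the original): if
// the last common block is at height H, we fetch at most up to height
// H + BLOCK_DOWNLOAD_WINDOW; the single extra height considered beyond
// that serves only to identify the peer stalling the window.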
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed)
// successors of pindexWalk (towards pindexBestKnownBlock) into
// vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
// expensive as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
pindexWalk = state->pindexBestKnownBlock->GetAncestor(
pindexWalk->nHeight + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
}
// Iterate over those blocks in vToFetch (in forward direction), adding
// the ones that are not yet downloaded and not in flight to vBlocks. In
// the meantime, update pindexLastCommonBlock as long as all ancestors
// are already downloaded, or if it's already part of our chain (and
// therefore don't need it even if pruned).
for (const CBlockIndex *pindex : vToFetch) {
if (!pindex->IsValid(BlockValidity::TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
if (pindex->nStatus.hasData() ||
m_chainman.ActiveChain().Contains(pindex)) {
if (pindex->HaveTxsDownloaded()) {
state->pindexLastCommonBlock = pindex;
}
} else if (!IsBlockRequested(pindex->GetBlockHash())) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if
// the download window was one larger.
nodeStaller = waitingfor;
}
return;
}
vBlocks.push_back(pindex);
if (vBlocks.size() == count) {
return;
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
}
}
}
}
} // namespace
template <class InvId>
static bool TooManyAnnouncements(const CNode &node,
const InvRequestTracker<InvId> &requestTracker,
const DataRequestParameters &requestParams) {
return !node.HasPermission(
requestParams.bypass_request_limits_permissions) &&
requestTracker.Count(node.GetId()) >=
requestParams.max_peer_announcements;
}
/**
* Compute the request time for this announcement, current time plus delays for:
* - nonpref_peer_delay for announcements from non-preferred connections
* - overloaded_peer_delay for announcements from peers which have at least
* max_peer_request_in_flight requests in flight (and don't have
* NetPermissionFlags::Relay).
*/
template <class InvId>
static std::chrono::microseconds
ComputeRequestTime(const CNode &node,
const InvRequestTracker<InvId> &requestTracker,
const DataRequestParameters &requestParams,
std::chrono::microseconds current_time, bool preferred) {
auto delay = std::chrono::microseconds{0};
if (!preferred) {
delay += requestParams.nonpref_peer_delay;
}
if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
requestTracker.CountInFlight(node.GetId()) >=
requestParams.max_peer_request_in_flight) {
delay += requestParams.overloaded_peer_delay;
}
return current_time + delay;
}
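// Illustrative sketch (comment only, not part of the original): a
// non-preferred peer that already has max_peer_request_in_flight requests
// outstanding (and lacks the bypass permission) gets scheduled at
//   current_time + nonpref_peer_delay + overloaded_peer_delay
// whereas a preferred, non-overloaded peer is scheduled at current_time.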
void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
const Peer &peer) {
uint64_t my_services{peer.m_our_services};
const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
uint64_t nonce = pnode.GetLocalNonce();
const int nNodeStartingHeight{m_best_height};
NodeId nodeid = pnode.GetId();
CAddress addr = pnode.addr;
uint64_t extraEntropy = pnode.GetLocalExtraEntropy();
CService addr_you =
addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
? addr
: CService();
uint64_t your_services{addr.nServices};
const bool tx_relay = !m_opts.ignore_incoming_txs &&
!pnode.IsBlockOnlyConn() && !pnode.IsFeelerConn();
m_connman.PushMessage(
// your_services, addr_you: Together the pre-version-31402 serialization
// of CAddress "addrYou" (without nTime)
// my_services, CService(): Together the pre-version-31402 serialization
// of CAddress "addrMe" (without nTime)
&pnode, CNetMsgMaker(INIT_PROTO_VERSION)
.Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services,
nTime, your_services, addr_you, my_services,
CService(), nonce, userAgent(config),
nNodeStartingHeight, tx_relay, extraEntropy));
if (fLogIPs) {
LogPrint(BCLog::NET,
"send version message: version %d, blocks=%d, them=%s, "
"txrelay=%d, peer=%d\n",
PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(),
tx_relay, nodeid);
} else {
LogPrint(BCLog::NET,
"send version message: version %d, blocks=%d, "
"txrelay=%d, peer=%d\n",
PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
}
}
void PeerManagerImpl::AddTxAnnouncement(
const CNode &node, const TxId &txid,
std::chrono::microseconds current_time) {
// For m_txrequest and state
AssertLockHeld(::cs_main);
if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
return;
}
const bool preferred = isPreferredDownloadPeer(node);
auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
current_time, preferred);
m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
}
void PeerManagerImpl::AddProofAnnouncement(
const CNode &node, const avalanche::ProofId &proofid,
std::chrono::microseconds current_time, bool preferred) {
// For m_proofrequest
AssertLockHeld(cs_proofrequest);
if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
return;
}
auto reqtime = ComputeRequestTime(
node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);
m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
}
void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
int64_t time_in_seconds) {
LOCK(cs_main);
CNodeState *state = State(node);
if (state) {
state->m_last_block_announcement = time_in_seconds;
}
}
void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
ServiceFlags our_services) {
NodeId nodeid = node.GetId();
{
LOCK(cs_main);
m_node_states.emplace_hint(m_node_states.end(),
std::piecewise_construct,
std::forward_as_tuple(nodeid),
std::forward_as_tuple(node.IsInboundConn()));
assert(m_txrequest.Count(nodeid) == 0);
}
PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
{
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
}
if (!node.IsInboundConn()) {
PushNodeVersion(config, node, *peer);
}
}
void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
for (const TxId &txid : unbroadcast_txids) {
// Sanity check: all unbroadcast txns should exist in the mempool
if (m_mempool.exists(txid)) {
RelayTransaction(txid);
} else {
m_mempool.RemoveUnbroadcastTx(txid, true);
}
}
if (m_avalanche) {
// Get and sanitize the list of proofids to broadcast. The RelayProof
// call is done in a second loop to avoid locking cs_vNodes while
// cs_peerManager is locked which would cause a potential deadlock due
// to reversed lock order.
auto unbroadcasted_proofids =
m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
auto unbroadcasted_proofids = pm.getUnbroadcastProofs();
auto it = unbroadcasted_proofids.begin();
while (it != unbroadcasted_proofids.end()) {
// Sanity check: all unbroadcast proofs should be bound to a
// peer in the peermanager
if (!pm.isBoundToPeer(*it)) {
pm.removeUnbroadcastProof(*it);
it = unbroadcasted_proofids.erase(it);
continue;
}
++it;
}
return unbroadcasted_proofids;
});
// Remaining proofids are the ones to broadcast
for (const auto &proofid : unbroadcasted_proofids) {
RelayProof(proofid);
}
}
// Schedule next run for 10-15 minutes in the future.
// We add randomness on every cycle to avoid the possibility of P2P
// fingerprinting.
const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
reattemptBroadcastInterval);
}
void PeerManagerImpl::UpdateAvalancheStatistics() const {
m_connman.ForEachNode([](CNode *pnode) {
pnode->updateAvailabilityScore(AVALANCHE_STATISTICS_DECAY_FACTOR);
});
if (!m_avalanche) {
// Not enabled or not ready yet
return;
}
// Generate a peer availability score by computing an exponentially
// weighted moving average of the average of node availability scores.
// This ensures the peer score is bound to the lifetime of its proof which
// incentivizes stable network activity.
m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
pm.updateAvailabilityScores(
AVALANCHE_STATISTICS_DECAY_FACTOR, [&](NodeId nodeid) -> double {
double score{0.0};
m_connman.ForNode(nodeid, [&](CNode *pavanode) {
score = pavanode->getAvailabilityScore();
return true;
});
return score;
});
});
}
void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
const auto now = GetTime<std::chrono::seconds>();
std::vector<NodeId> avanode_ids;
bool fQuorumEstablished;
bool fShouldRequestMoreNodes;
if (!m_avalanche) {
// Not enabled or not ready yet, retry later
goto scheduleLater;
}
m_avalanche->sendDelayedAvahello();
fQuorumEstablished = m_avalanche->isQuorumEstablished();
fShouldRequestMoreNodes =
m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
return pm.shouldRequestMoreNodes();
});
m_connman.ForEachNode([&](CNode *pnode) {
// Build a list of the avalanche peers nodeids
if (pnode->m_avalanche_enabled) {
avanode_ids.push_back(pnode->GetId());
}
PeerRef peer = GetPeerRef(pnode->GetId());
if (peer == nullptr) {
return;
}
// If a proof radix tree timed out, cleanup
if (peer->m_proof_relay &&
now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
AVALANCHE_AVAPROOFS_TIMEOUT)) {
peer->m_proof_relay->sharedProofs = {};
}
});
if (avanode_ids.empty()) {
// No node is available for messaging, retry later
goto scheduleLater;
}
Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
// Request avalanche addresses from our peers
for (NodeId avanodeId : avanode_ids) {
const bool sentGetavaaddr =
m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
if (!fQuorumEstablished || !pavanode->IsInboundConn()) {
m_connman.PushMessage(
pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
.Make(NetMsgType::GETAVAADDR));
PeerRef peer = GetPeerRef(avanodeId);
WITH_LOCK(peer->m_addr_token_bucket_mutex,
peer->m_addr_token_bucket +=
m_opts.max_addr_to_send);
return true;
}
return false;
});
// If we have no reason to believe that we need more nodes, only request
// addresses from one of our peers.
if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
break;
}
}
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
// Don't request proofs while in IBD. We're likely to orphan them
// because we don't have the UTXOs.
goto scheduleLater;
}
// If we have never received an avaproofs message yet, be kind and only
// request from a subset of our peers, as we expect a ton of avaproofs
// messages in the process.
if (m_avalanche->getAvaproofsNodeCounter() == 0) {
avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
}
for (NodeId nodeid : avanode_ids) {
// Send a getavaproofs to all of our peers
m_connman.ForNode(nodeid, [&](CNode *pavanode) {
PeerRef peer = GetPeerRef(nodeid);
if (peer->m_proof_relay) {
m_connman.PushMessage(pavanode,
CNetMsgMaker(pavanode->GetCommonVersion())
.Make(NetMsgType::GETAVAPROOFS));
peer->m_proof_relay->compactproofs_requested = true;
}
return true;
});
}
scheduleLater:
// Schedule next run for 2-5 minutes in the future.
// We add randomness on every cycle to avoid the possibility of P2P
// fingerprinting.
const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
avalanchePeriodicNetworkingInterval);
}
void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
NodeId nodeid = node.GetId();
int misbehavior{0};
{
LOCK(cs_main);
{
// We remove the PeerRef from m_peer_map here, but we don't always
// destruct the Peer. Sometimes another thread is still holding a
// PeerRef, so the refcount is >= 1. Be careful not to do any
// processing here that assumes Peer won't be changed before it's
// destructed.
PeerRef peer = RemovePeer(nodeid);
assert(peer != nullptr);
misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
return peer->m_misbehavior_score);
LOCK(m_peer_mutex);
m_peer_map.erase(nodeid);
}
CNodeState *state = State(nodeid);
assert(state != nullptr);
if (state->fSyncStarted) {
nSyncStarted--;
}
for (const QueuedBlock &entry : state->vBlocksInFlight) {
mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
}
m_mempool.withOrphanage([nodeid](TxOrphanage &orphanage) {
orphanage.EraseForPeer(nodeid);
});
m_txrequest.DisconnectedPeer(nodeid);
m_num_preferred_download_peers -= state->fPreferredDownload;
m_peers_downloading_from -= (state->nBlocksInFlight != 0);
assert(m_peers_downloading_from >= 0);
m_outbound_peers_with_protect_from_disconnect -=
state->m_chain_sync.m_protect;
assert(m_outbound_peers_with_protect_from_disconnect >= 0);
m_node_states.erase(nodeid);
if (m_node_states.empty()) {
// Do a consistency check after the last peer is removed.
assert(mapBlocksInFlight.empty());
assert(m_num_preferred_download_peers == 0);
assert(m_peers_downloading_from == 0);
assert(m_outbound_peers_with_protect_from_disconnect == 0);
assert(m_txrequest.Size() == 0);
assert(m_mempool.withOrphanage([](const TxOrphanage &orphanage) {
return orphanage.Size();
}) == 0);
}
}
if (node.fSuccessfullyConnected && misbehavior == 0 &&
!node.IsBlockOnlyConn() && !node.IsInboundConn()) {
// Only change visible addrman state for full outbound peers. We don't
// call Connected() for feeler connections since they don't have
// fSuccessfullyConnected set.
m_addrman.Connected(node.addr);
}
{
LOCK(m_headers_presync_mutex);
m_headers_presync_stats.erase(nodeid);
}
WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
LOCK(m_peer_mutex);
auto it = m_peer_map.find(id);
return it != m_peer_map.end() ? it->second : nullptr;
}
PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
PeerRef ret;
LOCK(m_peer_mutex);
auto it = m_peer_map.find(id);
if (it != m_peer_map.end()) {
ret = std::move(it->second);
m_peer_map.erase(it);
}
return ret;
}
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
CNodeStateStats &stats) const {
{
LOCK(cs_main);
const CNodeState *state = State(nodeid);
if (state == nullptr) {
return false;
}
stats.nSyncHeight = state->pindexBestKnownBlock
? state->pindexBestKnownBlock->nHeight
: -1;
stats.nCommonHeight = state->pindexLastCommonBlock
? state->pindexLastCommonBlock->nHeight
: -1;
for (const QueuedBlock &queue : state->vBlocksInFlight) {
if (queue.pindex) {
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
}
}
}
PeerRef peer = GetPeerRef(nodeid);
if (peer == nullptr) {
return false;
}
stats.their_services = peer->m_their_services;
stats.m_starting_height = peer->m_starting_height;
// It is common for nodes with good ping times to suddenly become lagged,
// due to a new block arriving or other large transfer.
// Merely reporting pingtime might fool the caller into thinking the node
// was still responsive, since pingtime does not update until the ping is
// complete, which might take a while. So, if a ping is taking an unusually
// long time in flight, the caller can immediately detect that this is
// happening.
auto ping_wait{0us};
if ((0 != peer->m_ping_nonce_sent) &&
(0 != peer->m_ping_start.load().count())) {
ping_wait =
GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
}
if (auto tx_relay = peer->GetTxRelay()) {
stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
return tx_relay->m_relay_txs);
stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
} else {
stats.m_relay_txs = false;
stats.m_fee_filter_received = Amount::zero();
}
stats.m_ping_wait = ping_wait;
stats.m_addr_processed = peer->m_addr_processed.load();
stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
{
LOCK(peer->m_headers_sync_mutex);
if (peer->m_headers_sync) {
stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
}
}
return true;
}
void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
if (m_opts.max_extra_txs <= 0) {
return;
}
if (vExtraTxnForCompact.empty()) {
vExtraTxnForCompact.resize(m_opts.max_extra_txs);
}
vExtraTxnForCompact[vExtraTxnForCompactIt] =
std::make_pair(tx->GetHash(), tx);
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
}
void PeerManagerImpl::Misbehaving(Peer &peer, int howmuch,
const std::string &message) {
assert(howmuch > 0);
LOCK(peer.m_misbehavior_mutex);
const int score_before{peer.m_misbehavior_score};
peer.m_misbehavior_score += howmuch;
const int score_now{peer.m_misbehavior_score};
const std::string message_prefixed =
message.empty() ? "" : (": " + message);
std::string warning;
if (score_now >= DISCOURAGEMENT_THRESHOLD &&
score_before < DISCOURAGEMENT_THRESHOLD) {
warning = " DISCOURAGE THRESHOLD EXCEEDED";
peer.m_should_discourage = true;
}
LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n", peer.m_id,
score_before, score_now, warning, message_prefixed);
}
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
const BlockValidationState &state,
bool via_compact_block,
const std::string &message) {
PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case BlockValidationResult::BLOCK_RESULT_UNSET:
break;
case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
// We didn't try to process the block because the header chain may
// have too little work.
break;
// The node is providing invalid data:
case BlockValidationResult::BLOCK_CONSENSUS:
case BlockValidationResult::BLOCK_MUTATED:
if (!via_compact_block) {
if (peer) {
Misbehaving(*peer, 100, message);
}
return true;
}
break;
case BlockValidationResult::BLOCK_CACHED_INVALID: {
LOCK(cs_main);
CNodeState *node_state = State(nodeid);
if (node_state == nullptr) {
break;
}
// Ban outbound (but not inbound) peers if on an invalid chain.
// Exempt HB compact block peers. Manual connections are always
// protected from discouragement.
if (!via_compact_block && !node_state->m_is_inbound) {
if (peer) {
Misbehaving(*peer, 100, message);
}
return true;
}
break;
}
case BlockValidationResult::BLOCK_INVALID_HEADER:
case BlockValidationResult::BLOCK_CHECKPOINT:
case BlockValidationResult::BLOCK_INVALID_PREV:
if (peer) {
Misbehaving(*peer, 100, message);
}
return true;
// Conflicting (but not necessarily invalid) data or different policy:
case BlockValidationResult::BLOCK_MISSING_PREV:
// TODO: Handle this much more gracefully (10 DoS points is super
// arbitrary)
if (peer) {
Misbehaving(*peer, 10, message);
}
return true;
case BlockValidationResult::BLOCK_TIME_FUTURE:
break;
}
if (message != "") {
LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
}
return false;
}
bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
const TxValidationState &state,
const std::string &message) {
PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
// The node is providing invalid data:
case TxValidationResult::TX_CONSENSUS:
if (peer) {
Misbehaving(*peer, 100, message);
}
return true;
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
case TxValidationResult::TX_DUPLICATE:
case TxValidationResult::TX_CONFLICT:
case TxValidationResult::TX_CHILD_BEFORE_PARENT:
case TxValidationResult::TX_MEMPOOL_POLICY:
case TxValidationResult::TX_NO_MEMPOOL:
case TxValidationResult::TX_PACKAGE_RECONSIDERABLE:
case TxValidationResult::TX_AVALANCHE_RECONSIDERABLE:
case TxValidationResult::TX_UNKNOWN:
break;
}
if (message != "") {
LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
}
return false;
}
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
AssertLockHeld(cs_main);
if (m_chainman.ActiveChain().Contains(pindex)) {
return true;
}
return pindex->IsValid(BlockValidity::SCRIPTS) &&
(m_chainman.m_best_header != nullptr) &&
(m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
STALE_RELAY_AGE_LIMIT) &&
(GetBlockProofEquivalentTime(
*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
std::optional<std::string>
PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
const CBlockIndex &block_index) {
if (m_chainman.m_blockman.LoadingBlocks()) {
return "Loading blocks ...";
}
LOCK(cs_main);
// Ensure this peer exists and hasn't been disconnected
CNodeState *state = State(peer_id);
if (state == nullptr) {
return "Peer does not exist";
}
// Mark block as in-flight unless it already is (for this peer).
// If a block was already in-flight for a different peer, its BLOCKTXN
// response will be dropped.
if (!BlockRequested(config, peer_id, block_index)) {
return "Already requested from this peer";
}
// Construct message to request the block
const BlockHash &hash{block_index.GetBlockHash()};
const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
// Send block request message to the peer
if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
const CNetMsgMaker msgMaker(node->GetCommonVersion());
this->m_connman.PushMessage(
node, msgMaker.Make(NetMsgType::GETDATA, invs));
return true;
})) {
return "Node not fully connected";
}
LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
peer_id);
return std::nullopt;
}
std::unique_ptr<PeerManager>
PeerManager::make(CConnman &connman, AddrMan &addrman, BanMan *banman,
ChainstateManager &chainman, CTxMemPool &pool,
avalanche::Processor *const avalanche, Options opts) {
return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
pool, avalanche, opts);
}
PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
BanMan *banman, ChainstateManager &chainman,
CTxMemPool &pool,
avalanche::Processor *const avalanche,
Options opts)
: m_rng{opts.deterministic_rng},
m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}, m_rng},
m_chainparams(chainman.GetParams()), m_connman(connman),
m_addrman(addrman), m_banman(banman), m_chainman(chainman),
m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}
void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
// combine them in one function and schedule at the quicker (peer-eviction)
// timer.
static_assert(
EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
"peer eviction timer should be less than stale tip check timer");
scheduler.scheduleEvery(
[this]() {
this->CheckForStaleTipAndEvictPeers();
return true;
},
std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
// schedule next run for 10-15 minutes in the future
const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
reattemptBroadcastInterval);
// Update the avalanche statistics on a schedule
scheduler.scheduleEvery(
[this]() {
UpdateAvalancheStatistics();
return true;
},
AVALANCHE_STATISTICS_REFRESH_PERIOD);
// schedule next run for 2-5 minutes in the future
const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
avalanchePeriodicNetworkingInterval);
}
/**
* Evict orphan txn pool entries based on a newly connected
* block, remember the recently confirmed transactions, and delete tracked
* announcements for them. Also save the time of the last tip update and
* possibly reduce dynamic block stalling timeout.
*/
void PeerManagerImpl::BlockConnected(
const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
m_mempool.withOrphanage([&pblock](TxOrphanage &orphanage) {
orphanage.EraseForBlock(*pblock);
});
m_mempool.withConflicting([&pblock](TxConflicting &conflicting) {
conflicting.EraseForBlock(*pblock);
});
m_last_tip_update = GetTime<std::chrono::seconds>();
{
LOCK(m_recent_confirmed_transactions_mutex);
for (const CTransactionRef &ptx : pblock->vtx) {
m_recent_confirmed_transactions.insert(ptx->GetId());
}
}
{
LOCK(cs_main);
for (const auto &ptx : pblock->vtx) {
m_txrequest.ForgetInvId(ptx->GetId());
}
}
// In case the dynamic timeout was doubled once or more, reduce it slowly
// back to its default value
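// Illustrative arithmetic (comment only, not part of the original):
// assuming a hypothetical 2s default, a timeout that was doubled twice to
// 8s decays by x0.85 per connected block, truncated to whole seconds:
// 8s -> 6s -> 5s -> 4s -> 3s -> 2s, clamped at
// BLOCK_STALLING_TIMEOUT_DEFAULT.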
auto stalling_timeout = m_block_stalling_timeout.load();
Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
const auto new_timeout =
std::max(std::chrono::duration_cast<std::chrono::seconds>(
stalling_timeout * 0.85),
BLOCK_STALLING_TIMEOUT_DEFAULT);
if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
new_timeout)) {
LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n",
count_seconds(new_timeout));
}
}
}
void PeerManagerImpl::BlockDisconnected(
const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
// To avoid relay problems with transactions that were previously
// confirmed, clear our filter of recently confirmed transactions whenever
// there's a reorg.
// This means that in a 1-block reorg (where 1 block is disconnected and
// then another block reconnected), our filter will drop to having only one
// block's worth of transactions in it, but that should be fine, since
// presumably the most common case of relaying a confirmed transaction
// should be just after a new block containing it is found.
LOCK(m_recent_confirmed_transactions_mutex);
m_recent_confirmed_transactions.reset();
}
/**
* Maintain state about the best-seen block and fast-announce a compact block
* to compatible peers.
*/
void PeerManagerImpl::NewPoWValidBlock(
const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
LOCK(cs_main);
if (pindex->nHeight <= m_highest_fast_announce) {
return;
}
m_highest_fast_announce = pindex->nHeight;
BlockHash hashBlock(pblock->GetHash());
const std::shared_future<CSerializedNetMsg> lazy_ser{
std::async(std::launch::deferred, [&] {
return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
})};
{
LOCK(m_most_recent_block_mutex);
m_most_recent_block_hash = hashBlock;
m_most_recent_block = pblock;
m_most_recent_compact_block = pcmpctblock;
}
m_connman.ForEachNode(
[this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
pnode->fDisconnect) {
return;
}
ProcessBlockAvailability(pnode->GetId());
CNodeState &state = *State(pnode->GetId());
// If the peer has, or we announced to them the previous block
// already, but we don't think they have this one, go ahead and
// announce it.
if (state.m_requested_hb_cmpctblocks &&
!PeerHasHeader(&state, pindex) &&
PeerHasHeader(&state, pindex->pprev)) {
LogPrint(BCLog::NET,
"%s sending header-and-ids %s to peer=%d\n",
"PeerManager::NewPoWValidBlock",
hashBlock.ToString(), pnode->GetId());
const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
state.pindexBestHeaderSent = pindex;
}
});
}
/**
* Update our best height and announce any block hashes which weren't previously
* in m_chainman.ActiveChain() to our peers.
*/
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
const CBlockIndex *pindexFork,
bool fInitialDownload) {
SetBestHeight(pindexNew->nHeight);
SetServiceFlagsIBDCache(!fInitialDownload);
// Don't relay inventory during initial block download.
if (fInitialDownload) {
return;
}
// Find the hashes of all blocks that weren't previously in the best chain.
std::vector<BlockHash> vHashes;
const CBlockIndex *pindexToAnnounce = pindexNew;
while (pindexToAnnounce != pindexFork) {
vHashes.push_back(pindexToAnnounce->GetBlockHash());
pindexToAnnounce = pindexToAnnounce->pprev;
if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
// Limit announcements in case of a huge reorganization. Rely on the
// peer's synchronization mechanism in that case.
break;
}
}
{
LOCK(m_peer_mutex);
for (auto &it : m_peer_map) {
Peer &peer = *it.second;
LOCK(peer.m_block_inv_mutex);
for (const BlockHash &hash : reverse_iterate(vHashes)) {
peer.m_blocks_for_headers_relay.push_back(hash);
}
}
}
m_connman.WakeMessageHandler();
}
/**
* Handle invalid block rejection and consequent peer banning, maintain which
* peers announce compact blocks.
*/
void PeerManagerImpl::BlockChecked(const CBlock &block,
const BlockValidationState &state) {
LOCK(cs_main);
const BlockHash hash = block.GetHash();
std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
mapBlockSource.find(hash);
// If the block failed validation, we know where it came from and we're
// still connected to that peer, maybe punish.
if (state.IsInvalid() && it != mapBlockSource.end() &&
State(it->second.first)) {
MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
/*via_compact_block=*/!it->second.second);
}
// Check that:
// 1. The block is valid
// 2. We're not in initial block download
// 3. This is currently the best block we're aware of. We haven't updated
// the tip yet so we have no way to check this directly here. Instead we
// just check that there are currently no other blocks in flight.
else if (state.IsValid() &&
!m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
if (it != mapBlockSource.end()) {
MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
}
}
if (it != mapBlockSource.end()) {
mapBlockSource.erase(it);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
bool include_reconsiderable) {
if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
hashRecentRejectsChainTip) {
// If the chain tip has changed, previously rejected transactions
// might now be valid, e.g. due to a nLockTime'd tx becoming
// valid, or a double-spend. Reset the rejects filter and give
// those txs a second chance.
hashRecentRejectsChainTip =
m_chainman.ActiveChain().Tip()->GetBlockHash();
m_recent_rejects.reset();
m_recent_rejects_package_reconsiderable.reset();
}
if (m_mempool.withOrphanage([&txid](const TxOrphanage &orphanage) {
return orphanage.HaveTx(txid);
})) {
return true;
}
if (m_mempool.withConflicting([&txid](const TxConflicting &conflicting) {
return conflicting.HaveTx(txid);
})) {
return true;
}
if (include_reconsiderable &&
m_recent_rejects_package_reconsiderable.contains(txid)) {
return true;
}
{
LOCK(m_recent_confirmed_transactions_mutex);
if (m_recent_confirmed_transactions.contains(txid)) {
return true;
}
}
return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
}
bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
}
bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
assert(m_avalanche);
auto localProof = m_avalanche->getLocalProof();
if (localProof && localProof->getId() == proofid) {
return true;
}
return m_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
return pm.exists(proofid) || pm.isInvalid(proofid);
});
}
void PeerManagerImpl::SendPings() {
LOCK(m_peer_mutex);
for (auto &it : m_peer_map) {
it.second->m_ping_queued = true;
}
}
void PeerManagerImpl::RelayTransaction(const TxId &txid) {
LOCK(m_peer_mutex);
for (auto &it : m_peer_map) {
Peer &peer = *it.second;
auto tx_relay = peer.GetTxRelay();
if (!tx_relay) {
continue;
}
LOCK(tx_relay->m_tx_inventory_mutex);
if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
tx_relay->m_tx_inventory_to_send.insert(txid);
}
}
}
void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
LOCK(m_peer_mutex);
for (auto &it : m_peer_map) {
Peer &peer = *it.second;
if (!peer.m_proof_relay) {
continue;
}
LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
proofid)) {
peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
}
}
}
void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
bool fReachable) {
// We choose the same nodes within a given 24h window (if the list of
// connected nodes does not change) and we don't relay to nodes that already
// know an address. So within 24h we will likely relay a given address once.
// This is to prevent a peer from unjustly giving their address better
// propagation by sending it to us repeatedly.
if (!fReachable && !addr.IsRelayable()) {
return;
}
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the m_addr_knowns of the chosen nodes prevent repeats
const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
const auto current_time{GetTime<std::chrono::seconds>()};
// Adding address hash makes exact rotation time different per address,
// while preserving periodicity.
const uint64_t time_addr{
(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
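// Illustrative arithmetic (comment only, not part of the original): with
// a 24h rotation interval, time_addr is simply the current 24h bucket
// number, offset by hash_addr so each address switches to new relay
// targets at its own time of day rather than all at once.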
const CSipHasher hasher{
m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
.Write(hash_addr)
.Write(time_addr)};
// Relay reachable addresses to 2 peers. Unreachable addresses are relayed
// randomly to 1 or 2 peers.
unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
std::array<std::pair<uint64_t, Peer *>, 2> best{
{{0, nullptr}, {0, nullptr}}};
assert(nRelayNodes <= best.size());
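// Illustrative sketch (comment only, not part of the original): the loop
// below is an insertion sort keeping the nRelayNodes highest per-peer
// SipHash keys. E.g. visiting peers with hypothetical keys 7, 3, 9 and
// nRelayNodes = 2 ends with best = {{9, peerC}, {7, peerA}}.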
LOCK(m_peer_mutex);
for (auto &[id, peer] : m_peer_map) {
if (peer->m_addr_relay_enabled && id != originator &&
IsAddrCompatible(*peer, addr)) {
uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
for (unsigned int i = 0; i < nRelayNodes; i++) {
if (hashKey > best[i].first) {
std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
best.begin() + i + 1);
best[i] = std::make_pair(hashKey, peer.get());
break;
}
}
}
};
for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
PushAddress(*best[i].second, addr);
}
}
void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
Peer &peer, const CInv &inv) {
const BlockHash hash(inv.hash);
std::shared_ptr<const CBlock> a_recent_block;
std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
{
LOCK(m_most_recent_block_mutex);
a_recent_block = m_most_recent_block;
a_recent_compact_block = m_most_recent_compact_block;
}
bool need_activate_chain = false;
{
LOCK(cs_main);
const CBlockIndex *pindex =
m_chainman.m_blockman.LookupBlockIndex(hash);
if (pindex) {
if (pindex->HaveTxsDownloaded() &&
!pindex->IsValid(BlockValidity::SCRIPTS) &&
pindex->IsValid(BlockValidity::TREE)) {
// If we have the block and all of its parents, but have not yet
// validated it, we might be in the middle of connecting it (ie
// in the unlock of cs_main before ActivateBestChain but after
// AcceptBlock). In this case, we need to run ActivateBestChain
// prior to checking the relay conditions below.
need_activate_chain = true;
}
}
} // release cs_main before calling ActivateBestChain
if (need_activate_chain) {
BlockValidationState state;
if (!m_chainman.ActiveChainstate().ActivateBestChain(
state, a_recent_block, m_avalanche)) {
LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
state.ToString());
}
}
LOCK(cs_main);
const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
if (!pindex) {
return;
}
if (!BlockRequestAllowed(pindex)) {
LogPrint(BCLog::NET,
"%s: ignoring request from peer=%i for old "
"block that isn't in the main chain\n",
__func__, pfrom.GetId());
return;
}
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
// Disconnect node in case we have reached the outbound limit for serving
// historical blocks.
if (m_connman.OutboundTargetReached(true) &&
(((m_chainman.m_best_header != nullptr) &&
(m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() >
HISTORICAL_BLOCK_AGE)) ||
inv.IsMsgFilteredBlk()) &&
// nodes with the download permission may exceed target
!pfrom.HasPermission(NetPermissionFlags::Download)) {
LogPrint(BCLog::NET,
"historical block serving limit reached, disconnect peer=%d\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
// Avoid leaking prune-height by never sending blocks below the
// NODE_NETWORK_LIMITED threshold.
// Add a two-block buffer extension for possible races.
if (!pfrom.HasPermission(NetPermissionFlags::NoBan) &&
((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
NODE_NETWORK_LIMITED) &&
((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
(m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight >
(int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
LogPrint(BCLog::NET,
"Ignore block request below NODE_NETWORK_LIMITED "
"threshold, disconnect peer=%d\n",
pfrom.GetId());
// disconnect node and prevent it from stalling (would otherwise wait
// for the missing block)
pfrom.fDisconnect = true;
return;
}
// Pruned nodes may have deleted the block, so check whether it's available
// before trying to send.
if (!pindex->nStatus.hasData()) {
return;
}
std::shared_ptr<const CBlock> pblock;
if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
pblock = a_recent_block;
} else {
// Send block from disk
std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
assert(!"cannot load block from disk");
}
pblock = pblockRead;
}
if (inv.IsMsgBlk()) {
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::BLOCK, *pblock));
} else if (inv.IsMsgFilteredBlk()) {
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
if (auto tx_relay = peer.GetTxRelay()) {
LOCK(tx_relay->m_bloom_filter_mutex);
if (tx_relay->m_bloom_filter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
}
}
if (sendMerkleBlock) {
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any
// transactions in the block the client did not see. This avoids
// hurting performance by pointlessly requiring a round-trip.
// Note that there is currently no way for a node to request any
// single transaction we didn't send here - it must either
// disconnect and retry or request the full block. Thus, the
// protocol spec allows us to provide duplicate txn here;
// however, we MUST always provide at least what the remote
// peer needs.
typedef std::pair<size_t, uint256> PairType;
for (PairType &pair : merkleBlock.vMatchedTxn) {
m_connman.PushMessage(
&pfrom,
msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
}
}
// else
// no response
} else if (inv.IsMsgCmpctBlk()) {
// If a peer is asking for old blocks, we're almost guaranteed they
// won't have a useful mempool to match against a compact block, and
// we don't feel like constructing the object for them, so instead
// we respond with the full, non-compact block.
int nSendFlags = 0;
if (CanDirectFetch() &&
pindex->nHeight >=
m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
if (a_recent_compact_block &&
a_recent_compact_block->header.GetHash() ==
pindex->GetBlockHash()) {
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::CMPCTBLOCK,
*a_recent_compact_block));
} else {
CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
m_connman.PushMessage(
&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
cmpctblock));
}
} else {
m_connman.PushMessage(
&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
}
}
{
LOCK(peer.m_block_inv_mutex);
// Trigger the peer node to send a getblocks request for the next
// batch of inventory.
if (hash == peer.m_continuation_block) {
// Send immediately. This must send even if redundant, and
// we want it right after the last block so they don't wait for
// other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(
MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
peer.m_continuation_block = BlockHash();
}
}
}
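//! Determine whether or not a peer can request a transaction, and return it
//! (or nullptr if not found or not allowed). A transaction is returned if it
//! could have been announced in reply to a MEMPOOL request, has been in the
//! mempool longer than UNCONDITIONAL_RELAY_DELAY, or was recently announced
//! to this peer.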
CTransactionRef
PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
const std::chrono::seconds mempool_req,
const std::chrono::seconds now) {
auto txinfo = m_mempool.info(txid);
if (txinfo.tx) {
// If a TX could have been INVed in reply to a MEMPOOL request,
// or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
// unconditionally.
if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
return std::move(txinfo.tx);
}
}
{
LOCK(cs_main);
// Otherwise, the transaction must have been announced recently.
if (Assume(peer.GetTxRelay())
->m_recently_announced_invs.contains(txid)) {
// If it was, it can be relayed from either the mempool...
if (txinfo.tx) {
return std::move(txinfo.tx);
}
// ... or the relay pool.
auto mi = mapRelay.find(txid);
if (mi != mapRelay.end()) {
return mi->second;
}
}
}
return {};
}
//! Determine whether or not a peer can request a proof, and return it (or
//! nullptr if not found or not allowed).
avalanche::ProofRef
PeerManagerImpl::FindProofForGetData(const Peer &peer,
const avalanche::ProofId &proofid,
const std::chrono::seconds now) {
avalanche::ProofRef proof;
bool send_unconditionally =
m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
proof = peer.proof;
// If we have known this proof for long enough, allow
// requesting it.
return peer.registration_time <=
now - UNCONDITIONAL_RELAY_DELAY;
});
});
if (!proof) {
// Always send our local proof if it gets requested, assuming it's
// valid. This makes it easier to bind with peers upon startup, when
// the status of our proof is still unknown pending a block. Note that
// it still needs to have been announced first (presumably via an
// avahello message).
proof = m_avalanche->getLocalProof();
}
// We don't have this proof
if (!proof) {
return avalanche::ProofRef();
}
if (send_unconditionally) {
return proof;
}
// Otherwise, the proof must have been announced recently.
if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
return proof;
}
return avalanche::ProofRef();
}
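/**
 * Reply to the items in a peer's getdata queue. TX and AVA_PROOF requests at
 * the front of the queue are batched since they are common and cheap to
 * serve, at most one block request is served per call, and anything we
 * cannot provide is answered with a NOTFOUND message.
 */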
void PeerManagerImpl::ProcessGetData(
const Config &config, CNode &pfrom, Peer &peer,
const std::atomic<bool> &interruptMsgProc) {
AssertLockNotHeld(cs_main);
auto tx_relay = peer.GetTxRelay();
std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
std::vector<CInv> vNotFound;
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
const auto now{GetTime<std::chrono::seconds>()};
// Get last mempool request time
const auto mempool_req = tx_relay != nullptr
? tx_relay->m_last_mempool_req.load()
: std::chrono::seconds::min();
// Process as many TX or AVA_PROOF items from the front of the getdata
// queue as possible, since they're common and it's efficient to batch
// process them.
while (it != peer.m_getdata_requests.end()) {
if (interruptMsgProc) {
return;
}
// The send buffer provides backpressure. If there's no space in
// the buffer, pause processing until the next call.
if (pfrom.fPauseSend) {
break;
}
const CInv &inv = *it;
if (it->IsMsgStakeContender()) {
// Ignore requests for stake contenders. This type is only used for
// polling.
++it;
continue;
}
if (it->IsMsgProof()) {
if (!m_avalanche) {
vNotFound.push_back(inv);
++it;
continue;
}
const avalanche::ProofId proofid(inv.hash);
auto proof = FindProofForGetData(peer, proofid, now);
if (proof) {
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
pm.removeUnbroadcastProof(proofid);
});
} else {
vNotFound.push_back(inv);
}
++it;
continue;
}
if (it->IsMsgTx()) {
if (tx_relay == nullptr) {
// Ignore GETDATA requests for transactions from
// block-relay-only peers and peers that asked us not to
// announce transactions. Advance the iterator so this entry
// is not processed again.
++it;
continue;
}
const TxId txid(inv.hash);
CTransactionRef tx = FindTxForGetData(peer, txid, mempool_req, now);
if (tx) {
int nSendFlags = 0;
m_connman.PushMessage(
&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
m_mempool.RemoveUnbroadcastTx(txid);
// As we're going to send tx, make sure its unconfirmed parents
// are made requestable.
std::vector<TxId> parent_ids_to_add;
{
LOCK(m_mempool.cs);
auto txiter = m_mempool.GetIter(tx->GetId());
if (txiter) {
auto &pentry = *txiter;
const CTxMemPoolEntry::Parents &parents =
(*pentry)->GetMemPoolParentsConst();
parent_ids_to_add.reserve(parents.size());
for (const auto &parent : parents) {
if (parent.get()->GetTime() >
now - UNCONDITIONAL_RELAY_DELAY) {
parent_ids_to_add.push_back(
parent.get()->GetTx().GetId());
}
}
}
}
for (const TxId &parent_txid : parent_ids_to_add) {
// Relaying a transaction with a recent but unconfirmed
// parent.
if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
return !tx_relay->m_tx_inventory_known_filter
.contains(parent_txid))) {
tx_relay->m_recently_announced_invs.insert(parent_txid);
}
}
} else {
vNotFound.push_back(inv);
}
++it;
continue;
}
// It's neither a proof nor a transaction
break;
}
// Only process one BLOCK item per call, since they're uncommon and can be
// expensive to process.
if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
const CInv &inv = *it++;
if (inv.IsGenBlkMsg()) {
ProcessGetBlockData(config, pfrom, peer, inv);
}
// else: If the first item on the queue is an unknown type, we erase it
// and continue processing the queue on the next call.
}
peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it
// doesn't have to wait around forever. SPV clients care about this
// message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want
// to do that because they want to know about (and store and rebroadcast
// and risk analyze) the dependencies of transactions relevant to them,
// without having to download the entire memory pool. Also, other nodes
// can use these messages to automatically request a transaction from
// some other peer that announced it, and stop waiting for us to
// respond. In normal operation, we often send NOTFOUND messages for
// parents of transactions that we relay; if a peer is missing a parent,
// they may assume we have them and request the parents from us.
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
}
}
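//! Reply to a getblocktxn request with a BLOCKTXN message containing the
//! requested transactions from the given block. Out-of-bounds transaction
//! indices are treated as misbehavior.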
void PeerManagerImpl::SendBlockTransactions(
CNode &pfrom, Peer &peer, const CBlock &block,
const BlockTransactionsRequest &req) {
BlockTransactions resp(req);
for (size_t i = 0; i < req.indices.size(); i++) {
if (req.indices[i] >= block.vtx.size()) {
Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
return;
}
resp.txn[i] = block.vtx[req.indices[i]];
}
LOCK(cs_main);
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
int nSendFlags = 0;
m_connman.PushMessage(
&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
const Consensus::Params &consensusParams,
Peer &peer) {
// Do these headers have proof-of-work matching what's claimed?
if (!HasValidProofOfWork(headers, consensusParams)) {
Misbehaving(peer, 100, "header with invalid proof of work");
return false;
}
// Are these headers connected to each other?
if (!CheckHeadersAreContinuous(headers)) {
Misbehaving(peer, 20, "non-continuous headers sequence");
return false;
}
return true;
}
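/**
 * Compute the dynamic anti-DoS headers work threshold: the chain work of our
 * tip minus a 144-block buffer (so that headers forking from near the tip
 * are still accepted), but never less than the configured minimum chain
 * work.
 */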
arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() {
arith_uint256 near_chaintip_work = 0;
LOCK(cs_main);
if (m_chainman.ActiveChain().Tip() != nullptr) {
const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
// Use a 144 block buffer, so that we'll accept headers that fork from
// near our tip.
near_chaintip_work =
tip->nChainWork -
std::min<arith_uint256>(144 * GetBlockProof(*tip), tip->nChainWork);
}
return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
}
/**
* Special handling for unconnecting headers that might be part of a block
* announcement.
*
* We'll send a getheaders message in response to try to connect the chain.
*
* The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS in a row that
* don't connect before being given DoS points.
*
* Once a headers message is received that is valid and does connect,
* m_num_unconnecting_headers_msgs gets reset back to 0.
*/
void PeerManagerImpl::HandleFewUnconnectingHeaders(
CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
peer.m_num_unconnecting_headers_msgs++;
// Try to fill in the missing headers.
const CBlockIndex *best_header{
WITH_LOCK(cs_main, return m_chainman.m_best_header)};
if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
LogPrint(
BCLog::NET,
"received header %s: missing prev block %s, sending getheaders "
"(%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(), best_header->nHeight,
pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
}
// Set hashLastUnknownBlock for this peer, so that if we
// eventually get the headers - even from a different peer -
// we can use this peer to download.
WITH_LOCK(cs_main,
UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
// The peer may just be broken, so periodically assign DoS points if this
// condition persists.
if (peer.m_num_unconnecting_headers_msgs %
MAX_NUM_UNCONNECTING_HEADERS_MSGS ==
0) {
Misbehaving(peer, 20,
strprintf("%d non-connecting headers",
peer.m_num_unconnecting_headers_msgs));
}
}
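//! Return true if each header in the vector has the preceding header's hash
//! as its hashPrevBlock, i.e. the headers form a single connected chain.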
bool PeerManagerImpl::CheckHeadersAreContinuous(
const std::vector<CBlockHeader> &headers) const {
BlockHash hashLastBlock;
for (const CBlockHeader &header : headers) {
if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
return false;
}
hashLastBlock = header.GetHash();
}
return true;
}
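/**
 * If a low-work headers sync is in progress with this peer, feed the
 * received headers into it, requesting more headers and updating the presync
 * statistics as needed. Returns true if these headers were successfully
 * processed as part of that sync; in that case `headers` is replaced with
 * any headers that are now ready for further validation (possibly none).
 */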
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
if (peer.m_headers_sync) {
auto result = peer.m_headers_sync->ProcessNextHeaders(
headers, headers.size() == MAX_HEADERS_RESULTS);
if (result.request_more) {
auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
// If we were instructed to ask for a locator, it should not be
// empty.
Assume(!locator.vHave.empty());
if (!locator.vHave.empty()) {
// It should be impossible for the getheaders request to fail,
// because we should have cleared the last getheaders timestamp
// when processing the headers that triggered this call. But
// it may be possible to bypass this via compactblock
// processing, so check the result before logging just to be
// safe.
bool sent_getheaders =
MaybeSendGetHeaders(pfrom, locator, peer);
if (sent_getheaders) {
LogPrint(BCLog::NET,
"more getheaders (from %s) to peer=%d\n",
locator.vHave.front().ToString(), pfrom.GetId());
} else {
LogPrint(BCLog::NET,
"error sending next getheaders (from %s) to "
"continue sync with peer=%d\n",
locator.vHave.front().ToString(), pfrom.GetId());
}
}
}
if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
peer.m_headers_sync.reset(nullptr);
// Delete this peer's entry in m_headers_presync_stats.
// If this is m_headers_presync_bestpeer, it will be replaced later
// by the next peer that triggers the else{} branch below.
LOCK(m_headers_presync_mutex);
m_headers_presync_stats.erase(pfrom.GetId());
} else {
// Build statistics for this peer's sync.
HeadersPresyncStats stats;
stats.first = peer.m_headers_sync->GetPresyncWork();
if (peer.m_headers_sync->GetState() ==
HeadersSyncState::State::PRESYNC) {
stats.second = {peer.m_headers_sync->GetPresyncHeight(),
peer.m_headers_sync->GetPresyncTime()};
}
// Record the updated statistics for this peer.
LOCK(m_headers_presync_mutex);
m_headers_presync_stats[pfrom.GetId()] = stats;
auto best_it =
m_headers_presync_stats.find(m_headers_presync_bestpeer);
bool best_updated = false;
if (best_it == m_headers_presync_stats.end()) {
// If the cached best peer is outdated, iterate over all
// remaining ones (including the newly updated one) to find
// the best one.
NodeId peer_best{-1};
const HeadersPresyncStats *stat_best{nullptr};
for (const auto &[_peer, _stat] : m_headers_presync_stats) {
if (!stat_best || _stat > *stat_best) {
peer_best = _peer;
stat_best = &_stat;
}
}
m_headers_presync_bestpeer = peer_best;
best_updated = (peer_best == pfrom.GetId());
} else if (best_it->first == pfrom.GetId() ||
stats > best_it->second) {
// pfrom was and remains the best peer, or pfrom just became
// best.
m_headers_presync_bestpeer = pfrom.GetId();
best_updated = true;
}
if (best_updated && stats.second.has_value()) {
// If the best peer was updated and it is still in its first
// (PRESYNC) phase, signal.
m_headers_presync_should_signal = true;
}
}
if (result.success) {
// We only overwrite the headers passed in if processing was
// successful.
headers.swap(result.pow_validated_headers);
}
return result.success;
}
// Either we didn't have a sync in progress, or something went wrong
// processing these headers, or we are returning headers to the caller to
// process.
return false;
}
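/**
 * Check whether a chain of headers has too little total work to be stored
 * directly and, if so, either start a low-work headers sync with the peer
 * (when the message was full and more headers may follow) or ignore the
 * chain entirely. Returns true if the caller needs no further processing of
 * `headers`.
 */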
bool PeerManagerImpl::TryLowWorkHeadersSync(
Peer &peer, CNode &pfrom, const CBlockIndex *chain_start_header,
std::vector<CBlockHeader> &headers) {
// Calculate the total work on this chain.
arith_uint256 total_work =
chain_start_header->nChainWork + CalculateHeadersWork(headers);
// Our dynamic anti-DoS threshold (minimum work required on a headers chain
// before we'll store it)
arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
// Avoid DoS via low-difficulty-headers by only processing if the headers
// are part of a chain with sufficient work.
if (total_work < minimum_chain_work) {
// Only try to sync with this peer if their headers message was full;
// otherwise they don't have more headers after this so no point in
// trying to sync their too-little-work chain.
if (headers.size() == MAX_HEADERS_RESULTS) {
// Note: we could advance to the last header in this set that is
// known to us, rather than starting at the first header (which we
// may already have); however this is unlikely to matter much since
// ProcessHeadersMessage() already handles the case where all
// headers in a received message are already known and are
// ancestors of m_best_header or chainActive.Tip(), by skipping
// this logic in that case. So even if the first header in this set
// of headers is known, some header in this set must be new, so
// advancing to the first unknown header would have only a small
// effect.
LOCK(peer.m_headers_sync_mutex);
peer.m_headers_sync.reset(
new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
chain_start_header, minimum_chain_work));
// Now that a HeadersSyncState object for tracking this
// synchronization has been created, process the headers using it as
// normal.
return IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
}
LogPrint(BCLog::NET,
"Ignoring low-work chain (height=%u) from peer=%d\n",
chain_start_header->nHeight + headers.size(), pfrom.GetId());
// Since this is a low-work headers chain, no further processing is
// required.
headers = {};
return true;
}
return false;
}
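//! Return true if the given block index is an ancestor of (or equal to) our
//! best known header, or is contained in the active chain.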
bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
return header != nullptr &&
((m_chainman.m_best_header != nullptr &&
header ==
m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
m_chainman.ActiveChain().Contains(header));
}
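//! Send a getheaders message to the peer using the given locator, unless one
//! is already in flight (i.e. sent within the last HEADERS_RESPONSE_TIME).
//! Returns true if the message was sent.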
bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
const CBlockLocator &locator,
Peer &peer) {
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
const auto current_time = NodeClock::now();
// Only allow a new getheaders message to go out if we don't have a recent
// one already in-flight
if (current_time - peer.m_last_getheaders_timestamp >
HEADERS_RESPONSE_TIME) {
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
peer.m_last_getheaders_timestamp = current_time;
return true;
}
return false;
}
/**
* Given a new headers tip ending in pindexLast, potentially request blocks
* towards that tip. We require that the given tip have at least as much work as
* our tip, and for our current tip to be "close to synced" (see
* CanDirectFetch()).
*/
void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
CNode &pfrom,
const CBlockIndex *pindexLast) {
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
LOCK(cs_main);
CNodeState *nodestate = State(pfrom.GetId());
if (CanDirectFetch() && pindexLast->IsValid(BlockValidity::TREE) &&
m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
std::vector<const CBlockIndex *> vToFetch;
const CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to
// a limit.
while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!pindexWalk->nStatus.hasData() &&
!IsBlockRequested(pindexWalk->GetBlockHash())) {
// We don't have this block, and it's not yet in flight.
vToFetch.push_back(pindexWalk);
}
pindexWalk = pindexWalk->pprev;
}
// If pindexWalk still isn't on our main chain, we're looking at a
// very large reorg at a time we think we're close to caught up to
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
} else {
std::vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
if (nodestate->nBlocksInFlight >=
MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
}
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
BlockRequested(config, pfrom.GetId(), *pindex);
LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom.GetId());
}
if (vGetData.size() > 1) {
LogPrint(BCLog::NET,
"Downloading blocks toward %s (%d) via headers "
"direct fetch\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
}
if (vGetData.size() > 0) {
if (!m_opts.ignore_incoming_txs &&
nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
mapBlocksInFlight.size() == 1 &&
pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
// In any case, we want to download using a compact
// block, not a regular one.
vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
}
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
}
}
}
}
/**
* Given receipt of headers from a peer ending in pindexLast, along with
* whether that header was new and whether the headers message was full,
* update the state we keep for the peer.
*/
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
CNode &pfrom, Peer &peer, const CBlockIndex *pindexLast,
bool received_new_header, bool may_have_more_headers) {
if (peer.m_num_unconnecting_headers_msgs > 0) {
LogPrint(
BCLog::NET,
"peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n",
pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
}
peer.m_num_unconnecting_headers_msgs = 0;
LOCK(cs_main);
CNodeState *nodestate = State(pfrom.GetId());
assert(pindexLast);
UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
// From here, pindexBestKnownBlock should be guaranteed to be non-null,
// because it is set in UpdateBlockAvailability. Some nullptr checks are
// still present, however, as belt-and-suspenders.
if (received_new_header &&
pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
nodestate->m_last_block_announcement = GetTime();
}
// If we're in IBD, we want outbound peers that will serve us a useful
// chain. Disconnect peers that are on chains with insufficient work.
if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
!may_have_more_headers) {
// When the headers message was not full (i.e. may_have_more_headers
// is false), we know we have no more headers to fetch from this
// peer.
if (nodestate->pindexBestKnownBlock &&
nodestate->pindexBestKnownBlock->nChainWork <
m_chainman.MinimumChainWork()) {
// This peer has too little work on their headers chain to help
// us sync -- disconnect if it is an outbound disconnection
// candidate.
// Note: We compare their tip to the minimum chain work (rather than
// m_chainman.ActiveChain().Tip()) because we won't start block
// download until we have a headers chain that has at least
// the minimum chain work, even if a peer has a chain past our tip,
// as an anti-DoS measure.
if (pfrom.IsOutboundOrBlockRelayConn()) {
LogPrintf("Disconnecting outbound peer %d -- headers "
"chain has insufficient work\n",
pfrom.GetId());
pfrom.fDisconnect = true;
}
}
}
// If this is an outbound full-relay peer, check to see if we should
// protect it from the bad/lagging chain logic.
// Note that outbound block-relay peers are excluded from this
// protection, and thus always subject to eviction under the bad/lagging
// chain logic.
// See ChainSyncTimeoutState.
if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
nodestate->pindexBestKnownBlock != nullptr) {
if (m_outbound_peers_with_protect_from_disconnect <
MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
nodestate->pindexBestKnownBlock->nChainWork >=
m_chainman.ActiveChain().Tip()->nChainWork &&
!nodestate->m_chain_sync.m_protect) {
LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n",
pfrom.GetId());
nodestate->m_chain_sync.m_protect = true;
++m_outbound_peers_with_protect_from_disconnect;
}
}
}
void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
Peer &peer,
std::vector<CBlockHeader> &&headers,
bool via_compact_block) {
size_t nCount = headers.size();
if (nCount == 0) {
// Nothing interesting. Stop asking this peer for more headers.
// If we were in the middle of headers sync, receiving an empty headers
// message suggests that the peer suddenly has nothing to give us
// (perhaps it reorged to our chain). Clear download state for this
// peer.
LOCK(peer.m_headers_sync_mutex);
if (peer.m_headers_sync) {
peer.m_headers_sync.reset(nullptr);
LOCK(m_headers_presync_mutex);
m_headers_presync_stats.erase(pfrom.GetId());
}
return;
}
// Before we do any processing, make sure these pass basic sanity checks.
// We'll rely on headers having valid proof-of-work further down, as an
// anti-DoS criterion (note: this check is required before passing any
// headers into HeadersSyncState).
if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
// Misbehaving() calls are handled within CheckHeadersPoW(), so we can
// just return. (Note that even if a header is announced via compact
// block, the header itself should be valid, so this type of error can
// always be punished.)
return;
}
const CBlockIndex *pindexLast = nullptr;
// We'll set already_validated_work to true if these headers are
// successfully processed as part of a low-work headers sync in progress
// (either in PRESYNC or REDOWNLOAD phase).
// If true, this will mean that any headers returned to us (ie during
// REDOWNLOAD) can be validated without further anti-DoS checks.
bool already_validated_work = false;
// If we're in the middle of headers sync, let it do its magic.
bool have_headers_sync = false;
{
LOCK(peer.m_headers_sync_mutex);
already_validated_work =
IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
// The headers we passed in may have been:
// - untouched, perhaps if no headers-sync was in progress, or some
// failure occurred
// - erased, such as if the headers were successfully processed and no
// additional headers processing needs to take place (such as if we
// are still in PRESYNC)
// - replaced with headers that are now ready for validation, such as
// during the REDOWNLOAD phase of a low-work headers sync.
// So just check whether we still have headers that we need to process,
// or not.
if (headers.empty()) {
return;
}
have_headers_sync = !!peer.m_headers_sync;
}
// Do these headers connect to something in our block index?
const CBlockIndex *chain_start_header{
WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(
headers[0].hashPrevBlock))};
bool headers_connect_blockindex{chain_start_header != nullptr};
if (!headers_connect_blockindex) {
if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
// If this looks like it could be a BIP 130 block announcement, use
// special logic for handling headers that don't connect, as this
// could be benign.
HandleFewUnconnectingHeaders(pfrom, peer, headers);
} else {
Misbehaving(peer, 10, "invalid header received");
}
return;
}
// If the headers we received are already in memory and an ancestor of
// m_best_header or our tip, skip anti-DoS checks. These headers will not
// use any more memory (and we are not leaking information that could be
// used to fingerprint us).
const CBlockIndex *last_received_header{nullptr};
{
LOCK(cs_main);
last_received_header =
m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
already_validated_work = true;
}
}
// If our peer has NetPermissionFlags::NoBan privileges, then bypass our
// anti-DoS logic (this saves bandwidth when we connect to a trusted peer
// on startup).
if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
already_validated_work = true;
}
// At this point, the headers connect to something in our block index.
// Do anti-DoS checks to determine if we should process or store for later
// processing.
if (!already_validated_work &&
TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
// If we successfully started a low-work headers sync, then there
// should be no headers to process any further.
Assume(headers.empty());
return;
}
// At this point, we have a set of headers with sufficient work on them
// which can be processed.
// If we don't have the last header, then this peer will have given us
// something new (if these headers are valid).
bool received_new_header{last_received_header == nullptr};
// Now process all the headers.
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true,
state, &pindexLast)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
"invalid header received");
return;
}
}
Assume(pindexLast);
// Consider fetching more headers if we are not using our headers-sync
// mechanism.
if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
// Headers message had its maximum size; the peer may have more headers.
if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
LogPrint(
BCLog::NET,
"more getheaders (%d) to end to peer=%d (startheight:%d)\n",
pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
}
}
UpdatePeerStateForReceivedHeaders(pfrom, peer, pindexLast,
received_new_header,
nCount == MAX_HEADERS_RESULTS);
// Consider immediately downloading blocks.
HeadersDirectFetchBlocks(config, pfrom, pindexLast);
}
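/**
 * Handle a transaction that failed validation. Unless it merely had missing
 * inputs, the txid is added to the appropriate reject filter so it is not
 * downloaded again, the sender may be punished, and the transaction is
 * evicted from the orphanage.
 */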
void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
const CTransactionRef &ptx,
const TxValidationState &state,
bool maybe_add_extra_compact_tx) {
AssertLockNotHeld(m_peer_mutex);
AssertLockHeld(g_msgproc_mutex);
AssertLockHeld(cs_main);
const TxId &txid = ptx->GetId();
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n",
txid.ToString(), nodeid, state.ToString());
if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
return;
}
if (state.GetResult() == TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
// If the result is TX_PACKAGE_RECONSIDERABLE, add it to
// m_recent_rejects_package_reconsiderable because we should not
// download or submit this transaction by itself again, but may submit
// it as part of a package later.
m_recent_rejects_package_reconsiderable.insert(txid);
} else {
m_recent_rejects.insert(txid);
}
m_txrequest.ForgetInvId(txid);
if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
MaybePunishNodeForTx(nodeid, state);
// If the tx failed in ProcessOrphanTx, it should be removed from the
// orphanage unless the tx was still missing inputs. If the tx was not in
// the orphanage, EraseTx does nothing and returns 0.
if (m_mempool.withOrphanage([&txid](TxOrphanage &orphanage) {
return orphanage.EraseTx(txid);
}) > 0) {
LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s\n",
txid.ToString());
}
}
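//! Handle a transaction that was accepted to the mempool: forget any
//! outstanding requests for it, schedule its orphan children for
//! reconsideration, and relay it to our peers.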
void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef &tx) {
AssertLockNotHeld(m_peer_mutex);
AssertLockHeld(g_msgproc_mutex);
AssertLockHeld(cs_main);
// As this version of the transaction was acceptable, we can forget about
// any requests for it. No-op if the tx is not in txrequest.
m_txrequest.ForgetInvId(tx->GetId());
m_mempool.withOrphanage([&tx](TxOrphanage &orphanage) {
orphanage.AddChildrenToWorkSet(*tx);
// If it came from the orphanage, remove it. No-op if the tx is not in
// txorphanage.
orphanage.EraseTx(tx->GetId());
});
LogPrint(
BCLog::MEMPOOL,
"AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
nodeid, tx->GetId().ToString(), m_mempool.size(),
m_mempool.DynamicMemoryUsage() / 1000);
RelayTransaction(tx->GetId());
}
void PeerManagerImpl::ProcessPackageResult(
const PackageToValidate &package_to_validate,
const PackageMempoolAcceptResult &package_result) {
AssertLockNotHeld(m_peer_mutex);
AssertLockHeld(g_msgproc_mutex);
AssertLockHeld(cs_main);
const auto &package = package_to_validate.m_txns;
const auto &senders = package_to_validate.m_senders;
if (package_result.m_state.IsInvalid()) {
m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
}
// We currently only expect to process 1-parent-1-child packages. Remove if
// this changes.
if (!Assume(package.size() == 2)) {
return;
}
// Iterate backwards to erase in-package descendants from the orphanage
// before they become relevant in AddChildrenToWorkSet.
auto package_iter = package.rbegin();
auto senders_iter = senders.rbegin();
while (package_iter != package.rend()) {
const auto &tx = *package_iter;
const NodeId nodeid = *senders_iter;
const auto it_result{package_result.m_tx_results.find(tx->GetId())};
// It is not guaranteed that a result exists for every transaction.
if (it_result != package_result.m_tx_results.end()) {
const auto &tx_result = it_result->second;
switch (tx_result.m_result_type) {
case MempoolAcceptResult::ResultType::VALID: {
ProcessValidTx(nodeid, tx);
break;
}
case MempoolAcceptResult::ResultType::INVALID: {
// Don't add to vExtraTxnForCompact, as these transactions
// should have already been added there when added to the
// orphanage or rejected for TX_PACKAGE_RECONSIDERABLE.
// This should be updated if package submission is ever used
// for transactions that haven't already been validated
// before.
ProcessInvalidTx(nodeid, tx, tx_result.m_state,
/*maybe_add_extra_compact_tx=*/false);
break;
}
case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: {
// AlreadyHaveTx() should be catching transactions that are
// already in mempool.
Assume(false);
break;
}
}
}
package_iter++;
senders_iter++;
}
}
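/**
 * Try to form a 1-parent-1-child package for the given parent transaction by
 * pairing it with a child from the orphanage, preferring children sent by
 * the same peer and skipping combinations whose package hash was already
 * rejected. Returns std::nullopt if no suitable candidate is found.
 */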
std::optional<PeerManagerImpl::PackageToValidate>
PeerManagerImpl::Find1P1CPackage(const CTransactionRef &ptx, NodeId nodeid) {
AssertLockNotHeld(m_peer_mutex);
AssertLockHeld(g_msgproc_mutex);
AssertLockHeld(cs_main);
const auto &parent_txid{ptx->GetId()};
Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
// Prefer children from this peer. This helps prevent censorship attempts in
// which an attacker sends lots of fake children for the parent, and we
// (unluckily) keep selecting the fake children instead of the real one
// provided by the honest peer.
const auto cpfp_candidates_same_peer{
m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
return orphanage.GetChildrenFromSamePeer(ptx, nodeid);
})};
// These children should be sorted from newest to oldest.
for (const auto &child : cpfp_candidates_same_peer) {
Package maybe_cpfp_package{ptx, child};
if (!m_recent_rejects_package_reconsiderable.contains(
GetPackageHash(maybe_cpfp_package))) {
return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
nodeid};
}
}
// If no suitable candidate from the same peer is found, also try children
// that were provided by a different peer. This is useful because sometimes
// multiple peers announce both transactions to us, and we happen to
// download them from different peers (we wouldn't have known that these 2
// transactions are related). We still want to find 1p1c packages then.
//
// If we start tracking all announcers of orphans, we can restrict this
// logic to parent + child pairs in which both were provided by the same
// peer, i.e. delete this step.
const auto cpfp_candidates_different_peer{
m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
return orphanage.GetChildrenFromDifferentPeer(ptx, nodeid);
})};
// Find the first 1p1c that hasn't already been rejected. We randomize the
// order to not create a bias that attackers can use to delay package
// acceptance.
//
// Create a random permutation of the indices.
std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
std::iota(tx_indices.begin(), tx_indices.end(), 0);
Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
for (const auto index : tx_indices) {
// If we already tried a package and failed for any reason, the combined
// hash was cached in m_recent_rejects_package_reconsiderable.
const auto [child_tx, child_sender] =
cpfp_candidates_different_peer.at(index);
Package maybe_cpfp_package{ptx, child_tx};
if (!m_recent_rejects_package_reconsiderable.contains(
GetPackageHash(maybe_cpfp_package))) {
return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
child_sender};
}
}
return std::nullopt;
}
bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
AssertLockHeld(g_msgproc_mutex);
LOCK(cs_main);
while (CTransactionRef porphanTx =
m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
return orphanage.GetTxToReconsider(peer.m_id);
})) {
const MempoolAcceptResult result =
m_chainman.ProcessTransaction(porphanTx);
const TxValidationState &state = result.m_state;
const TxId &orphanTxId = porphanTx->GetId();
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s\n",
orphanTxId.ToString());
ProcessValidTx(peer.m_id, porphanTx);
return true;
}
if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
LogPrint(BCLog::TXPACKAGES,
" invalid orphan tx %s from peer=%d. %s\n",
orphanTxId.ToString(), peer.m_id, state.ToString());
if (Assume(state.IsInvalid() &&
state.GetResult() != TxValidationResult::TX_NO_MEMPOOL &&
state.GetResult() !=
TxValidationResult::TX_RESULT_UNSET)) {
ProcessInvalidTx(peer.m_id, porphanTx, state,
/*maybe_add_extra_compact_tx=*/false);
}
return true;
}
}
return false;
}
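/**
 * Validate a getcfilters/getcfheaders/getcfcheckpt request and resolve the
 * stop block index and filter index. Returns false (in most failure cases
 * also disconnecting the peer) if the filter type is unsupported, the stop
 * hash is unknown or not allowed, the height range is inverted or too large,
 * or the filter index is unavailable.
 */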
bool PeerManagerImpl::PrepareBlockFilterRequest(
CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
const BlockHash &stop_hash, uint32_t max_height_diff,
const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
const bool supported_filter_type =
(filter_type == BlockFilterType::BASIC &&
(peer.m_our_services & NODE_COMPACT_FILTERS));
if (!supported_filter_type) {
LogPrint(BCLog::NET,
"peer %d requested unsupported block filter type: %d\n",
node.GetId(), static_cast<uint8_t>(filter_type));
node.fDisconnect = true;
return false;
}
{
LOCK(cs_main);
stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
// Check that the stop block exists and the peer would be allowed to
// fetch it.
if (!stop_index || !BlockRequestAllowed(stop_index)) {
LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
node.GetId(), stop_hash.ToString());
node.fDisconnect = true;
return false;
}
}
uint32_t stop_height = stop_index->nHeight;
if (start_height > stop_height) {
LogPrint(BCLog::NET,
"peer %d sent invalid getcfilters/getcfheaders with "
"start height %d and stop height %d\n",
node.GetId(), start_height, stop_height);
node.fDisconnect = true;
return false;
}
if (stop_height - start_height >= max_height_diff) {
LogPrint(BCLog::NET,
"peer %d requested too many cfilters/cfheaders: %d / %d\n",
node.GetId(), stop_height - start_height + 1, max_height_diff);
node.fDisconnect = true;
return false;
}
filter_index = GetBlockFilterIndex(filter_type);
if (!filter_index) {
LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
BlockFilterTypeName(filter_type));
return false;
}
return true;
}
void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
CDataStream &vRecv) {
uint8_t filter_type_ser;
uint32_t start_height;
BlockHash stop_hash;
vRecv >> filter_type_ser >> start_height >> stop_hash;
const BlockFilterType filter_type =
static_cast<BlockFilterType>(filter_type_ser);
const CBlockIndex *stop_index;
BlockFilterIndex *filter_index;
if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
filter_index)) {
return;
}
std::vector<BlockFilter> filters;
if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
LogPrint(BCLog::NET,
"Failed to find block filter in index: filter_type=%s, "
"start_height=%d, stop_hash=%s\n",
BlockFilterTypeName(filter_type), start_height,
stop_hash.ToString());
return;
}
for (const auto &filter : filters) {
CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
.Make(NetMsgType::CFILTER, filter);
m_connman.PushMessage(&node, std::move(msg));
}
}
void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
CDataStream &vRecv) {
uint8_t filter_type_ser;
uint32_t start_height;
BlockHash stop_hash;
vRecv >> filter_type_ser >> start_height >> stop_hash;
const BlockFilterType filter_type =
static_cast<BlockFilterType>(filter_type_ser);
const CBlockIndex *stop_index;
BlockFilterIndex *filter_index;
if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
filter_index)) {
return;
}
uint256 prev_header;
if (start_height > 0) {
const CBlockIndex *const prev_block =
stop_index->GetAncestor(static_cast<int>(start_height - 1));
if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
LogPrint(BCLog::NET,
"Failed to find block filter header in index: "
"filter_type=%s, block_hash=%s\n",
BlockFilterTypeName(filter_type),
prev_block->GetBlockHash().ToString());
return;
}
}
std::vector<uint256> filter_hashes;
if (!filter_index->LookupFilterHashRange(start_height, stop_index,
filter_hashes)) {
LogPrint(BCLog::NET,
"Failed to find block filter hashes in index: filter_type=%s, "
"start_height=%d, stop_hash=%s\n",
BlockFilterTypeName(filter_type), start_height,
stop_hash.ToString());
return;
}
CSerializedNetMsg msg =
CNetMsgMaker(node.GetCommonVersion())
.Make(NetMsgType::CFHEADERS, filter_type_ser,
stop_index->GetBlockHash(), prev_header, filter_hashes);
m_connman.PushMessage(&node, std::move(msg));
}
void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
CDataStream &vRecv) {
uint8_t filter_type_ser;
BlockHash stop_hash;
vRecv >> filter_type_ser >> stop_hash;
const BlockFilterType filter_type =
static_cast<BlockFilterType>(filter_type_ser);
const CBlockIndex *stop_index;
BlockFilterIndex *filter_index;
if (!PrepareBlockFilterRequest(
node, peer, filter_type, /*start_height=*/0, stop_hash,
/*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
stop_index, filter_index)) {
return;
}
std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
// Populate headers.
const CBlockIndex *block_index = stop_index;
for (int i = headers.size() - 1; i >= 0; i--) {
int height = (i + 1) * CFCHECKPT_INTERVAL;
block_index = block_index->GetAncestor(height);
if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
LogPrint(BCLog::NET,
"Failed to find block filter header in index: "
"filter_type=%s, block_hash=%s\n",
BlockFilterTypeName(filter_type),
block_index->GetBlockHash().ToString());
return;
}
}
CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
.Make(NetMsgType::CFCHECKPT, filter_type_ser,
stop_index->GetBlockHash(), headers);
m_connman.PushMessage(&node, std::move(msg));
}
bool IsAvalancheMessageType(const std::string &msg_type) {
return msg_type == NetMsgType::AVAHELLO ||
msg_type == NetMsgType::AVAPOLL ||
msg_type == NetMsgType::AVARESPONSE ||
msg_type == NetMsgType::AVAPROOF ||
msg_type == NetMsgType::GETAVAADDR ||
msg_type == NetMsgType::GETAVAPROOFS ||
msg_type == NetMsgType::AVAPROOFS ||
msg_type == NetMsgType::AVAPROOFSREQ;
}
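/**
 * Compute our avalanche vote for the given block:
 *   0 (accepted), 1 (invalid), 2 (parked), 3 (fork),
 *  -1 (unknown), -2 (missing data), -3 (pending connection/rejection).
 */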
uint32_t
PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
AssertLockHeld(cs_main);
const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
// Unknown block.
if (!pindex) {
return -1;
}
// Invalid block
if (pindex->nStatus.isInvalid()) {
return 1;
}
// Parked block
if (pindex->nStatus.isOnParkedChain()) {
return 2;
}
const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
// Active block.
if (pindex == pindexFork) {
return 0;
}
// Fork block.
if (pindexFork != pindexTip) {
return 3;
}
// Missing block data.
if (!pindex->nStatus.hasData()) {
return -2;
}
// This block is built on top of the tip, we have the data, it
// is pending connection or rejection.
return -3;
};
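/**
 * Compute our avalanche vote for the given transaction:
 *   0 (accepted in mempool or a recent block), 1 (rejected),
 *   2 (conflicting), -1 (unknown), -2 (orphan).
 */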
uint32_t PeerManagerImpl::GetAvalancheVoteForTx(const TxId &id) const {
// Accepted in mempool, or in a recent block
if (m_mempool.exists(id) ||
WITH_LOCK(m_recent_confirmed_transactions_mutex,
return m_recent_confirmed_transactions.contains(id))) {
return 0;
}
// Conflicting tx
if (m_mempool.withConflicting([&id](const TxConflicting &conflicting) {
return conflicting.HaveTx(id);
})) {
return 2;
}
// Invalid tx
if (m_recent_rejects.contains(id)) {
return 1;
}
// Orphan tx
if (m_mempool.withOrphanage([&id](const TxOrphanage &orphanage) {
return orphanage.HaveTx(id);
})) {
return -2;
}
// Unknown tx
return -1;
};
/**
* Decide a response for an Avalanche poll about the given proof.
*
* @param[in] id The id of the proof being polled for
* @return Our current vote for the proof
*/
static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche,
const avalanche::ProofId &id) {
return avalanche.withPeerManager([&id](avalanche::PeerManager &pm) {
// Rejected proof
if (pm.isInvalid(id)) {
return 1;
}
// The proof is actively bound to a peer
if (pm.isBoundToPeer(id)) {
return 0;
}
// Unknown proof
if (!pm.exists(id)) {
return -1;
}
// Immature proof
if (pm.isImmature(id)) {
return 2;
}
// Not immature, but in conflict with an actively bound proof
if (pm.isInConflictingPool(id)) {
return 3;
}
// The proof is known, not rejected, not immature, not a conflict, but
// for some reason unbound. This should not happen if the above pools
// are managed correctly, but added for robustness.
return -2;
});
};
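//! Hand a complete block over to the chainstate manager for processing.
//! Updates the peer's last-block time on success; otherwise stops tracking
//! the block's source.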
void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
const std::shared_ptr<const CBlock> &block,
bool force_processing,
bool min_pow_checked) {
bool new_block{false};
m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
&new_block, m_avalanche);
if (new_block) {
node.m_last_block_time = GetTime<std::chrono::seconds>();
} else {
LOCK(cs_main);
mapBlockSource.erase(block->GetHash());
}
}
void PeerManagerImpl::ProcessMessage(
const Config &config, CNode &pfrom, const std::string &msg_type,
CDataStream &vRecv, const std::chrono::microseconds time_received,
const std::atomic<bool> &interruptMsgProc) {
AssertLockHeld(g_msgproc_mutex);
LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
PeerRef peer = GetPeerRef(pfrom.GetId());
if (peer == nullptr) {
return;
}
if (IsAvalancheMessageType(msg_type)) {
if (!m_avalanche) {
LogPrint(BCLog::AVALANCHE,
"Avalanche is not initialized, ignoring %s message\n",
msg_type);
return;
}
}
if (msg_type == NetMsgType::VERSION) {
// Each connection can only send one version message
if (pfrom.nVersion != 0) {
Misbehaving(*peer, 1, "redundant version message");
return;
}
int64_t nTime;
CService addrMe;
uint64_t nNonce = 1;
ServiceFlags nServices;
int nVersion;
std::string cleanSubVer;
int starting_height = -1;
bool fRelay = true;
uint64_t nExtraEntropy = 1;
vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
if (nTime < 0) {
nTime = 0;
}
// Ignore the addrMe service bits sent by the peer
vRecv.ignore(8);
vRecv >> addrMe;
if (!pfrom.IsInboundConn()) {
m_addrman.SetServices(pfrom.addr, nServices);
}
if (pfrom.ExpectServicesFromConn() &&
!HasAllDesirableServiceFlags(nServices)) {
LogPrint(BCLog::NET,
"peer=%d does not offer the expected services "
"(%08x offered, %08x expected); disconnecting\n",
pfrom.GetId(), nServices,
GetDesirableServiceFlags(nServices));
pfrom.fDisconnect = true;
return;
}
if (pfrom.IsAvalancheOutboundConnection() &&
!(nServices & NODE_AVALANCHE)) {
LogPrint(
BCLog::AVALANCHE,
"peer=%d does not offer the avalanche service; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
if (nVersion < MIN_PEER_PROTO_VERSION) {
// disconnect from peers older than this proto version
LogPrint(BCLog::NET,
"peer=%d using obsolete version %i; disconnecting\n",
pfrom.GetId(), nVersion);
pfrom.fDisconnect = true;
return;
}
if (!vRecv.empty()) {
// The version message includes information about the sending node
// which we don't use:
// - 8 bytes (service bits)
// - 16 bytes (ipv6 address)
// - 2 bytes (port)
vRecv.ignore(26);
vRecv >> nNonce;
}
if (!vRecv.empty()) {
std::string strSubVer;
vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
cleanSubVer = SanitizeString(strSubVer);
}
if (!vRecv.empty()) {
vRecv >> starting_height;
}
if (!vRecv.empty()) {
vRecv >> fRelay;
}
if (!vRecv.empty()) {
vRecv >> nExtraEntropy;
}
// Disconnect if we connected to ourself
if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
LogPrintf("connected to self at %s, disconnecting\n",
pfrom.addr.ToString());
pfrom.fDisconnect = true;
return;
}
if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
SeenLocal(addrMe);
}
// Inbound peers send us their version message when they connect.
// We send our version message in response.
if (pfrom.IsInboundConn()) {
PushNodeVersion(config, pfrom, *peer);
}
// Change version
const int greatest_common_version =
std::min(nVersion, PROTOCOL_VERSION);
pfrom.SetCommonVersion(greatest_common_version);
pfrom.nVersion = nVersion;
const CNetMsgMaker msg_maker(greatest_common_version);
m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
// Signal ADDRv2 support (BIP155).
m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
pfrom.m_has_all_wanted_services =
HasAllDesirableServiceFlags(nServices);
peer->m_their_services = nServices;
pfrom.SetAddrLocal(addrMe);
{
LOCK(pfrom.m_subver_mutex);
pfrom.cleanSubVer = cleanSubVer;
}
peer->m_starting_height = starting_height;
// We only initialize the m_tx_relay data structure if:
// - this isn't an outbound block-relay-only connection; and
// - fRelay=true or we're offering NODE_BLOOM to this peer
// (NODE_BLOOM means that the peer may turn on tx relay later)
if (!pfrom.IsBlockOnlyConn() &&
(fRelay || (peer->m_our_services & NODE_BLOOM))) {
auto *const tx_relay = peer->SetTxRelay();
{
LOCK(tx_relay->m_bloom_filter_mutex);
// If fRelay is false, this is set to true once we receive the
// first filter* message
tx_relay->m_relay_txs = fRelay;
}
if (fRelay) {
pfrom.m_relays_txs = true;
}
}
pfrom.nRemoteHostNonce = nNonce;
pfrom.nRemoteExtraEntropy = nExtraEntropy;
// Potentially mark this peer as a preferred download peer.
{
LOCK(cs_main);
CNodeState *state = State(pfrom.GetId());
state->fPreferredDownload =
(!pfrom.IsInboundConn() ||
pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
!pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
m_num_preferred_download_peers += state->fPreferredDownload;
}
// Self advertisement & GETADDR logic
if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
// For outbound peers, we try to relay our address (so that other
// nodes can try to find us more quickly, as we have no guarantee
// that an outbound peer is even aware of how to reach us) and do a
// one-time address fetch (to help populate/update our addrman). If
// we're starting up for the first time, our addrman may be pretty
// empty and no one will know who we are, so these mechanisms are
// important to help us connect to the network.
//
// We skip this for block-relay-only peers. We want to avoid
// potentially leaking addr information and we do not want to
// indicate to the peer that we will participate in addr relay.
if (fListen &&
!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
CAddress addr{GetLocalAddress(pfrom.addr), peer->m_our_services,
AdjustedTime()};
if (addr.IsRoutable()) {
LogPrint(BCLog::NET,
"ProcessMessages: advertising address %s\n",
addr.ToString());
PushAddress(*peer, addr);
} else if (IsPeerAddrLocalGood(&pfrom)) {
// Override just the address with whatever the peer sees us
// as. Leave the port in addr as it was returned by
// GetLocalAddress() above, as this is an outbound
// connection and the peer cannot observe our listening
// port.
addr.SetIP(addrMe);
LogPrint(BCLog::NET,
"ProcessMessages: advertising address %s\n",
addr.ToString());
PushAddress(*peer, addr);
}
}
// Get recent addresses
m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
.Make(NetMsgType::GETADDR));
peer->m_getaddr_sent = true;
// When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
// addresses in response (bypassing the
// MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
WITH_LOCK(peer->m_addr_token_bucket_mutex,
peer->m_addr_token_bucket += m_opts.max_addr_to_send);
}
if (!pfrom.IsInboundConn()) {
// For non-inbound connections, we update the addrman to record
// connection success so that addrman will have an up-to-date
// notion of which peers are online and available.
//
// While we strive to not leak information about block-relay-only
// connections via the addrman, not moving an address to the tried
// table is also potentially detrimental because new-table entries
// are subject to eviction in the event of addrman collisions. We
// mitigate the information-leak by never calling
// AddrMan::Connected() on block-relay-only peers; see
// FinalizeNode().
//
// This moves an address from New to Tried table in Addrman,
// resolves tried-table collisions, etc.
m_addrman.Good(pfrom.addr);
}
std::string remoteAddr;
if (fLogIPs) {
remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
}
LogPrint(BCLog::NET,
"receive version message: [%s] %s: version %d, blocks=%d, "
"us=%s, txrelay=%d, peer=%d%s\n",
pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
peer->m_starting_height, addrMe.ToString(), fRelay,
pfrom.GetId(), remoteAddr);
int64_t currentTime = GetTime();
int64_t nTimeOffset = nTime - currentTime;
pfrom.nTimeOffset = nTimeOffset;
if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
// Ignore time offsets that are improbable (before the Genesis
// block) and may underflow our adjusted time.
Misbehaving(*peer, 20,
"Ignoring invalid timestamp in version message");
} else if (!pfrom.IsInboundConn()) {
// Don't use timedata samples from inbound peers to make it
// harder for others to tamper with our adjusted time.
AddTimeData(pfrom.addr, nTimeOffset);
}
// Feeler connections exist only to verify whether an address is online.
if (pfrom.IsFeelerConn()) {
LogPrint(BCLog::NET,
"feeler connection completed peer=%d; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
}
return;
}
if (pfrom.nVersion == 0) {
// Must have a version message before anything else
Misbehaving(*peer, 10, "non-version message before version handshake");
return;
}
// At this point, the outgoing message serialization version can't change.
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
if (msg_type == NetMsgType::VERACK) {
if (pfrom.fSuccessfullyConnected) {
LogPrint(BCLog::NET,
"ignoring redundant verack message from peer=%d\n",
pfrom.GetId());
return;
}
if (!pfrom.IsInboundConn()) {
LogPrintf(
"New outbound peer connected: version: %d, blocks=%d, "
"peer=%d%s (%s)\n",
pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
(fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
: ""),
pfrom.ConnectionTypeAsString());
}
if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version 1
// cmpctblocks. However, we do not request new block announcements
// using cmpctblock messages. We send this to non-NODE NETWORK peers
// as well, because they may wish to request compact blocks from us.
m_connman.PushMessage(
&pfrom,
msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false,
/*version=*/CMPCTBLOCKS_VERSION));
}
if (m_avalanche) {
if (m_avalanche->sendHello(&pfrom)) {
auto localProof = m_avalanche->getLocalProof();
if (localProof) {
AddKnownProof(*peer, localProof->getId());
// Add our proof id to the list of proof INVs recently announced
// to this peer. This is used for filtering which INVs can be
// requested for download.
peer->m_proof_relay->m_recently_announced_proofs.insert(
localProof->getId());
}
}
}
pfrom.fSuccessfullyConnected = true;
return;
}
if (!pfrom.fSuccessfullyConnected) {
// Must have a verack message before anything else
Misbehaving(*peer, 10, "non-verack message before version handshake");
return;
}
if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
int stream_version = vRecv.GetVersion();
if (msg_type == NetMsgType::ADDRV2) {
// Add ADDRV2_FORMAT to the version so that the CNetAddr and
// CAddress unserialize methods know that an address in v2 format is
// coming.
stream_version |= ADDRV2_FORMAT;
}
OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
std::vector<CAddress> vAddr;
s >> vAddr;
if (!SetupAddressRelay(pfrom, *peer)) {
LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
return;
}
if (vAddr.size() > m_opts.max_addr_to_send) {
Misbehaving(
*peer, 20,
strprintf("%s message size = %u", msg_type, vAddr.size()));
return;
}
// Store the new addresses
std::vector<CAddress> vAddrOk;
const auto current_a_time{AdjustedTime()};
// Update/increment addr rate limiting bucket.
const auto current_time = GetTime<std::chrono::microseconds>();
{
LOCK(peer->m_addr_token_bucket_mutex);
if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
// Don't increment bucket if it's already full
const auto time_diff =
std::max(current_time - peer->m_addr_token_timestamp, 0us);
const double increment =
CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
peer->m_addr_token_bucket =
std::min<double>(peer->m_addr_token_bucket + increment,
MAX_ADDR_PROCESSING_TOKEN_BUCKET);
}
}
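        // For illustration: if, say, 60 seconds elapsed since the last addr
        // message and MAX_ADDR_RATE_PER_SECOND were 0.1 tokens per second,
        // the refill above would add 6 tokens, i.e. 6 more addresses could
        // be processed below before rate limiting kicks in.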
peer->m_addr_token_timestamp = current_time;
const bool rate_limited =
!pfrom.HasPermission(NetPermissionFlags::Addr);
uint64_t num_proc = 0;
uint64_t num_rate_limit = 0;
Shuffle(vAddr.begin(), vAddr.end(), m_rng);
for (CAddress &addr : vAddr) {
if (interruptMsgProc) {
return;
}
{
LOCK(peer->m_addr_token_bucket_mutex);
// Apply rate limiting.
if (peer->m_addr_token_bucket < 1.0) {
if (rate_limited) {
++num_rate_limit;
continue;
}
} else {
peer->m_addr_token_bucket -= 1.0;
}
}
// We only bother storing full nodes, though this may include things
// which we would not make an outbound connection to, in part
// because we may make feeler connections to them.
if (!MayHaveUsefulAddressDB(addr.nServices) &&
!HasAllDesirableServiceFlags(addr.nServices)) {
continue;
}
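            // Clamp obviously bogus timestamps (before ~1973, or more than
            // 10 minutes in the future) to five days in the past, so such
            // addresses are neither relayed as fresh nor stored with
            // inflated recency.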
if (addr.nTime <= NodeSeconds{100000000s} ||
addr.nTime > current_a_time + 10min) {
addr.nTime = current_a_time - 5 * 24h;
}
AddAddressKnown(*peer, addr);
if (m_banman &&
(m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
// Do not process banned/discouraged addresses beyond
// remembering we received them
continue;
}
++num_proc;
bool fReachable = IsReachable(addr);
if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
vAddr.size() <= 10 && addr.IsRoutable()) {
// Relay to a limited number of other nodes
RelayAddress(pfrom.GetId(), addr, fReachable);
}
// Do not store addresses outside our network
if (fReachable) {
vAddrOk.push_back(addr);
}
}
peer->m_addr_processed += num_proc;
peer->m_addr_rate_limited += num_rate_limit;
LogPrint(BCLog::NET,
"Received addr: %u addresses (%u processed, %u rate-limited) "
"from peer=%d\n",
vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
m_addrman.Add(vAddrOk, pfrom.addr, 2h);
if (vAddr.size() < 1000) {
peer->m_getaddr_sent = false;
}
// AddrFetch: Require multiple addresses to avoid disconnecting on
// self-announcements
if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
LogPrint(BCLog::NET,
"addrfetch connection completed peer=%d; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
}
return;
}
if (msg_type == NetMsgType::SENDADDRV2) {
peer->m_wants_addrv2 = true;
return;
}
if (msg_type == NetMsgType::SENDHEADERS) {
peer->m_prefers_headers = true;
return;
}
if (msg_type == NetMsgType::SENDCMPCT) {
bool sendcmpct_hb{false};
uint64_t sendcmpct_version{0};
vRecv >> sendcmpct_hb >> sendcmpct_version;
if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
return;
}
LOCK(cs_main);
CNodeState *nodestate = State(pfrom.GetId());
nodestate->m_provides_cmpctblocks = true;
nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
// save whether peer selects us as BIP152 high-bandwidth peer
// (receiving sendcmpct(1) signals high-bandwidth,
// sendcmpct(0) low-bandwidth)
pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
return;
}
if (msg_type == NetMsgType::INV) {
std::vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ) {
Misbehaving(*peer, 20,
strprintf("inv message size = %u", vInv.size()));
return;
}
// Reject tx INVs when the -blocksonly setting is enabled, or this is a
// block-relay-only peer
bool reject_tx_invs{m_opts.ignore_incoming_txs ||
pfrom.IsBlockOnlyConn()};
// Allow peers with relay permission to send data other than blocks
// in blocks only mode
if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
reject_tx_invs = false;
}
const auto current_time{GetTime<std::chrono::microseconds>()};
std::optional<BlockHash> best_block;
auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
fAlreadyHave ? "have" : "new", pfrom.GetId());
};
for (CInv &inv : vInv) {
if (interruptMsgProc) {
return;
}
if (inv.IsMsgStakeContender()) {
// Ignore invs with stake contenders. This type is only used for
// polling.
continue;
}
if (inv.IsMsgBlk()) {
LOCK(cs_main);
const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
logInv(inv, fAlreadyHave);
BlockHash hash{inv.hash};
UpdateBlockAvailability(pfrom.GetId(), hash);
if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
!IsBlockRequested(hash)) {
// Headers-first is the primary method of announcement on
// the network. If a node fell back to sending blocks by
// inv, it may be for a re-org, or because we haven't
// completed initial headers sync. The final block hash
// provided should be the highest, so send a getheaders and
// then fetch the blocks we need to catch up.
best_block = std::move(hash);
}
continue;
}
if (inv.IsMsgProof()) {
const avalanche::ProofId proofid(inv.hash);
const bool fAlreadyHave = AlreadyHaveProof(proofid);
logInv(inv, fAlreadyHave);
AddKnownProof(*peer, proofid);
if (!fAlreadyHave && m_avalanche &&
!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
const bool preferred = isPreferredDownloadPeer(pfrom);
LOCK(cs_proofrequest);
AddProofAnnouncement(pfrom, proofid, current_time,
preferred);
}
continue;
}
if (inv.IsMsgTx()) {
LOCK(cs_main);
const TxId txid(inv.hash);
const bool fAlreadyHave =
AlreadyHaveTx(txid, /*include_reconsiderable=*/true);
logInv(inv, fAlreadyHave);
AddKnownTx(*peer, txid);
if (reject_tx_invs) {
LogPrint(BCLog::NET,
"transaction (%s) inv sent in violation of "
"protocol, disconnecting peer=%d\n",
txid.ToString(), pfrom.GetId());
pfrom.fDisconnect = true;
return;
} else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
.IsInitialBlockDownload()) {
AddTxAnnouncement(pfrom, txid, current_time);
}
continue;
}
LogPrint(BCLog::NET,
"Unknown inv type \"%s\" received from peer=%d\n",
inv.ToString(), pfrom.GetId());
}
if (best_block) {
// If we haven't started initial headers-sync with this peer, then
// consider sending a getheaders now. On initial startup, there's a
// reliability vs bandwidth tradeoff, where we are only trying to do
// initial headers sync with one peer at a time, with a long
// timeout (at which point, if the sync hasn't completed, we will
// disconnect the peer and then choose another). In the meantime,
// as new blocks are found, we are willing to add one new peer per
// block to sync with as well, to sync quicker in the case where
// our initial peer is unresponsive (but less bandwidth than we'd
// use if we turned on sync with all peers).
LOCK(::cs_main);
CNodeState &state{*Assert(State(pfrom.GetId()))};
if (state.fSyncStarted ||
(!peer->m_inv_triggered_getheaders_before_sync &&
*best_block != m_last_block_inv_triggering_headers_sync)) {
if (MaybeSendGetHeaders(
pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
m_chainman.m_best_header->nHeight,
best_block->ToString(), pfrom.GetId());
}
if (!state.fSyncStarted) {
peer->m_inv_triggered_getheaders_before_sync = true;
// Update the last block hash that triggered a new headers
// sync, so that we don't turn on headers sync with more
// than 1 new peer every new block.
m_last_block_inv_triggering_headers_sync = *best_block;
}
}
}
return;
}
if (msg_type == NetMsgType::GETDATA) {
std::vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ) {
Misbehaving(*peer, 20,
strprintf("getdata message size = %u", vInv.size()));
return;
}
LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
vInv.size(), pfrom.GetId());
if (vInv.size() > 0) {
LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
vInv[0].ToString(), pfrom.GetId());
}
{
LOCK(peer->m_getdata_requests_mutex);
peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
vInv.begin(), vInv.end());
ProcessGetData(config, pfrom, *peer, interruptMsgProc);
}
return;
}
if (msg_type == NetMsgType::GETBLOCKS) {
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
if (locator.vHave.size() > MAX_LOCATOR_SZ) {
LogPrint(BCLog::NET,
"getblocks locator size %lld > %d, disconnect peer=%d\n",
locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
// We might have announced the currently-being-connected tip using a
// compact block, which resulted in the peer sending a getblocks
// request, which we would otherwise respond to without the new block.
// To avoid this situation we simply verify that we are on our best
// known chain now. This is super overkill, but we handle it better
// for getheaders requests, and there are no known nodes which support
// compact blocks but still use getblocks to request blocks.
{
std::shared_ptr<const CBlock> a_recent_block;
{
LOCK(m_most_recent_block_mutex);
a_recent_block = m_most_recent_block;
}
BlockValidationState state;
if (!m_chainman.ActiveChainstate().ActivateBestChain(
state, a_recent_block, m_avalanche)) {
LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
state.ToString());
}
}
LOCK(cs_main);
// Find the last block the caller has in the main chain
const CBlockIndex *pindex =
m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
// Send the rest of the chain
if (pindex) {
pindex = m_chainman.ActiveChain().Next(pindex);
}
int nLimit = 500;
LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
(pindex ? pindex->nHeight : -1),
hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
if (pindex->GetBlockHash() == hashStop) {
LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
            // If pruning, don't inv blocks unless we have them on disk and
            // are likely to still have them for some reasonable time window
            // (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave =
MIN_BLOCKS_TO_KEEP -
3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
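            // e.g. assuming the usual MIN_BLOCKS_TO_KEEP of 288 and a
            // 10-minute target spacing, this is 288 - 3600/600 = 282 blocks
            // below the tip.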
if (m_chainman.m_blockman.IsPruneMode() &&
(!pindex->nStatus.hasData() ||
pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
nPrunedBlocksLikelyToHave)) {
LogPrint(
BCLog::NET,
" getblocks stopping, pruned or too old block at %d %s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
WITH_LOCK(
peer->m_block_inv_mutex,
peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
if (--nLimit <= 0) {
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
WITH_LOCK(peer->m_block_inv_mutex, {
peer->m_continuation_block = pindex->GetBlockHash();
});
break;
}
}
return;
}
if (msg_type == NetMsgType::GETBLOCKTXN) {
BlockTransactionsRequest req;
vRecv >> req;
std::shared_ptr<const CBlock> recent_block;
{
LOCK(m_most_recent_block_mutex);
if (m_most_recent_block_hash == req.blockhash) {
recent_block = m_most_recent_block;
}
// Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
}
if (recent_block) {
SendBlockTransactions(pfrom, *peer, *recent_block, req);
return;
}
{
LOCK(cs_main);
const CBlockIndex *pindex =
m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
if (!pindex || !pindex->nStatus.hasData()) {
LogPrint(
BCLog::NET,
"Peer %d sent us a getblocktxn for a block we don't have\n",
pfrom.GetId());
return;
}
if (pindex->nHeight >=
m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
CBlock block;
const bool ret{
m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
assert(ret);
SendBlockTransactions(pfrom, *peer, block, req);
return;
}
}
// If an older block is requested (should never happen in practice,
// but can happen in tests) send a block response instead of a
// blocktxn response. Sending a full block response instead of a
// small blocktxn response is preferable in the case where a peer
// might maliciously send lots of getblocktxn requests to trigger
// expensive disk reads, because it will require the peer to
// actually receive all the data read from disk over the network.
LogPrint(BCLog::NET,
"Peer %d sent us a getblocktxn for a block > %i deep\n",
pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
CInv inv;
inv.type = MSG_BLOCK;
inv.hash = req.blockhash;
WITH_LOCK(peer->m_getdata_requests_mutex,
peer->m_getdata_requests.push_back(inv));
// The message processing loop will go around again (without pausing)
// and we'll respond then (without cs_main)
return;
}
if (msg_type == NetMsgType::GETHEADERS) {
CBlockLocator locator;
BlockHash hashStop;
vRecv >> locator >> hashStop;
if (locator.vHave.size() > MAX_LOCATOR_SZ) {
LogPrint(BCLog::NET,
"getheaders locator size %lld > %d, disconnect peer=%d\n",
locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(
BCLog::NET,
"Ignoring getheaders from peer=%d while importing/reindexing\n",
pfrom.GetId());
return;
}
LOCK(cs_main);
// Note that if we were to be on a chain that forks from the
// checkpointed chain, then serving those headers to a peer that has
// seen the checkpointed chain would cause that peer to disconnect us.
// Requiring that our chainwork exceed the minimum chainwork is a
// protection against being fed a bogus chain when we started up for
// the first time and getting partitioned off the honest network for
// serving that chain to others.
if (m_chainman.ActiveTip() == nullptr ||
(m_chainman.ActiveTip()->nChainWork <
m_chainman.MinimumChainWork() &&
!pfrom.HasPermission(NetPermissionFlags::Download))) {
LogPrint(BCLog::NET,
"Ignoring getheaders from peer=%d because active chain "
"has too little work; sending empty response\n",
pfrom.GetId());
// Just respond with an empty headers message, to tell the peer to
// go away but not treat us as unresponsive.
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS,
std::vector<CBlock>()));
return;
}
CNodeState *nodestate = State(pfrom.GetId());
const CBlockIndex *pindex = nullptr;
if (locator.IsNull()) {
// If locator is null, return the hashStop block
pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
if (!pindex) {
return;
}
if (!BlockRequestAllowed(pindex)) {
LogPrint(BCLog::NET,
"%s: ignoring request from peer=%i for old block "
"header that isn't in the main chain\n",
__func__, pfrom.GetId());
return;
}
} else {
// Find the last block the caller has in the main chain
pindex =
m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
if (pindex) {
pindex = m_chainman.ActiveChain().Next(pindex);
}
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
// count at the end
std::vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
(pindex ? pindex->nHeight : -1),
hashStop.IsNull() ? "end" : hashStop.ToString(),
pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
break;
}
}
// pindex can be nullptr either if we sent
// m_chainman.ActiveChain().Tip() OR if our peer has
// m_chainman.ActiveChain().Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
//
// It is important that we simply reset the BestHeaderSent value here,
// and not max(BestHeaderSent, newHeaderSent). We might have announced
// the currently-being-connected tip using a compact block, which
// resulted in the peer sending a headers request, which we respond to
// without the new block. By resetting the BestHeaderSent, we ensure we
// will re-announce the new block via headers (or compact blocks again)
// in the SendMessages logic.
nodestate->pindexBestHeaderSent =
pindex ? pindex : m_chainman.ActiveChain().Tip();
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::HEADERS, vHeaders));
return;
}
if (msg_type == NetMsgType::TX) {
// Stop processing the transaction early if
// 1) We are in blocks only mode and peer has no relay permission; OR
// 2) This peer is a block-relay-only peer
if ((m_opts.ignore_incoming_txs &&
!pfrom.HasPermission(NetPermissionFlags::Relay)) ||
pfrom.IsBlockOnlyConn()) {
LogPrint(BCLog::NET,
"transaction sent in violation of protocol peer=%d\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
// Stop processing the transaction early if we are still in IBD since we
// don't have enough information to validate it yet. Sending unsolicited
// transactions is not considered a protocol violation, so don't punish
// the peer.
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
return;
}
CTransactionRef ptx;
vRecv >> ptx;
const CTransaction &tx = *ptx;
const TxId &txid = tx.GetId();
AddKnownTx(*peer, txid);
LOCK(cs_main);
m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
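        // Note: include_reconsiderable=true means a tx sitting in the
        // package-reconsiderable reject filter counts as already-have; it
        // gets a second chance through the 1p1c package path below instead
        // of being resubmitted on its own.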
if (AlreadyHaveTx(txid, /*include_reconsiderable=*/true)) {
if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
// Always relay transactions received from peers with
// forcerelay permission, even if they were already in the
// mempool, allowing the node to function as a gateway for
// nodes hidden behind it.
if (!m_mempool.exists(tx.GetId())) {
LogPrintf("Not relaying non-mempool transaction %s from "
"forcerelay peer=%d\n",
tx.GetId().ToString(), pfrom.GetId());
} else {
LogPrintf("Force relaying tx %s from peer=%d\n",
tx.GetId().ToString(), pfrom.GetId());
RelayTransaction(tx.GetId());
}
}
if (m_recent_rejects_package_reconsiderable.contains(txid)) {
// When a transaction is already in
// m_recent_rejects_package_reconsiderable, we shouldn't submit
// it by itself again. However, look for a matching child in the
// orphanage, as it is possible that they succeed as a package.
LogPrint(BCLog::TXPACKAGES,
"found tx %s in reconsiderable rejects, looking for "
"child in orphanage\n",
txid.ToString());
if (auto package_to_validate{
Find1P1CPackage(ptx, pfrom.GetId())}) {
const auto package_result{ProcessNewPackage(
m_chainman.ActiveChainstate(), m_mempool,
package_to_validate->m_txns, /*test_accept=*/false)};
LogPrint(BCLog::TXPACKAGES,
"package evaluation for %s: %s (%s)\n",
package_to_validate->ToString(),
package_result.m_state.IsValid()
? "package accepted"
: "package rejected",
package_result.m_state.ToString());
ProcessPackageResult(package_to_validate.value(),
package_result);
}
}
// If a tx is detected by m_recent_rejects it is ignored. Because we
// haven't submitted the tx to our mempool, we won't have computed a
// DoS score for it or determined exactly why we consider it
// invalid.
//
// This means we won't penalize any peer subsequently relaying a
// DoSy tx (even if we penalized the first peer who gave it to us)
// because we have to account for m_recent_rejects showing false
// positives. In other words, we shouldn't penalize a peer if we
// aren't *sure* they submitted a DoSy tx.
//
// Note that m_recent_rejects doesn't just record DoSy or invalid
// transactions, but any tx not accepted by the mempool, which may
// be due to node policy (vs. consensus). So we can't blanket
// penalize a peer simply for relaying a tx that our
// m_recent_rejects has caught, regardless of false positives.
return;
}
const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
const TxValidationState &state = result.m_state;
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
ProcessValidTx(pfrom.GetId(), ptx);
pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
} else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
            // It may be the case that the orphan's parents have all been
            // rejected.
bool fRejectedParents = false;
// Deduplicate parent txids, so that we don't have to loop over
// the same parent txid more than once down below.
std::vector<TxId> unique_parents;
unique_parents.reserve(tx.vin.size());
for (const CTxIn &txin : tx.vin) {
// We start with all parents, and then remove duplicates below.
unique_parents.push_back(txin.prevout.GetTxId());
}
std::sort(unique_parents.begin(), unique_parents.end());
unique_parents.erase(
std::unique(unique_parents.begin(), unique_parents.end()),
unique_parents.end());
// Distinguish between parents in m_recent_rejects and
// m_recent_rejects_package_reconsiderable. We can tolerate having
// up to 1 parent in m_recent_rejects_package_reconsiderable since
// we submit 1p1c packages. However, fail immediately if any are in
// m_recent_rejects.
std::optional<TxId> rejected_parent_reconsiderable;
for (const TxId &parent_txid : unique_parents) {
if (m_recent_rejects.contains(parent_txid)) {
fRejectedParents = true;
break;
}
if (m_recent_rejects_package_reconsiderable.contains(
parent_txid) &&
!m_mempool.exists(parent_txid)) {
// More than 1 parent in
// m_recent_rejects_package_reconsiderable:
// 1p1c will not be sufficient to accept this package, so
// just give up here.
if (rejected_parent_reconsiderable.has_value()) {
fRejectedParents = true;
break;
}
rejected_parent_reconsiderable = parent_txid;
}
}
if (!fRejectedParents) {
const auto current_time{GetTime<std::chrono::microseconds>()};
for (const TxId &parent_txid : unique_parents) {
// FIXME: MSG_TX should use a TxHash, not a TxId.
AddKnownTx(*peer, parent_txid);
// Exclude m_recent_rejects_package_reconsiderable: the
// missing parent may have been previously rejected for
// being too low feerate. This orphan might CPFP it.
if (!AlreadyHaveTx(parent_txid,
/*include_reconsiderable=*/false)) {
AddTxAnnouncement(pfrom, parent_txid, current_time);
}
}
// NO_THREAD_SAFETY_ANALYSIS because we can't annotate for
// g_msgproc_mutex
if (unsigned int nEvicted =
m_mempool.withOrphanage(
[&](TxOrphanage &orphanage)
NO_THREAD_SAFETY_ANALYSIS {
if (orphanage.AddTx(ptx, pfrom.GetId())) {
AddToCompactExtraTransactions(ptx);
}
return orphanage.LimitTxs(
m_opts.max_orphan_txs, m_rng);
}) > 0) {
LogPrint(BCLog::TXPACKAGES,
"orphanage overflow, removed %u tx\n", nEvicted);
}
// Once added to the orphan pool, a tx is considered
// AlreadyHave, and we shouldn't request it anymore.
m_txrequest.ForgetInvId(tx.GetId());
} else {
LogPrint(BCLog::MEMPOOL,
"not keeping orphan with rejected parents %s\n",
tx.GetId().ToString());
// We will continue to reject this tx since it has rejected
// parents so avoid re-requesting it from other peers.
m_recent_rejects.insert(tx.GetId());
m_txrequest.ForgetInvId(tx.GetId());
}
}
if (state.IsInvalid()) {
ProcessInvalidTx(pfrom.GetId(), ptx, state,
/*maybe_add_extra_compact_tx=*/true);
}
// When a transaction fails for TX_PACKAGE_RECONSIDERABLE, look for a
// matching child in the orphanage, as it is possible that they succeed
// as a package.
if (state.GetResult() ==
TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
LogPrint(BCLog::TXPACKAGES,
"tx %s failed but reconsiderable, looking for child in "
"orphanage\n",
txid.ToString());
if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
const auto package_result{ProcessNewPackage(
m_chainman.ActiveChainstate(), m_mempool,
package_to_validate->m_txns, /*test_accept=*/false)};
LogPrint(BCLog::TXPACKAGES,
"package evaluation for %s: %s (%s)\n",
package_to_validate->ToString(),
package_result.m_state.IsValid() ? "package accepted"
: "package rejected",
package_result.m_state.ToString());
ProcessPackageResult(package_to_validate.value(),
package_result);
}
}
- if (state.GetResult() == TxValidationResult::TX_CONFLICT) {
+ if (state.GetResult() ==
+ TxValidationResult::TX_AVALANCHE_RECONSIDERABLE) {
unsigned int nEvicted{0};
// NO_THREAD_SAFETY_ANALYSIS because of g_msgproc_mutex required in
// the lambda for m_rng
m_mempool.withConflicting(
[&](TxConflicting &conflicting) NO_THREAD_SAFETY_ANALYSIS {
conflicting.AddTx(ptx, pfrom.GetId());
nEvicted =
conflicting.LimitTxs(m_opts.max_conflicting_txs, m_rng);
});
if (nEvicted > 0) {
LogPrint(BCLog::TXPACKAGES,
"conflicting pool overflow, removed %u tx\n",
nEvicted);
}
}
return;
}
if (msg_type == NetMsgType::CMPCTBLOCK) {
// Ignore cmpctblock received while importing
if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET,
"Unexpected cmpctblock message received from peer %d\n",
pfrom.GetId());
return;
}
CBlockHeaderAndShortTxIDs cmpctblock;
try {
vRecv >> cmpctblock;
} catch (std::ios_base::failure &e) {
            // This block has non-contiguous or overflowing indexes
Misbehaving(*peer, 100, "cmpctblock-bad-indexes");
return;
}
bool received_new_header = false;
{
LOCK(cs_main);
const CBlockIndex *prev_block =
m_chainman.m_blockman.LookupBlockIndex(
cmpctblock.header.hashPrevBlock);
if (!prev_block) {
// Doesn't connect (or is genesis), instead of DoSing in
// AcceptBlockHeader, request deeper headers
if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
MaybeSendGetHeaders(
pfrom, GetLocator(m_chainman.m_best_header), *peer);
}
return;
}
if (prev_block->nChainWork +
CalculateHeadersWork({cmpctblock.header}) <
GetAntiDoSWorkThreshold()) {
// If we get a low-work header in a compact block, we can ignore
// it.
LogPrint(BCLog::NET,
"Ignoring low-work compact block from peer %d\n",
pfrom.GetId());
return;
}
if (!m_chainman.m_blockman.LookupBlockIndex(
cmpctblock.header.GetHash())) {
received_new_header = true;
}
}
const CBlockIndex *pindex = nullptr;
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
/*min_pow_checked=*/true, state,
&pindex)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom.GetId(), state,
/*via_compact_block*/ true,
"invalid header via cmpctblock");
return;
}
}
// When we succeed in decoding a block's txids from a cmpctblock
// message we typically jump to the BLOCKTXN handling code, with a
// dummy (empty) BLOCKTXN message, to re-use the logic there in
// completing processing of the putative block (without cs_main).
bool fProcessBLOCKTXN = false;
CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
        // If we end up treating this as a plain headers message, call that
        // as well without cs_main.
bool fRevertToHeaderProcessing = false;
// Keep a CBlock for "optimistic" compactblock reconstructions (see
// below)
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
bool fBlockReconstructed = false;
{
LOCK(cs_main);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
CNodeState *nodestate = State(pfrom.GetId());
// If this was a new header with more work than our tip, update the
// peer's last block announcement time
if (received_new_header &&
pindex->nChainWork >
m_chainman.ActiveChain().Tip()->nChainWork) {
nodestate->m_last_block_announcement = GetTime();
}
std::map<BlockHash,
std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
iterator blockInFlightIt =
mapBlocksInFlight.find(pindex->GetBlockHash());
bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
if (pindex->nStatus.hasData()) {
// Nothing to do here
return;
}
if (pindex->nChainWork <=
m_chainman.ActiveChain()
.Tip()
->nChainWork || // We know something better
pindex->nTx != 0) {
// We had this block at some point, but pruned it
if (fAlreadyInFlight) {
// We requested this block for some reason, but our mempool
// will probably be useless so we just grab the block via
// normal getdata.
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
}
return;
}
// If we're not close to tip yet, give up and let parallel block
// fetch work its magic.
if (!fAlreadyInFlight && !CanDirectFetch()) {
return;
}
// We want to be a bit conservative just to be extra careful about
// DoS possibilities in compact block processing...
if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
(fAlreadyInFlight &&
blockInFlightIt->second.first == pfrom.GetId())) {
std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
if (!BlockRequested(config, pfrom.GetId(), *pindex,
&queuedBlockIt)) {
if (!(*queuedBlockIt)->partialBlock) {
(*queuedBlockIt)
->partialBlock.reset(
new PartiallyDownloadedBlock(config,
&m_mempool));
} else {
// The block was already in flight using compact
// blocks from the same peer.
LogPrint(BCLog::NET, "Peer sent us compact block "
"we were already syncing!\n");
return;
}
}
PartiallyDownloadedBlock &partialBlock =
*(*queuedBlockIt)->partialBlock;
ReadStatus status =
partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status == READ_STATUS_INVALID) {
// Reset in-flight state in case Misbehaving does not
// result in a disconnect
RemoveBlockRequest(pindex->GetBlockHash());
Misbehaving(*peer, 100, "invalid compact block");
return;
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindices, the block is now in-flight, so
// just request it.
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return;
}
BlockTransactionsRequest req;
for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
if (!partialBlock.IsTxAvailable(i)) {
req.indices.push_back(i);
}
}
if (req.indices.empty()) {
// Dirty hack to jump to BLOCKTXN code (TODO: move
// message handling into their own functions)
BlockTransactions txn;
txn.blockhash = cmpctblock.header.GetHash();
blockTxnMsg << txn;
fProcessBLOCKTXN = true;
} else {
req.blockhash = pindex->GetBlockHash();
m_connman.PushMessage(
&pfrom,
msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
}
} else {
// This block is either already in flight from a different
// peer, or this peer has too many blocks outstanding to
// download from. Optimistically try to reconstruct anyway
// since we might be able to without any round trips.
PartiallyDownloadedBlock tempBlock(config, &m_mempool);
ReadStatus status =
tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status != READ_STATUS_OK) {
// TODO: don't ignore failures
return;
}
std::vector<CTransactionRef> dummy;
status = tempBlock.FillBlock(*pblock, dummy);
if (status == READ_STATUS_OK) {
fBlockReconstructed = true;
}
}
} else {
if (fAlreadyInFlight) {
                    // We requested this block, but it's far into the future,
                    // so our mempool will probably be useless - request the
                    // block normally.
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return;
} else {
// If this was an announce-cmpctblock, we want the same
// treatment as a header message.
fRevertToHeaderProcessing = true;
}
}
} // cs_main
if (fProcessBLOCKTXN) {
return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
blockTxnMsg, time_received, interruptMsgProc);
}
if (fRevertToHeaderProcessing) {
// Headers received from HB compact block peers are permitted to be
// relayed before full validation (see BIP 152), so we don't want to
// disconnect the peer if the header turns out to be for an invalid
// block. Note that if a peer tries to build on an invalid chain,
// that will be detected and the peer will be banned.
return ProcessHeadersMessage(config, pfrom, *peer,
{cmpctblock.header},
/*via_compact_block=*/true);
}
if (fBlockReconstructed) {
// If we got here, we were able to optimistically reconstruct a
// block that is in flight from some other peer.
{
LOCK(cs_main);
mapBlockSource.emplace(pblock->GetHash(),
std::make_pair(pfrom.GetId(), false));
}
// Setting force_processing to true means that we bypass some of
// our anti-DoS protections in AcceptBlock, which filters
// unrequested blocks that might be trying to waste our resources
// (eg disk space). Because we only try to reconstruct blocks when
// we're close to caught up (via the CanDirectFetch() requirement
// above, combined with the behavior of not requesting blocks until
// we have a chain with at least the minimum chain work), and we
// ignore compact blocks with less work than our tip, it is safe to
// treat reconstructed compact blocks as having been requested.
ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
/*min_pow_checked=*/true);
// hold cs_main for CBlockIndex::IsValid()
LOCK(cs_main);
if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
// Clear download state for this block, which is in process from
            // some other peer. We do this after calling ProcessNewBlock so
// that a malleated cmpctblock announcement can't be used to
// interfere with block relay.
RemoveBlockRequest(pblock->GetHash());
}
}
return;
}
if (msg_type == NetMsgType::BLOCKTXN) {
// Ignore blocktxn received while importing
if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET,
"Unexpected blocktxn message received from peer %d\n",
pfrom.GetId());
return;
}
BlockTransactions resp;
vRecv >> resp;
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
bool fBlockRead = false;
{
LOCK(cs_main);
std::map<BlockHash,
std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
iterator it = mapBlocksInFlight.find(resp.blockhash);
if (it == mapBlocksInFlight.end() ||
!it->second.second->partialBlock ||
it->second.first != pfrom.GetId()) {
LogPrint(BCLog::NET,
"Peer %d sent us block transactions for block "
"we weren't expecting\n",
pfrom.GetId());
return;
}
PartiallyDownloadedBlock &partialBlock =
*it->second.second->partialBlock;
ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
if (status == READ_STATUS_INVALID) {
                // Reset in-flight state in case Misbehaving does not result
                // in a disconnect.
RemoveBlockRequest(resp.blockhash);
Misbehaving(
*peer, 100,
"invalid compact block/non-matching block transactions");
return;
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::GETDATA, invs));
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
// Note that CheckBlock can only fail for one of a few reasons:
// 1. bad-proof-of-work (impossible here, because we've already
// accepted the header)
// 2. merkleroot doesn't match the transactions given (already
// caught in FillBlock with READ_STATUS_FAILED, so
// impossible here)
// 3. the block is otherwise invalid (eg invalid coinbase,
// block is too big, too many sigChecks, etc).
// So if CheckBlock failed, #3 is the only possibility.
// Under BIP 152, we don't DoS-ban unless proof of work is
// invalid (we don't require all the stateless checks to have
// been run). This is handled below, so just treat this as
// though the block was successfully read, and rely on the
// handling in ProcessNewBlock to ensure the block index is
// updated, etc.
                // partialBlock is now an empty pointer
RemoveBlockRequest(resp.blockhash);
fBlockRead = true;
// mapBlockSource is used for potentially punishing peers and
// updating which peers send us compact blocks, so the race
// between here and cs_main in ProcessNewBlock is fine.
// BIP 152 permits peers to relay compact blocks after
// validating the header only; we should not punish peers
// if the block turns out to be invalid.
mapBlockSource.emplace(resp.blockhash,
std::make_pair(pfrom.GetId(), false));
}
} // Don't hold cs_main when we call into ProcessNewBlock
if (fBlockRead) {
// Since we requested this block (it was in mapBlocksInFlight),
// force it to be processed, even if it would not be a candidate for
// new tip (missing previous block, chain not long enough, etc)
// This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
// disk-space attacks), but this should be safe due to the
// protections in the compact block handler -- see related comment
// in compact block optimistic reconstruction handling.
ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
/*min_pow_checked=*/true);
}
return;
}
if (msg_type == NetMsgType::HEADERS) {
// Ignore headers received while importing
if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET,
"Unexpected headers message received from peer %d\n",
pfrom.GetId());
return;
}
// Assume that this is in response to any outstanding getheaders
// request we may have sent, and clear out the time of our last request
peer->m_last_getheaders_timestamp = {};
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk
// deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
Misbehaving(*peer, 20,
strprintf("too-many-headers: headers message size = %u",
nCount));
return;
}
headers.resize(nCount);
for (unsigned int n = 0; n < nCount; n++) {
vRecv >> headers[n];
// Ignore tx count; assume it is 0.
ReadCompactSize(vRecv);
}
ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
/*via_compact_block=*/false);
// Check if the headers presync progress needs to be reported to
// validation. This needs to be done without holding the
// m_headers_presync_mutex lock.
if (m_headers_presync_should_signal.exchange(false)) {
HeadersPresyncStats stats;
{
LOCK(m_headers_presync_mutex);
auto it =
m_headers_presync_stats.find(m_headers_presync_bestpeer);
if (it != m_headers_presync_stats.end()) {
stats = it->second;
}
}
if (stats.second) {
m_chainman.ReportHeadersPresync(
stats.first, stats.second->first, stats.second->second);
}
}
return;
}
if (msg_type == NetMsgType::BLOCK) {
// Ignore block received while importing
if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET,
"Unexpected block message received from peer %d\n",
pfrom.GetId());
return;
}
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
vRecv >> *pblock;
LogPrint(BCLog::NET, "received block %s peer=%d\n",
pblock->GetHash().ToString(), pfrom.GetId());
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network. Such an unrequested
// block may still be processed, subject to the conditions in
// AcceptBlock().
bool forceProcessing =
pfrom.HasPermission(NetPermissionFlags::NoBan) &&
!m_chainman.ActiveChainstate().IsInitialBlockDownload();
const BlockHash hash = pblock->GetHash();
bool min_pow_checked = false;
{
LOCK(cs_main);
// Always process the block if we requested it, since we may
// need it even when it's not a candidate for a new best tip.
forceProcessing = IsBlockRequested(hash);
RemoveBlockRequest(hash);
// mapBlockSource is only used for punishing peers and setting
// which peers send us compact blocks, so the race between here and
// cs_main in ProcessNewBlock is fine.
mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
// Check work on this block against our anti-dos thresholds.
const CBlockIndex *prev_block =
m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock);
if (prev_block &&
prev_block->nChainWork +
CalculateHeadersWork({pblock->GetBlockHeader()}) >=
GetAntiDoSWorkThreshold()) {
min_pow_checked = true;
}
}
ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
return;
}
if (msg_type == NetMsgType::AVAHELLO) {
if (!m_avalanche) {
return;
}
{
LOCK(pfrom.cs_avalanche_pubkey);
if (pfrom.m_avalanche_pubkey.has_value()) {
LogPrint(
BCLog::AVALANCHE,
"Ignoring avahello from peer %d: already in our node set\n",
pfrom.GetId());
return;
}
avalanche::Delegation delegation;
vRecv >> delegation;
// A delegation with an all zero limited id indicates that the peer
// has no proof, so we're done.
if (delegation.getLimitedProofId() != uint256::ZERO) {
avalanche::DelegationState state;
CPubKey pubkey;
if (!delegation.verify(state, pubkey)) {
Misbehaving(*peer, 100, "invalid-delegation");
return;
}
pfrom.m_avalanche_pubkey = std::move(pubkey);
HashWriter sighasher{};
sighasher << delegation.getId();
sighasher << pfrom.nRemoteHostNonce;
sighasher << pfrom.GetLocalNonce();
sighasher << pfrom.nRemoteExtraEntropy;
sighasher << pfrom.GetLocalExtraEntropy();
SchnorrSig sig;
vRecv >> sig;
if (!(*pfrom.m_avalanche_pubkey)
.VerifySchnorr(sighasher.GetHash(), sig)) {
Misbehaving(*peer, 100, "invalid-avahello-signature");
return;
}
// If we don't know this proof already, add it to the tracker so
// it can be requested.
const avalanche::ProofId proofid(delegation.getProofId());
if (!AlreadyHaveProof(proofid)) {
const bool preferred = isPreferredDownloadPeer(pfrom);
LOCK(cs_proofrequest);
AddProofAnnouncement(pfrom, proofid,
GetTime<std::chrono::microseconds>(),
preferred);
}
// Don't check the return value. If it fails we probably don't
// know about the proof yet.
m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
return pm.addNode(pfrom.GetId(), proofid);
});
}
pfrom.m_avalanche_enabled = true;
}
// Send getavaaddr and getavaproofs to our avalanche outbound or
// manual connections
if (!pfrom.IsInboundConn()) {
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::GETAVAADDR));
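            // Grant enough addr tokens that the addresses returned for this
            // getavaaddr are not dropped by the addr rate limiter.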
WITH_LOCK(peer->m_addr_token_bucket_mutex,
peer->m_addr_token_bucket += m_opts.max_addr_to_send);
if (peer->m_proof_relay &&
!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::GETAVAPROOFS));
peer->m_proof_relay->compactproofs_requested = true;
}
}
return;
}
if (msg_type == NetMsgType::AVAPOLL) {
if (!m_avalanche) {
return;
}
const auto now = Now<SteadyMilliseconds>();
const auto last_poll = pfrom.m_last_poll;
pfrom.m_last_poll = now;
if (now <
last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
LogPrint(BCLog::AVALANCHE,
"Ignoring repeated avapoll from peer %d: cooldown not "
"elapsed\n",
pfrom.GetId());
return;
}
const bool quorum_established = m_avalanche->isQuorumEstablished();
uint64_t round;
Unserialize(vRecv, round);
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
Misbehaving(
*peer, 20,
strprintf("too-many-ava-poll: poll message size = %u", nCount));
return;
}
std::vector<avalanche::Vote> votes;
votes.reserve(nCount);
for (unsigned int n = 0; n < nCount; n++) {
CInv inv;
vRecv >> inv;
            // Default vote for unknown inv type; -1 wraps around to the
            // uint32_t maximum value.
            uint32_t vote = -1;
// We don't vote definitively until we have an established quorum
if (!quorum_established) {
votes.emplace_back(vote, inv.hash);
continue;
}
// If inv's type is known, get a vote for its hash
switch (inv.type) {
case MSG_TX: {
if (m_opts.avalanche_preconsensus) {
vote = WITH_LOCK(cs_main, return GetAvalancheVoteForTx(
TxId(inv.hash)));
}
} break;
case MSG_BLOCK: {
vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
BlockHash(inv.hash)));
} break;
case MSG_AVA_PROOF: {
vote = getAvalancheVoteForProof(
*m_avalanche, avalanche::ProofId(inv.hash));
} break;
default: {
LogPrint(BCLog::AVALANCHE,
"poll inv type %d unknown from peer=%d\n",
inv.type, pfrom.GetId());
}
}
votes.emplace_back(vote, inv.hash);
}
// Send the query to the node.
m_avalanche->sendResponse(
&pfrom, avalanche::Response(round, m_opts.avalanche_cooldown,
std::move(votes)));
return;
}
if (msg_type == NetMsgType::AVARESPONSE) {
if (!m_avalanche) {
return;
}
        // As long as QUIC is not implemented, we need to sign responses and
        // verify their signatures in order to avoid any manipulation of
        // messages at the transport level.
CHashVerifier<CDataStream> verifier(&vRecv);
avalanche::Response response;
verifier >> response;
SchnorrSig sig;
vRecv >> sig;
{
LOCK(pfrom.cs_avalanche_pubkey);
if (!pfrom.m_avalanche_pubkey.has_value() ||
!(*pfrom.m_avalanche_pubkey)
.VerifySchnorr(verifier.GetHash(), sig)) {
Misbehaving(*peer, 100, "invalid-ava-response-signature");
return;
}
}
auto now = GetTime<std::chrono::seconds>();
std::vector<avalanche::VoteItemUpdate> updates;
int banscore{0};
std::string error;
if (!m_avalanche->registerVotes(pfrom.GetId(), response, updates,
banscore, error)) {
if (banscore > 0) {
// If the banscore was set, just increase the node ban score
Misbehaving(*peer, banscore, error);
return;
}
// Otherwise the node may have got a network issue. Increase the
// fault counter instead and only ban if we reached a threshold.
// This allows for fault tolerance should there be a temporary
// outage while still preventing DoS'ing behaviors, as the counter
        // is reset if no fault occurred over some time period.
pfrom.m_avalanche_message_fault_counter++;
pfrom.m_avalanche_last_message_fault = now;
            // Allow up to 12 messages before increasing the ban score. Since
            // the queries are cleared after 10s, this is at least 12 * 10s =
            // 2 minutes of network outage tolerance over the 1h window.
if (pfrom.m_avalanche_message_fault_counter > 12) {
Misbehaving(*peer, 2, error);
return;
}
}
// If no fault occurred within the last hour, reset the fault counter
if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
pfrom.m_avalanche_message_fault_counter = 0;
}
pfrom.invsVoted(response.GetVotes().size());
auto logVoteUpdate = [](const auto &voteUpdate,
const std::string &voteItemTypeStr,
const auto &voteItemId) {
std::string voteOutcome;
switch (voteUpdate.getStatus()) {
case avalanche::VoteStatus::Invalid:
voteOutcome = "invalidated";
break;
case avalanche::VoteStatus::Rejected:
voteOutcome = "rejected";
break;
case avalanche::VoteStatus::Accepted:
voteOutcome = "accepted";
break;
case avalanche::VoteStatus::Finalized:
voteOutcome = "finalized";
break;
case avalanche::VoteStatus::Stale:
voteOutcome = "stalled";
break;
// No default case, so the compiler can warn about missing
// cases
}
LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
voteItemTypeStr, voteItemId.ToString());
};
bool shouldActivateBestChain = false;
for (const auto &u : updates) {
const avalanche::AnyVoteItem &item = u.getVoteItem();
// Don't use a visitor here as we want to ignore unsupported item
// types. This comes in handy when adding new types.
if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
avalanche::ProofRef proof = *pitem;
const avalanche::ProofId &proofid = proof->getId();
logVoteUpdate(u, "proof", proofid);
auto rejectionMode =
avalanche::PeerManager::RejectionMode::DEFAULT;
auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
switch (u.getStatus()) {
case avalanche::VoteStatus::Invalid:
m_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) {
pm.setInvalid(proofid);
});
// Fallthrough
case avalanche::VoteStatus::Stale:
// Invalidate mode removes the proof from all proof
// pools
rejectionMode =
avalanche::PeerManager::RejectionMode::INVALIDATE;
// Fallthrough
case avalanche::VoteStatus::Rejected:
if (!m_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) {
return pm.rejectProof(proofid,
rejectionMode);
})) {
LogPrint(BCLog::AVALANCHE,
"ERROR: Failed to reject proof: %s\n",
proofid.GetHex());
}
break;
case avalanche::VoteStatus::Finalized:
nextCooldownTimePoint += std::chrono::seconds(
m_opts.avalanche_peer_replacement_cooldown);
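                        // Fallthrough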
case avalanche::VoteStatus::Accepted:
if (!m_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) {
pm.registerProof(
proof,
avalanche::PeerManager::
RegistrationMode::FORCE_ACCEPT);
return pm.forPeer(
proofid,
[&](const avalanche::Peer &peer) {
pm.updateNextPossibleConflictTime(
peer.peerid,
nextCooldownTimePoint);
if (u.getStatus() ==
avalanche::VoteStatus::
Finalized) {
pm.setFinalized(peer.peerid);
}
// Only fail if the peer was not
// created
return true;
});
})) {
LogPrint(BCLog::AVALANCHE,
"ERROR: Failed to accept proof: %s\n",
proofid.GetHex());
}
break;
}
}
if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
shouldActivateBestChain = true;
logVoteUpdate(u, "block", pindex->GetBlockHash());
switch (u.getStatus()) {
case avalanche::VoteStatus::Invalid:
case avalanche::VoteStatus::Rejected: {
BlockValidationState state;
m_chainman.ActiveChainstate().ParkBlock(state, pindex);
if (!state.IsValid()) {
LogPrintf("ERROR: Database error: %s\n",
state.GetRejectReason());
return;
}
} break;
case avalanche::VoteStatus::Accepted: {
LOCK(cs_main);
m_chainman.ActiveChainstate().UnparkBlock(pindex);
} break;
case avalanche::VoteStatus::Finalized: {
{
LOCK(cs_main);
m_chainman.ActiveChainstate().UnparkBlock(pindex);
}
if (m_opts.avalanche_preconsensus) {
// First check if the block is cached before reading
// from disk.
auto pblock = WITH_LOCK(m_most_recent_block_mutex,
return m_most_recent_block);
if (!pblock ||
pblock->GetHash() != pindex->GetBlockHash()) {
std::shared_ptr<CBlock> pblockRead =
std::make_shared<CBlock>();
if (!m_chainman.m_blockman.ReadBlockFromDisk(
*pblockRead, *pindex)) {
assert(!"cannot load block from disk");
}
pblock = pblockRead;
}
assert(pblock);
LOCK(m_mempool.cs);
m_mempool.removeForFinalizedBlock(pblock->vtx);
}
m_chainman.ActiveChainstate().AvalancheFinalizeBlock(
pindex, *m_avalanche);
} break;
case avalanche::VoteStatus::Stale:
// Fall back on Nakamoto consensus in the absence of
// Avalanche votes for other competing or descendant
// blocks.
break;
}
}
if (!m_opts.avalanche_preconsensus) {
continue;
}
if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
const CTransactionRef tx = *pitem;
assert(tx != nullptr);
const TxId &txid = tx->GetId();
logVoteUpdate(u, "tx", txid);
switch (u.getStatus()) {
case avalanche::VoteStatus::Rejected:
break;
case avalanche::VoteStatus::Invalid: {
                        // Remove the tx from the mempool and the finalized
                        // tree, as well as all its child txs.
// FIXME Remember the tx has been invalidated so we
// don't poll for it again and again.
LOCK(m_mempool.cs);
auto it = m_mempool.GetIter(txid);
if (it.has_value()) {
m_mempool.removeRecursive(
*tx, MemPoolRemovalReason::AVALANCHE);
}
break;
}
case avalanche::VoteStatus::Accepted:
break;
case avalanche::VoteStatus::Finalized: {
LOCK(m_mempool.cs);
auto it = m_mempool.GetIter(txid);
if (!it.has_value()) {
LogPrint(BCLog::AVALANCHE,
"Error: finalized tx (%s) is not in the "
"mempool\n",
txid.ToString());
break;
}
m_mempool.setAvalancheFinalized(**it);
break;
}
case avalanche::VoteStatus::Stale:
break;
}
}
}
if (shouldActivateBestChain) {
BlockValidationState state;
if (!m_chainman.ActiveChainstate().ActivateBestChain(
state, /*pblock=*/nullptr, m_avalanche)) {
LogPrintf("failed to activate chain (%s)\n", state.ToString());
}
}
return;
}
if (msg_type == NetMsgType::AVAPROOF) {
if (!m_avalanche) {
return;
}
auto proof = RCUPtr<avalanche::Proof>::make();
vRecv >> *proof;
ReceivedAvalancheProof(pfrom, *peer, proof);
return;
}
if (msg_type == NetMsgType::GETAVAPROOFS) {
if (!m_avalanche) {
return;
}
if (peer->m_proof_relay == nullptr) {
return;
}
peer->m_proof_relay->lastSharedProofsUpdate =
GetTime<std::chrono::seconds>();
peer->m_proof_relay->sharedProofs =
m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
return pm.getShareableProofsSnapshot();
});
avalanche::CompactProofs compactProofs(
peer->m_proof_relay->sharedProofs);
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
return;
}
if (msg_type == NetMsgType::AVAPROOFS) {
if (!m_avalanche) {
return;
}
if (peer->m_proof_relay == nullptr) {
return;
}
// Only process the compact proofs if we requested them
if (!peer->m_proof_relay->compactproofs_requested) {
LogPrint(BCLog::AVALANCHE, "Ignoring unsollicited avaproofs\n");
return;
}
peer->m_proof_relay->compactproofs_requested = false;
avalanche::CompactProofs compactProofs;
try {
vRecv >> compactProofs;
} catch (std::ios_base::failure &e) {
            // These compact proofs have non-contiguous or overflowing indexes
Misbehaving(*peer, 100, "avaproofs-bad-indexes");
return;
}
// If there are prefilled proofs, process them first
std::set<uint32_t> prefilledIndexes;
for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
// If we got an invalid proof, the peer is getting banned and we
// can bail out.
return;
}
}
// If there is no shortid, avoid parsing/responding/accounting for the
// message.
if (compactProofs.getShortIDs().size() == 0) {
LogPrint(BCLog::AVALANCHE,
"Got an avaproofs message with no shortid (peer %d)\n",
pfrom.GetId());
return;
}
// To determine the chance that the number of entries in a bucket
// exceeds N, we use the fact that the number of elements in a single
// bucket is binomially distributed (with n = the number of shorttxids
// S, and p = 1 / the number of buckets), that in the worst case the
// number of buckets is equal to S (due to std::unordered_map having a
// default load factor of 1.0), and that the chance for any bucket to
// exceed N elements is at most buckets * (the chance that any given
// bucket is above N elements). Thus:
// P(max_elements_per_bucket > N) <=
// S * (1 - cdf(binomial(n=S,p=1/S), N))
        // If we assume up to 21000000 shortids, allowing 15 elements per
        // bucket should only fail once per ~2.5 million avaproofs transfers
        // (per peer and connection).
// TODO re-evaluate the bucket count to a more realistic value.
// TODO: In the case of a shortid-collision, we should request all the
// proofs which collided. For now, we only request one, which is not
// that bad considering this event is expected to be very rare.
auto shortIdProcessor =
avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
compactProofs.getShortIDs(), 15);
if (shortIdProcessor.hasOutOfBoundIndex()) {
            // This should be caught by deserialization, but check it here as
            // well for good measure.
Misbehaving(*peer, 100, "avaproofs-bad-indexes");
return;
}
if (!shortIdProcessor.isEvenlyDistributed()) {
// This is suspicious, don't ban but bail out
return;
}
std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
pm.forEachPeer([&](const avalanche::Peer &peer) {
assert(peer.proof);
uint64_t shortid = compactProofs.getShortID(peer.getProofId());
int added =
shortIdProcessor.matchKnownItem(shortid, peer.proof);
// No collision
if (added >= 0) {
// Because we know the proof, we can determine if our peer
// has it (added = 1) or not (added = 0) and update the
// remote proof status accordingly.
remoteProofsStatus.emplace_back(peer.getProofId(),
added > 0);
}
// In order to properly determine which proof is missing, we
// need to keep scanning for all our proofs.
return true;
});
});
avalanche::ProofsRequest req;
for (size_t i = 0; i < compactProofs.size(); i++) {
if (shortIdProcessor.getItem(i) == nullptr) {
req.indices.push_back(i);
}
}
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
const NodeId nodeid = pfrom.GetId();
// We want to keep a count of how many nodes we successfully requested
// avaproofs from as this is used to determine when we are confident our
// quorum is close enough to the other participants.
m_avalanche->avaproofsSent(nodeid);
// Only save remote proofs from stakers
if (WITH_LOCK(pfrom.cs_avalanche_pubkey,
return pfrom.m_avalanche_pubkey.has_value())) {
m_avalanche->withPeerManager(
[&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
for (const auto &[proofid, present] : remoteProofsStatus) {
pm.saveRemoteProof(proofid, nodeid, present);
}
});
}
return;
}
if (msg_type == NetMsgType::AVAPROOFSREQ) {
if (peer->m_proof_relay == nullptr) {
return;
}
avalanche::ProofsRequest proofreq;
vRecv >> proofreq;
auto requestedIndiceIt = proofreq.indices.begin();
uint32_t treeIndice = 0;
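        // Walk the shared proofs radix tree in leaf order; each requested
        // index refers to a position in this traversal, and the request's
        // indices are expected to be sorted ascending.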
peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
if (requestedIndiceIt == proofreq.indices.end()) {
                // No more indices to process
return false;
}
if (treeIndice++ == *requestedIndiceIt) {
m_connman.PushMessage(
&pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
requestedIndiceIt++;
}
return true;
});
peer->m_proof_relay->sharedProofs = {};
return;
}
if (msg_type == NetMsgType::GETADDR) {
// This asymmetric behavior for inbound and outbound connections was
// introduced to prevent a fingerprinting attack: an attacker can send
// specific fake addresses to users' AddrMan and later request them by
// sending getaddr messages. Making nodes which are behind NAT and can
// only make outgoing connections ignore the getaddr message mitigates
// the attack.
if (!pfrom.IsInboundConn()) {
LogPrint(BCLog::NET,
"Ignoring \"getaddr\" from %s connection. peer=%d\n",
pfrom.ConnectionTypeAsString(), pfrom.GetId());
return;
}
// Since this must be an inbound connection, SetupAddressRelay will
// never fail.
Assume(SetupAddressRelay(pfrom, *peer));
// Only send one GetAddr response per connection to reduce resource
// waste and discourage addr stamping of INV announcements.
if (peer->m_getaddr_recvd) {
LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
pfrom.GetId());
return;
}
peer->m_getaddr_recvd = true;
peer->m_addrs_to_send.clear();
std::vector<CAddress> vAddr;
const size_t maxAddrToSend = m_opts.max_addr_to_send;
if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
/* network */ std::nullopt);
} else {
vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
MAX_PCT_ADDR_TO_SEND);
}
for (const CAddress &addr : vAddr) {
PushAddress(*peer, addr);
}
return;
}
if (msg_type == NetMsgType::GETAVAADDR) {
auto now = GetTime<std::chrono::seconds>();
if (now < pfrom.m_nextGetAvaAddr) {
// Prevent a peer from exhausting our resources by spamming
// getavaaddr messages.
LogPrint(BCLog::AVALANCHE,
"Ignoring repeated getavaaddr from peer %d\n",
pfrom.GetId());
return;
}
// Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
if (!SetupAddressRelay(pfrom, *peer)) {
LogPrint(BCLog::AVALANCHE,
"Ignoring getavaaddr message from %s peer=%d\n",
pfrom.ConnectionTypeAsString(), pfrom.GetId());
return;
}
auto availabilityScoreComparator = [](const CNode *lhs,
const CNode *rhs) {
double scoreLhs = lhs->getAvailabilityScore();
double scoreRhs = rhs->getAvailabilityScore();
if (scoreLhs != scoreRhs) {
return scoreLhs > scoreRhs;
}
return lhs < rhs;
};
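// Note: the final pointer comparison gives the comparator a strict weak
// ordering. std::set treats two keys as equivalent (and keeps only one)
// when neither compares less than the other, so without the tie-break,
// peers sharing an availability score would displace each other. A
// minimal illustration of that pitfall:
//
//     std::set<int, std::function<bool(int, int)>> s(
//         [](int, int) { return false; }); // everything "equivalent"
//     s.insert(1);
//     s.insert(2);
//     // s.size() == 1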
// Get up to m_opts.max_addr_to_send addresses of the nodes which are the
// most active in the avalanche network. Account for 0 availability as
// well so we can send addresses even if we did not start polling yet.
std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
availabilityScoreComparator);
m_connman.ForEachNode([&](const CNode *pnode) {
if (!pnode->m_avalanche_enabled ||
pnode->getAvailabilityScore() < 0.) {
return;
}
avaNodes.insert(pnode);
if (avaNodes.size() > m_opts.max_addr_to_send) {
avaNodes.erase(std::prev(avaNodes.end()));
}
});
peer->m_addrs_to_send.clear();
for (const CNode *pnode : avaNodes) {
PushAddress(*peer, pnode->addr);
}
return;
}
if (msg_type == NetMsgType::MEMPOOL) {
if (!(peer->m_our_services & NODE_BLOOM) &&
!pfrom.HasPermission(NetPermissionFlags::Mempool)) {
if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
LogPrint(BCLog::NET,
"mempool request with bloom filters disabled, "
"disconnect peer=%d\n",
pfrom.GetId());
pfrom.fDisconnect = true;
}
return;
}
if (m_connman.OutboundTargetReached(false) &&
!pfrom.HasPermission(NetPermissionFlags::Mempool)) {
if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
LogPrint(BCLog::NET,
"mempool request with bandwidth limit reached, "
"disconnect peer=%d\n",
pfrom.GetId());
pfrom.fDisconnect = true;
}
return;
}
if (auto tx_relay = peer->GetTxRelay()) {
LOCK(tx_relay->m_tx_inventory_mutex);
tx_relay->m_send_mempool = true;
}
return;
}
if (msg_type == NetMsgType::PING) {
if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
uint64_t nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful
// features:
//
// 1) A remote node can quickly check if the connection is
// operational.
// 2) Remote nodes can measure the latency of the network thread. If
// this node is overloaded it won't respond to pings quickly and the
// remote node can avoid sending us more work, like chain download
// requests.
//
// The nonce stops the remote getting confused between different
// pings: without it, if the remote node sends a ping once per
// second and this node takes 5 seconds to respond to each, the 5th
// ping the remote sends would appear to return very quickly.
m_connman.PushMessage(&pfrom,
msgMaker.Make(NetMsgType::PONG, nonce));
}
return;
}
if (msg_type == NetMsgType::PONG) {
const auto ping_end = time_received;
uint64_t nonce = 0;
size_t nAvail = vRecv.in_avail();
bool bPingFinished = false;
std::string sProblem;
if (nAvail >= sizeof(nonce)) {
vRecv >> nonce;
// Only process pong message if there is an outstanding ping (old
// ping without nonce should never pong)
if (peer->m_ping_nonce_sent != 0) {
if (nonce == peer->m_ping_nonce_sent) {
// Matching pong received, this ping is no longer
// outstanding
bPingFinished = true;
const auto ping_time = ping_end - peer->m_ping_start.load();
if (ping_time.count() >= 0) {
// Let connman know about this successful ping-pong
pfrom.PongReceived(ping_time);
} else {
// This should never happen
sProblem = "Timing mishap";
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
// This is most likely a bug in another implementation
// somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
}
} else {
sProblem = "Unsolicited pong without ping";
}
} else {
// This is most likely a bug in another implementation somewhere;
// cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
if (!(sProblem.empty())) {
LogPrint(BCLog::NET,
"pong peer=%d: %s, %x expected, %x received, %u bytes\n",
pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
nAvail);
}
if (bPingFinished) {
peer->m_ping_nonce_sent = 0;
}
return;
}
if (msg_type == NetMsgType::FILTERLOAD) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogPrint(BCLog::NET,
"filterload received despite not offering bloom services "
"from peer=%d; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
CBloomFilter filter;
vRecv >> filter;
if (!filter.IsWithinSizeConstraints()) {
// There is no excuse for sending a too-large filter
Misbehaving(*peer, 100, "too-large bloom filter");
} else if (auto tx_relay = peer->GetTxRelay()) {
{
LOCK(tx_relay->m_bloom_filter_mutex);
tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
tx_relay->m_relay_txs = true;
}
pfrom.m_bloom_filter_loaded = true;
}
return;
}
if (msg_type == NetMsgType::FILTERADD) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogPrint(BCLog::NET,
"filteradd received despite not offering bloom services "
"from peer=%d; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
std::vector<uint8_t> vData;
vRecv >> vData;
// Nodes must NEVER send a data item > 520 bytes (the max size for a
// script data object, and thus, the maximum size any matched object can
// have) in a filteradd message.
bool bad = false;
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true;
} else if (auto tx_relay = peer->GetTxRelay()) {
LOCK(tx_relay->m_bloom_filter_mutex);
if (tx_relay->m_bloom_filter) {
tx_relay->m_bloom_filter->insert(vData);
} else {
bad = true;
}
}
if (bad) {
// The structure of this code doesn't really allow for a good error
// code. We'll go generic.
Misbehaving(*peer, 100, "bad filteradd message");
}
return;
}
if (msg_type == NetMsgType::FILTERCLEAR) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogPrint(BCLog::NET,
"filterclear received despite not offering bloom services "
"from peer=%d; disconnecting\n",
pfrom.GetId());
pfrom.fDisconnect = true;
return;
}
auto tx_relay = peer->GetTxRelay();
if (!tx_relay) {
return;
}
{
LOCK(tx_relay->m_bloom_filter_mutex);
tx_relay->m_bloom_filter = nullptr;
tx_relay->m_relay_txs = true;
}
pfrom.m_bloom_filter_loaded = false;
pfrom.m_relays_txs = true;
return;
}
if (msg_type == NetMsgType::FEEFILTER) {
Amount newFeeFilter = Amount::zero();
vRecv >> newFeeFilter;
if (MoneyRange(newFeeFilter)) {
if (auto tx_relay = peer->GetTxRelay()) {
tx_relay->m_fee_filter_received = newFeeFilter;
}
LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
}
return;
}
if (msg_type == NetMsgType::GETCFILTERS) {
ProcessGetCFilters(pfrom, *peer, vRecv);
return;
}
if (msg_type == NetMsgType::GETCFHEADERS) {
ProcessGetCFHeaders(pfrom, *peer, vRecv);
return;
}
if (msg_type == NetMsgType::GETCFCHECKPT) {
ProcessGetCFCheckPt(pfrom, *peer, vRecv);
return;
}
if (msg_type == NetMsgType::NOTFOUND) {
std::vector<CInv> vInv;
vRecv >> vInv;
// A peer might send up to 1 notfound per getdata request, but no more
if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
TX_REQUEST_PARAMS.max_peer_announcements +
MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
for (CInv &inv : vInv) {
if (inv.IsMsgTx()) {
// If we receive a NOTFOUND message for a tx we requested,
// mark the announcement for it as completed in
// InvRequestTracker.
LOCK(::cs_main);
m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
continue;
}
if (inv.IsMsgProof()) {
LOCK(cs_proofrequest);
m_proofrequest.ReceivedResponse(
pfrom.GetId(), avalanche::ProofId(inv.hash));
}
}
}
return;
}
// Ignore unknown commands for extensibility
LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
SanitizeString(msg_type), pfrom.GetId());
return;
}
bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
{
LOCK(peer.m_misbehavior_mutex);
// There's nothing to do if the m_should_discourage flag isn't set
if (!peer.m_should_discourage) {
return false;
}
peer.m_should_discourage = false;
} // peer.m_misbehavior_mutex
if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
// We never disconnect or discourage peers for bad behavior if they have
// NetPermissionFlags::NoBan permission
LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
return false;
}
if (pnode.IsManualConn()) {
// We never disconnect or discourage manual peers for bad behavior
LogPrintf("Warning: not punishing manually connected peer %d!\n",
peer.m_id);
return false;
}
if (pnode.addr.IsLocal()) {
// We disconnect local peers for bad behavior but don't discourage
// (since that would discourage all peers on the same local address)
LogPrint(BCLog::NET,
"Warning: disconnecting but not discouraging %s peer %d!\n",
pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
pnode.fDisconnect = true;
return true;
}
// Normal case: Disconnect the peer and discourage all nodes sharing the
// address
LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
peer.m_id);
if (m_banman) {
m_banman->Discourage(pnode.addr);
}
m_connman.DisconnectNode(pnode.addr);
return true;
}
bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
std::atomic<bool> &interruptMsgProc) {
AssertLockHeld(g_msgproc_mutex);
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
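// For illustration only (this is not how the codebase declares it): the
// 24-byte header described above could be modelled as
//
//     struct MessageHeader {
//         uint8_t start[4];   // network magic
//         char command[12];   // NUL-padded command name
//         uint32_t size;      // payload length
//         uint32_t checksum;  // first 4 bytes of double-SHA256 of payload
//     };
//
// followed by `size` bytes of payload; the checks below reject messages
// whose magic, header fields, or checksum don't hold up.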
bool fMoreWork = false;
PeerRef peer = GetPeerRef(pfrom->GetId());
if (peer == nullptr) {
return false;
}
{
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) {
ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
}
}
const bool processed_orphan = ProcessOrphanTx(config, *peer);
if (pfrom->fDisconnect) {
return false;
}
if (processed_orphan) {
return true;
}
// This maintains the order of responses and prevents m_getdata_requests
// from growing unbounded
{
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) {
return true;
}
}
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend) {
return false;
}
std::list<CNetMessage> msgs;
{
LOCK(pfrom->cs_vProcessMsg);
if (pfrom->vProcessMsg.empty()) {
return false;
}
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg,
pfrom->vProcessMsg.begin());
pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
pfrom->fPauseRecv =
pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
fMoreWork = !pfrom->vProcessMsg.empty();
}
CNetMessage &msg(msgs.front());
TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
msg.m_recv.size(), msg.m_recv.data());
if (m_opts.capture_messages) {
CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
/*is_incoming=*/true);
}
msg.SetVersion(pfrom->GetCommonVersion());
// Check network magic
if (!msg.m_valid_netmagic) {
LogPrint(BCLog::NET,
"PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
SanitizeString(msg.m_type), pfrom->GetId());
// Make sure we discourage the source of this message for some time.
if (m_banman) {
m_banman->Discourage(pfrom->addr);
}
m_connman.DisconnectNode(pfrom->addr);
pfrom->fDisconnect = true;
return false;
}
// Check header
if (!msg.m_valid_header) {
LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
SanitizeString(msg.m_type), pfrom->GetId());
return fMoreWork;
}
// Checksum
CDataStream &vRecv = msg.m_recv;
if (!msg.m_valid_checksum) {
LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
__func__, SanitizeString(msg.m_type), msg.m_message_size,
pfrom->GetId());
if (m_banman) {
m_banman->Discourage(pfrom->addr);
}
m_connman.DisconnectNode(pfrom->addr);
return fMoreWork;
}
try {
ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
interruptMsgProc);
if (interruptMsgProc) {
return false;
}
{
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) {
fMoreWork = true;
}
}
// Does this peer have an orphan ready to reconsider?
// (Note: we may have provided a parent for an orphan provided by
// another peer that was already processed; in that case, the extra work
// may not be noticed, possibly resulting in an unnecessary 100ms delay)
if (m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
return orphanage.HaveTxToReconsider(peer->m_id);
})) {
fMoreWork = true;
}
} catch (const std::exception &e) {
LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
__func__, SanitizeString(msg.m_type), msg.m_message_size,
e.what(), typeid(e).name());
} catch (...) {
LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
__func__, SanitizeString(msg.m_type), msg.m_message_size);
}
return fMoreWork;
}
void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
std::chrono::seconds time_in_seconds) {
AssertLockHeld(cs_main);
CNodeState &state = *State(pto.GetId());
const CNetMsgMaker msgMaker(pto.GetCommonVersion());
if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
state.fSyncStarted) {
// This is an outbound peer subject to disconnection if they don't
// announce a block with as much work as the current tip within
// CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
// chain has more work than ours, we should sync to it, unless it's
// invalid, in which case we should find that out and disconnect from
// them elsewhere).
if (state.pindexBestKnownBlock != nullptr &&
state.pindexBestKnownBlock->nChainWork >=
m_chainman.ActiveChain().Tip()->nChainWork) {
if (state.m_chain_sync.m_timeout != 0s) {
state.m_chain_sync.m_timeout = 0s;
state.m_chain_sync.m_work_header = nullptr;
state.m_chain_sync.m_sent_getheaders = false;
}
} else if (state.m_chain_sync.m_timeout == 0s ||
(state.m_chain_sync.m_work_header != nullptr &&
state.pindexBestKnownBlock != nullptr &&
state.pindexBestKnownBlock->nChainWork >=
state.m_chain_sync.m_work_header->nChainWork)) {
// Our best block known by this peer is behind our tip, and we're
// either noticing that for the first time, OR this peer was able to
// catch up to some earlier point where we checked against our tip.
// Either way, set a new timeout based on current tip.
state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
state.m_chain_sync.m_sent_getheaders = false;
} else if (state.m_chain_sync.m_timeout > 0s &&
time_in_seconds > state.m_chain_sync.m_timeout) {
// No evidence yet that our peer has synced to a chain with work
// equal to that of our tip, when we first detected it was behind.
// Send a single getheaders message to give the peer a chance to
// update us.
if (state.m_chain_sync.m_sent_getheaders) {
// They've run out of time to catch up!
LogPrintf(
"Disconnecting outbound peer %d for old chain, best known "
"block = %s\n",
pto.GetId(),
state.pindexBestKnownBlock != nullptr
? state.pindexBestKnownBlock->GetBlockHash().ToString()
: "<none>");
pto.fDisconnect = true;
} else {
assert(state.m_chain_sync.m_work_header);
// Here, we assume that the getheaders message goes out,
// because it'll either go out or be skipped because of a
// getheaders in-flight already, in which case the peer should
// still respond to us with a sufficiently high work chain tip.
MaybeSendGetHeaders(
pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
peer);
LogPrint(
BCLog::NET,
"sending getheaders to outbound peer=%d to verify chain "
"work (current best known block:%s, benchmark blockhash: "
"%s)\n",
pto.GetId(),
state.pindexBestKnownBlock != nullptr
? state.pindexBestKnownBlock->GetBlockHash().ToString()
: "<none>",
state.m_chain_sync.m_work_header->GetBlockHash()
.ToString());
state.m_chain_sync.m_sent_getheaders = true;
// Bump the timeout to allow a response, which could clear the
// timeout (if the response shows the peer has synced), reset
// the timeout (if the peer syncs to the required work but not
// to our tip), or result in disconnect (if we advance to the
// timeout and pindexBestKnownBlock has not sufficiently
// progressed)
state.m_chain_sync.m_timeout =
time_in_seconds + HEADERS_RESPONSE_TIME;
}
}
}
}
void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
// If we have any extra block-relay-only peers, disconnect the youngest
// unless it's given us a block -- in which case, compare with the
// second-youngest, and out of those two, disconnect the peer who least
// recently gave us a block.
// The youngest block-relay-only peer would be the extra peer we connected
// to temporarily in order to sync our tip; see net.cpp.
// Note that we use higher nodeid as a measure for most recent connection.
if (m_connman.GetExtraBlockRelayCount() > 0) {
std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
next_youngest_peer{-1, 0};
m_connman.ForEachNode([&](CNode *pnode) {
if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
return;
}
if (pnode->GetId() > youngest_peer.first) {
next_youngest_peer = youngest_peer;
youngest_peer.first = pnode->GetId();
youngest_peer.second = pnode->m_last_block_time;
}
});
NodeId to_disconnect = youngest_peer.first;
if (youngest_peer.second > next_youngest_peer.second) {
// Our newest block-relay-only peer gave us a block more recently;
// disconnect our second youngest.
to_disconnect = next_youngest_peer.first;
}
m_connman.ForNode(
to_disconnect,
[&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
// Make sure we're not getting a block right now, and that we've
// been connected long enough for this eviction to happen at
// all. Note that we only request blocks from a peer if we learn
// of a valid headers chain with at least as much work as our
// tip.
CNodeState *node_state = State(pnode->GetId());
if (node_state == nullptr ||
(now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
node_state->nBlocksInFlight == 0)) {
pnode->fDisconnect = true;
LogPrint(BCLog::NET,
"disconnecting extra block-relay-only peer=%d "
"(last block received at time %d)\n",
pnode->GetId(),
count_seconds(pnode->m_last_block_time));
return true;
} else {
LogPrint(
BCLog::NET,
"keeping block-relay-only peer=%d chosen for eviction "
"(connect time: %d, blocks_in_flight: %d)\n",
pnode->GetId(), count_seconds(pnode->m_connected),
node_state->nBlocksInFlight);
}
return false;
});
}
// Check whether we have too many OUTBOUND_FULL_RELAY peers
if (m_connman.GetExtraFullOutboundCount() <= 0) {
return;
}
// If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
// Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
// block, with ties broken by choosing the more recent connection (higher
// node id)
NodeId worst_peer = -1;
int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
::cs_main) {
AssertLockHeld(::cs_main);
// Only consider OUTBOUND_FULL_RELAY peers that are not already marked
// for disconnection
if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
return;
}
CNodeState *state = State(pnode->GetId());
if (state == nullptr) {
// shouldn't be possible, but just in case
return;
}
// Don't evict our protected peers
if (state->m_chain_sync.m_protect) {
return;
}
if (state->m_last_block_announcement < oldest_block_announcement ||
(state->m_last_block_announcement == oldest_block_announcement &&
pnode->GetId() > worst_peer)) {
worst_peer = pnode->GetId();
oldest_block_announcement = state->m_last_block_announcement;
}
});
if (worst_peer == -1) {
return;
}
bool disconnected = m_connman.ForNode(
worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
// Only disconnect a peer that has been connected to us for some
// reasonable fraction of our check-frequency, to give it time for
// new information to have arrived. Also don't disconnect any peer
// we're trying to download a block from.
CNodeState &state = *State(pnode->GetId());
if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
state.nBlocksInFlight == 0) {
LogPrint(BCLog::NET,
"disconnecting extra outbound peer=%d (last block "
"announcement received at time %d)\n",
pnode->GetId(), oldest_block_announcement);
pnode->fDisconnect = true;
return true;
} else {
LogPrint(BCLog::NET,
"keeping outbound peer=%d chosen for eviction "
"(connect time: %d, blocks_in_flight: %d)\n",
pnode->GetId(), count_seconds(pnode->m_connected),
state.nBlocksInFlight);
return false;
}
});
if (disconnected) {
// If we disconnected an extra peer, that means we successfully
// connected to at least one peer after the last time we detected a
// stale tip. Don't try any more extra peers until we next detect a
// stale tip, to limit the load we put on the network from these extra
// connections.
m_connman.SetTryNewOutboundPeer(false);
}
}
void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
LOCK(cs_main);
auto now{GetTime<std::chrono::seconds>()};
EvictExtraOutboundPeers(now);
if (now > m_stale_tip_check_time) {
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer.
if (!m_chainman.m_blockman.LoadingBlocks() &&
m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
TipMayBeStale()) {
LogPrintf("Potential stale tip detected, will try using extra "
"outbound peer (last tip update: %d seconds ago)\n",
count_seconds(now - m_last_tip_update.load()));
m_connman.SetTryNewOutboundPeer(true);
} else if (m_connman.GetTryNewOutboundPeer()) {
m_connman.SetTryNewOutboundPeer(false);
}
m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
}
if (!m_initial_sync_finished && CanDirectFetch()) {
m_connman.StartExtraBlockRelayPeers();
m_initial_sync_finished = true;
}
}
void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
std::chrono::microseconds now) {
if (m_connman.ShouldRunInactivityChecks(
node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
peer.m_ping_nonce_sent &&
now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
// The ping timeout is using mocktime. To disable the check during
// testing, increase -peertimeout.
LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
0.000001 * count_microseconds(now - peer.m_ping_start.load()),
peer.m_id);
node_to.fDisconnect = true;
return;
}
const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
bool pingSend = false;
if (peer.m_ping_queued) {
// RPC ping request by user
pingSend = true;
}
if (peer.m_ping_nonce_sent == 0 &&
now > peer.m_ping_start.load() + PING_INTERVAL) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
uint64_t nonce;
do {
nonce = GetRand<uint64_t>();
} while (nonce == 0);
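// The loop above never hands out nonce 0 because 0 doubles as the "no
// ping outstanding" sentinel: the PONG handler only matches against a
// non-zero m_ping_nonce_sent, and the field is reset to 0 once a ping
// completes.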
peer.m_ping_queued = false;
peer.m_ping_start = now;
if (node_to.GetCommonVersion() > BIP0031_VERSION) {
peer.m_ping_nonce_sent = nonce;
m_connman.PushMessage(&node_to,
msgMaker.Make(NetMsgType::PING, nonce));
} else {
// Peer is too old to support ping command with nonce, pong will
// never arrive.
peer.m_ping_nonce_sent = 0;
m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
}
}
}
void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
std::chrono::microseconds current_time) {
// Nothing to do for non-address-relay peers
if (!peer.m_addr_relay_enabled) {
return;
}
LOCK(peer.m_addr_send_times_mutex);
if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
peer.m_next_local_addr_send < current_time) {
// If we've sent before, clear the bloom filter for the peer, so
// that our self-announcement will actually go out. This might
// be unnecessary if the bloom filter has already rolled over
// since our last self-announcement, but there is only a small
// bandwidth cost that we can incur by doing this (which happens
// once a day on average).
if (peer.m_next_local_addr_send != 0us) {
peer.m_addr_known->reset();
}
if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
CAddress local_addr{*local_service, peer.m_our_services,
AdjustedTime()};
PushAddress(peer, local_addr);
}
peer.m_next_local_addr_send = GetExponentialRand(
current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
// We sent an `addr` message to this peer recently. Nothing more to do.
if (current_time <= peer.m_next_addr_send) {
return;
}
peer.m_next_addr_send =
GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
const size_t max_addr_to_send = m_opts.max_addr_to_send;
if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
// Should be impossible since we always check size before adding to
// m_addrs_to_send. Recover by trimming the vector.
peer.m_addrs_to_send.resize(max_addr_to_send);
}
// Remove addr records that the peer already knows about, and add new
// addrs to the m_addr_known filter on the same pass.
auto addr_already_known =
[&peer](const CAddress &addr)
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
bool ret = peer.m_addr_known->contains(addr.GetKey());
if (!ret) {
peer.m_addr_known->insert(addr.GetKey());
}
return ret;
};
peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
peer.m_addrs_to_send.end(),
addr_already_known),
peer.m_addrs_to_send.end());
// No addr messages to send
if (peer.m_addrs_to_send.empty()) {
return;
}
const char *msg_type;
int make_flags;
if (peer.m_wants_addrv2) {
msg_type = NetMsgType::ADDRV2;
make_flags = ADDRV2_FORMAT;
} else {
msg_type = NetMsgType::ADDR;
make_flags = 0;
}
m_connman.PushMessage(
&node, CNetMsgMaker(node.GetCommonVersion())
.Make(make_flags, msg_type, peer.m_addrs_to_send));
peer.m_addrs_to_send.clear();
// we only send the big addr message once
if (peer.m_addrs_to_send.capacity() > 40) {
peer.m_addrs_to_send.shrink_to_fit();
}
}
void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
// Delay sending SENDHEADERS (BIP 130) until we're done with an
// initial-headers-sync with this peer. Receiving headers announcements for
// new blocks while trying to sync their headers chain is problematic,
// because of the state tracking done.
if (!peer.m_sent_sendheaders &&
node.GetCommonVersion() >= SENDHEADERS_VERSION) {
LOCK(cs_main);
CNodeState &state = *State(node.GetId());
if (state.pindexBestKnownBlock != nullptr &&
state.pindexBestKnownBlock->nChainWork >
m_chainman.MinimumChainWork()) {
// Tell our peer we prefer to receive headers rather than inv's
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion())
.Make(NetMsgType::SENDHEADERS));
peer.m_sent_sendheaders = true;
}
}
}
void PeerManagerImpl::MaybeSendFeefilter(
CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
if (m_opts.ignore_incoming_txs) {
return;
}
if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
return;
}
// peers with the forcerelay permission should not filter txs to us
if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
return;
}
// Don't send feefilter messages to outbound block-relay-only peers since
// they should never announce transactions to us, regardless of feefilter
// state.
if (pto.IsBlockOnlyConn()) {
return;
}
Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
// Received tx-inv messages are discarded when the active
// chainstate is in IBD, so tell the peer to not send them.
currentFilter = MAX_MONEY;
} else {
static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
if (peer.m_fee_filter_sent == MAX_FILTER) {
// Send the current filter if we sent MAX_FILTER previously
// and made it out of IBD.
peer.m_next_send_feefilter = 0us;
}
}
if (current_time > peer.m_next_send_feefilter) {
Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
// We always have a fee filter of at least the min relay fee
filterToSend =
std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
if (filterToSend != peer.m_fee_filter_sent) {
m_connman.PushMessage(
&pto, CNetMsgMaker(pto.GetCommonVersion())
.Make(NetMsgType::FEEFILTER, filterToSend));
peer.m_fee_filter_sent = filterToSend;
}
peer.m_next_send_feefilter =
GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than
// MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
// broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
peer.m_next_send_feefilter &&
(currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
peer.m_next_send_feefilter =
current_time + GetRandomDuration<std::chrono::microseconds>(
MAX_FEEFILTER_CHANGE_DELAY);
}
}
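// Worked example of the early-send band above (amounts hypothetical): if
// the last filter we sent was 1000 sat/kB, a new mempool minimum below 750
// (3/4) or above ~1333 (4/3) counts as a substantial change and pulls the
// next broadcast forward to within MAX_FEEFILTER_CHANGE_DELAY rather than
// waiting out the full randomized interval.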
namespace {
class CompareInvMempoolOrder {
CTxMemPool *mp;
public:
explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
/**
* As std::make_heap produces a max-heap, we want the entries which
* are topologically earlier to sort later.
*/
return mp->CompareTopologically(*b, *a);
}
};
} // namespace
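// A minimal, self-contained illustration of the heap trick above: with a
// "greater-than" comparator, std::make_heap surfaces the *smallest*
// element, so popping yields ascending order. Likewise, making
// topologically-earlier txs compare "later" means each std::pop_heap in
// SendMessages hands back the earliest remaining transaction.
//
//     std::vector<int> v{3, 1, 2};
//     auto laterFirst = [](int a, int b) { return a > b; };
//     std::make_heap(v.begin(), v.end(), laterFirst);
//     std::pop_heap(v.begin(), v.end(), laterFirst);
//     // v.back() == 1: the "earliest" element pops first.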
bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
// We don't participate in addr relay with outbound block-relay-only
// connections to prevent providing adversaries with the additional
// information of addr traffic to infer the link.
if (node.IsBlockOnlyConn()) {
return false;
}
if (!peer.m_addr_relay_enabled.exchange(true)) {
// First addr message we have received from the peer, initialize
// m_addr_known
peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
}
return true;
}
bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
AssertLockHeld(g_msgproc_mutex);
PeerRef peer = GetPeerRef(pto->GetId());
if (!peer) {
return false;
}
const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
// We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
// disconnect misbehaving peers even before the version handshake is
// complete.
if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
return true;
}
// Don't send anything until the version handshake is complete
if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
return true;
}
// If we get here, the outgoing message serialization version is set and
// can't change.
const CNetMsgMaker msgMaker(pto->GetCommonVersion());
const auto current_time{GetTime<std::chrono::microseconds>()};
if (pto->IsAddrFetchConn() &&
current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
LogPrint(BCLog::NET,
"addrfetch connection timeout; disconnecting peer=%d\n",
pto->GetId());
pto->fDisconnect = true;
return true;
}
MaybeSendPing(*pto, *peer, current_time);
// MaybeSendPing may have marked peer for disconnection
if (pto->fDisconnect) {
return true;
}
bool sync_blocks_and_headers_from_peer = false;
MaybeSendAddr(*pto, *peer, current_time);
MaybeSendSendHeaders(*pto, *peer);
{
LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
// Start block sync
if (m_chainman.m_best_header == nullptr) {
m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
}
// Determine whether we might try initial headers sync or parallel
// block download from this peer -- this mostly affects behavior while
// in IBD (once out of IBD, we sync from all peers).
if (state.fPreferredDownload) {
sync_blocks_and_headers_from_peer = true;
} else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
// Typically this is an inbound peer. If we don't have any outbound
// peers, or if we aren't downloading any blocks from such peers,
// then allow block downloads from this peer, too.
// We prefer downloading blocks from outbound peers to avoid
// putting undue load on (say) some home user who is just making
// outbound connections to the network, but if our only source of
// the latest blocks is from an inbound peer, we have to be sure to
// eventually download it (and not just wait indefinitely for an
// outbound peer to have it).
if (m_num_preferred_download_peers == 0 ||
mapBlocksInFlight.empty()) {
sync_blocks_and_headers_from_peer = true;
}
}
if (!state.fSyncStarted && CanServeBlocks(*peer) &&
!m_chainman.m_blockman.LoadingBlocks()) {
// Only actively request headers from a single peer, unless we're
// close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
const CBlockIndex *pindexStart = m_chainman.m_best_header;
/**
* If possible, start at the block preceding the currently best
* known header. This ensures that we always get a non-empty
* list of headers back as long as the peer is up-to-date. With
* a non-empty response, we can initialise the peer's known best
* block. This wouldn't be possible if we requested starting at
* m_best_header and got back an empty response.
*/
if (pindexStart->pprev) {
pindexStart = pindexStart->pprev;
}
if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
LogPrint(
BCLog::NET,
"initial getheaders (%d) to peer=%d (startheight:%d)\n",
pindexStart->nHeight, pto->GetId(),
peer->m_starting_height);
state.fSyncStarted = true;
peer->m_headers_sync_timeout =
current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
(
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
// microseconds before scaling to maintain precision
std::chrono::microseconds{
HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
Ticks<std::chrono::seconds>(
GetAdjustedTime() -
m_chainman.m_best_header->Time()) /
consensusParams.nPowTargetSpacing);
nSyncStarted++;
}
}
}
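// Rough worked example of the timeout just computed, assuming the usual
// constants (15 min base, 1 ms per expected header) and the 10-minute
// target spacing: a best header 100 days behind now implies roughly
// 8,640,000 s / 600 s = 14,400 missing headers, so the peer gets about
// 15 min + 14.4 s before the headers-sync timeout logic below may kick
// in.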
//
// Try sending block announcements via headers
//
{
// If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
// hashes we're relaying, and our peer wants headers announcements,
// then find the first header not yet known to our peer but would
// connect, and send. If no header would connect, or if we have too
// many blocks, or if the peer doesn't want headers, just add all to
// the inv queue.
LOCK(peer->m_block_inv_mutex);
std::vector<CBlock> vHeaders;
bool fRevertToInv =
((!peer->m_prefers_headers &&
(!state.m_requested_hb_cmpctblocks ||
peer->m_blocks_for_headers_relay.size() > 1)) ||
peer->m_blocks_for_headers_relay.size() >
MAX_BLOCKS_TO_ANNOUNCE);
// last header queued for delivery
const CBlockIndex *pBestIndex = nullptr;
// ensure pindexBestKnownBlock is up-to-date
ProcessBlockAvailability(pto->GetId());
if (!fRevertToInv) {
bool fFoundStartingHeader = false;
// Try to find the first header that our peer doesn't have, and
// then send all headers past that one. If we come across any
// headers that aren't on m_chainman.ActiveChain(), give up.
for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
const CBlockIndex *pindex =
m_chainman.m_blockman.LookupBlockIndex(hash);
assert(pindex);
if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
break;
}
if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
// This means that the blocks to announce don't connect to
// each other. This shouldn't really be
// possible to hit during regular operation (because
// reorgs should take us to a chain that has some block
// not on the prior chain, which should be caught by the
// prior check), but one way this could happen is by
// using invalidateblock / reconsiderblock repeatedly on
// the tip, causing it to be added multiple times to
// m_blocks_for_headers_relay. Robustly deal with this
// rare situation by reverting to an inv.
fRevertToInv = true;
break;
}
pBestIndex = pindex;
if (fFoundStartingHeader) {
// add this to the headers message
vHeaders.push_back(pindex->GetBlockHeader());
} else if (PeerHasHeader(&state, pindex)) {
// Keep looking for the first new block.
continue;
} else if (pindex->pprev == nullptr ||
PeerHasHeader(&state, pindex->pprev)) {
// Peer doesn't have this header but they do have the
// prior one. Start sending headers.
fFoundStartingHeader = true;
vHeaders.push_back(pindex->GetBlockHeader());
} else {
// Peer doesn't have this header or the prior one --
// nothing will connect, so bail out.
fRevertToInv = true;
break;
}
}
}
if (!fRevertToInv && !vHeaders.empty()) {
if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
// We only send up to 1 block as header-and-ids; more than
// that probably means we're doing an initial-ish sync or
// they're slow.
LogPrint(BCLog::NET,
"%s sending header-and-ids %s to peer=%d\n",
__func__, vHeaders.front().GetHash().ToString(),
pto->GetId());
std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
{
LOCK(m_most_recent_block_mutex);
if (m_most_recent_block_hash ==
pBestIndex->GetBlockHash()) {
cached_cmpctblock_msg =
msgMaker.Make(NetMsgType::CMPCTBLOCK,
*m_most_recent_compact_block);
}
}
if (cached_cmpctblock_msg.has_value()) {
m_connman.PushMessage(
pto, std::move(cached_cmpctblock_msg.value()));
} else {
CBlock block;
const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(
block, *pBestIndex)};
assert(ret);
CBlockHeaderAndShortTxIDs cmpctblock(block);
m_connman.PushMessage(
pto,
msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
}
state.pindexBestHeaderSent = pBestIndex;
} else if (peer->m_prefers_headers) {
if (vHeaders.size() > 1) {
LogPrint(BCLog::NET,
"%s: %u headers, range (%s, %s), to peer=%d\n",
__func__, vHeaders.size(),
vHeaders.front().GetHash().ToString(),
vHeaders.back().GetHash().ToString(),
pto->GetId());
} else {
LogPrint(BCLog::NET,
"%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(),
pto->GetId());
}
m_connman.PushMessage(
pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
state.pindexBestHeaderSent = pBestIndex;
} else {
fRevertToInv = true;
}
}
if (fRevertToInv) {
// If falling back to using an inv, just try to inv the tip. The
// last entry in m_blocks_for_headers_relay was our tip at some
// point in the past.
if (!peer->m_blocks_for_headers_relay.empty()) {
const BlockHash &hashToAnnounce =
peer->m_blocks_for_headers_relay.back();
const CBlockIndex *pindex =
m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
assert(pindex);
// Warn if we're announcing a block that is not on the main
// chain. This should be very rare and could be optimized
// out. Just log for now.
if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
LogPrint(
BCLog::NET,
"Announcing block %s not on main chain (tip=%s)\n",
hashToAnnounce.ToString(),
m_chainman.ActiveChain()
.Tip()
->GetBlockHash()
.ToString());
}
// If the peer's chain has this block, don't inv it back.
if (!PeerHasHeader(&state, pindex)) {
peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
LogPrint(BCLog::NET,
"%s: sending inv peer=%d hash=%s\n", __func__,
pto->GetId(), hashToAnnounce.ToString());
}
}
}
peer->m_blocks_for_headers_relay.clear();
}
} // release cs_main
//
// Message: inventory
//
std::vector<CInv> vInv;
auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
vInv.emplace_back(type, hash);
if (vInv.size() == MAX_INV_SZ) {
m_connman.PushMessage(
pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
vInv.clear();
}
};
{
LOCK(cs_main);
{
LOCK(peer->m_block_inv_mutex);
vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
INVENTORY_BROADCAST_MAX_PER_MB *
config.GetMaxBlockSize() /
1000000));
// Add blocks
for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
addInvAndMaybeFlush(MSG_BLOCK, hash);
}
peer->m_blocks_for_inv_relay.clear();
}
auto computeNextInvSendTime =
[&](std::chrono::microseconds &next) -> bool {
bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
if (next < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
next = NextInvToInbounds(
current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
// Skip delay for outbound peers, as there is less privacy
// concern for them.
next = current_time;
}
}
return fSendTrickle;
};
// Add proofs to inventory
if (peer->m_proof_relay != nullptr) {
LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
if (computeNextInvSendTime(
peer->m_proof_relay->m_next_inv_send_time)) {
auto it =
peer->m_proof_relay->m_proof_inventory_to_send.begin();
while (it !=
peer->m_proof_relay->m_proof_inventory_to_send.end()) {
const avalanche::ProofId proofid = *it;
it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
it);
if (peer->m_proof_relay->m_proof_inventory_known_filter
.contains(proofid)) {
continue;
}
peer->m_proof_relay->m_proof_inventory_known_filter.insert(
proofid);
addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
peer->m_proof_relay->m_recently_announced_proofs.insert(
proofid);
}
}
}
if (auto tx_relay = peer->GetTxRelay()) {
LOCK(tx_relay->m_tx_inventory_mutex);
// Check whether periodic sends should happen
const bool fSendTrickle =
computeNextInvSendTime(tx_relay->m_next_inv_send_time);
// Time to send but the peer has requested we not relay
// transactions.
if (fSendTrickle) {
LOCK(tx_relay->m_bloom_filter_mutex);
if (!tx_relay->m_relay_txs) {
tx_relay->m_tx_inventory_to_send.clear();
}
}
// Respond to BIP35 mempool requests
if (fSendTrickle && tx_relay->m_send_mempool) {
auto vtxinfo = m_mempool.infoAll();
tx_relay->m_send_mempool = false;
const CFeeRate filterrate{
tx_relay->m_fee_filter_received.load()};
LOCK(tx_relay->m_bloom_filter_mutex);
for (const auto &txinfo : vtxinfo) {
const TxId &txid = txinfo.tx->GetId();
tx_relay->m_tx_inventory_to_send.erase(txid);
// Don't send transactions that peers will not put into
// their mempool
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
continue;
}
if (tx_relay->m_bloom_filter &&
!tx_relay->m_bloom_filter->IsRelevantAndUpdate(
*txinfo.tx)) {
continue;
}
tx_relay->m_tx_inventory_known_filter.insert(txid);
// Responses to MEMPOOL requests bypass the
// m_recently_announced_invs filter.
addInvAndMaybeFlush(MSG_TX, txid);
}
tx_relay->m_last_mempool_req =
std::chrono::duration_cast<std::chrono::seconds>(
current_time);
}
// Determine transactions to relay
if (fSendTrickle) {
// Produce a vector with all candidates for sending
std::vector<std::set<TxId>::iterator> vInvTx;
vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
for (std::set<TxId>::iterator it =
tx_relay->m_tx_inventory_to_send.begin();
it != tx_relay->m_tx_inventory_to_send.end(); it++) {
vInvTx.push_back(it);
}
const CFeeRate filterrate{
tx_relay->m_fee_filter_received.load()};
// Send out the inventory in the order of admission to our
// mempool, which is guaranteed to be a topological sort order.
// A heap is used so that not all items need sorting if only a
// few are being sent.
CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
std::make_heap(vInvTx.begin(), vInvTx.end(),
compareInvMempoolOrder);
// No reason to drain out at many times the network's
// capacity, especially since we have many peers and some
// will draw much shorter delays.
unsigned int nRelayedTransactions = 0;
LOCK(tx_relay->m_bloom_filter_mutex);
while (!vInvTx.empty() &&
nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
config.GetMaxBlockSize() /
1000000) {
// Fetch the top element from the heap
std::pop_heap(vInvTx.begin(), vInvTx.end(),
compareInvMempoolOrder);
std::set<TxId>::iterator it = vInvTx.back();
vInvTx.pop_back();
const TxId txid = *it;
// Remove it from the to-be-sent set
tx_relay->m_tx_inventory_to_send.erase(it);
// Check if not in the filter already
if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
continue;
}
// Not in the mempool anymore? don't bother sending it.
auto txinfo = m_mempool.info(txid);
if (!txinfo.tx) {
continue;
}
// Peer told us not to send transactions at that
// feerate? Don't bother sending it.
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
continue;
}
if (tx_relay->m_bloom_filter &&
!tx_relay->m_bloom_filter->IsRelevantAndUpdate(
*txinfo.tx)) {
continue;
}
// Send
tx_relay->m_recently_announced_invs.insert(txid);
addInvAndMaybeFlush(MSG_TX, txid);
nRelayedTransactions++;
{
// Expire old relay messages
while (!g_relay_expiration.empty() &&
g_relay_expiration.front().first <
current_time) {
mapRelay.erase(g_relay_expiration.front().second);
g_relay_expiration.pop_front();
}
auto ret = mapRelay.insert(
std::make_pair(txid, std::move(txinfo.tx)));
if (ret.second) {
g_relay_expiration.push_back(std::make_pair(
current_time + RELAY_TX_CACHE_TIME, ret.first));
}
}
tx_relay->m_tx_inventory_known_filter.insert(txid);
}
}
}
} // release cs_main
if (!vInv.empty()) {
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
}
{
LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
// Detect whether we're stalling
auto stalling_timeout = m_block_stalling_timeout.load();
if (state.m_stalling_since.count() &&
state.m_stalling_since < current_time - stalling_timeout) {
// Stalling only triggers when the block download window cannot
// move. During normal steady state, the download window should be
// much larger than the to-be-downloaded set of blocks, so
// disconnection should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n",
pto->GetId());
pto->fDisconnect = true;
// Increase timeout for the next peer so that we don't disconnect
// multiple peers if our own bandwidth is insufficient.
const auto new_timeout =
std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
if (stalling_timeout != new_timeout &&
m_block_stalling_timeout.compare_exchange_strong(
stalling_timeout, new_timeout)) {
LogPrint(
BCLog::NET,
"Increased stalling timeout temporarily to %d seconds\n",
count_seconds(new_timeout));
}
return true;
}
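// The doubling above is a capped exponential backoff (up to
// BLOCK_STALLING_TIMEOUT_MAX), so a genuinely slow downstream link
// raises the bar for the next peer instead of cascading into serial
// disconnections.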
// In case there is a block that has been in flight from this peer for
// block_interval * (1 + 0.5 * N) (with N the number of peers from which
// we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our
// own downstream link being saturated. We only count validated
// in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads =
m_peers_downloading_from - 1;
if (current_time >
state.m_downloading_since +
std::chrono::seconds{consensusParams.nPowTargetSpacing} *
(BLOCK_DOWNLOAD_TIMEOUT_BASE +
BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, "
"disconnecting\n",
queuedBlock.pindex->GetBlockHash().ToString(),
pto->GetId());
pto->fDisconnect = true;
return true;
}
}
// Check for headers sync timeouts
if (state.fSyncStarted &&
peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
if (current_time > peer->m_headers_sync_timeout &&
nSyncStarted == 1 &&
(m_num_preferred_download_peers -
state.fPreferredDownload >=
1)) {
// Disconnect a peer (without NetPermissionFlags::NoBan
// permission) if it is our only sync peer, and we have
// others we could be using instead. Note: If all our peers
// are inbound, then we won't disconnect our sync peer for
// stalling; we have bigger problems if we can't get any
// outbound peers.
if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
LogPrintf("Timeout downloading headers from peer=%d, "
"disconnecting\n",
pto->GetId());
pto->fDisconnect = true;
return true;
} else {
LogPrintf("Timeout downloading headers from noban "
"peer=%d, not disconnecting\n",
pto->GetId());
// Reset the headers sync state so that we have a chance
// to try downloading from a different peer. Note: this
// will also result in at least one more getheaders
// message to be sent to this peer (eventually).
state.fSyncStarted = false;
nSyncStarted--;
peer->m_headers_sync_timeout = 0us;
}
}
} else {
// After we've caught up once, reset the timeout so we can't
// trigger disconnect later.
peer->m_headers_sync_timeout = std::chrono::microseconds::max();
}
}
// Check that outbound peers have reasonable chains. GetTime() is used
// by this anti-DoS logic so we can test it using mocktime.
ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
} // release cs_main
std::vector<CInv> vGetData;
//
// Message: getdata (blocks)
//
{
LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
if (CanServeBlocks(*peer) &&
((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
!m_chainman.ActiveChainstate().IsInitialBlockDownload()) &&
state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<const CBlockIndex *> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(),
MAX_BLOCKS_IN_TRANSIT_PER_PEER -
state.nBlocksInFlight,
vToDownload, staller);
for (const CBlockIndex *pindex : vToDownload) {
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
BlockRequested(config, pto->GetId(), *pindex);
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
pindex->GetBlockHash().ToString(), pindex->nHeight,
pto->GetId());
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->m_stalling_since == 0us) {
State(staller)->m_stalling_since = current_time;
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
}
} // release cs_main
auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
CInv inv(type, hash);
LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
pto->GetId());
vGetData.push_back(std::move(inv));
if (vGetData.size() >= MAX_GETDATA_SZ) {
m_connman.PushMessage(
pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
vGetData.clear();
}
};
//
// Message: getdata (proof)
//
{
LOCK(cs_proofrequest);
std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
auto requestable =
m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
for (const auto &entry : expired) {
LogPrint(BCLog::AVALANCHE,
"timeout of inflight proof %s from peer=%d\n",
entry.second.ToString(), entry.first);
}
for (const auto &proofid : requestable) {
if (!AlreadyHaveProof(proofid)) {
addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
m_proofrequest.RequestedData(
pto->GetId(), proofid,
current_time + PROOF_REQUEST_PARAMS.getdata_interval);
} else {
// We have already seen this proof, no need to download.
// This is just belt-and-suspenders, as ForgetInvId should
// already have been called whenever a proof becomes
// AlreadyHaveProof().
m_proofrequest.ForgetInvId(proofid);
}
}
} // release cs_proofrequest
//
// Message: getdata (transactions)
//
{
LOCK(cs_main);
std::vector<std::pair<NodeId, TxId>> expired;
auto requestable =
m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
for (const auto &entry : expired) {
LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
entry.second.ToString(), entry.first);
}
for (const TxId &txid : requestable) {
// Exclude m_recent_rejects_package_reconsiderable: we may be
// requesting a missing parent that was previously rejected for
// being too low feerate.
if (!AlreadyHaveTx(txid, /*include_reconsiderable=*/false)) {
addGetDataAndMaybeFlush(MSG_TX, txid);
m_txrequest.RequestedData(
pto->GetId(), txid,
current_time + TX_REQUEST_PARAMS.getdata_interval);
} else {
// We have already seen this transaction, no need to download.
// This is just belt-and-suspenders, as ForgetInvId should already
// have been called whenever a transaction becomes AlreadyHaveTx().
m_txrequest.ForgetInvId(txid);
}
}
if (!vGetData.empty()) {
m_connman.PushMessage(pto,
msgMaker.Make(NetMsgType::GETDATA, vGetData));
}
} // release cs_main
MaybeSendFeefilter(*pto, *peer, current_time);
return true;
}
bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
const avalanche::ProofRef &proof) {
assert(proof != nullptr);
const avalanche::ProofId &proofid = proof->getId();
AddKnownProof(peer, proofid);
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
// We cannot reliably verify proofs during IBD, so bail out early and
// keep the inventory as pending so it can be requested when the node
// has synced.
return true;
}
const NodeId nodeid = node.GetId();
const bool isStaker = WITH_LOCK(node.cs_avalanche_pubkey,
return node.m_avalanche_pubkey.has_value());
auto saveProofIfStaker = [this, isStaker](const CNode &node,
const avalanche::ProofId &proofid,
const NodeId nodeid) -> bool {
if (isStaker) {
return m_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) {
return pm.saveRemoteProof(proofid, nodeid, true);
});
}
return false;
};
{
LOCK(cs_proofrequest);
m_proofrequest.ReceivedResponse(nodeid, proofid);
if (AlreadyHaveProof(proofid)) {
m_proofrequest.ForgetInvId(proofid);
saveProofIfStaker(node, proofid, nodeid);
return true;
}
}
// registerProof should not be called while holding cs_proofrequest
// because it locks cs_main, and that creates a potential deadlock during
// shutdown
avalanche::ProofRegistrationState state;
if (m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
return pm.registerProof(proof, state);
})) {
WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
RelayProof(proofid);
node.m_last_proof_time = GetTime<std::chrono::seconds>();
LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
nodeid, proofid.ToString());
}
if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
m_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
Misbehaving(peer, 100, state.GetRejectReason());
return false;
}
if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
// It is possible that a proof contains a utxo we don't know about
// yet, so don't ban for this.
return false;
}
if (!m_avalanche->reconcileOrFinalize(proof)) {
LogPrint(BCLog::AVALANCHE,
"Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
state.IsValid() ? "not-worth-polling"
: state.GetRejectReason(),
nodeid, proofid.ToString());
}
saveProofIfStaker(node, proofid, nodeid);
return true;
}
diff --git a/src/validation.cpp b/src/validation.cpp
index dd0b6f479..26f3844e5 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,6936 +1,6939 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <validation.h>
#include <kernel/chainparams.h>
#include <kernel/coinstats.h>
#include <kernel/disconnected_transactions.h>
#include <kernel/mempool_entry.h>
#include <kernel/mempool_persist.h>
#include <arith_uint256.h>
#include <avalanche/avalanche.h>
#include <avalanche/processor.h>
#include <blockvalidity.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <checkqueue.h>
#include <common/args.h>
#include <config.h>
#include <consensus/activation.h>
#include <consensus/amount.h>
#include <consensus/merkle.h>
#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <hash.h>
#include <kernel/notifications_interface.h>
#include <logging.h>
#include <logging/timer.h>
#include <minerfund.h>
#include <node/blockstorage.h>
#include <node/utxo_snapshot.h>
#include <policy/block/minerfund.h>
#include <policy/block/preconsensus.h>
#include <policy/block/stakingrewards.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <pow/pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/scriptcache.h>
#include <script/sigcache.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <txdb.h>
#include <txmempool.h>
#include <undo.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/fs.h>
#include <util/fs_helpers.h>
#include <util/strencodings.h>
#include <util/string.h>
#include <util/time.h>
#include <util/trace.h>
#include <util/translation.h>
#include <validationinterface.h>
#include <warnings.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <deque>
#include <numeric>
#include <optional>
#include <string>
#include <thread>
using kernel::CCoinsStats;
using kernel::CoinStatsHashType;
using kernel::ComputeUTXOStats;
using kernel::LoadMempool;
using kernel::Notifications;
using fsbridge::FopenFn;
using node::BLOCKFILE_CHUNK_SIZE;
using node::BlockManager;
using node::BlockMap;
using node::fReindex;
using node::SnapshotMetadata;
using node::UNDOFILE_CHUNK_SIZE;
#define MICRO 0.000001
#define MILLI 0.001
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
const std::vector<std::string> CHECKLEVEL_DOC{
"level 0 reads the blocks from disk",
"level 1 verifies block validity",
"level 2 verifies undo data",
"level 3 checks disconnection of tip blocks",
"level 4 tries to reconnect the blocks",
"each level includes the checks of the previous levels",
};
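// A usage sketch for the levels above (illustrative values): they are
// selected at startup with the existing -checklevel option, e.g.
//
//   bitcoind -checklevel=3 -checkblocks=6
//
// which re-verifies the last 6 blocks up to and including disconnection of
// tip blocks (each level includes the checks of the previous levels).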
/**
* The number of blocks to keep below the deepest prune lock.
* There is nothing special about this number. It is higher than what we
* expect to see in regular mainnet reorgs, but not so high that it would
* noticeably interfere with the pruning mechanism.
*/
static constexpr int PRUNE_LOCK_BUFFER{10};
GlobalMutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
const CBlockIndex *g_best_block;
BlockValidationOptions::BlockValidationOptions(const Config &config)
: excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
checkMerkleRoot(true) {}
const CBlockIndex *
Chainstate::FindForkInGlobalIndex(const CBlockLocator &locator) const {
AssertLockHeld(cs_main);
// Find the latest block common to locator and chain - we expect that
// locator.vHave is sorted descending by height.
for (const BlockHash &hash : locator.vHave) {
const CBlockIndex *pindex{m_blockman.LookupBlockIndex(hash)};
if (pindex) {
if (m_chain.Contains(pindex)) {
return pindex;
}
if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
return m_chain.Tip();
}
}
}
return m_chain.Genesis();
}
static uint32_t GetNextBlockScriptFlags(const CBlockIndex *pindex,
const ChainstateManager &chainman);
namespace {
/**
* A helper which calculates heights of inputs of a given transaction.
*
* @param[in] tip The current chain tip. If an input belongs to a mempool
* transaction, we assume it will be confirmed in the next
* block.
* @param[in] coins Any CCoinsView that provides access to the relevant coins.
* @param[in] tx The transaction being evaluated.
*
* @returns A vector of input heights or nullopt, in case of an error.
*/
std::optional<std::vector<int>> CalculatePrevHeights(const CBlockIndex &tip,
const CCoinsView &coins,
const CTransaction &tx) {
std::vector<int> prev_heights;
prev_heights.resize(tx.vin.size());
for (size_t i = 0; i < tx.vin.size(); ++i) {
const CTxIn &txin = tx.vin[i];
Coin coin;
if (!coins.GetCoin(txin.prevout, coin)) {
LogPrintf("ERROR: %s: Missing input %d in transaction \'%s\'\n",
__func__, i, tx.GetId().GetHex());
return std::nullopt;
}
if (coin.GetHeight() == MEMPOOL_HEIGHT) {
// Assume all mempool transactions confirm in the next block.
prev_heights[i] = tip.nHeight + 1;
} else {
prev_heights[i] = coin.GetHeight();
}
}
return prev_heights;
}
} // namespace
std::optional<LockPoints> CalculateLockPointsAtTip(CBlockIndex *tip,
const CCoinsView &coins_view,
const CTransaction &tx) {
assert(tip);
auto prev_heights{CalculatePrevHeights(*tip, coins_view, tx)};
if (!prev_heights.has_value()) {
return std::nullopt;
}
CBlockIndex next_tip;
next_tip.pprev = tip;
// When SequenceLocks() is called within ConnectBlock(), the height
// of the block *being* evaluated is what is used.
// Thus if we want to know if a transaction can be part of the
// *next* block, we need to use one more than
// active_chainstate.m_chain.Height()
next_tip.nHeight = tip->nHeight + 1;
const auto [min_height, min_time] = CalculateSequenceLocks(
tx, STANDARD_LOCKTIME_VERIFY_FLAGS, prev_heights.value(), next_tip);
return LockPoints{min_height, min_time};
}
bool CheckSequenceLocksAtTip(CBlockIndex *tip, const LockPoints &lock_points) {
assert(tip != nullptr);
CBlockIndex index;
index.pprev = tip;
// CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to
// evaluate height based locks because when SequenceLocks() is called within
// ConnectBlock(), the height of the block *being* evaluated is what is
// used. Thus if we want to know if a transaction can be part of the *next*
// block, we need to use one more than active_chainstate.m_chain.Height()
index.nHeight = tip->nHeight + 1;
return EvaluateSequenceLocks(index, {lock_points.height, lock_points.time});
}
// Command-line argument "-replayprotectionactivationtime=<timestamp>" will
// cause the node to switch to replay protected SigHash ForkID value when the
// median timestamp of the previous 11 blocks is greater than or equal to
// <timestamp>. Defaults to the pre-defined timestamp when not set.
static bool IsReplayProtectionEnabled(const Consensus::Params ¶ms,
int64_t nMedianTimePast) {
return nMedianTimePast >= gArgs.GetIntArg("-replayprotectionactivationtime",
params.augustoActivationTime);
}
static bool IsReplayProtectionEnabled(const Consensus::Params ¶ms,
const CBlockIndex *pindexPrev) {
if (pindexPrev == nullptr) {
return false;
}
return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
}
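// For example (illustrative timestamp), starting the node with
//
//   bitcoind -replayprotectionactivationtime=1700000000
//
// makes both overloads above return true once the median time past of the
// previous 11 blocks reaches 1700000000.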
/**
* Checks to avoid mempool polluting consensus critical paths since cached
* signature and script validity results will be reused if we validate this
* transaction again during block validation.
*/
static bool CheckInputsFromMempoolAndCache(
const CTransaction &tx, TxValidationState &state,
const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
PrecomputedTransactionData &txdata, int &nSigChecksOut,
CCoinsViewCache &coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) {
AssertLockHeld(cs_main);
AssertLockHeld(pool.cs);
assert(!tx.IsCoinBase());
for (const CTxIn &txin : tx.vin) {
const Coin &coin = view.AccessCoin(txin.prevout);
// This coin was checked in PreChecks and MemPoolAccept
// has been holding cs_main since then.
Assume(!coin.IsSpent());
if (coin.IsSpent()) {
return false;
}
// If the Coin is available, there are 2 possibilities:
// it is available in our current ChainstateActive UTXO set,
// or it's a UTXO provided by a transaction in our mempool.
// Ensure the scriptPubKeys in Coins from CoinsView are correct.
const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
if (txFrom) {
assert(txFrom->GetId() == txin.prevout.GetTxId());
assert(txFrom->vout.size() > txin.prevout.GetN());
assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
} else {
const Coin &coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
assert(!coinFromUTXOSet.IsSpent());
assert(coinFromUTXOSet.GetTxOut() == coin.GetTxOut());
}
}
// Call CheckInputScripts() to cache signature and script validity against
// current tip consensus rules.
return CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/true,
/*scriptCacheStore=*/true, txdata, nSigChecksOut);
}
namespace {
class MemPoolAccept {
public:
MemPoolAccept(CTxMemPool &mempool, Chainstate &active_chainstate)
: m_pool(mempool), m_view(&m_dummy),
m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
m_active_chainstate(active_chainstate) {}
// We put the arguments we're handed into a struct, so we can pass them
// around easier.
struct ATMPArgs {
const Config &m_config;
const int64_t m_accept_time;
const bool m_bypass_limits;
/*
* Return any outpoints which were not previously present in the coins
* cache, but were added as a result of validating the tx for mempool
* acceptance. This allows the caller to optionally remove the cache
* additions if the associated transaction ends up being rejected by
* the mempool.
*/
std::vector<COutPoint> &m_coins_to_uncache;
const bool m_test_accept;
const unsigned int m_heightOverride;
/**
* When true, the mempool will not be trimmed when any transactions are
* submitted in Finalize(). Instead, limits should be enforced at the
* end to ensure the package is not partially submitted.
*/
const bool m_package_submission;
/**
* When true, use package feerates instead of individual transaction
* feerates for fee-based policies such as mempool min fee and min relay
* fee.
*/
const bool m_package_feerates;
/** Parameters for single transaction mempool validation. */
static ATMPArgs SingleAccept(const Config &config, int64_t accept_time,
bool bypass_limits,
std::vector<COutPoint> &coins_to_uncache,
bool test_accept,
unsigned int heightOverride) {
return ATMPArgs{
config,
accept_time,
bypass_limits,
coins_to_uncache,
test_accept,
heightOverride,
/*package_submission=*/false,
/*package_feerates=*/false,
};
}
/**
* Parameters for test package mempool validation through
* testmempoolaccept.
*/
static ATMPArgs
PackageTestAccept(const Config &config, int64_t accept_time,
std::vector<COutPoint> &coins_to_uncache) {
return ATMPArgs{
config,
accept_time,
/*bypass_limits=*/false,
coins_to_uncache,
/*test_accept=*/true,
/*height_override=*/0,
// not submitting to mempool
/*package_submission=*/false,
/*package_feerates=*/false,
};
}
/** Parameters for child-with-unconfirmed-parents package validation. */
static ATMPArgs
PackageChildWithParents(const Config &config, int64_t accept_time,
std::vector<COutPoint> &coins_to_uncache) {
return ATMPArgs{
config,
accept_time,
/*bypass_limits=*/false,
coins_to_uncache,
/*test_accept=*/false,
/*height_override=*/0,
/*package_submission=*/true,
/*package_feerates=*/true,
};
}
/** Parameters for a single transaction within a package. */
static ATMPArgs SingleInPackageAccept(const ATMPArgs &package_args) {
return ATMPArgs{
/*config=*/package_args.m_config,
/*accept_time=*/package_args.m_accept_time,
/*bypass_limits=*/false,
/*coins_to_uncache=*/package_args.m_coins_to_uncache,
/*test_accept=*/package_args.m_test_accept,
/*height_override=*/package_args.m_heightOverride,
// do not LimitMempoolSize in Finalize()
/*package_submission=*/true,
// only 1 transaction
/*package_feerates=*/false,
};
}
private:
// Private ctor to avoid exposing details to clients and allowing the
// possibility of mixing up the order of the arguments. Use static
// functions above instead.
ATMPArgs(const Config &config, int64_t accept_time, bool bypass_limits,
std::vector<COutPoint> &coins_to_uncache, bool test_accept,
unsigned int height_override, bool package_submission,
bool package_feerates)
: m_config{config}, m_accept_time{accept_time},
m_bypass_limits{bypass_limits},
m_coins_to_uncache{coins_to_uncache}, m_test_accept{test_accept},
m_heightOverride{height_override},
m_package_submission{package_submission},
m_package_feerates(package_feerates) {}
};
// Single transaction acceptance
MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef &ptx,
ATMPArgs &args)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Multiple transaction acceptance. Transactions may or may not be
* interdependent, but must not conflict with each other, and the
* transactions cannot already be in the mempool. Parents must come
* before children if any dependencies exist.
*/
PackageMempoolAcceptResult
AcceptMultipleTransactions(const std::vector<CTransactionRef> &txns,
ATMPArgs &args)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Submission of a subpackage.
* If subpackage size == 1, calls AcceptSingleTransaction() with adjusted
* ATMPArgs to avoid package policy restrictions like no CPFP carve out
* (PackageMempoolChecks), and creates a PackageMempoolAcceptResult wrapping
* the result.
*
* If subpackage size > 1, calls AcceptMultipleTransactions() with the
* provided ATMPArgs.
*
* Also cleans up all non-chainstate coins from m_view at the end.
*/
PackageMempoolAcceptResult
AcceptSubPackage(const std::vector<CTransactionRef> &subpackage,
ATMPArgs &args)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
/**
* Package (more specific than just multiple transactions) acceptance.
* Package must be a child with all of its unconfirmed parents, and
* topologically sorted.
*/
PackageMempoolAcceptResult AcceptPackage(const Package &package,
ATMPArgs &args)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
private:
// All the intermediate state that gets passed between the various levels
// of checking a given transaction.
struct Workspace {
Workspace(const CTransactionRef &ptx,
const uint32_t next_block_script_verify_flags)
: m_ptx(ptx),
m_next_block_script_verify_flags(next_block_script_verify_flags) {
}
/**
* Mempool entry constructed for this transaction.
* Constructed in PreChecks() but not inserted into the mempool until
* Finalize().
*/
std::unique_ptr<CTxMemPoolEntry> m_entry;
/**
* Virtual size of the transaction as used by the mempool, calculated
* using serialized size of the transaction and sigchecks.
*/
int64_t m_vsize;
/**
* Fees paid by this transaction: total input amounts subtracted by
* total output amounts.
*/
Amount m_base_fees;
/**
* Base fees + any fee delta set by the user with
* prioritisetransaction.
*/
Amount m_modified_fees;
/**
* If we're doing package validation (i.e. m_package_feerates=true), the
* "effective" package feerate of this transaction is the total fees
* divided by the total size of transactions (which may include its
* ancestors and/or descendants).
*/
CFeeRate m_package_feerate{Amount::zero()};
const CTransactionRef &m_ptx;
TxValidationState m_state;
/**
* A temporary cache containing serialized transaction data for
* signature verification.
* Reused across PreChecks and ConsensusScriptChecks.
*/
PrecomputedTransactionData m_precomputed_txdata;
// ABC specific flags that are used in both PreChecks and
// ConsensusScriptChecks
const uint32_t m_next_block_script_verify_flags;
int m_sig_checks_standard;
};
// Run the policy checks on a given transaction, excluding any script
// checks. Looks up inputs, calculates feerate, considers replacement,
// evaluates package limits, etc. As this function can be invoked for "free"
// by a peer, only tests that are fast should be done here (to avoid CPU
// DoS).
bool PreChecks(ATMPArgs &args, Workspace &ws)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Re-run the script checks, using consensus flags, and try to cache the
// result in the scriptcache. This should be done after
// PolicyScriptChecks(). This requires that all inputs either be in our
// utxo set or in the mempool.
bool ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Try to add the transaction to the mempool, removing any conflicts first.
// Returns true if the transaction is in the mempool after any size
// limiting is performed, false otherwise.
bool Finalize(const ATMPArgs &args, Workspace &ws)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Submit all transactions to the mempool and call ConsensusScriptChecks to
// add to the script cache - should only be called after successful
// validation of all transactions in the package.
// Does not call LimitMempoolSize(), so mempool max_size_bytes may be
// temporarily exceeded.
bool SubmitPackage(const ATMPArgs &args, std::vector<Workspace> &workspaces,
PackageValidationState &package_state,
std::map<TxId, MempoolAcceptResult> &results)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Compare a package's feerate against minimum allowed.
bool CheckFeeRate(size_t package_size, size_t package_vsize,
Amount package_fee, TxValidationState &state)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs) {
AssertLockHeld(::cs_main);
AssertLockHeld(m_pool.cs);
const Amount mempoolRejectFee =
m_pool.GetMinFee().GetFee(package_vsize);
if (mempoolRejectFee > Amount::zero() &&
package_fee < mempoolRejectFee) {
return state.Invalid(
TxValidationResult::TX_PACKAGE_RECONSIDERABLE,
"mempool min fee not met",
strprintf("%d < %d", package_fee, mempoolRejectFee));
}
// Do not change this to use virtualsize without coordinating a network
// policy upgrade.
if (package_fee < m_pool.m_min_relay_feerate.GetFee(package_size)) {
return state.Invalid(
TxValidationResult::TX_PACKAGE_RECONSIDERABLE,
"min relay fee not met",
strprintf("%d < %d", package_fee,
m_pool.m_min_relay_feerate.GetFee(package_size)));
}
return true;
}
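// Worked example for CheckFeeRate above (illustrative numbers): with
// m_min_relay_feerate at 1000 satoshis/kB, a 250-byte package must pay at
// least GetFee(250) = 1000 * 250 / 1000 = 250 satoshis; anything less fails
// with TX_PACKAGE_RECONSIDERABLE ("min relay fee not met"). The mempool min
// fee check works the same way but uses package_vsize and the dynamic
// GetMinFee() feerate.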
private:
CTxMemPool &m_pool;
CCoinsViewCache m_view;
CCoinsViewMemPool m_viewmempool;
CCoinsView m_dummy;
Chainstate &m_active_chainstate;
};
bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
const CTransactionRef &ptx = ws.m_ptx;
const CTransaction &tx = *ws.m_ptx;
const TxId &txid = ws.m_ptx->GetId();
// Copy/alias what we need out of args
const int64_t nAcceptTime = args.m_accept_time;
const bool bypass_limits = args.m_bypass_limits;
std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
const unsigned int heightOverride = args.m_heightOverride;
// Alias what we need out of ws
TxValidationState &state = ws.m_state;
// Coinbase is only valid in a block, not as a loose transaction.
if (!CheckRegularTransaction(tx, state)) {
// state filled in by CheckRegularTransaction.
return false;
}
// We'd rather not work on nonstandard transactions (unless -testnet)
std::string reason;
if (m_pool.m_require_standard &&
!IsStandardTx(tx, m_pool.m_max_datacarrier_bytes,
m_pool.m_permit_bare_multisig,
m_pool.m_dust_relay_feerate, reason)) {
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
}
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
TxValidationState ctxState;
if (!ContextualCheckTransactionForCurrentBlock(
*Assert(m_active_chainstate.m_chain.Tip()),
args.m_config.GetChainParams().GetConsensus(), tx, ctxState)) {
// We copy the state from a dummy to ensure we don't increase the
// ban score of the peer for a transaction that could be valid in the future.
return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
ctxState.GetRejectReason(),
ctxState.GetDebugMessage());
}
// Is it already in the memory pool?
if (m_pool.exists(txid)) {
return state.Invalid(TxValidationResult::TX_DUPLICATE,
"txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
for (const CTxIn &txin : tx.vin) {
- const CTransactionRef ptxConflicting =
- m_pool.GetConflictTx(txin.prevout);
- if (ptxConflicting) {
- // Disable replacement feature for good
- return state.Invalid(TxValidationResult::TX_CONFLICT,
- "txn-mempool-conflict");
+ if (const auto ptxConflicting = m_pool.GetConflictTx(txin.prevout)) {
+ if (m_pool.isAvalancheFinalized(ptxConflicting->GetId())) {
+ return state.Invalid(TxValidationResult::TX_CONFLICT,
+ "finalized-tx-conflict");
+ }
+
+ return state.Invalid(
+ TxValidationResult::TX_AVALANCHE_RECONSIDERABLE,
+ "txn-mempool-conflict");
}
}
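// Illustrative outcome of the conflict check above: if txB spends the same
// coin as mempool transaction txA, then
//   - txA is avalanche-finalized -> TX_CONFLICT ("finalized-tx-conflict"),
//     and txB is rejected for good;
//   - txA is not yet finalized   -> TX_AVALANCHE_RECONSIDERABLE
//     ("txn-mempool-conflict"), leaving room for avalanche voting to
//     reconsider txB.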
m_view.SetBackend(m_viewmempool);
const CCoinsViewCache &coins_cache = m_active_chainstate.CoinsTip();
// Do all inputs exist?
for (const CTxIn &txin : tx.vin) {
if (!coins_cache.HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
// Note: this call may add txin.prevout to the coins cache
// (coins_cache.cacheCoins) by way of FetchCoin(). It should be
// removed later (via coins_to_uncache) if this tx turns out to be
// invalid.
if (!m_view.HaveCoin(txin.prevout)) {
// Are inputs missing because we already have the tx?
for (size_t out = 0; out < tx.vout.size(); out++) {
// Optimistically just do efficient check of cache for
// outputs.
if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
return state.Invalid(TxValidationResult::TX_DUPLICATE,
"txn-already-known");
}
}
// Otherwise assume this might be an orphan tx for which we just
// haven't seen parents yet.
return state.Invalid(TxValidationResult::TX_MISSING_INPUTS,
"bad-txns-inputs-missingorspent");
}
}
// Are the actual inputs available?
if (!m_view.HaveInputs(tx)) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
"bad-txns-inputs-spent");
}
// Bring the best block into scope.
m_view.GetBestBlock();
// we have all inputs cached now, so switch back to dummy (to protect
// against bugs where we pull more inputs from disk that never get
// added to coins_to_uncache)
m_view.SetBackend(m_dummy);
assert(m_active_chainstate.m_blockman.LookupBlockIndex(
m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
// Only accept BIP68 sequence locked transactions that can be mined in
// the next block; we don't want our mempool filled up with transactions
// that can't be mined yet.
// Pass in m_view which has all of the relevant inputs cached. Note that,
// since m_view's backend was removed, it no longer pulls coins from the
// mempool.
const std::optional<LockPoints> lock_points{CalculateLockPointsAtTip(
m_active_chainstate.m_chain.Tip(), m_view, tx)};
if (!lock_points.has_value() ||
!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(),
*lock_points)) {
return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
"non-BIP68-final");
}
// The mempool holds txs for the next block, so pass height+1 to
// CheckTxInputs
if (!Consensus::CheckTxInputs(tx, state, m_view,
m_active_chainstate.m_chain.Height() + 1,
ws.m_base_fees)) {
// state filled in by CheckTxInputs
return false;
}
// Check for non-standard pay-to-script-hash in inputs
if (m_pool.m_require_standard &&
!AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD,
"bad-txns-nonstandard-inputs");
}
// ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
ws.m_modified_fees = ws.m_base_fees;
m_pool.ApplyDelta(txid, ws.m_modified_fees);
unsigned int nSize = tx.GetTotalSize();
// Validate input scripts against standard script flags.
const uint32_t scriptVerifyFlags =
ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
ws.m_precomputed_txdata = PrecomputedTransactionData{tx};
if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
ws.m_precomputed_txdata, ws.m_sig_checks_standard)) {
// State filled in by CheckInputScripts
return false;
}
ws.m_entry = std::make_unique<CTxMemPoolEntry>(
ptx, ws.m_base_fees, nAcceptTime,
heightOverride ? heightOverride : m_active_chainstate.m_chain.Height(),
ws.m_sig_checks_standard, lock_points.value());
ws.m_vsize = ws.m_entry->GetTxVirtualSize();
// No individual transactions are allowed below the min relay feerate except
// from disconnected blocks. This requirement, unlike CheckFeeRate, cannot
// be bypassed using m_package_feerates because, while a tx could be package
// CPFP'd when entering the mempool, we do not have a DoS-resistant method
// of ensuring the tx remains bumped. For example, the fee-bumping child
// could disappear due to a replacement.
if (!bypass_limits &&
ws.m_modified_fees <
m_pool.m_min_relay_feerate.GetFee(ws.m_ptx->GetTotalSize())) {
// Even though this is a fee-related failure, this result is
// TX_MEMPOOL_POLICY, not TX_PACKAGE_RECONSIDERABLE, because it cannot
// be bypassed using package validation.
return state.Invalid(
TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
strprintf("%d < %d", ws.m_modified_fees,
m_pool.m_min_relay_feerate.GetFee(nSize)));
}
// No individual transactions are allowed below the mempool min feerate
// except from disconnected blocks and transactions in a package. Package
// transactions will be checked using package feerate later.
if (!bypass_limits && !args.m_package_feerates &&
!CheckFeeRate(nSize, ws.m_vsize, ws.m_modified_fees, state)) {
return false;
}
return true;
}
bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws) {
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
const CTransaction &tx = *ws.m_ptx;
const TxId &txid = tx.GetId();
TxValidationState &state = ws.m_state;
// Check again against the next block's script verification flags
// to cache our script execution flags.
//
// This is also useful in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
// NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks (using TestBlockValidity), however allowing such
// transactions into the mempool can be exploited as a DoS attack.
int nSigChecksConsensus;
if (!CheckInputsFromMempoolAndCache(
tx, state, m_view, m_pool, ws.m_next_block_script_verify_flags,
ws.m_precomputed_txdata, nSigChecksConsensus,
m_active_chainstate.CoinsTip())) {
// This can occur under some circumstances, if the node receives an
// unrequested tx which is invalid due to new consensus rules not
// being activated yet (during IBD).
LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against "
"latest-block but not STANDARD flags %s, %s\n",
txid.ToString(), state.ToString());
return Assume(false);
}
if (ws.m_sig_checks_standard != nSigChecksConsensus) {
// We can't accept this transaction as we've used the standard count
// for the mempool/mining, but the consensus count will be enforced
// in validation (we don't want to produce bad block templates).
return error(
"%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
"standard and consensus flags in %s",
__func__, txid.ToString());
}
return true;
}
bool MemPoolAccept::Finalize(const ATMPArgs &args, Workspace &ws) {
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
const TxId &txid = ws.m_ptx->GetId();
TxValidationState &state = ws.m_state;
const bool bypass_limits = args.m_bypass_limits;
// Store transaction in memory
CTxMemPoolEntry *pentry = ws.m_entry.release();
auto entry = CTxMemPoolEntryRef::acquire(pentry);
m_pool.addUnchecked(entry);
// Trim mempool and check if tx was trimmed.
// If we are validating a package, don't trim here because we could evict a
// previous transaction in the package. LimitMempoolSize() should be called
// at the very end to make sure the mempool is still within limits and
// package submission happens atomically.
if (!args.m_package_submission && !bypass_limits) {
m_pool.LimitSize(m_active_chainstate.CoinsTip());
if (!m_pool.exists(txid)) {
// The tx no longer meets our (new) mempool minimum feerate but
// could be reconsidered in a package.
return state.Invalid(TxValidationResult::TX_PACKAGE_RECONSIDERABLE,
"mempool full");
}
}
return true;
}
// Get the coins spent by ptx from the coins_view. Assumes coins are present.
static std::vector<Coin> getSpentCoins(const CTransactionRef &ptx,
const CCoinsViewCache &coins_view) {
std::vector<Coin> spent_coins;
spent_coins.reserve(ptx->vin.size());
for (const CTxIn &input : ptx->vin) {
Coin coin;
const bool coinFound = coins_view.GetCoin(input.prevout, coin);
Assume(coinFound);
spent_coins.push_back(std::move(coin));
}
return spent_coins;
}
bool MemPoolAccept::SubmitPackage(
const ATMPArgs &args, std::vector<Workspace> &workspaces,
PackageValidationState &package_state,
std::map<TxId, MempoolAcceptResult> &results) {
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
// Sanity check: none of the transactions should be in the mempool.
assert(std::all_of(
workspaces.cbegin(), workspaces.cend(),
[this](const auto &ws) { return !m_pool.exists(ws.m_ptx->GetId()); }));
bool all_submitted = true;
// ConsensusScriptChecks adds to the script cache and is therefore
// consensus-critical; CheckInputsFromMempoolAndCache asserts that
// transactions only spend coins available from the mempool or UTXO set.
// Submit each transaction to the mempool immediately after calling
// ConsensusScriptChecks to make the outputs available for subsequent
// transactions.
for (Workspace &ws : workspaces) {
if (!ConsensusScriptChecks(args, ws)) {
results.emplace(ws.m_ptx->GetId(),
MempoolAcceptResult::Failure(ws.m_state));
// Since PreChecks() passed, this should never fail.
all_submitted = false;
package_state.Invalid(
PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! PolicyScriptChecks succeeded but "
"ConsensusScriptChecks failed: %s",
ws.m_ptx->GetId().ToString()));
}
// If we call LimitMempoolSize() for each individual Finalize(), the
// mempool will not take the transaction's descendant feerate into
// account because it hasn't seen them yet. Also, we risk evicting a
// transaction that a subsequent package transaction depends on.
// Instead, allow the mempool to temporarily bypass limits (up to the
// maximum package size) while submitting transactions individually and then
// trim at the very end.
if (!Finalize(args, ws)) {
results.emplace(ws.m_ptx->GetId(),
MempoolAcceptResult::Failure(ws.m_state));
// Since LimitMempoolSize() won't be called, this should never fail.
all_submitted = false;
package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! Adding to mempool failed: %s",
ws.m_ptx->GetId().ToString()));
}
}
// It may or may not be the case that all the transactions made it into the
// mempool. Regardless, make sure we haven't exceeded max mempool size.
m_pool.LimitSize(m_active_chainstate.CoinsTip());
std::vector<TxId> all_package_txids;
all_package_txids.reserve(workspaces.size());
std::transform(workspaces.cbegin(), workspaces.cend(),
std::back_inserter(all_package_txids),
[](const auto &ws) { return ws.m_ptx->GetId(); });
// Add successful results. The returned results may change later if
// LimitMempoolSize() evicts them.
for (Workspace &ws : workspaces) {
const auto effective_feerate =
args.m_package_feerates
? ws.m_package_feerate
: CFeeRate{ws.m_modified_fees,
static_cast<uint32_t>(ws.m_vsize)};
const auto effective_feerate_txids =
args.m_package_feerates ? all_package_txids
: std::vector<TxId>({ws.m_ptx->GetId()});
results.emplace(ws.m_ptx->GetId(),
MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
effective_feerate,
effective_feerate_txids));
GetMainSignals().TransactionAddedToMempool(
ws.m_ptx,
std::make_shared<const std::vector<Coin>>(
getSpentCoins(ws.m_ptx, m_view)),
m_pool.GetAndIncrementSequence());
}
return all_submitted;
}
MempoolAcceptResult
MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
ATMPArgs &args) {
AssertLockHeld(cs_main);
// mempool "read lock" (held through
// GetMainSignals().TransactionAddedToMempool())
LOCK(m_pool.cs);
const CBlockIndex *tip = m_active_chainstate.m_chain.Tip();
Workspace ws(ptx,
GetNextBlockScriptFlags(tip, m_active_chainstate.m_chainman));
const std::vector<TxId> single_txid{ws.m_ptx->GetId()};
// Perform the inexpensive checks first and avoid hashing and signature
// verification unless those checks pass, to mitigate CPU exhaustion
// denial-of-service attacks.
if (!PreChecks(args, ws)) {
if (ws.m_state.GetResult() ==
TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
// Failed for fee reasons. Provide the effective feerate and which
// tx was included.
return MempoolAcceptResult::FeeFailure(
ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize),
single_txid);
}
return MempoolAcceptResult::Failure(ws.m_state);
}
if (!ConsensusScriptChecks(args, ws)) {
return MempoolAcceptResult::Failure(ws.m_state);
}
const TxId txid = ptx->GetId();
// Mempool sanity check -- in our new mempool no tx can be added if its
// outputs are already spent in the mempool (that is, no children before
// parents allowed; the mempool must be consistent at all times).
//
// This means that on reorg, the disconnectpool *must* always import
// the existing mempool txs, clear the mempool, and then re-add the
// remaining txs in topological order via this function. Our new mempool
// has fast adds, so this is ok.
if (auto it = m_pool.mapNextTx.lower_bound(COutPoint{txid, 0});
it != m_pool.mapNextTx.end() && it->first->GetTxId() == txid) {
LogPrintf("%s: BUG! PLEASE REPORT THIS! Attempt to add txid %s, but "
"its outputs are already spent in the "
"mempool\n",
__func__, txid.ToString());
ws.m_state.Invalid(TxValidationResult::TX_CHILD_BEFORE_PARENT,
"txn-child-before-parent");
return MempoolAcceptResult::Failure(ws.m_state);
}
const CFeeRate effective_feerate{ws.m_modified_fees,
static_cast<uint32_t>(ws.m_vsize)};
// Tx was accepted, but not added
if (args.m_test_accept) {
return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
effective_feerate, single_txid);
}
if (!Finalize(args, ws)) {
// The only possible failure reason is fee-related (mempool full).
// Failed for fee reasons. Provide the effective feerate and which txns
// were included.
Assume(ws.m_state.GetResult() ==
TxValidationResult::TX_PACKAGE_RECONSIDERABLE);
return MempoolAcceptResult::FeeFailure(
ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), single_txid);
}
GetMainSignals().TransactionAddedToMempool(
ptx,
std::make_shared<const std::vector<Coin>>(getSpentCoins(ptx, m_view)),
m_pool.GetAndIncrementSequence());
return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
effective_feerate, single_txid);
}
PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(
const std::vector<CTransactionRef> &txns, ATMPArgs &args) {
AssertLockHeld(cs_main);
// These context-free package limits can be done before taking the mempool
// lock.
PackageValidationState package_state;
if (!CheckPackage(txns, package_state)) {
return PackageMempoolAcceptResult(package_state, {});
}
std::vector<Workspace> workspaces{};
workspaces.reserve(txns.size());
std::transform(
txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
[this](const auto &tx) {
return Workspace(
tx, GetNextBlockScriptFlags(m_active_chainstate.m_chain.Tip(),
m_active_chainstate.m_chainman));
});
std::map<TxId, MempoolAcceptResult> results;
LOCK(m_pool.cs);
// Do all PreChecks first and fail fast to avoid running expensive script
// checks when unnecessary.
std::vector<TxId> valid_txids;
for (Workspace &ws : workspaces) {
if (!PreChecks(args, ws)) {
package_state.Invalid(PackageValidationResult::PCKG_TX,
"transaction failed");
// Exit early to avoid doing pointless work. Update the failed tx
// result; the rest are unfinished.
results.emplace(ws.m_ptx->GetId(),
MempoolAcceptResult::Failure(ws.m_state));
return PackageMempoolAcceptResult(package_state,
std::move(results));
}
// Make the coins created by this transaction available for subsequent
// transactions in the package to spend.
m_viewmempool.PackageAddTransaction(ws.m_ptx);
valid_txids.push_back(ws.m_ptx->GetId());
}
// Transactions must meet two minimum feerates: the mempool minimum fee and
// min relay fee. For transactions consisting of exactly one child and its
// parents, it suffices to use the package feerate
// (total modified fees / total size or vsize) to check this requirement.
// Note that this is an aggregate feerate; this function has not checked
// whether any transactions have too low a feerate to pay for themselves,
// or whether the child transactions have a higher feerate than their
// parents. Using
// aggregate feerate may allow "parents pay for child" behavior and permit
// a child that is below mempool minimum feerate. To avoid these behaviors,
// callers of AcceptMultipleTransactions need to restrict txns topology
// (e.g. to ancestor sets) and check the feerates of individuals and
// subsets.
const auto m_total_size = std::accumulate(
workspaces.cbegin(), workspaces.cend(), int64_t{0},
[](int64_t sum, auto &ws) { return sum + ws.m_ptx->GetTotalSize(); });
const auto m_total_vsize =
std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
[](int64_t sum, auto &ws) { return sum + ws.m_vsize; });
const auto m_total_modified_fees = std::accumulate(
workspaces.cbegin(), workspaces.cend(), Amount::zero(),
[](Amount sum, auto &ws) { return sum + ws.m_modified_fees; });
const CFeeRate package_feerate(m_total_modified_fees, m_total_vsize);
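// Worked example of the aggregate feerate above (illustrative numbers): a
// parent of vsize 100 paying zero fee plus a child of vsize 200 paying
// 900 satoshis gives package_feerate = 900 / 300 = 3000 satoshis/kB, which
// masks the zero-fee parent; hence the topology caveat in the comment above.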
std::vector<TxId> all_package_txids;
all_package_txids.reserve(workspaces.size());
std::transform(workspaces.cbegin(), workspaces.cend(),
std::back_inserter(all_package_txids),
[](const auto &ws) { return ws.m_ptx->GetId(); });
TxValidationState placeholder_state;
if (args.m_package_feerates &&
!CheckFeeRate(m_total_size, m_total_vsize, m_total_modified_fees,
placeholder_state)) {
package_state.Invalid(PackageValidationResult::PCKG_TX,
"transaction failed");
return PackageMempoolAcceptResult(
package_state, {{workspaces.back().m_ptx->GetId(),
MempoolAcceptResult::FeeFailure(
placeholder_state,
CFeeRate(m_total_modified_fees, m_total_vsize),
all_package_txids)}});
}
for (Workspace &ws : workspaces) {
ws.m_package_feerate = package_feerate;
const TxId &ws_txid = ws.m_ptx->GetId();
if (args.m_test_accept &&
std::find(valid_txids.begin(), valid_txids.end(), ws_txid) !=
valid_txids.end()) {
const auto effective_feerate =
args.m_package_feerates
? ws.m_package_feerate
: CFeeRate{ws.m_modified_fees,
static_cast<uint32_t>(ws.m_vsize)};
const auto effective_feerate_txids =
args.m_package_feerates ? all_package_txids
: std::vector<TxId>{ws.m_ptx->GetId()};
// When test_accept=true, transactions that pass PreChecks
// are valid because there are no further mempool checks (passing
// PreChecks implies passing ConsensusScriptChecks).
results.emplace(ws_txid,
MempoolAcceptResult::Success(
ws.m_vsize, ws.m_base_fees, effective_feerate,
effective_feerate_txids));
}
}
if (args.m_test_accept) {
return PackageMempoolAcceptResult(package_state, std::move(results));
}
if (!SubmitPackage(args, workspaces, package_state, results)) {
// PackageValidationState filled in by SubmitPackage().
return PackageMempoolAcceptResult(package_state, std::move(results));
}
return PackageMempoolAcceptResult(package_state, std::move(results));
}
PackageMempoolAcceptResult
MemPoolAccept::AcceptSubPackage(const std::vector<CTransactionRef> &subpackage,
ATMPArgs &args) {
AssertLockHeld(::cs_main);
AssertLockHeld(m_pool.cs);
auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs) {
if (subpackage.size() > 1) {
return AcceptMultipleTransactions(subpackage, args);
}
const auto &tx = subpackage.front();
ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
const auto single_res = AcceptSingleTransaction(tx, single_args);
PackageValidationState package_state_wrapped;
if (single_res.m_result_type !=
MempoolAcceptResult::ResultType::VALID) {
package_state_wrapped.Invalid(PackageValidationResult::PCKG_TX,
"transaction failed");
}
return PackageMempoolAcceptResult(package_state_wrapped,
{{tx->GetId(), single_res}});
}();
// Clean up m_view and m_viewmempool so that other subpackage evaluations
// don't have access to coins they shouldn't. Keep some coins in order to
// minimize re-fetching coins from the UTXO set.
//
// There are 3 kinds of coins in m_view:
// (1) Temporary coins from the transactions in subpackage, constructed by
// m_viewmempool.
// (2) Mempool coins from transactions in the mempool, constructed by
// m_viewmempool.
// (3) Confirmed coins fetched from our current UTXO set.
//
// (1) Temporary coins need to be removed, regardless of whether the
// transaction was submitted. If the transaction was submitted to the
// mempool, m_viewmempool will be able to fetch them from there. If it
// wasn't submitted to mempool, it is incorrect to keep them - future calls
// may try to spend those coins that don't actually exist.
// (2) Mempool coins also need to be removed. If the mempool contents have
// changed as a result of submitting or replacing transactions, coins
// previously fetched from mempool may now be spent or nonexistent. Those
// coins need to be deleted from m_view.
// (3) Confirmed coins don't need to be removed. The chainstate has not
// changed (we are holding cs_main and no blocks have been processed) so the
// confirmed tx cannot disappear like a mempool tx can. The coin may now be
// spent after we submitted a tx to mempool, but we have already checked
// that the package does not have 2 transactions spending the same coin.
// Keeping them in m_view is an optimization to not re-fetch confirmed coins
// if we later look up inputs for this transaction again.
for (const auto &outpoint : m_viewmempool.GetNonBaseCoins()) {
// In addition to resetting m_viewmempool, we also need to manually
// delete these coins from m_view because it caches copies of the coins
// it fetched from m_viewmempool previously.
m_view.Uncache(outpoint);
}
// This deletes the temporary and mempool coins.
m_viewmempool.Reset();
return result;
}
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package &package,
ATMPArgs &args) {
AssertLockHeld(cs_main);
// Used if returning a PackageMempoolAcceptResult directly from this
// function.
PackageValidationState package_state_quit_early;
// Check that the package is well-formed. If it isn't, we won't try to
// validate any of the transactions and thus won't return any
// MempoolAcceptResults, just a package-wide error.
// Context-free package checks.
if (!CheckPackage(package, package_state_quit_early)) {
return PackageMempoolAcceptResult(package_state_quit_early, {});
}
// All transactions in the package must be a parent of the last transaction.
// This is just an opportunity for us to fail fast on a context-free check
// without taking the mempool lock.
if (!IsChildWithParents(package)) {
package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY,
"package-not-child-with-parents");
return PackageMempoolAcceptResult(package_state_quit_early, {});
}
// IsChildWithParents() guarantees the package has more than one transaction.
assert(package.size() > 1);
// The package must be 1 child with all of its unconfirmed parents. The
// package is expected to be sorted, so the last transaction is the child.
const auto &child = package.back();
std::unordered_set<TxId, SaltedTxIdHasher> unconfirmed_parent_txids;
std::transform(
package.cbegin(), package.cend() - 1,
std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
[](const auto &tx) { return tx->GetId(); });
// All child inputs must refer to a preceding package transaction or a
// confirmed UTXO. The only way to verify this is to look up the child's
// inputs in our current coins view (not including mempool), and enforce
// that all parents not present in the package be available at chain tip.
// Since this check can bring new coins into the coins cache, keep track of
// these coins and uncache them if we don't end up submitting this package
// to the mempool.
const CCoinsViewCache &coins_tip_cache = m_active_chainstate.CoinsTip();
for (const auto &input : child->vin) {
if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
args.m_coins_to_uncache.push_back(input.prevout);
}
}
// Using the MemPoolAccept m_view cache allows us to look up these same
// coins faster later. This should be connecting directly to CoinsTip, not
// to m_viewmempool, because we specifically require inputs to be confirmed
// if they aren't in the package.
m_view.SetBackend(m_active_chainstate.CoinsTip());
const auto package_or_confirmed = [this, &unconfirmed_parent_txids](
const auto &input) {
return unconfirmed_parent_txids.count(input.prevout.GetTxId()) > 0 ||
m_view.HaveCoin(input.prevout);
};
if (!std::all_of(child->vin.cbegin(), child->vin.cend(),
package_or_confirmed)) {
package_state_quit_early.Invalid(
PackageValidationResult::PCKG_POLICY,
"package-not-child-with-unconfirmed-parents");
return PackageMempoolAcceptResult(package_state_quit_early, {});
}
// Protect against bugs where we pull more inputs from disk that never get
// added to coins_to_uncache. The backend will be connected again when
// needed in PreChecks.
m_view.SetBackend(m_dummy);
LOCK(m_pool.cs);
// Stores results from which we will create the returned
// PackageMempoolAcceptResult. A result may be changed if a mempool
// transaction is evicted later due to LimitMempoolSize().
std::map<TxId, MempoolAcceptResult> results_final;
// Results from individual validation which will be returned if no other
// result is available for this transaction. "Nonfinal" because if a
// transaction fails by itself but succeeds later (i.e. when evaluated with
// a fee-bumping child), the result in this map may be discarded.
std::map<TxId, MempoolAcceptResult> individual_results_nonfinal;
bool quit_early{false};
std::vector<CTransactionRef> txns_package_eval;
for (const auto &tx : package) {
const auto &txid = tx->GetId();
// An already confirmed tx is treated as one not in mempool, because all
// we know is that the inputs aren't available.
if (m_pool.exists(txid)) {
// Exact transaction already exists in the mempool.
// Node operators are free to set their mempool policies however
// they please, nodes may receive transactions in different orders,
// and malicious counterparties may try to take advantage of policy
// differences to pin or delay propagation of transactions. As such,
// it's possible for some package transaction(s) to already be in
// the mempool, and we don't want to reject the entire package in
// that case (as that could be a censorship vector). De-duplicate
// the transactions that are already in the mempool, and only call
// AcceptMultipleTransactions() with the new transactions. This
// ensures we don't double-count transaction counts and sizes when
// checking ancestor/descendant limits, or double-count transaction
// fees for fee-related policy.
auto iter = m_pool.GetIter(txid);
assert(iter != std::nullopt);
results_final.emplace(txid, MempoolAcceptResult::MempoolTx(
(*iter.value())->GetTxSize(),
(*iter.value())->GetFee()));
} else {
// Transaction does not already exist in the mempool.
// Try submitting the transaction on its own.
const auto single_package_res = AcceptSubPackage({tx}, args);
const auto &single_res = single_package_res.m_tx_results.at(txid);
if (single_res.m_result_type ==
MempoolAcceptResult::ResultType::VALID) {
// The transaction succeeded on its own and is now in the
// mempool. Don't include it in package validation, because its
// fees should only be "used" once.
assert(m_pool.exists(txid));
results_final.emplace(txid, single_res);
} else if (single_res.m_state.GetResult() !=
TxValidationResult::TX_PACKAGE_RECONSIDERABLE &&
single_res.m_state.GetResult() !=
TxValidationResult::TX_MISSING_INPUTS) {
// Package validation policy only differs from individual policy
// in its evaluation of feerate. For example, if a transaction
// fails here due to violation of a consensus rule, the result
// will not change when it is submitted as part of a package. To
// minimize the amount of repeated work, unless the transaction
// fails due to feerate or missing inputs (its parent is a
// previous transaction in the package that failed due to
// feerate), don't run package validation. Note that this
// decision might not make sense if different types of packages
// are allowed in the future. Continue individually validating
// the rest of the transactions, because some of them may still
// be valid.
quit_early = true;
package_state_quit_early.Invalid(
PackageValidationResult::PCKG_TX, "transaction failed");
individual_results_nonfinal.emplace(txid, single_res);
} else {
individual_results_nonfinal.emplace(txid, single_res);
txns_package_eval.push_back(tx);
}
}
}
auto multi_submission_result =
quit_early || txns_package_eval.empty()
? PackageMempoolAcceptResult(package_state_quit_early, {})
: AcceptSubPackage(txns_package_eval, args);
PackageValidationState &package_state_final =
multi_submission_result.m_state;
// Make sure we haven't exceeded max mempool size.
// Package transactions that were submitted to mempool or already in mempool
// may be evicted.
m_pool.LimitSize(m_active_chainstate.CoinsTip());
for (const auto &tx : package) {
const auto &txid = tx->GetId();
if (multi_submission_result.m_tx_results.count(txid) > 0) {
// We shouldn't have re-submitted if the tx result was already in
// results_final.
Assume(results_final.count(txid) == 0);
// If it was submitted, check to see if the tx is still in the
// mempool. It could have been evicted due to LimitMempoolSize()
// above.
const auto &txresult =
multi_submission_result.m_tx_results.at(txid);
if (txresult.m_result_type ==
MempoolAcceptResult::ResultType::VALID &&
!m_pool.exists(txid)) {
package_state_final.Invalid(PackageValidationResult::PCKG_TX,
"transaction failed");
TxValidationState mempool_full_state;
mempool_full_state.Invalid(
TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
results_final.emplace(
txid, MempoolAcceptResult::Failure(mempool_full_state));
} else {
results_final.emplace(txid, txresult);
}
} else if (const auto final_it{results_final.find(txid)};
final_it != results_final.end()) {
// Already-in-mempool transaction. Check to see if it's still there,
// as it could have been evicted when LimitMempoolSize() was called.
Assume(final_it->second.m_result_type !=
MempoolAcceptResult::ResultType::INVALID);
Assume(individual_results_nonfinal.count(txid) == 0);
if (!m_pool.exists(tx->GetId())) {
package_state_final.Invalid(PackageValidationResult::PCKG_TX,
"transaction failed");
TxValidationState mempool_full_state;
mempool_full_state.Invalid(
TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
// Replace the previous result.
results_final.erase(txid);
results_final.emplace(
txid, MempoolAcceptResult::Failure(mempool_full_state));
}
} else if (const auto non_final_it{
individual_results_nonfinal.find(txid)};
non_final_it != individual_results_nonfinal.end()) {
Assume(non_final_it->second.m_result_type ==
MempoolAcceptResult::ResultType::INVALID);
// Interesting result from previous processing.
results_final.emplace(txid, non_final_it->second);
}
}
Assume(results_final.size() == package.size());
return PackageMempoolAcceptResult(package_state_final,
std::move(results_final));
}
} // namespace
MempoolAcceptResult AcceptToMemoryPool(Chainstate &active_chainstate,
const CTransactionRef &tx,
int64_t accept_time, bool bypass_limits,
bool test_accept,
unsigned int heightOverride) {
AssertLockHeld(::cs_main);
assert(active_chainstate.GetMempool() != nullptr);
CTxMemPool &pool{*active_chainstate.GetMempool()};
std::vector<COutPoint> coins_to_uncache;
auto args = MemPoolAccept::ATMPArgs::SingleAccept(
active_chainstate.m_chainman.GetConfig(), accept_time, bypass_limits,
coins_to_uncache, test_accept, heightOverride);
const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate)
.AcceptSingleTransaction(tx, args);
if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
// Remove coins that were not present in the coins cache before calling
// AcceptSingleTransaction(); this is to prevent memory DoS in case we get a large
// number of invalid transactions that attempt to overrun the in-memory
// coins cache
// (`CCoinsViewCache::cacheCoins`).
for (const COutPoint &outpoint : coins_to_uncache) {
active_chainstate.CoinsTip().Uncache(outpoint);
}
}
// After we've (potentially) uncached entries, ensure our coins cache is
// still within its size limits
BlockValidationState stateDummy;
active_chainstate.FlushStateToDisk(stateDummy, FlushStateMode::PERIODIC);
return result;
}
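// A minimal call-site sketch (hypothetical; assumes cs_main is held and the
// chainstate has a mempool). With test_accept=true the transaction is
// validated but not submitted:
//
//   const auto result = AcceptToMemoryPool(
//       active_chainstate, tx, GetTime(),
//       /*bypass_limits=*/false, /*test_accept=*/true, /*heightOverride=*/0);
//   if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
//       // tx would be accepted by the mempool
//   }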
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate,
CTxMemPool &pool,
const Package &package,
bool test_accept) {
AssertLockHeld(cs_main);
assert(!package.empty());
assert(std::all_of(package.cbegin(), package.cend(),
[](const auto &tx) { return tx != nullptr; }));
const Config &config = active_chainstate.m_chainman.GetConfig();
std::vector<COutPoint> coins_to_uncache;
const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
if (test_accept) {
auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(
config, GetTime(), coins_to_uncache);
return MemPoolAccept(pool, active_chainstate)
.AcceptMultipleTransactions(package, args);
} else {
auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(
config, GetTime(), coins_to_uncache);
return MemPoolAccept(pool, active_chainstate)
.AcceptPackage(package, args);
}
}();
// Uncache coins pertaining to transactions that were not submitted to the
// mempool.
if (test_accept || result.m_state.IsInvalid()) {
for (const COutPoint &hashTx : coins_to_uncache) {
active_chainstate.CoinsTip().Uncache(hashTx);
}
}
// Ensure the coins cache is still within limits.
BlockValidationState state_dummy;
active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
return result;
}
Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
// Force block reward to zero when right shift is undefined.
if (halvings >= 64) {
return Amount::zero();
}
Amount nSubsidy = 50 * COIN;
// Subsidy is cut in half every 210,000 blocks which will occur
// approximately every 4 years.
return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
}
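// Worked example: with nSubsidyHalvingInterval = 210,000, a block at height
// 420,000 has halvings = 2, so the subsidy is
// (50 * COIN / SATOSHI) >> 2 = 12.5 coins; once halvings reaches 64 the
// reward is forced to zero because the right shift would be undefined.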
CoinsViews::CoinsViews(DBParams db_params, CoinsViewOptions options)
: m_dbview{std::move(db_params), std::move(options)},
m_catcherview(&m_dbview) {}
void CoinsViews::InitCache() {
AssertLockHeld(::cs_main);
m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
Chainstate::Chainstate(CTxMemPool *mempool, BlockManager &blockman,
ChainstateManager &chainman,
std::optional<BlockHash> from_snapshot_blockhash)
: m_mempool(mempool), m_blockman(blockman), m_chainman(chainman),
m_from_snapshot_blockhash(from_snapshot_blockhash) {}
void Chainstate::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
bool should_wipe, std::string leveldb_name) {
if (m_from_snapshot_blockhash) {
leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX;
}
m_coins_views = std::make_unique<CoinsViews>(
DBParams{.path = m_chainman.m_options.datadir / leveldb_name,
.cache_bytes = cache_size_bytes,
.memory_only = in_memory,
.wipe_data = should_wipe,
.obfuscate = true,
.options = m_chainman.m_options.coins_db},
m_chainman.m_options.coins_view);
}
void Chainstate::InitCoinsCache(size_t cache_size_bytes) {
AssertLockHeld(::cs_main);
assert(m_coins_views != nullptr);
m_coinstip_cache_size_bytes = cache_size_bytes;
m_coins_views->InitCache();
}
// Note that though this is marked const, we may end up modifying
// `m_cached_finished_ibd`, which is a performance-related implementation
// detail. This function must be marked `const` so that `CValidationInterface`
// clients (which are given a `const Chainstate*`) can call it.
//
bool Chainstate::IsInitialBlockDownload() const {
// Optimization: pre-test latch before taking the lock.
if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
return false;
}
LOCK(cs_main);
if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
return false;
}
if (m_chainman.m_blockman.LoadingBlocks()) {
return true;
}
if (m_chain.Tip() == nullptr) {
return true;
}
if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) {
return true;
}
if (m_chain.Tip()->Time() <
Now<NodeSeconds>() - m_chainman.m_options.max_tip_age) {
return true;
}
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
m_cached_finished_ibd.store(true, std::memory_order_relaxed);
return false;
}
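// The function above is a double-checked latch; a generic sketch of the
// pattern (hypothetical names, not part of this file):
//
//   std::atomic<bool> latch{false};
//   bool IsDone() {
//       if (latch.load(std::memory_order_relaxed)) return true; // fast path
//       LOCK(cs);                   // slow path: recheck under the lock
//       if (latch.load(std::memory_order_relaxed)) return true;
//       if (!ComputeDone()) return false;
//       latch.store(true, std::memory_order_relaxed); // latches one-way
//       return true;
//   }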
void Chainstate::CheckForkWarningConditions() {
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before finishing our initial
// sync)
if (IsInitialBlockDownload()) {
return;
}
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one
// mines it) of our head, or if it is back on the active chain, drop it
if (m_best_fork_tip && (m_chain.Height() - m_best_fork_tip->nHeight >= 72 ||
m_chain.Contains(m_best_fork_tip))) {
m_best_fork_tip = nullptr;
}
if (m_best_fork_tip ||
(m_chainman.m_best_invalid &&
m_chainman.m_best_invalid->nChainWork >
m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6))) {
if (!GetfLargeWorkForkFound() && m_best_fork_base) {
std::string warning =
std::string("'Warning: Large-work fork detected, forking after "
"block ") +
m_best_fork_base->phashBlock->ToString() + std::string("'");
m_chainman.GetNotifications().warning(warning);
}
if (m_best_fork_tip && m_best_fork_base) {
LogPrintf("%s: Warning: Large fork found\n forking the "
"chain at height %d (%s)\n lasting to height %d "
"(%s).\nChain state database corruption likely.\n",
__func__, m_best_fork_base->nHeight,
m_best_fork_base->phashBlock->ToString(),
m_best_fork_tip->nHeight,
m_best_fork_tip->phashBlock->ToString());
SetfLargeWorkForkFound(true);
} else {
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
"longer than our best chain.\nChain state database "
"corruption likely.\n",
__func__);
SetfLargeWorkInvalidChainFound(true);
}
} else {
SetfLargeWorkForkFound(false);
SetfLargeWorkInvalidChainFound(false);
}
}
void Chainstate::CheckForkWarningConditionsOnNewFork(
CBlockIndex *pindexNewForkTip) {
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag.
const CBlockIndex *pfork = m_chain.FindFork(pindexNewForkTip);
// We define the fork condition that warrants warning the user as one of at
// least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
// it) of ours. We use 7 blocks rather arbitrarily as it represents just
// under 10% of sustained network hash rate operating on the fork, or a
// chain that is entirely longer than ours and invalid (note that this
// should be detected by both). We define it this way because it allows us
// to only store the highest fork tip (+ base) which meets the 7-block
// condition and from this always have the most-likely-to-cause-warning fork.
if (pfork &&
(!m_best_fork_tip ||
pindexNewForkTip->nHeight > m_best_fork_tip->nHeight) &&
pindexNewForkTip->nChainWork - pfork->nChainWork >
(GetBlockProof(*pfork) * 7) &&
m_chain.Height() - pindexNewForkTip->nHeight < 72) {
m_best_fork_tip = pindexNewForkTip;
m_best_fork_base = pfork;
}
CheckForkWarningConditions();
}
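// Illustrative sketch: the numeric tests above, expressed as a standalone
// predicate. The helper name is hypothetical; the thresholds are the same
// ones used in the code it follows.
#if 0
bool QualifiesAsWarnableFork(const CBlockIndex *fork_base,
                             const CBlockIndex *fork_tip, int active_height) {
    // At least 7 blocks worth of work accumulated past the fork point...
    const bool enough_work = fork_tip->nChainWork - fork_base->nChainWork >
                             GetBlockProof(*fork_base) * 7;
    // ...and a tip within 72 blocks (~12 hours) of our active tip.
    const bool recent = active_height - fork_tip->nHeight < 72;
    return enough_work && recent;
}
#endif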
// Called both upon regular invalid block discovery *and* InvalidateBlock
void Chainstate::InvalidChainFound(CBlockIndex *pindexNew) {
AssertLockHeld(cs_main);
if (!m_chainman.m_best_invalid ||
pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
m_chainman.m_best_invalid = pindexNew;
}
if (m_chainman.m_best_header != nullptr &&
m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) ==
pindexNew) {
m_chainman.m_best_header = m_chain.Tip();
}
// If the invalid chain found is supposed to be finalized, we need to move
// back the finalization point.
if (IsBlockAvalancheFinalized(pindexNew)) {
LOCK(cs_avalancheFinalizedBlockIndex);
m_avalancheFinalizedBlockIndex = pindexNew->pprev;
}
LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n",
__func__, pindexNew->GetBlockHash().ToString(),
pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble()) / log(2.0),
FormatISO8601DateTime(pindexNew->GetBlockTime()));
CBlockIndex *tip = m_chain.Tip();
assert(tip);
LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n",
__func__, tip->GetBlockHash().ToString(), m_chain.Height(),
log(tip->nChainWork.getdouble()) / log(2.0),
FormatISO8601DateTime(tip->GetBlockTime()));
}
// Same as InvalidChainFound, above, except not called directly from
// InvalidateBlock, which does its own setBlockIndexCandidates management.
void Chainstate::InvalidBlockFound(CBlockIndex *pindex,
const BlockValidationState &state) {
AssertLockHeld(cs_main);
if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus = pindex->nStatus.withFailed();
m_chainman.m_failed_blocks.insert(pindex);
m_blockman.m_dirty_blockindex.insert(pindex);
InvalidChainFound(pindex);
}
}
void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight) {
// Mark inputs spent.
if (tx.IsCoinBase()) {
return;
}
txundo.vprevout.reserve(tx.vin.size());
for (const CTxIn &txin : tx.vin) {
txundo.vprevout.emplace_back();
bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
assert(is_spent);
}
}
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight) {
SpendCoins(view, tx, txundo, nHeight);
AddCoins(view, tx, nHeight);
}
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
CachingTransactionSignatureChecker(
ptxTo, nIn, m_tx_out.nValue, cacheStore, txdata),
metrics, &error)) {
return false;
}
if ((pTxLimitSigChecks &&
!pTxLimitSigChecks->consume_and_check(metrics.nSigChecks)) ||
(pBlockLimitSigChecks &&
!pBlockLimitSigChecks->consume_and_check(metrics.nSigChecks))) {
// we can't assign a meaningful script error (since the script
// succeeded), but remove the ScriptError::OK which could be
// misinterpreted.
error = ScriptError::SIGCHECKS_LIMIT_EXCEEDED;
return false;
}
return true;
}
bool CheckInputScripts(const CTransaction &tx, TxValidationState &state,
const CCoinsViewCache &inputs, const uint32_t flags,
bool sigCacheStore, bool scriptCacheStore,
const PrecomputedTransactionData &txdata,
int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
CheckInputsLimiter *pBlockLimitSigChecks,
std::vector<CScriptCheck> *pvChecks) {
AssertLockHeld(cs_main);
assert(!tx.IsCoinBase());
if (pvChecks) {
pvChecks->reserve(tx.vin.size());
}
// First check if script executions have been cached with the same flags.
// Note that this assumes that the inputs provided are correct (ie that the
// transaction hash which is in tx's prevouts properly commits to the
// scriptPubKey in the inputs view of that transaction).
ScriptCacheKey hashCacheEntry(tx, flags);
if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
(pBlockLimitSigChecks &&
!pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
return state.Invalid(TxValidationResult::TX_CONSENSUS,
"too-many-sigchecks");
}
return true;
}
int nSigChecksTotal = 0;
for (size_t i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const Coin &coin = inputs.AccessCoin(prevout);
assert(!coin.IsSpent());
// We very carefully only pass in things to CScriptCheck which are
// clearly committed to by tx's hash. This provides a sanity
// check that our caching is not introducing consensus failures through
// additional data in, eg, the coins being spent being checked as a part
// of CScriptCheck.
// Verify signature
CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
&txLimitSigChecks, pBlockLimitSigChecks);
// If pvChecks is not null, defer the check execution to the caller.
if (pvChecks) {
pvChecks->push_back(std::move(check));
continue;
}
if (!check()) {
ScriptError scriptError = check.GetScriptError();
// Compute flags without the optional standardness flags.
// This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
// additional upgrade flags (see AcceptToMemoryPoolWorker variable
// extraFlags).
uint32_t mandatoryFlags =
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS;
if (flags != mandatoryFlags) {
// Check whether the failure was caused by a non-mandatory
// script verification check. If so, ensure we return
// NOT_STANDARD instead of CONSENSUS to avoid downstream users
// splitting the network between upgraded and non-upgraded nodes
// by banning CONSENSUS-failing data providers.
CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
sigCacheStore, txdata);
if (check2()) {
return state.Invalid(
TxValidationResult::TX_NOT_STANDARD,
strprintf("non-mandatory-script-verify-flag (%s)",
ScriptErrorString(scriptError)));
}
// update the error message to reflect the mandatory violation.
scriptError = check2.GetScriptError();
}
// MANDATORY flag failures correspond to
// TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
// the most serious case of validation failures, we may need to
// consider using RECENT_CONSENSUS_CHANGE for any script failure
// that could be due to non-upgraded nodes which we may want to
// support, to avoid splitting the network (but this depends on the
// details of how net_processing handles such errors).
return state.Invalid(
TxValidationResult::TX_CONSENSUS,
strprintf("mandatory-script-verify-flag-failed (%s)",
ScriptErrorString(scriptError)));
}
nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
}
nSigChecksOut = nSigChecksTotal;
if (scriptCacheStore && !pvChecks) {
// We executed all of the provided scripts, and were told to cache the
// result. Do so now.
AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
}
return true;
}
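// Illustrative sketch: the cache-first shape of CheckInputScripts above. A
// cache hit must still consume the sigcheck budgets, otherwise a cached
// success could smuggle sigchecks past the per-tx and per-block limits.
// Simplified, hypothetical names.
#if 0
bool VerifyInputsSketch(const CTransaction &tx, uint32_t flags,
                        int &nSigChecksOut, TxSigCheckLimiter &txBudget,
                        CheckInputsLimiter *pBlockBudget) {
    const ScriptCacheKey key(tx, flags);
    if (IsKeyInScriptCache(key, /*erase=*/false, nSigChecksOut)) {
        return txBudget.consume_and_check(nSigChecksOut) &&
               (!pBlockBudget ||
                pBlockBudget->consume_and_check(nSigChecksOut));
    }
    // ...otherwise run one CScriptCheck per input, sum the sigchecks, and
    // cache the total on success...
    return true;
}
#endif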
bool AbortNode(BlockValidationState &state, const std::string &strMessage,
const bilingual_str &userMessage) {
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
/** Restore the UTXO in a Coin at a given COutPoint. */
DisconnectResult UndoCoinSpend(Coin &&undo, CCoinsViewCache &view,
const COutPoint &out) {
bool fClean = true;
if (view.HaveCoin(out)) {
// Overwriting transaction output.
fClean = false;
}
if (undo.GetHeight() == 0) {
        // Missing undo metadata (height and coinbase). Older versions included
        // this information only in undo records for the last spend of a
        // transaction's outputs. This implies that it must be present for some
        // other output of the same tx.
const Coin &alternate = AccessByTxid(view, out.GetTxId());
if (alternate.IsSpent()) {
// Adding output for transaction without known metadata
return DisconnectResult::FAILED;
}
        // This is somewhat ugly, but hopefully utility is limited. This is
        // only useful when working from legacy on-disk data. In any case,
        // putting the correct information in there doesn't hurt.
const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
alternate.IsCoinBase());
}
// If the coin already exists as an unspent coin in the cache, then the
// possible_overwrite parameter to AddCoin must be set to true. We have
// already checked whether an unspent coin exists above using HaveCoin, so
// we don't need to guess. When fClean is false, an unspent coin already
// existed and it is an overwrite.
view.AddCoin(out, std::move(undo), !fClean);
return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
}
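// Illustrative usage sketch: restoring one spent input during block
// disconnect, mirroring the call in ApplyBlockUndo below. The three results
// map to: OK (clean restore), UNCLEAN (an unspent coin was overwritten;
// tolerated but flagged), and FAILED (metadata unrecoverable; the caller
// aborts the disconnect).
#if 0
DisconnectResult res = UndoCoinSpend(std::move(txundo.vprevout[j]), view, out);
if (res == DisconnectResult::FAILED) {
    return DisconnectResult::FAILED;
}
fClean = fClean && res != DisconnectResult::UNCLEAN;
#endif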
/**
* Undo the effects of this block (with given index) on the UTXO set represented
* by coins. When FAILED is returned, view is left in an indeterminate state.
*/
DisconnectResult Chainstate::DisconnectBlock(const CBlock &block,
const CBlockIndex *pindex,
CCoinsViewCache &view) {
AssertLockHeld(::cs_main);
CBlockUndo blockUndo;
if (!m_blockman.UndoReadFromDisk(blockUndo, *pindex)) {
error("DisconnectBlock(): failure reading undo data");
return DisconnectResult::FAILED;
}
return ApplyBlockUndo(std::move(blockUndo), block, pindex, view);
}
DisconnectResult ApplyBlockUndo(CBlockUndo &&blockUndo, const CBlock &block,
const CBlockIndex *pindex,
CCoinsViewCache &view) {
bool fClean = true;
if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
error("DisconnectBlock(): block and undo data inconsistent");
return DisconnectResult::FAILED;
}
// First, restore inputs.
for (size_t i = 1; i < block.vtx.size(); i++) {
const CTransaction &tx = *(block.vtx[i]);
CTxUndo &txundo = blockUndo.vtxundo[i - 1];
if (txundo.vprevout.size() != tx.vin.size()) {
error("DisconnectBlock(): transaction and undo data inconsistent");
return DisconnectResult::FAILED;
}
for (size_t j = 0; j < tx.vin.size(); j++) {
const COutPoint &out = tx.vin[j].prevout;
DisconnectResult res =
UndoCoinSpend(std::move(txundo.vprevout[j]), view, out);
if (res == DisconnectResult::FAILED) {
return DisconnectResult::FAILED;
}
fClean = fClean && res != DisconnectResult::UNCLEAN;
}
// At this point, all of txundo.vprevout should have been moved out.
}
// Second, revert created outputs.
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
const TxId &txid = tx.GetId();
const bool is_coinbase = tx.IsCoinBase();
// Check that all outputs are available and match the outputs in the
// block itself exactly.
for (size_t o = 0; o < tx.vout.size(); o++) {
if (tx.vout[o].scriptPubKey.IsUnspendable()) {
continue;
}
COutPoint out(txid, o);
Coin coin;
bool is_spent = view.SpendCoin(out, &coin);
if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
uint32_t(pindex->nHeight) != coin.GetHeight() ||
is_coinbase != coin.IsCoinBase()) {
// transaction output mismatch
fClean = false;
}
}
}
// Move best block pointer to previous block.
view.SetBestBlock(block.hashPrevBlock);
return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
}
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void StartScriptCheckWorkerThreads(int threads_num) {
scriptcheckqueue.StartWorkerThreads(threads_num);
}
void StopScriptCheckWorkerThreads() {
scriptcheckqueue.StopWorkerThreads();
}
// Returns the script flags which should be checked for the block after
// the given block.
static uint32_t GetNextBlockScriptFlags(const CBlockIndex *pindex,
const ChainstateManager &chainman) {
const Consensus::Params &consensusparams = chainman.GetConsensus();
uint32_t flags = SCRIPT_VERIFY_NONE;
// Enforce P2SH (BIP16)
if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_P2SH)) {
flags |= SCRIPT_VERIFY_P2SH;
}
// Enforce the DERSIG (BIP66) rule.
if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_DERSIG)) {
flags |= SCRIPT_VERIFY_DERSIG;
}
// Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_CLTV)) {
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
// Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_CSV)) {
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
}
// If the UAHF is enabled, we start accepting replay protected txns
if (IsUAHFenabled(consensusparams, pindex)) {
flags |= SCRIPT_VERIFY_STRICTENC;
flags |= SCRIPT_ENABLE_SIGHASH_FORKID;
}
    // If the DAA HF is enabled, we start rejecting transactions that use a
    // high S value in their signatures. We also make sure that signatures
    // that are supposed to fail (for instance in multisig or other forms of
    // smart contracts) are null.
if (IsDAAEnabled(consensusparams, pindex)) {
flags |= SCRIPT_VERIFY_LOW_S;
flags |= SCRIPT_VERIFY_NULLFAIL;
}
    // When the magnetic anomaly fork is enabled, we start accepting
    // transactions using the OP_CHECKDATASIG opcode and its verify
    // alternative. We also start enforcing push-only scriptSigs and
    // clean stack.
if (IsMagneticAnomalyEnabled(consensusparams, pindex)) {
flags |= SCRIPT_VERIFY_SIGPUSHONLY;
flags |= SCRIPT_VERIFY_CLEANSTACK;
}
if (IsGravitonEnabled(consensusparams, pindex)) {
flags |= SCRIPT_ENABLE_SCHNORR_MULTISIG;
flags |= SCRIPT_VERIFY_MINIMALDATA;
}
if (IsPhononEnabled(consensusparams, pindex)) {
flags |= SCRIPT_ENFORCE_SIGCHECKS;
}
// We make sure this node will have replay protection during the next hard
// fork.
if (IsReplayProtectionEnabled(consensusparams, pindex)) {
flags |= SCRIPT_ENABLE_REPLAY_PROTECTION;
}
return flags;
}
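// Illustrative usage sketch: note the "next block" convention. To obtain the
// flags for validating block N itself, callers pass the index of block N-1,
// exactly as ConnectBlock does below with pindex->pprev.
#if 0
const uint32_t flags = GetNextBlockScriptFlags(pindex->pprev, m_chainman);
#endif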
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
/**
* Apply the effects of this block (with given index) on the UTXO set
* represented by coins. Validity checks that depend on the UTXO set are also
* done; ConnectBlock() can fail if those validity checks fail (among other
* reasons).
*/
bool Chainstate::ConnectBlock(const CBlock &block, BlockValidationState &state,
CBlockIndex *pindex, CCoinsViewCache &view,
BlockValidationOptions options, Amount *blockFees,
bool fJustCheck) {
AssertLockHeld(cs_main);
assert(pindex);
const BlockHash block_hash{block.GetHash()};
assert(*pindex->phashBlock == block_hash);
int64_t nTimeStart = GetTimeMicros();
    const CChainParams &params{m_chainman.GetParams()};
const Consensus::Params &consensusParams = params.GetConsensus();
// Check it again in case a previous version let a bad block in
// NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
// ContextualCheckBlockHeader() here. This means that if we add a new
// consensus rule that is enforced in one of those two functions, then we
// may have let in a block that violates the rule prior to updating the
// software, and we would NOT be enforcing the rule here. Fully solving
// upgrade from one software version to the next after a consensus rule
// change is potentially tricky and issue-specific.
// Also, currently the rule against blocks more than 2 hours in the future
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to
// re-enforce that rule here (at least until we make it impossible for
// m_adjusted_time_callback() to go backward).
if (!CheckBlock(block, state, consensusParams,
options.withCheckPoW(!fJustCheck)
.withCheckMerkleRoot(!fJustCheck))) {
if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having
// hardware problems.
return AbortNode(state, "Corrupt block found indicating potential "
"hardware failure; shutting down");
}
return error("%s: Consensus::CheckBlock: %s", __func__,
state.ToString());
}
// Verify that the view's current state corresponds to the previous block
BlockHash hashPrevBlock =
pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
nBlocksTotal++;
// Special case for the genesis block, skipping connection of its
// transactions (its coinbase is unspendable)
if (block_hash == consensusParams.hashGenesisBlock) {
if (!fJustCheck) {
view.SetBestBlock(pindex->GetBlockHash());
}
return true;
}
bool fScriptChecks = true;
if (!m_chainman.AssumedValidBlock().IsNull()) {
// We've been configured with the hash of a block which has been
// externally verified to have a valid history. A suitable default value
// is included with the software and updated from time to time. Because
// validity relative to a piece of software is an objective fact these
// defaults can be easily reviewed. This setting doesn't force the
// selection of any particular chain but makes validating some faster by
// effectively caching the result of part of the verification.
BlockMap::const_iterator it{
m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())};
if (it != m_blockman.m_block_index.end()) {
if (it->second.GetAncestor(pindex->nHeight) == pindex &&
m_chainman.m_best_header->GetAncestor(pindex->nHeight) ==
pindex &&
m_chainman.m_best_header->nChainWork >=
m_chainman.MinimumChainWork()) {
// This block is a member of the assumed verified chain and an
// ancestor of the best header.
// Script verification is skipped when connecting blocks under
                // the assumevalid block. Assuming the assumevalid block is
                // valid, this is safe because block merkle hashes are still
                // computed and checked. Of course, if an assumed valid block
                // is invalid due to false scriptSigs, this optimization would
                // allow an invalid chain to be accepted.
// The equivalent time check discourages hash power from
// extorting the network via DOS attack into accepting an
// invalid block through telling users they must manually set
// assumevalid. Requiring a software change or burying the
// invalid block, regardless of the setting, makes it hard to
// hide the implication of the demand. This also avoids having
// release candidates that are hardly doing any signature
// verification at all in testing without having to artificially
// set the default assumed verified block further back. The test
// against the minimum chain work prevents the skipping when
// denied access to any chain at least as good as the expected
// chain.
fScriptChecks = (GetBlockProofEquivalentTime(
*m_chainman.m_best_header, *pindex,
*m_chainman.m_best_header,
consensusParams) <= 60 * 60 * 24 * 7 * 2);
}
}
}
int64_t nTime1 = GetTimeMicros();
nTimeCheck += nTime1 - nTimeStart;
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
nTimeCheck * MILLI / nBlocksTotal);
// Do not allow blocks that contain transactions which 'overwrite' older
// transactions, unless those are already completely spent. If such
// overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance --
// even after being sent to another address.
// See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
// for more information. This rule was originally applied to all blocks
// with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
// chain is irreversibly beyond that time it is applied to all blocks
// except the two in the chain that violate it. This prevents exploiting
// the issue against nodes during their initial block download.
bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
pindex->GetBlockHash() ==
uint256S("0x00000000000a4d0a398161ffc163c503763"
"b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight == 91880 &&
pindex->GetBlockHash() ==
uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
"610ae9601ac046a38084ccb7cd721")));
// Once BIP34 activated it was not possible to create new duplicate
// coinbases and thus other than starting with the 2 existing duplicate
// coinbase pairs, not possible to create overwriting txs. But by the time
// BIP34 activated, in each of the existing pairs the duplicate coinbase had
// overwritten the first before the first had been spent. Since those
// coinbases are sufficiently buried it's no longer possible to create
// further duplicate transactions descending from the known pairs either. If
// we're on the known chain at height greater than where BIP34 activated, we
// can save the db accesses needed for the BIP30 check.
// BIP34 requires that a block at height X (block X) has its coinbase
// scriptSig start with a CScriptNum of X (indicated height X). The above
// logic of no longer requiring BIP30 once BIP34 activates is flawed in the
// case that there is a block X before the BIP34 height of 227,931 which has
// an indicated height Y where Y is greater than X. The coinbase for block
// X would also be a valid coinbase for block Y, which could be a BIP30
// violation. An exhaustive search of all mainnet coinbases before the
// BIP34 height which have an indicated height greater than the block height
// reveals many occurrences. The 3 lowest indicated heights found are
// 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
// heights would be the first opportunity for BIP30 to be violated.
// The search reveals a great many blocks which have an indicated height
// greater than 1,983,702, so we simply remove the optimization to skip
// BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
// that block in another 25 years or so, we should take advantage of a
// future consensus change to do a new and improved version of BIP34 that
// will actually prevent ever creating any duplicate coinbases in the
// future.
static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
// There is no potential to create a duplicate coinbase at block 209,921
// because this is still before the BIP34 height and so explicit BIP30
// checking is still active.
// The final case is block 176,684 which has an indicated height of
// 490,897. Unfortunately, this issue was not discovered until about 2 weeks
// before block 490,897 so there was not much opportunity to address this
// case other than to carefully analyze it and determine it would not be a
// problem. Block 490,897 was, in fact, mined with a different coinbase than
// block 176,684, but it is important to note that even if it hadn't been or
// is remined on an alternate fork with a duplicate coinbase, we would still
// not run into a BIP30 violation. This is because the coinbase for 176,684
// is spent in block 185,956 in transaction
// d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
// spending transaction can't be duplicated because it also spends coinbase
// 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
// coinbase has an indicated height of over 4.2 billion, and wouldn't be
// duplicatable until that height, and it's currently impossible to create a
// chain that long. Nevertheless we may wish to consider a future soft fork
// which retroactively prevents block 490,897 from creating a duplicate
// coinbase. The two historical BIP30 violations often provide a confusing
// edge case when manipulating the UTXO and it would be simpler not to have
// another edge case to deal with.
// testnet3 has no blocks before the BIP34 height with indicated heights
// post BIP34 before approximately height 486,000,000 and presumably will
// be reset before it reaches block 1,983,702 and starts doing unnecessary
// BIP30 checking again.
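    // Worked example of the flaw described above, using the numbers already
    // cited: block 176,684 carries an indicated height of 490,897, so its
    // coinbase would also have satisfied BIP34 at height 490,897. Had block
    // 490,897 been mined with that same coinbase, it would have duplicated
    // (and overwritten) the earlier one while the BIP34-implies-BIP30
    // shortcut skipped the explicit check.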
assert(pindex->pprev);
CBlockIndex *pindexBIP34height =
pindex->pprev->GetAncestor(consensusParams.BIP34Height);
// Only continue to enforce if we're below BIP34 activation height or the
// block hash at that height doesn't correspond.
fEnforceBIP30 =
fEnforceBIP30 &&
(!pindexBIP34height ||
!(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
// TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
// a consensus change that ensures coinbases at those heights can not
// duplicate earlier coinbases.
if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
for (const auto &tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
"transaction\n");
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-txns-BIP30");
}
}
}
}
// Enforce BIP68 (sequence locks).
int nLockTimeFlags = 0;
if (DeploymentActiveAt(*pindex, consensusParams,
Consensus::DEPLOYMENT_CSV)) {
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
const uint32_t flags = GetNextBlockScriptFlags(pindex->pprev, m_chainman);
int64_t nTime2 = GetTimeMicros();
nTimeForks += nTime2 - nTime1;
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
nTimeForks * MILLI / nBlocksTotal);
std::vector<int> prevheights;
Amount nFees = Amount::zero();
int nInputs = 0;
// Limit the total executed signature operations in the block, a consensus
// rule. Tracking during the CPU-consuming part (validation of uncached
// inputs) is per-input atomic and validation in each thread stops very
// quickly after the limit is exceeded, so an adversary cannot cause us to
// exceed the limit by much at all.
CheckInputsLimiter nSigChecksBlockLimiter(
GetMaxBlockSigChecksCount(options.getExcessiveBlockSize()));
std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
nSigChecksTxLimiters.resize(block.vtx.size() - 1);
CBlockUndo blockundo;
blockundo.vtxundo.resize(block.vtx.size() - 1);
CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
: nullptr);
// Add all outputs
try {
for (const auto &ptx : block.vtx) {
AddCoins(view, *ptx, pindex->nHeight);
}
} catch (const std::logic_error &e) {
// This error will be thrown from AddCoin if we try to connect a block
// containing duplicate transactions. Such a thing should normally be
// caught early nowadays (due to ContextualCheckBlock's CTOR
// enforcement) however some edge cases can escape that:
// - ContextualCheckBlock does not get re-run after saving the block to
// disk, and older versions may have saved a weird block.
// - its checks are not applied to pre-CTOR chains, which we might visit
// with checkpointing off.
LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"tx-duplicate");
}
size_t txIndex = 0;
// nSigChecksRet may be accurate (found in cache) or 0 (checks were
// deferred into vChecks).
int nSigChecksRet;
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
const bool isCoinBase = tx.IsCoinBase();
nInputs += tx.vin.size();
{
Amount txfee = Amount::zero();
TxValidationState tx_state;
if (!isCoinBase &&
!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
txfee)) {
// Any transaction validation failure in ConnectBlock is a block
// consensus failure.
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(),
tx_state.GetDebugMessage());
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
tx.GetId().ToString(), state.ToString());
}
nFees += txfee;
}
if (!MoneyRange(nFees)) {
LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
__func__);
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-txns-accumulated-fee-outofrange");
}
// The following checks do not apply to the coinbase.
if (isCoinBase) {
continue;
}
        // Check that the transaction is BIP68 final. BIP68 lock checks (as
        // opposed to nLockTime checks) must be done in ConnectBlock because
        // they require the UTXO set.
prevheights.resize(tx.vin.size());
for (size_t j = 0; j < tx.vin.size(); j++) {
prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
}
if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
__func__);
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-txns-nonfinal");
}
// Don't cache results if we're actually connecting blocks (still
// consult the cache, though).
bool fCacheResults = fJustCheck;
const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
if (!fEnforceSigCheck) {
            // Historically, there have been transactions with a very high
            // sigcheck count, so we need to disable this check for such
            // transactions.
nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
}
std::vector<CScriptCheck> vChecks;
TxValidationState tx_state;
if (fScriptChecks &&
!CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
fCacheResults, PrecomputedTransactionData(tx),
nSigChecksRet, nSigChecksTxLimiters[txIndex],
&nSigChecksBlockLimiter, &vChecks)) {
// Any transaction validation failure in ConnectBlock is a block
// consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(),
tx_state.GetDebugMessage());
return error(
"ConnectBlock(): CheckInputScripts on %s failed with %s",
tx.GetId().ToString(), state.ToString());
}
control.Add(std::move(vChecks));
// Note: this must execute in the same iteration as CheckTxInputs (not
// in a separate loop) in order to detect double spends. However,
// this does not prevent double-spending by duplicated transaction
// inputs in the same transaction (cf. CVE-2018-17144) -- that check is
// done in CheckBlock (CheckRegularTransaction).
SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
txIndex++;
}
int64_t nTime3 = GetTimeMicros();
nTimeConnect += nTime3 - nTime2;
LogPrint(BCLog::BENCH,
" - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
"[%.2fs (%.2fms/blk)]\n",
(unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
MILLI * (nTime3 - nTime2) / block.vtx.size(),
nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
const Amount blockReward =
nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
if (block.vtx[0]->GetValueOut() > blockReward) {
LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
"limit=%d)\n",
block.vtx[0]->GetValueOut(), blockReward);
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-cb-amount");
}
if (blockFees) {
*blockFees = nFees;
}
if (!control.Wait()) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"blk-bad-inputs", "parallel script check failed");
}
int64_t nTime4 = GetTimeMicros();
nTimeVerify += nTime4 - nTime2;
LogPrint(
BCLog::BENCH,
" - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
nInputs - 1, MILLI * (nTime4 - nTime2),
nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
if (fJustCheck) {
return true;
}
if (!m_blockman.WriteUndoDataForBlock(blockundo, state, *pindex)) {
return false;
}
if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
pindex->RaiseValidity(BlockValidity::SCRIPTS);
m_blockman.m_dirty_blockindex.insert(pindex);
}
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros();
nTimeIndex += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
nTimeIndex * MILLI / nBlocksTotal);
TRACE6(validation, block_connected, block_hash.data(), pindex->nHeight,
block.vtx.size(), nInputs, nSigChecksRet,
// in microseconds (µs)
nTime5 - nTimeStart);
return true;
}
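// Illustrative usage sketch: with fJustCheck=true, ConnectBlock validates a
// block against a throwaway view without writing undo data, raising block
// validity, or updating the view's best block (hypothetical call shape).
#if 0
CCoinsViewCache scratch(&chainstate.CoinsTip());
BlockValidationState state;
const bool ok = chainstate.ConnectBlock(
    block, state, pindex, scratch, BlockValidationOptions(config),
    /*blockFees=*/nullptr, /*fJustCheck=*/true);
#endif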
CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() {
AssertLockHeld(::cs_main);
return this->GetCoinsCacheSizeState(m_coinstip_cache_size_bytes,
m_mempool ? m_mempool->m_max_size_bytes
: 0);
}
CoinsCacheSizeState
Chainstate::GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes) {
AssertLockHeld(::cs_main);
int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
int64_t nTotalSpace =
max_coins_cache_size_bytes +
std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
    //! No need for a periodic flush if at least this much space is still
    //! available.
    static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
        10 * 1024 * 1024; // 10 MiB
int64_t large_threshold = std::max(
(9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
if (cacheSize > nTotalSpace) {
LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
nTotalSpace);
return CoinsCacheSizeState::CRITICAL;
} else if (cacheSize > large_threshold) {
return CoinsCacheSizeState::LARGE;
}
return CoinsCacheSizeState::OK;
}
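// Worked example for the thresholds above (illustrative numbers): with a
// 450 MiB coins cache limit and a 300 MiB mempool limit of which 100 MiB is
// used,
//   nTotalSpace     = 450 + max(300 - 100, 0)   = 650 MiB
//   large_threshold = max(90% of 650, 650 - 10) = 640 MiB
// so cache usage above 640 MiB reports LARGE and above 650 MiB CRITICAL.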
bool Chainstate::FlushStateToDisk(BlockValidationState &state,
FlushStateMode mode, int nManualPruneHeight) {
LOCK(cs_main);
assert(this->CanFlushToDisk());
std::set<int> setFilesToPrune;
bool full_flush_completed = false;
const size_t coins_count = CoinsTip().GetCacheSize();
const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
try {
{
bool fFlushForPrune = false;
bool fDoFullFlush = false;
CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
LOCK(m_blockman.cs_LastBlockFile);
if (m_blockman.IsPruneMode() &&
(m_blockman.m_check_for_pruning || nManualPruneHeight > 0) &&
!fReindex) {
// Make sure we don't prune any of the prune locks bestblocks.
// Pruning is height-based.
int last_prune{m_chain.Height()};
// prune lock that actually was the limiting factor, only used
// for logging
std::optional<std::string> limiting_lock;
for (const auto &prune_lock : m_blockman.m_prune_locks) {
if (prune_lock.second.height_first ==
std::numeric_limits<int>::max()) {
continue;
}
// Remove the buffer and one additional block here to get
// actual height that is outside of the buffer
const int lock_height{prune_lock.second.height_first -
PRUNE_LOCK_BUFFER - 1};
last_prune = std::max(1, std::min(last_prune, lock_height));
if (last_prune == lock_height) {
limiting_lock = prune_lock.first;
}
}
if (limiting_lock) {
LogPrint(BCLog::PRUNE, "%s limited pruning to height %d\n",
limiting_lock.value(), last_prune);
}
if (nManualPruneHeight > 0) {
LOG_TIME_MILLIS_WITH_CATEGORY(
"find files to prune (manual)", BCLog::BENCH);
m_blockman.FindFilesToPruneManual(
setFilesToPrune,
std::min(last_prune, nManualPruneHeight),
m_chain.Height());
} else {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
BCLog::BENCH);
m_blockman.FindFilesToPrune(
setFilesToPrune,
m_chainman.GetParams().PruneAfterHeight(),
m_chain.Height(), last_prune, IsInitialBlockDownload());
m_blockman.m_check_for_pruning = false;
}
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
if (!m_blockman.m_have_pruned) {
m_blockman.m_block_tree_db->WriteFlag(
"prunedblockfiles", true);
m_blockman.m_have_pruned = true;
}
}
}
const auto nNow = GetTime<std::chrono::microseconds>();
// Avoid writing/flushing immediately after startup.
if (m_last_write.count() == 0) {
m_last_write = nNow;
}
if (m_last_flush.count() == 0) {
m_last_flush = nNow;
}
            // The cache is large and we're within 10% and 10 MiB of the
            // limit, but we have time now (not in the middle of processing a
            // block).
bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
cache_state >= CoinsCacheSizeState::LARGE;
// The cache is over the limit, we have to write now.
bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
cache_state >= CoinsCacheSizeState::CRITICAL;
// It's been a while since we wrote the block index to disk. Do this
// frequently, so we don't need to redownload after a crash.
bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
nNow > m_last_write + DATABASE_WRITE_INTERVAL;
// It's been very long since we flushed the cache. Do this
// infrequently, to optimize cache usage.
bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
nNow > m_last_flush + DATABASE_FLUSH_INTERVAL;
// Combine all conditions that result in a full cache flush.
fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
fCacheCritical || fPeriodicFlush || fFlushForPrune;
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Ensure we can write block index
if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
return AbortNode(state, "Disk space is too low!",
_("Disk space is too low!"));
}
{
LOG_TIME_MILLIS_WITH_CATEGORY(
"write block and undo data to disk", BCLog::BENCH);
// First make sure all block and undo data is flushed to
// disk.
m_blockman.FlushBlockFile();
}
// Then update all block file information (which may refer to
// block and undo files).
{
LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
BCLog::BENCH);
if (!m_blockman.WriteBlockIndexDB()) {
return AbortNode(
state, "Failed to write to block index database");
}
}
// Finally remove any pruned files
if (fFlushForPrune) {
LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
BCLog::BENCH);
m_blockman.UnlinkPrunedFiles(setFilesToPrune);
}
m_last_write = nNow;
}
// Flush best chain related state. This can only be done if the
// blocks / block index write was also done.
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
LOG_TIME_MILLIS_WITH_CATEGORY(
strprintf("write coins cache to disk (%d coins, %.2fkB)",
coins_count, coins_mem_usage / 1000),
BCLog::BENCH);
// Typical Coin structures on disk are around 48 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is
// already an overestimation, as most will delete an existing
// entry or overwrite one. Still, use a conservative safety
// factor of 2.
if (!CheckDiskSpace(gArgs.GetDataDirNet(),
48 * 2 * 2 * CoinsTip().GetCacheSize())) {
return AbortNode(state, "Disk space is too low!",
_("Disk space is too low!"));
}
// Flush the chainstate (which may refer to block index
// entries).
if (!CoinsTip().Flush()) {
return AbortNode(state, "Failed to write to coin database");
}
m_last_flush = nNow;
full_flush_completed = true;
}
TRACE5(utxocache, flush,
// in microseconds (µs)
GetTimeMicros() - nNow.count(), uint32_t(mode), coins_count,
uint64_t(coins_mem_usage), fFlushForPrune);
}
if (full_flush_completed) {
// Update best block in wallet (so we can detect restored wallets).
GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
}
} catch (const std::runtime_error &e) {
return AbortNode(state, std::string("System error while flushing: ") +
e.what());
}
return true;
}
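// Illustrative usage sketch: durability-sensitive callers (like
// ForceFlushStateToDisk below) use FlushStateMode::ALWAYS, while hot paths
// pass IF_NEEDED so only a CRITICAL cache state forces a write.
#if 0
BlockValidationState state;
if (!chainstate.FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
    return false; // system error; AbortNode has already been signalled
}
#endif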
void Chainstate::ForceFlushStateToDisk() {
BlockValidationState state;
if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__,
state.ToString());
}
}
void Chainstate::PruneAndFlush() {
BlockValidationState state;
m_blockman.m_check_for_pruning = true;
if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__,
state.ToString());
}
}
static void UpdateTipLog(const CCoinsViewCache &coins_tip,
                         const CBlockIndex *tip, const CChainParams &params,
const std::string &func_name,
const std::string &prefix)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
"date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
prefix, func_name, tip->GetBlockHash().ToString(), tip->nHeight,
tip->nVersion, log(tip->nChainWork.getdouble()) / log(2.0),
tip->GetChainTxCount(),
FormatISO8601DateTime(tip->GetBlockTime()),
GuessVerificationProgress(params.TxData(), tip),
coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
coins_tip.GetCacheSize());
}
void Chainstate::UpdateTip(const CBlockIndex *pindexNew) {
AssertLockHeld(::cs_main);
const auto &coins_tip = CoinsTip();
    const CChainParams &params{m_chainman.GetParams()};
// The remainder of the function isn't relevant if we are not acting on
// the active chainstate, so return if need be.
if (this != &m_chainman.ActiveChainstate()) {
// Only log every so often so that we don't bury log messages at the
// tip.
constexpr int BACKGROUND_LOG_INTERVAL = 2000;
if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
UpdateTipLog(coins_tip, pindexNew, params, __func__,
"[background validation] ");
}
return;
}
// New best block
if (m_mempool) {
m_mempool->AddTransactionsUpdated(1);
}
{
LOCK(g_best_block_mutex);
g_best_block = pindexNew;
g_best_block_cv.notify_all();
}
UpdateTipLog(coins_tip, pindexNew, params, __func__, "");
}
/**
* Disconnect m_chain's tip.
 * After calling, the mempool will be in an inconsistent state, with
 * transactions from disconnected blocks being added to disconnectpool. You
 * should make the mempool consistent again by calling updateMempoolForReorg,
 * with cs_main held.
*
* If disconnectpool is nullptr, then no disconnected transactions are added to
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
bool Chainstate::DisconnectTip(BlockValidationState &state,
DisconnectedBlockTransactions *disconnectpool) {
AssertLockHeld(cs_main);
if (m_mempool) {
AssertLockHeld(m_mempool->cs);
}
CBlockIndex *pindexDelete = m_chain.Tip();
assert(pindexDelete);
assert(pindexDelete->pprev);
// Read block from disk.
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock &block = *pblock;
if (!m_blockman.ReadBlockFromDisk(block, *pindexDelete)) {
return error("DisconnectTip(): Failed to read block");
}
// Apply the block atomically to the chain state.
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(&CoinsTip());
assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
if (DisconnectBlock(block, pindexDelete, view) !=
DisconnectResult::OK) {
return error("DisconnectTip(): DisconnectBlock %s failed",
pindexDelete->GetBlockHash().ToString());
}
bool flushed = view.Flush();
assert(flushed);
}
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
(GetTimeMicros() - nStart) * MILLI);
{
// Prune locks that began at or after the tip should be moved backward
// so they get a chance to reorg
const int max_height_first{pindexDelete->nHeight - 1};
for (auto &prune_lock : m_blockman.m_prune_locks) {
if (prune_lock.second.height_first <= max_height_first) {
continue;
}
prune_lock.second.height_first = max_height_first;
LogPrint(BCLog::PRUNE, "%s prune lock moved back to %d\n",
prune_lock.first, max_height_first);
}
}
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
return false;
}
if (m_mempool) {
// If this block is deactivating a fork, we move all mempool
// transactions in front of disconnectpool for reprocessing in a future
// updateMempoolForReorg call
if (pindexDelete->pprev != nullptr &&
GetNextBlockScriptFlags(pindexDelete, m_chainman) !=
GetNextBlockScriptFlags(pindexDelete->pprev, m_chainman)) {
LogPrint(BCLog::MEMPOOL,
"Disconnecting mempool due to rewind of upgrade block\n");
if (disconnectpool) {
disconnectpool->importMempool(*m_mempool);
}
m_mempool->clear();
}
if (disconnectpool) {
disconnectpool->addForBlock(block.vtx, *m_mempool);
}
}
m_chain.SetTip(*pindexDelete->pprev);
UpdateTip(pindexDelete->pprev);
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
GetMainSignals().BlockDisconnected(pblock, pindexDelete);
return true;
}
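// Illustrative usage sketch: the disconnect phase of a reorg, as driven by
// ActivateBestChainStep below. Disconnected transactions accumulate in a
// DisconnectedBlockTransactions pool and the caller must restore mempool
// consistency afterwards with updateMempoolForReorg.
#if 0
DisconnectedBlockTransactions disconnectpool;
while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
    if (!DisconnectTip(state, &disconnectpool)) {
        break; // fatal; still make the mempool consistent below
    }
}
disconnectpool.updateMempoolForReorg(*this, /*fAddToMempool=*/true,
                                     *m_mempool);
#endif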
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
* Connect a new block to m_chain. pblock is either nullptr or a pointer to
* a CBlock corresponding to pindexNew, to bypass loading it again from disk.
*/
bool Chainstate::ConnectTip(BlockValidationState &state,
BlockPolicyValidationState &blockPolicyState,
CBlockIndex *pindexNew,
const std::shared_ptr<const CBlock> &pblock,
DisconnectedBlockTransactions &disconnectpool,
const avalanche::Processor *const avalanche) {
AssertLockHeld(cs_main);
if (m_mempool) {
AssertLockHeld(m_mempool->cs);
}
const Consensus::Params &consensusParams = m_chainman.GetConsensus();
assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
std::shared_ptr<const CBlock> pthisBlock;
if (!pblock) {
std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
if (!m_blockman.ReadBlockFromDisk(*pblockNew, *pindexNew)) {
return AbortNode(state, "Failed to read block");
}
pthisBlock = pblockNew;
} else {
pthisBlock = pblock;
}
const CBlock &blockConnecting = *pthisBlock;
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros();
nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
(nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
{
Amount blockFees{Amount::zero()};
CCoinsViewCache view(&CoinsTip());
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view,
BlockValidationOptions(m_chainman.GetConfig()),
&blockFees);
GetMainSignals().BlockChecked(blockConnecting, state);
if (!rv) {
if (state.IsInvalid()) {
InvalidBlockFound(pindexNew, state);
}
return error("%s: ConnectBlock %s failed, %s", __func__,
pindexNew->GetBlockHash().ToString(),
state.ToString());
}
/**
* The block is valid by consensus rules so now we check if the block
* passes all block policy checks. If not, then park the block and bail.
*
* We check block parking policies before flushing changes to the UTXO
* set. This allows us to avoid rewinding everything immediately after.
*
* Only check block parking policies the first time the block is
* connected. Avalanche voting can override the parking decision made by
* these policies.
*/
const BlockHash blockhash = pindexNew->GetBlockHash();
if (!IsInitialBlockDownload() &&
!m_filterParkingPoliciesApplied.contains(blockhash)) {
m_filterParkingPoliciesApplied.insert(blockhash);
const Amount blockReward =
blockFees +
GetBlockSubsidy(pindexNew->nHeight, consensusParams);
std::vector<std::unique_ptr<ParkingPolicy>> parkingPolicies;
parkingPolicies.emplace_back(std::make_unique<MinerFundPolicy>(
consensusParams, *pindexNew, blockConnecting, blockReward));
if (avalanche) {
parkingPolicies.emplace_back(
std::make_unique<StakingRewardsPolicy>(
*avalanche, consensusParams, *pindexNew,
blockConnecting, blockReward));
if (m_mempool) {
parkingPolicies.emplace_back(
std::make_unique<PreConsensusPolicy>(
*pindexNew, blockConnecting, m_mempool));
}
}
// If any block policy is violated, bail on the first one found
if (std::find_if_not(parkingPolicies.begin(), parkingPolicies.end(),
[&](const auto &policy) {
bool ret = (*policy)(blockPolicyState);
if (!ret) {
LogPrintf(
"Park block because it "
"violated a block policy: %s\n",
blockPolicyState.ToString());
}
return ret;
}) != parkingPolicies.end()) {
pindexNew->nStatus = pindexNew->nStatus.withParked();
m_blockman.m_dirty_blockindex.insert(pindexNew);
return false;
}
}
nTime3 = GetTimeMicros();
nTimeConnectTotal += nTime3 - nTime2;
assert(nBlocksTotal > 0);
LogPrint(BCLog::BENCH,
" - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
(nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
nTimeConnectTotal * MILLI / nBlocksTotal);
bool flushed = view.Flush();
assert(flushed);
}
int64_t nTime4 = GetTimeMicros();
nTimeFlush += nTime4 - nTime3;
LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
(nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
nTimeFlush * MILLI / nBlocksTotal);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
return false;
}
int64_t nTime5 = GetTimeMicros();
nTimeChainState += nTime5 - nTime4;
LogPrint(BCLog::BENCH,
" - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
(nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
nTimeChainState * MILLI / nBlocksTotal);
    // Remove conflicting transactions from the mempool.
if (m_mempool) {
disconnectpool.removeForBlock(blockConnecting.vtx, *m_mempool);
// If this block is activating a fork, we move all mempool transactions
// in front of disconnectpool for reprocessing in a future
// updateMempoolForReorg call
if (pindexNew->pprev != nullptr &&
GetNextBlockScriptFlags(pindexNew, m_chainman) !=
GetNextBlockScriptFlags(pindexNew->pprev, m_chainman)) {
LogPrint(
BCLog::MEMPOOL,
"Disconnecting mempool due to acceptance of upgrade block\n");
disconnectpool.importMempool(*m_mempool);
}
}
// Update m_chain & related variables.
m_chain.SetTip(*pindexNew);
UpdateTip(pindexNew);
int64_t nTime6 = GetTimeMicros();
nTimePostConnect += nTime6 - nTime5;
nTimeTotal += nTime6 - nTime1;
LogPrint(BCLog::BENCH,
" - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
(nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
nTimePostConnect * MILLI / nBlocksTotal);
LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
(nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
nTimeTotal * MILLI / nBlocksTotal);
// If we are the background validation chainstate, check to see if we are
// done validating the snapshot (i.e. our tip has reached the snapshot's
// base block).
if (this != &m_chainman.ActiveChainstate()) {
// This call may set `m_disabled`, which is referenced immediately
// afterwards in ActivateBestChain, so that we stop connecting blocks
// past the snapshot base.
m_chainman.MaybeCompleteSnapshotValidation();
}
GetMainSignals().BlockConnected(pthisBlock, pindexNew);
return true;
}
/**
* Return the tip of the chain with the most work in it, that isn't known to be
* invalid (it's however far from certain to be valid).
*/
CBlockIndex *Chainstate::FindMostWorkChain(
std::vector<const CBlockIndex *> &blocksToReconcile, bool fAutoUnpark) {
AssertLockHeld(::cs_main);
do {
CBlockIndex *pindexNew = nullptr;
// Find the best candidate header.
{
std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
it = setBlockIndexCandidates.rbegin();
if (it == setBlockIndexCandidates.rend()) {
return nullptr;
}
pindexNew = *it;
}
// If this block will cause an avalanche finalized block to be reorged,
// then we park it.
{
LOCK(cs_avalancheFinalizedBlockIndex);
if (m_avalancheFinalizedBlockIndex &&
!AreOnTheSameFork(pindexNew, m_avalancheFinalizedBlockIndex)) {
LogPrintf("Park block %s because it forks prior to the "
"avalanche finalized chaintip.\n",
pindexNew->GetBlockHash().ToString());
pindexNew->nStatus = pindexNew->nStatus.withParked();
m_blockman.m_dirty_blockindex.insert(pindexNew);
}
}
const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
// Check whether all blocks on the path between the currently active
// chain and the candidate are valid. Just going until the active chain
// is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool hasValidAncestor = true;
while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
// If this is a parked chain, but it has enough PoW, clear the park
// state.
bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
if (fAutoUnpark && fParkedChain) {
const CBlockIndex *pindexTip = m_chain.Tip();
// During initialization, pindexTip and/or pindexFork may be
// null. In this case, we just ignore the fact that the chain is
// parked.
if (!pindexTip || !pindexFork) {
UnparkBlock(pindexTest);
continue;
}
// A parked chain can be unparked if it has twice as much PoW
// accumulated as the main chain has since the fork block.
CBlockIndex const *pindexExtraPow = pindexTip;
arith_uint256 requiredWork = pindexTip->nChainWork;
switch (pindexTip->nHeight - pindexFork->nHeight) {
                // Limit the penalty for depth 1, 2 and 3 to half a block
                // worth of work to ensure we don't fork accidentally.
case 3:
case 2:
pindexExtraPow = pindexExtraPow->pprev;
// FALLTHROUGH
case 1: {
const arith_uint256 deltaWork =
pindexExtraPow->nChainWork - pindexFork->nChainWork;
requiredWork += (deltaWork >> 1);
break;
}
default:
requiredWork +=
pindexExtraPow->nChainWork - pindexFork->nChainWork;
break;
}
if (pindexNew->nChainWork > requiredWork) {
// We have enough, clear the parked state.
LogPrintf("Unpark chain up to block %s as it has "
"accumulated enough PoW.\n",
pindexNew->GetBlockHash().ToString());
fParkedChain = false;
UnparkBlock(pindexTest);
}
}
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
// for the most work chain if we come across them; we can't switch
// to a chain unless we have all the non-active-chain parent blocks.
bool fInvalidChain = pindexTest->nStatus.isInvalid();
bool fMissingData = !pindexTest->nStatus.hasData();
if (!(fInvalidChain || fParkedChain || fMissingData)) {
// The current block is acceptable, move to the parent, up to
// the fork point.
pindexTest = pindexTest->pprev;
continue;
}
// Candidate chain is not usable (either invalid or parked or
// missing data)
hasValidAncestor = false;
setBlockIndexCandidates.erase(pindexTest);
if (fInvalidChain && (m_chainman.m_best_invalid == nullptr ||
pindexNew->nChainWork >
m_chainman.m_best_invalid->nChainWork)) {
m_chainman.m_best_invalid = pindexNew;
}
if (fParkedChain && (m_chainman.m_best_parked == nullptr ||
pindexNew->nChainWork >
m_chainman.m_best_parked->nChainWork)) {
m_chainman.m_best_parked = pindexNew;
}
LogPrintf("Considered switching to better tip %s but that chain "
"contains a%s%s%s block.\n",
pindexNew->GetBlockHash().ToString(),
fInvalidChain ? "n invalid" : "",
fParkedChain ? " parked" : "",
fMissingData ? " missing-data" : "");
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
while (pindexTest != pindexFailed) {
if (fInvalidChain || fParkedChain) {
pindexFailed->nStatus =
pindexFailed->nStatus.withFailedParent(fInvalidChain)
.withParkedParent(fParkedChain);
} else if (fMissingData) {
// If we're missing data, then add back to
// m_blocks_unlinked, so that if the block arrives in the
// future we can try adding to setBlockIndexCandidates
// again.
m_blockman.m_blocks_unlinked.insert(
std::make_pair(pindexFailed->pprev, pindexFailed));
}
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
if (fInvalidChain || fParkedChain) {
// We discovered a new chain tip that is either parked or
// invalid, we may want to warn.
CheckForkWarningConditionsOnNewFork(pindexNew);
}
}
blocksToReconcile.push_back(pindexNew);
// We found a candidate that has valid ancestors. This is our guy.
if (hasValidAncestor) {
return pindexNew;
}
} while (true);
}
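// Illustrative sketch of the unpark rule above, as a standalone helper
// (hypothetical). A deep parked fork must hold twice the work the active
// chain accumulated since the fork point; at depths 1-3 the extra
// requirement is halved (deltaWork >> 1) so a single slow block cannot
// strand us on a parked chain by accident.
#if 0
arith_uint256 RequiredWorkToUnpark(const CBlockIndex *tip,
                                   const CBlockIndex *fork) {
    arith_uint256 required = tip->nChainWork;
    const int depth = tip->nHeight - fork->nHeight;
    if (depth >= 1 && depth <= 3) {
        const CBlockIndex *extra = (depth == 1) ? tip : tip->pprev;
        required += (extra->nChainWork - fork->nChainWork) >> 1;
    } else {
        required += tip->nChainWork - fork->nChainWork;
    }
    return required;
}
#endif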
/**
* Delete all entries in setBlockIndexCandidates that are worse than the current
* tip.
*/
void Chainstate::PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to
// return to it later in case a reorganization to a better block fails.
auto it = setBlockIndexCandidates.begin();
while (it != setBlockIndexCandidates.end() &&
setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left
// in setBlockIndexCandidates.
assert(!setBlockIndexCandidates.empty());
}
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either nullptr or a pointer to a CBlock corresponding to
* pindexMostWork.
*
* @returns true unless a system error occurred
*/
bool Chainstate::ActivateBestChainStep(
BlockValidationState &state, CBlockIndex *pindexMostWork,
const std::shared_ptr<const CBlock> &pblock, bool &fInvalidFound,
const avalanche::Processor *const avalanche) {
AssertLockHeld(cs_main);
if (m_mempool) {
AssertLockHeld(m_mempool->cs);
}
const CBlockIndex *pindexOldTip = m_chain.Tip();
const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
DisconnectedBlockTransactions disconnectpool;
while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
if (!fBlocksDisconnected) {
// Import and clear mempool; we must do this to preserve
// topological ordering in the mempool index. This is ok since
// inserts into the mempool are very fast now in our new
// implementation.
disconnectpool.importMempool(*m_mempool);
}
if (!DisconnectTip(state, &disconnectpool)) {
// This is likely a fatal error, but keep the mempool consistent,
// just in case. Only remove from the mempool in this case.
if (m_mempool) {
disconnectpool.updateMempoolForReorg(*this, false, *m_mempool);
}
// If we're unable to disconnect a block during normal operation,
// then that is a failure of our local system -- we should abort
// rather than stay on a less work chain.
AbortNode(state,
"Failed to disconnect block; see debug.log for details");
return false;
}
fBlocksDisconnected = true;
}
// Build list of new blocks to connect.
std::vector<CBlockIndex *> vpindexToConnect;
bool fContinue = true;
int nHeight = pindexFork ? pindexFork->nHeight : -1;
while (fContinue && nHeight != pindexMostWork->nHeight) {
// Don't iterate the entire list of potential improvements toward the
// best tip, as we likely only need a few blocks along the way.
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
vpindexToConnect.clear();
vpindexToConnect.reserve(nTargetHeight - nHeight);
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
while (pindexIter && pindexIter->nHeight != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
nHeight = nTargetHeight;
// Connect new blocks.
for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
BlockPolicyValidationState blockPolicyState;
if (!ConnectTip(state, blockPolicyState, pindexConnect,
pindexConnect == pindexMostWork
? pblock
: std::shared_ptr<const CBlock>(),
disconnectpool, avalanche)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (state.GetResult() !=
BlockValidationResult::BLOCK_MUTATED) {
InvalidChainFound(vpindexToConnect.back());
}
state = BlockValidationState();
fInvalidFound = true;
fContinue = false;
break;
}
if (blockPolicyState.IsInvalid()) {
// The block violates a policy rule.
fContinue = false;
break;
}
// A system error occurred (disk space, database error, ...).
// Make the mempool consistent with the current tip, just in
// case any observers try to use it before shutdown.
if (m_mempool) {
disconnectpool.updateMempoolForReorg(*this, false,
*m_mempool);
}
return false;
} else {
PruneBlockIndexCandidates();
if (!pindexOldTip ||
m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return
// temporarily to release the lock.
fContinue = false;
break;
}
}
}
}
if (m_mempool) {
if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
// If any blocks were disconnected, we need to update the mempool
// even if disconnectpool is empty. The disconnectpool may also be
// non-empty if the mempool was imported due to new validation rules
// being in effect.
LogPrint(BCLog::MEMPOOL,
"Updating mempool due to reorganization or "
"rules upgrade/downgrade\n");
disconnectpool.updateMempoolForReorg(*this, true, *m_mempool);
}
m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
}
// Callbacks/notifications for a new best chain.
if (fInvalidFound) {
CheckForkWarningConditionsOnNewFork(pindexMostWork);
} else {
CheckForkWarningConditions();
}
return true;
}
static SynchronizationState GetSynchronizationState(bool init) {
if (!init) {
return SynchronizationState::POST_INIT;
}
if (::fReindex) {
return SynchronizationState::INIT_REINDEX;
}
return SynchronizationState::INIT_DOWNLOAD;
}
static bool NotifyHeaderTip(Chainstate &chainstate) LOCKS_EXCLUDED(cs_main) {
bool fNotify = false;
bool fInitialBlockDownload = false;
static CBlockIndex *pindexHeaderOld = nullptr;
CBlockIndex *pindexHeader = nullptr;
{
LOCK(cs_main);
pindexHeader = chainstate.m_chainman.m_best_header;
if (pindexHeader != pindexHeaderOld) {
fNotify = true;
fInitialBlockDownload = chainstate.IsInitialBlockDownload();
pindexHeaderOld = pindexHeader;
}
}
// Send block tip changed notifications without cs_main
if (fNotify) {
chainstate.m_chainman.GetNotifications().headerTip(
GetSynchronizationState(fInitialBlockDownload),
pindexHeader->nHeight, pindexHeader->nTime, false);
}
return fNotify;
}
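/**
 * If more than a handful of callbacks are queued on the validation
 * interface, block until the queue drains. This keeps long-running loops
 * (e.g. reindex) from letting the callback queue grow without bound.
 */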
static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
AssertLockNotHeld(cs_main);
if (GetMainSignals().CallbacksPending() > 10) {
SyncWithValidationInterfaceQueue();
}
}
bool Chainstate::ActivateBestChain(BlockValidationState &state,
std::shared_ptr<const CBlock> pblock,
avalanche::Processor *const avalanche,
bool skip_checkblockindex) {
AssertLockNotHeld(m_chainstate_mutex);
// Note that while we're often called here from ProcessNewBlock, this is
// far from a guarantee. Code in the P2P/RPC layers will often end up
// calling us in the middle of ProcessNewBlock - do not assume pblock is
// set sanely for performance or correctness!
AssertLockNotHeld(::cs_main);
// ABC maintains a fair degree of expensive-to-calculate internal state
// because this function periodically releases cs_main so that it does not
// lock up other threads for too long during large connects - and to allow
// for e.g. the callback queue to drain. We therefore use m_chainstate_mutex
// to enforce mutual exclusion so that only one caller may execute this
// function at a time.
LOCK(m_chainstate_mutex);
// Belt-and-suspenders check that we aren't attempting to advance the
// background chainstate past the snapshot base block.
if (WITH_LOCK(::cs_main, return m_disabled)) {
LogPrintf("m_disabled is set - this chainstate should not be in "
"operation. Please report this as a bug. %s\n",
PACKAGE_BUGREPORT);
return false;
}
CBlockIndex *pindexMostWork = nullptr;
CBlockIndex *pindexNewTip = nullptr;
int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
do {
// Block until the validation queue drains. This should largely
// never happen in normal operation; however, it may happen during
// reindex, causing memory blowup if we run too far ahead.
// Note that if a validationinterface callback ends up calling
// ActivateBestChain this may lead to a deadlock! We should
// probably have a DEBUG_LOCKORDER test for this in the future.
LimitValidationInterfaceQueue();
std::vector<const CBlockIndex *> blocksToReconcile;
bool blocks_connected = false;
const bool fAutoUnpark =
gArgs.GetBoolArg("-automaticunparking", !avalanche);
{
LOCK(cs_main);
// Lock transaction pool for at least as long as it takes for
// updateMempoolForReorg to be executed if needed
LOCK(MempoolMutex());
CBlockIndex *starting_tip = m_chain.Tip();
do {
// We absolutely may not unlock cs_main until we've made forward
// progress (with the exception of shutdown due to hardware
// issues, low disk space, etc).
if (pindexMostWork == nullptr) {
pindexMostWork =
FindMostWorkChain(blocksToReconcile, fAutoUnpark);
}
// Whether we have anything to do at all.
if (pindexMostWork == nullptr ||
pindexMostWork == m_chain.Tip()) {
break;
}
bool fInvalidFound = false;
std::shared_ptr<const CBlock> nullBlockPtr;
if (!ActivateBestChainStep(
state, pindexMostWork,
pblock && pblock->GetHash() ==
pindexMostWork->GetBlockHash()
? pblock
: nullBlockPtr,
fInvalidFound, avalanche)) {
// A system error occurred
return false;
}
blocks_connected = true;
if (fInvalidFound ||
(pindexMostWork && pindexMostWork->nStatus.isParked())) {
// Wipe cache; we may need another branch now.
pindexMostWork = nullptr;
}
pindexNewTip = m_chain.Tip();
// This will have been toggled in
// ActivateBestChainStep -> ConnectTip ->
// MaybeCompleteSnapshotValidation, if at all, so we should
// catch it here.
//
// Break this do-while to ensure we don't advance past the base
// snapshot.
if (m_disabled) {
break;
}
} while (!m_chain.Tip() ||
(starting_tip && CBlockIndexWorkComparator()(
m_chain.Tip(), starting_tip)));
// Check the index once we're done with the above loop, since
// we're going to release cs_main soon. If the index is in a bad
// state now, then it's better to know immediately rather than
// randomly have it cause a problem in a race.
if (!skip_checkblockindex) {
CheckBlockIndex();
}
if (blocks_connected) {
const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
bool fInitialDownload = IsInitialBlockDownload();
// Notify external listeners about the new tip.
// Enqueue while holding cs_main to ensure that UpdatedBlockTip
// is called in the order in which blocks are connected
if (pindexFork != pindexNewTip) {
// Notify ValidationInterface subscribers
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
fInitialDownload);
// Always notify the UI if a new block tip was connected
m_chainman.GetNotifications().blockTip(
GetSynchronizationState(fInitialDownload),
*pindexNewTip);
}
}
}
// When we reach this point, we switched to a new tip (stored in
// pindexNewTip).
if (avalanche) {
for (const CBlockIndex *pindex : blocksToReconcile) {
avalanche->addToReconcile(pindex);
avalanche->computeStakingReward(pindex);
}
}
if (!blocks_connected) {
return true;
}
if (nStopAtHeight && pindexNewTip &&
pindexNewTip->nHeight >= nStopAtHeight) {
StartShutdown();
}
if (WITH_LOCK(::cs_main, return m_disabled)) {
// Background chainstate has reached the snapshot base block, so
// exit.
break;
}
// We check shutdown only after giving ActivateBestChainStep a chance to
// run once so that we never shutdown before connecting the genesis
// block during LoadChainTip(). Previously this caused an assert()
// failure during shutdown in such cases as the UTXO DB flushing checks
// that the best block hash is non-null.
if (ShutdownRequested()) {
break;
}
} while (pindexNewTip != pindexMostWork);
// Write changes periodically to disk, after relay.
if (!FlushStateToDisk(state, FlushStateMode::PERIODIC)) {
return false;
}
return true;
}
bool Chainstate::PreciousBlock(BlockValidationState &state, CBlockIndex *pindex,
avalanche::Processor *const avalanche) {
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
{
LOCK(cs_main);
if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
// Nothing to do, this block is not at the tip.
return true;
}
if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
// The chain has been extended since the last call, reset the
// counter.
nBlockReverseSequenceId = -1;
}
nLastPreciousChainwork = m_chain.Tip()->nChainWork;
setBlockIndexCandidates.erase(pindex);
pindex->nSequenceId = nBlockReverseSequenceId;
if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
// We can't keep reducing the counter if somebody really wants to
// call preciousblock 2**31-1 times on the same set of tips...
nBlockReverseSequenceId--;
}
// In case this was parked, unpark it.
UnparkBlock(pindex);
// Make sure it is added to the candidate list if appropriate.
if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
pindex->HaveTxsDownloaded()) {
setBlockIndexCandidates.insert(pindex);
PruneBlockIndexCandidates();
}
}
return ActivateBestChain(state, /*pblock=*/nullptr, avalanche);
}
namespace {
// Leverage RAII to run a functor at scope end
template <typename Func> struct Defer {
Func func;
Defer(Func &&f) : func(std::move(f)) {}
~Defer() { func(); }
};
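// Minimal usage sketch (illustrative): the functor runs when `d` goes out of
// scope, even on early return or exception:
//     {
//         Defer d([&] { LogPrintf("cleanup ran\n"); });
//         ... // work that may return early
//     } // ~Defer() invokes the lambda here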
} // namespace
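/**
 * Disconnect pindex and any of its descendants from the active chain,
 * marking them failed (invalidate == true) or parked (invalidate == false).
 * Shared implementation for InvalidateBlock() and ParkBlock().
 */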
bool Chainstate::UnwindBlock(BlockValidationState &state, CBlockIndex *pindex,
bool invalidate) {
// Genesis block can't be invalidated or parked
assert(pindex);
if (pindex->nHeight == 0) {
return false;
}
CBlockIndex *to_mark_failed_or_parked = pindex;
bool pindex_was_in_chain = false;
int disconnected = 0;
// We do not allow ActivateBestChain() to run while UnwindBlock() is
// running, as that could cause the tip to change while we disconnect
// blocks. (Note for backport of Core PR16849: we acquire
// LOCK(m_chainstate_mutex) in the Park, Invalidate and FinalizeBlock
// functions due to differences in our code)
AssertLockHeld(m_chainstate_mutex);
// We'll be acquiring and releasing cs_main below, to allow the validation
// callbacks to run. However, we should keep the block index in a
// consistent state as we disconnect blocks -- in particular we need to
// add equal-work blocks to setBlockIndexCandidates as we disconnect.
// To avoid walking the block index repeatedly in search of candidates,
// build a map once so that we can look up candidate blocks by chain
// work as we go.
std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
{
LOCK(cs_main);
for (auto &entry : m_blockman.m_block_index) {
CBlockIndex *candidate = &entry.second;
// We don't need to put anything in our active chain into the
// multimap, because those candidates will be found and considered
// as we disconnect.
// Instead, consider only non-active-chain blocks that have at
// least as much work as where we expect the new tip to end up.
if (!m_chain.Contains(candidate) &&
!CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
candidate->IsValid(BlockValidity::TRANSACTIONS) &&
candidate->HaveTxsDownloaded()) {
candidate_blocks_by_work.insert(
std::make_pair(candidate->nChainWork, candidate));
}
}
}
{
LOCK(cs_main);
// Lock for as long as disconnectpool is in scope to make sure
// UpdateMempoolForReorg is called after DisconnectTip without unlocking
// in between
LOCK(MempoolMutex());
constexpr int maxDisconnectPoolBlocks = 10;
bool ret = false;
DisconnectedBlockTransactions disconnectpool;
// After 10 blocks this becomes nullptr, so that DisconnectTip will
// stop giving us unwound block txs if we are doing a deep unwind.
DisconnectedBlockTransactions *optDisconnectPool = &disconnectpool;
// Disable thread safety analysis because we can't require m_mempool->cs
// as m_mempool can be null. We keep the runtime analysis though.
Defer deferred([&]() NO_THREAD_SAFETY_ANALYSIS {
AssertLockHeld(cs_main);
if (m_mempool && !disconnectpool.isEmpty()) {
AssertLockHeld(m_mempool->cs);
// DisconnectTip will add transactions to disconnectpool.
// When all unwinding is done and we are on a new tip, we must
// add all transactions back to the mempool against the new tip.
disconnectpool.updateMempoolForReorg(*this,
/* fAddToMempool = */ ret,
*m_mempool);
}
});
// Disconnect (descendants of) pindex, and mark them invalid.
while (true) {
if (ShutdownRequested()) {
break;
}
// Make sure the queue of validation callbacks doesn't grow
// unboundedly.
// FIXME this commented code is a regression and could cause OOM if
// a very old block is invalidated via the invalidateblock RPC.
// This can be uncommented if the main signals are moved away from
// cs_main or this code is refactored so that cs_main can be
// released at this point.
//
// LimitValidationInterfaceQueue();
if (!m_chain.Contains(pindex)) {
break;
}
if (m_mempool && disconnected == 0) {
// On first iteration, we grab all the mempool txs to preserve
// topological ordering. This has the side-effect of temporarily
// clearing the mempool, but we will re-add later in
// updateMempoolForReorg() (above). This technique guarantees
// mempool consistency as well as ensures that our topological
// entry_id index is always correct.
disconnectpool.importMempool(*m_mempool);
}
pindex_was_in_chain = true;
CBlockIndex *invalid_walk_tip = m_chain.Tip();
// ActivateBestChain considers blocks already in m_chain
// unconditionally valid already, so force disconnect away from it.
ret = DisconnectTip(state, optDisconnectPool);
++disconnected;
if (optDisconnectPool && disconnected > maxDisconnectPoolBlocks) {
// Stop using the disconnect pool after 10 blocks. After 10
// blocks we no longer add block txs to the disconnectpool.
// However, when this scope ends we will reconcile what's
// in the pool with the new tip (in the deferred destructor above).
optDisconnectPool = nullptr;
}
if (!ret) {
return false;
}
assert(invalid_walk_tip->pprev == m_chain.Tip());
// We immediately mark the disconnected blocks as invalid.
// This prevents a case where pruned nodes may fail to
// invalidateblock and be left unable to start as they have no tip
// candidates (as there are no blocks that meet the "have data and
// are not invalid per nStatus" criteria for inclusion in
// setBlockIndexCandidates).
invalid_walk_tip->nStatus =
invalidate ? invalid_walk_tip->nStatus.withFailed()
: invalid_walk_tip->nStatus.withParked();
m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
(invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
: to_mark_failed_or_parked->nStatus.isParked())) {
// We only want to mark the last disconnected block as
// Failed (or Parked); its children need to be FailedParent (or
// ParkedParent) instead.
to_mark_failed_or_parked->nStatus =
(invalidate
? to_mark_failed_or_parked->nStatus.withFailed(false)
.withFailedParent()
: to_mark_failed_or_parked->nStatus.withParked(false)
.withParkedParent());
m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
}
// Add any equal or more work headers to setBlockIndexCandidates
auto candidate_it = candidate_blocks_by_work.lower_bound(
invalid_walk_tip->pprev->nChainWork);
while (candidate_it != candidate_blocks_by_work.end()) {
if (!CBlockIndexWorkComparator()(candidate_it->second,
invalid_walk_tip->pprev)) {
setBlockIndexCandidates.insert(candidate_it->second);
candidate_it = candidate_blocks_by_work.erase(candidate_it);
} else {
++candidate_it;
}
}
// Track the last disconnected block, so we can correct its
// FailedParent (or ParkedParent) status in future iterations, or,
// if it's the last one, call InvalidChainFound on it.
to_mark_failed_or_parked = invalid_walk_tip;
}
}
CheckBlockIndex();
{
LOCK(cs_main);
if (m_chain.Contains(to_mark_failed_or_parked)) {
// If the to-be-marked invalid block is in the active chain,
// something is interfering and we can't proceed.
return false;
}
// Mark pindex (or the last disconnected block) as invalid (or parked),
// even when it never was in the main chain.
to_mark_failed_or_parked->nStatus =
invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
: to_mark_failed_or_parked->nStatus.withParked();
m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
if (invalidate) {
m_chainman.m_failed_blocks.insert(to_mark_failed_or_parked);
}
// If any new blocks somehow arrived while we were disconnecting
// (above), then the pre-calculation of what should go into
// setBlockIndexCandidates may have missed entries. This would
// technically be an inconsistency in the block index, but if we clean
// it up here, this should be an essentially unobservable error.
// Loop back over all block index entries and add any missing entries
// to setBlockIndexCandidates.
for (auto &[_, block_index] : m_blockman.m_block_index) {
if (block_index.IsValid(BlockValidity::TRANSACTIONS) &&
block_index.HaveTxsDownloaded() &&
!setBlockIndexCandidates.value_comp()(&block_index,
m_chain.Tip())) {
setBlockIndexCandidates.insert(&block_index);
}
}
if (invalidate) {
InvalidChainFound(to_mark_failed_or_parked);
}
}
// Only notify about a new block tip if the active chain was modified.
if (pindex_was_in_chain) {
m_chainman.GetNotifications().blockTip(
GetSynchronizationState(IsInitialBlockDownload()),
*to_mark_failed_or_parked->pprev);
}
return true;
}
bool Chainstate::InvalidateBlock(BlockValidationState &state,
CBlockIndex *pindex) {
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
// See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
LOCK(m_chainstate_mutex);
return UnwindBlock(state, pindex, true);
}
bool Chainstate::ParkBlock(BlockValidationState &state, CBlockIndex *pindex) {
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
// See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
LOCK(m_chainstate_mutex);
return UnwindBlock(state, pindex, false);
}
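/**
 * Apply the status transform f to pindex, but only if pindex is pindexBase or
 * one of its descendants (pindexBase == nullptr matches any block). A block
 * whose status actually changes is marked dirty and, when eligible, re-added
 * to setBlockIndexCandidates. Returns true if the status changed.
 */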
template <typename F>
bool Chainstate::UpdateFlagsForBlock(CBlockIndex *pindexBase,
CBlockIndex *pindex, F f) {
BlockStatus newStatus = f(pindex->nStatus);
if (pindex->nStatus != newStatus &&
(!pindexBase ||
pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
pindex->nStatus = newStatus;
m_blockman.m_dirty_blockindex.insert(pindex);
if (newStatus.isValid()) {
m_chainman.m_failed_blocks.erase(pindex);
}
if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
pindex->HaveTxsDownloaded() &&
setBlockIndexCandidates.value_comp()(m_chain.Tip(), pindex)) {
setBlockIndexCandidates.insert(pindex);
}
return true;
}
return false;
}
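/**
 * Apply f to pindex and all of its ancestors, remembering the deepest
 * ancestor that changed. pindexReset is cleared if one of its ancestors was
 * modified. The whole block index is then swept, applying fChild to
 * descendants of pindex and fAncestorWasChanged to descendants of the
 * deepest changed ancestor.
 */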
template <typename F, typename C, typename AC>
void Chainstate::UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset,
F f, C fChild, AC fAncestorWasChanged) {
AssertLockHeld(cs_main);
// Update the current block and ancestors; while we're doing this, identify
// which was the deepest ancestor we changed.
CBlockIndex *pindexDeepestChanged = pindex;
for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
pindexAncestor = pindexAncestor->pprev) {
if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
pindexDeepestChanged = pindexAncestor;
}
}
if (pindexReset &&
pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
pindexDeepestChanged) {
// reset pindexReset if it had a modified ancestor.
pindexReset = nullptr;
}
// Update all blocks under modified blocks.
for (auto &[_, block_index] : m_blockman.m_block_index) {
UpdateFlagsForBlock(pindex, &block_index, fChild);
UpdateFlagsForBlock(pindexDeepestChanged, &block_index,
fAncestorWasChanged);
}
}
void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) {
AssertLockHeld(cs_main);
UpdateFlags(
pindex, m_chainman.m_best_invalid,
[](const BlockStatus status) {
return status.withClearedFailureFlags();
},
[](const BlockStatus status) {
return status.withClearedFailureFlags();
},
[](const BlockStatus status) {
return status.withFailedParent(false);
});
}
void Chainstate::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
AssertLockHeld(cs_main);
UpdateFlags(
pindex, m_chainman.m_best_parked,
[](const BlockStatus status) {
return status.withClearedParkedFlags();
},
[fClearChildren](const BlockStatus status) {
return fClearChildren ? status.withClearedParkedFlags()
: status.withParkedParent(false);
},
[](const BlockStatus status) {
return status.withParkedParent(false);
});
}
void Chainstate::UnparkBlockAndChildren(CBlockIndex *pindex) {
return UnparkBlockImpl(pindex, true);
}
void Chainstate::UnparkBlock(CBlockIndex *pindex) {
return UnparkBlockImpl(pindex, false);
}
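/**
 * Record pindex as the most recent avalanche-finalized block. Returns false
 * if pindex is null or not on the active chain. Staking-reward state below
 * its height is cleaned up; if the block was not already covered by a
 * previous finalization, it becomes the new finalization point and the
 * BlockFinalized signal fires.
 */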
bool Chainstate::AvalancheFinalizeBlock(CBlockIndex *pindex,
avalanche::Processor &avalanche) {
if (!pindex) {
return false;
}
if (!m_chain.Contains(pindex)) {
LogPrint(BCLog::AVALANCHE,
"The block to mark finalized by avalanche is not on the "
"active chain: %s\n",
pindex->GetBlockHash().ToString());
return false;
}
avalanche.cleanupStakingRewards(pindex->nHeight);
if (IsBlockAvalancheFinalized(pindex)) {
return true;
}
{
LOCK(cs_avalancheFinalizedBlockIndex);
m_avalancheFinalizedBlockIndex = pindex;
}
GetMainSignals().BlockFinalized(pindex);
return true;
}
void Chainstate::ClearAvalancheFinalizedBlock() {
LOCK(cs_avalancheFinalizedBlockIndex);
m_avalancheFinalizedBlockIndex = nullptr;
}
bool Chainstate::IsBlockAvalancheFinalized(const CBlockIndex *pindex) const {
LOCK(cs_avalancheFinalizedBlockIndex);
return pindex && m_avalancheFinalizedBlockIndex &&
m_avalancheFinalizedBlockIndex->GetAncestor(pindex->nHeight) ==
pindex;
}
/**
* Mark a block as having its data received and checked (up to
* BLOCK_VALID_TRANSACTIONS).
*/
void Chainstate::ReceivedBlockTransactions(const CBlock &block,
CBlockIndex *pindexNew,
const FlatFilePos &pos) {
pindexNew->nTx = block.vtx.size();
pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus = pindexNew->nStatus.withData();
pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS);
m_blockman.m_dirty_blockindex.insert(pindexNew);
if (pindexNew->UpdateChainStats()) {
// If pindexNew is the genesis block or all parents are
// BLOCK_VALID_TRANSACTIONS.
std::deque<CBlockIndex *> queue;
queue.push_back(pindexNew);
// Recursively process any descendant blocks that now may be eligible to
// be connected.
while (!queue.empty()) {
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->UpdateChainStats();
if (pindex->nSequenceId == 0) {
// We assign a sequence id when transactions are received to
// prevent a miner from being able to broadcast a block but not
// its content. However, a sequence id may have been set
// manually, for instance via PreciousBlock, in which case we
// don't need to assign one.
pindex->nSequenceId = nBlockSequenceId++;
}
if (m_chain.Tip() == nullptr ||
!setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
range = m_blockman.m_blocks_unlinked.equal_range(pindex);
while (range.first != range.second) {
std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
range.first;
queue.push_back(it->second);
range.first++;
m_blockman.m_blocks_unlinked.erase(it);
}
}
} else if (pindexNew->pprev &&
pindexNew->pprev->IsValid(BlockValidity::TREE)) {
m_blockman.m_blocks_unlinked.insert(
std::make_pair(pindexNew->pprev, pindexNew));
}
}
/**
* Return true if the provided block header is valid.
* Only verify PoW if blockValidationOptions is configured to do so.
* This allows validation of headers on which the PoW hasn't been done.
* For example: to validate template handed to mining software.
* Do not call this for any check that depends on the context.
* For context-dependent calls, see ContextualCheckBlockHeader.
*/
static bool CheckBlockHeader(const CBlockHeader &block,
BlockValidationState &state,
const Consensus::Params ¶ms,
BlockValidationOptions validationOptions) {
// Check proof of work matches claimed amount
if (validationOptions.shouldValidatePoW() &&
!CheckProofOfWork(block.GetHash(), block.nBits, params)) {
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
"high-hash", "proof of work failed");
}
return true;
}
bool CheckBlock(const CBlock &block, BlockValidationState &state,
const Consensus::Params ¶ms,
BlockValidationOptions validationOptions) {
// These are checks that are independent of context.
if (block.fChecked) {
return true;
}
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(block, state, params, validationOptions)) {
return false;
}
// Check the merkle root.
if (validationOptions.shouldValidateMerkleRoot()) {
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
"bad-txnmrklroot", "hashMerkleRoot mismatch");
}
// Check for merkle tree malleability (CVE-2012-2459): repeating
// sequences of transactions in a block without affecting the merkle
// root of a block, while still invalidating it.
if (mutated) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
"bad-txns-duplicate", "duplicate transaction");
}
}
// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.
// First transaction must be coinbase.
if (block.vtx.empty()) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-cb-missing", "first tx is not coinbase");
}
// Size limits.
auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
// Bail early if there is no way this block is of reasonable size.
if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-blk-length", "size limits failed");
}
auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
if (currentBlockSize > nMaxBlockSize) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-blk-length", "size limits failed");
}
// And a valid coinbase.
TxValidationState tx_state;
if (!CheckCoinbase(*block.vtx[0], tx_state)) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(),
strprintf("Coinbase check failed (txid %s) %s",
block.vtx[0]->GetId().ToString(),
tx_state.GetDebugMessage()));
}
// Check transactions for regularity, skipping the first. Note that this
// is the first time we check that all after the first are !IsCoinBase.
for (size_t i = 1; i < block.vtx.size(); i++) {
auto *tx = block.vtx[i].get();
if (!CheckRegularTransaction(*tx, tx_state)) {
return state.Invalid(
BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(),
strprintf("Transaction check failed (txid %s) %s",
tx->GetId().ToString(), tx_state.GetDebugMessage()));
}
}
if (validationOptions.shouldValidatePoW() &&
validationOptions.shouldValidateMerkleRoot()) {
block.fChecked = true;
}
return true;
}
bool HasValidProofOfWork(const std::vector<CBlockHeader> &headers,
const Consensus::Params &consensusParams) {
return std::all_of(headers.cbegin(), headers.cend(),
[&](const auto &header) {
return CheckProofOfWork(
header.GetHash(), header.nBits, consensusParams);
});
}
arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader> &headers) {
arith_uint256 total_work{0};
for (const CBlockHeader &header : headers) {
CBlockIndex dummy(header);
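// GetBlockProof() yields the expected number of hashes needed to meet
// the target encoded by nBits, i.e. roughly 2^256 / (target + 1).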
total_work += GetBlockProof(dummy);
}
return total_work;
}
/**
* Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock().
* NOTE: This function is not currently invoked by ConnectBlock(), so we
* should consider upgrade issues if we change which consensus rules are
* enforced in this function (eg by adding a new consensus rule). See comment
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
static bool ContextualCheckBlockHeader(
const CBlockHeader &block, BlockValidationState &state,
BlockManager &blockman, ChainstateManager &chainman,
const CBlockIndex *pindexPrev, NodeClock::time_point now,
const std::optional<CCheckpointData> &test_checkpoints = std::nullopt)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
const CChainParams ¶ms = chainman.GetParams();
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
"bad-diffbits", "incorrect proof of work");
}
// Check against checkpoints
if (chainman.m_options.checkpoints_enabled) {
const CCheckpointData &checkpoints =
test_checkpoints ? test_checkpoints.value() : params.Checkpoints();
// Check that the block chain matches the known block chain up to a
// checkpoint.
if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
LogPrint(BCLog::VALIDATION,
"ERROR: %s: rejected by checkpoint lock-in at %d\n",
__func__, nHeight);
return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
"checkpoint mismatch");
}
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
// in our BlockIndex().
const CBlockIndex *pcheckpoint =
blockman.GetLastCheckpoint(checkpoints);
if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
LogPrint(BCLog::VALIDATION,
"ERROR: %s: forked chain older than last checkpoint "
"(height %d)\n",
__func__, nHeight);
return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
"bad-fork-prior-to-checkpoint");
}
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
"time-too-old", "block's timestamp is too early");
}
// Check timestamp
if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE,
"time-too-new",
"block timestamp too far in the future");
}
// Reject blocks with outdated version
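// (nVersion >= 2 is required once BIP34, height-in-coinbase, is active;
// >= 3 once BIP66, strict DER signatures, is active; and >= 4 once
// BIP65, OP_CHECKLOCKTIMEVERIFY, is active.)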
if ((block.nVersion < 2 &&
DeploymentActiveAfter(pindexPrev, chainman,
Consensus::DEPLOYMENT_HEIGHTINCB)) ||
(block.nVersion < 3 &&
DeploymentActiveAfter(pindexPrev, chainman,
Consensus::DEPLOYMENT_DERSIG)) ||
(block.nVersion < 4 &&
DeploymentActiveAfter(pindexPrev, chainman,
Consensus::DEPLOYMENT_CLTV))) {
return state.Invalid(
BlockValidationResult::BLOCK_INVALID_HEADER,
strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
}
return true;
}
bool ContextualCheckTransactionForCurrentBlock(
const CBlockIndex &active_chain_tip, const Consensus::Params ¶ms,
const CTransaction &tx, TxValidationState &state) {
AssertLockHeld(cs_main);
// ContextualCheckTransactionForCurrentBlock() uses
// active_chain_tip.Height()+1 to evaluate nLockTime because when
// IsFinalTx() is called within AcceptBlock(), the height of the
// block *being* evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// ContextualCheckTransaction() with one more than
// active_chain_tip.Height().
const int nBlockHeight = active_chain_tip.nHeight + 1;
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
// ContextualCheckTransaction().
// This time can also be used for consensus upgrades.
const int64_t nMedianTimePast{active_chain_tip.GetMedianTimePast()};
return ContextualCheckTransaction(params, tx, state, nBlockHeight,
nMedianTimePast);
}
/**
* NOTE: This function is not currently invoked by ConnectBlock(), so we
* should consider upgrade issues if we change which consensus rules are
* enforced in this function (eg by adding a new consensus rule). See comment
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
static bool ContextualCheckBlock(const CBlock &block,
BlockValidationState &state,
const ChainstateManager &chainman,
const CBlockIndex *pindexPrev) {
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
// Enforce BIP113 (Median Time Past).
bool enforce_locktime_median_time_past{false};
if (DeploymentActiveAfter(pindexPrev, chainman,
Consensus::DEPLOYMENT_CSV)) {
assert(pindexPrev != nullptr);
enforce_locktime_median_time_past = true;
}
const int64_t nMedianTimePast =
pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
const int64_t nLockTimeCutoff{enforce_locktime_median_time_past
? nMedianTimePast
: block.GetBlockTime()};
const Consensus::Params &params = chainman.GetConsensus();
const bool fIsMagneticAnomalyEnabled =
IsMagneticAnomalyEnabled(params, pindexPrev);
// Check transactions:
// - canonical ordering
// - ensure they are finalized
// - check they have the minimum size
const CTransaction *prevTx = nullptr;
for (const auto &ptx : block.vtx) {
const CTransaction &tx = *ptx;
if (fIsMagneticAnomalyEnabled) {
if (prevTx && (tx.GetId() <= prevTx->GetId())) {
if (tx.GetId() == prevTx->GetId()) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"tx-duplicate",
strprintf("Duplicated transaction %s",
tx.GetId().ToString()));
}
return state.Invalid(
BlockValidationResult::BLOCK_CONSENSUS, "tx-ordering",
strprintf("Transaction order is invalid (%s < %s)",
tx.GetId().ToString(),
prevTx->GetId().ToString()));
}
if (prevTx || !tx.IsCoinBase()) {
prevTx = &tx;
}
}
TxValidationState tx_state;
if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
nLockTimeCutoff)) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(),
tx_state.GetDebugMessage());
}
}
// Enforce rule that the coinbase starts with serialized block height
if (DeploymentActiveAfter(pindexPrev, chainman,
Consensus::DEPLOYMENT_HEIGHTINCB)) {
CScript expect = CScript() << nHeight;
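// Worked example: at nHeight 100000, CScript() << nHeight serializes
// to {0x03, 0xa0, 0x86, 0x01} - a 3-byte push of the little-endian
// height - so the coinbase scriptSig must begin with those bytes.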
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(),
block.vtx[0]->vin[0].scriptSig.begin())) {
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
"bad-cb-height",
"block height mismatch in coinbase");
}
}
return true;
}
/**
* If the provided block header is valid, add it to the block index.
*
* Returns true if the block is successfully added to the block index.
*/
bool ChainstateManager::AcceptBlockHeader(
const CBlockHeader &block, BlockValidationState &state,
CBlockIndex **ppindex, bool min_pow_checked,
const std::optional<CCheckpointData> &test_checkpoints) {
AssertLockHeld(cs_main);
const Config &config = this->GetConfig();
const CChainParams &chainparams = config.GetChainParams();
// Check for duplicate
BlockHash hash = block.GetHash();
BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
if (miSelf != m_blockman.m_block_index.end()) {
// Block header is already known.
CBlockIndex *pindex = &(miSelf->second);
if (ppindex) {
*ppindex = pindex;
}
if (pindex->nStatus.isInvalid()) {
LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n",
__func__, hash.ToString());
return state.Invalid(
BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
}
return true;
}
if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
BlockValidationOptions(config))) {
LogPrint(BCLog::VALIDATION,
"%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
hash.ToString(), state.ToString());
return false;
}
// Get prev block index
BlockMap::iterator mi{
m_blockman.m_block_index.find(block.hashPrevBlock)};
if (mi == m_blockman.m_block_index.end()) {
LogPrint(BCLog::VALIDATION,
"header %s has prev block not found: %s\n",
hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV,
"prev-blk-not-found");
}
CBlockIndex *pindexPrev = &((*mi).second);
assert(pindexPrev);
if (pindexPrev->nStatus.isInvalid()) {
LogPrint(BCLog::VALIDATION,
"header %s has prev block invalid: %s\n", hash.ToString(),
block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV,
"bad-prevblk");
}
if (!ContextualCheckBlockHeader(
block, state, m_blockman, *this, pindexPrev,
m_options.adjusted_time_callback(), test_checkpoints)) {
LogPrint(BCLog::VALIDATION,
"%s: Consensus::ContextualCheckBlockHeader: %s, %s\n",
__func__, hash.ToString(), state.ToString());
return false;
}
/* Determine if this block descends from any block which has been found
* invalid (m_failed_blocks), then mark pindexPrev and any blocks
* between them as failed. For example:
*
* D3
* /
* B2 - C2
* / \
* A D2 - E2 - F2
* \
* B1 - C1 - D1 - E1
*
* In the case that we attempted to reorg from E1 to F2, only to find
* C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
* but NOT D3 (it was not in any of our candidate sets at the time).
*
* In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
* in LoadBlockIndex.
*/
if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
// The above does not mean "invalid": it checks if the previous
// block hasn't been validated up to BlockValidity::SCRIPTS. This is
// a performance optimization: in the common case of adding a new
// block to the tip, we don't need to iterate over the failed blocks
// list.
for (const CBlockIndex *failedit : m_failed_blocks) {
if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
assert(failedit->nStatus.hasFailed());
CBlockIndex *invalid_walk = pindexPrev;
while (invalid_walk != failedit) {
invalid_walk->nStatus =
invalid_walk->nStatus.withFailedParent();
m_blockman.m_dirty_blockindex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
LogPrint(BCLog::VALIDATION,
"header %s has prev block invalid: %s\n",
hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(
BlockValidationResult::BLOCK_INVALID_PREV,
"bad-prevblk");
}
}
}
}
if (!min_pow_checked) {
LogPrint(BCLog::VALIDATION,
"%s: not adding new block header %s, missing anti-dos "
"proof-of-work validation\n",
__func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK,
"too-little-chainwork");
}
CBlockIndex *pindex{m_blockman.AddToBlockIndex(block, m_best_header)};
if (ppindex) {
*ppindex = pindex;
}
return true;
}
// Exposed wrapper for AcceptBlockHeader
bool ChainstateManager::ProcessNewBlockHeaders(
const std::vector<CBlockHeader> &headers, bool min_pow_checked,
BlockValidationState &state, const CBlockIndex **ppindex,
const std::optional<CCheckpointData> &test_checkpoints) {
AssertLockNotHeld(cs_main);
{
LOCK(cs_main);
for (const CBlockHeader &header : headers) {
// Use a temp pindex instead of ppindex to avoid a const_cast
CBlockIndex *pindex = nullptr;
bool accepted = AcceptBlockHeader(
header, state, &pindex, min_pow_checked, test_checkpoints);
ActiveChainstate().CheckBlockIndex();
if (!accepted) {
return false;
}
if (ppindex) {
*ppindex = pindex;
}
}
}
if (NotifyHeaderTip(ActiveChainstate())) {
if (ActiveChainstate().IsInitialBlockDownload() && ppindex &&
*ppindex) {
const CBlockIndex &last_accepted{**ppindex};
const int64_t blocks_left{
(GetTime() - last_accepted.GetBlockTime()) /
this->GetConsensus().nPowTargetSpacing};
const double progress{100.0 * last_accepted.nHeight /
(last_accepted.nHeight + blocks_left)};
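// Rough heuristic, not an exact figure: e.g. with the last header
// two weeks behind the current time and a 10-minute target
// spacing, blocks_left is ~2016, so a node at height 800000
// would report ~99.75%.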
LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
last_accepted.nHeight, progress);
}
}
return true;
}
void ChainstateManager::ReportHeadersPresync(const arith_uint256 &work,
int64_t height,
int64_t timestamp) {
AssertLockNotHeld(cs_main);
const auto &chainstate = ActiveChainstate();
{
LOCK(cs_main);
// Don't report headers presync progress if we already have a
// post-minchainwork header chain.
// This means we lose reporting for potentially legitimate, but unlikely,
// deep reorgs, but it prevents attackers that spam low-work headers from
// filling our logs.
if (m_best_header->nChainWork >=
UintToArith256(GetConsensus().nMinimumChainWork)) {
return;
}
// Rate limit headers presync updates to 4 per second, as these are not
// subject to DoS protection.
auto now = Now<SteadyMilliseconds>();
if (now < m_last_presync_update + 250ms) {
return;
}
m_last_presync_update = now;
}
bool initial_download = chainstate.IsInitialBlockDownload();
GetNotifications().headerTip(GetSynchronizationState(initial_download),
height, timestamp, /*presync=*/true);
if (initial_download) {
const int64_t blocks_left{(GetTime() - timestamp) /
GetConsensus().nPowTargetSpacing};
const double progress{100.0 * height / (height + blocks_left)};
LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n",
height, progress);
}
}
/**
* Store a block on disk.
*
* @param[in,out] pblock The block we want to accept.
* @param[in] fRequested A boolean to indicate if this block was requested
* from our peers.
* @param[in] dbp If non-null, the disk position of the block.
* @param[in,out] fNewBlock True if block was first received via this call.
* @param[in] min_pow_checked True if proof-of-work anti-DoS checks have
* been done by caller for headers chain
* @return True if the block is accepted as a valid block and written to disk.
*/
bool Chainstate::AcceptBlock(const std::shared_ptr<const CBlock> &pblock,
BlockValidationState &state, bool fRequested,
const FlatFilePos *dbp, bool *fNewBlock,
bool min_pow_checked) {
AssertLockHeld(cs_main);
const CBlock &block = *pblock;
if (fNewBlock) {
*fNewBlock = false;
}
CBlockIndex *pindex = nullptr;
bool accepted_header{
m_chainman.AcceptBlockHeader(block, state, &pindex, min_pow_checked)};
CheckBlockIndex();
if (!accepted_header) {
return false;
}
// Try to process all requested blocks that we don't have, but only
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus.hasData();
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
if (fAlreadyHave) {
return true;
}
// Compare block header timestamps and received times of the block and the
// chaintip. If they have the same amount of chain work, use these diffs as
// a tie-breaker, attempting to pick the more honestly-mined block.
int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
int64_t chainTipTimeDiff =
m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
bool isSameHeight =
m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
if (isSameHeight) {
LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
"diff=%d\n",
m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
"diff=%d\n",
pindex->GetBlockHash().ToString(), newBlockTimeDiff);
}
bool fHasMoreOrSameWork =
(m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
: true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
bool fTooFarAhead{pindex->nHeight >
m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
// TODO: Decouple this function from the block download logic by removing
// fRequested
// This requires some new chain data structure to efficiently look up if a
// block is in a chain leading to a candidate for best tip, despite not
// being such a candidate itself.
// Note that this would break the getblockfrompeer RPC
// If we didn't ask for it:
if (!fRequested) {
// This is a previously-processed block that was pruned.
if (pindex->nTx != 0) {
return true;
}
// Don't process less-work chains.
if (!fHasMoreOrSameWork) {
return true;
}
// Block height is too high.
if (fTooFarAhead) {
return true;
}
// Protect against DoS attacks from low-work chains.
// If our tip is behind, a peer could try to send us
// low-work blocks on a fake chain that we would never
// request; don't process these.
if (pindex->nChainWork < m_chainman.MinimumChainWork()) {
return true;
}
}
const CChainParams ¶ms{m_chainman.GetParams()};
const Consensus::Params &consensusParams = params.GetConsensus();
if (!CheckBlock(block, state, consensusParams,
BlockValidationOptions(m_chainman.GetConfig())) ||
!ContextualCheckBlock(block, state, m_chainman, pindex->pprev)) {
if (state.IsInvalid() &&
state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus = pindex->nStatus.withFailed();
m_blockman.m_dirty_blockindex.insert(pindex);
}
return error("%s: %s (block %s)", __func__, state.ToString(),
block.GetHash().ToString());
}
// If connecting the new block would require rewinding more than one block
// from the active chain (i.e., a "deep reorg"), then mark the new block as
// parked. If it has enough work then it will be automatically unparked
// later, during FindMostWorkChain. We mark the block as parked at the very
// last minute so we can make sure everything is ready to be reorged if
// needed.
if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
LogPrintf("Park block %s as it would cause a deep reorg.\n",
pindex->GetBlockHash().ToString());
pindex->nStatus = pindex->nStatus.withParked();
m_blockman.m_dirty_blockindex.insert(pindex);
}
}
// Header is valid/has work and the merkle tree is good.
// Relay now, but if it does not build on our best tip, let the
// SendMessages loop relay it.
if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
GetMainSignals().NewPoWValidBlock(pindex, pblock);
}
// Write block to history file
if (fNewBlock) {
*fNewBlock = true;
}
try {
FlatFilePos blockPos{
m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, dbp)};
if (blockPos.IsNull()) {
state.Error(strprintf(
"%s: Failed to find position to write new block to disk",
__func__));
return false;
}
ReceivedBlockTransactions(block, pindex, blockPos);
} catch (const std::runtime_error &e) {
return AbortNode(state, std::string("System error: ") + e.what());
}
FlushStateToDisk(state, FlushStateMode::NONE);
CheckBlockIndex();
return true;
}
bool ChainstateManager::ProcessNewBlock(
const std::shared_ptr<const CBlock> &block, bool force_processing,
bool min_pow_checked, bool *new_block,
avalanche::Processor *const avalanche) {
AssertLockNotHeld(cs_main);
{
if (new_block) {
*new_block = false;
}
BlockValidationState state;
// CheckBlock() does not support multi-threaded block validation
// because CBlock::fChecked can cause data race.
// Therefore, the following critical section must include the
// CheckBlock() call as well.
LOCK(cs_main);
// Skipping AcceptBlock() for CheckBlock() failures means that we will
// never mark a block as invalid if CheckBlock() fails. This is
// protective against consensus failure if there are any unknown forms
// of block malleability that cause CheckBlock() to fail; see e.g.
// CVE-2012-2459 and
// https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html.
// Because CheckBlock() is not very expensive, the anti-DoS benefits of
// caching failure (of a definitely-invalid block) are not substantial.
bool ret = CheckBlock(*block, state, this->GetConsensus(),
BlockValidationOptions(this->GetConfig()));
if (ret) {
// Store to disk
ret = ActiveChainstate().AcceptBlock(block, state, force_processing,
nullptr, new_block,
min_pow_checked);
}
if (!ret) {
GetMainSignals().BlockChecked(*block, state);
return error("%s: AcceptBlock FAILED (%s)", __func__,
state.ToString());
}
}
NotifyHeaderTip(ActiveChainstate());
// Only used to report errors, not invalidity - ignore it
BlockValidationState state;
if (!ActiveChainstate().ActivateBestChain(state, block, avalanche)) {
return error("%s: ActivateBestChain failed (%s)", __func__,
state.ToString());
}
return true;
}
MempoolAcceptResult
ChainstateManager::ProcessTransaction(const CTransactionRef &tx,
bool test_accept) {
AssertLockHeld(cs_main);
Chainstate &active_chainstate = ActiveChainstate();
if (!active_chainstate.GetMempool()) {
TxValidationState state;
state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
return MempoolAcceptResult::Failure(state);
}
auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(),
/*bypass_limits=*/false, test_accept);
active_chainstate.GetMempool()->check(
active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
return result;
}
bool TestBlockValidity(
BlockValidationState &state, const CChainParams ¶ms,
Chainstate &chainstate, const CBlock &block, CBlockIndex *pindexPrev,
const std::function<NodeClock::time_point()> &adjusted_time_callback,
BlockValidationOptions validationOptions) {
AssertLockHeld(cs_main);
assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
CCoinsViewCache viewNew(&chainstate.CoinsTip());
BlockHash block_hash(block.GetHash());
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
indexDummy.nHeight = pindexPrev->nHeight + 1;
indexDummy.phashBlock = &block_hash;
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman,
chainstate.m_chainman, pindexPrev,
adjusted_time_callback())) {
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
state.ToString());
}
if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
return error("%s: Consensus::CheckBlock: %s", __func__,
state.ToString());
}
if (!ContextualCheckBlock(block, state, chainstate.m_chainman,
pindexPrev)) {
return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
state.ToString());
}
if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew,
validationOptions, nullptr, true)) {
return false;
}
assert(state.IsValid());
return true;
}
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(Chainstate &active_chainstate,
int nManualPruneHeight) {
BlockValidationState state;
if (!active_chainstate.FlushStateToDisk(state, FlushStateMode::NONE,
                                        nManualPruneHeight)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__,
state.ToString());
}
}
void Chainstate::LoadMempool(const fs::path &load_path,
FopenFn mockable_fopen_function) {
if (!m_mempool) {
return;
}
::LoadMempool(*m_mempool, load_path, *this, mockable_fopen_function);
m_mempool->SetLoadTried(!ShutdownRequested());
}
bool Chainstate::LoadChainTip() {
AssertLockHeld(cs_main);
const CCoinsViewCache &coins_cache = CoinsTip();
// Never called when the coins view is empty
assert(!coins_cache.GetBestBlock().IsNull());
const CBlockIndex *tip = m_chain.Tip();
if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
return true;
}
// Load pointer to end of best chain
CBlockIndex *pindex =
m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
if (!pindex) {
return false;
}
m_chain.SetTip(*pindex);
PruneBlockIndexCandidates();
tip = m_chain.Tip();
LogPrintf(
"Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
tip->GetBlockHash().ToString(), m_chain.Height(),
FormatISO8601DateTime(tip->GetBlockTime()),
GuessVerificationProgress(m_chainman.GetParams().TxData(), tip));
return true;
}
CVerifyDB::CVerifyDB(Notifications ¬ifications)
: m_notifications{notifications} {
m_notifications.progress(_("Verifying blocks…"), 0, false);
}
CVerifyDB::~CVerifyDB() {
m_notifications.progress(bilingual_str{}, 100, false);
}
VerifyDBResult CVerifyDB::VerifyDB(Chainstate &chainstate,
CCoinsView &coinsview, int nCheckLevel,
int nCheckDepth) {
AssertLockHeld(cs_main);
const Config &config = chainstate.m_chainman.GetConfig();
const CChainParams ¶ms = config.GetChainParams();
const Consensus::Params &consensusParams = params.GetConsensus();
if (chainstate.m_chain.Tip() == nullptr ||
chainstate.m_chain.Tip()->pprev == nullptr) {
return VerifyDBResult::SUCCESS;
}
// Verify blocks in the best chain
if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
nCheckDepth = chainstate.m_chain.Height();
}
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
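// Levels are cumulative: 0 reads blocks from disk, 1 verifies block
// validity, 2 verifies undo data, 3 checks disconnection of tip blocks
// in memory, and 4 tries reconnecting the blocks.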
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
nCheckLevel);
CCoinsViewCache coins(&coinsview);
CBlockIndex *pindex;
CBlockIndex *pindexFailure = nullptr;
int nGoodTransactions = 0;
BlockValidationState state;
int reportDone = 0;
bool skipped_no_block_data{false};
bool skipped_l3_checks{false};
LogPrintf("Verification progress: 0%%\n");
const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev;
pindex = pindex->pprev) {
const int percentageDone = std::max(
1, std::min(99, (int)(((double)(chainstate.m_chain.Height() -
pindex->nHeight)) /
(double)nCheckDepth *
(nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone / 10) {
// report every 10% step
LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
m_notifications.progress(_("Verifying blocks…"), percentageDone, false);
if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
break;
}
if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) &&
!pindex->nStatus.hasData()) {
// If pruning or running under an assumeutxo snapshot, only go
// back as far as we have data.
LogPrintf("VerifyDB(): block verification stopping at height %d "
"(no data). This could be due to pruning or use of an "
"assumeutxo snapshot.\n",
pindex->nHeight);
skipped_no_block_data = true;
break;
}
CBlock block;
// check level 0: read from disk
if (!chainstate.m_blockman.ReadBlockFromDisk(block, *pindex)) {
LogPrintf(
"Verification error: ReadBlockFromDisk failed at %d, hash=%s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
BlockValidationOptions(config))) {
LogPrintf(
"Verification error: found bad block at %d, hash=%s (%s)\n",
pindex->nHeight, pindex->GetBlockHash().ToString(),
state.ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
if (!pindex->GetUndoPos().IsNull()) {
if (!chainstate.m_blockman.UndoReadFromDisk(undo, *pindex)) {
LogPrintf("Verification error: found bad undo data at %d, "
"hash=%s\n",
pindex->nHeight,
pindex->GetBlockHash().ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
}
}
// check level 3: check for inconsistencies during memory-only
// disconnect of tip blocks
size_t curr_coins_usage = coins.DynamicMemoryUsage() +
chainstate.CoinsTip().DynamicMemoryUsage();
if (nCheckLevel >= 3) {
if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res =
chainstate.DisconnectBlock(block, pindex, coins);
if (res == DisconnectResult::FAILED) {
LogPrintf("Verification error: irrecoverable inconsistency "
"in block data at %d, hash=%s\n",
pindex->nHeight,
pindex->GetBlockHash().ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
if (res == DisconnectResult::UNCLEAN) {
nGoodTransactions = 0;
pindexFailure = pindex;
} else {
nGoodTransactions += block.vtx.size();
}
} else {
skipped_l3_checks = true;
}
}
if (ShutdownRequested()) {
return VerifyDBResult::INTERRUPTED;
}
}
if (pindexFailure) {
LogPrintf("Verification error: coin database inconsistencies found "
"(last %i blocks, %i good transactions before that)\n",
chainstate.m_chain.Height() - pindexFailure->nHeight + 1,
nGoodTransactions);
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
if (skipped_l3_checks) {
LogPrintf("Skipped verification of level >=3 (insufficient database "
"cache size). Consider increasing -dbcache.\n");
}
// store block count as we move pindex at check level >= 4
int block_count = chainstate.m_chain.Height() - pindex->nHeight;
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4 && !skipped_l3_checks) {
while (pindex != chainstate.m_chain.Tip()) {
const int percentageDone = std::max(
1, std::min(99, 100 - int(double(chainstate.m_chain.Height() -
pindex->nHeight) /
double(nCheckDepth) * 50)));
if (reportDone < percentageDone / 10) {
// report every 10% step
LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
m_notifications.progress(_("Verifying blocks…"), percentageDone,
false);
pindex = chainstate.m_chain.Next(pindex);
CBlock block;
if (!chainstate.m_blockman.ReadBlockFromDisk(block, *pindex)) {
LogPrintf("Verification error: ReadBlockFromDisk failed at %d, "
"hash=%s\n",
pindex->nHeight, pindex->GetBlockHash().ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
if (!chainstate.ConnectBlock(block, state, pindex, coins,
BlockValidationOptions(config))) {
LogPrintf("Verification error: found unconnectable block at "
"%d, hash=%s (%s)\n",
pindex->nHeight, pindex->GetBlockHash().ToString(),
state.ToString());
return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
if (ShutdownRequested()) {
return VerifyDBResult::INTERRUPTED;
}
}
}
LogPrintf("Verification: No coin database inconsistencies in last %i "
"blocks (%i transactions)\n",
block_count, nGoodTransactions);
if (skipped_l3_checks) {
return VerifyDBResult::SKIPPED_L3_CHECKS;
}
if (skipped_no_block_data) {
return VerifyDBResult::SKIPPED_MISSING_BLOCKS;
}
return VerifyDBResult::SUCCESS;
}
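// To summarize the check levels above: level 0 only reads each block back
// from disk, level 1 re-runs CheckBlock, level 2 verifies the undo data,
// level 3 disconnects tip blocks in memory against the coins cache, and
// level 4 reconnects them. A minimal sketch of driving this from init code
// follows; the wrapping object and exact call shape are assumptions for
// illustration, not a copy of the real call site:
//
//     CVerifyDB verifier{notifications};
//     switch (verifier.VerifyDB(chainstate, config, chainstate.CoinsTip(),
//                               /*nCheckLevel=*/3, /*nCheckDepth=*/6)) {
//         case VerifyDBResult::SUCCESS:
//         case VerifyDBResult::SKIPPED_MISSING_BLOCKS:
//             break; // good enough to proceed
//         case VerifyDBResult::SKIPPED_L3_CHECKS:
//             break; // proceed, but consider a larger -dbcache
//         case VerifyDBResult::INTERRUPTED:
//         case VerifyDBResult::CORRUPTED_BLOCK_DB:
//             // abort startup or suggest -reindex
//             break;
//     }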
/**
* Apply the effects of a block on the utxo cache, ignoring that it may already
* have been applied.
*/
bool Chainstate::RollforwardBlock(const CBlockIndex *pindex,
CCoinsViewCache &view) {
AssertLockHeld(cs_main);
// TODO: merge with ConnectBlock
CBlock block;
if (!m_blockman.ReadBlockFromDisk(block, *pindex)) {
return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
pindex->nHeight, pindex->GetBlockHash().ToString());
}
for (const CTransactionRef &tx : block.vtx) {
// Pass check = true as every addition may be an overwrite.
AddCoins(view, *tx, pindex->nHeight, true);
}
for (const CTransactionRef &tx : block.vtx) {
if (tx->IsCoinBase()) {
continue;
}
for (const CTxIn &txin : tx->vin) {
view.SpendCoin(txin.prevout);
}
}
return true;
}
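// Note on the two-pass structure above: AddCoins is called with check=true,
// so re-adding an output that already exists is tolerated, and SpendCoin on
// an already-missing coin is effectively a no-op here (its return value is
// ignored). Because both operations are idempotent, replaying a block that
// had been applied only partially (or even fully) before a crash converges
// to the same state as applying it exactly once: first every output of every
// transaction exists, then every input is marked spent.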
bool Chainstate::ReplayBlocks() {
LOCK(cs_main);
CCoinsView &db = this->CoinsDB();
CCoinsViewCache cache(&db);
std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
if (hashHeads.empty()) {
// We're already in a consistent state.
return true;
}
if (hashHeads.size() != 2) {
return error("ReplayBlocks(): unknown inconsistent state");
}
m_chainman.GetNotifications().progress(_("Replaying blocks…"), 0, false);
LogPrintf("Replaying blocks\n");
// Old tip during the interrupted flush.
const CBlockIndex *pindexOld = nullptr;
// New tip during the interrupted flush.
const CBlockIndex *pindexNew;
// Latest block common to both the old and the new tip.
const CBlockIndex *pindexFork = nullptr;
if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
return error(
"ReplayBlocks(): reorganization to unknown block requested");
}
pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
if (!hashHeads[1].IsNull()) {
// The old tip is allowed to be 0, indicating it's the first flush.
if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
return error(
"ReplayBlocks(): reorganization from unknown block requested");
}
pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
pindexFork = LastCommonAncestor(pindexOld, pindexNew);
assert(pindexFork != nullptr);
}
// Rollback along the old branch.
while (pindexOld != pindexFork) {
if (pindexOld->nHeight > 0) {
// Never disconnect the genesis block.
CBlock block;
if (!m_blockman.ReadBlockFromDisk(block, *pindexOld)) {
return error("RollbackBlock(): ReadBlockFromDisk() failed at "
"%d, hash=%s",
pindexOld->nHeight,
pindexOld->GetBlockHash().ToString());
}
LogPrintf("Rolling back %s (%i)\n",
pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
if (res == DisconnectResult::FAILED) {
return error(
"RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
}
// If DisconnectResult::UNCLEAN is returned, it means a non-existing
// UTXO was deleted, or an existing UTXO was overwritten. It
// corresponds to cases where the block-to-be-disconnected never had
// all its operations applied to the UTXO set. However, as both
// writing a UTXO and deleting a UTXO are idempotent operations, the
// result is still a version of the UTXO set with the effects of
// that block undone.
}
pindexOld = pindexOld->pprev;
}
// Roll forward from the forking point to the new tip.
int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
++nHeight) {
const CBlockIndex &pindex{*Assert(pindexNew->GetAncestor(nHeight))};
LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(),
nHeight);
m_chainman.GetNotifications().progress(
_("Replaying blocks…"),
(int)((nHeight - nForkHeight) * 100.0 /
(pindexNew->nHeight - nForkHeight)),
false);
if (!RollforwardBlock(&pindex, cache)) {
return false;
}
}
cache.SetBestBlock(pindexNew->GetBlockHash());
cache.Flush();
m_chainman.GetNotifications().progress(bilingual_str{}, 100, false);
return true;
}
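// Worked example of the replay above (heights are illustrative): suppose a
// flush was interrupted while the coins db moved across a reorg from old tip
// A at height 100 to new tip B at height 102, with fork point F at height
// 99. GetHeadBlocks() then reports {B, A}: the loop first disconnects A's
// branch back down to F (tolerating DisconnectResult::UNCLEAN, since those
// blocks may have been only partially applied), then rolls forward F+1
// through B, leaving the cache at exactly B's UTXO state before flushing.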
// May NOT be used after any connections are up as much of the peer-processing
// logic assumes a consistent block index state
void Chainstate::UnloadBlockIndex() {
AssertLockHeld(::cs_main);
nBlockSequenceId = 1;
m_best_fork_tip = nullptr;
m_best_fork_base = nullptr;
setBlockIndexCandidates.clear();
}
bool ChainstateManager::LoadBlockIndex() {
AssertLockHeld(cs_main);
// Load block index from databases
bool needs_init = fReindex;
if (!fReindex) {
bool ret = m_blockman.LoadBlockIndexDB();
if (!ret) {
return false;
}
m_blockman.ScanAndUnlinkAlreadyPrunedFiles();
std::vector<CBlockIndex *> vSortedByHeight{
m_blockman.GetAllBlockIndices()};
std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
CBlockIndexHeightOnlyComparator());
// Find start of assumed-valid region.
int first_assumed_valid_height = std::numeric_limits<int>::max();
for (const CBlockIndex *block : vSortedByHeight) {
if (block->IsAssumedValid()) {
auto chainstates = GetAll();
// If we encounter an assumed-valid block index entry, ensure
// that we have one chainstate that tolerates assumed-valid
// entries and another that does not (i.e. the background
// validation chainstate), since assumed-valid entries should
// always be pending validation by a fully-validated chainstate.
auto any_chain = [&](auto fnc) {
return std::any_of(chainstates.cbegin(), chainstates.cend(),
fnc);
};
assert(any_chain([](auto chainstate) {
return chainstate->reliesOnAssumedValid();
}));
assert(any_chain([](auto chainstate) {
return !chainstate->reliesOnAssumedValid();
}));
first_assumed_valid_height = block->nHeight;
LogPrintf("Saw first assumedvalid block at height %d (%s)\n",
first_assumed_valid_height, block->ToString());
break;
}
}
for (CBlockIndex *pindex : vSortedByHeight) {
if (ShutdownRequested()) {
return false;
}
if (pindex->IsAssumedValid() ||
(pindex->IsValid(BlockValidity::TRANSACTIONS) &&
(pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
// Fill each chainstate's block candidate set. Only add
// assumed-valid blocks to the tip candidate set if the
// chainstate is allowed to rely on assumed-valid blocks.
//
// If all setBlockIndexCandidates contained the assumed-valid
// blocks, the background chainstate's ActivateBestChain() call
// would add assumed-valid blocks to the chain (based on how
// FindMostWorkChain() works). Obviously we don't want this
// since the purpose of the background validation chain is to
// validate assumed-valid blocks.
//
// Note: This is considering all blocks whose height is greater
// or equal to the first assumed-valid block to be assumed-valid
// blocks, and excluding them from the background chainstate's
// setBlockIndexCandidates set. This does mean that some blocks
// which are not technically assumed-valid (later blocks on a
// fork beginning before the first assumed-valid block) might
// not get added to the background chainstate, but this is ok,
// because they will still be attached to the active chainstate
// if they actually contain more work.
//
// Instead of this height-based approach, an earlier attempt was
// made at detecting "holistically" whether the block index
// under consideration relied on an assumed-valid ancestor, but
// this proved to be too slow to be practical.
for (Chainstate *chainstate : GetAll()) {
if (chainstate->reliesOnAssumedValid() ||
pindex->nHeight < first_assumed_valid_height) {
chainstate->setBlockIndexCandidates.insert(pindex);
}
}
}
if (pindex->nStatus.isInvalid() &&
(!m_best_invalid ||
pindex->nChainWork > m_best_invalid->nChainWork)) {
m_best_invalid = pindex;
}
if (pindex->nStatus.isOnParkedChain() &&
(!m_best_parked ||
pindex->nChainWork > m_best_parked->nChainWork)) {
m_best_parked = pindex;
}
if (pindex->IsValid(BlockValidity::TREE) &&
(m_best_header == nullptr ||
CBlockIndexWorkComparator()(m_best_header, pindex))) {
m_best_header = pindex;
}
}
needs_init = m_blockman.m_block_index.empty();
}
if (needs_init) {
// Everything here is for *new* reindex/DBs. Thus, though
// LoadBlockIndexDB may have set fReindex if we shut down
// mid-reindex previously, we don't check fReindex and
// instead only check it prior to LoadBlockIndexDB to set
// needs_init.
LogPrintf("Initializing databases...\n");
}
return true;
}
bool Chainstate::LoadGenesisBlock() {
LOCK(cs_main);
const CChainParams &params{m_chainman.GetParams()};
// Check whether we're already initialized by checking for genesis in
// m_blockman.m_block_index. Note that we can't use m_chain here, since it
// is set based on the coins db, not the block index db, which is the only
// thing loaded at this point.
if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash())) {
return true;
}
try {
const CBlock &block = params.GenesisBlock();
FlatFilePos blockPos{
m_blockman.SaveBlockToDisk(block, 0, m_chain, nullptr)};
if (blockPos.IsNull()) {
return error("%s: writing genesis block to disk failed", __func__);
}
CBlockIndex *pindex =
m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
ReceivedBlockTransactions(block, pindex, blockPos);
} catch (const std::runtime_error &e) {
return error("%s: failed to write genesis block: %s", __func__,
e.what());
}
return true;
}
void Chainstate::LoadExternalBlockFile(
FILE *fileIn, FlatFilePos *dbp,
std::multimap<BlockHash, FlatFilePos> *blocks_with_unknown_parent,
avalanche::Processor *const avalanche) {
AssertLockNotHeld(m_chainstate_mutex);
// Either both should be specified (-reindex), or neither (-loadblock).
assert(!dbp == !blocks_with_unknown_parent);
int64_t nStart = GetTimeMillis();
const CChainParams &params{m_chainman.GetParams()};
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile
// destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
// so any transaction can fit in the buffer.
CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
CLIENT_VERSION);
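// Each record in an external block file is expected to be laid out as
//     [4-byte network magic][4-byte little-endian length][serialized block]
// (cf. the recovery notes in the catch handler near the end of this loop).
// The scanning below searches for the magic bytes, reads the length, and
// only then attempts to deserialize a header and block.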
// nRewind indicates where to resume scanning in case something goes
// wrong, such as a block fails to deserialize.
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
if (ShutdownRequested()) {
return;
}
blkdat.SetPos(nRewind);
// Start one byte further next time, in case of failure.
nRewind++;
// Remove former limit.
blkdat.SetLimit();
unsigned int nSize = 0;
try {
// Locate a header.
uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
blkdat.FindByte(std::byte(params.DiskMagic()[0]));
nRewind = blkdat.GetPos() + 1;
blkdat >> buf;
if (memcmp(buf, params.DiskMagic().data(),
CMessageHeader::MESSAGE_START_SIZE)) {
continue;
}
// Read size.
blkdat >> nSize;
if (nSize < 80) {
continue;
}
} catch (const std::exception &) {
// No valid block header found; don't complain.
// (this happens at the end of every blk.dat file)
break;
}
try {
// read block header
const uint64_t nBlockPos{blkdat.GetPos()};
if (dbp) {
dbp->nPos = nBlockPos;
}
blkdat.SetLimit(nBlockPos + nSize);
CBlockHeader header;
blkdat >> header;
const BlockHash hash{header.GetHash()};
// Skip the rest of this block (this may read from disk
// into memory); position to the marker before the next block,
// but it's still possible to rewind to the start of the
// current block (without a disk read).
nRewind = nBlockPos + nSize;
blkdat.SkipTo(nRewind);
// needs to remain available after the cs_main lock is released
// to avoid duplicate reads from disk
std::shared_ptr<CBlock> pblock{};
{
LOCK(cs_main);
// detect out of order blocks, and store them for later
if (hash != params.GetConsensus().hashGenesisBlock &&
!m_blockman.LookupBlockIndex(header.hashPrevBlock)) {
LogPrint(
BCLog::REINDEX,
"%s: Out of order block %s, parent %s not known\n",
__func__, hash.ToString(),
header.hashPrevBlock.ToString());
if (dbp && blocks_with_unknown_parent) {
blocks_with_unknown_parent->emplace(
header.hashPrevBlock, *dbp);
}
continue;
}
// process in case the block isn't known yet
const CBlockIndex *pindex =
m_blockman.LookupBlockIndex(hash);
if (!pindex || !pindex->nStatus.hasData()) {
// This block can be processed immediately; rewind to
// its start, read and deserialize it.
blkdat.SetPos(nBlockPos);
pblock = std::make_shared<CBlock>();
blkdat >> *pblock;
nRewind = blkdat.GetPos();
BlockValidationState state;
if (AcceptBlock(pblock, state, true, dbp, nullptr,
true)) {
nLoaded++;
}
if (state.IsError()) {
break;
}
} else if (hash != params.GetConsensus().hashGenesisBlock &&
pindex->nHeight % 1000 == 0) {
LogPrint(
BCLog::REINDEX,
"Block Import: already had block %s at height %d\n",
hash.ToString(), pindex->nHeight);
}
}
// Activate the genesis block so normal node progress can
// continue
if (hash == params.GetConsensus().hashGenesisBlock) {
BlockValidationState state;
if (!ActivateBestChain(state, nullptr, avalanche)) {
break;
}
}
if (m_blockman.IsPruneMode() && !fReindex && pblock) {
// Must update the tip for pruning to work while importing
// with -loadblock. This is a tradeoff to conserve disk
// space at the expense of time spent updating the tip to be
// able to prune. Otherwise, ActivateBestChain won't be
// called by the import process until after all of the block
// files are loaded. ActivateBestChain can be called by
// concurrent network message processing, but that is not
// reliable for the purpose of pruning while importing.
BlockValidationState state;
if (!ActivateBestChain(state, pblock, avalanche)) {
LogPrint(BCLog::REINDEX,
"failed to activate chain (%s)\n",
state.ToString());
break;
}
}
NotifyHeaderTip(*this);
if (!blocks_with_unknown_parent) {
continue;
}
// Recursively process earlier encountered successors of this
// block
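// For example, if blocks C, B, A arrive in that order on disk while the
// chain is A <- B <- C: B and C fail the lookup above and are parked in
// blocks_with_unknown_parent keyed by their missing parents. Once A is
// accepted, this loop pops A's hash, accepts B (keyed by A), pushes B's
// hash, and then accepts C in turn.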
std::deque<BlockHash> queue;
queue.push_back(hash);
while (!queue.empty()) {
BlockHash head = queue.front();
queue.pop_front();
auto range = blocks_with_unknown_parent->equal_range(head);
while (range.first != range.second) {
std::multimap<BlockHash, FlatFilePos>::iterator it =
range.first;
std::shared_ptr<CBlock> pblockrecursive =
std::make_shared<CBlock>();
if (m_blockman.ReadBlockFromDisk(*pblockrecursive,
it->second)) {
LogPrint(
BCLog::REINDEX,
"%s: Processing out of order child %s of %s\n",
__func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
BlockValidationState dummy;
if (AcceptBlock(pblockrecursive, dummy, true,
&it->second, nullptr, true)) {
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
}
}
range.first++;
blocks_with_unknown_parent->erase(it);
NotifyHeaderTip(*this);
}
}
} catch (const std::exception &e) {
// Historical bugs added extra data to the block files that does
// not deserialize cleanly. Commonly this data is between
// readable blocks, but it does not really matter. Such data is
// not fatal to the import process. The code that reads the
// block files deals with invalid data by simply ignoring it. It
// continues to search for the next {4 byte magic message start
// bytes + 4 byte length + block} that does deserialize cleanly
// and passes all of the other block validation checks dealing
// with POW and the merkle root, etc... We merely note with this
// informational log message when unexpected data is
// encountered. We could also be experiencing a storage system
// read error, or a read of a previous bad write. These are
// possible, but less likely scenarios. We don't have enough
information to tell the difference here. The reindex process is
// not the place to attempt to clean and/or compact the block
// files. If so desired, a studious node operator may use
// knowledge of the fact that the block files are not entirely
// pristine in order to prepare a set of pristine, and perhaps
// ordered, block files for later reindexing.
LogPrint(BCLog::REINDEX,
"%s: unexpected data at file offset 0x%x - %s. "
"continuing\n",
__func__, (nRewind - 1), e.what());
}
}
} catch (const std::runtime_error &e) {
AbortNode(std::string("System error: ") + e.what());
}
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
GetTimeMillis() - nStart);
}
void Chainstate::CheckBlockIndex() {
if (!m_chainman.ShouldCheckBlockIndex()) {
return;
}
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex
// before ActivateBestChain, so we have the genesis block in
// m_blockman.m_block_index but no active chain. (A few of the tests when
// iterating the block tree require that m_chain has been initialized.)
if (m_chain.Height() < 0) {
assert(m_blockman.m_block_index.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex *, CBlockIndex *> forward;
for (auto &[_, block_index] : m_blockman.m_block_index) {
forward.emplace(block_index.pprev, &block_index);
}
assert(forward.size() == m_blockman.m_block_index.size());
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangeGenesis = forward.equal_range(nullptr);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
// There is only one index entry with parent nullptr.
assert(rangeGenesis.first == rangeGenesis.second);
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from the
// genesis block to the block being explored which are the first to have
// certain properties.
size_t nNodes = 0;
int nHeight = 0;
// Oldest ancestor of pindex which is invalid.
CBlockIndex *pindexFirstInvalid = nullptr;
// Oldest ancestor of pindex which is parked.
CBlockIndex *pindexFirstParked = nullptr;
// Oldest ancestor of pindex which does not have data available.
CBlockIndex *pindexFirstMissing = nullptr;
// Oldest ancestor of pindex for which nTx == 0.
CBlockIndex *pindexFirstNeverProcessed = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTreeValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotChainValid = nullptr;
// Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
// (regardless of being valid or not).
CBlockIndex *pindexFirstNotScriptsValid = nullptr;
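// Together, these pindexFirst* pointers give an O(1) "does any ancestor of
// pindex have this property" query during the depth-first walk: each one is
// set when the walk first descends through a block with the property and
// cleared again when the walk backtracks past that same block (see the
// leaf-handling loop below), so a non-null pointer means some block on the
// current path from genesis to pindex has the property.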
while (pindex != nullptr) {
nNodes++;
if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
pindexFirstInvalid = pindex;
}
if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
pindexFirstParked = pindex;
}
// Assumed-valid index entries will not have data since we haven't
// downloaded the full block yet.
if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData() &&
!pindex->IsAssumedValid()) {
pindexFirstMissing = pindex;
}
if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
pindexFirstNeverProcessed = pindex;
}
if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TREE) {
pindexFirstNotTreeValid = pindex;
}
if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
if (pindexFirstNotTransactionsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
pindexFirstNotTransactionsValid = pindex;
}
if (pindexFirstNotChainValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
pindexFirstNotChainValid = pindex;
}
if (pindexFirstNotScriptsValid == nullptr &&
pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
pindexFirstNotScriptsValid = pindex;
}
}
// Begin: actual consistency checks.
if (pindex->pprev == nullptr) {
// Genesis block checks.
// Genesis block's hash must match.
assert(pindex->GetBlockHash() ==
m_chainman.GetConsensus().hashGenesisBlock);
// The current active chain's genesis block must be this block.
assert(pindex == m_chain.Genesis());
}
if (!pindex->HaveTxsDownloaded()) {
// nSequenceId can't be set positive for blocks that aren't linked
// (negative is used for preciousblock)
assert(pindex->nSequenceId <= 0);
}
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
// not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
// (or VALID_TRANSACTIONS) if no pruning has occurred, unless these
// indexes are assumed-valid and pending block download on a background
// chainstate.
if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx
// > 0
assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else if (pindex->nStatus.hasData()) {
// If we have pruned, then we can only say that HAVE_DATA implies
// nTx > 0
assert(pindex->nTx > 0);
}
if (pindex->nStatus.hasUndo()) {
assert(pindex->nStatus.hasData());
}
if (pindex->IsAssumedValid()) {
// Assumed-valid blocks should have some nTx value.
assert(pindex->nTx > 0);
// Assumed-valid blocks should connect to the main chain.
assert(pindex->nStatus.getValidity() >= BlockValidity::TREE);
} else {
// Otherwise there should only be an nTx value if we have
// actually seen a block's transactions.
// This is pruning-independent.
assert((pindex->nStatus.getValidity() >=
BlockValidity::TRANSACTIONS) == (pindex->nTx > 0));
}
// All parents having had data (at some point) is equivalent to all
// parents being VALID_TRANSACTIONS, which is equivalent to
// HaveTxsDownloaded().
assert((pindexFirstNeverProcessed == nullptr) ==
(pindex->HaveTxsDownloaded()));
assert((pindexFirstNotTransactionsValid == nullptr) ==
(pindex->HaveTxsDownloaded()));
// nHeight must be consistent.
assert(pindex->nHeight == nHeight);
// For every block except the genesis block, the chainwork must be at
// least as large as the parent's.
assert(pindex->pprev == nullptr ||
pindex->nChainWork >= pindex->pprev->nChainWork);
// The pskip pointer must point back for all but the first 2 blocks.
assert(nHeight < 2 ||
(pindex->pskip && (pindex->pskip->nHeight < nHeight)));
// All m_blockman.m_block_index entries must at least be TREE valid
assert(pindexFirstNotTreeValid == nullptr);
if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
// TREE valid implies all parents are TREE valid
assert(pindexFirstNotTreeValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
// CHAIN valid implies all parents are CHAIN valid
assert(pindexFirstNotChainValid == nullptr);
}
if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
// SCRIPTS valid implies all parents are SCRIPTS valid
assert(pindexFirstNotScriptsValid == nullptr);
}
if (pindexFirstInvalid == nullptr) {
// Checks for not-invalid blocks.
// The failed mask cannot be set for blocks without invalid parents.
assert(!pindex->nStatus.isInvalid());
}
if (pindexFirstParked == nullptr) {
// Checks for not-parked blocks.
// The parked mask cannot be set for blocks without parked parents.
// (i.e., hasParkedParent only if an ancestor is properly parked).
assert(!pindex->nStatus.isOnParkedChain());
}
if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
pindexFirstNeverProcessed == nullptr) {
if (pindexFirstInvalid == nullptr) {
// Don't perform this check for the background chainstate since
// its setBlockIndexCandidates shouldn't have some entries (i.e.
// those past the snapshot block) which do exist in the block
// index for the active chainstate.
if (this == &m_chainman.ActiveChainstate()) {
// If this block sorts at least as good as the current tip
// and is valid and we have all data for its parents, it
// must be in setBlockIndexCandidates or be parked.
if (pindexFirstMissing == nullptr) {
assert(pindex->nStatus.isOnParkedChain() ||
setBlockIndexCandidates.count(pindex));
}
// m_chain.Tip() must also be there even if some data has
// been pruned.
if (pindex == m_chain.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
}
// If some parent is missing, then it could be that this block
// was in setBlockIndexCandidates but had to be removed because
// of the missing data. In this case it must be in
// m_blocks_unlinked -- see test below.
}
} else {
// If this block sorts worse than the current tip or some ancestor's
// block has never been seen, it cannot be in
// setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in m_blocks_unlinked.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangeUnlinked =
m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
if (rangeUnlinked.first->second == pindex) {
foundInUnlinked = true;
break;
}
rangeUnlinked.first++;
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed != nullptr &&
pindexFirstInvalid == nullptr) {
// If this block has block data available, some parent was never
// received, and it has no invalid parents, then it must be in
// m_blocks_unlinked.
assert(foundInUnlinked);
}
if (!pindex->nStatus.hasData()) {
// Can't be in m_blocks_unlinked if we don't HAVE_DATA
assert(!foundInUnlinked);
}
if (pindexFirstMissing == nullptr) {
// We aren't missing data for any parent -- cannot be in
// m_blocks_unlinked.
assert(!foundInUnlinked);
}
if (pindex->pprev && pindex->nStatus.hasData() &&
pindexFirstNeverProcessed == nullptr &&
pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents
// at some point, but we're currently missing data for some parent.
// We must have pruned.
assert(m_blockman.m_have_pruned);
// This block may have entered m_blocks_unlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between m_chain and the
// tip.
// So if this block is itself better than m_chain.Tip() and it
// wasn't in
// setBlockIndexCandidates, then it must be in m_blocks_unlinked.
if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
}
}
// Perhaps too slow
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
// End: actual consistency checks.
// Try descending into the first subnode.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
pindex = range.first->second;
nHeight++;
continue;
}
// This is a leaf node. Move upwards until we reach a node of which we
// have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the
// corresponding variable.
if (pindex == pindexFirstInvalid) {
pindexFirstInvalid = nullptr;
}
if (pindex == pindexFirstParked) {
pindexFirstParked = nullptr;
}
if (pindex == pindexFirstMissing) {
pindexFirstMissing = nullptr;
}
if (pindex == pindexFirstNeverProcessed) {
pindexFirstNeverProcessed = nullptr;
}
if (pindex == pindexFirstNotTreeValid) {
pindexFirstNotTreeValid = nullptr;
}
if (pindex == pindexFirstNotTransactionsValid) {
pindexFirstNotTransactionsValid = nullptr;
}
if (pindex == pindexFirstNotChainValid) {
pindexFirstNotChainValid = nullptr;
}
if (pindex == pindexFirstNotScriptsValid) {
pindexFirstNotScriptsValid = nullptr;
}
// Find our parent.
CBlockIndex *pindexPar = pindex->pprev;
// Find which child we just visited.
std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
rangePar = forward.equal_range(pindexPar);
while (rangePar.first->second != pindex) {
// Our parent must have at least the node we're coming from as
// child.
assert(rangePar.first != rangePar.second);
rangePar.first++;
}
// Proceed to the next one.
rangePar.first++;
if (rangePar.first != rangePar.second) {
// Move to the sibling.
pindex = rangePar.first->second;
break;
} else {
// Move up further.
pindex = pindexPar;
nHeight--;
continue;
}
}
}
// Check that we actually traversed the entire map.
assert(nNodes == forward.size());
}
std::string Chainstate::ToString() {
AssertLockHeld(::cs_main);
CBlockIndex *tip = m_chain.Tip();
return strprintf("Chainstate [%s] @ height %d (%s)",
m_from_snapshot_blockhash ? "snapshot" : "ibd",
tip ? tip->nHeight : -1,
tip ? tip->GetBlockHash().ToString() : "null");
}
bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) {
AssertLockHeld(::cs_main);
if (coinstip_size == m_coinstip_cache_size_bytes &&
coinsdb_size == m_coinsdb_cache_size_bytes) {
// Cache sizes are unchanged, no need to continue.
return true;
}
size_t old_coinstip_size = m_coinstip_cache_size_bytes;
m_coinstip_cache_size_bytes = coinstip_size;
m_coinsdb_cache_size_bytes = coinsdb_size;
CoinsDB().ResizeCache(coinsdb_size);
LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n", this->ToString(),
coinsdb_size * (1.0 / 1024 / 1024));
LogPrintf("[%s] resized coinstip cache to %.1f MiB\n", this->ToString(),
coinstip_size * (1.0 / 1024 / 1024));
BlockValidationState state;
bool ret;
if (coinstip_size > old_coinstip_size) {
// Likely no need to flush if cache sizes have grown.
ret = FlushStateToDisk(state, FlushStateMode::IF_NEEDED);
} else {
// Otherwise, flush state to disk and deallocate the in-memory coins
// map.
ret = FlushStateToDisk(state, FlushStateMode::ALWAYS);
}
return ret;
}
//! Guess how far we are in the verification process at the given block index.
//! Requires cs_main if pindex has not been validated yet (because the chain's
//! transaction count might be unset). This conditional lock requirement might
//! be confusing; see: https://github.com/bitcoin/bitcoin/issues/15994
double GuessVerificationProgress(const ChainTxData &data,
const CBlockIndex *pindex) {
if (pindex == nullptr) {
return 0.0;
}
int64_t nNow = time(nullptr);
double fTxTotal;
if (pindex->GetChainTxCount() <= data.nTxCount) {
fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
} else {
fTxTotal = pindex->GetChainTxCount() +
(nNow - pindex->GetBlockTime()) * data.dTxRate;
}
return std::min<double>(pindex->GetChainTxCount() / fTxTotal, 1.0);
}
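// Worked example with hypothetical numbers: given checkpoint data of
// nTxCount = 1,000,000 transactions at nTime with dTxRate = 2.0 tx/s, a tip
// whose GetChainTxCount() is 900,000 queried 43,200 seconds after nTime
// takes the first branch: fTxTotal = 1,000,000 + 43,200 * 2.0 = 1,086,400,
// so the function reports 900,000 / 1,086,400 ~= 0.83, i.e. about 83%.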
std::optional<BlockHash> ChainstateManager::SnapshotBlockhash() const {
LOCK(::cs_main);
if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
// If a snapshot chainstate exists, it will always be our active.
return m_active_chainstate->m_from_snapshot_blockhash;
}
return std::nullopt;
}
std::vector<Chainstate *> ChainstateManager::GetAll() {
LOCK(::cs_main);
std::vector<Chainstate *> out;
for (Chainstate *pchainstate :
{m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) {
if (this->IsUsable(pchainstate)) {
out.push_back(pchainstate);
}
}
return out;
}
Chainstate &ChainstateManager::InitializeChainstate(CTxMemPool *mempool) {
AssertLockHeld(::cs_main);
assert(!m_ibd_chainstate);
assert(!m_active_chainstate);
m_ibd_chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this);
m_active_chainstate = m_ibd_chainstate.get();
return *m_active_chainstate;
}
const AssumeutxoData *ExpectedAssumeutxo(const int height,
const CChainParams &chainparams) {
const MapAssumeutxo &valid_assumeutxos_map = chainparams.Assumeutxo();
const auto assumeutxo_found = valid_assumeutxos_map.find(height);
if (assumeutxo_found != valid_assumeutxos_map.end()) {
return &assumeutxo_found->second;
}
return nullptr;
}
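// A minimal usage sketch (the height is illustrative, not a real chainparams
// entry):
//
//     if (const AssumeutxoData *au =
//             ExpectedAssumeutxo(base_height, chainparams)) {
//         // au->hash_serialized is the UTXO set hash that must match, and
//         // au->nChainTx seeds the faked chain-wide tx count (see
//         // PopulateAndValidateSnapshot below).
//     }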
static bool DeleteCoinsDBFromDisk(const fs::path &db_path, bool is_snapshot)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
if (is_snapshot) {
fs::path base_blockhash_path =
db_path / node::SNAPSHOT_BLOCKHASH_FILENAME;
try {
const bool existed{fs::remove(base_blockhash_path)};
if (!existed) {
LogPrintf("[snapshot] snapshot chainstate dir being removed "
"lacks %s file\n",
fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME));
}
} catch (const fs::filesystem_error &e) {
LogPrintf("[snapshot] failed to remove file %s: %s\n",
fs::PathToString(base_blockhash_path),
fsbridge::get_filesystem_error_message(e));
}
}
std::string path_str = fs::PathToString(db_path);
LogPrintf("Removing leveldb dir at %s\n", path_str);
// We have to destruct the leveldb::DB instance before this call in order to
// release the db lock, otherwise `DestroyDB` will fail. See
// `leveldb::~DBImpl()`.
const bool destroyed = dbwrapper::DestroyDB(path_str, {}).ok();
if (!destroyed) {
LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);
}
// The datadir should be removed from the filesystem; otherwise
// initialization may detect it on subsequent startups and get confused.
//
// If the base_blockhash_path removal above fails in the case of snapshot
// chainstates, this will return false since leveldb won't remove a
// non-empty directory.
return destroyed && !fs::exists(db_path);
}
bool ChainstateManager::ActivateSnapshot(AutoFile &coins_file,
const SnapshotMetadata &metadata,
bool in_memory) {
BlockHash base_blockhash = metadata.m_base_blockhash;
if (this->SnapshotBlockhash()) {
LogPrintf("[snapshot] can't activate a snapshot-based chainstate more "
"than once\n");
return false;
}
int64_t current_coinsdb_cache_size{0};
int64_t current_coinstip_cache_size{0};
// Cache percentages to allocate to each chainstate.
//
// These particular percentages don't matter so much since they will only be
// relevant during snapshot activation; caches are rebalanced at the
// conclusion of this function. We want to give (essentially) all available
// cache capacity to the snapshot to aid the bulk load later in this
// function.
static constexpr double IBD_CACHE_PERC = 0.01;
static constexpr double SNAPSHOT_CACHE_PERC = 0.99;
{
LOCK(::cs_main);
// Resize the coins caches to ensure we're not exceeding memory limits.
//
// Allocate the majority of the cache to the incoming snapshot
// chainstate, since (optimistically) getting to its tip will be the top
// priority. We'll need to call `MaybeRebalanceCaches()` once we're done
// with this function to ensure the right allocation (including the
// possibility that no snapshot was activated and that we should restore
// the active chainstate caches to their original size).
//
current_coinsdb_cache_size =
this->ActiveChainstate().m_coinsdb_cache_size_bytes;
current_coinstip_cache_size =
this->ActiveChainstate().m_coinstip_cache_size_bytes;
// Temporarily resize the active coins cache to make room for the
// newly-created snapshot chain.
this->ActiveChainstate().ResizeCoinsCaches(
static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
}
auto snapshot_chainstate =
WITH_LOCK(::cs_main, return std::make_unique<Chainstate>(
/* mempool */ nullptr, m_blockman, *this,
base_blockhash));
{
LOCK(::cs_main);
snapshot_chainstate->InitCoinsDB(
static_cast<size_t>(current_coinsdb_cache_size *
SNAPSHOT_CACHE_PERC),
in_memory, false, "chainstate");
snapshot_chainstate->InitCoinsCache(static_cast<size_t>(
current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
}
bool snapshot_ok = this->PopulateAndValidateSnapshot(*snapshot_chainstate,
coins_file, metadata);
// If not in-memory, persist the base blockhash for use during subsequent
// initialization.
if (!in_memory) {
LOCK(::cs_main);
if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) {
snapshot_ok = false;
}
}
if (!snapshot_ok) {
LOCK(::cs_main);
this->MaybeRebalanceCaches();
// PopulateAndValidateSnapshot can return (in error) before the leveldb
// datadir has been created, so only attempt removal if we got that far.
if (auto snapshot_datadir = node::FindSnapshotChainstateDir()) {
// We have to destruct leveldb::DB in order to release the db lock,
// otherwise DestroyDB() (in DeleteCoinsDBFromDisk()) will fail. See
// `leveldb::~DBImpl()`. Destructing the chainstate (and so
// resetting the coinsviews object) does this.
snapshot_chainstate.reset();
bool removed =
DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true);
if (!removed) {
AbortNode(
strprintf("Failed to remove snapshot chainstate dir (%s). "
"Manually remove it before restarting.\n",
fs::PathToString(*snapshot_datadir)));
}
}
return false;
}
{
LOCK(::cs_main);
assert(!m_snapshot_chainstate);
m_snapshot_chainstate.swap(snapshot_chainstate);
const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
assert(chaintip_loaded);
m_active_chainstate = m_snapshot_chainstate.get();
LogPrintf("[snapshot] successfully activated snapshot %s\n",
base_blockhash.ToString());
LogPrintf("[snapshot] (%.2f MB)\n",
m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() /
(1000 * 1000));
this->MaybeRebalanceCaches();
}
return true;
}
static void FlushSnapshotToDisk(CCoinsViewCache &coins_cache,
bool snapshot_loaded) {
LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE(
strprintf("%s (%.2f MB)",
snapshot_loaded ? "saving snapshot chainstate"
: "flushing coins cache",
coins_cache.DynamicMemoryUsage() / (1000 * 1000)),
BCLog::LogFlags::ALL);
coins_cache.Flush();
}
struct StopHashingException : public std::exception {
const char *what() const throw() override {
return "ComputeUTXOStats interrupted by shutdown.";
}
};
static void SnapshotUTXOHashBreakpoint() {
if (ShutdownRequested()) {
throw StopHashingException();
}
}
bool ChainstateManager::PopulateAndValidateSnapshot(
Chainstate &snapshot_chainstate, AutoFile &coins_file,
const SnapshotMetadata &metadata) {
// It's okay to release cs_main before we're done using `coins_cache`
// because we know that nothing else will be referencing the newly created
// snapshot_chainstate yet.
CCoinsViewCache &coins_cache =
*WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());
BlockHash base_blockhash = metadata.m_base_blockhash;
CBlockIndex *snapshot_start_block = WITH_LOCK(
::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));
if (!snapshot_start_block) {
// Needed for ComputeUTXOStats and ExpectedAssumeutxo to determine the
// height and to avoid a crash when base_blockhash.IsNull()
LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
base_blockhash.ToString());
return false;
}
int base_height = snapshot_start_block->nHeight;
auto maybe_au_data = ExpectedAssumeutxo(base_height, GetParams());
if (!maybe_au_data) {
LogPrintf("[snapshot] assumeutxo height in snapshot metadata not "
"recognized (%d) - refusing to load snapshot\n",
base_height);
return false;
}
const AssumeutxoData &au_data = *maybe_au_data;
COutPoint outpoint;
Coin coin;
const uint64_t coins_count = metadata.m_coins_count;
uint64_t coins_left = metadata.m_coins_count;
LogPrintf("[snapshot] loading coins from snapshot %s\n",
base_blockhash.ToString());
int64_t coins_processed{0};
while (coins_left > 0) {
try {
coins_file >> outpoint;
coins_file >> coin;
} catch (const std::ios_base::failure &) {
LogPrintf("[snapshot] bad snapshot format or truncated snapshot "
"after deserializing %d coins\n",
coins_count - coins_left);
return false;
}
if (coin.GetHeight() > uint32_t(base_height) ||
// Avoid integer wrap-around in coinstats.cpp:ApplyHash
outpoint.GetN() >=
std::numeric_limits<decltype(outpoint.GetN())>::max()) {
LogPrintf(
"[snapshot] bad snapshot data after deserializing %d coins\n",
coins_count - coins_left);
return false;
}
coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint),
std::move(coin));
--coins_left;
++coins_processed;
if (coins_processed % 1000000 == 0) {
LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
coins_processed,
static_cast<float>(coins_processed) * 100 /
static_cast<float>(coins_count),
coins_cache.DynamicMemoryUsage() / (1000 * 1000));
}
// Batch write and flush (if we need to) every so often.
//
// If our average Coin size is roughly 41 bytes, checking every 120,000
// coins means <5MB of memory imprecision.
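// (41 bytes/coin * 120,000 coins is roughly 4.9 MB that may accumulate
// in the cache between two consecutive size checks.)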
if (coins_processed % 120000 == 0) {
if (ShutdownRequested()) {
return false;
}
const auto snapshot_cache_state = WITH_LOCK(
::cs_main, return snapshot_chainstate.GetCoinsCacheSizeState());
if (snapshot_cache_state >= CoinsCacheSizeState::CRITICAL) {
// This is a hack - we don't know what the actual best block is,
// but that doesn't matter for the purposes of flushing the
// cache here. We'll set this to its correct value
// (`base_blockhash`) below after the coins are loaded.
coins_cache.SetBestBlock(BlockHash{GetRandHash()});
// No need to acquire cs_main since this chainstate isn't being
// used yet.
FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/false);
}
}
}
// Important that we set this. This and the coins_cache accesses above are
// sort of a layer violation, but either we reach into the innards of
// CCoinsViewCache here or we have to invert some of the Chainstate to
// embed them in a snapshot-activation-specific CCoinsViewCache bulk load
// method.
coins_cache.SetBestBlock(base_blockhash);
bool out_of_coins{false};
try {
coins_file >> outpoint;
} catch (const std::ios_base::failure &) {
// We expect an exception since we should be out of coins.
out_of_coins = true;
}
if (!out_of_coins) {
LogPrintf("[snapshot] bad snapshot - coins left over after "
"deserializing %d coins\n",
coins_count);
return false;
}
LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
coins_count, coins_cache.DynamicMemoryUsage() / (1000 * 1000),
base_blockhash.ToString());
// No need to acquire cs_main since this chainstate isn't being used yet.
FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/true);
assert(coins_cache.GetBestBlock() == base_blockhash);
// As above, okay to immediately release cs_main here since no other context
// knows about the snapshot_chainstate.
CCoinsViewDB *snapshot_coinsdb =
WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());
std::optional<CCoinsStats> maybe_stats;
try {
maybe_stats = ComputeUTXOStats(CoinStatsHashType::HASH_SERIALIZED,
snapshot_coinsdb, m_blockman,
SnapshotUTXOHashBreakpoint);
} catch (StopHashingException const &) {
return false;
}
if (!maybe_stats.has_value()) {
LogPrintf("[snapshot] failed to generate coins stats\n");
return false;
}
// Assert that the deserialized chainstate contents match the expected
// assumeutxo value.
if (AssumeutxoHash{maybe_stats->hashSerialized} !=
au_data.hash_serialized) {
LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
au_data.hash_serialized.ToString(),
maybe_stats->hashSerialized.ToString());
return false;
}
snapshot_chainstate.m_chain.SetTip(*snapshot_start_block);
// The remainder of this function requires modifying data protected by
// cs_main.
LOCK(::cs_main);
// Fake various pieces of CBlockIndex state:
CBlockIndex *index = nullptr;
// Don't make any modifications to the genesis block.
// This is especially important because we don't want to erroneously
// apply ASSUMED_VALID_FLAG to genesis, which would happen if we didn't
// skip it here (since it apparently isn't BlockValidity::SCRIPTS).
constexpr int AFTER_GENESIS_START{1};
for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height();
++i) {
index = snapshot_chainstate.m_chain[i];
// Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
// entries (among other things)
if (!index->nTx) {
index->nTx = 1;
}
// Fake nChainTx so that GuessVerificationProgress reports accurately
index->nChainTx = index->pprev->nChainTx + index->nTx;
// Mark unvalidated block index entries beneath the snapshot base block
// as assumed-valid.
if (!index->IsValid(BlockValidity::SCRIPTS)) {
// This flag will be removed once the block is fully validated by a
// background chainstate.
index->nStatus = index->nStatus.withAssumedValid();
}
m_blockman.m_dirty_blockindex.insert(index);
// Changes to the block index will be flushed to disk after this call
// returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is
// called, since we've added a snapshot chainstate and therefore will
// have to downsize the IBD chainstate, which will result in a call to
// `FlushStateToDisk(ALWAYS)`.
}
assert(index);
index->nChainTx = au_data.nChainTx;
snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
coins_cache.DynamicMemoryUsage() / (1000 * 1000));
return true;
}
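// In short, snapshot loading above proceeds as: deserialize and bulk-insert
// every coin (flushing whenever the cache reaches the CRITICAL threshold),
// hash the resulting UTXO set with ComputeUTXOStats, compare that hash
// against the hard-coded assumeutxo value for the base height, and only
// then fake the intermediate block-index state so the snapshot tip looks
// connectable.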
// Currently, this function holds cs_main for its duration, which could be for
// multiple minutes due to the ComputeUTXOStats call. This hold is necessary
// because we need to avoid advancing the background validation chainstate
// farther than the snapshot base block - and this function is also invoked
// from within ConnectTip, i.e. from within ActivateBestChain, so cs_main is
// held anyway.
//
// Eventually (TODO), we could somehow separate this function's runtime from
// maintenance of the active chain, but that will either require
//
// (i) setting `m_disabled` immediately and ensuring all chainstate accesses go
// through IsUsable() checks, or
//
// (ii) giving each chainstate its own lock instead of using cs_main for
// everything.
SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation(
std::function<void(bilingual_str)> shutdown_fnc) {
AssertLockHeld(cs_main);
if (m_ibd_chainstate.get() == &this->ActiveChainstate() ||
!this->IsUsable(m_snapshot_chainstate.get()) ||
!this->IsUsable(m_ibd_chainstate.get()) ||
!m_ibd_chainstate->m_chain.Tip()) {
// Nothing to do - this function only applies to the background
// validation chainstate.
return SnapshotCompletionResult::SKIPPED;
}
const int snapshot_tip_height = this->ActiveHeight();
const int snapshot_base_height = *Assert(this->GetSnapshotBaseHeight());
const CBlockIndex &index_new = *Assert(m_ibd_chainstate->m_chain.Tip());
if (index_new.nHeight < snapshot_base_height) {
// Background IBD not complete yet.
return SnapshotCompletionResult::SKIPPED;
}
assert(SnapshotBlockhash());
BlockHash snapshot_blockhash = *Assert(SnapshotBlockhash());
auto handle_invalid_snapshot = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
bilingual_str user_error = strprintf(
_("%s failed to validate the -assumeutxo snapshot state. "
"This indicates a hardware problem, or a bug in the software, or "
"a bad software modification that allowed an invalid snapshot to "
"be loaded. As a result of this, the node will shut down and "
"stop using any state that was built on the snapshot, resetting "
"the chain height from %d to %d. On the next restart, the node "
"will resume syncing from %d without using any snapshot data. "
"Please report this incident to %s, including how you obtained "
"the snapshot. The invalid snapshot chainstate will be left on "
"disk in case it is helpful in diagnosing the issue that caused "
"this error."),
PACKAGE_NAME, snapshot_tip_height, snapshot_base_height,
snapshot_base_height, PACKAGE_BUGREPORT);
LogPrintf("[snapshot] !!! %s\n", user_error.original);
LogPrintf("[snapshot] deleting snapshot, reverting to validated chain, "
"and stopping node\n");
m_active_chainstate = m_ibd_chainstate.get();
m_snapshot_chainstate->m_disabled = true;
assert(!this->IsUsable(m_snapshot_chainstate.get()));
assert(this->IsUsable(m_ibd_chainstate.get()));
auto rename_result = m_snapshot_chainstate->InvalidateCoinsDBOnDisk();
if (!rename_result) {
user_error = strprintf(Untranslated("%s\n%s"), user_error,
util::ErrorString(rename_result));
}
shutdown_fnc(user_error);
};
if (index_new.GetBlockHash() != snapshot_blockhash) {
LogPrintf(
"[snapshot] supposed base block %s does not match the "
"snapshot base block %s (height %d). Snapshot is not valid.\n",
index_new.ToString(), snapshot_blockhash.ToString(),
snapshot_base_height);
handle_invalid_snapshot();
return SnapshotCompletionResult::BASE_BLOCKHASH_MISMATCH;
}
assert(index_new.nHeight == snapshot_base_height);
int curr_height = m_ibd_chainstate->m_chain.Height();
assert(snapshot_base_height == curr_height);
assert(snapshot_base_height == index_new.nHeight);
assert(this->IsUsable(m_snapshot_chainstate.get()));
assert(this->GetAll().size() == 2);
CCoinsViewDB &ibd_coins_db = m_ibd_chainstate->CoinsDB();
m_ibd_chainstate->ForceFlushStateToDisk();
auto maybe_au_data = ExpectedAssumeutxo(curr_height, GetParams());
if (!maybe_au_data) {
LogPrintf("[snapshot] assumeutxo data not found for height "
"(%d) - refusing to validate snapshot\n",
curr_height);
handle_invalid_snapshot();
return SnapshotCompletionResult::MISSING_CHAINPARAMS;
}
const AssumeutxoData &au_data = *maybe_au_data;
std::optional<CCoinsStats> maybe_ibd_stats;
LogPrintf(
"[snapshot] computing UTXO stats for background chainstate to validate "
"snapshot - this could take a few minutes\n");
try {
maybe_ibd_stats =
ComputeUTXOStats(CoinStatsHashType::HASH_SERIALIZED, &ibd_coins_db,
m_blockman, SnapshotUTXOHashBreakpoint);
} catch (StopHashingException const &) {
return SnapshotCompletionResult::STATS_FAILED;
}
if (!maybe_ibd_stats) {
LogPrintf(
"[snapshot] failed to generate stats for validation coins db\n");
// While this isn't a problem with the snapshot per se, this condition
// prevents us from validating the snapshot, so we should shut down and
// let the user handle the issue manually.
handle_invalid_snapshot();
return SnapshotCompletionResult::STATS_FAILED;
}
const auto &ibd_stats = *maybe_ibd_stats;
// Compare the background validation chainstate's UTXO set hash against the
// hard-coded assumeutxo hash we expect.
//
// TODO: For belt-and-suspenders, we could cache the UTXO set
// hash for the snapshot when it's loaded in its chainstate's leveldb. We
// could then reference that here for an additional check.
if (AssumeutxoHash{ibd_stats.hashSerialized} != au_data.hash_serialized) {
LogPrintf("[snapshot] hash mismatch: actual=%s, expected=%s\n",
ibd_stats.hashSerialized.ToString(),
au_data.hash_serialized.ToString());
handle_invalid_snapshot();
return SnapshotCompletionResult::HASH_MISMATCH;
}
LogPrintf("[snapshot] snapshot beginning at %s has been fully validated\n",
snapshot_blockhash.ToString());
m_ibd_chainstate->m_disabled = true;
this->MaybeRebalanceCaches();
return SnapshotCompletionResult::SUCCESS;
}
Chainstate &ChainstateManager::ActiveChainstate() const {
LOCK(::cs_main);
assert(m_active_chainstate);
return *m_active_chainstate;
}
bool ChainstateManager::IsSnapshotActive() const {
LOCK(::cs_main);
return m_snapshot_chainstate &&
m_active_chainstate == m_snapshot_chainstate.get();
}
void ChainstateManager::MaybeRebalanceCaches() {
AssertLockHeld(::cs_main);
bool ibd_usable = this->IsUsable(m_ibd_chainstate.get());
bool snapshot_usable = this->IsUsable(m_snapshot_chainstate.get());
assert(ibd_usable || snapshot_usable);
if (ibd_usable && !snapshot_usable) {
LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
// Allocate everything to the IBD chainstate.
m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache,
m_total_coinsdb_cache);
} else if (snapshot_usable && !ibd_usable) {
// If background validation has completed and snapshot is our active
// chain...
LogPrintf(
"[snapshot] allocating all cache to the snapshot chainstate\n");
// Allocate everything to the snapshot chainstate.
m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache,
m_total_coinsdb_cache);
} else if (ibd_usable && snapshot_usable) {
// If both chainstates exist, determine who needs more cache based on
// IBD status.
//
// Note: shrink caches first so that we don't inadvertently overwhelm
// available memory.
if (m_snapshot_chainstate->IsInitialBlockDownload()) {
m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache * 0.05,
m_total_coinsdb_cache * 0.05);
m_snapshot_chainstate->ResizeCoinsCaches(
m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
} else {
m_snapshot_chainstate->ResizeCoinsCaches(
m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache * 0.95,
m_total_coinsdb_cache * 0.95);
}
}
}
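// Worked example with a hypothetical 1000 MiB m_total_coinstip_cache: while
// the snapshot chainstate is still in IBD it receives 950 MiB and the
// background IBD chainstate 50 MiB; once the snapshot chainstate leaves
// IBD, the 95/5 split flips in favor of the background chainstate.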
void ChainstateManager::ResetChainstates() {
m_ibd_chainstate.reset();
m_snapshot_chainstate.reset();
m_active_chainstate = nullptr;
}
/**
* Apply default chain params to nullopt members.
* This helps to avoid coding errors around the accidental use of the compare
* operators that accept nullopt, thus ignoring the intended default value.
*/
static ChainstateManager::Options &&Flatten(ChainstateManager::Options &&opts) {
if (!opts.check_block_index.has_value()) {
opts.check_block_index =
opts.config.GetChainParams().DefaultConsistencyChecks();
}
if (!opts.minimum_chain_work.has_value()) {
opts.minimum_chain_work = UintToArith256(
opts.config.GetChainParams().GetConsensus().nMinimumChainWork);
}
if (!opts.assumed_valid_block.has_value()) {
opts.assumed_valid_block =
opts.config.GetChainParams().GetConsensus().defaultAssumeValid;
}
Assert(opts.adjusted_time_callback);
return std::move(opts);
}
ChainstateManager::ChainstateManager(
Options options, node::BlockManager::Options blockman_options)
: m_options{Flatten(std::move(options))}, m_blockman{std::move(
blockman_options)} {}
bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool *mempool) {
assert(!m_snapshot_chainstate);
std::optional<fs::path> path = node::FindSnapshotChainstateDir();
if (!path) {
return false;
}
std::optional<BlockHash> base_blockhash =
node::ReadSnapshotBaseBlockhash(*path);
if (!base_blockhash) {
return false;
}
LogPrintf("[snapshot] detected active snapshot chainstate (%s) - loading\n",
fs::PathToString(*path));
this->ActivateExistingSnapshot(mempool, *base_blockhash);
return true;
}
Chainstate &
ChainstateManager::ActivateExistingSnapshot(CTxMemPool *mempool,
BlockHash base_blockhash) {
assert(!m_snapshot_chainstate);
m_snapshot_chainstate = std::make_unique<Chainstate>(mempool, m_blockman,
*this, base_blockhash);
LogPrintf("[snapshot] switching active chainstate to %s\n",
m_snapshot_chainstate->ToString());
m_active_chainstate = m_snapshot_chainstate.get();
return *m_snapshot_chainstate;
}
util::Result<void> Chainstate::InvalidateCoinsDBOnDisk() {
AssertLockHeld(::cs_main);
// Should never be called on a non-snapshot chainstate.
assert(m_from_snapshot_blockhash);
auto storage_path_maybe = this->CoinsDB().StoragePath();
// Should never be called with a non-existent storage path.
assert(storage_path_maybe);
fs::path snapshot_datadir = *storage_path_maybe;
// Coins views no longer usable.
m_coins_views.reset();
auto invalid_path = snapshot_datadir + "_INVALID";
std::string dbpath = fs::PathToString(snapshot_datadir);
std::string target = fs::PathToString(invalid_path);
LogPrintf("[snapshot] renaming snapshot datadir %s to %s\n", dbpath,
target);
// The invalid snapshot datadir is simply moved and not deleted because we
// may want to do forensics later during issue investigation. The user is
// instructed accordingly in MaybeCompleteSnapshotValidation().
try {
fs::rename(snapshot_datadir, invalid_path);
} catch (const fs::filesystem_error &e) {
auto src_str = fs::PathToString(snapshot_datadir);
auto dest_str = fs::PathToString(invalid_path);
LogPrintf("%s: error renaming file '%s' -> '%s': %s\n", __func__,
src_str, dest_str, e.what());
return util::Error{strprintf(_("Rename of '%s' -> '%s' failed. "
"You should resolve this by manually "
"moving or deleting the invalid "
"snapshot directory %s, otherwise you "
"will encounter the same error again "
"on the next startup."),
src_str, dest_str, src_str)};
}
return {};
}
const CBlockIndex *ChainstateManager::GetSnapshotBaseBlock() const {
const auto blockhash_op = this->SnapshotBlockhash();
if (!blockhash_op) {
return nullptr;
}
return Assert(m_blockman.LookupBlockIndex(*blockhash_op));
}
std::optional<int> ChainstateManager::GetSnapshotBaseHeight() const {
const CBlockIndex *base = this->GetSnapshotBaseBlock();
return base ? std::make_optional(base->nHeight) : std::nullopt;
}
bool ChainstateManager::ValidatedSnapshotCleanup() {
AssertLockHeld(::cs_main);
auto get_storage_path = [](auto &chainstate) EXCLUSIVE_LOCKS_REQUIRED(
::cs_main) -> std::optional<fs::path> {
if (!(chainstate && chainstate->HasCoinsViews())) {
return {};
}
return chainstate->CoinsDB().StoragePath();
};
std::optional<fs::path> ibd_chainstate_path_maybe =
get_storage_path(m_ibd_chainstate);
std::optional<fs::path> snapshot_chainstate_path_maybe =
get_storage_path(m_snapshot_chainstate);
if (!this->IsSnapshotValidated()) {
// No need to clean up.
return false;
}
// If either path doesn't exist, that means at least one of the chainstates
// is in-memory, in which case we can't do on-disk cleanup. You'd better be
// in a unittest!
if (!ibd_chainstate_path_maybe || !snapshot_chainstate_path_maybe) {
LogPrintf("[snapshot] snapshot chainstate cleanup cannot happen with "
"in-memory chainstates. You are testing, right?\n");
return false;
}
const auto &snapshot_chainstate_path = *snapshot_chainstate_path_maybe;
const auto &ibd_chainstate_path = *ibd_chainstate_path_maybe;
// Since we're going to be moving around the underlying leveldb filesystem
// content for each chainstate, make sure that the chainstates (and their
// constituent CoinsViews members) have been destructed first.
//
// The caller of this method will be responsible for reinitializing
// chainstates if they want to continue operation.
this->ResetChainstates();
// No chainstates should be considered usable.
assert(this->GetAll().size() == 0);
LogPrintf("[snapshot] deleting background chainstate directory (now "
"unnecessary) (%s)\n",
fs::PathToString(ibd_chainstate_path));
fs::path tmp_old{ibd_chainstate_path + "_todelete"};
auto rename_failed_abort = [](fs::path p_old, fs::path p_new,
const fs::filesystem_error &err) {
LogPrintf("Error renaming file (%s): %s\n", fs::PathToString(p_old),
err.what());
AbortNode(strprintf(
"Rename of '%s' -> '%s' failed. "
"Cannot clean up the background chainstate leveldb directory.",
fs::PathToString(p_old), fs::PathToString(p_new)));
};
try {
fs::rename(ibd_chainstate_path, tmp_old);
} catch (const fs::filesystem_error &e) {
rename_failed_abort(ibd_chainstate_path, tmp_old, e);
throw;
}
LogPrintf("[snapshot] moving snapshot chainstate (%s) to "
"default chainstate directory (%s)\n",
fs::PathToString(snapshot_chainstate_path),
fs::PathToString(ibd_chainstate_path));
try {
fs::rename(snapshot_chainstate_path, ibd_chainstate_path);
} catch (const fs::filesystem_error &e) {
rename_failed_abort(snapshot_chainstate_path, ibd_chainstate_path, e);
throw;
}
if (!DeleteCoinsDBFromDisk(tmp_old, /*is_snapshot=*/false)) {
// No need to AbortNode because once the unneeded bg chainstate data is
// moved, it will not interfere with subsequent initialization.
LogPrintf("Deletion of %s failed. Please remove it manually, as the "
"directory is now unnecessary.\n",
fs::PathToString(tmp_old));
} else {
LogPrintf("[snapshot] deleted background chainstate directory (%s)\n",
fs::PathToString(ibd_chainstate_path));
}
return true;
}
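// Illustrative call pattern for the cleanup above (a sketch; `chainman` is
// an assumed ChainstateManager instance):
//
//     LOCK(::cs_main);
//     if (chainman.ValidatedSnapshotCleanup()) {
//         // All chainstates were reset above, so the caller must
//         // reinitialize them before any further chain access.
//     }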
diff --git a/src/validation.h b/src/validation.h
index 2beefc7e8..2b0c1dd8f 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,1581 +1,1581 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_VALIDATION_H
#define BITCOIN_VALIDATION_H
#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
#include <arith_uint256.h>
#include <attributes.h>
#include <blockfileinfo.h>
#include <blockindexcomparators.h>
#include <chain.h>
#include <common/bloom.h>
#include <config.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <deploymentstatus.h>
#include <disconnectresult.h>
#include <flatfile.h>
#include <kernel/chainparams.h>
#include <kernel/chainstatemanager_opts.h>
#include <kernel/cs_main.h>
#include <node/blockstorage.h>
#include <policy/packages.h>
#include <script/script_error.h>
#include <script/script_metrics.h>
#include <shutdown.h>
#include <sync.h>
#include <txdb.h>
#include <txmempool.h> // For CTxMemPool::cs
#include <uint256.h>
#include <util/check.h>
#include <util/fs.h>
#include <util/result.h>
#include <util/translation.h>
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>
class BlockPolicyValidationState;
class CChainParams;
class Chainstate;
class ChainstateManager;
class CScriptCheck;
class CTxMemPool;
class CTxUndo;
class DisconnectedBlockTransactions;
struct ChainTxData;
struct FlatFilePos;
struct PrecomputedTransactionData;
struct LockPoints;
struct AssumeutxoData;
namespace node {
class SnapshotMetadata;
} // namespace node
namespace Consensus {
struct Params;
} // namespace Consensus
namespace avalanche {
class Processor;
} // namespace avalanche
#define MIN_TRANSACTION_SIZE \
(::GetSerializeSize(CTransaction(), PROTOCOL_VERSION))
/** Maximum number of dedicated script-checking threads allowed */
static const int MAX_SCRIPTCHECK_THREADS = 15;
/** -par default (number of script-checking threads, 0 = auto) */
static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
static const bool DEFAULT_PEERBLOOMFILTERS = true;
/** Default for -stopatheight */
static const int DEFAULT_STOPATHEIGHT = 0;
/**
* Block files containing a block-height within MIN_BLOCKS_TO_KEEP of
* ActiveChain().Tip() will not be pruned.
*/
static const unsigned int MIN_BLOCKS_TO_KEEP = 288;
static const signed int DEFAULT_CHECKBLOCKS = 6;
static constexpr int DEFAULT_CHECKLEVEL{3};
/**
 * Require that the user allocate at least 550 MiB for block & undo files
* (blk???.dat and rev???.dat)
* At 1MB per block, 288 blocks = 288MB.
* Add 15% for Undo data = 331MB
* Add 20% for Orphan block rate = 397MB
* We want the low water mark after pruning to be at least 397 MB and since we
* prune in full block file chunks, we need the high water mark which triggers
* the prune to be one 128MB block file + added 15% undo data = 147MB greater
* for a total of 545MB
* Setting the target to >= 550 MiB will make it likely we can respect the
* target.
*/
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
/** Current sync state passed to tip changed callbacks. */
enum class SynchronizationState { INIT_REINDEX, INIT_DOWNLOAD, POST_INIT };
extern GlobalMutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
/** Used to notify getblocktemplate RPC of new tips. */
extern const CBlockIndex *g_best_block;
/** Documentation for argument 'checklevel'. */
extern const std::vector<std::string> CHECKLEVEL_DOC;
class BlockValidationOptions {
private:
uint64_t excessiveBlockSize;
bool checkPoW : 1;
bool checkMerkleRoot : 1;
public:
// Do full validation by default
explicit BlockValidationOptions(const Config &config);
explicit BlockValidationOptions(uint64_t _excessiveBlockSize,
bool _checkPow = true,
bool _checkMerkleRoot = true)
: excessiveBlockSize(_excessiveBlockSize), checkPoW(_checkPow),
checkMerkleRoot(_checkMerkleRoot) {}
BlockValidationOptions withCheckPoW(bool _checkPoW = true) const {
BlockValidationOptions ret = *this;
ret.checkPoW = _checkPoW;
return ret;
}
BlockValidationOptions
withCheckMerkleRoot(bool _checkMerkleRoot = true) const {
BlockValidationOptions ret = *this;
ret.checkMerkleRoot = _checkMerkleRoot;
return ret;
}
bool shouldValidatePoW() const { return checkPoW; }
bool shouldValidateMerkleRoot() const { return checkMerkleRoot; }
uint64_t getExcessiveBlockSize() const { return excessiveBlockSize; }
};
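// Example of the fluent options interface above (an illustrative sketch;
// `config` stands for an available Config instance):
//
//     const BlockValidationOptions opts =
//         BlockValidationOptions(config).withCheckPoW(false);
//     // opts now skips PoW validation but still checks the merkle root.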
/**
* Run instances of script checking worker threads
*/
void StartScriptCheckWorkerThreads(int threads_num);
/**
* Stop all of the script checking worker threads
*/
void StopScriptCheckWorkerThreads();
Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams);
bool AbortNode(BlockValidationState &state, const std::string &strMessage,
const bilingual_str &userMessage = bilingual_str{});
/**
* Guess verification progress (as a fraction between 0.0=genesis and
* 1.0=current tip).
*/
double GuessVerificationProgress(const ChainTxData &data,
const CBlockIndex *pindex);
/** Prune block files up to a given height */
void PruneBlockFilesManual(Chainstate &active_chainstate,
int nManualPruneHeight);
// clang-format off
/**
* Validation result for a transaction evaluated by MemPoolAccept (single or
* package).
* Here are the expected fields and properties of a result depending on its
* ResultType, applicable to results returned from package evaluation:
*+--------------------------+-----------+-------------------------------------------+---------------+
*| Field or property | VALID | INVALID | MEMPOOL_ENTRY |
*| | |-------------------------------------------| |
*| | | TX_PACKAGE_RECONSIDERABLE | Other | |
*+--------------------------+-----------+---------------------------+---------------+---------------+
*| txid in mempool? | yes | no | no* | yes |
*| m_state | IsValid() | IsInvalid() | IsInvalid() | IsValid() |
*| m_replaced_transactions | yes | no | no | no |
*| m_vsize | yes | no | no | yes |
*| m_base_fees | yes | no | no | yes |
*| m_effective_feerate | yes | yes | no | no |
*| m_txids_fee_calculations | yes | yes | no | no |
*+--------------------------+-----------+---------------------------+---------------+---------------+
* (*) Individual transaction acceptance doesn't return MEMPOOL_ENTRY. It
* returns INVALID, with the error txn-already-in-mempool. In this case, the
- * txid may be in the mempool for a TX_CONFLICT.
+ * txid may be in the mempool for a tx conflict (TX_AVALANCHE_RECONSIDERABLE).
*/
// clang-format on
struct MempoolAcceptResult {
/** Used to indicate the results of mempool validation. */
enum class ResultType {
//! Fully validated, valid.
VALID,
//! Invalid.
INVALID,
//! Valid, transaction was already in the mempool.
MEMPOOL_ENTRY,
};
/** Result type. Present in all MempoolAcceptResults. */
const ResultType m_result_type;
/** Contains information about why the transaction failed. */
const TxValidationState m_state;
/**
* Virtual size as used by the mempool, calculated using serialized size
* and sigchecks.
*/
const std::optional<int64_t> m_vsize;
/** Raw base fees in satoshis. */
const std::optional<Amount> m_base_fees;
/**
* The feerate at which this transaction was considered. This includes any
* fee delta added using prioritisetransaction (i.e. modified fees). If this
* transaction was submitted as a package, this is the package feerate,
* which may also include its descendants and/or ancestors
* (see m_txids_fee_calculations below).
*/
const std::optional<CFeeRate> m_effective_feerate;
/**
* Contains the txids of the transactions used for fee-related checks.
* Includes this transaction's txid and may include others if this
* transaction was validated as part of a package. This is not necessarily
* equivalent to the list of transactions passed to ProcessNewPackage().
*/
const std::optional<std::vector<TxId>> m_txids_fee_calculations;
static MempoolAcceptResult Failure(TxValidationState state) {
return MempoolAcceptResult(state);
}
static MempoolAcceptResult
FeeFailure(TxValidationState state, CFeeRate effective_feerate,
const std::vector<TxId> &txids_fee_calculations) {
return MempoolAcceptResult(state, effective_feerate,
txids_fee_calculations);
}
/** Constructor for success case */
static MempoolAcceptResult
Success(int64_t vsize, Amount fees, CFeeRate effective_feerate,
const std::vector<TxId> &txids_fee_calculations) {
return MempoolAcceptResult(ResultType::VALID, vsize, fees,
effective_feerate, txids_fee_calculations);
}
/**
* Constructor for already-in-mempool case. It wouldn't replace any
* transactions.
*/
static MempoolAcceptResult MempoolTx(int64_t vsize, Amount fees) {
return MempoolAcceptResult(vsize, fees);
}
// Private constructors. Use static methods MempoolAcceptResult::Success,
// etc. to construct.
private:
/** Constructor for failure case */
explicit MempoolAcceptResult(TxValidationState state)
: m_result_type(ResultType::INVALID), m_state(state),
m_base_fees(std::nullopt) {
// Can be invalid or error
Assume(!state.IsValid());
}
/** Generic constructor for success cases */
explicit MempoolAcceptResult(
ResultType result_type, int64_t vsize, Amount fees,
CFeeRate effective_feerate,
const std::vector<TxId> &txids_fee_calculations)
: m_result_type(result_type), m_vsize{vsize}, m_base_fees(fees),
m_effective_feerate(effective_feerate),
m_txids_fee_calculations(txids_fee_calculations) {}
/** Constructor for fee-related failure case */
explicit MempoolAcceptResult(
TxValidationState state, CFeeRate effective_feerate,
const std::vector<TxId> &txids_fee_calculations)
: m_result_type(ResultType::INVALID), m_state(state),
m_effective_feerate(effective_feerate),
m_txids_fee_calculations(txids_fee_calculations) {}
/** Constructor for already-in-mempool case. */
explicit MempoolAcceptResult(int64_t vsize, Amount fees)
: m_result_type(ResultType::MEMPOOL_ENTRY), m_vsize{vsize},
m_base_fees(fees) {}
};
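// Sketch of how a caller might inspect a MempoolAcceptResult (illustrative;
// `result` is assumed to come from e.g. ProcessTransaction()):
//
//     if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
//         const int64_t vsize = result.m_vsize.value();
//         const Amount fees = result.m_base_fees.value();
//     } else if (result.m_result_type ==
//                MempoolAcceptResult::ResultType::INVALID) {
//         // result.m_state.GetRejectReason() explains the failure.
//     }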
/**
* Validation result for package mempool acceptance.
*/
struct PackageMempoolAcceptResult {
PackageValidationState m_state;
/**
* Map from txid to finished MempoolAcceptResults. The client is
* responsible for keeping track of the transaction objects themselves.
* If a result is not present, it means validation was unfinished for that
* transaction. If there was a package-wide error (see result in m_state),
* m_tx_results will be empty.
*/
std::map<TxId, MempoolAcceptResult> m_tx_results;
explicit PackageMempoolAcceptResult(
PackageValidationState state,
std::map<TxId, MempoolAcceptResult> &&results)
: m_state{state}, m_tx_results(std::move(results)) {}
/**
* Constructor to create a PackageMempoolAcceptResult from a
* MempoolAcceptResult
*/
explicit PackageMempoolAcceptResult(const TxId &txid,
const MempoolAcceptResult &result)
: m_tx_results{{txid, result}} {}
};
/**
* Try to add a transaction to the mempool. This is an internal function and is
* exposed only for testing. Client code should use
* ChainstateManager::ProcessTransaction()
*
* @param[in] active_chainstate Reference to the active chainstate.
* @param[in] tx The transaction to submit for mempool
* acceptance.
* @param[in] accept_time The timestamp for adding the transaction to
* the mempool.
* It is also used to determine when the entry
* expires.
* @param[in] bypass_limits When true, don't enforce mempool fee and
* capacity limits.
* @param[in] test_accept When true, run validation checks but don't
* submit to mempool.
* @param[in] heightOverride Override the block height of the transaction.
* Used only upon reorg.
*
* @returns a MempoolAcceptResult indicating whether the transaction was
* accepted/rejected with reason.
*/
MempoolAcceptResult
AcceptToMemoryPool(Chainstate &active_chainstate, const CTransactionRef &tx,
int64_t accept_time, bool bypass_limits,
bool test_accept = false, unsigned int heightOverride = 0)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
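// Minimal test-only usage sketch for the function above (assumes `chainman`
// and a CTransactionRef `tx` are in scope; production code should prefer
// ChainstateManager::ProcessTransaction()):
//
//     LOCK(cs_main);
//     const MempoolAcceptResult res =
//         AcceptToMemoryPool(chainman.ActiveChainstate(), tx, GetTime(),
//                            /*bypass_limits=*/false, /*test_accept=*/true);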
/**
* Validate (and maybe submit) a package to the mempool.
 * See doc/policy/packages.md for full details on package validation rules.
*
* @param[in] test_accept When true, run validation checks but don't
* submit to mempool.
* @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult
* for each transaction. If a transaction fails, validation will exit early
* and some results may be missing. It is also possible for the package to
* be partially submitted.
*/
PackageMempoolAcceptResult
ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool,
const Package &txns, bool test_accept)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
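// Illustrative handling of a package result (a sketch; `package_result` is
// assumed to be the return value of ProcessNewPackage()):
//
//     for (const auto &[txid, res] : package_result.m_tx_results) {
//         // A txid absent from this map means its validation was
//         // never finished (see PackageMempoolAcceptResult above).
//     }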
/**
* Simple class for regulating resource usage during CheckInputScripts (and
* CScriptCheck), atomic so as to be compatible with parallel validation.
*/
class CheckInputsLimiter {
protected:
std::atomic<int64_t> remaining;
public:
explicit CheckInputsLimiter(int64_t limit) : remaining(limit) {}
bool consume_and_check(int consumed) {
auto newvalue = (remaining -= consumed);
return newvalue >= 0;
}
bool check() { return remaining >= 0; }
};
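// Usage sketch for the limiter above (illustrative). consume_and_check()
// keeps returning false once the budget has been exhausted:
//
//     CheckInputsLimiter limiter(/*limit=*/100);
//     bool ok1 = limiter.consume_and_check(60); // true, 40 remaining
//     bool ok2 = limiter.consume_and_check(60); // false, budget exceeded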
class TxSigCheckLimiter : public CheckInputsLimiter {
public:
TxSigCheckLimiter() : CheckInputsLimiter(MAX_TX_SIGCHECKS) {}
// Let's make this bad boy copiable.
TxSigCheckLimiter(const TxSigCheckLimiter &rhs)
: CheckInputsLimiter(rhs.remaining.load()) {}
TxSigCheckLimiter &operator=(const TxSigCheckLimiter &rhs) {
remaining = rhs.remaining.load();
return *this;
}
static TxSigCheckLimiter getDisabled() {
TxSigCheckLimiter txLimiter;
// Historically, there has not been a transaction with more than 20k sig
        // checks on testnet or mainnet, so this effectively disables sigchecks.
txLimiter.remaining = 20000;
return txLimiter;
}
};
/**
* Check whether all of this transaction's input scripts succeed.
*
* This involves ECDSA signature checks so can be computationally intensive.
* This function should only be called after the cheap sanity checks in
* CheckTxInputs passed.
*
* If pvChecks is not nullptr, script checks are pushed onto it instead of being
* performed inline. Any script checks which are not necessary (eg due to script
* execution cache hits) are, obviously, not pushed onto pvChecks/run.
*
* Upon success nSigChecksOut will be filled in with either:
* - correct total for all inputs, or,
* - 0, in the case when checks were pushed onto pvChecks (i.e., a cache miss
* with pvChecks non-null), in which case the total can be found by executing
* pvChecks and adding the results.
*
* Setting sigCacheStore/scriptCacheStore to false will remove elements from the
* corresponding cache which are matched. This is useful for checking blocks
* where we will likely never need the cache entry again.
*
* pLimitSigChecks can be passed to limit the sigchecks count either in parallel
* or serial validation. With pvChecks null (serial validation), breaking the
* pLimitSigChecks limit will abort evaluation early and return false. With
* pvChecks not-null (parallel validation): the cached nSigChecks may itself
* break the limit in which case false is returned, OR, each entry in the
* returned pvChecks must be executed exactly once in order to probe the limit
* accurately.
*/
bool CheckInputScripts(const CTransaction &tx, TxValidationState &state,
const CCoinsViewCache &view, const uint32_t flags,
bool sigCacheStore, bool scriptCacheStore,
const PrecomputedTransactionData &txdata,
int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
CheckInputsLimiter *pBlockLimitSigChecks,
std::vector<CScriptCheck> *pvChecks)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Handy shortcut to full fledged CheckInputScripts call.
*/
static inline bool
CheckInputScripts(const CTransaction &tx, TxValidationState &state,
const CCoinsViewCache &view, const uint32_t flags,
bool sigCacheStore, bool scriptCacheStore,
const PrecomputedTransactionData &txdata, int &nSigChecksOut)
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
TxSigCheckLimiter nSigChecksTxLimiter;
return CheckInputScripts(tx, state, view, flags, sigCacheStore,
scriptCacheStore, txdata, nSigChecksOut,
nSigChecksTxLimiter, nullptr, nullptr);
}
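// The shortcut above expands to the full call with a fresh per-transaction
// limiter and no parallel check collection, e.g. (illustrative; `tx`,
// `state`, `view`, `flags` and `txdata` are assumed to be in scope):
//
//     int nSigChecksOut{0};
//     bool ok = CheckInputScripts(tx, state, view, flags,
//                                 /*sigCacheStore=*/true,
//                                 /*scriptCacheStore=*/true, txdata,
//                                 nSigChecksOut);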
/**
* Mark all the coins corresponding to a given transaction inputs as spent.
*/
void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight);
/**
* Apply the effects of this transaction on the UTXO set represented by view.
*/
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
int nHeight);
/**
* Calculate LockPoints required to check if transaction will be BIP68 final in
* the next block to be created on top of tip.
*
* @param[in] tip Chain tip for which tx sequence locks are
* calculated. For example, the tip of the current active chain.
* @param[in] coins_view Any CCoinsView that provides access to the
* relevant coins for checking sequence locks. For example, it can be a
* CCoinsViewCache that isn't connected to anything but contains all the
* relevant coins, or a CCoinsViewMemPool that is connected to the mempool
* and chainstate UTXO set. In the latter case, the caller is responsible
* for holding the appropriate locks to ensure that calls to GetCoin()
* return correct coins.
* @param[in] tx The transaction being evaluated.
*
* @returns The resulting height and time calculated and the hash of the block
* needed for calculation, or std::nullopt if there is an error.
*/
std::optional<LockPoints> CalculateLockPointsAtTip(CBlockIndex *tip,
const CCoinsView &coins_view,
const CTransaction &tx);
/**
* Check if transaction will be BIP68 final in the next block to be created on
* top of tip.
* @param[in] tip Chain tip to check tx sequence locks against.
* For example, the tip of the current active chain.
* @param[in] lock_points LockPoints containing the height and time at
* which this transaction is final.
* Simulates calling SequenceLocks() with data from the tip passed in.
*/
bool CheckSequenceLocksAtTip(CBlockIndex *tip, const LockPoints &lock_points);
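// The two functions above are typically chained (a sketch; `tip`,
// `coins_view` and `tx` are assumed to be available under the right locks):
//
//     if (const auto lp = CalculateLockPointsAtTip(tip, coins_view, tx)) {
//         const bool bip68_final = CheckSequenceLocksAtTip(tip, *lp);
//     }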
/**
* Closure representing one script verification.
* Note that this stores references to the spending transaction.
*
* Note that if pLimitSigChecks is passed, then failure does not imply that
* scripts have failed.
*/
class CScriptCheck {
private:
CTxOut m_tx_out;
const CTransaction *ptxTo;
unsigned int nIn;
uint32_t nFlags;
bool cacheStore;
ScriptError error{ScriptError::UNKNOWN};
ScriptExecutionMetrics metrics;
PrecomputedTransactionData txdata;
TxSigCheckLimiter *pTxLimitSigChecks;
CheckInputsLimiter *pBlockLimitSigChecks;
public:
CScriptCheck(const CTxOut &outIn, const CTransaction &txToIn,
unsigned int nInIn, uint32_t nFlagsIn, bool cacheIn,
const PrecomputedTransactionData &txdataIn,
TxSigCheckLimiter *pTxLimitSigChecksIn = nullptr,
CheckInputsLimiter *pBlockLimitSigChecksIn = nullptr)
: m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn),
cacheStore(cacheIn), txdata(txdataIn),
pTxLimitSigChecks(pTxLimitSigChecksIn),
pBlockLimitSigChecks(pBlockLimitSigChecksIn) {}
CScriptCheck(const CScriptCheck &) = delete;
CScriptCheck &operator=(const CScriptCheck &) = delete;
CScriptCheck(CScriptCheck &&) = default;
CScriptCheck &operator=(CScriptCheck &&) = default;
bool operator()();
ScriptError GetScriptError() const { return error; }
ScriptExecutionMetrics GetScriptExecutionMetrics() const { return metrics; }
};
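// Illustrative serial use of the closure above (in parallel validation the
// checks are queued and executed by worker threads instead; `coin`, `tx`,
// `flags` and `txdata` are assumed to be in scope):
//
//     CScriptCheck check(coin.GetTxOut(), tx, /*nInIn=*/0, flags,
//                        /*cacheIn=*/false, txdata);
//     if (!check()) {
//         const ScriptError err = check.GetScriptError();
//     }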
/** Functions for validating blocks and updating the block tree */
/**
* Context-independent validity checks.
*
* Returns true if the provided block is valid (has valid header,
* transactions are valid, block is a valid size, etc.)
*/
bool CheckBlock(const CBlock &block, BlockValidationState &state,
const Consensus::Params ¶ms,
BlockValidationOptions validationOptions);
/**
* This is a variant of ContextualCheckTransaction which computes the contextual
* check for a transaction based on the chain tip.
*
* See consensus/consensus.h for flag definitions.
*/
bool ContextualCheckTransactionForCurrentBlock(
const CBlockIndex &active_chain_tip, const Consensus::Params ¶ms,
const CTransaction &tx, TxValidationState &state)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**
* Check a block is completely valid from start to finish (only works on top of
* our current best block)
*/
bool TestBlockValidity(
BlockValidationState &state, const CChainParams ¶ms,
Chainstate &chainstate, const CBlock &block, CBlockIndex *pindexPrev,
const std::function<NodeClock::time_point()> &adjusted_time_callback,
BlockValidationOptions validationOptions) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
 * Check whether the proof of work on each block header matches the value in nBits
*/
bool HasValidProofOfWork(const std::vector<CBlockHeader> &headers,
const Consensus::Params &consensusParams);
/** Return the sum of the work on a given set of headers */
arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader> &headers);
enum class VerifyDBResult {
SUCCESS,
CORRUPTED_BLOCK_DB,
INTERRUPTED,
SKIPPED_L3_CHECKS,
SKIPPED_MISSING_BLOCKS,
};
/**
* RAII wrapper for VerifyDB: Verify consistency of the block and coin
* databases.
*/
class CVerifyDB {
private:
kernel::Notifications &m_notifications;
public:
explicit CVerifyDB(kernel::Notifications ¬ifications);
~CVerifyDB();
[[nodiscard]] VerifyDBResult VerifyDB(Chainstate &chainstate,
CCoinsView &coinsview,
int nCheckLevel, int nCheckDepth)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
};
/** @see Chainstate::FlushStateToDisk */
enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };
/**
* A convenience class for constructing the CCoinsView* hierarchy used
* to facilitate access to the UTXO set.
*
* This class consists of an arrangement of layered CCoinsView objects,
* preferring to store and retrieve coins in memory via `m_cacheview` but
* ultimately falling back on cache misses to the canonical store of UTXOs on
* disk, `m_dbview`.
*/
class CoinsViews {
public:
//! The lowest level of the CoinsViews cache hierarchy sits in a leveldb
//! database on disk. All unspent coins reside in this store.
CCoinsViewDB m_dbview GUARDED_BY(cs_main);
//! This view wraps access to the leveldb instance and handles read errors
//! gracefully.
CCoinsViewErrorCatcher m_catcherview GUARDED_BY(cs_main);
//! This is the top layer of the cache hierarchy - it keeps as many coins in
//! memory as can fit per the dbcache setting.
std::unique_ptr<CCoinsViewCache> m_cacheview GUARDED_BY(cs_main);
//! This constructor initializes CCoinsViewDB and CCoinsViewErrorCatcher
//! instances, but it *does not* create a CCoinsViewCache instance by
//! default. This is done separately because the presence of the cache has
//! implications on whether or not we're allowed to flush the cache's state
//! to disk, which should not be done until the health of the database is
//! verified.
//!
//! All arguments forwarded onto CCoinsViewDB.
CoinsViews(DBParams db_params, CoinsViewOptions options);
//! Initialize the CCoinsViewCache member.
void InitCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
enum class CoinsCacheSizeState {
//! The coins cache is in immediate need of a flush.
CRITICAL = 2,
//! The cache is at >= 90% capacity.
LARGE = 1,
OK = 0
};
/**
* Chainstate stores and provides an API to update our local knowledge of the
* current best chain.
*
* Eventually, the API here is targeted at being exposed externally as a
* consumable libconsensus library, so any functions added must only call
* other class member functions, pure functions in other parts of the consensus
* library, callbacks via the validation interface, or read/write-to-disk
* functions (eventually this will also be via callbacks).
*
* Anything that is contingent on the current tip of the chain is stored here,
* whereas block information and metadata independent of the current tip is
* kept in `BlockManager`.
*/
class Chainstate {
protected:
/**
* The ChainState Mutex.
* A lock that must be held when modifying this ChainState.
*/
Mutex m_chainstate_mutex;
/**
* Every received block is assigned a unique and increasing identifier, so
* we know which one to give priority in case of a fork.
* Blocks loaded from disk are assigned id 0, so start the counter at 1.
*/
std::atomic<int32_t> nBlockSequenceId{1};
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
/**
* Whether this chainstate is undergoing initial block download.
*
* Mutable because we need to be able to mark IsInitialBlockDownload()
* const, which latches this for caching purposes.
*/
mutable std::atomic<bool> m_cached_finished_ibd{false};
//! Optional mempool that is kept in sync with the chain.
//! Only the active chainstate has a mempool.
CTxMemPool *m_mempool;
//! Manages the UTXO set, which is a reflection of the contents of
//! `m_chain`.
std::unique_ptr<CoinsViews> m_coins_views;
//! This toggle exists for use when doing background validation for UTXO
//! snapshots.
//!
//! In the expected case, it is set once the background validation chain
//! reaches the same height as the base of the snapshot and its UTXO set is
//! found to hash to the expected assumeutxo value. It signals that we
//! should no longer connect blocks to the background chainstate. When set
//! on the background validation chainstate, it signifies that we have fully
//! validated the snapshot chainstate.
//!
//! In the unlikely case that the snapshot chainstate is found to be
//! invalid, this is set to true on the snapshot chainstate.
bool m_disabled GUARDED_BY(::cs_main){false};
mutable Mutex cs_avalancheFinalizedBlockIndex;
/**
* The best block via avalanche voting.
* This block cannot be reorged in any way except by explicit user action.
*/
const CBlockIndex *m_avalancheFinalizedBlockIndex
GUARDED_BY(cs_avalancheFinalizedBlockIndex) = nullptr;
/**
* Filter to prevent parking a block due to block policies more than once.
* After first application of block policies, Avalanche voting will
* determine the final acceptance state. Rare false positives will be
* reconciled by the network and should not have any negative impact.
*/
CRollingBloomFilter m_filterParkingPoliciesApplied =
CRollingBloomFilter{1000, 0.000001};
CBlockIndex const *m_best_fork_tip = nullptr;
CBlockIndex const *m_best_fork_base = nullptr;
public:
//! Reference to a BlockManager instance which itself is shared across all
//! Chainstate instances.
node::BlockManager &m_blockman;
//! The chainstate manager that owns this chainstate. The reference is
//! necessary so that this instance can check whether it is the active
//! chainstate within deeply nested method calls.
ChainstateManager &m_chainman;
explicit Chainstate(
CTxMemPool *mempool, node::BlockManager &blockman,
ChainstateManager &chainman,
std::optional<BlockHash> from_snapshot_blockhash = std::nullopt);
/**
* Initialize the CoinsViews UTXO set database management data structures.
* The in-memory cache is initialized separately.
*
* All parameters forwarded to CoinsViews.
*/
void InitCoinsDB(size_t cache_size_bytes, bool in_memory, bool should_wipe,
std::string leveldb_name = "chainstate");
//! Initialize the in-memory coins cache (to be done after the health of the
//! on-disk database is verified).
void InitCoinsCache(size_t cache_size_bytes)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
    //! @returns whether or not the CoinsViews object has been fully initialized
    //! and we can safely flush this object to disk.
bool CanFlushToDisk() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
return m_coins_views && m_coins_views->m_cacheview;
}
//! The current chain of blockheaders we consult and build on.
//! @see CChain, CBlockIndex.
CChain m_chain;
/**
* The blockhash which is the base of the snapshot this chainstate was
* created from.
*
* std::nullopt if this chainstate was not created from a snapshot.
*/
const std::optional<BlockHash> m_from_snapshot_blockhash{};
//! Return true if this chainstate relies on blocks that are assumed-valid.
//! In practice this means it was created based on a UTXO snapshot.
bool reliesOnAssumedValid() {
return m_from_snapshot_blockhash.has_value();
}
/**
* The set of all CBlockIndex entries with either BLOCK_VALID_TRANSACTIONS
* (for itself and all ancestors) *or* BLOCK_ASSUMED_VALID (if using
* background chainstates) and as good as our current tip or better.
* Entries may be failed, though, and pruning nodes may be missing the data
* for the block.
*/
std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
//! @returns A reference to the in-memory cache of the UTXO set.
CCoinsViewCache &CoinsTip() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
Assert(m_coins_views);
return *Assert(m_coins_views->m_cacheview);
}
//! @returns A reference to the on-disk UTXO set database.
CCoinsViewDB &CoinsDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
return Assert(m_coins_views)->m_dbview;
}
//! @returns A pointer to the mempool.
CTxMemPool *GetMempool() { return m_mempool; }
//! @returns A reference to a wrapped view of the in-memory UTXO set that
//! handles disk read errors gracefully.
CCoinsViewErrorCatcher &CoinsErrorCatcher()
EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(::cs_main);
return Assert(m_coins_views)->m_catcherview;
}
//! Destructs all objects related to accessing the UTXO set.
void ResetCoinsViews() { m_coins_views.reset(); }
//! Does this chainstate have a UTXO set attached?
bool HasCoinsViews() const { return (bool)m_coins_views; }
//! The cache size of the on-disk coins view.
size_t m_coinsdb_cache_size_bytes{0};
//! The cache size of the in-memory coins view.
size_t m_coinstip_cache_size_bytes{0};
//! Resize the CoinsViews caches dynamically and flush state to disk.
//! @returns true unless an error occurred during the flush.
bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**
* Import blocks from an external file
*
* During reindexing, this function is called for each block file
* (datadir/blocks/blk?????.dat). It reads all blocks contained in the given
* file and attempts to process them (add them to the block index). The
* blocks may be out of order within each file and across files. Often this
* function reads a block but finds that its parent hasn't been read yet, so
* the block can't be processed yet. The function will add an entry to the
* blocks_with_unknown_parent map (which is passed as an argument), so that
* when the block's parent is later read and processed, this function can
* re-read the child block from disk and process it.
*
* Because a block's parent may be in a later file, not just later in the
* same file, the blocks_with_unknown_parent map must be passed in and out
* with each call. It's a multimap, rather than just a map, because multiple
* blocks may have the same parent (when chain splits or stale blocks
* exist). It maps from parent-hash to child-disk-position.
*
* This function can also be used to read blocks from user-specified block
* files using the -loadblock= option. There's no unknown-parent tracking,
* so the last two arguments are omitted.
     *
* @param[in] fileIn FILE handle to file containing blocks to read
* @param[in] dbp (optional) Disk block position (only for reindex)
* @param[in,out] blocks_with_unknown_parent
* (optional) Map of disk positions for blocks with
* unknown parent, key is parent block hash
* (only used for reindex)
*/
void LoadExternalBlockFile(FILE *fileIn, FlatFilePos *dbp = nullptr,
std::multimap<BlockHash, FlatFilePos>
*blocks_with_unknown_parent = nullptr,
avalanche::Processor *const avalanche = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex);
/**
* Update the on-disk chain state.
* The caches and indexes are flushed depending on the mode we're called
* with if they're too large, if it's been a while since the last write, or
* always and in all cases if we're in prune mode and are deleting files.
*
* If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do
* anything besides checking if we need to prune.
*
* @returns true unless a system error occurred
*/
bool FlushStateToDisk(BlockValidationState &state, FlushStateMode mode,
int nManualPruneHeight = 0);
//! Unconditionally flush all changes to disk.
void ForceFlushStateToDisk();
//! Prune blockfiles from the disk if necessary and then flush chainstate
//! changes if we pruned.
void PruneAndFlush();
/**
* Find the best known block, and make it the tip of the block chain. The
* result is either failure or an activated best chain. pblock is either
* nullptr or a pointer to a block that is already loaded (to avoid loading
* it again from disk).
*
* ActivateBestChain is split into steps (see ActivateBestChainStep) so that
* we avoid holding cs_main for an extended period of time; the length of
* this call may be quite long during reindexing or a substantial reorg.
*
* May not be called with cs_main held. May not be called in a
* validationinterface callback.
*
* Note that if this is called while a snapshot chainstate is active, and if
* it is called on a background chainstate whose tip has reached the base
* block of the snapshot, its execution will take *MINUTES* while it hashes
* the background UTXO set to verify the assumeutxo value the snapshot was
* activated with. `cs_main` will be held during this time.
*
* @param[in] skip_checkblockindex (optional)
* If true, skip calling CheckBlockIndex even if -checkblockindex is
* true. If false (default behavior), respect the -checkblockindex arg.
* This is used in tests when we need to skip the checks only
* temporarily, and resume normal behavior later.
* @returns true unless a system error occurred
*/
bool ActivateBestChain(BlockValidationState &state,
std::shared_ptr<const CBlock> pblock = nullptr,
avalanche::Processor *const avalanche = nullptr,
bool skip_checkblockindex = false)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex)
LOCKS_EXCLUDED(cs_main);
bool AcceptBlock(const std::shared_ptr<const CBlock> &pblock,
BlockValidationState &state, bool fRequested,
const FlatFilePos *dbp, bool *fNewBlock,
bool min_pow_checked) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Block (dis)connection on a given view:
DisconnectResult DisconnectBlock(const CBlock &block,
const CBlockIndex *pindex,
CCoinsViewCache &view)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
bool ConnectBlock(const CBlock &block, BlockValidationState &state,
CBlockIndex *pindex, CCoinsViewCache &view,
BlockValidationOptions options,
Amount *blockFees = nullptr, bool fJustCheck = false)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Apply the effects of a block disconnection on the UTXO set.
bool DisconnectTip(BlockValidationState &state,
DisconnectedBlockTransactions *disconnectpool)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
// Manual block validity manipulation:
/**
* Mark a block as precious and reorganize.
*
* May not be called in a validationinterface callback.
*/
bool PreciousBlock(BlockValidationState &state, CBlockIndex *pindex,
avalanche::Processor *const avalanche = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex)
LOCKS_EXCLUDED(cs_main);
/** Mark a block as invalid. */
bool InvalidateBlock(BlockValidationState &state, CBlockIndex *pindex)
LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex);
/** Park a block. */
bool ParkBlock(BlockValidationState &state, CBlockIndex *pindex)
LOCKS_EXCLUDED(cs_main)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex);
/**
* Mark a block as finalized by avalanche.
*/
bool AvalancheFinalizeBlock(CBlockIndex *pindex,
avalanche::Processor &avalanche)
EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
/**
* Clear avalanche finalization.
*/
void ClearAvalancheFinalizedBlock()
EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
/**
* Checks if a block is finalized by avalanche voting.
*/
bool IsBlockAvalancheFinalized(const CBlockIndex *pindex) const
EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
/** Remove invalidity status from a block and its descendants. */
void ResetBlockFailureFlags(CBlockIndex *pindex)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
template <typename F>
bool UpdateFlagsForBlock(CBlockIndex *pindexBase, CBlockIndex *pindex, F f)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
template <typename F, typename C, typename AC>
void UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset, F f,
C fChild, AC fAncestorWasChanged)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Remove parked status from a block and its descendants. */
void UnparkBlockAndChildren(CBlockIndex *pindex)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Remove parked status from a block. */
void UnparkBlock(CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Replay blocks that aren't fully applied to the database. */
bool ReplayBlocks();
/**
* Ensures we have a genesis block in the block tree, possibly writing one
* to disk.
*/
bool LoadGenesisBlock();
void PruneBlockIndexCandidates();
void UnloadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Check whether we are doing an initial block download (synchronizing from
* disk or network)
*/
bool IsInitialBlockDownload() const;
/** Find the last common block of this chain and a locator. */
const CBlockIndex *FindForkInGlobalIndex(const CBlockLocator &locator) const
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Make various assertions about the state of the block index.
*
* By default this only executes fully when using the Regtest chain; see:
* m_options.check_block_index.
*/
void CheckBlockIndex();
/** Load the persisted mempool from disk */
void
LoadMempool(const fs::path &load_path,
fsbridge::FopenFn mockable_fopen_function = fsbridge::fopen);
/** Update the chain tip based on database information, i.e. CoinsTip()'s
* best block. */
bool LoadChainTip() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
//! Dictates whether we need to flush the cache to disk or not.
//!
//! @return the state of the size of the coins cache.
CoinsCacheSizeState GetCoinsCacheSizeState()
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
CoinsCacheSizeState
GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
std::string ToString() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Indirection necessary to make lock annotations work with an optional
//! mempool.
RecursiveMutex *MempoolMutex() const LOCK_RETURNED(m_mempool->cs) {
return m_mempool ? &m_mempool->cs : nullptr;
}
private:
bool ActivateBestChainStep(
BlockValidationState &state, CBlockIndex *pindexMostWork,
const std::shared_ptr<const CBlock> &pblock, bool &fInvalidFound,
const avalanche::Processor *const avalanche = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs,
!cs_avalancheFinalizedBlockIndex);
bool ConnectTip(BlockValidationState &state,
BlockPolicyValidationState &blockPolicyState,
CBlockIndex *pindexNew,
const std::shared_ptr<const CBlock> &pblock,
DisconnectedBlockTransactions &disconnectpool,
const avalanche::Processor *const avalanche = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs,
!cs_avalancheFinalizedBlockIndex);
void InvalidBlockFound(CBlockIndex *pindex,
const BlockValidationState &state)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
CBlockIndex *
FindMostWorkChain(std::vector<const CBlockIndex *> &blocksToReconcile,
bool fAutoUnpark)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
void ReceivedBlockTransactions(const CBlock &block, CBlockIndex *pindexNew,
const FlatFilePos &pos)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &inputs)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool UnwindBlock(BlockValidationState &state, CBlockIndex *pindex,
bool invalidate)
EXCLUSIVE_LOCKS_REQUIRED(m_chainstate_mutex,
!cs_avalancheFinalizedBlockIndex);
void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void InvalidChainFound(CBlockIndex *pindexNew)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
const CBlockIndex *FindBlockToFinalize(CBlockIndex *pindexNew)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Check warning conditions and do some notifications on new chain tip set.
*/
void UpdateTip(const CBlockIndex *pindexNew)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
std::chrono::microseconds m_last_write{0};
std::chrono::microseconds m_last_flush{0};
/**
* In case of an invalid snapshot, rename the coins leveldb directory so
* that it can be examined for issue diagnosis.
*/
[[nodiscard]] util::Result<void> InvalidateCoinsDBOnDisk()
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
friend ChainstateManager;
};
enum class SnapshotCompletionResult {
SUCCESS,
SKIPPED,
// Expected assumeutxo configuration data is not found for the height of the
// base block.
MISSING_CHAINPARAMS,
// Failed to generate UTXO statistics (to check UTXO set hash) for the
// background chainstate.
STATS_FAILED,
// The UTXO set hash of the background validation chainstate does not match
// the one expected by assumeutxo chainparams.
HASH_MISMATCH,
// The blockhash of the current tip of the background validation chainstate
// does not match the one expected by the snapshot chainstate.
BASE_BLOCKHASH_MISMATCH,
};
/**
* Provides an interface for creating and interacting with one or two
* chainstates: an IBD chainstate generated by downloading blocks, and
* an optional snapshot chainstate loaded from a UTXO snapshot. Managed
* chainstates can be maintained at different heights simultaneously.
*
* This class provides abstractions that allow the retrieval of the current
* most-work chainstate ("Active") as well as chainstates which may be in
* background use to validate UTXO snapshots.
*
* Definitions:
*
* *IBD chainstate*: a chainstate whose current state has been "fully"
* validated by the initial block download process.
*
* *Snapshot chainstate*: a chainstate populated by loading in an
* assumeutxo UTXO snapshot.
*
* *Active chainstate*: the chainstate containing the current most-work
* chain. Consulted by most parts of the system (net_processing,
* wallet) as a reflection of the current chain and UTXO set.
* This may either be an IBD chainstate or a snapshot chainstate.
*
* *Background IBD chainstate*: an IBD chainstate for which the
* IBD process is happening in the background while use of the
* active (snapshot) chainstate allows the rest of the system to function.
*/
class ChainstateManager {
private:
//! The chainstate used under normal operation (i.e. "regular" IBD) or, if
//! a snapshot is in use, for background validation.
//!
//! Its contents (including on-disk data) will be deleted *upon shutdown*
//! after background validation of the snapshot has completed. We do not
//! free the chainstate contents immediately after it finishes validation
//! to cautiously avoid a case where some other part of the system is still
//! using this pointer (e.g. net_processing).
//!
//! Once this pointer is set to a corresponding chainstate, it will not
//! be reset until init.cpp:Shutdown().
//!
//! This is especially important when, e.g., calling ActivateBestChain()
//! on all chainstates because we are not able to hold ::cs_main going into
//! that call.
std::unique_ptr<Chainstate> m_ibd_chainstate GUARDED_BY(::cs_main);
//! A chainstate initialized on the basis of a UTXO snapshot. If this is
//! non-null, it is always our active chainstate.
//!
//! Once this pointer is set to a corresponding chainstate, it will not
//! be reset until init.cpp:Shutdown().
//!
//! This is especially important when, e.g., calling ActivateBestChain()
//! on all chainstates because we are not able to hold ::cs_main going into
//! that call.
std::unique_ptr<Chainstate> m_snapshot_chainstate GUARDED_BY(::cs_main);
//! Points to either the ibd or snapshot chainstate; indicates our
//! most-work chain.
//!
//! Once this pointer is set to a corresponding chainstate, it will not
//! be reset until init.cpp:Shutdown().
//!
//! This is especially important when, e.g., calling ActivateBestChain()
//! on all chainstates because we are not able to hold ::cs_main going into
//! that call.
Chainstate *m_active_chainstate GUARDED_BY(::cs_main){nullptr};
CBlockIndex *m_best_invalid GUARDED_BY(::cs_main){nullptr};
CBlockIndex *m_best_parked GUARDED_BY(::cs_main){nullptr};
//! Internal helper for ActivateSnapshot().
[[nodiscard]] bool
PopulateAndValidateSnapshot(Chainstate &snapshot_chainstate,
AutoFile &coins_file,
const node::SnapshotMetadata &metadata);
/**
* If a block header hasn't already been seen, call CheckBlockHeader on it,
* ensure that it doesn't descend from an invalid block, and then add it to
* m_block_index.
* Caller must set min_pow_checked=true in order to add a new header to the
* block index (permanent memory storage), indicating that the header is
* known to be part of a sufficiently high-work chain (anti-dos check).
*/
bool AcceptBlockHeader(
const CBlockHeader &block, BlockValidationState &state,
CBlockIndex **ppindex, bool min_pow_checked,
const std::optional<CCheckpointData> &test_checkpoints = std::nullopt)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
friend Chainstate;
//! Returns nullptr if no snapshot has been loaded.
const CBlockIndex *GetSnapshotBaseBlock() const
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Return the height of the base block of the snapshot in use, if one
//! exists, else nullopt.
std::optional<int> GetSnapshotBaseHeight() const
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Return true if a chainstate is considered usable.
//!
//! This is false when a background validation chainstate has completed its
//! validation of an assumed-valid chainstate, or when a snapshot
//! chainstate has been found to be invalid.
bool IsUsable(const Chainstate *const pchainstate) const
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
return pchainstate && !pchainstate->m_disabled;
}
/** Most recent headers presync progress update, for rate-limiting. */
SteadyMilliseconds m_last_presync_update GUARDED_BY(::cs_main){};
public:
using Options = kernel::ChainstateManagerOpts;
explicit ChainstateManager(Options options,
node::BlockManager::Options blockman_options);
const Config &GetConfig() const { return m_options.config; }
const CChainParams &GetParams() const {
return m_options.config.GetChainParams();
}
const Consensus::Params &GetConsensus() const {
return m_options.config.GetChainParams().GetConsensus();
}
bool ShouldCheckBlockIndex() const {
return *Assert(m_options.check_block_index);
}
const arith_uint256 &MinimumChainWork() const {
return *Assert(m_options.minimum_chain_work);
}
const BlockHash &AssumedValidBlock() const {
return *Assert(m_options.assumed_valid_block);
}
kernel::Notifications &GetNotifications() const {
return m_options.notifications;
};
/**
* Alias for ::cs_main.
* Should be used in new code to make it easier to make ::cs_main a member
* of this class.
* Generally, methods of this class should be annotated to require this
* mutex. This will make calling code more verbose, but also help to:
* - Clarify that the method will acquire a mutex that heavily affects
* overall performance.
* - Force call sites to think how long they need to acquire the mutex to
* get consistent results.
*/
RecursiveMutex &GetMutex() const LOCK_RETURNED(::cs_main) {
return ::cs_main;
}
const Options m_options;
std::thread m_load_block;
//! A single BlockManager instance is shared across each constructed
//! chainstate to avoid duplicating block metadata.
node::BlockManager m_blockman;
/**
* In order to efficiently track invalidity of headers, we keep the set of
* blocks which we tried to connect and found to be invalid here (ie which
* were set to BLOCK_FAILED_VALID since the last restart). We can then
* walk this set and check if a new header is a descendant of something in
* this set, preventing us from having to walk m_block_index when we try
* to connect a bad block and fail.
*
* While this is more complicated than marking everything which descends
* from an invalid block as invalid at the time we discover it to be
* invalid, doing so would require walking all of m_block_index to find all
* descendants. Since this case should be very rare, keeping track of all
* BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
* well.
*
* Because we already walk m_block_index in height-order at startup, we go
* ahead and mark descendants of invalid blocks as FAILED_CHILD at that
* time, instead of putting things in this set.
*/
std::set<CBlockIndex *> m_failed_blocks;
/**
* Best header we've seen so far (used for getheaders queries' starting
* points).
*/
CBlockIndex *m_best_header GUARDED_BY(::cs_main){nullptr};
//! The total number of bytes available for us to use across all in-memory
//! coins caches. This will be split somehow across chainstates.
int64_t m_total_coinstip_cache{0};
//! The total number of bytes available for us to use across all leveldb
//! coins databases. This will be split somehow across chainstates.
int64_t m_total_coinsdb_cache{0};
//! Instantiate a new chainstate.
//!
//! @param[in] mempool The mempool to pass to the chainstate
    //! constructor
Chainstate &InitializeChainstate(CTxMemPool *mempool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Get all chainstates currently being used.
std::vector<Chainstate *> GetAll();
//! Construct and activate a Chainstate on the basis of UTXO snapshot data.
//!
//! Steps:
//!
//! - Initialize an unused Chainstate.
//! - Load its `CoinsViews` contents from `coins_file`.
//! - Verify that the hash of the resulting coinsdb matches the expected
//! hash per assumeutxo chain parameters.
//! - Wait for our headers chain to include the base block of the snapshot.
//! - "Fast forward" the tip of the new chainstate to the base of the
//! snapshot, faking nTx* block index data along the way.
//! - Move the new chainstate to `m_snapshot_chainstate` and make it our
//! ActiveChainstate().
[[nodiscard]] bool ActivateSnapshot(AutoFile &coins_file,
const node::SnapshotMetadata &metadata,
bool in_memory);
//! Once the background validation chainstate has reached the height which
//! is the base of the UTXO snapshot in use, compare its coins to ensure
//! they match those expected by the snapshot.
//!
//! If the coins match (expected), then mark the validation chainstate for
//! deletion and continue using the snapshot chainstate as active.
//! Otherwise, revert to using the ibd chainstate and shutdown.
SnapshotCompletionResult MaybeCompleteSnapshotValidation(
std::function<void(bilingual_str)> shutdown_fnc =
[](bilingual_str msg) { AbortNode(msg.original, msg); })
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! The most-work chain.
Chainstate &ActiveChainstate() const;
CChain &ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
return ActiveChainstate().m_chain;
}
int ActiveHeight() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
return ActiveChain().Height();
}
CBlockIndex *ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
return ActiveChain().Tip();
}
node::BlockMap &BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
return m_blockman.m_block_index;
}
//! @returns true if a snapshot-based chainstate is in use. Also implies
//! that a background validation chainstate is also in use.
bool IsSnapshotActive() const;
std::optional<BlockHash> SnapshotBlockhash() const;
//! Is there a snapshot in use and has it been fully validated?
bool IsSnapshotValidated() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
return m_snapshot_chainstate && m_ibd_chainstate &&
m_ibd_chainstate->m_disabled;
}
/**
* Process an incoming block. This only returns after the best known valid
* block is made active. Note that it does not, however, guarantee that the
* specific block passed to it has been checked for validity!
*
* If you want to *possibly* get feedback on whether block is valid, you
* must install a CValidationInterface (see validationinterface.h) - this
* will have its BlockChecked method called whenever *any* block completes
* validation.
*
     * Note that we guarantee that either the proof-of-work on the block is
     * valid, or BlockChecked will have been called (possibly both).
*
* May not be called in a validationinterface callback.
*
* @param[in] block The block we want to process.
* @param[in] force_processing Process this block even if unrequested;
* used for non-network block sources.
* @param[in] min_pow_checked True if proof-of-work anti-DoS checks have
* been done by caller for headers chain
* (note: only affects headers acceptance; if
* block header is already present in block
* index then this parameter has no effect)
* @param[out] new_block A boolean which is set to indicate if the block
* was first received via this call.
     * @returns Whether the block was processed, independently of block validity
*/
bool ProcessNewBlock(const std::shared_ptr<const CBlock> &block,
bool force_processing, bool min_pow_checked,
bool *new_block,
avalanche::Processor *const avalanche = nullptr)
LOCKS_EXCLUDED(cs_main);
/**
* Process incoming block headers.
*
* May not be called in a validationinterface callback.
*
* @param[in] block The block headers themselves.
* @param[in] min_pow_checked True if proof-of-work anti-DoS checks have
* been done by caller for headers chain
* @param[out] state This may be set to an Error state if any error
* occurred processing them.
* @param[out] ppindex If set, the pointer will be set to point to the
* last new block index object for the given
* headers.
* @return True if block headers were accepted as valid.
*/
bool ProcessNewBlockHeaders(
const std::vector<CBlockHeader> &block, bool min_pow_checked,
BlockValidationState &state, const CBlockIndex **ppindex = nullptr,
const std::optional<CCheckpointData> &test_checkpoints = std::nullopt)
LOCKS_EXCLUDED(cs_main);
/**
* Try to add a transaction to the memory pool.
*
* @param[in] tx The transaction to submit for mempool
* acceptance.
* @param[in] test_accept When true, run validation checks but don't
* submit to mempool.
*/
[[nodiscard]] MempoolAcceptResult
ProcessTransaction(const CTransactionRef &tx, bool test_accept = false)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
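// Illustrative test-only acceptance check (a sketch; `chainman` and `tx` are
// assumed to be in scope, with cs_main held):
//
//   const MempoolAcceptResult res =
//       chainman.ProcessTransaction(tx, /*test_accept=*/true);
//   if (res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
//       // tx passed validation but was not submitted to the mempool.
//   }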
//! Load the block tree and coins database from disk, initializing state if
//! we're running with -reindex
bool LoadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
//! Check to see if caches are out of balance and if so, call
//! ResizeCoinsCaches() as needed.
void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/**
* This is used by net_processing to report pre-synchronization progress of
* headers, as headers are not yet fed to validation during that time, but
* validation is (for now) responsible for logging and signalling through
* NotifyHeaderTip, so it needs this information.
*/
void ReportHeadersPresync(const arith_uint256 &work, int64_t height,
int64_t timestamp);
//! When starting up, search the datadir for a chainstate based on a UTXO
//! snapshot that is in the process of being validated.
bool DetectSnapshotChainstate(CTxMemPool *mempool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
void ResetChainstates() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Switch the active chainstate to one based on a UTXO snapshot that was
//! loaded previously.
Chainstate &ActivateExistingSnapshot(CTxMemPool *mempool,
BlockHash base_blockhash)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! If we have validated a snapshot chain during this runtime, copy its
//! chainstate directory over to the main `chainstate` location, completing
//! validation of the snapshot.
//!
//! If the cleanup succeeds, the caller will need to ensure chainstates are
//! reinitialized, since ResetChainstates() will be called before leveldb
//! directories are moved or deleted.
//!
//! @sa node/chainstate:LoadChainstate()
bool ValidatedSnapshotCleanup() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
/** Deployment* info via ChainstateManager */
template <typename DEP>
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev,
const ChainstateManager &chainman, DEP dep) {
return DeploymentActiveAfter(pindexPrev, chainman.GetConsensus(), dep);
}
template <typename DEP>
bool DeploymentActiveAt(const CBlockIndex &index,
const ChainstateManager &chainman, DEP dep) {
return DeploymentActiveAt(index, chainman.GetConsensus(), dep);
}
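// Illustrative use of the wrappers above (a sketch; `chainman`, `pindexPrev`
// and a deployment id `dep` are assumed to be in scope):
//
//   if (DeploymentActiveAfter(pindexPrev, chainman, dep)) {
//       // The deployment's rules apply to the block following pindexPrev.
//   }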
/**
* Return the expected assumeutxo value for a given height, if one exists.
*
* @param[in] height Get the assumeutxo value for this height.
*
* @returns nullptr if no assumeutxo configuration exists for the given height.
*/
const AssumeutxoData *ExpectedAssumeutxo(const int height,
const CChainParams &params);
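// Illustrative lookup (a sketch; the hash_serialized member is an assumption
// about AssumeutxoData):
//
//   if (const AssumeutxoData *au = ExpectedAssumeutxo(height, params)) {
//       // au->hash_serialized is the expected UTXO set hash at `height`.
//   }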
#endif // BITCOIN_VALIDATION_H
diff --git a/test/functional/abc_p2p_avalanche_transaction_voting.py b/test/functional/abc_p2p_avalanche_transaction_voting.py
index 4c0cf889d..d03827867 100644
--- a/test/functional/abc_p2p_avalanche_transaction_voting.py
+++ b/test/functional/abc_p2p_avalanche_transaction_voting.py
@@ -1,313 +1,335 @@
# Copyright (c) 2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test avalanche transaction voting."""
import random
from test_framework.avatools import can_find_inv_in_poll, get_ava_p2p_interface
from test_framework.blocktools import (
COINBASE_MATURITY,
create_block,
create_coinbase,
make_conform_to_ctor,
)
from test_framework.key import ECPubKey
from test_framework.messages import (
MSG_TX,
AvalancheTxVoteError,
AvalancheVote,
CTransaction,
FromHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, uint256_hex
from test_framework.wallet import MiniWallet
QUORUM_NODE_COUNT = 16
class AvalancheTransactionVotingTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-avalanchepreconsensus=1",
"-avacooldown=0",
"-avaproofstakeutxoconfirmations=1",
# Low enough for coinbase transactions to be staked in valid proofs
"-avaproofstakeutxodustthreshold=1000000",
"-avaminquorumstake=0",
"-avaminavaproofsnodecount=0",
"-whitelist=noban@127.0.0.1",
]
]
def run_test(self):
node = self.nodes[0]
poll_node = get_ava_p2p_interface(self, node)
peer = node.add_p2p_connection(P2PDataStore())
# Create helper to check expected poll responses
avakey = ECPubKey()
avakey.set(bytes.fromhex(node.getavalanchekey()))
def assert_response(expected):
response = poll_node.wait_for_avaresponse()
r = response.response
# Verify signature.
assert avakey.verify_schnorr(response.sig, r.get_hash())
# Verify correct votes list
votes = r.votes
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
# Make some valid txs
num_txs = 5
wallet = MiniWallet(node)
self.generate(wallet, num_txs, sync_fun=self.no_op)
# Mature the coinbases
self.generate(node, COINBASE_MATURITY, sync_fun=self.no_op)
assert_equal(node.getmempoolinfo()["size"], 0)
tx_ids = [
int(wallet.send_self_transfer(from_node=node)["txid"], 16)
for _ in range(num_txs)
]
assert_equal(node.getmempoolinfo()["size"], num_txs)
self.log.info("Check the votes are unknown while the quorum is not established")
poll_node.send_poll(tx_ids, MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.UNKNOWN, txid) for txid in tx_ids]
)
self.log.info("Check the votes on valid mempool transactions")
def get_quorum():
return [
get_ava_p2p_interface(self, node) for _ in range(0, QUORUM_NODE_COUNT)
]
quorum = get_quorum()
assert node.getavalancheinfo()["ready_to_poll"]
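# With -avaminquorumstake=0 and -avaminavaproofsnodecount=0 (see extra_args
# above), connecting the QUORUM_NODE_COUNT proof-backed peers is enough for
# the node to consider the quorum established.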
poll_node.send_poll(tx_ids, MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.ACCEPTED, txid) for txid in tx_ids]
)
self.log.info("Check the votes on recently mined transactions")
self.generate(node, 1, sync_fun=self.no_op)
assert_equal(node.getmempoolinfo()["size"], 0)
poll_node.send_poll(tx_ids, MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.ACCEPTED, txid) for txid in tx_ids]
)
for _ in range(10):
self.generate(node, 1, sync_fun=self.no_op)
poll_node.send_poll(tx_ids, MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.ACCEPTED, txid) for txid in tx_ids]
)
self.log.info("Check the votes on unknown transactions")
tx_ids = [random.randint(0, 2**256 - 1) for _ in range(10)]
poll_node.send_poll(tx_ids, MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.UNKNOWN, txid) for txid in tx_ids]
)
self.log.info("Check the votes on invalid transactions")
invalid_tx = CTransaction()
invalid_txid = int(invalid_tx.rehash(), 16)
# The peer's address is whitelisted with the noban flag, so it remains connected
peer.send_txs_and_test(
[invalid_tx], node, success=False, reject_reason="bad-txns-vin-empty"
)
poll_node.send_poll([invalid_txid], MSG_TX)
assert_response([AvalancheVote(AvalancheTxVoteError.INVALID, invalid_txid)])
self.log.info("Check the votes on orphan transactions")
def from_wallet_tx(tx):
tx_obj = FromHex(CTransaction(), tx["hex"])
tx_obj.rehash()
return tx_obj
orphan_tx = wallet.create_self_transfer_chain(chain_length=2)[-1]
orphan_txid = int(orphan_tx["txid"], 16)
peer.send_txs_and_test(
[from_wallet_tx(orphan_tx)],
node,
success=False,
reject_reason="bad-txns-inputs-missingorspent",
)
poll_node.send_poll([orphan_txid], MSG_TX)
assert_response([AvalancheVote(AvalancheTxVoteError.ORPHAN, orphan_txid)])
self.log.info("Check the votes on conflicting transactions")
utxo = wallet.get_utxo()
mempool_tx = wallet.create_self_transfer(utxo_to_spend=utxo)
peer.send_txs_and_test([from_wallet_tx(mempool_tx)], node, success=True)
assert mempool_tx["txid"] in node.getrawmempool()
conflicting_tx = wallet.create_self_transfer(utxo_to_spend=utxo)
conflicting_txid = int(conflicting_tx["txid"], 16)
peer.send_txs_and_test(
[from_wallet_tx(conflicting_tx)],
node,
success=False,
reject_reason="txn-mempool-conflict",
)
poll_node.send_poll([conflicting_txid], MSG_TX)
assert_response(
[AvalancheVote(AvalancheTxVoteError.CONFLICTING, conflicting_txid)]
)
self.log.info("Check the node polls for transactions added to the mempool")
# Let's clean up the non-transaction inventories from our avalanche polls
def has_finalized_proof(proofid):
can_find_inv_in_poll(quorum, proofid)
return node.getrawavalancheproof(uint256_hex(proofid))["finalized"]
for q in quorum:
self.wait_until(lambda: has_finalized_proof(q.proof.proofid))
def has_finalized_block(block_hash):
can_find_inv_in_poll(quorum, int(block_hash, 16))
return node.isfinalblock(block_hash)
tip = node.getbestblockhash()
self.wait_until(lambda: has_finalized_block(tip))
# Now we can focus on transactions
def has_finalized_tx(txid):
can_find_inv_in_poll(quorum, int(txid, 16))
return node.isfinaltransaction(txid)
def has_invalidated_tx(txid):
can_find_inv_in_poll(
quorum, int(txid, 16), response=AvalancheTxVoteError.INVALID
)
return txid not in node.getrawmempool()
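# Note on the helpers above: can_find_inv_in_poll() makes every quorum peer
# answer the node's outstanding polls with the given response (ACCEPTED by
# default), so calling it repeatedly inside wait_until() keeps avalanche
# rounds going until the item finalizes (or, with an INVALID response, is
# invalidated and dropped).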
+ self.wait_until(lambda: has_finalized_tx(mempool_tx["txid"]))
+
+ self.log.info("Check the node rejects txs that conflict with a finalized tx")
+
+ another_conflicting_tx = wallet.create_self_transfer(utxo_to_spend=utxo)
+ another_conflicting_txid = int(another_conflicting_tx["txid"], 16)
+ peer.send_txs_and_test(
+ [from_wallet_tx(another_conflicting_tx)],
+ node,
+ success=False,
+ reject_reason="finalized-tx-conflict",
+ )
+ # This transaction is plain invalid, so it is NOT stored in the
+ # conflicting pool
+ assert_equal(
+ node.gettransactionstatus(another_conflicting_tx["txid"])["pool"], "none"
+ )
+ poll_node.send_poll([another_conflicting_txid], MSG_TX)
+ assert_response(
+ [AvalancheVote(AvalancheTxVoteError.INVALID, another_conflicting_txid)]
+ )
+
self.log.info("Check the node can mine a finalized tx")
txid = wallet.send_self_transfer(from_node=node)["txid"]
assert txid in node.getrawmempool()
assert not node.isfinaltransaction(txid)
self.wait_until(lambda: has_finalized_tx(txid))
assert txid in node.getrawmempool()
tip = self.generate(node, 1)[0]
self.log.info("The transaction remains finalized after it's mined")
assert node.isfinaltransaction(txid, tip)
assert txid not in node.getrawmempool()
self.log.info("The transaction remains finalized even when reorg'ed")
node.parkblock(tip)
assert node.getbestblockhash() != tip
assert node.isfinaltransaction(txid)
assert txid in node.getrawmempool()
node.unparkblock(tip)
assert_equal(node.getbestblockhash(), tip)
assert node.isfinaltransaction(txid, tip)
assert txid not in node.getrawmempool()
self.log.info("The transaction remains finalized after the block is finalized")
self.wait_until(lambda: has_finalized_block(tip))
assert node.isfinaltransaction(txid, tip)
assert txid not in node.getrawmempool()
self.log.info("Check the node drops transactions invalidated by avalanche")
parent_tx = wallet.send_self_transfer(from_node=node)
parent_txid = parent_tx["txid"]
child_tx = wallet.send_self_transfer(
from_node=node, utxo_to_spend=parent_tx["new_utxo"]
)
child_txid = child_tx["txid"]
assert parent_txid in node.getrawmempool()
assert child_txid in node.getrawmempool()
assert not node.isfinaltransaction(parent_txid)
assert not node.isfinaltransaction(child_txid)
self.wait_until(lambda: has_invalidated_tx(parent_txid))
assert parent_txid not in node.getrawmempool()
assert child_txid not in node.getrawmempool()
assert_raises_rpc_error(
-5, "No such transaction", node.isfinaltransaction, parent_txid
)
assert_raises_rpc_error(
-5, "No such transaction", node.isfinaltransaction, child_txid
)
self.log.info(
"The node rejects blocks that contains tx conflicting with a finalized one"
)
utxo = wallet.get_utxo()
txid = wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo)["txid"]
assert txid in node.getrawmempool()
assert not node.isfinaltransaction(txid)
self.wait_until(lambda: has_finalized_tx(txid))
assert txid in node.getrawmempool()
conflicting_block = create_block(
int(node.getbestblockhash(), 16),
create_coinbase(node.getblockcount() - 1),
)
conflicting_tx = wallet.create_self_transfer(utxo_to_spend=utxo)["tx"]
assert conflicting_tx.get_id() != txid
conflicting_block.vtx.append(conflicting_tx)
make_conform_to_ctor(conflicting_block)
conflicting_block.hashMerkleRoot = conflicting_block.calc_merkle_root()
conflicting_block.solve()
peer.send_blocks_and_test(
[conflicting_block],
node,
success=False,
reject_reason="finalized-tx-conflict",
)
# The tx is still finalized as the block is rejected
assert node.isfinaltransaction(txid)
assert txid in node.getrawmempool()
# The block is accepted if we remove the offending transaction
conflicting_block.vtx.remove(conflicting_tx)
conflicting_block.hashMerkleRoot = conflicting_block.calc_merkle_root()
conflicting_block.solve()
peer.send_blocks_and_test(
[conflicting_block],
node,
)
if __name__ == "__main__":
AvalancheTransactionVotingTest().main()