diff --git a/doc/release-notes.md b/doc/release-notes.md
index bda5eee34..99c176698 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,16 +1,19 @@
# Bitcoin ABC 0.26.7 Release Notes
Bitcoin ABC version 0.26.7 is now available from:
This release includes the following features and fixes:
- `getblockchaininfo` now returns a new `time` field, that provides the chain
tip time.
- Add a `-daemonwait` option to `bitcoind` to wait for initialization to complete
before putting the process in the background. This allows the user or parent
process to more easily know whether the daemon started successfully by observing
the program’s output or exit code.
- The `savemempool` RPC now returns the path to the saved mempool in the `filename` field.
- Bitcoin ABC now supports User-space, Statically Defined Tracing (USDT).
For now only a few tracepoints are available, see [tracing.md](/doc/tracing.md) for more info.
+ - Avalanche is now enabled by default. It is still possible to disable it by
+ using `-avalanche=0` on the command line, or setting `avalanche=0` in the
+ `bitcoin.conf` file.
diff --git a/src/avalanche/avalanche.h b/src/avalanche/avalanche.h
index 87dd340d3..3baffaf68 100644
--- a/src/avalanche/avalanche.h
+++ b/src/avalanche/avalanche.h
@@ -1,69 +1,69 @@
// Copyright (c) 2021 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_AVALANCHE_AVALANCHE_H
#define BITCOIN_AVALANCHE_AVALANCHE_H
#include <consensus/amount.h>
#include <cstddef>
#include <memory>
namespace avalanche {
class Processor;
}
class ArgsManager;
/**
* Is avalanche enabled by default.
*/
-static constexpr bool AVALANCHE_DEFAULT_ENABLED = false;
+static constexpr bool AVALANCHE_DEFAULT_ENABLED = true;
/**
* Conflicting proofs cooldown time default value in seconds.
* Minimal delay between two proofs with at least a common UTXO.
*/
static constexpr size_t AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN = 60;
/**
* Peer replacement cooldown time default value in seconds.
* Minimal delay before a peer can be replaced due to a conflicting proof.
*/
static constexpr size_t AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN =
24 * 60 * 60;
/**
* Avalanche default cooldown in milliseconds.
*/
static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN = 100;
/**
* Default minimum cumulative stake of all known peers that constitutes a usable
* quorum.
*/
static constexpr Amount AVALANCHE_DEFAULT_MIN_QUORUM_STAKE =
int64_t(1'000'000'000'000) * SATOSHI; // 10B XEC
/**
* Default minimum percentage of stake-weighted peers we must have a node for to
* constitute a usable quorum.
*/
static constexpr double AVALANCHE_DEFAULT_MIN_QUORUM_CONNECTED_STAKE_RATIO =
0.8;
/**
* Default minimum number of nodes that sent us an avaproofs message before we
* can consider our quorum suitable for polling.
*/
static constexpr double AVALANCHE_DEFAULT_MIN_AVAPROOFS_NODE_COUNT = 8;
/**
* Global avalanche instance.
*/
extern std::unique_ptr<avalanche::Processor> g_avalanche;
bool isAvalancheEnabled(const ArgsManager &argsman);
#endif // BITCOIN_AVALANCHE_AVALANCHE_H
diff --git a/test/functional/abc-invalid-chains.py b/test/functional/abc-invalid-chains.py
index dc8605dac..024227671 100755
--- a/test/functional/abc-invalid-chains.py
+++ b/test/functional/abc-invalid-chains.py
@@ -1,156 +1,159 @@
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class InvalidChainsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.tip = None
self.blocks = {}
self.block_heights = {}
- self.extra_args = [["-whitelist=noban@127.0.0.1"]]
+ self.extra_args = [[
+ "-whitelist=noban@127.0.0.1",
+ "-automaticunparking=1",
+ ]]
def next_block(self, number):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height)
block = create_block(base_block_hash, coinbase, block_time)
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def set_tip(self, number: int):
"""
Move the tip back to a previous block.
"""
self.tip = self.blocks[number]
def run_test(self):
node = self.nodes[0]
peer = node.add_p2p_connection(P2PDataStore())
self.genesis_hash = int(node.getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
# shorthand for functions
block = self.next_block
# Reference for blocks mined in this test:
#
# 11 21 -- 221 - 222
# / / /
# 0 - 1 - 2 - 22 - 23 - 24 - 25
# \
# -- 12 - 13 - 14
# \
# -- 15 - 16 - 17 - 18
# Generate some valid blocks
peer.send_blocks_and_test([block(0), block(1), block(2)], node)
# Explicitly invalidate blocks 1 and 2
# See below for why we do this
node.invalidateblock(self.blocks[1].hash)
assert_equal(self.blocks[0].hash, node.getbestblockhash())
node.invalidateblock(self.blocks[2].hash)
assert_equal(self.blocks[0].hash, node.getbestblockhash())
# Mining on top of blocks 1 or 2 is rejected
self.set_tip(1)
peer.send_blocks_and_test(
[block(11)], node, success=False, force_send=True, reject_reason='bad-prevblk')
self.set_tip(2)
peer.send_blocks_and_test(
[block(21)], node, success=False, force_send=True, reject_reason='bad-prevblk')
# Reconsider block 2 to remove invalid status from *both* 1 and 2
# The goal is to test that block 1 is not retaining any internal state
# that prevents us from accepting blocks building on top of block 1
node.reconsiderblock(self.blocks[2].hash)
assert_equal(self.blocks[2].hash, node.getbestblockhash())
# Mining on the block 1 chain should be accepted
# (needs to mine two blocks because less-work chains are not processed)
self.set_tip(1)
peer.send_blocks_and_test([block(12), block(13)], node)
# Mining on the block 2 chain should still be accepted
# (needs to mine two blocks because less-work chains are not processed)
self.set_tip(2)
peer.send_blocks_and_test([block(22), block(221)], node)
# Mine more blocks from block 22 to be longest chain
self.set_tip(22)
peer.send_blocks_and_test([block(23), block(24)], node)
# Sanity checks
assert_equal(self.blocks[24].hash, node.getbestblockhash())
assert any(self.blocks[221].hash == chaintip["hash"]
for chaintip in node.getchaintips())
# Invalidating the block 2 chain should reject new blocks on that chain
node.invalidateblock(self.blocks[2].hash)
assert_equal(self.blocks[13].hash, node.getbestblockhash())
# Mining on the block 2 chain should be rejected
self.set_tip(24)
peer.send_blocks_and_test(
[block(25)], node, success=False, force_send=True, reject_reason='bad-prevblk')
# Continued mining on the block 1 chain is still ok
self.set_tip(13)
peer.send_blocks_and_test([block(14)], node)
# Mining on a once-valid chain forking from block 2's longest chain,
# which is now invalid, should also be rejected.
self.set_tip(221)
peer.send_blocks_and_test(
[block(222)], node, success=False, force_send=True, reject_reason='bad-prevblk')
self.log.info(
"Make sure that reconsidering a block behaves correctly when cousin chains (neither ancestors nor descendants) become available as a result")
# Reorg out 14 with four blocks.
self.set_tip(13)
peer.send_blocks_and_test(
[block(15), block(16), block(17), block(18)], node)
# Invalidate 17 (so 18 now has failed parent)
node.invalidateblock(self.blocks[17].hash)
assert_equal(self.blocks[16].hash, node.getbestblockhash())
# Invalidate 13 (so 14 and 15 and 16 now also have failed parent)
node.invalidateblock(self.blocks[13].hash)
assert_equal(self.blocks[12].hash, node.getbestblockhash())
# Reconsider 14, which should reconsider 13 and remove failed parent
# from our cousins 15 and 16 as well. Even though we reconsidered
# 14, we end up on a different chain because 15/16 have more work.
# (But, this shouldn't undo our invalidation of 17)
node.reconsiderblock(self.blocks[14].hash)
assert_equal(self.blocks[16].hash, node.getbestblockhash())
if __name__ == '__main__':
InvalidChainsTest().main()
diff --git a/test/functional/abc-mempool-coherence-on-activations.py b/test/functional/abc-mempool-coherence-on-activations.py
index e38ba8fb2..57976ce45 100755
--- a/test/functional/abc-mempool-coherence-on-activations.py
+++ b/test/functional/abc-mempool-coherence-on-activations.py
@@ -1,373 +1,376 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks the mempool coherence when changing validation rulesets,
which happens on (de)activations of network upgrades (forks).
We test the mempool coherence in 3 cases:
1) on activations, pre-fork-only transactions are evicted from the mempool,
while always-valid transactions remain.
2) on deactivations, post-fork-only transactions (unconfirmed or once
confirmed) are evicted from the mempool, while always-valid transactions
are reincluded.
3) on a reorg to a chain that deactivates and reactivates the fork,
post-fork-only and always-valid transactions (unconfirmed and/or once
confirmed on the shorter chain) are kept or reincluded in the mempool.
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
make_conform_to_ctor,
)
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
OP_CHECKSIG,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_FORKID,
CScript,
SignatureHashForkId,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
# ---Code specific to the activation used for this test---
# It might change depending on the activation code currently existing in the
# client software. We use the replay protection activation for this test.
ACTIVATION_TIME = 2000000000
EXTRA_ARG = "-replayprotectionactivationtime={}".format(ACTIVATION_TIME)
# simulation starts before activation
FIRST_BLOCK_TIME = ACTIVATION_TIME - 86400
# Expected RPC error when trying to send an activation specific spend txn.
RPC_EXPECTED_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)"
def create_fund_and_activation_specific_spending_tx(spend, pre_fork_only):
# Creates 2 transactions:
# 1) txfund: create outputs to be used by txspend. Must be valid pre-fork.
# 2) txspend: spending transaction that is specific to the activation
# being used and can be pre-fork-only or post-fork-only, depending on the
# function parameter.
# This specific implementation uses the replay protection mechanism to
# create transactions that are only valid before or after the fork.
# Generate a key pair to test
private_key = ECKey()
private_key.generate()
public_key = private_key.get_pubkey().get_bytes()
# Fund transaction
script = CScript([public_key, OP_CHECKSIG])
txfund = create_tx_with_script(
spend.tx, spend.n, b'', amount=50 * COIN, script_pub_key=script)
txfund.rehash()
# Activation specific spending tx
txspend = CTransaction()
txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE])))
txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))
# Sign the transaction
# Use forkvalues that create pre-fork-only or post-fork-only
# transactions.
forkvalue = 0 if pre_fork_only else 0xffdead
sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
sighash = SignatureHashForkId(
script, txspend, 0, sighashtype, 50 * COIN)
sig = private_key.sign_ecdsa(sighash) + \
bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
txspend.vin[0].scriptSig = CScript([sig])
txspend.rehash()
return txfund, txspend
def create_fund_and_pre_fork_only_tx(spend):
return create_fund_and_activation_specific_spending_tx(
spend, pre_fork_only=True)
def create_fund_and_post_fork_only_tx(spend):
return create_fund_and_activation_specific_spending_tx(
spend, pre_fork_only=False)
# ---Mempool coherence on activations test---
class PreviousSpendableOutput(object):
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n
class MempoolCoherenceOnActivationsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.block_heights = {}
self.tip = None
self.blocks = {}
- self.extra_args = [['-whitelist=noban@127.0.0.1',
- EXTRA_ARG,
- '-acceptnonstdtxn=1']]
+ self.extra_args = [[
+ '-whitelist=noban@127.0.0.1',
+ EXTRA_ARG,
+ '-acceptnonstdtxn=1',
+ '-automaticunparking=1',
+ ]]
def next_block(self, number):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = FIRST_BLOCK_TIME
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height)
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
# Do PoW, which is cheap on regnet
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def run_test(self):
node = self.nodes[0]
peer = node.add_p2p_connection(P2PDataStore())
node.setmocktime(ACTIVATION_TIME)
self.genesis_hash = int(node.getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
block.vtx.extend(new_transactions)
old_sha256 = block.sha256
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[
block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# send a txn to the mempool and check it was accepted
def send_transaction_to_mempool(tx):
tx_id = node.sendrawtransaction(ToHex(tx))
assert tx_id in node.getrawmempool()
# checks the mempool has exactly the same txns as in the provided list
def check_mempool_equal(txns):
assert set(node.getrawmempool()) == set(tx.hash for tx in txns)
# Create an always-valid chained transaction. It spends a
# scriptPub=OP_TRUE coin into another. Returns the transaction and its
# spendable output for further chaining.
def create_always_valid_chained_tx(spend):
tx = create_tx_with_script(
spend.tx, spend.n, b'', amount=spend.tx.vout[0].nValue - 1000, script_pub_key=CScript([OP_TRUE]))
tx.rehash()
return tx, PreviousSpendableOutput(tx, 0)
# shorthand
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
peer.send_blocks_and_test([self.tip], node)
# Now we need that block to mature so we can spend the coinbase.
maturity_blocks = []
for i in range(110):
block(5000 + i)
maturity_blocks.append(self.tip)
save_spendable_output()
peer.send_blocks_and_test(maturity_blocks, node)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(get_spendable_output())
# Create 2 pre-fork-only txns (tx_pre0, tx_pre1). Fund txns are valid
# pre-fork, so we can mine them right away.
txfund0, tx_pre0 = create_fund_and_pre_fork_only_tx(out[0])
txfund1, tx_pre1 = create_fund_and_pre_fork_only_tx(out[1])
# Create 2 post-fork-only txns (tx_post0, tx_post1). Fund txns are
# valid pre-fork, so we can mine them right away.
txfund2, tx_post0 = create_fund_and_post_fork_only_tx(out[2])
txfund3, tx_post1 = create_fund_and_post_fork_only_tx(out[3])
# Create blocks to activate the fork. Mine all funding transactions.
bfork = block(5555)
bfork.nTime = ACTIVATION_TIME - 1
update_block(5555, [txfund0, txfund1, txfund2, txfund3])
peer.send_blocks_and_test([self.tip], node)
for i in range(5):
peer.send_blocks_and_test([block(5200 + i)], node)
# Check we are just before the activation time
assert_equal(
node.getblockchaininfo()['mediantime'],
ACTIVATION_TIME - 1)
# We are just before the fork. Pre-fork-only and always-valid chained
# txns (tx_chain0, tx_chain1) are valid, post-fork-only txns are
# rejected.
send_transaction_to_mempool(tx_pre0)
send_transaction_to_mempool(tx_pre1)
tx_chain0, last_chained_output = create_always_valid_chained_tx(out[4])
tx_chain1, last_chained_output = create_always_valid_chained_tx(
last_chained_output)
send_transaction_to_mempool(tx_chain0)
send_transaction_to_mempool(tx_chain1)
assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
node.sendrawtransaction, ToHex(tx_post0))
assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
node.sendrawtransaction, ToHex(tx_post1))
check_mempool_equal([tx_chain0, tx_chain1, tx_pre0, tx_pre1])
# Activate the fork. Mine the 1st always-valid chained txn and a
# pre-fork-only txn.
block(5556)
update_block(5556, [tx_chain0, tx_pre0])
peer.send_blocks_and_test([self.tip], node)
forkblockid = node.getbestblockhash()
# Check we just activated the fork
assert_equal(node.getblockheader(forkblockid)['mediantime'],
ACTIVATION_TIME)
# Check mempool coherence when activating the fork. Pre-fork-only txns
# were evicted from the mempool, while always-valid txns remain.
# Evicted: tx_pre1
check_mempool_equal([tx_chain1])
# Post-fork-only and always-valid txns are accepted, pre-fork-only txn
# are rejected.
send_transaction_to_mempool(tx_post0)
send_transaction_to_mempool(tx_post1)
tx_chain2, _ = create_always_valid_chained_tx(last_chained_output)
send_transaction_to_mempool(tx_chain2)
assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
node.sendrawtransaction, ToHex(tx_pre1))
check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])
# Mine the 2nd always-valid chained txn and a post-fork-only txn.
block(5557)
update_block(5557, [tx_chain1, tx_post0])
peer.send_blocks_and_test([self.tip], node)
postforkblockid = node.getbestblockhash()
# The mempool contains the 3rd chained txn and a post-fork-only txn.
check_mempool_equal([tx_chain2, tx_post1])
# In the following we will testing block disconnections and reorgs.
# - tx_chain2 will always be retained in the mempool since it is always
# valid. Its continued presence shows that we are never simply
# clearing the entire mempool.
# - tx_post1 may be evicted from mempool if we land before the fork.
# - tx_post0 is in a block and if 'de-mined', it will either be evicted
# or end up in mempool depending if we land before/after the fork.
# - tx_pre0 is in a block and if 'de-mined', it will either be evicted
# or end up in mempool depending if we land after/before the fork.
# First we do a disconnection of the post-fork block, which is a
# normal disconnection that merely returns the block contents into
# the mempool -- nothing is lost.
node.invalidateblock(postforkblockid)
# In old mempool: tx_chain2, tx_post1
# Recovered from blocks: tx_chain1 and tx_post0.
# Lost from blocks: NONE
# Retained from old mempool: tx_chain2, tx_post1
# Evicted from old mempool: NONE
check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])
# Now, disconnect the fork block. This is a special disconnection
# that requires reprocessing the mempool due to change in rules.
node.invalidateblock(forkblockid)
# In old mempool: tx_chain1, tx_chain2, tx_post0, tx_post1
# Recovered from blocks: tx_chain0, tx_pre0
# Lost from blocks: NONE
# Retained from old mempool: tx_chain1, tx_chain2
# Evicted from old mempool: tx_post0, tx_post1
check_mempool_equal([tx_chain0, tx_chain1, tx_chain2, tx_pre0])
# Restore state
node.reconsiderblock(postforkblockid)
node.reconsiderblock(forkblockid)
send_transaction_to_mempool(tx_post1)
check_mempool_equal([tx_chain2, tx_post1])
# Test a reorg that crosses the fork.
# If such a reorg happens, most likely it will both start *and end*
# after the fork. We will test such a case here and make sure that
# post-fork-only transactions are not unnecessarily discarded from
# the mempool in such a reorg. Pre-fork-only transactions however can
# get lost.
# Set up a longer competing chain that doesn't confirm any of our txns.
# This starts after 5204, so it contains neither the forkblockid nor
# the postforkblockid from above.
self.tip = self.blocks[5204]
reorg_blocks = []
for i in range(3):
reorg_blocks.append(block(5900 + i))
# Perform the reorg
peer.send_blocks_and_test(reorg_blocks, node)
# reorg finishes after the fork
assert_equal(
node.getblockchaininfo()['mediantime'],
ACTIVATION_TIME + 2)
# In old mempool: tx_chain2, tx_post1
# Recovered from blocks: tx_chain0, tx_chain1, tx_post0
# Lost from blocks: tx_pre0
# Retained from old mempool: tx_chain2, tx_post1
# Evicted from old mempool: NONE
check_mempool_equal(
[tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1])
if __name__ == '__main__':
MempoolCoherenceOnActivationsTest().main()
diff --git a/test/functional/abc-parkedchain.py b/test/functional/abc-parkedchain.py
index 5a6da3bd2..36cd67496 100755
--- a/test/functional/abc-parkedchain.py
+++ b/test/functional/abc-parkedchain.py
@@ -1,237 +1,246 @@
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the parckblock and unparkblock RPC calls."""
+"""Test the parkblock and unparkblock RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ParkedChainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
- self.extra_args = [["-noparkdeepreorg",
- "-noautomaticunparking", "-whitelist=noban@127.0.0.1"], ["-maxreorgdepth=-1"]]
+ self.extra_args = [
+ [
+ "-noparkdeepreorg",
+ "-noautomaticunparking",
+ "-whitelist=noban@127.0.0.1",
+ ],
+ [
+ "-automaticunparking=1",
+ "-maxreorgdepth=-1"
+ ]
+ ]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# There should only be one chaintip, which is expected_tip
def only_valid_tip(self, expected_tip, other_tip_status=None):
node = self.nodes[0]
assert_equal(node.getbestblockhash(), expected_tip)
for tip in node.getchaintips():
if tip["hash"] == expected_tip:
assert_equal(tip["status"], "active")
else:
assert_equal(tip["status"], other_tip_status)
def run_test(self):
def wait_for_tip(node, tip):
def check_tip():
return node.getbestblockhash() == tip
self.wait_until(check_tip)
node = self.nodes[0]
parking_node = self.nodes[1]
self.log.info("Test chain parking...")
self.generate(node, 10, sync_fun=self.no_op)
tip = node.getbestblockhash()
self.generate(node, 1, sync_fun=self.no_op)
block_to_park = node.getbestblockhash()
self.generate(node, 10)
parked_tip = node.getbestblockhash()
# Let's park the chain.
assert parked_tip != tip
assert block_to_park != tip
assert block_to_park != parked_tip
node.parkblock(block_to_park)
assert_equal(node.getbestblockhash(), tip)
# When the chain is unparked, the node reorg into its original chain.
node.unparkblock(parked_tip)
assert_equal(node.getbestblockhash(), parked_tip)
# Parking and then unparking a block should not change its validity,
# and invaliding and reconsidering a block should not change its
# parked state. See the following test cases:
self.log.info("Test invalidate, park, unpark, reconsider...")
self.generate(node, 1, sync_fun=self.no_op)
tip = node.getbestblockhash()
self.generate(node, 1, sync_fun=self.no_op)
bad_tip = node.getbestblockhash()
# Generate an extra block to check that children are invalidated as
# expected and do not produce dangling chaintips
self.generate(node, 1)
good_tip = node.getbestblockhash()
node.invalidateblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.parkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.unparkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.reconsiderblock(bad_tip)
self.only_valid_tip(good_tip)
self.log.info("Test park, invalidate, reconsider, unpark")
self.generate(node, 1, sync_fun=self.no_op)
tip = node.getbestblockhash()
self.generate(node, 1, sync_fun=self.no_op)
bad_tip = node.getbestblockhash()
self.generate(node, 1)
good_tip = node.getbestblockhash()
node.parkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="parked")
node.invalidateblock(bad_tip)
# NOTE: Intuitively, other_tip_status would be "invalid", but because
# only valid (unparked) chains are walked, child blocks' statuses are
# not updated, so the "parked" state remains.
self.only_valid_tip(tip, other_tip_status="parked")
node.reconsiderblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="parked")
node.unparkblock(bad_tip)
self.only_valid_tip(good_tip)
self.log.info("Test invalidate, park, reconsider, unpark...")
self.generate(node, 1, sync_fun=self.no_op)
tip = node.getbestblockhash()
self.generate(node, 1, sync_fun=self.no_op)
bad_tip = node.getbestblockhash()
self.generate(node, 1)
good_tip = node.getbestblockhash()
node.invalidateblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.parkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.reconsiderblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="parked")
node.unparkblock(bad_tip)
self.only_valid_tip(good_tip)
self.log.info("Test park, invalidate, unpark, reconsider")
self.generate(node, 1, sync_fun=self.no_op)
tip = node.getbestblockhash()
self.generate(node, 1, sync_fun=self.no_op)
bad_tip = node.getbestblockhash()
self.generate(node, 1)
good_tip = node.getbestblockhash()
node.parkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="parked")
node.invalidateblock(bad_tip)
# NOTE: Intuitively, other_tip_status would be "invalid", but because
# only valid (unparked) chains are walked, child blocks' statuses are
# not updated, so the "parked" state remains.
self.only_valid_tip(tip, other_tip_status="parked")
node.unparkblock(bad_tip)
self.only_valid_tip(tip, other_tip_status="invalid")
node.reconsiderblock(bad_tip)
self.only_valid_tip(good_tip)
# To get ready for next testset, make sure both nodes are in sync.
wait_for_tip(parking_node, good_tip)
assert_equal(node.getbestblockhash(), parking_node.getbestblockhash())
# Wait for node 1 to park the chain.
def wait_for_parked_block(block):
def check_block():
for tip in parking_node.getchaintips():
if tip["hash"] == block:
assert tip["status"] != "active"
return tip["status"] == "parked"
return False
self.wait_until(check_block)
def check_reorg_protection(depth, extra_blocks):
self.log.info(
"Test deep reorg parking, {} block deep".format(depth))
# Invalidate the tip on node 0, so it doesn't follow node 1.
node.invalidateblock(node.getbestblockhash())
# Mine block to create a fork of proper depth
self.generatetoaddress(parking_node,
nblocks=depth - 1,
address=parking_node.getnewaddress(
label='coinbase'),
sync_fun=self.no_op,
)
self.generatetoaddress(node,
nblocks=depth,
address=node.getnewaddress(
label='coinbase'),
sync_fun=self.no_op,
)
# extra block should now find themselves parked
for _ in range(extra_blocks):
self.generate(node, 1, sync_fun=self.no_op)
wait_for_parked_block(node.getbestblockhash())
# If we mine one more block, the node reorgs (generate also waits
# for chain sync).
self.generate(node, 1)
check_reorg_protection(1, 0)
check_reorg_protection(2, 0)
check_reorg_protection(3, 1)
check_reorg_protection(4, 4)
check_reorg_protection(5, 5)
check_reorg_protection(6, 6)
check_reorg_protection(100, 100)
# try deep reorg with a log check.
with parking_node.assert_debug_log(["Park block"]):
check_reorg_protection(3, 1)
self.log.info(
"Accepting many blocks at once (possibly out of order) should not park if there is no reorg.")
# rewind one block to make a reorg that is shallow.
node.invalidateblock(parking_node.getbestblockhash())
# generate a ton of blocks at once.
try:
with parking_node.assert_debug_log(["Park block"]):
# Also waits for chain sync
self.generatetoaddress(node,
nblocks=20,
address=node.getnewaddress(label='coinbase'))
except AssertionError as exc:
# good, we want an absence of "Park block" messages
assert "does not partially match log" in exc.args[0]
else:
raise AssertionError("Parked block when there was no deep reorg")
self.log.info("Test that unparking works when -parkdeepreorg=0")
# Set up parking node height = fork + 4, node height = fork + 5
node.invalidateblock(node.getbestblockhash())
self.generate(parking_node, 3, sync_fun=self.no_op)
self.generatetoaddress(node,
nblocks=5,
address=node.getnewaddress(label='coinbase'),
sync_fun=self.no_op,
)
wait_for_parked_block(node.getbestblockhash())
# Restart the parking node without parkdeepreorg.
- self.restart_node(1, ["-parkdeepreorg=0"])
+ self.restart_node(1, self.extra_args[1] + ["-parkdeepreorg=0"])
parking_node = self.nodes[1]
self.connect_nodes(node.index, parking_node.index)
# The other chain should still be marked 'parked'.
wait_for_parked_block(node.getbestblockhash())
# Three more blocks is not enough to unpark. Even though its PoW is
# larger, we are still following the delayed-unparking rules.
self.generate(node, 3, sync_fun=self.no_op)
wait_for_parked_block(node.getbestblockhash())
# Final block pushes over the edge, and should unpark (generate also
# waits for chain sync).
self.generate(node, 1)
# Do not append tests after this point without restarting node again.
# Parking node is no longer parking.
if __name__ == '__main__':
ParkedChainTest().main()
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index c96d2dcef..f339ab39c 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -1,502 +1,503 @@
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
import time
from test_framework.blocktools import create_block
from test_framework.messages import (
XEC,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
FromHex,
ToHex,
)
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
satoshi_round,
)
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31)
# this means use time (0 means height)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22)
# this is a bit-shift
SEQUENCE_LOCKTIME_GRANULARITY = 9
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-noparkdeepreorg",
"-maxreorgdepth=-1",
"-acceptnonstdtxn=1",
],
[
"-acceptnonstdtxn=0",
- "-maxreorgdepth=-1"
+ "-maxreorgdepth=-1",
+ "-automaticunparking=1",
]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
    """Drive the BIP68 test phases in order: pre-activation relay checks,
    then CSV activation, then standardness of nVersion=2 transactions."""
    self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
    # Generate some coins
    self.generate(self.nodes[0], 110)
    self.log.info("Running test disable flag")
    self.test_disable_flag()
    self.log.info("Running test sequence-lock-confirmed-inputs")
    self.test_sequence_lock_confirmed_inputs()
    self.log.info("Running test sequence-lock-unconfirmed-inputs")
    self.test_sequence_lock_unconfirmed_inputs()
    self.log.info(
        "Running test BIP68 not consensus before versionbits activation")
    self.test_bip68_not_consensus()
    self.log.info("Activating BIP68 (and 112/113)")
    self.activateCSV()
    # Use the framework logger instead of bare print() so these messages
    # are captured in the test log like every other message in this method.
    self.log.info("Verifying nVersion=2 transactions are standard.")
    self.log.info("Note that with current versions of bitcoin software, nVersion=2 transactions are always standard (independent of BIP68 activation status).")
    self.test_version2_relay()
    self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
    """BIP68 must be ignored when the tx version is 1 or when the
    sequence-lock disable bit (1 << 31) is set in nSequence."""
    # Create some unconfirmed inputs
    new_addr = self.nodes[0].getnewaddress()
    # send 2,000,000 XEC
    self.nodes[0].sendtoaddress(new_addr, 2000000)
    # listunspent(0, 0) returns only outputs with zero confirmations.
    utxos = self.nodes[0].listunspent(0, 0)
    assert len(utxos) > 0
    utxo = utxos[0]
    tx1 = CTransaction()
    value = int(satoshi_round(utxo["amount"] - self.relayfee) * XEC)
    # Check that the disable flag disables relative locktime.
    # If sequence locks were used, this would require 1 block for the
    # input to mature.
    sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
    tx1.vin = [
        CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
    tx1.vout = [CTxOut(value, CScript([b'a']))]
    pad_tx(tx1)
    tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[
        "hex"]
    tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
    tx1_id = int(tx1_id, 16)
    # This transaction will enable sequence-locks, so this transaction should
    # fail
    tx2 = CTransaction()
    tx2.nVersion = 2
    # Clear the disable bit; an nSequence of 1 now demands one confirmation
    # on the (still unconfirmed) tx1 input.
    sequence_value = sequence_value & 0x7fffffff
    tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
    tx2.vout = [CTxOut(int(value - self.relayfee * XEC), CScript([b'a']))]
    pad_tx(tx2)
    tx2.rehash()
    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, ToHex(tx2))
    # Setting the version back down to 1 should disable the sequence lock,
    # so this should be accepted.
    tx2.nVersion = 1
    self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
    """Return the median-time-past of the block `confirmations` blocks
    behind the current tip of node 0."""
    node = self.nodes[0]
    target_height = node.getblockcount() - confirmations
    header = node.getblockheader(node.getblockhash(target_height))
    return header["mediantime"]
# Test that sequence locks are respected for transactions spending
# confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
    """Fuzz sequence locks over confirmed inputs: build 400 random
    transactions mixing height- and time-based locks and check that
    mempool acceptance matches whether every lock is satisfied."""
    # Hoisted out of the funding loop below: the original `import random`
    # lived inside the `while len(...) < 200` body, so `random` was never
    # bound when the wallet already held 200+ utxos, causing a NameError
    # at the `random.randint` call in the main loop further down.
    import random
    # Create lots of confirmed utxos, and use them to generate lots of random
    # transactions.
    max_outputs = 50
    addresses = []
    while len(addresses) < max_outputs:
        addresses.append(self.nodes[0].getnewaddress())
    while len(self.nodes[0].listunspent()) < 200:
        random.shuffle(addresses)
        num_outputs = random.randint(1, max_outputs)
        outputs = {}
        for i in range(num_outputs):
            outputs[addresses[i]] = random.randint(1, 20) * 10000
        self.nodes[0].sendmany("", outputs)
        self.generate(self.nodes[0], 1)
    utxos = self.nodes[0].listunspent()
    # Try creating a lot of random transactions.
    # Each time, choose a random number of inputs, and randomly set
    # some of those inputs to be sequence locked (and randomly choose
    # between height/time locking). Small random chance of making the locks
    # all pass.
    for _ in range(400):
        # Randomly choose up to 10 inputs
        num_inputs = random.randint(1, 10)
        random.shuffle(utxos)
        # Track whether any sequence locks used should fail
        should_pass = True
        # Track whether this transaction was built with sequence locks
        using_sequence_locks = False
        tx = CTransaction()
        tx.nVersion = 2
        value = 0
        for j in range(num_inputs):
            # this disables sequence locks
            sequence_value = 0xfffffffe
            # 50% chance we enable sequence locks
            if random.randint(0, 1):
                using_sequence_locks = True
                # 10% of the time, make the input sequence value pass
                input_will_pass = (random.randint(1, 10) == 1)
                sequence_value = utxos[j]["confirmations"]
                if not input_will_pass:
                    sequence_value += 1
                    should_pass = False
                # Figure out what the median-time-past was for the confirmed input
                # Note that if an input has N confirmations, we're going back N blocks
                # from the tip so that we're looking up MTP of the block
                # PRIOR to the one the input appears in, as per the BIP68
                # spec.
                orig_time = self.get_median_time_past(
                    utxos[j]["confirmations"])
                # MTP of the tip
                cur_time = self.get_median_time_past(0)
                # can only timelock this input if it's not too old --
                # otherwise use height
                can_time_lock = True
                if ((cur_time - orig_time)
                        >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
                    can_time_lock = False
                # if time-lockable, then 50% chance we make this a time
                # lock
                if random.randint(0, 1) and can_time_lock:
                    # Find first time-lock value that fails, or latest one
                    # that succeeds
                    time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
                    if input_will_pass and time_delta > cur_time - orig_time:
                        sequence_value = (
                            (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
                    elif (not input_will_pass and time_delta <= cur_time - orig_time):
                        sequence_value = (
                            (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1
                    sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
            tx.vin.append(
                CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
            value += utxos[j]["amount"] * XEC
        # Overestimate the size of the tx - signatures should be less than
        # 120 bytes, and leave 50 for the output
        tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50
        tx.vout.append(
            CTxOut(int(value - self.relayfee * tx_size * XEC / 1000), CScript([b'a'])))
        rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[
            "hex"]
        if (using_sequence_locks and not should_pass):
            # This transaction should be rejected
            assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                    self.nodes[0].sendrawtransaction, rawtx)
        else:
            # This raw transaction should be accepted
            self.nodes[0].sendrawtransaction(rawtx)
            utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
    """Sequence locks on unconfirmed inputs require an nSequence
    height/time value of 0 to be accepted; also verify BIP68-invalid
    transactions are evicted from the mempool after a reorg."""
    # Store height so we can easily reset the chain at the end of the test
    cur_height = self.nodes[0].getblockcount()
    # Create a mempool tx.
    txid = self.nodes[0].sendtoaddress(
        self.nodes[0].getnewaddress(), 2000000)
    tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
    tx1.rehash()
    # As the fees are calculated prior to the transaction being signed,
    # there is some uncertainty that calculate fee provides the correct
    # minimal fee. Since regtest coins are free, let's go ahead and
    # increase the fee by an order of magnitude to ensure this test
    # passes.
    fee_multiplier = 10
    # Anyone-can-spend mempool tx.
    # Sequence lock of 0 should pass.
    tx2 = CTransaction()
    tx2.nVersion = 2
    tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
    tx2.vout = [
        CTxOut(int(0), CScript([b'a']))]
    tx2.vout[0].nValue = tx1.vout[0].nValue - \
        fee_multiplier * self.nodes[0].calculate_fee(tx2)
    tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
    tx2 = FromHex(tx2, tx2_raw)
    tx2.rehash()
    self.nodes[0].sendrawtransaction(tx2_raw)
    # Create a spend of the 0th output of orig_tx with a sequence lock
    # of 1, and test what happens when submitting.
    # orig_tx.vout[0] must be an anyone-can-spend output
    def test_nonzero_locks(orig_tx, node, use_height_lock):
        sequence_value = 1
        if not use_height_lock:
            # Time-based lock of one 512s unit instead of one block.
            sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
        tx = CTransaction()
        tx.nVersion = 2
        tx.vin = [
            CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
        tx.vout = [
            CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))]
        pad_tx(tx)
        tx.rehash()
        if (orig_tx.hash in node.getrawmempool()):
            # sendrawtransaction should fail if the tx is in the mempool
            assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                    node.sendrawtransaction, ToHex(tx))
        else:
            # sendrawtransaction should succeed if the tx is not in the
            # mempool
            node.sendrawtransaction(ToHex(tx))
        return tx
    test_nonzero_locks(
        tx2, self.nodes[0], use_height_lock=True)
    test_nonzero_locks(
        tx2, self.nodes[0], use_height_lock=False)
    # Now mine some blocks, but make sure tx2 doesn't get mined.
    # Use prioritisetransaction to lower the effective feerate to 0
    self.nodes[0].prioritisetransaction(
        txid=tx2.hash, fee_delta=-fee_multiplier * self.nodes[0].calculate_fee(tx2))
    cur_time = int(time.time())
    for _ in range(10):
        self.nodes[0].setmocktime(cur_time + 600)
        self.generate(self.nodes[0], 1, sync_fun=self.no_op)
        cur_time += 600
    # tx2 must still be unconfirmed for the nonzero-lock spends to fail.
    assert tx2.hash in self.nodes[0].getrawmempool()
    test_nonzero_locks(
        tx2, self.nodes[0], use_height_lock=True)
    test_nonzero_locks(
        tx2, self.nodes[0], use_height_lock=False)
    # Mine tx2, and then try again
    self.nodes[0].prioritisetransaction(
        txid=tx2.hash, fee_delta=fee_multiplier * self.nodes[0].calculate_fee(tx2))
    # Advance the time on the node so that we can test timelocks
    self.nodes[0].setmocktime(cur_time + 600)
    # Save block template now to use for the reorg later
    tmpl = self.nodes[0].getblocktemplate()
    self.generate(self.nodes[0], 1)
    assert tx2.hash not in self.nodes[0].getrawmempool()
    # Now that tx2 is not in the mempool, a sequence locked spend should
    # succeed
    tx3 = test_nonzero_locks(
        tx2, self.nodes[0], use_height_lock=False)
    assert tx3.hash in self.nodes[0].getrawmempool()
    self.generate(self.nodes[0], 1)
    assert tx3.hash not in self.nodes[0].getrawmempool()
    # One more test, this time using height locks
    tx4 = test_nonzero_locks(
        tx3, self.nodes[0], use_height_lock=True)
    assert tx4.hash in self.nodes[0].getrawmempool()
    # Now try combining confirmed and unconfirmed inputs
    tx5 = test_nonzero_locks(
        tx4, self.nodes[0], use_height_lock=True)
    # tx5 spends the unconfirmed tx4 with nSequence=1, so it is rejected.
    assert tx5.hash not in self.nodes[0].getrawmempool()
    utxos = self.nodes[0].listunspent()
    tx5.vin.append(
        CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
    tx5.vout[0].nValue += int(utxos[0]["amount"] * XEC)
    raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, raw_tx5)
    # Test mempool-BIP68 consistency after reorg
    #
    # State of the transactions in the last blocks:
    # ... -> [ tx2 ] -> [ tx3 ]
    # tip-1 tip
    # And currently tx4 is in the mempool.
    #
    # If we invalidate the tip, tx3 should get added to the mempool, causing
    # tx4 to be removed (fails sequence-lock).
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    assert tx4.hash not in self.nodes[0].getrawmempool()
    assert tx3.hash in self.nodes[0].getrawmempool()
    # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
    # diagram above).
    # This would cause tx2 to be added back to the mempool, which in turn causes
    # tx3 to be removed.
    for i in range(2):
        block = create_block(tmpl=tmpl, ntime=cur_time)
        block.rehash()
        block.solve()
        tip = block.sha256
        # First block only ties the old tip ('inconclusive'); the second
        # block makes the new chain strictly longer (None == accepted).
        assert_equal(
            None if i == 1 else 'inconclusive',
            self.nodes[0].submitblock(
                ToHex(block)))
        tmpl = self.nodes[0].getblocktemplate()
        tmpl['previousblockhash'] = f"{tip:x}"
        tmpl['transactions'] = []
        cur_time += 1
    mempool = self.nodes[0].getrawmempool()
    assert tx3.hash not in mempool
    assert tx2.hash in mempool
    # Reset the chain and get rid of the mocktimed-blocks
    self.nodes[0].setmocktime(0)
    self.nodes[0].invalidateblock(
        self.nodes[0].getblockhash(cur_height + 1))
    self.generate(self.nodes[0], 10, sync_fun=self.no_op)
def get_csv_status(self):
    """Return True once the chain has reached the CSV activation
    height (576 on regtest)."""
    return self.nodes[0].getblockchaininfo()['blocks'] >= 576
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
    """Before versionbits activation, BIP68 is policy-only: relay rejects
    a violating tx, but a block containing it still becomes the tip."""
    assert_equal(self.get_csv_status(), False)
    txid = self.nodes[0].sendtoaddress(
        self.nodes[0].getnewaddress(), 2000000)
    tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
    tx1.rehash()
    # Make an anyone-can-spend transaction
    tx2 = CTransaction()
    # nVersion=1 keeps tx2 itself exempt from BIP68.
    tx2.nVersion = 1
    tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
    tx2.vout = [
        CTxOut(int(tx1.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))]
    # sign tx2
    tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
    tx2 = FromHex(tx2, tx2_raw)
    pad_tx(tx2)
    tx2.rehash()
    self.nodes[0].sendrawtransaction(ToHex(tx2))
    # Now make an invalid spend of tx2 according to BIP68
    # 100 block relative locktime
    sequence_value = 100
    tx3 = CTransaction()
    tx3.nVersion = 2
    tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
    tx3.vout = [
        CTxOut(int(tx2.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))]
    pad_tx(tx3)
    tx3.rehash()
    # Relay policy enforces BIP68 even pre-activation, so this is rejected.
    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, ToHex(tx3))
    # make a block that violates bip68; ensure that the tip updates
    block = create_block(tmpl=self.nodes[0].getblocktemplate())
    # NOTE(review): txs appear to be sorted by txid for block ordering
    # rules -- confirm against the block-assembly requirements.
    block.vtx.extend(
        sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id()))
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()
    # submitblock returning None means the block was accepted.
    assert_equal(None, self.nodes[0].submitblock(ToHex(block)))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
    """Mine up to the CSV activation height (576), verify the activation
    status flips, then invalidate the activating block so the chain sits
    exactly at the activation point, and resync both nodes."""
    activation_height = 576
    node = self.nodes[0]
    blocks_remaining = activation_height - node.getblockcount()
    assert_greater_than(blocks_remaining, 1)
    # Stop one block short of activation.
    self.generate(node, blocks_remaining - 1, sync_fun=self.no_op)
    assert_equal(self.get_csv_status(), False)
    self.disconnect_nodes(0, 1)
    self.generate(node, 1, sync_fun=self.no_op)
    assert_equal(self.get_csv_status(), True)
    # We now have a block with CSV active, but we want to sit at the
    # activation point itself, so step the tip back by one.
    node.invalidateblock(node.getbestblockhash())
    self.connect_nodes(0, 1)
    self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
    """The standardness-enforcing node (nodes[1]) must relay an
    nVersion=2 transaction."""
    node = self.nodes[1]
    raw = node.createrawtransaction([], {node.getnewaddress(): 1000000.0})
    funded_hex = node.fundrawtransaction(raw)['hex']
    tx = FromHex(CTransaction(), funded_hex)
    tx.nVersion = 2
    signed_hex = node.signrawtransactionwithwallet(ToHex(tx))["hex"]
    node.sendrawtransaction(signed_hex)
# Standard functional-test entry point: run the test when executed directly.
if __name__ == '__main__':
    BIP68Test().main()
diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py
index 660d162a1..0216186bd 100755
--- a/test/functional/feature_coinstatsindex.py
+++ b/test/functional/feature_coinstatsindex.py
@@ -1,350 +1,354 @@
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinstatsindex across nodes.
Test that the values returned by gettxoutsetinfo are consistent
between a node running the coinstatsindex and a node without
the index.
"""
from decimal import Decimal
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
XEC,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.script import OP_FALSE, OP_RETURN, CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, try_rpc
class CoinStatsIndexTest(BitcoinTestFramework):
def set_test_params(self):
    # Node 0 runs without the index, node 1 with -coinstatsindex, so
    # gettxoutsetinfo results can be compared between the two.
    # (Diff hunk below adds -automaticunparking=1 to node 0.)
    self.setup_clean_chain = True
    self.num_nodes = 2
    self.supports_cli = False
    self.extra_args = [
- [],
- ["-coinstatsindex"]
+ [
+ "-automaticunparking=1",
+ ],
+ [
+ "-coinstatsindex",
+ ]
    ]
def skip_test_if_missing_module(self):
    # The test funds transactions through wallet RPCs, so a wallet is
    # required.
    self.skip_if_no_wallet()
def run_test(self):
    """Run the coinstatsindex sub-tests in their required order."""
    for subtest in (
        self._test_coin_stats_index,
        self._test_use_index_option,
        self._test_reorg_index,
        self._test_index_rejects_hash_serialized,
    ):
        subtest()
def block_sanity_check(self, block_info):
    """Check value conservation for one block's stats:
    spent prevouts + subsidy == new outputs + coinbase + unspendable."""
    subsidy = 50_000_000
    value_in = block_info['prevout_spent'] + subsidy
    value_out = (block_info['new_outputs_ex_coinbase']
                 + block_info['coinbase']
                 + block_info['unspendable'])
    assert_equal(value_in, value_out)
def _test_coin_stats_index(self):
    """Compare gettxoutsetinfo with and without -coinstatsindex across a
    variety of blocks, check the index-only fields for exact amounts, and
    verify the index survives a node restart."""
    node = self.nodes[0]
    index_node = self.nodes[1]
    # Both none and muhash options allow the usage of the index
    index_hash_options = ['none', 'muhash']
    # Generate a normal transaction and mine it
    self.generate(node, 101)
    address = self.nodes[0].get_deterministic_priv_key().address
    node.sendtoaddress(
        address=address,
        amount=10_000_000,
        subtractfeefromamount=True)
    self.generate(node, 1)
    self.log.info(
        "Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option")
    self.wait_until(lambda: not try_rpc(-32603,
                                        "Unable to read UTXO set", node.gettxoutsetinfo))
    res0 = node.gettxoutsetinfo('none')
    # The fields 'disk_size' and 'transactions' do not exist on the index
    del res0['disk_size'], res0['transactions']
    self.wait_until(lambda: not try_rpc(-32603,
                                        "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo,
                                        'muhash'))
    for hash_option in index_hash_options:
        res1 = index_node.gettxoutsetinfo(hash_option)
        # The fields 'block_info' and 'total_unspendable_amount' only exist
        # on the index
        del res1['block_info'], res1['total_unspendable_amount']
        res1.pop('muhash', None)
        # Everything left should be the same
        assert_equal(res1, res0)
    self.log.info(
        "Test that gettxoutsetinfo() can get fetch data on specific "
        "heights with index")
    # Generate a new tip
    self.generate(node, 5)
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo,
                                        'muhash'))
    for hash_option in index_hash_options:
        # Fetch old stats by height
        res2 = index_node.gettxoutsetinfo(hash_option, 102)
        del res2['block_info'], res2['total_unspendable_amount']
        res2.pop('muhash', None)
        assert_equal(res0, res2)
        # Fetch old stats by hash
        res3 = index_node.gettxoutsetinfo(hash_option, res0['bestblock'])
        del res3['block_info'], res3['total_unspendable_amount']
        res3.pop('muhash', None)
        assert_equal(res0, res3)
        # It does not work without coinstatsindex
        assert_raises_rpc_error(
            -8, "Querying specific block heights requires coinstatsindex",
            node.gettxoutsetinfo, hash_option, 102)
    self.log.info("Test gettxoutsetinfo() with index and verbose flag")
    for hash_option in index_hash_options:
        # Genesis block is unspendable
        res4 = index_node.gettxoutsetinfo(hash_option, 0)
        assert_equal(res4['total_unspendable_amount'], 50_000_000)
        assert_equal(res4['block_info'], {
            'unspendable': 50_000_000,
            'prevout_spent': 0,
            'new_outputs_ex_coinbase': 0,
            'coinbase': 0,
            'unspendables': {
                'genesis_block': 50_000_000,
                'bip30': 0,
                'scripts': 0,
                'unclaimed_rewards': 0
            }
        })
        self.block_sanity_check(res4['block_info'])
        # Test an older block height that included a normal tx
        res5 = index_node.gettxoutsetinfo(hash_option, 102)
        assert_equal(res5['total_unspendable_amount'], 50_000_000)
        assert_equal(res5['block_info'], {
            'unspendable': 0,
            'prevout_spent': 50_000_000,
            'new_outputs_ex_coinbase': Decimal('49999997.75'),
            'coinbase': Decimal('50000002.25'),
            'unspendables': {
                'genesis_block': 0,
                'bip30': 0,
                'scripts': 0,
                'unclaimed_rewards': 0
            }
        })
        self.block_sanity_check(res5['block_info'])
    # Generate and send a normal tx with two outputs
    tx1_inputs = []
    tx1_outputs = {self.nodes[0].getnewaddress(): 21_000_000,
                   self.nodes[0].getnewaddress(): 42_000_000}
    raw_tx1 = self.nodes[0].createrawtransaction(tx1_inputs, tx1_outputs)
    funded_tx1 = self.nodes[0].fundrawtransaction(raw_tx1)
    signed_tx1 = self.nodes[0].signrawtransactionwithwallet(
        funded_tx1['hex'])
    tx1_txid = self.nodes[0].sendrawtransaction(signed_tx1['hex'])
    # Find the right position of the 21 000 000 XEC output
    tx1_final = self.nodes[0].gettransaction(tx1_txid)
    for output in tx1_final['details']:
        if output['amount'] == Decimal(
                '21000000.00') and output['category'] == 'receive':
            n = output['vout']
    # Generate and send another tx with an OP_RETURN output (which is
    # unspendable)
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(int(tx1_txid, 16), n), b''))
    tx2.vout.append(CTxOut(int(20_990_000 * XEC),
                           CScript([OP_RETURN] + [OP_FALSE] * 30)))
    tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))['hex']
    self.nodes[0].sendrawtransaction(tx2_hex)
    # Include both txs in a block
    self.generate(self.nodes[0], 1)
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    for hash_option in index_hash_options:
        # Check all amounts were registered correctly
        res6 = index_node.gettxoutsetinfo(hash_option, 108)
        assert_equal(
            res6['total_unspendable_amount'],
            Decimal('70990000.00'))
        assert_equal(res6['block_info'], {
            'unspendable': Decimal('20990000.00'),
            'prevout_spent': 111_000_000,
            'new_outputs_ex_coinbase': Decimal('89999995.94'),
            'coinbase': Decimal('50010004.06'),
            'unspendables': {
                'genesis_block': 0,
                'bip30': 0,
                'scripts': Decimal('20990000.00'),
                'unclaimed_rewards': 0
            }
        })
        self.block_sanity_check(res6['block_info'])
    # Create a coinbase that does not claim full subsidy and also
    # has two outputs
    cb = create_coinbase(109, nValue=35_000_000)
    cb.vout.append(CTxOut(5_000_000 * XEC, CScript([OP_FALSE])))
    cb.rehash()
    # Generate a block that includes previous coinbase
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblock(tip)['time'] + 1
    block = create_block(int(tip, 16), cb, block_time)
    block.solve()
    self.nodes[0].submitblock(ToHex(block))
    self.sync_all()
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    for hash_option in index_hash_options:
        res7 = index_node.gettxoutsetinfo(hash_option, 109)
        assert_equal(
            res7['total_unspendable_amount'],
            Decimal('80990000.00'))
        assert_equal(res7['block_info'], {
            'unspendable': 10_000_000,
            'prevout_spent': 0,
            'new_outputs_ex_coinbase': 0,
            'coinbase': 40_000_000,
            'unspendables': {
                'genesis_block': 0,
                'bip30': 0,
                'scripts': 0,
                'unclaimed_rewards': 10_000_000
            }
        })
        self.block_sanity_check(res7['block_info'])
    self.log.info("Test that the index is robust across restarts")
    res8 = index_node.gettxoutsetinfo('muhash')
    self.restart_node(1, extra_args=self.extra_args[1])
    res9 = index_node.gettxoutsetinfo('muhash')
    assert_equal(res8, res9)
    self.generate(index_node, 1, sync_fun=self.no_op)
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    res10 = index_node.gettxoutsetinfo('muhash')
    assert res8['txouts'] < res10['txouts']
def _test_use_index_option(self):
    """gettxoutsetinfo with use_index=False on the index node must match
    the output of a node that has no index at all."""
    self.log.info("Test use_index option for nodes running the index")
    self.connect_nodes(0, 1)
    plain_node, indexed_node = self.nodes[0], self.nodes[1]
    plain_node.waitforblockheight(110)
    baseline = plain_node.gettxoutsetinfo('muhash')
    no_index_res = indexed_node.gettxoutsetinfo(
        hash_type='muhash', hash_or_height=None, use_index=False)
    # disk_size is node-specific, so exclude it from the comparison.
    del baseline['disk_size']
    del no_index_res['disk_size']
    assert_equal(baseline, no_index_res)
def _test_reorg_index(self):
    """The index must track reorgs: heights always resolve to the active
    chain, while stale blocks stay queryable by hash."""
    self.log.info("Test that index can handle reorgs")
    # Generate two block, let the index catch up, then invalidate the
    # blocks
    index_node = self.nodes[1]
    reorg_blocks = self.generatetoaddress(index_node,
                                          2, index_node.getnewaddress())
    reorg_block = reorg_blocks[1]
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    res_invalid = index_node.gettxoutsetinfo('muhash')
    index_node.invalidateblock(reorg_blocks[0])
    assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110)
    # Add two new blocks
    block = self.generate(index_node, 2, sync_fun=self.no_op)[1]
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    # use_index=False computes from the live UTXO set instead of the index.
    res = index_node.gettxoutsetinfo(
        hash_type='muhash', hash_or_height=None, use_index=False)
    # Test that the result of the reorged block is not returned for its old
    # block height
    res2 = index_node.gettxoutsetinfo(
        hash_type='muhash', hash_or_height=112)
    assert_equal(res["bestblock"], block)
    assert_equal(res["muhash"], res2["muhash"])
    assert res["muhash"] != res_invalid["muhash"]
    # Test that requesting reorged out block by hash is still returning
    # correct results
    res_invalid2 = index_node.gettxoutsetinfo(
        hash_type='muhash', hash_or_height=reorg_block)
    assert_equal(res_invalid2["muhash"], res_invalid["muhash"])
    assert res["muhash"] != res_invalid2["muhash"]
    # Add another block, so we don't depend on reconsiderblock remembering
    # which blocks were touched by invalidateblock
    self.generate(index_node, 1)
    # Ensure that removing and re-adding blocks yields consistent results
    block = index_node.getblockhash(99)
    index_node.invalidateblock(block)
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    index_node.reconsiderblock(block)
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        index_node.gettxoutsetinfo, 'muhash'))
    res3 = index_node.gettxoutsetinfo(
        hash_type='muhash', hash_or_height=112)
    assert_equal(res2, res3)
    self.log.info(
        "Test that a node aware of stale blocks syncs them as well")
    node = self.nodes[0]
    # Ensure the node is aware of a stale block prior to restart
    node.getblock(reorg_block)
    self.restart_node(0, ["-coinstatsindex"])
    self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set",
                                        node.gettxoutsetinfo, 'muhash'))
    assert_raises_rpc_error(-32603, "Unable to read UTXO set",
                            node.gettxoutsetinfo, 'muhash', reorg_block)
def _test_index_rejects_hash_serialized(self):
    """The hash_serialized hash type must be rejected whenever a specific
    block (hash_or_height) is requested, regardless of use_index."""
    self.log.info(
        "Test that the rpc raises if the legacy hash is passed with the index")
    msg = "hash_serialized hash type cannot be queried for a specific block"
    assert_raises_rpc_error(-8, msg,
                            self.nodes[1].gettxoutsetinfo,
                            hash_type='hash_serialized', hash_or_height=111)
    # Iterate a tuple, not a set literal: set iteration order is
    # unspecified, and a deterministic order keeps failures reproducible.
    for use_index in (True, False, None):
        assert_raises_rpc_error(-8, msg,
                                self.nodes[1].gettxoutsetinfo,
                                hash_type='hash_serialized',
                                hash_or_height=111, use_index=use_index)
# Standard functional-test entry point: run the test when executed directly.
if __name__ == '__main__':
    CoinStatsIndexTest().main()
diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py
index dd18baf00..d610de89b 100755
--- a/test/functional/p2p_fingerprint.py
+++ b/test/functional/p2p_fingerprint.py
@@ -1,133 +1,134 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
MSG_BLOCK,
CInv,
msg_block,
msg_getdata,
msg_getheaders,
msg_headers,
)
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class P2PFingerprintTest(BitcoinTestFramework):
def set_test_params(self):
    # Single node on a fresh chain; the test delivers competing blocks
    # over the P2P interface itself.
    self.setup_clean_chain = True
    self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
    """Return `nblocks` solved blocks chained on top of the given tip
    (hash as hex string, height, and median time)."""
    chain = []
    tip_hash = prev_hash
    tip_height = prev_height
    tip_mtp = prev_median_time
    for _ in range(nblocks):
        next_time = tip_mtp + 1
        block = create_block(
            int(tip_hash, 16), create_coinbase(tip_height + 1), next_time)
        block.solve()
        chain.append(block)
        # Advance the local view of the tip for the next iteration.
        tip_hash = block.hash
        tip_height += 1
        tip_mtp = next_time
    return chain
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
    """Request a full block from `node` via a getdata inventory message."""
    request = msg_getdata()
    request.inv.append(CInv(MSG_BLOCK, block_hash))
    node.send_message(request)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
    """Request headers from `node` stopping at `block_hash`."""
    request = msg_getheaders()
    request.hashstop = block_hash
    node.send_message(request)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
    """Verify anti-fingerprinting: recent stale blocks/headers are served,
    but stale blocks older than about a month are withheld, while old
    active-chain blocks remain available."""
    node0 = self.nodes[0].add_p2p_connection(P2PInterface())
    # Set node time to 60 days ago
    self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
    # Generating a chain of 10 blocks
    block_hashes = self.generatetoaddress(self.nodes[0],
                                          10, self.nodes[0].get_deterministic_priv_key().address)
    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
    new_blocks = self.build_chain(5, block_hash, height, block_time)
    # Force reorg to a longer chain
    node0.send_message(msg_headers(new_blocks))
    node0.wait_for_getdata([x.sha256 for x in new_blocks])
    for block in new_blocks:
        node0.send_and_ping(msg_block(block))
    # Check that reorg succeeded
+ self.nodes[0].unparkblock(new_blocks[-1].hash)
    assert_equal(self.nodes[0].getblockcount(), 13)
    stale_hash = int(block_hashes[-1], 16)
    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)
    node0.wait_for_block(stale_hash, timeout=3)
    # Check that getheader request for stale block header succeeds
    self.send_header_request(stale_hash, node0)
    node0.wait_for_header(hex(stale_hash), timeout=3)
    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    block_hash = int(self.generatetoaddress(self.nodes[0],
                                            1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
    assert_equal(self.nodes[0].getblockcount(), 14)
    node0.wait_for_block(block_hash, timeout=3)
    # Request for very old stale block should now fail
    with p2p_lock:
        node0.last_message.pop("block", None)
    self.send_block_request(stale_hash, node0)
    node0.sync_with_ping()
    assert "block" not in node0.last_message
    # Request for very old stale block header should now fail
    with p2p_lock:
        node0.last_message.pop("headers", None)
    self.send_header_request(stale_hash, node0)
    node0.sync_with_ping()
    assert "headers" not in node0.last_message
    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()
    self.send_block_request(block_hash, node0)
    node0.wait_for_block(block_hash, timeout=3)
    self.send_header_request(block_hash, node0)
    node0.wait_for_header(hex(block_hash), timeout=3)
# Standard functional-test entry point: run the test when executed directly.
if __name__ == '__main__':
    P2PFingerprintTest().main()