diff --git a/chronik/chronik-indexer/src/query/group_history.rs b/chronik/chronik-indexer/src/query/group_history.rs
index bab87d641..be0e2146f 100644
--- a/chronik/chronik-indexer/src/query/group_history.rs
+++ b/chronik/chronik-indexer/src/query/group_history.rs
@@ -1,492 +1,515 @@
// Copyright (c) 2023 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//! Module for [`QueryGroupHistory`], to query the tx history of a group.
use std::collections::BTreeSet;
use abc_rust_error::Result;
-use bitcoinsuite_core::{
- hash::Hashed,
- tx::{Tx, TxId},
-};
+use bitcoinsuite_core::tx::{Tx, TxId};
use bytes::Bytes;
use chronik_db::{
db::Db,
group::{Group, GroupMember},
io::{BlockReader, GroupHistoryReader, SpentByReader, TxNum, TxReader},
mem::{Mempool, MempoolGroupHistory},
};
use chronik_plugin::data::PluginNameMap;
use chronik_proto::proto;
use chronik_util::log;
use thiserror::Error;
use crate::{
avalanche::Avalanche,
indexer::Node,
query::{
make_tx_proto, read_plugin_outputs, MakeTxProtoParams, OutputsSpent,
TxTokenData,
},
};
/// Smallest allowed page size
pub const MIN_HISTORY_PAGE_SIZE: usize = 1;
/// Largest allowed page size
pub const MAX_HISTORY_PAGE_SIZE: usize = 200;
static EMPTY_MEMBER_TX_HISTORY: BTreeSet<(i64, TxId)> = BTreeSet::new();
/// Query pages of the tx history of a group
#[derive(Debug)]
pub struct QueryGroupHistory<'a, G: Group> {
/// Database
pub db: &'a Db,
/// Avalanche
pub avalanche: &'a Avalanche,
/// Mempool
pub mempool: &'a Mempool,
/// The part of the mempool we search for this group's history.
pub mempool_history: &'a MempoolGroupHistory<G>,
/// Group to query txs by
pub group: G,
/// Access to bitcoind to read txs
pub node: &'a Node,
/// Whether the SLP/ALP token index is enabled
pub is_token_index_enabled: bool,
/// Whether the script hash index is enabled
pub is_scripthash_index_enabled: bool,
/// Map plugin name <-> plugin idx of all loaded plugins
pub plugin_name_map: &'a PluginNameMap,
}
/// Errors indicating something went wrong with reading txs.
#[derive(Debug, Error, PartialEq)]
pub enum QueryGroupHistoryError {
/// Transaction not in mempool.
#[error("500: Inconsistent mempool: Transaction {0} not in mempool")]
MissingMempoolTx(TxId),
/// tx_num in group index but not in "tx" CF.
#[error("500: Inconsistent DB: Transaction num {0} not in DB")]
MissingDbTx(TxNum),
/// tx_num in DB but has no block.
#[error("500: Inconsistent DB: Transaction num {0} has no block")]
MissingDbTxBlock(TxNum),
/// Can only request page sizes below a certain maximum.
#[error(
"400: Requested page size {0} is too big, \
maximum is {max_history_page_size}",
max_history_page_size = MAX_HISTORY_PAGE_SIZE,
)]
RequestPageSizeTooBig(usize),
/// Can only request page sizes below a certain minimum.
#[error(
"400: Requested page size {0} is too small, \
minimum is {min_history_page_size}",
min_history_page_size = MIN_HISTORY_PAGE_SIZE,
)]
RequestPageSizeTooSmall(usize),
- /// Script hash not found
- #[error("404: Script hash {0:?} not found")]
- ScriptHashNotFound(String),
-
/// Script hash index not enabled
#[error("400: Script hash index disabled")]
ScriptHashIndexDisabled,
}
use self::QueryGroupHistoryError::*;
impl<'a, G: Group> QueryGroupHistory<'a, G> {
fn member_ser_from_member(
&self,
member: &GroupMember<G::Member<'_>>,
db_reader: &GroupHistoryReader<'_, G>,
- ) -> Result<Bytes> {
+ ) -> Result<Option<Bytes>> {
match member {
- GroupMember::Member(member) => Ok(Bytes::copy_from_slice(
+ GroupMember::Member(member) => Ok(Some(Bytes::copy_from_slice(
self.group.ser_member(member).as_ref(),
- )),
+ ))),
GroupMember::MemberHash(memberhash) => {
if !self.is_scripthash_index_enabled {
return Err(ScriptHashIndexDisabled.into());
}
// Check the mempool first, then the db. The script is more
// likely to be in the db, but accessing the mempool's
// hashmap is faster.
if let Some(member_ser) =
self.mempool_history.member_ser_by_member_hash(*memberhash)
{
- return Ok(Bytes::copy_from_slice(member_ser));
+ return Ok(Some(Bytes::copy_from_slice(member_ser)));
+ }
+ match db_reader.member_ser_by_member_hash(*memberhash)? {
+ Some(script_ser) => Ok(Some(Bytes::from(script_ser))),
+ None => Ok(None),
}
- let script_ser = db_reader
- .member_ser_by_member_hash(*memberhash)?
- .ok_or_else(|| ScriptHashNotFound(memberhash.hex_be()))?;
- Ok(Bytes::from(script_ser))
}
}
}
/// Return the confirmed txs of the group in the order they occur on the
/// blockchain, i.e.:
/// - Sorted by block height, ascending.
/// - Within a block, sorted as txs occur in the block.
pub fn confirmed_txs(
&self,
member: GroupMember<G::Member<'_>>,
request_page_num: usize,
request_page_size: usize,
) -> Result<proto::TxHistoryPage> {
if request_page_size < MIN_HISTORY_PAGE_SIZE {
return Err(RequestPageSizeTooSmall(request_page_size).into());
}
if request_page_size > MAX_HISTORY_PAGE_SIZE {
return Err(RequestPageSizeTooBig(request_page_size).into());
}
let db_reader = GroupHistoryReader::<G>::new(self.db)?;
- let member_ser = self.member_ser_from_member(&member, &db_reader)?;
+ let member_ser =
+ match self.member_ser_from_member(&member, &db_reader)? {
+ Some(m) => m,
+ None => {
+ return Ok(proto::TxHistoryPage {
+ txs: vec![],
+ num_pages: 0,
+ num_txs: 0,
+ })
+ }
+ };
let (num_db_pages, num_db_txs) =
db_reader.member_num_pages_and_txs(member_ser.as_ref())?;
let num_request_pages =
(num_db_txs + request_page_size - 1) / request_page_size;
let make_result = |txs: Vec<proto::Tx>| {
if txs.len() != txs.capacity() {
// We should've predicted exactly how many txs we'll return.
log!("WARNING: Allocated more txs than needed\n");
}
proto::TxHistoryPage {
txs,
num_pages: num_request_pages as u32,
num_txs: num_db_txs as u32,
}
};
// Initial index in the list of all txs of this script.
// On 32-bit, this could overflow, so we saturate.
let first_tx_idx = request_page_num.saturating_mul(request_page_size);
// Calculate how many txs we're going to return, and allocate sufficient
// space. Handle out-of-range pages by saturating.
// Since at most `num_returned_txs` can be MAX_HISTORY_PAGE_SIZE, OOM
// attacks are not possible.
let num_returned_txs =
request_page_size.min(num_db_txs.saturating_sub(first_tx_idx));
let mut page_txs = Vec::with_capacity(num_returned_txs);
// Short-circuit so we don't fetch DB again if no results.
if num_returned_txs == 0 {
return Ok(make_result(vec![]));
}
// First DB page to start reading from.
let db_page_num_start = first_tx_idx / db_reader.page_size();
// First tx index with that page.
let mut first_inner_idx = first_tx_idx % db_reader.page_size();
// Iterate DB pages, starting from the DB page the first tx is in.
// Since DB pages are much larger than MAX_HISTORY_PAGE_SIZE, this will
// only fetch 2 pages at most.
for current_page_num in db_page_num_start..num_db_pages {
let db_page_tx_nums = db_reader
.page_txs(member_ser.as_ref(), current_page_num as u32)?
.unwrap_or_default();
for &tx_num in db_page_tx_nums.iter().skip(first_inner_idx) {
page_txs.push(self.read_block_tx(tx_num)?);
// We filled up the requested page size -> return
if page_txs.len() == request_page_size {
return Ok(make_result(page_txs));
}
}
first_inner_idx = 0;
}
// Couldn't fill requested page size completely
Ok(make_result(page_txs))
}
/// Return the group history in reverse chronological order, i.e. the latest
/// one first, including mempool txs.
///
/// We start pages at the most recent mempool tx, go backwards in the
/// mempool until we reach the oldest tx in the mempool, then continue with
/// the most recent DB tx and go backwards from there.
///
/// Note that unlike `confirmed_txs` and `unconfirmed_txs`, the order of txs
/// observed by fetching multiple pages can change if new txs are added, or
/// the page size is changed. This is because txs are fetched from the DB
/// in the order they appear on the blockchain, and only then are sorted
/// by time_first_seen.
///
/// This means that if tx1 < tx2 wrt time_first_seen, but tx2 < tx1 wrt
/// txid, tx1 would be ordered *before* tx2 if they are in the same block
/// (because of time_first_seen), but for other page sizes tx1 might be
/// cut out entirely, since it isn't even queried: it comes "too late" on
/// the blockchain (because of txid).
///
/// We accept this trade-off, because if we wanted to always get consistent
/// order here, we'd need to sort txs by time_first_seen in the DB, which
/// isn't a very reliable metric. We could also continue reading more txs
/// until we run into a new block, but that could open potential DoS
/// attacks. And in practice this ordering isn't a big issue, as most people
/// are mostly interested in the "latest" txs of the address.
pub fn rev_history(
&self,
member: GroupMember<G::Member<'_>>,
request_page_num: usize,
request_page_size: usize,
) -> Result<proto::TxHistoryPage> {
if request_page_size < MIN_HISTORY_PAGE_SIZE {
return Err(RequestPageSizeTooSmall(request_page_size).into());
}
if request_page_size > MAX_HISTORY_PAGE_SIZE {
return Err(RequestPageSizeTooBig(request_page_size).into());
}
let db_reader = GroupHistoryReader::<G>::new(self.db)?;
- let member_ser = self.member_ser_from_member(&member, &db_reader)?;
+ let member_ser =
+ match self.member_ser_from_member(&member, &db_reader)? {
+ Some(m) => m,
+ None => {
+ return Ok(proto::TxHistoryPage {
+ txs: vec![],
+ num_pages: 0,
+ num_txs: 0,
+ })
+ }
+ };
let (_, num_db_txs) =
db_reader.member_num_pages_and_txs(member_ser.as_ref())?;
// How many txs in total to skip, beginning from the mempool txs, and
// then continuing backwards into the DB txs.
let request_tx_offset =
request_page_num.saturating_mul(request_page_size);
// All the mempool txs for this member
let mempool_txs = self
.mempool_history
.member_history(member_ser.as_ref())
.unwrap_or(&EMPTY_MEMBER_TX_HISTORY);
let total_num_txs = mempool_txs.len() + num_db_txs;
let total_num_pages =
(total_num_txs + request_page_size - 1) / request_page_size;
let make_result = |txs: Vec<proto::Tx>| {
assert_eq!(txs.len(), txs.capacity());
proto::TxHistoryPage {
txs,
num_pages: total_num_pages as u32,
num_txs: total_num_txs as u32,
}
};
// Number of mempool txs in the result.
// We saturate to clip numbers to >= 0.
let num_page_mempool_txs = request_page_size
.min(mempool_txs.len().saturating_sub(request_tx_offset));
// Backwards offset into the DB. If this were zero, we'd start reading
// at the last tx in the DB.
let request_db_tx_offset =
request_tx_offset.saturating_sub(mempool_txs.len());
// DB txs left after skipping the requested offset.
let num_db_txs_available =
num_db_txs.saturating_sub(request_db_tx_offset);
// How many DB txs we can return at most; the page could already be
// partially filled with mempool txs. This cannot overflow as
// num_page_mempool_txs <= request_page_size.
let max_page_db_txs = request_page_size - num_page_mempool_txs;
// How many DB txs we return. It's either the number of txs we have left
// in the DB or the maximum we can still put on the page.
let num_page_db_txs = max_page_db_txs.min(num_db_txs_available);
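// Worked example: with 3 mempool txs, 10 DB txs, request_page_size 4
// and request_page_num 1: request_tx_offset = 4, so 0 mempool txs are
// on this page, request_db_tx_offset = 1, num_db_txs_available = 9,
// and num_page_db_txs = 4.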
// Allocate sufficient space for the txs on the page.
let mut page_txs =
Vec::with_capacity(num_page_mempool_txs + num_page_db_txs);
// Add the requested mempool txs, we skip over the requested offset, and
// take only as many as we can put on the page.
let page_mempool_txs_iter = mempool_txs
.iter()
.rev()
.skip(request_tx_offset)
.take(request_page_size);
for (_, txid) in page_mempool_txs_iter {
let entry = self.mempool.tx(txid).ok_or(MissingMempoolTx(*txid))?;
page_txs.push(make_tx_proto(MakeTxProtoParams {
tx: &entry.tx,
outputs_spent: &OutputsSpent::new_mempool(
self.mempool.spent_by().outputs_spent(txid),
),
time_first_seen: entry.time_first_seen,
is_coinbase: false,
block: None,
avalanche: self.avalanche,
token: TxTokenData::from_mempool(
self.mempool.tokens(),
&entry.tx,
)
.as_ref(),
plugin_outputs: &read_plugin_outputs(
self.db,
self.mempool,
&entry.tx,
None,
!self.plugin_name_map.is_empty(),
)?,
plugin_name_map: self.plugin_name_map,
}));
}
// If we filled up the page with mempool txs, or there's no DB txs on
// this page, we can return early to avoid reading the DB.
if num_page_mempool_txs == request_page_size || num_page_db_txs == 0 {
return Ok(make_result(page_txs));
}
// Initial index to start reading from in the list of all DB txs of this
// member. We then iterate this backwards, until we fill the page or hit
// the first DB tx of the member.
// Note that this never overflows, as num_db_txs_available > 0 due to
// the check num_page_db_txs == 0.
let first_db_tx_idx = num_db_txs_available - 1;
// First DB page to start reading from, from there we go backwards.
let db_page_num_start = first_db_tx_idx / db_reader.page_size();
// First tx index within that page, from there we go backwards.
let mut first_inner_idx = first_db_tx_idx % db_reader.page_size();
'outer: for current_page_num in (0..=db_page_num_start).rev() {
let db_page_tx_nums = db_reader
.page_txs(member_ser.as_ref(), current_page_num as u32)?
.unwrap_or_default();
for inner_idx in (0..=first_inner_idx).rev() {
let tx_num = db_page_tx_nums[inner_idx];
page_txs.push(self.read_block_tx(tx_num)?);
// Filled up page: break out of outer loop.
if page_txs.len() == request_page_size {
break 'outer;
}
}
first_inner_idx = db_reader.page_size() - 1;
}
// We use stable sort, so the block order is retained when timestamps
// are identical.
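// Example: within one block, a tx with time_first_seen == 0 sorts first
// (key i64::MIN), the rest sort newest-first by time_first_seen; across
// blocks, the negated height puts more recent blocks first.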
page_txs[num_page_mempool_txs..].sort_by_key(|tx| {
match (&tx.block, tx.time_first_seen) {
// Within blocks, sort txs without time_first_seen first
(Some(block), 0) => (-block.height, i64::MIN),
// Otherwise, sort by time_first_seen within blocks
(Some(block), time_first_seen) => {
(-block.height, -time_first_seen)
}
(None, _) => unreachable!("We skip sorting mempool txs"),
}
});
Ok(make_result(page_txs))
}
/// Return the unconfirmed txs (i.e. all txs in the mempool) in first-seen
/// order. If two txs have been seen at the same second, we order them by
/// txid.
///
/// This should always be small enough to return without pagination, but
/// just to be future-proof, we always pretend as if there's exactly one
/// page with all the txs (or 0 pages if there are no txs), so we can add
/// pagination later.
pub fn unconfirmed_txs(
&self,
member: GroupMember<G::Member<'_>>,
) -> Result<proto::TxHistoryPage> {
let db_reader = GroupHistoryReader::<G>::new(self.db)?;
- let member_ser = self.member_ser_from_member(&member, &db_reader)?;
+ let member_ser =
+ match self.member_ser_from_member(&member, &db_reader)? {
+ Some(m) => m,
+ None => {
+ return Ok(proto::TxHistoryPage {
+ txs: vec![],
+ num_pages: 0,
+ num_txs: 0,
+ })
+ }
+ };
let txs = match self.mempool_history.member_history(member_ser.as_ref())
{
Some(mempool_txs) => mempool_txs
.iter()
.map(|(_, txid)| -> Result<_> {
let entry =
self.mempool.tx(txid).ok_or(MissingMempoolTx(*txid))?;
Ok(make_tx_proto(MakeTxProtoParams {
tx: &entry.tx,
outputs_spent: &OutputsSpent::new_mempool(
self.mempool.spent_by().outputs_spent(txid),
),
time_first_seen: entry.time_first_seen,
is_coinbase: false,
block: None,
avalanche: self.avalanche,
token: TxTokenData::from_mempool(
self.mempool.tokens(),
&entry.tx,
)
.as_ref(),
plugin_outputs: &read_plugin_outputs(
self.db,
self.mempool,
&entry.tx,
None,
!self.plugin_name_map.is_empty(),
)?,
plugin_name_map: self.plugin_name_map,
}))
})
.collect::<Result<Vec<_>>>()?,
None => vec![],
};
Ok(proto::TxHistoryPage {
num_pages: if txs.is_empty() { 0 } else { 1 },
num_txs: txs.len() as u32,
txs,
})
}
fn read_block_tx(&self, tx_num: TxNum) -> Result<proto::Tx> {
let tx_reader = TxReader::new(self.db)?;
let block_reader = BlockReader::new(self.db)?;
let spent_by_reader = SpentByReader::new(self.db)?;
let block_tx =
tx_reader.tx_by_tx_num(tx_num)?.ok_or(MissingDbTx(tx_num))?;
let block = block_reader
.by_height(block_tx.block_height)?
.ok_or(MissingDbTxBlock(tx_num))?;
let tx = Tx::from(self.node.bridge.load_tx(
block.file_num,
block_tx.entry.data_pos,
block_tx.entry.undo_pos,
)?);
let outputs_spent = OutputsSpent::query(
&spent_by_reader,
&tx_reader,
self.mempool.spent_by().outputs_spent(&block_tx.entry.txid),
tx_num,
)?;
let token = TxTokenData::from_db(
self.db,
tx_num,
&tx,
self.is_token_index_enabled,
)?;
let plugin_outputs = read_plugin_outputs(
self.db,
self.mempool,
&tx,
Some(tx_num),
!self.plugin_name_map.is_empty(),
)?;
Ok(make_tx_proto(MakeTxProtoParams {
tx: &tx,
outputs_spent: &outputs_spent,
time_first_seen: block_tx.entry.time_first_seen,
is_coinbase: block_tx.entry.is_coinbase,
block: Some(&block),
avalanche: self.avalanche,
token: token.as_ref(),
plugin_outputs: &plugin_outputs,
plugin_name_map: self.plugin_name_map,
}))
}
}
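A note on the pagination in confirmed_txs above: it computes the index of the first requested tx in the member's full confirmed history, locates the fixed-size DB page holding that tx, and reads forward until the requested page is full. Since MAX_HISTORY_PAGE_SIZE (200) is much smaller than a DB page, at most two consecutive DB pages are touched per request. Below is a minimal Python sketch of that mapping; the function name, the in-memory page lists, and the DB page sizes are illustrative assumptions, not Chronik's API.

def paginate_confirmed(db_pages, request_page_num, request_page_size,
                       db_page_size=1000):
    # Index of the first requested tx in the member's full confirmed history.
    first_tx_idx = request_page_num * request_page_size
    # DB page containing that tx, and the tx's offset within that page.
    db_page_num_start = first_tx_idx // db_page_size
    first_inner_idx = first_tx_idx % db_page_size
    result = []
    for page_num in range(db_page_num_start, len(db_pages)):
        for tx_num in db_pages[page_num][first_inner_idx:]:
            result.append(tx_num)
            if len(result) == request_page_size:
                return result  # filled the requested page
        first_inner_idx = 0  # later DB pages are read from their start
    return result  # partially filled final page

# A request spanning a DB page boundary reads exactly two DB pages:
assert paginate_confirmed(
    [list(range(300)), list(range(300, 600))],
    request_page_num=1, request_page_size=200, db_page_size=300,
) == list(range(200, 400))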
diff --git a/test/functional/chronik_scripthash.py b/test/functional/chronik_scripthash.py
index 8c5e0edbb..c14a2fea4 100644
--- a/test/functional/chronik_scripthash.py
+++ b/test/functional/chronik_scripthash.py
@@ -1,470 +1,463 @@
# Copyright (c) 2024 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test Chronik's /script/scripthash/:payload/* endpoints.
"""
from test_framework.address import (
ADDRESS_ECREG_P2SH_OP_TRUE,
ADDRESS_ECREG_UNSPENDABLE,
P2SH_OP_TRUE,
)
from test_framework.blocktools import (
GENESIS_CB_PK,
GENESIS_CB_SCRIPT_PUBKEY,
GENESIS_CB_TXID,
create_block,
make_conform_to_ctor,
)
from test_framework.hash import hex_be_sha256
from test_framework.messages import XEC, CTransaction, FromHex, ToHex
from test_framework.p2p import P2PDataStore
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import MiniWallet, MiniWalletMode
GENESIS_CB_SCRIPTHASH = hex_be_sha256(GENESIS_CB_SCRIPT_PUBKEY)
SCRIPTHASH_P2SH_OP_TRUE_HEX = hex_be_sha256(P2SH_OP_TRUE)
class ChronikScriptHashTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-chronik", "-chronikscripthashindex=1"]]
self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_chronik()
def run_test(self):
self.node = self.nodes[0]
self.chronik = self.node.get_chronik_client()
self.op_true_wallet = MiniWallet(self.node, mode=MiniWalletMode.ADDRESS_OP_TRUE)
# We add a connection to make getblocktemplate work
self.node.add_p2p_connection(P2PDataStore())
self.test_invalid_requests()
self.test_valid_requests()
self.test_conflicts()
self.test_wipe_index()
def test_invalid_requests(self):
for payload in ("lorem_ipsum", "", "deadbeef", "deadbee", 31 * "ff", 33 * "ff"):
err_msg = f'400: Unable to parse script hash "{payload}"'
assert_equal(
self.chronik.script("scripthash", payload).confirmed_txs().err(400).msg,
err_msg,
)
assert_equal(
self.chronik.script("scripthash", payload).history().err(400).msg,
err_msg,
)
assert_equal(
self.chronik.script("scripthash", payload)
.unconfirmed_txs()
.err(400)
.msg,
err_msg,
)
assert_equal(
self.chronik.script("scripthash", payload).utxos().err(400).msg,
err_msg,
)
# Potentially valid sha256 hash, but unlikely to collide with any existing
# scripthash
valid_payload = 32 * "ff"
- err_msg = f'404: Script hash "{valid_payload}" not found'
- assert_equal(
- self.chronik.script("scripthash", valid_payload)
- .confirmed_txs()
- .err(404)
- .msg,
- err_msg,
- )
- assert_equal(
- self.chronik.script("scripthash", valid_payload)
- .unconfirmed_txs()
- .err(404)
- .msg,
- err_msg,
- )
assert_equal(
self.chronik.script("scripthash", valid_payload).utxos().err(404).msg,
- err_msg,
+ f'404: Script hash "{valid_payload}" not found',
)
def test_valid_requests(self):
from test_framework.chronik.client import pb
from test_framework.chronik.test_data import genesis_cb_tx
+ # Unknown scripthash yields an empty history
+ valid_payload = 32 * "ff"
+ for resp in (
+ self.chronik.script("scripthash", valid_payload).confirmed_txs(),
+ self.chronik.script("scripthash", valid_payload).unconfirmed_txs(),
+ ):
+ assert_equal(
+ resp.ok(),
+ pb.TxHistoryPage(),
+ )
+
expected_cb_history = pb.TxHistoryPage(
txs=[genesis_cb_tx()], num_pages=1, num_txs=1
)
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH)
.confirmed_txs()
.ok(),
expected_cb_history,
)
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH).history().ok(),
expected_cb_history,
)
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH).utxos().ok(),
pb.ScriptUtxos(
script=bytes.fromhex(f"41{GENESIS_CB_PK}ac"),
utxos=[
pb.ScriptUtxo(
outpoint=pb.OutPoint(
txid=bytes.fromhex(GENESIS_CB_TXID)[::-1],
out_idx=0,
),
block_height=0,
is_coinbase=True,
value=50_000_000 * XEC,
is_final=False,
)
],
),
)
# No txs in mempool for the genesis pubkey
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH)
.unconfirmed_txs()
.ok(),
pb.TxHistoryPage(num_pages=0, num_txs=0),
)
def check_num_txs(num_block_txs, num_mempool_txs, num_utxos):
page_size = 200
page_num = 0
script_conf_txs = (
self.chronik.script("scripthash", SCRIPTHASH_P2SH_OP_TRUE_HEX)
.confirmed_txs(page_num, page_size)
.ok()
)
assert_equal(script_conf_txs.num_txs, num_block_txs)
script_history = (
self.chronik.script("scripthash", SCRIPTHASH_P2SH_OP_TRUE_HEX)
.history(page_num, page_size)
.ok()
)
assert_equal(script_history.num_txs, num_block_txs + num_mempool_txs)
script_unconf_txs = (
self.chronik.script("scripthash", SCRIPTHASH_P2SH_OP_TRUE_HEX)
.unconfirmed_txs()
.ok()
)
assert_equal(script_unconf_txs.num_txs, num_mempool_txs)
script_utxos = (
self.chronik.script("scripthash", SCRIPTHASH_P2SH_OP_TRUE_HEX)
.utxos()
.ok()
)
assert_equal(len(script_utxos.utxos), num_utxos)
# Generate blocks to some address and verify the history
blockhashes = self.generatetoaddress(self.node, 10, ADDRESS_ECREG_P2SH_OP_TRUE)
check_num_txs(
num_block_txs=len(blockhashes),
num_mempool_txs=0,
num_utxos=len(blockhashes),
)
# Undo last block & check history
self.node.invalidateblock(blockhashes[-1])
check_num_txs(
num_block_txs=len(blockhashes) - 1,
num_mempool_txs=0,
num_utxos=len(blockhashes) - 1,
)
# Create a replacement block (use a different destination address to ensure it
# has a hash different from the invalidated one)
blockhashes[-1] = self.generatetoaddress(
self.node, 1, ADDRESS_ECREG_UNSPENDABLE
)[0]
# Mature 10 coinbase outputs
blockhashes += self.generatetoaddress(
self.node, 101, ADDRESS_ECREG_P2SH_OP_TRUE
)
check_num_txs(
num_block_txs=len(blockhashes) - 1,
num_mempool_txs=0,
num_utxos=len(blockhashes) - 1,
)
# Add mempool txs
self.op_true_wallet.rescan_utxos()
num_mempool_txs = 0
# the number of utxos remains constant throughout the loop because we
# spend one to create another one
num_utxos = len(blockhashes) - 1
for _ in range(10):
self.op_true_wallet.send_self_transfer(from_node=self.node)
num_mempool_txs += 1
check_num_txs(
num_block_txs=len(blockhashes) - 1,
num_mempool_txs=num_mempool_txs,
num_utxos=num_utxos,
)
# Mine mempool txs, now they're in confirmed-txs
blockhashes += self.generatetoaddress(self.node, 1, ADDRESS_ECREG_P2SH_OP_TRUE)
check_num_txs(
num_block_txs=len(blockhashes) + num_mempool_txs - 1,
num_mempool_txs=0,
num_utxos=num_utxos + 1,
)
self.log.info(
"Test a mempool transaction whose script is not already in the confirmed db"
)
# This is the example used in the ElectrumX protocol documentation.
script = CScript(
bytes.fromhex("76a91462e907b15cbf27d5425399ebf6f0fb50ebb88f1888ac")
)
scripthash_hex = hex_be_sha256(script)
assert_equal(
scripthash_hex,
"8b01df4e368ea28f8dc0423bcf7a4923e3a12d307c875e47a0cfbf90b5c39161",
)
# Ensure that this script was never seen before.
assert_equal(
- self.chronik.script("scripthash", scripthash_hex)
- .unconfirmed_txs()
- .err(404)
- .msg,
- f'404: Script hash "{scripthash_hex}" not found',
+ self.chronik.script("scripthash", scripthash_hex).unconfirmed_txs().ok(),
+ pb.TxHistoryPage(),
)
assert_equal(
self.chronik.script("scripthash", scripthash_hex).utxos().err(404).msg,
f'404: Script hash "{scripthash_hex}" not found',
)
txid = self.op_true_wallet.send_to(
from_node=self.node, scriptPubKey=script, amount=1337
)[0]
# There is no confirmed history for this script, but we do have its scripthash
# in the mempool so it no longer triggers a 404 error.
assert_equal(
self.chronik.script("scripthash", scripthash_hex).confirmed_txs().ok(),
pb.TxHistoryPage(num_pages=0, num_txs=0),
)
# We do have one such unconfirmed tx.
proto = self.chronik.script("scripthash", scripthash_hex).unconfirmed_txs().ok()
assert_equal(proto.num_txs, 1)
assert_equal(proto.txs[0].txid, bytes.fromhex(txid)[::-1])
proto = self.chronik.script("scripthash", scripthash_hex).utxos().ok()
assert_equal(len(proto.utxos), 1)
assert_equal(proto.utxos[0].block_height, -1)
assert_equal(proto.utxos[0].value, 1337)
def test_conflicts(self):
self.log.info("A mempool transaction is replaced by a mined transaction")
# Use a different wallet to have a clean history
wallet = MiniWallet(self.node, mode=MiniWalletMode.RAW_P2PK)
script_pubkey = wallet.get_scriptPubKey()
scripthash_hex1 = hex_be_sha256(script_pubkey)
- def assert_404(scripthash_hex):
+ def assert_blank_history(scripthash_hex):
assert_equal(
self.chronik.script("scripthash", scripthash_hex)
.confirmed_txs()
- .err(404)
- .msg,
- f'404: Script hash "{scripthash_hex}" not found',
+ .ok()
+ .num_txs,
+ 0,
)
assert_equal(
self.chronik.script("scripthash", scripthash_hex).utxos().err(404).msg,
f'404: Script hash "{scripthash_hex}" not found',
)
- assert_404(scripthash_hex1)
+ assert_blank_history(scripthash_hex1)
# Create two spendable utxos with this script and confirm them. Fund them with
# the OP_TRUE wallet used in the previous test which should have plenty of
# spendable utxos, so we don't need to create and mature additional coinbase
# utxos.
def get_utxo():
funding_txid, _ = self.op_true_wallet.send_to(
from_node=self.node, scriptPubKey=script_pubkey, amount=25_000_000
)
wallet.rescan_utxos()
utxo_to_spend = wallet.get_utxo(txid=funding_txid)
return utxo_to_spend
utxo_to_spend1 = get_utxo()
utxo_to_spend2 = get_utxo()
self.generate(self.node, 1)
def is_txid_in_history(txid: str, history_page) -> bool:
return any(tx.txid[::-1].hex() == txid for tx in history_page.txs)
def is_utxo_in_utxos(utxo: dict, script_utxos) -> bool:
return any(
txo.outpoint.txid[::-1].hex() == utxo["txid"]
and txo.outpoint.out_idx == utxo["vout"]
for txo in script_utxos.utxos
)
def check_history(
scripthash_hex: str,
conf_txids: list[str],
unconf_txids: list[str],
utxos=None,
):
unconf_txs = (
self.chronik.script("scripthash", scripthash_hex).unconfirmed_txs().ok()
)
conf_txs = (
self.chronik.script("scripthash", scripthash_hex)
.confirmed_txs(page_size=200)
.ok()
)
script_utxos = (
self.chronik.script("scripthash", scripthash_hex).utxos().ok()
)
assert_equal(conf_txs.num_txs, len(conf_txids))
assert_equal(unconf_txs.num_txs, len(unconf_txids))
assert_equal(len(script_utxos.utxos), len(utxos))
assert all(is_txid_in_history(txid, conf_txs) for txid in conf_txids)
assert all(is_txid_in_history(txid, unconf_txs) for txid in unconf_txids)
assert all(is_utxo_in_utxos(utxo, script_utxos) for utxo in utxos)
# Consistency check: None of the txids should be duplicated
all_txids = conf_txids + unconf_txids
assert len(all_txids) == len(set(all_txids))
check_history(
scripthash_hex1,
conf_txids=[utxo_to_spend1["txid"], utxo_to_spend2["txid"]],
unconf_txids=[],
utxos=[utxo_to_spend1, utxo_to_spend2],
)
# Create 2 mempool txs, one of which will later conflict with a block tx.
mempool_tx_to_be_replaced = wallet.send_self_transfer(
from_node=self.nodes[0], utxo_to_spend=utxo_to_spend1
)
other_mempool_tx = wallet.send_self_transfer(
from_node=self.nodes[0], utxo_to_spend=utxo_to_spend2
)
check_history(
scripthash_hex1,
conf_txids=[utxo_to_spend1["txid"], utxo_to_spend2["txid"]],
unconf_txids=[
mempool_tx_to_be_replaced["txid"],
other_mempool_tx["txid"],
],
utxos=[mempool_tx_to_be_replaced["new_utxo"], other_mempool_tx["new_utxo"]],
)
replacement_tx = wallet.create_self_transfer(utxo_to_spend=utxo_to_spend1)
assert replacement_tx["txid"] != mempool_tx_to_be_replaced["txid"]
block = create_block(tmpl=self.node.getblocktemplate())
block.vtx.append(replacement_tx["tx"])
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.node.submitblock(ToHex(block))
# The replaced mempool tx was dropped from both histories.
# The other one is still in the unconfirmed txs.
# The replacement tx is confirmed.
check_history(
scripthash_hex1,
conf_txids=[
utxo_to_spend1["txid"],
utxo_to_spend2["txid"],
replacement_tx["txid"],
],
unconf_txids=[other_mempool_tx["txid"]],
utxos=[other_mempool_tx["new_utxo"], replacement_tx["new_utxo"]],
)
self.log.info(
"A mempool transaction is replaced by a mined transaction to a different "
"script, leaving the first script's history blank."
)
script_pubkey = b"\x21\x03" + 32 * b"\xff" + b"\xac"
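# 0x21 pushes the next 33 bytes (a compressed-pubkey-shaped payload:
# 0x03 followed by 32 bytes), and 0xac is OP_CHECKSIG, i.e. a P2PK script.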
scripthash_hex2 = hex_be_sha256(script_pubkey)
funding_txid, funding_out_idx = self.op_true_wallet.send_to(
from_node=self.node, scriptPubKey=script_pubkey, amount=50_000_000
)
check_history(
scripthash_hex2,
conf_txids=[],
unconf_txids=[funding_txid],
utxos=[{"txid": funding_txid, "vout": funding_out_idx}],
)
# Mine a tx spending the same input to a different output.
replacement_tx = FromHex(
CTransaction(), self.node.getrawtransaction(funding_txid)
)
for out_idx, txout in enumerate(replacement_tx.vout):
if txout.scriptPubKey == script_pubkey:
break
replacement_tx.vout[out_idx].scriptPubKey = b"\x21\x03" + 32 * b"\xee" + b"\xac"
replacement_tx.rehash()
block = create_block(tmpl=self.node.getblocktemplate())
block.vtx.append(replacement_tx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.node.submitblock(ToHex(block))
# There is no transaction left for this script.
- assert_404(scripthash_hex2)
+ assert_blank_history(scripthash_hex2)
def test_wipe_index(self):
self.log.info("Restarting with chronikscripthashindex=0 wipes the index")
with self.node.assert_debug_log(
[
" Warning: Wiping existing scripthash index, since -chronikscripthashindex=0",
]
):
self.restart_node(0, ["-chronik", "-chronikscripthashindex=0"])
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH)
.confirmed_txs()
.err(400)
.msg,
"400: Script hash index disabled",
)
assert_equal(
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH)
.utxos()
.err(400)
.msg,
"400: Script hash index disabled",
)
self.log.info("Restarting with chronikscripthashindex=1 restores the index")
self.restart_node(0, ["-chronik", "-chronikscripthashindex=1"])
assert_equal(
self.chronik.script("p2pk", GENESIS_CB_PK).confirmed_txs().ok(),
self.chronik.script("scripthash", GENESIS_CB_SCRIPTHASH)
.confirmed_txs()
.ok(),
)
if __name__ == "__main__":
ChronikScriptHashTest().main()
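Taken together, the two diffs change the endpoint contract for well-formed but unknown script hashes: the history endpoints (confirmed_txs, unconfirmed_txs, history) now return an empty TxHistoryPage instead of a 404, while the utxos endpoint still returns 404. A rough, self-contained Python model of the new confirmed_txs behavior; all names here are illustrative, not Chronik's actual API:

from dataclasses import dataclass, field

@dataclass
class TxHistoryPage:
    txs: list = field(default_factory=list)
    num_pages: int = 0
    num_txs: int = 0

def confirmed_txs(index, member_ser_by_hash, script_hash, page_size=200):
    member_ser = member_ser_by_hash.get(script_hash)
    if member_ser is None:
        # Before this patch: a 404 "Script hash not found" error.
        # After: an empty history page.
        return TxHistoryPage()
    txs = index.get(member_ser, [])
    num_pages = (len(txs) + page_size - 1) // page_size
    return TxHistoryPage(txs=txs, num_pages=num_pages, num_txs=len(txs))

assert confirmed_txs({}, {}, 32 * "ff") == TxHistoryPage()
assert confirmed_txs(
    {b"scr": ["tx0"]}, {32 * "ff": b"scr"}, 32 * "ff"
) == TxHistoryPage(txs=["tx0"], num_pages=1, num_txs=1)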