diff --git a/chronik/chronik-db/src/db.rs b/chronik/chronik-db/src/db.rs index 86f728d75..4459fd41e 100644 --- a/chronik/chronik-db/src/db.rs +++ b/chronik/chronik-db/src/db.rs @@ -1,173 +1,176 @@ // Copyright (c) 2022 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. //! Module containing [`Db`], which encapsulates a database, and its errors. //! Read and write operations should exclusively be done with dedicated writers //! and readers, such as [`crate::io::BlockWriter`]. use std::path::Path; use abc_rust_error::Result; pub use rocksdb::WriteBatch; use rocksdb::{ColumnFamilyDescriptor, IteratorMode}; use thiserror::Error; use crate::{ groups::{ScriptHistoryWriter, ScriptUtxoWriter}, - io::{BlockWriter, MetadataWriter, SpentByWriter, TxWriter}, + io::{ + BlockStatsWriter, BlockWriter, MetadataWriter, SpentByWriter, TxWriter, + }, }; // All column family names used by Chronik should be defined here /// Column family name for the block data. pub const CF_BLK: &str = "blk"; /// Column family for the first tx_num of the block. Used to get a list of the /// txs of the block. pub const CF_BLK_BY_FIRST_TX: &str = "blk_by_first_tx"; /// Column family for stats about blocks. pub const CF_BLK_STATS: &str = "blk_stats"; /// Column family for the block height of the first tx_num of that block. Used /// to get the block height of a tx. pub const CF_FIRST_TX_BY_BLK: &str = "first_tx_by_blk"; /// Column family to lookup a block by its hash. pub const CF_LOOKUP_BLK_BY_HASH: &str = "lookup_blk_by_hash"; /// Column family to lookup a tx by its hash. pub const CF_LOOKUP_TX_BY_HASH: &str = "lookup_tx_by_hash"; /// Column family name for db metadata. pub const CF_META: &str = "meta"; /// Column family to store tx history by script. pub const CF_SCRIPT_HISTORY: &str = "script_history"; /// Column family for utxos by script. pub const CF_SCRIPT_UTXO: &str = "script_utxo"; /// Column family to store which outputs have been spent by which tx inputs. pub const CF_SPENT_BY: &str = "spent_by"; /// Column family for the tx data. pub const CF_TX: &str = "tx"; pub(crate) type CF = rocksdb::ColumnFamily; /// Indexer database. /// Owns the underlying [`rocksdb::DB`] instance. #[derive(Debug)] pub struct Db { db: rocksdb::DB, cf_names: Vec<String>, } /// Errors indicating something went wrong with the database itself. #[derive(Debug, Error)] pub enum DbError { /// Column family requested but not defined during `Db::open`. #[error("Column family {0} doesn't exist")] NoSuchColumnFamily(String), /// Error with RocksDB itself, e.g. db inconsistency. #[error("RocksDB error: {0}")] RocksDb(rocksdb::Error), } use self::DbError::*; impl Db { /// Opens the database under the specified path. /// Creates the database file and the necessary column families if they /// don't exist.
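///
/// A minimal usage sketch (the path shown is hypothetical; callers derive
/// the real one from the node's datadir):
///
/// ```ignore
/// let db = Db::open("<datadir>/indexes/chronik")?;
/// let cf_blk = db.cf(CF_BLK)?; // handle to the block data column family
/// ```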
pub fn open(path: impl AsRef<Path>) -> Result<Self> { let mut cfs = Vec::new(); BlockWriter::add_cfs(&mut cfs); + BlockStatsWriter::add_cfs(&mut cfs); MetadataWriter::add_cfs(&mut cfs); TxWriter::add_cfs(&mut cfs); ScriptHistoryWriter::add_cfs(&mut cfs); ScriptUtxoWriter::add_cfs(&mut cfs); SpentByWriter::add_cfs(&mut cfs); Self::open_with_cfs(path, cfs) } pub(crate) fn open_with_cfs( path: impl AsRef<Path>, cfs: Vec<ColumnFamilyDescriptor>, ) -> Result<Self> { let db_options = Self::db_options(); let cf_names = cfs.iter().map(|cf| cf.name().to_string()).collect(); let db = rocksdb::DB::open_cf_descriptors(&db_options, path, cfs) .map_err(RocksDb)?; Ok(Db { db, cf_names }) } fn db_options() -> rocksdb::Options { let mut db_options = rocksdb::Options::default(); db_options.create_if_missing(true); db_options.create_missing_column_families(true); db_options } /// Destroy the DB, i.e. delete all its associated files. /// /// According to the RocksDB docs, this differs from removing the dir: /// DestroyDB() will take care of the case where the RocksDB database is /// stored in multiple directories. For instance, a single DB can be /// configured to store its data in multiple directories by specifying /// different paths to DBOptions::db_paths, DBOptions::db_log_dir, and /// DBOptions::wal_dir. pub fn destroy(path: impl AsRef<Path>) -> Result<()> { let db_options = Self::db_options(); rocksdb::DB::destroy(&db_options, path).map_err(RocksDb)?; Ok(()) } /// Return a column family handle with the given name. pub fn cf(&self, name: &str) -> Result<&CF> { Ok(self .db .cf_handle(name) .ok_or_else(|| NoSuchColumnFamily(name.to_string()))?) } pub(crate) fn get( &self, cf: &CF, key: impl AsRef<[u8]>, ) -> Result<Option<rocksdb::DBPinnableSlice<'_>>> { Ok(self.db.get_pinned_cf(cf, key).map_err(RocksDb)?) } pub(crate) fn iterator_end( &self, cf: &CF, ) -> impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>> + '_ { self.db .iterator_cf(cf, IteratorMode::End) .map(|result| Ok(result.map_err(RocksDb)?)) } pub(crate) fn iterator( &self, cf: &CF, start: &[u8], direction: rocksdb::Direction, ) -> impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>> + '_ { self.db .iterator_cf(cf, IteratorMode::From(start, direction)) .map(|result| Ok(result.map_err(RocksDb)?)) } /// Writes the batch to the Db atomically. pub fn write_batch(&self, write_batch: WriteBatch) -> Result<()> { self.db.write(write_batch).map_err(RocksDb)?; Ok(()) } /// Whether any of the column families in the DB have any data. /// /// Note: RocksDB forbids not opening all column families, therefore, this /// will always iter through all column families. pub fn is_db_empty(&self) -> Result<bool> { for cf_name in &self.cf_names { let cf = self.cf(cf_name)?; let mut cf_iter = self.db.full_iterator_cf(cf, IteratorMode::Start); if cf_iter.next().is_some() { return Ok(false); } } Ok(true) } } diff --git a/chronik/chronik-db/src/io/block_stats.rs b/chronik/chronik-db/src/io/block_stats.rs index 8d9cd784d..98926039a 100644 --- a/chronik/chronik-db/src/io/block_stats.rs +++ b/chronik/chronik-db/src/io/block_stats.rs @@ -1,273 +1,271 @@ // Copyright (c) 2023 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. use abc_rust_error::Result; -#[cfg(test)] use rocksdb::ColumnFamilyDescriptor; use serde::{Deserialize, Serialize}; use crate::{ db::{Db, CF, CF_BLK_STATS}, index_tx::IndexTx, io::{bh_to_bytes, BlockHeight}, ser::{db_deserialize, db_serialize}, }; /// Statistics about a block, like num txs, block size, etc.
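///
/// Note: `sum_input_sats` and `sum_normal_output_sats` only cover
/// non-coinbase txs, so the total fees paid in a block can be derived from
/// them; a sketch, assuming `stats` was read via [`BlockStatsReader`]:
///
/// ```ignore
/// let total_fees = stats.sum_input_sats - stats.sum_normal_output_sats;
/// ```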
#[derive( Clone, Debug, Default, Deserialize, Eq, Hash, PartialEq, Serialize, )] pub struct BlockStats { /// Block size of this block in bytes (including headers etc.) pub block_size: u64, /// Number of txs in this block pub num_txs: u64, /// Total number of tx inputs in block (including coinbase) pub num_inputs: u64, /// Total number of tx outputs in block (including coinbase) pub num_outputs: u64, /// Total number of satoshis spent by tx inputs pub sum_input_sats: i64, /// Block reward for this block pub sum_coinbase_output_sats: i64, /// Total number of satoshis in non-coinbase tx outputs pub sum_normal_output_sats: i64, /// Total number of satoshis burned using OP_RETURN pub sum_burned_sats: i64, } struct BlockStatsColumn<'a> { db: &'a Db, cf: &'a CF, } /// Write [`BlockStats`] to the DB. #[derive(Debug)] pub struct BlockStatsWriter<'a> { col: BlockStatsColumn<'a>, } /// Read [`BlockStats`] from the DB. #[derive(Debug)] pub struct BlockStatsReader<'a> { col: BlockStatsColumn<'a>, } impl<'a> BlockStatsColumn<'a> { fn new(db: &'a Db) -> Result<Self> { let cf = db.cf(CF_BLK_STATS)?; Ok(BlockStatsColumn { db, cf }) } } impl<'a> BlockStatsWriter<'a> { /// Create a new [`BlockStatsWriter`]. pub fn new(db: &'a Db) -> Result<Self> { Ok(BlockStatsWriter { col: BlockStatsColumn::new(db)?, }) } /// Measure the [`BlockStats`] of the block with the given block txs and /// add them to the `WriteBatch`. pub fn insert( &self, batch: &mut rocksdb::WriteBatch, block_height: BlockHeight, block_size: u64, txs: &[IndexTx<'_>], ) -> Result<()> { let mut num_inputs = 0; let mut num_outputs = 0; let mut sum_input_sats = 0; let mut sum_normal_output_sats = 0; let mut sum_coinbase_output_sats = 0; let mut sum_burned_sats = 0; for tx in txs { for output in &tx.tx.outputs { if output.script.is_opreturn() { sum_burned_sats += output.value; } } let tx_output_sats = tx.tx.outputs.iter().map(|output| output.value).sum::<i64>(); if tx.is_coinbase { sum_coinbase_output_sats += tx_output_sats; } else { sum_normal_output_sats += tx_output_sats; for input in &tx.tx.inputs { if let Some(coin) = input.coin.as_ref() { sum_input_sats += coin.output.value; } } } num_inputs += tx.tx.inputs.len(); num_outputs += tx.tx.outputs.len(); } let stats = BlockStats { block_size, num_txs: txs.len() as u64, num_inputs: num_inputs as u64, num_outputs: num_outputs as u64, sum_input_sats, sum_coinbase_output_sats, sum_normal_output_sats, sum_burned_sats, }; batch.put_cf( self.col.cf, bh_to_bytes(block_height), db_serialize(&stats)?, ); Ok(()) } /// Delete the block stats for the block with the given height. pub fn delete( &self, batch: &mut rocksdb::WriteBatch, block_height: BlockHeight, ) { batch.delete_cf(self.col.cf, bh_to_bytes(block_height)); } - #[cfg(test)] pub(crate) fn add_cfs(columns: &mut Vec<ColumnFamilyDescriptor>) { columns.push(ColumnFamilyDescriptor::new( CF_BLK_STATS, rocksdb::Options::default(), )); } } impl<'a> BlockStatsReader<'a> { /// Create a new [`BlockStatsReader`]. pub fn new(db: &'a Db) -> Result<Self> { Ok(BlockStatsReader { col: BlockStatsColumn::new(db)?, }) } /// Read the [`BlockStats`] from the DB, or [`None`] if the block doesn't /// exist. pub fn by_height( &self, block_height: BlockHeight, ) -> Result<Option<BlockStats>> { match self.col.db.get(self.col.cf, bh_to_bytes(block_height))? { Some(ser_block_stats) => { Ok(Some(db_deserialize::<BlockStats>(&ser_block_stats)?)) } None => Ok(None), } } } impl std::fmt::Debug for BlockStatsColumn<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "BlockStatsColumn {{ ..
}}") } } #[cfg(test)] mod tests { use abc_rust_error::Result; use bitcoinsuite_core::{ script::{opcode::*, ScriptMut}, tx::{Tx, TxId, TxMut, TxOutput}, }; use pretty_assertions::assert_eq; use rocksdb::WriteBatch; use crate::{ db::Db, index_tx::prepare_indexed_txs, io::{ BlockStats, BlockStatsReader, BlockStatsWriter, BlockTxs, TxEntry, TxWriter, }, test::make_inputs_tx, }; #[test] fn test_block_stats() -> Result<()> { let tempdir = tempdir::TempDir::new("chronik-db--block_stats")?; let mut cfs = Vec::new(); TxWriter::add_cfs(&mut cfs); BlockStatsWriter::add_cfs(&mut cfs); let db = Db::open_with_cfs(tempdir.path(), cfs)?; let tx_writer = TxWriter::new(&db)?; let stats_writer = BlockStatsWriter::new(&db)?; let stats_reader = BlockStatsReader::new(&db)?; let block = vec![ make_inputs_tx(0x01, [(0x00, u32::MAX, 0xffff)], [50, 20]), make_inputs_tx(0x02, [(0x01, 0, 50)], [40, 10]), make_inputs_tx( 0x03, [(0x02, 0, 40), (0x01, 1, 20), (0x02, 1, 10)], [60, 5], ), Tx::with_txid( TxId::from([0x05; 32]), TxMut { version: 1, inputs: make_inputs_tx(0, [(0x03, 0, 60)], []) .inputs .clone(), outputs: vec![TxOutput { value: 60, script: { let mut script = ScriptMut::default(); script.put_opcodes([OP_RETURN, OP_1]); script.freeze() }, }], locktime: 0, }, ), ]; let block_txs = block .iter() .map(|tx| TxEntry { txid: tx.txid(), ..Default::default() }) .collect::<Vec<_>>(); let mut batch = WriteBatch::default(); let first_tx_num = tx_writer.insert( &mut batch, &BlockTxs { txs: block_txs, block_height: 1, }, )?; let index_txs = prepare_indexed_txs(&db, first_tx_num, &block)?; stats_writer.insert(&mut batch, 1, 1337, &index_txs)?; db.write_batch(batch)?; assert_eq!( stats_reader.by_height(1)?, Some(BlockStats { block_size: 1337, num_txs: 4, num_inputs: 6, num_outputs: 7, sum_input_sats: 180, sum_coinbase_output_sats: 70, sum_normal_output_sats: 175, sum_burned_sats: 60, }), ); let mut batch = WriteBatch::default(); stats_writer.delete(&mut batch, 1); db.write_batch(batch)?; assert_eq!(stats_reader.by_height(1)?, None); Ok(()) } } diff --git a/chronik/chronik-indexer/src/indexer.rs b/chronik/chronik-indexer/src/indexer.rs index f2a92ec07..5e1565ea6 100644 --- a/chronik/chronik-indexer/src/indexer.rs +++ b/chronik/chronik-indexer/src/indexer.rs @@ -1,691 +1,701 @@ // Copyright (c) 2022 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. //! Module containing [`ChronikIndexer`] to index blocks and txs.
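//!
//! A rough sketch of the intended setup flow (the `datadir_net` value is
//! hypothetical, and `bridge` is the node's [`ffi::ChronikBridge`] handle):
//!
//! ```ignore
//! let mut indexer = ChronikIndexer::setup(ChronikIndexerParams {
//!     datadir_net: "<datadir>/regtest".into(),
//!     wipe_db: false,
//!     fn_compress_script,
//! })?;
//! indexer.resync_indexer(&bridge)?;
//! ```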
use std::path::PathBuf; use abc_rust_error::{Result, WrapErr}; use bitcoinsuite_core::{ block::BlockHash, tx::{Tx, TxId}, }; use chronik_bridge::{ffi, util::expect_unique_ptr}; use chronik_db::{ db::{Db, WriteBatch}, groups::{ FnCompressScript, ScriptGroup, ScriptHistoryWriter, ScriptUtxoWriter, }, index_tx::prepare_indexed_txs, io::{ - BlockHeight, BlockReader, BlockTxs, BlockWriter, DbBlock, - MetadataReader, MetadataWriter, SchemaVersion, SpentByWriter, TxEntry, - TxWriter, + BlockHeight, BlockReader, BlockStatsWriter, BlockTxs, BlockWriter, + DbBlock, MetadataReader, MetadataWriter, SchemaVersion, SpentByWriter, + TxEntry, TxWriter, }, mem::{Mempool, MempoolTx}, }; use chronik_util::{log, log_chronik}; use thiserror::Error; use tokio::sync::RwLock; use crate::{ avalanche::Avalanche, query::{QueryBlocks, QueryGroupHistory, QueryGroupUtxos, QueryTxs}, subs::{BlockMsg, BlockMsgType, Subs}, subs_group::TxMsgType, }; -const CURRENT_INDEXER_VERSION: SchemaVersion = 6; +const CURRENT_INDEXER_VERSION: SchemaVersion = 7; /// Params for setting up a [`ChronikIndexer`] instance. #[derive(Clone)] pub struct ChronikIndexerParams { /// Folder where the node stores its data, net-dependent. pub datadir_net: PathBuf, /// Whether to clear the DB before opening the DB, e.g. when reindexing. pub wipe_db: bool, /// Function ptr to compress scripts. pub fn_compress_script: FnCompressScript, } /// Struct for indexing blocks and txs. Maintains db handles and mempool. #[derive(Debug)] pub struct ChronikIndexer { db: Db, mempool: Mempool, script_group: ScriptGroup, avalanche: Avalanche, subs: RwLock<Subs>, } /// Block to be indexed by Chronik. #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct ChronikBlock { /// Data about the block (w/o txs) pub db_block: DbBlock, /// Txs in the block, with locations of where they are stored on disk. pub block_txs: BlockTxs, + /// Block size in bytes. + pub size: u64, /// Txs in the block, with inputs/outputs so we can group them. pub txs: Vec<Tx>, } /// Errors for [`ChronikIndexer`]. #[derive(Debug, Eq, Error, PartialEq)] pub enum ChronikIndexerError { /// Failed creating the folder for the indexes #[error("Failed creating path {0}")] CreateIndexesDirFailed(PathBuf), /// Cannot rewind blocks that bitcoind doesn't have #[error( "Cannot rewind Chronik, it contains block {0} that the node doesn't \ have. You may need to use -reindex/-chronikreindex, or delete \ indexes/chronik and restart" )] CannotRewindChronik(BlockHash), /// Lower block doesn't exist but higher block does #[error( "Inconsistent DB: Block {missing} doesn't exist, but {exists} does" )] BlocksBelowMissing { /// Lower height that is missing missing: BlockHeight, /// Higher height that exists exists: BlockHeight, }, /// Corrupted schema version #[error( "Corrupted schema version in the Chronik database, consider running \ -reindex/-chronikreindex" )] CorruptedSchemaVersion, /// Missing schema version for non-empty database #[error( "Missing schema version in non-empty Chronik database, consider \ running -reindex/-chronikreindex" )] MissingSchemaVersion, /// This Chronik instance is outdated #[error( "Chronik outdated: Chronik has version {}, but the database has \ version {0}. Upgrade your node to the appropriate version.", CURRENT_INDEXER_VERSION )] ChronikOutdated(SchemaVersion), /// Database is outdated #[error( "DB outdated: Chronik has version {}, but the database has version \ {0}.
Use -reindex/-chronikreindex to reindex the database to the new \ version.", CURRENT_INDEXER_VERSION )] DatabaseOutdated(SchemaVersion), } use self::ChronikIndexerError::*; impl ChronikIndexer { /// Setup the indexer with the given parameters, e.g. open the DB etc. pub fn setup(params: ChronikIndexerParams) -> Result<Self> { let indexes_path = params.datadir_net.join("indexes"); if !indexes_path.exists() { std::fs::create_dir(&indexes_path).wrap_err_with(|| { CreateIndexesDirFailed(indexes_path.clone()) })?; } let db_path = indexes_path.join("chronik"); if params.wipe_db { log!("Wiping Chronik at {}\n", db_path.to_string_lossy()); Db::destroy(&db_path)?; } log_chronik!("Opening Chronik at {}\n", db_path.to_string_lossy()); let db = Db::open(&db_path)?; verify_schema_version(&db)?; let script_group = ScriptGroup::new(params.fn_compress_script); let mempool = Mempool::new(script_group.clone()); Ok(ChronikIndexer { db, mempool, script_group: script_group.clone(), avalanche: Avalanche::default(), subs: RwLock::new(Subs::new(script_group)), }) } /// Resync Chronik index to the node pub fn resync_indexer( &mut self, bridge: &ffi::ChronikBridge, ) -> Result<()> { let block_reader = BlockReader::new(&self.db)?; let indexer_tip = block_reader.tip()?; let Ok(node_tip_index) = bridge.get_chain_tip() else { if let Some(indexer_tip) = &indexer_tip { return Err( CannotRewindChronik(indexer_tip.hash.clone()).into() ); } return Ok(()); }; let node_tip_info = ffi::get_block_info(node_tip_index); let node_height = node_tip_info.height; let node_tip_hash = BlockHash::from(node_tip_info.hash); let fork_height = match indexer_tip { Some(tip) => { let indexer_tip_hash = tip.hash.clone(); let indexer_height = tip.height; log!( "Node and Chronik diverged, node is on block \ {node_tip_hash} at height {node_height}, and Chronik is \ on block {indexer_tip_hash} at height {indexer_height}.\n" ); let indexer_tip_index = bridge .lookup_block_index(tip.hash.to_bytes()) .map_err(|_| CannotRewindChronik(tip.hash.clone()))?; self.rewind_indexer(bridge, indexer_tip_index, &tip)?
} None => { log!( "Chronik database empty, syncing to block {node_tip_hash} \ at height {node_height}.\n" ); -1 } }; let tip_height = node_tip_info.height; for height in fork_height + 1..=tip_height { let block_index = ffi::get_block_ancestor(node_tip_index, height)?; let ffi_block = bridge.load_block(block_index)?; let ffi_block = expect_unique_ptr("load_block", &ffi_block); let block = self.make_chronik_block(ffi_block, block_index)?; let hash = block.db_block.hash.clone(); self.handle_block_connected(block)?; log_chronik!( "Added block {hash}, height {height}/{tip_height} to Chronik\n" ); if height % 100 == 0 { log!( "Synced Chronik up to block {hash} at height \ {height}/{tip_height}\n" ); } } log!( "Chronik completed re-syncing with the node, both are now at \ block {node_tip_hash} at height {node_height}.\n" ); Ok(()) } fn rewind_indexer( &mut self, bridge: &ffi::ChronikBridge, indexer_tip_index: &ffi::CBlockIndex, indexer_db_tip: &DbBlock, ) -> Result<BlockHeight> { let indexer_height = indexer_db_tip.height; let fork_block_index = bridge .find_fork(indexer_tip_index) .map_err(|_| CannotRewindChronik(indexer_db_tip.hash.clone()))?; let fork_info = ffi::get_block_info(fork_block_index); let fork_block_hash = BlockHash::from(fork_info.hash); let fork_height = fork_info.height; let revert_height = fork_height + 1; log!( "The last common block is {fork_block_hash} at height \ {fork_height}.\n" ); log!("Reverting Chronik blocks {revert_height} to {indexer_height}.\n"); for height in (revert_height..=indexer_height).rev() { let db_block = BlockReader::new(&self.db)? .by_height(height)? .ok_or(BlocksBelowMissing { missing: height, exists: indexer_height, })?; let block_index = bridge .lookup_block_index(db_block.hash.to_bytes()) .map_err(|_| CannotRewindChronik(db_block.hash))?; let ffi_block = bridge.load_block(block_index)?; let ffi_block = expect_unique_ptr("load_block", &ffi_block); let block = self.make_chronik_block(ffi_block, block_index)?; self.handle_block_disconnected(block)?; } Ok(fork_info.height) } /// Add transaction to the indexer's mempool. pub fn handle_tx_added_to_mempool( &mut self, mempool_tx: MempoolTx, ) -> Result<()> { self.subs .get_mut() .handle_tx_event(&mempool_tx.tx, TxMsgType::AddedToMempool); self.mempool.insert(mempool_tx)?; Ok(()) } /// Remove tx from the indexer's mempool, e.g. by a conflicting tx, expiry /// etc. This is not called when the transaction has been mined (and thus /// also removed from the mempool). pub fn handle_tx_removed_from_mempool(&mut self, txid: TxId) -> Result<()> { let mempool_tx = self.mempool.remove(txid)?; self.subs .get_mut() .handle_tx_event(&mempool_tx.tx, TxMsgType::RemovedFromMempool); Ok(()) } /// Add the block to the index.
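///
/// All writers contribute to a single [`WriteBatch`] that is committed
/// atomically at the end, so a crash cannot leave the index partially
/// updated. A sketch of the call pattern used during resync:
///
/// ```ignore
/// let block = indexer.make_chronik_block(ffi_block, block_index)?;
/// indexer.handle_block_connected(block)?;
/// ```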
pub fn handle_block_connected( &mut self, block: ChronikBlock, ) -> Result<()> { + let height = block.db_block.height; let mut batch = WriteBatch::default(); let block_writer = BlockWriter::new(&self.db)?; let tx_writer = TxWriter::new(&self.db)?; + let block_stats_writer = BlockStatsWriter::new(&self.db)?; let script_history_writer = ScriptHistoryWriter::new(&self.db, self.script_group.clone())?; let script_utxo_writer = ScriptUtxoWriter::new(&self.db, self.script_group.clone())?; let spent_by_writer = SpentByWriter::new(&self.db)?; block_writer.insert(&mut batch, &block.db_block)?; let first_tx_num = tx_writer.insert(&mut batch, &block.block_txs)?; let index_txs = prepare_indexed_txs(&self.db, first_tx_num, &block.txs)?; + block_stats_writer + .insert(&mut batch, height, block.size, &index_txs)?; script_history_writer.insert(&mut batch, &index_txs)?; script_utxo_writer.insert(&mut batch, &index_txs)?; spent_by_writer.insert(&mut batch, &index_txs)?; self.db.write_batch(batch)?; for tx in &block.block_txs.txs { self.mempool.remove_mined(&tx.txid)?; } let subs = self.subs.get_mut(); subs.broadcast_block_msg(BlockMsg { msg_type: BlockMsgType::Connected, hash: block.db_block.hash, height: block.db_block.height, }); for tx in &block.txs { subs.handle_tx_event(tx, TxMsgType::Confirmed); } Ok(()) } /// Remove the block from the index. pub fn handle_block_disconnected( &mut self, block: ChronikBlock, ) -> Result<()> { let mut batch = WriteBatch::default(); let block_writer = BlockWriter::new(&self.db)?; let tx_writer = TxWriter::new(&self.db)?; + let block_stats_writer = BlockStatsWriter::new(&self.db)?; let script_history_writer = ScriptHistoryWriter::new(&self.db, self.script_group.clone())?; let script_utxo_writer = ScriptUtxoWriter::new(&self.db, self.script_group.clone())?; let spent_by_writer = SpentByWriter::new(&self.db)?; block_writer.delete(&mut batch, &block.db_block)?; let first_tx_num = tx_writer.delete(&mut batch, &block.block_txs)?; let index_txs = prepare_indexed_txs(&self.db, first_tx_num, &block.txs)?; + block_stats_writer.delete(&mut batch, block.db_block.height); script_history_writer.delete(&mut batch, &index_txs)?; script_utxo_writer.delete(&mut batch, &index_txs)?; spent_by_writer.delete(&mut batch, &index_txs)?; self.avalanche.disconnect_block(block.db_block.height)?; self.db.write_batch(batch)?; let subs = self.subs.get_mut(); subs.broadcast_block_msg(BlockMsg { msg_type: BlockMsgType::Disconnected, hash: block.db_block.hash, height: block.db_block.height, }); Ok(()) } /// Block finalized with Avalanche. pub fn handle_block_finalized( &mut self, block: ChronikBlock, ) -> Result<()> { self.avalanche.finalize_block(block.db_block.height)?; let subs = self.subs.get_mut(); subs.broadcast_block_msg(BlockMsg { msg_type: BlockMsgType::Finalized, hash: block.db_block.hash, height: block.db_block.height, }); for tx in &block.txs { subs.handle_tx_event(tx, TxMsgType::Finalized); } Ok(()) } /// Return [`QueryBlocks`] to read blocks from the DB. pub fn blocks(&self) -> QueryBlocks<'_> { QueryBlocks { db: &self.db, avalanche: &self.avalanche, } } /// Return [`QueryTxs`] to return txs from mempool/DB. pub fn txs(&self) -> QueryTxs<'_> { QueryTxs { db: &self.db, avalanche: &self.avalanche, mempool: &self.mempool, } } /// Return [`QueryGroupHistory`] for scripts to query the tx history of /// scripts. 
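///
/// Sketch of obtaining the query handles (the read methods on the
/// returned structs live in [`crate::query`] and are elided here):
///
/// ```ignore
/// let history = indexer.script_history()?;
/// let utxos = indexer.script_utxos()?;
/// ```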
pub fn script_history(&self) -> Result<QueryGroupHistory<'_>> { Ok(QueryGroupHistory { db: &self.db, avalanche: &self.avalanche, mempool: &self.mempool, mempool_history: self.mempool.script_history(), group: self.script_group.clone(), }) } /// Return [`QueryGroupUtxos`] for scripts to query the utxos of scripts. pub fn script_utxos(&self) -> Result<QueryGroupUtxos<'_>> { Ok(QueryGroupUtxos { db: &self.db, avalanche: &self.avalanche, mempool: &self.mempool, mempool_utxos: self.mempool.script_utxos(), group: self.script_group.clone(), }) } /// Subscribers, behind read/write lock pub fn subs(&self) -> &RwLock<Subs> { &self.subs } /// Build a [`ChronikBlock`] from a [`ffi::CBlock`] and its /// [`ffi::CBlockIndex`]. pub fn make_chronik_block( &self, block: &ffi::CBlock, bindex: &ffi::CBlockIndex, ) -> Result<ChronikBlock> { let block = ffi::bridge_block(block, bindex)?; let db_block = DbBlock { hash: BlockHash::from(block.hash), prev_hash: BlockHash::from(block.prev_hash), height: block.height, n_bits: block.n_bits, timestamp: block.timestamp, file_num: block.file_num, data_pos: block.data_pos, }; let block_txs = BlockTxs { block_height: block.height, txs: block .txs .iter() .map(|tx| { let txid = TxId::from(tx.tx.txid); TxEntry { txid, data_pos: tx.data_pos, undo_pos: tx.undo_pos, time_first_seen: match self.mempool.tx(&txid) { Some(tx) => tx.time_first_seen, None => 0, }, is_coinbase: tx.undo_pos == 0, } }) .collect(), }; let txs = block .txs .into_iter() .map(|block_tx| Tx::from(block_tx.tx)) .collect::<Vec<_>>(); Ok(ChronikBlock { db_block, block_txs, + size: block.size, txs, }) } } fn verify_schema_version(db: &Db) -> Result<()> { let metadata_reader = MetadataReader::new(db)?; let metadata_writer = MetadataWriter::new(db)?; let is_empty = db.is_db_empty()?; match metadata_reader .schema_version() .wrap_err(CorruptedSchemaVersion)? { Some(schema_version) => { assert!(!is_empty, "Empty DB can't have a schema version"); if schema_version > CURRENT_INDEXER_VERSION { return Err(ChronikOutdated(schema_version).into()); } if schema_version < CURRENT_INDEXER_VERSION { return Err(DatabaseOutdated(schema_version).into()); } } None => { if !is_empty { return Err(MissingSchemaVersion.into()); } let mut batch = WriteBatch::default(); metadata_writer .update_schema_version(&mut batch, CURRENT_INDEXER_VERSION)?; db.write_batch(batch)?; } } log!("Chronik has version {CURRENT_INDEXER_VERSION}\n"); Ok(()) } impl std::fmt::Debug for ChronikIndexerParams { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChronikIndexerParams") .field("datadir_net", &self.datadir_net) .field("wipe_db", &self.wipe_db) .field("fn_compress_script", &"..") .finish() } } #[cfg(test)] mod tests { use abc_rust_error::Result; use bitcoinsuite_core::block::BlockHash; use chronik_db::{ db::{Db, WriteBatch, CF_META}, groups::prefix_mock_compress, io::{BlockReader, BlockTxs, DbBlock, MetadataReader, MetadataWriter}, }; use pretty_assertions::assert_eq; use crate::indexer::{ ChronikBlock, ChronikIndexer, ChronikIndexerError, ChronikIndexerParams, CURRENT_INDEXER_VERSION, }; #[test] fn test_indexer() -> Result<()> { let tempdir = tempdir::TempDir::new("chronik-indexer--indexer")?; let datadir_net = tempdir.path().join("regtest"); let params = ChronikIndexerParams { datadir_net: datadir_net.clone(), wipe_db: false, fn_compress_script: prefix_mock_compress, }; // regtest folder doesn't exist yet -> error assert_eq!( ChronikIndexer::setup(params.clone()) .unwrap_err() .downcast::<ChronikIndexerError>()?, ChronikIndexerError::CreateIndexesDirFailed( datadir_net.join("indexes"), ), ); // create regtest folder, setup will work now
std::fs::create_dir(&datadir_net)?; let mut indexer = ChronikIndexer::setup(params.clone())?; // indexes and indexes/chronik folder now exist assert!(datadir_net.join("indexes").exists()); assert!(datadir_net.join("indexes").join("chronik").exists()); // DB is empty assert_eq!(BlockReader::new(&indexer.db)?.by_height(0)?, None); let block = ChronikBlock { db_block: DbBlock { hash: BlockHash::from([4; 32]), prev_hash: BlockHash::from([0; 32]), height: 0, n_bits: 0x1deadbef, timestamp: 1234567890, file_num: 0, data_pos: 1337, }, block_txs: BlockTxs { block_height: 0, txs: vec![], }, + size: 285, txs: vec![], }; // Add block indexer.handle_block_connected(block.clone())?; assert_eq!( BlockReader::new(&indexer.db)?.by_height(0)?, Some(block.db_block.clone()) ); // Remove block again indexer.handle_block_disconnected(block.clone())?; assert_eq!(BlockReader::new(&indexer.db)?.by_height(0)?, None); // Add block then wipe, block not there indexer.handle_block_connected(block)?; std::mem::drop(indexer); let indexer = ChronikIndexer::setup(ChronikIndexerParams { wipe_db: true, ..params })?; assert_eq!(BlockReader::new(&indexer.db)?.by_height(0)?, None); Ok(()) } #[test] fn test_schema_version() -> Result<()> { let dir = tempdir::TempDir::new("chronik-indexer--schema_version")?; let chronik_path = dir.path().join("indexes").join("chronik"); let params = ChronikIndexerParams { datadir_net: dir.path().to_path_buf(), wipe_db: false, fn_compress_script: prefix_mock_compress, }; // Setting up DB first time sets the schema version ChronikIndexer::setup(params.clone())?; { let db = Db::open(&chronik_path)?; assert_eq!( MetadataReader::new(&db)?.schema_version()?, Some(CURRENT_INDEXER_VERSION) ); } // Opening DB again works fine ChronikIndexer::setup(params.clone())?; // Override DB schema version to 0 { let db = Db::open(&chronik_path)?; let mut batch = WriteBatch::default(); MetadataWriter::new(&db)?.update_schema_version(&mut batch, 0)?; db.write_batch(batch)?; } // -> DB too old assert_eq!( ChronikIndexer::setup(params.clone()) .unwrap_err() .downcast::<ChronikIndexerError>()?, ChronikIndexerError::DatabaseOutdated(0), ); // Override DB schema version to CURRENT_INDEXER_VERSION + 1 { let db = Db::open(&chronik_path)?; let mut batch = WriteBatch::default(); MetadataWriter::new(&db)?.update_schema_version( &mut batch, CURRENT_INDEXER_VERSION + 1, )?; db.write_batch(batch)?; } // -> Chronik too old assert_eq!( ChronikIndexer::setup(params.clone()) .unwrap_err() .downcast::<ChronikIndexerError>()?, ChronikIndexerError::ChronikOutdated(CURRENT_INDEXER_VERSION + 1), ); // Corrupt schema version { let db = Db::open(&chronik_path)?; let cf_meta = db.cf(CF_META)?; let mut batch = WriteBatch::default(); batch.put_cf(cf_meta, b"SCHEMA_VERSION", [0xff]); db.write_batch(batch)?; } assert_eq!( ChronikIndexer::setup(params.clone()) .unwrap_err() .downcast::<ChronikIndexerError>()?, ChronikIndexerError::CorruptedSchemaVersion, ); // New db path, but has existing data let new_dir = dir.path().join("new"); let new_chronik_path = new_dir.join("indexes").join("chronik"); std::fs::create_dir_all(&new_chronik_path)?; let new_params = ChronikIndexerParams { datadir_net: new_dir, wipe_db: false, ..params }; { // new db with obscure field in meta let db = Db::open(&new_chronik_path)?; let mut batch = WriteBatch::default(); batch.put_cf(db.cf(CF_META)?, b"FOO", b"BAR"); db.write_batch(batch)?; } // Error: non-empty DB without schema version assert_eq!( ChronikIndexer::setup(new_params.clone()) .unwrap_err() .downcast::<ChronikIndexerError>()?, ChronikIndexerError::MissingSchemaVersion, ); // with wipe it works
ChronikIndexer::setup(ChronikIndexerParams { wipe_db: true, ..new_params })?; Ok(()) } } diff --git a/chronik/chronik-indexer/src/query/blocks.rs b/chronik/chronik-indexer/src/query/blocks.rs index fbfa42415..0447185b9 100644 --- a/chronik/chronik-indexer/src/query/blocks.rs +++ b/chronik/chronik-indexer/src/query/blocks.rs @@ -1,139 +1,165 @@ // Copyright (c) 2023 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. //! Module for [`QueryBlocks`], to query blocks. use abc_rust_error::Result; use bitcoinsuite_core::block::BlockHash; use chronik_db::{ db::Db, - io::{BlockHeight, BlockReader, DbBlock}, + io::{BlockHeight, BlockReader, BlockStats, BlockStatsReader, DbBlock}, }; use chronik_proto::proto; use thiserror::Error; use crate::avalanche::Avalanche; const MAX_BLOCKS_PAGE_SIZE: usize = 500; /// Struct for querying blocks from the DB. #[derive(Debug)] pub struct QueryBlocks<'a> { /// Db. pub db: &'a Db, /// Avalanche. pub avalanche: &'a Avalanche, } /// Errors indicating something went wrong with querying blocks. #[derive(Debug, Error, PartialEq)] pub enum QueryBlockError { /// Query is neither a hex hash nor an integer string #[error("400: Not a hash or height: {0}")] NotHashOrHeight(String), /// Block not found in DB #[error("404: Block not found: {0}")] BlockNotFound(String), /// Invalid block start height #[error("400: Invalid block start height: {0}")] InvalidStartHeight(BlockHeight), /// Invalid block end height #[error("400: Invalid block end height: {0}")] InvalidEndHeight(BlockHeight), /// Blocks page size too large #[error( "400: Blocks page size too large, may not be above {} but got {0}", MAX_BLOCKS_PAGE_SIZE )] BlocksPageSizeTooLarge(usize), + + /// DB is missing block stats + #[error("500: Inconsistent DB: Missing block stats for height {0}")] + MissingBlockStats(BlockHeight), } use self::QueryBlockError::*; impl<'a> QueryBlocks<'a> { /// Query a block by hash or height from DB. /// /// `height` may not have any leading zeros, because otherwise it might /// become ambiguous with a hash. pub fn by_hash_or_height( &self, hash_or_height: String, ) -> Result<proto::Block> { let db_blocks = BlockReader::new(self.db)?; + let block_stats_reader = BlockStatsReader::new(self.db)?; let db_block = if let Ok(hash) = hash_or_height.parse::<BlockHash>() { db_blocks.by_hash(&hash)? } else { let height = match hash_or_height.parse::<BlockHeight>() { // disallow leading zeros Ok(0) if hash_or_height.len() == 1 => 0, Ok(height) if !hash_or_height.starts_with('0') => height, _ => return Err(NotHashOrHeight(hash_or_height).into()), }; db_blocks.by_height(height)? }; let db_block = db_block.ok_or(BlockNotFound(hash_or_height))?; + let block_stats = block_stats_reader + .by_height(db_block.height)? + .ok_or(MissingBlockStats(db_block.height))?; Ok(proto::Block { - block_info: Some(self.make_block_info_proto(&db_block)), + block_info: Some( + self.make_block_info_proto(&db_block, &block_stats), + ), }) } /// Query blocks by a range of heights. Start and end heights are inclusive.
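///
/// Since the range is inclusive and capped at [`MAX_BLOCKS_PAGE_SIZE`]
/// blocks, this sketch queries the largest allowed page:
///
/// ```ignore
/// let page = query_blocks.by_range(0, 499)?; // 500 blocks, the maximum
/// ```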
pub fn by_range( &self, start_height: BlockHeight, end_height: BlockHeight, ) -> Result<proto::Blocks> { if start_height < 0 { return Err(InvalidStartHeight(start_height).into()); } if end_height < start_height { return Err(InvalidEndHeight(end_height).into()); } let num_blocks = end_height as usize - start_height as usize + 1; if num_blocks > MAX_BLOCKS_PAGE_SIZE { return Err(BlocksPageSizeTooLarge(num_blocks).into()); } let block_reader = BlockReader::new(self.db)?; + let block_stats_reader = BlockStatsReader::new(self.db)?; let mut blocks = Vec::with_capacity(num_blocks); for block_height in start_height..=end_height { let block = block_reader.by_height(block_height)?; let block = match block { Some(block) => block, None => break, }; - blocks.push(self.make_block_info_proto(&block)); + let block_stats = block_stats_reader + .by_height(block_height)? + .ok_or(MissingBlockStats(block_height))?; + blocks.push(self.make_block_info_proto(&block, &block_stats)); } Ok(proto::Blocks { blocks }) } /// Query some info about the blockchain, e.g. the tip hash and height. pub fn blockchain_info(&self) -> Result<proto::BlockchainInfo> { let block_reader = BlockReader::new(self.db)?; match block_reader.tip()? { Some(block) => Ok(proto::BlockchainInfo { tip_hash: block.hash.to_vec(), tip_height: block.height, }), None => Ok(proto::BlockchainInfo { tip_hash: vec![0; 32], tip_height: -1, }), } } - fn make_block_info_proto(&self, db_block: &DbBlock) -> proto::BlockInfo { + fn make_block_info_proto( + &self, + db_block: &DbBlock, + block_stats: &BlockStats, + ) -> proto::BlockInfo { proto::BlockInfo { hash: db_block.hash.to_vec(), prev_hash: db_block.prev_hash.to_vec(), height: db_block.height, n_bits: db_block.n_bits, timestamp: db_block.timestamp, is_final: self.avalanche.is_final_height(db_block.height), + block_size: block_stats.block_size, + num_txs: block_stats.num_txs, + num_inputs: block_stats.num_inputs, + num_outputs: block_stats.num_outputs, + sum_input_sats: block_stats.sum_input_sats, + sum_coinbase_output_sats: block_stats.sum_coinbase_output_sats, + sum_normal_output_sats: block_stats.sum_normal_output_sats, + sum_burned_sats: block_stats.sum_burned_sats, } } } diff --git a/chronik/chronik-proto/proto/chronik.proto b/chronik/chronik-proto/proto/chronik.proto index 6b74da3eb..ae1d0a3db 100644 --- a/chronik/chronik-proto/proto/chronik.proto +++ b/chronik/chronik-proto/proto/chronik.proto @@ -1,242 +1,258 @@ // Copyright (c) 2023 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. syntax = "proto3"; package chronik; // Block on the blockchain message Block { // Info about the block BlockInfo block_info = 1; } // Range of blocks message Blocks { // Queried blocks repeated BlockInfo blocks = 1; } // Info about the state of the blockchain. message BlockchainInfo { // Hash (little-endian) of the current tip bytes tip_hash = 1; // Height of the current tip (genesis has height = 0) int32 tip_height = 2; } // Info about a block message BlockInfo { // Hash (little-endian) bytes hash = 1; // Hash of the previous block (little-endian) bytes prev_hash = 2; // Height in the chain int32 height = 3; // nBits field encoding the target uint32 n_bits = 4; // Timestamp field of the block int64 timestamp = 5; // Whether the block has been finalized by Avalanche bool is_final = 14; + // Block size of this block in bytes (including headers etc.)
+ uint64 block_size = 6; + // Number of txs in this block + uint64 num_txs = 7; + // Total number of tx inputs in block (including coinbase) + uint64 num_inputs = 8; + // Total number of tx outputs in block (including coinbase) + uint64 num_outputs = 9; + // Total number of satoshis spent by tx inputs + int64 sum_input_sats = 10; + // Block reward for this block + int64 sum_coinbase_output_sats = 11; + // Total number of satoshis in non-coinbase tx outputs + int64 sum_normal_output_sats = 12; + // Total number of satoshis burned using OP_RETURN + int64 sum_burned_sats = 13; } // Details about a transaction message Tx { // TxId (little-endian) of the tx bytes txid = 1; // nVersion int32 version = 2; // Inputs of the tx (aka. `vin`) repeated TxInput inputs = 3; // Outputs of the tx (aka. `vout`) repeated TxOutput outputs = 4; // nLockTime uint32 lock_time = 5; // Which block this tx is in, or None, if in the mempool BlockMetadata block = 8; // Time this tx has first been added to the mempool, or 0 if unknown int64 time_first_seen = 9; // Whether this tx is a coinbase tx bool is_coinbase = 12; } // UTXO of a script. message ScriptUtxo { // txid and out_idx of the unspent output. OutPoint outpoint = 1; // Block height of the UTXO, or -1 if in mempool. int32 block_height = 2; // Whether the UTXO has been created in a coinbase tx. bool is_coinbase = 3; // Value of the output, in satoshis. int64 value = 5; // Whether the UTXO has been finalized by Avalanche. bool is_final = 10; } // COutPoint, points to a coin being spent by an input. message OutPoint { // TxId of the tx of the output being spent. bytes txid = 1; // Index of the output spent within the transaction. uint32 out_idx = 2; } // Points to an input spending a coin. message SpentBy { // TxId of the tx with the input. bytes txid = 1; // Index in the inputs of the tx. uint32 input_idx = 2; } // CTxIn, spends a coin. message TxInput { // Reference to the coin being spent. OutPoint prev_out = 1; // scriptSig, script unlocking the coin. bytes input_script = 2; // scriptPubKey, script of the output locking the coin. bytes output_script = 3; // Value of the output being spent, in satoshis. int64 value = 4; // nSequence of the input. uint32 sequence_no = 5; } // CTxOut, creates a new coin. message TxOutput { // Value of the coin, in satoshis. int64 value = 1; // scriptPubKey, script locking the output. bytes output_script = 2; // Which tx and input spent this output, if any. SpentBy spent_by = 4; } // Data about a block which a Tx is in. message BlockMetadata { // Height of the block the tx is in. int32 height = 1; // Hash of the block the tx is in. bytes hash = 2; // nTime of the block the tx is in. int64 timestamp = 3; // Whether the block has been finalized by Avalanche. bool is_final = 4; } // Page with txs message TxHistoryPage { // Txs of the page repeated Tx txs = 1; // How many pages there are total uint32 num_pages = 2; // How many txs there are total uint32 num_txs = 3; } // List of UTXOs of a script message ScriptUtxos { // The serialized script of the UTXOs bytes script = 1; // UTXOs of the script. repeated ScriptUtxo utxos = 2; } // Raw serialized tx. message RawTx { // Bytes of the serialized tx. bytes raw_tx = 1; } // Subscription to WebSocket updates. message WsSub { // Set this to `true` to unsubscribe from the event. bool is_unsub = 1; // What kind of updates to subscribe to.
oneof sub_type { // Subscription to block updates WsSubBlocks blocks = 2; // Subscription to a script WsSubScript script = 3; } } // Subscription to blocks. They will be sent any time a block gets connected, // disconnected or finalized. message WsSubBlocks {} // Subscription to a script. They will be sent every time a tx spending the // given script or sending to the given script has been added to/removed from // the mempool, or confirmed in a block. message WsSubScript { // Script type to subscribe to ("p2pkh", "p2sh", "p2pk", "other"). string script_type = 1; // Payload for the given script type: // - 20-byte hash for "p2pkh" and "p2sh" // - 33-byte or 65-byte pubkey for "p2pk" // - Serialized script for "other" bytes payload = 2; } // Message coming from the WebSocket message WsMsg { // Kind of message oneof msg_type { // Error, e.g. when a bad message has been sent into the WebSocket. Error error = 1; // Block got connected, disconnected, finalized, etc. MsgBlock block = 2; // Tx got added to/removed from the mempool, or confirmed in a block. MsgTx tx = 3; } } // Block got connected, disconnected, finalized, etc. message MsgBlock { // What happened to the block BlockMsgType msg_type = 1; // Hash of the block (little-endian) bytes block_hash = 2; // Height of the block int32 block_height = 3; } // Type of message for the block enum BlockMsgType { // Block connected to the blockchain BLK_CONNECTED = 0; // Block disconnected from the blockchain BLK_DISCONNECTED = 1; // Block has been finalized by Avalanche BLK_FINALIZED = 2; } // Tx got added to/removed from mempool, or confirmed in a block, etc. message MsgTx { // What happened to the tx TxMsgType msg_type = 1; // Txid of the tx (little-endian) bytes txid = 2; } // Type of message for a tx enum TxMsgType { // Tx added to the mempool TX_ADDED_TO_MEMPOOL = 0; // Tx removed from the mempool TX_REMOVED_FROM_MEMPOOL = 1; // Tx confirmed in a block TX_CONFIRMED = 2; // Tx finalized by Avalanche TX_FINALIZED = 3; } // Error message returned from our APIs. message Error { // 2, as legacy chronik uses this for the message so we're still compatible. string msg = 2; } diff --git a/test/functional/chronik_block.py b/test/functional/chronik_block.py index a3993fe7f..579d87361 100644 --- a/test/functional/chronik_block.py +++ b/test/functional/chronik_block.py @@ -1,111 +1,135 @@ #!/usr/bin/env python3 # Copyright (c) 2023 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test Chronik's /block endpoint.
""" from test_framework.address import ADDRESS_ECREG_P2SH_OP_TRUE, ADDRESS_ECREG_UNSPENDABLE from test_framework.blocktools import GENESIS_BLOCK_HASH, TIME_GENESIS_BLOCK from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class ChronikBlockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [['-chronik']] self.rpc_timeout = 240 def skip_test_if_missing_module(self): self.skip_if_no_chronik() def run_test(self): from test_framework.chronik.client import ChronikClient, pb node = self.nodes[0] chronik = ChronikClient('127.0.0.1', node.chronik_port) expected_genesis_block = pb.Block( block_info=pb.BlockInfo( hash=bytes.fromhex(GENESIS_BLOCK_HASH)[::-1], prev_hash=bytes(32), height=0, n_bits=0x207fffff, timestamp=TIME_GENESIS_BLOCK, + block_size=285, + num_txs=1, + num_inputs=1, + num_outputs=1, + sum_input_sats=0, + sum_coinbase_output_sats=5000000000, + sum_normal_output_sats=0, + sum_burned_sats=0, ), ) # Not a valid hash or height assert_equal(chronik.block('1234f').err(400).msg, '400: Not a hash or height: 1234f') assert_equal(chronik.block('00' * 31).err(400).msg, f'400: Not a hash or height: {"00"*31}') assert_equal(chronik.block('01').err(400).msg, '400: Not a hash or height: 01') assert_equal(chronik.block('12345678901').err(400).msg, '400: Not a hash or height: 12345678901') # Query genesis block using height assert_equal(chronik.block(0).ok(), expected_genesis_block) # Or hash assert_equal(chronik.block(GENESIS_BLOCK_HASH).ok(), expected_genesis_block) # Block 1 not found assert_equal(chronik.block(1).err(404).msg, '404: Block not found: 1') # Block "0000...0000" not found assert_equal(chronik.block('00' * 32).err(404).msg, f'404: Block not found: {"00"*32}') # Generate 100 blocks, verify they form a chain block_hashes = ( [GENESIS_BLOCK_HASH] + self.generatetoaddress(node, 100, ADDRESS_ECREG_P2SH_OP_TRUE) ) for i in range(1, 101): proto_block = chronik.block(i).ok() assert_equal(proto_block, pb.Block( block_info=pb.BlockInfo( hash=bytes.fromhex(block_hashes[i])[::-1], prev_hash=bytes.fromhex(block_hashes[i - 1])[::-1], height=i, n_bits=0x207fffff, timestamp=proto_block.block_info.timestamp, + block_size=181, + num_txs=1, + num_inputs=1, + num_outputs=1, + sum_input_sats=0, + sum_coinbase_output_sats=5000000000, + sum_normal_output_sats=0, + sum_burned_sats=0, ), )) assert_equal(proto_block, chronik.block(block_hashes[i]).ok()) block_hashes.append(proto_block.block_info.hash) # Invalidate in the middle of the chain node.invalidateblock(block_hashes[50]) # Gives 404 for the invalidated blocks for i in range(50, 101): assert_equal(chronik.block(i).err(404).msg, f'404: Block not found: {i}') assert_equal( chronik.block(block_hashes[i]).err(404).msg, f'404: Block not found: {block_hashes[i]}') # Previous blocks are still fine for i in range(0, 50): chronik.block(i).ok() chronik.block(block_hashes[i]).ok() # Mine fork block and check it connects fork_hash = self.generatetoaddress(node, 1, ADDRESS_ECREG_UNSPENDABLE)[0] proto_block = chronik.block(50).ok() assert_equal(proto_block, pb.Block( block_info=pb.BlockInfo( hash=bytes.fromhex(fork_hash)[::-1], prev_hash=bytes.fromhex(block_hashes[49])[::-1], height=50, n_bits=0x207fffff, timestamp=proto_block.block_info.timestamp, + block_size=181, + num_txs=1, + num_inputs=1, + num_outputs=1, + sum_input_sats=0, + sum_coinbase_output_sats=5000000000, + sum_normal_output_sats=0, + sum_burned_sats=0, ), )) 
assert_equal(chronik.block(fork_hash).ok(), proto_block) if __name__ == '__main__': ChronikBlockTest().main() diff --git a/test/functional/chronik_block_info.py b/test/functional/chronik_block_info.py new file mode 100644 index 000000000..731c3328a --- /dev/null +++ b/test/functional/chronik_block_info.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# Copyright (c) 2023 The Bitcoin developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test if the `BlockInfo` fields are set correctly in Chronik. +""" + +from test_framework.address import ( + ADDRESS_ECREG_P2SH_OP_TRUE, + ADDRESS_ECREG_UNSPENDABLE, + P2SH_OP_TRUE, + SCRIPTSIG_OP_TRUE, +) +from test_framework.blocktools import ( + create_block, + create_coinbase, + make_conform_to_ctor, +) +from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut +from test_framework.p2p import P2PDataStore +from test_framework.script import OP_RETURN, CScript +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + + +class ChronikBlockInfoTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [['-chronik']] + self.rpc_timeout = 240 + + def skip_test_if_missing_module(self): + self.skip_if_no_chronik() + + def run_test(self): + from test_framework.chronik.client import ChronikClient, pb + + node = self.nodes[0] + node.setmocktime(1300000000) + chronik = ChronikClient('127.0.0.1', node.chronik_port) + + peer = node.add_p2p_connection(P2PDataStore()) + + coinblockhash = self.generatetoaddress(node, 1, ADDRESS_ECREG_P2SH_OP_TRUE)[0] + coinblock = node.getblock(coinblockhash) + cointx = coinblock['tx'][0] + + prev_hash = self.generatetoaddress(node, 100, ADDRESS_ECREG_UNSPENDABLE)[-1] + + coinvalue = 5000000000 + tx = CTransaction() + tx.vin = [CTxIn(outpoint=COutPoint(int(cointx, 16), 0), + scriptSig=SCRIPTSIG_OP_TRUE)] + tx.vout = [ + CTxOut(coinvalue - 10000, P2SH_OP_TRUE), + CTxOut(1000, CScript([OP_RETURN, b'test'])), + ] + tx.rehash() + + txid = node.sendrawtransaction(tx.serialize().hex()) + + tip_hash = self.generatetoaddress(node, 1, ADDRESS_ECREG_UNSPENDABLE)[-1] + + assert_equal(chronik.block(tip_hash).ok(), pb.Block( + block_info=pb.BlockInfo( + hash=bytes.fromhex(tip_hash)[::-1], + prev_hash=bytes.fromhex(prev_hash)[::-1], + height=102, + n_bits=0x207fffff, + timestamp=1300000018, + block_size=281, + num_txs=2, + num_inputs=2, + num_outputs=3, + sum_input_sats=coinvalue, + sum_coinbase_output_sats=coinvalue + 9000, + sum_normal_output_sats=coinvalue - 9000, + sum_burned_sats=1000, + ), + )) + + node.invalidateblock(tip_hash) + chronik.block(tip_hash).err(404) + + tx2 = CTransaction() + tx2.vin = [CTxIn(outpoint=COutPoint(int(txid, 16), 0), + scriptSig=SCRIPTSIG_OP_TRUE)] + tx2.vout = [ + CTxOut(3000, CScript([OP_RETURN, b'test'])), + CTxOut(5000, CScript([OP_RETURN, b'test'])), + CTxOut(coinvalue - 20000, P2SH_OP_TRUE), + ] + tx2.rehash() + + block = create_block(int(prev_hash, 16), + create_coinbase(102, b'\x03' * 33), + 1300000500) + block.vtx += [tx, tx2] + make_conform_to_ctor(block) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + peer.send_blocks_and_test([block], node) + + assert_equal(chronik.block(block.hash).ok(), pb.Block( + block_info=pb.BlockInfo( + hash=bytes.fromhex(block.hash)[::-1], + prev_hash=bytes.fromhex(prev_hash)[::-1], + height=102, + n_bits=0x207fffff, + 
timestamp=1300000500, + block_size=403, + num_txs=3, + num_inputs=3, + num_outputs=7, + sum_input_sats=coinvalue * 2 - 10000, + sum_coinbase_output_sats=coinvalue, + sum_normal_output_sats=coinvalue * 2 - 21000, + sum_burned_sats=9000, + ), + )) + + +if __name__ == '__main__': + ChronikBlockInfoTest().main() diff --git a/test/functional/chronik_blocks.py b/test/functional/chronik_blocks.py index 243f1f837..8fb69887f 100644 --- a/test/functional/chronik_blocks.py +++ b/test/functional/chronik_blocks.py @@ -1,78 +1,94 @@ #!/usr/bin/env python3 # Copyright (c) 2023 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test Chronik's /blocks/:start_height/:end_height endpoint. """ from test_framework.address import ADDRESS_ECREG_UNSPENDABLE from test_framework.blocktools import GENESIS_BLOCK_HASH, TIME_GENESIS_BLOCK from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class ChronikBlockRangeTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [['-chronik']] def skip_test_if_missing_module(self): self.skip_if_no_chronik() def run_test(self): from test_framework.chronik.client import ChronikClient, pb node = self.nodes[0] node.setmocktime(1300000000) chronik = ChronikClient('127.0.0.1', node.chronik_port) assert_equal(chronik.blocks(-1, 0).err(400).msg, '400: Invalid block start height: -1') assert_equal(chronik.blocks(-2**31, 0).err(400).msg, f'400: Invalid block start height: {-2**31}') assert_equal(chronik.blocks(2, 1).err(400).msg, '400: Invalid block end height: 1') assert_equal( chronik.blocks(1, 501).err(400).msg, '400: Blocks page size too large, may not be above 500 but got 501') # Doesn't overflow: assert_equal( chronik.blocks(0, 2**31 - 1).err(400).msg, f'400: Blocks page size too large, may not be above 500 but got {2**31}') genesis_info = pb.BlockInfo( hash=bytes.fromhex(GENESIS_BLOCK_HASH)[::-1], prev_hash=bytes(32), height=0, n_bits=0x207fffff, timestamp=TIME_GENESIS_BLOCK, + block_size=285, + num_txs=1, + num_inputs=1, + num_outputs=1, + sum_input_sats=0, + sum_coinbase_output_sats=5000000000, + sum_normal_output_sats=0, + sum_burned_sats=0, ) assert_equal(chronik.blocks(0, 100).ok(), pb.Blocks(blocks=[genesis_info])) assert_equal(chronik.blocks(0, 0).ok(), pb.Blocks(blocks=[genesis_info])) assert_equal(chronik.blocks(500, 500).ok(), pb.Blocks(blocks=[])) assert_equal(chronik.blocks(1, 500).ok(), pb.Blocks(blocks=[])) assert_equal(chronik.blocks(500, 999).ok(), pb.Blocks(blocks=[])) assert_equal(chronik.blocks(2**31 - 500, 2**31 - 1).ok(), pb.Blocks(blocks=[])) block_hashes = [GENESIS_BLOCK_HASH] block_hashes += self.generatetoaddress(node, 12, ADDRESS_ECREG_UNSPENDABLE) assert_equal( chronik.blocks(8, 12).ok(), pb.Blocks(blocks=[ pb.BlockInfo( hash=bytes.fromhex(block_hashes[height])[::-1], prev_hash=bytes.fromhex(block_hashes[height - 1])[::-1], height=height, n_bits=0x207fffff, timestamp=1300000003, + block_size=181, + num_txs=1, + num_inputs=1, + num_outputs=1, + sum_input_sats=0, + sum_coinbase_output_sats=5000000000, + sum_normal_output_sats=0, + sum_burned_sats=0, ) for height in range(8, 13) ]), ) if __name__ == '__main__': ChronikBlockRangeTest().main()