diff --git a/chronik/bitcoinsuite-core/src/tx/tx.rs b/chronik/bitcoinsuite-core/src/tx/tx.rs --- a/chronik/bitcoinsuite-core/src/tx/tx.rs +++ b/chronik/bitcoinsuite-core/src/tx/tx.rs @@ -111,6 +111,14 @@ pub is_coinbase: bool, } +/// Empty tx, serializes to 00000000000000000000. +pub static EMPTY_TX: TxMut = TxMut { + version: 0, + inputs: vec![], + outputs: vec![], + locktime: 0, +}; + impl Tx { /// Create a new [`Tx`] with a given [`TxId`] and [`TxMut`]. /// diff --git a/chronik/chronik-cpp/chronik.cpp b/chronik/chronik-cpp/chronik.cpp --- a/chronik/chronik-cpp/chronik.cpp +++ b/chronik/chronik-cpp/chronik.cpp @@ -51,6 +51,8 @@ .default_port = BaseParams().ChronikPort(), .wipe_db = fWipe, .enable_token_index = gArgs.GetBoolArg("-chroniktokenindex", true), + .enable_lokad_id_index = + gArgs.GetBoolArg("-chroniklokadidindex", true), .is_pause_allowed = is_pause_allowed, .enable_perf_stats = gArgs.GetBoolArg("-chronikperfstats", false), .ws_ping_interval_secs = diff --git a/chronik/chronik-db/src/db.rs b/chronik/chronik-db/src/db.rs --- a/chronik/chronik-db/src/db.rs +++ b/chronik/chronik-db/src/db.rs @@ -15,8 +15,8 @@ use crate::{ groups::{ - ScriptHistoryWriter, ScriptUtxoWriter, TokenIdHistoryWriter, - TokenIdUtxoWriter, + LokadIdHistoryWriter, ScriptHistoryWriter, ScriptUtxoWriter, + TokenIdHistoryWriter, TokenIdUtxoWriter, }, io::{ token::TokenWriter, BlockStatsWriter, BlockWriter, MetadataWriter, @@ -35,6 +35,10 @@ /// Column family for the block height of the first tx_num of that block. Used /// to get the block height of a tx. pub const CF_FIRST_TX_BY_BLK: &str = "first_tx_by_blk"; +/// Column family to store tx history by LOKAD ID. +pub const CF_LOKAD_ID_HISTORY: &str = "lokad_id_history"; +/// Column family to store number of txs by LOKAD ID. +pub const CF_LOKAD_ID_HISTORY_NUM_TXS: &str = "lokad_id_history_num_txs"; /// Column family to lookup a block by its hash. 
pub const CF_LOOKUP_BLK_BY_HASH: &str = "lookup_blk_by_hash"; /// Column family to lookup a tx by its hash. @@ -103,6 +107,7 @@ TokenWriter::add_cfs(&mut cfs); TokenIdHistoryWriter::add_cfs(&mut cfs); TokenIdUtxoWriter::add_cfs(&mut cfs); + LokadIdHistoryWriter::add_cfs(&mut cfs); Self::open_with_cfs(path, cfs) } diff --git a/chronik/chronik-db/src/groups/lokad_id.rs b/chronik/chronik-db/src/groups/lokad_id.rs new file mode 100644 --- /dev/null +++ b/chronik/chronik-db/src/groups/lokad_id.rs @@ -0,0 +1,88 @@ +// Copyright (c) 2024 The Bitcoin developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +use bitcoinsuite_core::tx::EMPTY_TX; +use bitcoinsuite_slp::lokad_id::{parse_tx_lokad_ids, LokadId, LokadIdIter}; + +use crate::{ + db::{CF_LOKAD_ID_HISTORY, CF_LOKAD_ID_HISTORY_NUM_TXS}, + group::{Group, GroupQuery, MemberItem, UtxoDataOutput}, + io::{ + GroupHistoryConf, GroupHistoryReader, GroupHistoryWriter, GroupUtxoConf, + }, + mem::MempoolGroupHistory, +}; + +/// Index the mempool tx history of scripts +pub type MempoolLokadIdHistory = MempoolGroupHistory; +/// Index the tx history of script in the DB +pub type LokadIdHistoryWriter<'a> = GroupHistoryWriter<'a, LokadIdGroup>; +/// Read the tx history of scripts in the DB +pub type LokadIdHistoryReader<'a> = GroupHistoryReader<'a, LokadIdGroup>; + +/// Group txs by input/output scripts. 
+#[derive(Clone, Debug)] +pub struct LokadIdGroup; + +/// Iterator over the LOKAD IDs of a tx +#[derive(Debug)] +pub struct LokadIdGroupIter<'a> { + iter: std::iter::Enumerate>, +} + +impl<'a> Iterator for LokadIdGroupIter<'a> { + type Item = MemberItem; + + fn next(&mut self) -> Option { + let (idx, member) = self.iter.next()?; + Some(MemberItem { idx, member }) + } +} + +impl Group for LokadIdGroup { + type Aux = (); + type Iter<'a> = LokadIdGroupIter<'a>; + type Member<'a> = LokadId; + type MemberSer = LokadId; + // Ignored, LOKAD ID doesn't apply to UTXOs but to entire txs + type UtxoData = UtxoDataOutput; + + fn input_members<'a>( + &self, + _query: GroupQuery<'a>, + _aux: &(), + ) -> Self::Iter<'a> { + // Don't use; actual parsing happens in output_members + LokadIdGroupIter { + iter: parse_tx_lokad_ids(&EMPTY_TX).enumerate(), + } + } + + fn output_members<'a>( + &self, + query: GroupQuery<'a>, + _aux: &(), + ) -> Self::Iter<'a> { + // LOKAD ID is per-tx not per-utxo, so we only use output_members + LokadIdGroupIter { + iter: parse_tx_lokad_ids(query.tx).enumerate(), + } + } + + fn ser_member(&self, member: &Self::Member<'_>) -> Self::MemberSer { + *member + } + + fn tx_history_conf() -> GroupHistoryConf { + GroupHistoryConf { + cf_page_name: CF_LOKAD_ID_HISTORY, + cf_num_txs_name: CF_LOKAD_ID_HISTORY_NUM_TXS, + page_size: 1000, + } + } + + fn utxo_conf() -> GroupUtxoConf { + panic!("LokadIdGroup should not be used to group UTXOs") + } +} diff --git a/chronik/chronik-db/src/groups/mod.rs b/chronik/chronik-db/src/groups/mod.rs --- a/chronik/chronik-db/src/groups/mod.rs +++ b/chronik/chronik-db/src/groups/mod.rs @@ -4,8 +4,10 @@ //! Collection of group implementations to group transactions by when indexing. 
+mod lokad_id; mod script; mod token_id; +pub use self::lokad_id::*; pub use self::script::*; pub use self::token_id::*; diff --git a/chronik/chronik-db/src/io/group_history.rs b/chronik/chronik-db/src/io/group_history.rs --- a/chronik/chronik-db/src/io/group_history.rs +++ b/chronik/chronik-db/src/io/group_history.rs @@ -314,6 +314,12 @@ Ok(()) } + /// Clear all history data from the DB + pub fn wipe(&self, batch: &mut WriteBatch) { + batch.delete_range_cf(self.col.cf_page, [].as_ref(), &[0xff; 16]); + batch.delete_range_cf(self.col.cf_num_txs, [].as_ref(), &[0xff; 16]); + } + fn fetch_members_num_txs<'tx>( &self, txs: &'tx [IndexTx<'tx>], diff --git a/chronik/chronik-db/src/io/group_utxos.rs b/chronik/chronik-db/src/io/group_utxos.rs --- a/chronik/chronik-db/src/io/group_utxos.rs +++ b/chronik/chronik-db/src/io/group_utxos.rs @@ -306,6 +306,11 @@ Ok(()) } + /// Clear all UTXO data from the DB + pub fn wipe(&self, batch: &mut WriteBatch) { + batch.delete_range_cf(self.col.cf, [].as_ref(), &[0xff; 16]); + } + /// Upgrade the DB from version 10 to version 11 pub fn upgrade_10_to_11(&self) -> Result<()> { log!( diff --git a/chronik/chronik-db/src/io/metadata.rs b/chronik/chronik-db/src/io/metadata.rs --- a/chronik/chronik-db/src/io/metadata.rs +++ b/chronik/chronik-db/src/io/metadata.rs @@ -19,6 +19,9 @@ /// Field in the `meta` cf storing whether Chronik had the token index enabled pub const FIELD_TOKEN_INDEX_ENABLED: &[u8] = b"TOKEN_INDEX_ENABLED"; +/// Field in the `meta` cf whether Chronik had the LOKAD ID index enabled +pub const FIELD_LOKAD_ID_INDEX_ENABLED: &[u8] = b"LOKAD_ID_INDEX_ENABLED"; + /// Write database metadata pub struct MetadataWriter<'a> { cf: &'a CF, @@ -65,6 +68,20 @@ Ok(()) } + /// Update the flag storing whether the LOKAD ID index is enabled in the DB + pub fn update_is_lokad_id_index_enabled( + &self, + batch: &mut rocksdb::WriteBatch, + is_lokad_id_index_enabled: bool, + ) -> Result<()> { + batch.put_cf( + self.cf, + 
FIELD_LOKAD_ID_INDEX_ENABLED, + db_serialize::(&is_lokad_id_index_enabled)?, + ); + Ok(()) + } + pub(crate) fn add_cfs(columns: &mut Vec) { columns.push(ColumnFamilyDescriptor::new( CF_META, @@ -106,6 +123,16 @@ None => Ok(true), } } + + /// Read whether the LOKAD ID index is enabled, or None if unspecified + pub fn is_lokad_id_index_enabled(&self) -> Result> { + match self.db.get(self.cf, FIELD_LOKAD_ID_INDEX_ENABLED)? { + Some(ser_token_index) => { + Ok(Some(db_deserialize::(&ser_token_index)?)) + } + None => Ok(None), + } + } } impl std::fmt::Debug for MetadataReader<'_> { diff --git a/chronik/chronik-db/src/mem/mempool.rs b/chronik/chronik-db/src/mem/mempool.rs --- a/chronik/chronik-db/src/mem/mempool.rs +++ b/chronik/chronik-db/src/mem/mempool.rs @@ -13,8 +13,9 @@ use crate::{ db::Db, groups::{ - MempoolScriptHistory, MempoolScriptUtxos, MempoolTokenIdHistory, - MempoolTokenIdUtxos, ScriptGroup, TokenIdGroup, TokenIdGroupAux, + LokadIdGroup, MempoolLokadIdHistory, MempoolScriptHistory, + MempoolScriptUtxos, MempoolTokenIdHistory, MempoolTokenIdUtxos, + ScriptGroup, TokenIdGroup, TokenIdGroupAux, }, mem::{MempoolSpentBy, MempoolTokens}, }; @@ -31,7 +32,9 @@ tokens: MempoolTokens, token_id_history: MempoolTokenIdHistory, token_id_utxos: MempoolTokenIdUtxos, + lokad_id_history: MempoolLokadIdHistory, is_token_index_enabled: bool, + is_lokad_id_index_enabled: bool, } /// Result after adding a tx to the mempool @@ -68,7 +71,11 @@ impl Mempool { /// Create a new [`Mempool`]. 
- pub fn new(script_group: ScriptGroup, enable_token_index: bool) -> Self { + pub fn new( + script_group: ScriptGroup, + enable_token_index: bool, + is_lokad_id_index_enabled: bool, + ) -> Self { Mempool { txs: HashMap::new(), script_history: MempoolScriptHistory::new(script_group.clone()), @@ -77,7 +84,9 @@ tokens: MempoolTokens::default(), token_id_history: MempoolTokenIdHistory::new(TokenIdGroup), token_id_utxos: MempoolTokenIdUtxos::new(TokenIdGroup), + lokad_id_history: MempoolLokadIdHistory::new(LokadIdGroup), is_token_index_enabled: enable_token_index, + is_lokad_id_index_enabled, } } @@ -110,6 +119,9 @@ } else { token_id_aux = TokenIdGroupAux::default(); } + if self.is_lokad_id_index_enabled { + self.lokad_id_history.insert(&mempool_tx, &()); + } if self.txs.insert(txid, mempool_tx).is_some() { return Err(DuplicateTx(txid).into()); } @@ -146,6 +158,9 @@ } else { token_id_aux = TokenIdGroupAux::default(); } + if self.is_lokad_id_index_enabled { + self.lokad_id_history.remove(&mempool_tx, &()); + } Ok(MempoolResult { mempool_tx: Cow::Owned(mempool_tx), token_id_aux, @@ -165,6 +180,9 @@ self.token_id_utxos.remove_mined(&mempool_tx, &token_id_aux); self.tokens.remove(txid); } + if self.is_lokad_id_index_enabled { + self.lokad_id_history.remove(&mempool_tx, &()); + } return Ok(Some(mempool_tx)); } Ok(None) @@ -204,4 +222,9 @@ pub fn token_id_utxos(&self) -> &MempoolTokenIdUtxos { &self.token_id_utxos } + + /// Tx history of LOKAD IDs in the mempool. + pub fn lokad_id_history(&self) -> &MempoolLokadIdHistory { + &self.lokad_id_history + } } diff --git a/chronik/chronik-http/src/handlers.rs b/chronik/chronik-http/src/handlers.rs --- a/chronik/chronik-http/src/handlers.rs +++ b/chronik/chronik-http/src/handlers.rs @@ -9,7 +9,10 @@ use hyper::Uri; use thiserror::Error; -use crate::{error::ReportError, parse::parse_script_variant_hex}; +use crate::{ + error::ReportError, + parse::{parse_lokad_id_hex, parse_script_variant_hex}, +}; /// Errors for HTTP handlers. 
#[derive(Debug, Error, PartialEq)] @@ -192,3 +195,52 @@ let utxos = token_id_utxos.utxos(token_id)?; Ok(proto::Utxos { utxos }) } + +/// Return a page of the confirmed txs of the given LOKAD ID. +pub async fn handle_lokad_id_confirmed_txs( + lokad_id_hex: &str, + query_params: &HashMap, + indexer: &ChronikIndexer, + node: &Node, +) -> Result { + let lokad_id = parse_lokad_id_hex(lokad_id_hex)?; + let lokad_id_history = indexer.lokad_id_history(node); + let page_num: u32 = get_param(query_params, "page")?.unwrap_or(0); + let page_size: u32 = get_param(query_params, "page_size")?.unwrap_or(25); + lokad_id_history.confirmed_txs( + lokad_id, + page_num as usize, + page_size as usize, + ) +} + +/// Return a page of the tx history of the given LOKAD ID, in reverse +/// chronological order, i.e. the latest transaction first and then going back +/// in time. +pub async fn handle_lokad_id_history( + lokad_id_hex: &str, + query_params: &HashMap, + indexer: &ChronikIndexer, + node: &Node, +) -> Result { + let lokad_id = parse_lokad_id_hex(lokad_id_hex)?; + let lokad_id_history = indexer.lokad_id_history(node); + let page_num: u32 = get_param(query_params, "page")?.unwrap_or(0); + let page_size: u32 = get_param(query_params, "page_size")?.unwrap_or(25); + lokad_id_history.rev_history( + lokad_id, + page_num as usize, + page_size as usize, + ) +} + +/// Return a page of the unconfirmed txs of the given LOKAD ID. 
+pub async fn handle_lokad_id_unconfirmed_txs( + lokad_id_hex: &str, + indexer: &ChronikIndexer, + node: &Node, +) -> Result { + let lokad_id = parse_lokad_id_hex(lokad_id_hex)?; + let lokad_id_history = indexer.lokad_id_history(node); + lokad_id_history.unconfirmed_txs(lokad_id) +} diff --git a/chronik/chronik-http/src/parse.rs b/chronik/chronik-http/src/parse.rs --- a/chronik/chronik-http/src/parse.rs +++ b/chronik/chronik-http/src/parse.rs @@ -9,6 +9,7 @@ error::DataError, script::{ScriptType, ScriptTypeError, ScriptVariant}, }; +use bitcoinsuite_slp::lokad_id::LokadId; use thiserror::Error; /// Errors indicating parsing failed. @@ -25,6 +26,10 @@ /// Script payload invalid for script_type #[error("400: Invalid payload for {0:?}: {1}")] InvalidScriptPayload(ScriptType, DataError), + + /// LOKAD ID must be 4 bytes + #[error("400: Invalid LOKAD ID length: expected 4 but got {0}")] + InvalidLokadIdLength(usize), } use self::ChronikParseError::*; @@ -53,3 +58,15 @@ Ok(ScriptVariant::from_type_and_payload(script_type, payload) .map_err(|err| InvalidScriptPayload(script_type, err))?) } + +/// Parse LOKAD ID hex string +pub fn parse_lokad_id_hex(lokad_id_hex: &str) -> Result { + let lokad_id = parse_hex(lokad_id_hex)?; + parse_lokad_id(&lokad_id) +} + +/// Parse LOKAD ID bytestring +pub fn parse_lokad_id(lokad_id: &[u8]) -> Result { + Ok(LokadId::try_from(lokad_id) + .map_err(|_| InvalidLokadIdLength(lokad_id.len()))?) 
+} diff --git a/chronik/chronik-http/src/server.rs b/chronik/chronik-http/src/server.rs --- a/chronik/chronik-http/src/server.rs +++ b/chronik/chronik-http/src/server.rs @@ -206,6 +206,18 @@ "/token-id/:token_id/utxos", routing::get(handle_token_id_utxos), ) + .route( + "/lokad-id/:lokad_id/confirmed-txs", + routing::get(handle_lokad_id_confirmed_txs), + ) + .route( + "/lokad-id/:lokad_id/history", + routing::get(handle_lokad_id_history), + ) + .route( + "/lokad-id/:lokad_id/unconfirmed-txs", + routing::get(handle_lokad_id_unconfirmed_txs), + ) .route("/ws", routing::get(handle_ws)) .route("/pause", routing::get(handle_pause)) .route("/resume", routing::get(handle_resume)) @@ -488,6 +500,58 @@ )) } +async fn handle_lokad_id_confirmed_txs( + Path(lokad_id_hex): Path, + Query(query_params): Query>, + Extension(indexer): Extension, + Extension(node): Extension, +) -> Result, ReportError> { + let indexer = indexer.read().await; + Ok(Protobuf( + handlers::handle_lokad_id_confirmed_txs( + &lokad_id_hex, + &query_params, + &indexer, + &node, + ) + .await?, + )) +} + +async fn handle_lokad_id_history( + Path(lokad_id_hex): Path, + Query(query_params): Query>, + Extension(indexer): Extension, + Extension(node): Extension, +) -> Result, ReportError> { + let indexer = indexer.read().await; + Ok(Protobuf( + handlers::handle_lokad_id_history( + &lokad_id_hex, + &query_params, + &indexer, + &node, + ) + .await?, + )) +} + +async fn handle_lokad_id_unconfirmed_txs( + Path(lokad_id_hex): Path, + Extension(indexer): Extension, + Extension(node): Extension, +) -> Result, ReportError> { + let indexer = indexer.read().await; + Ok(Protobuf( + handlers::handle_lokad_id_unconfirmed_txs( + &lokad_id_hex, + &indexer, + &node, + ) + .await?, + )) +} + async fn handle_pause( Extension(pause_notify): Extension, ) -> Result, ReportError> { diff --git a/chronik/chronik-http/src/ws.rs b/chronik/chronik-http/src/ws.rs --- a/chronik/chronik-http/src/ws.rs +++ b/chronik/chronik-http/src/ws.rs @@ 
-9,7 +9,7 @@ use abc_rust_error::Result; use axum::extract::ws::{self, WebSocket}; use bitcoinsuite_core::script::ScriptVariant; -use bitcoinsuite_slp::token_id::TokenId; +use bitcoinsuite_slp::{lokad_id::LokadId, token_id::TokenId}; use chronik_indexer::{ subs::{BlockMsg, BlockMsgType}, subs_group::{TxMsg, TxMsgType}, @@ -23,7 +23,7 @@ use crate::{ error::report_status_error, - parse::parse_script_variant, + parse::{parse_lokad_id, parse_script_variant}, server::{ChronikIndexerRef, ChronikSettings}, }; @@ -57,16 +57,19 @@ Blocks, Script(ScriptVariant), TokenId(TokenId), + LokadId(LokadId), } type SubRecvBlocks = Option>; type SubRecvScripts = HashMap>; type SubRecvTokenId = HashMap>; +type SubRecvLokadId = HashMap>; struct SubRecv { blocks: SubRecvBlocks, scripts: SubRecvScripts, token_ids: SubRecvTokenId, + lokad_ids: SubRecvLokadId, ws_ping_interval: Duration, } @@ -77,6 +80,7 @@ action = Self::recv_blocks(&mut self.blocks) => action, action = Self::recv_scripts(&mut self.scripts) => action, action = Self::recv_token_ids(&mut self.token_ids) => action, + action = Self::recv_lokad_ids(&mut self.lokad_ids) => action, action = Self::schedule_ping(self.ws_ping_interval) => action, } } @@ -119,6 +123,22 @@ } } + async fn recv_lokad_ids( + lokad_ids: &mut SubRecvLokadId, + ) -> Result { + if lokad_ids.is_empty() { + futures::future::pending().await + } else { + let lokad_ids_receivers = select_all( + lokad_ids + .values_mut() + .map(|receiver| Box::pin(receiver.recv())), + ); + let (tx_msg, _, _) = lokad_ids_receivers.await; + sub_tx_msg_action(tx_msg) + } + } + async fn schedule_ping(ws_ping_interval: Duration) -> Result { tokio::time::sleep(ws_ping_interval).await; let ping_payload = b"Bitcoin ABC Chronik Indexer".to_vec(); @@ -166,6 +186,24 @@ self.token_ids.insert(token_id, recv); } } + WsSubType::LokadId(lokad_id) => { + if sub.is_unsub { + log_chronik!( + "WS unsubscribe from LOKAD ID {}\n", + hex::encode(lokad_id) + ); + 
std::mem::drop(self.lokad_ids.remove(&lokad_id)); + subs.subs_lokad_id_mut().unsubscribe_from_member(&lokad_id) + } else { + log_chronik!( + "WS subscribe to LOKAD ID {}\n", + hex::encode(lokad_id) + ); + let recv = + subs.subs_lokad_id_mut().subscribe_to_member(&lokad_id); + self.lokad_ids.insert(lokad_id, recv); + } + } } } @@ -208,6 +246,9 @@ Some(SubType::TokenId(token_id)) => WsSubType::TokenId( token_id.token_id.parse::()?, ), + Some(SubType::LokadId(lokad_id)) => { + WsSubType::LokadId(parse_lokad_id(&lokad_id.lokad_id)?) + } }, })) } @@ -276,6 +317,7 @@ blocks: Default::default(), scripts: Default::default(), token_ids: Default::default(), + lokad_ids: Default::default(), ws_ping_interval: settings.ws_ping_interval, }; let mut last_msg = None; diff --git a/chronik/chronik-indexer/src/indexer.rs b/chronik/chronik-indexer/src/indexer.rs --- a/chronik/chronik-indexer/src/indexer.rs +++ b/chronik/chronik-indexer/src/indexer.rs @@ -15,8 +15,9 @@ use chronik_db::{ db::{Db, WriteBatch}, groups::{ - ScriptGroup, ScriptHistoryWriter, ScriptUtxoWriter, TokenIdGroup, - TokenIdGroupAux, TokenIdHistoryWriter, TokenIdUtxoWriter, + LokadIdGroup, LokadIdHistoryWriter, ScriptGroup, ScriptHistoryWriter, + ScriptUtxoWriter, TokenIdGroup, TokenIdGroupAux, TokenIdHistoryWriter, + TokenIdUtxoWriter, }, index_tx::{ prepare_indexed_txs_cached, PrepareUpdateMode, TxNumCacheSettings, @@ -56,6 +57,10 @@ pub wipe_db: bool, /// Whether Chronik should index SLP/ALP token txs. pub enable_token_index: bool, + /// Whether Chronik should index txs by LOKAD ID. + /// This will be overridden to `true` if the DB is empty and + /// `enable_lokad_id_index_specified` is false. + pub enable_lokad_id_index: bool, /// Whether to output Chronik performance statistics into a perf/ folder pub enable_perf_stats: bool, /// Settings for tuning TxNumCache. 
@@ -73,6 +78,10 @@ subs: RwLock, perf_path: Option, is_token_index_enabled: bool, + is_lokad_id_index_enabled: bool, + /// Whether the LOKAD ID index needs to be reindexed, will be set to + /// `false` after it caught up with the rest of Chronik. + needs_lokad_id_reindex: bool, } /// Access to the bitcoind node. @@ -183,11 +192,21 @@ log_chronik!("Opening Chronik at {}\n", db_path.to_string_lossy()); let db = Db::open(&db_path)?; + let is_db_empty = db.is_db_empty()?; let schema_version = verify_schema_version(&db)?; verify_enable_token_index(&db, params.enable_token_index)?; + let needs_lokad_id_reindex = verify_lokad_id_index( + &db, + is_db_empty, + params.enable_lokad_id_index, + )?; upgrade_db_if_needed(&db, schema_version, params.enable_token_index)?; - let mempool = Mempool::new(ScriptGroup, params.enable_token_index); + let mempool = Mempool::new( + ScriptGroup, + params.enable_token_index, + params.enable_lokad_id_index, + ); Ok(ChronikIndexer { db, mempool, @@ -199,6 +218,8 @@ subs: RwLock::new(Subs::new(ScriptGroup)), perf_path: params.enable_perf_stats.then_some(perf_path), is_token_index_enabled: params.enable_token_index, + is_lokad_id_index_enabled: params.enable_lokad_id_index, + needs_lokad_id_reindex, }) } @@ -243,6 +264,10 @@ -1 } }; + if self.needs_lokad_id_reindex { + self.reindex_lokad_id_index(bridge, node_tip_index, start_height)?; + self.needs_lokad_id_reindex = false; + } let tip_height = node_tip_info.height; for height in start_height + 1..=tip_height { if bridge.shutdown_requested() { @@ -315,6 +340,64 @@ Ok(fork_info.height) } + fn reindex_lokad_id_index( + &mut self, + bridge: &ffi::ChronikBridge, + node_tip_index: &ffi::CBlockIndex, + end_height: BlockHeight, + ) -> Result<()> { + let lokad_id_writer = + LokadIdHistoryWriter::new(&self.db, LokadIdGroup)?; + let tx_reader = TxReader::new(&self.db)?; + let metadata_writer = MetadataWriter::new(&self.db)?; + + // First, wipe the LOKAD ID index + let mut batch = WriteBatch::default(); + 
lokad_id_writer.wipe(&mut batch); + self.db.write_batch(batch)?; + + for height in 0..=end_height { + if bridge.shutdown_requested() { + log!("Stopped reindexing LOKAD ID index\n"); + return Ok(()); + } + let block_index = ffi::get_block_ancestor(node_tip_index, height)?; + let block = self.load_chronik_block(bridge, block_index)?; + let first_tx_num = tx_reader + .first_tx_num_by_block(block.db_block.height)? + .unwrap(); + let index_txs = prepare_indexed_txs_cached( + &self.db, + first_tx_num, + &block.txs, + &mut self.mem_data.tx_num_cache, + PrepareUpdateMode::Add, + )?; + let hash = block.db_block.hash.clone(); + let mut batch = WriteBatch::default(); + lokad_id_writer.insert( + &mut batch, + &index_txs, + &(), + &mut GroupHistoryMemData::default(), + )?; + self.db.write_batch(batch)?; + if height % 100 == 0 { + log!( + "Synced Chronik LOKAD ID index up to block {hash} at \ + height {height}/{end_height} (-chroniklokadidindex=0 to \ + disable)\n" + ); + } + } + + let mut batch = WriteBatch::default(); + metadata_writer.update_is_lokad_id_index_enabled(&mut batch, true)?; + self.db.write_batch(batch)?; + + Ok(()) + } + /// Add transaction to the indexer's mempool. 
pub fn handle_tx_added_to_mempool( &mut self, @@ -362,6 +445,8 @@ TokenIdHistoryWriter::new(&self.db, TokenIdGroup)?; let token_id_utxo_writer = TokenIdUtxoWriter::new(&self.db, TokenIdGroup)?; + let lokad_id_history_writer = + LokadIdHistoryWriter::new(&self.db, LokadIdGroup)?; block_writer.insert(&mut batch, &block.db_block)?; let first_tx_num = tx_writer.insert( &mut batch, @@ -394,6 +479,14 @@ &index_txs, &mut self.mem_data.spent_by, )?; + if self.is_lokad_id_index_enabled { + lokad_id_history_writer.insert( + &mut batch, + &index_txs, + &(), + &mut GroupHistoryMemData::default(), + )?; + } let token_id_aux; if self.is_token_index_enabled { let processed_token_batch = @@ -453,6 +546,8 @@ TokenIdHistoryWriter::new(&self.db, TokenIdGroup)?; let token_id_utxo_writer = TokenIdUtxoWriter::new(&self.db, TokenIdGroup)?; + let lokad_id_history_writer = + LokadIdHistoryWriter::new(&self.db, LokadIdGroup)?; block_writer.delete(&mut batch, &block.db_block)?; let first_tx_num = tx_writer.delete( &mut batch, @@ -484,6 +579,17 @@ &index_txs, &mut self.mem_data.spent_by, )?; + if self.is_lokad_id_index_enabled { + // Skip delete if rewinding indexer; will be wiped later anyway + if !self.needs_lokad_id_reindex { + lokad_id_history_writer.delete( + &mut batch, + &index_txs, + &(), + &mut GroupHistoryMemData::default(), + )?; + } + } if self.is_token_index_enabled { let token_id_aux = TokenIdGroupAux::from_db(&index_txs, &self.db)?; token_id_history_writer.delete( @@ -644,6 +750,23 @@ } } + /// Return [`QueryGroupHistory`] for LOKAD IDs to query the tx history of + /// LOKAD IDs. 
+ pub fn lokad_id_history<'a>( + &'a self, + node: &'a Node, + ) -> QueryGroupHistory<'a, LokadIdGroup> { + QueryGroupHistory { + db: &self.db, + avalanche: &self.avalanche, + mempool: &self.mempool, + mempool_history: self.mempool.lokad_id_history(), + group: LokadIdGroup, + node, + is_token_index_enabled: self.is_token_index_enabled, + } + } + /// Subscribers, behind read/write lock pub fn subs(&self) -> &RwLock { &self.subs @@ -805,6 +928,43 @@ Ok(()) } +/// Verify user config and DB are in sync. Returns whether the LOKAD ID index +/// needs to be reindexed. +fn verify_lokad_id_index( + db: &Db, + is_db_empty: bool, + enable: bool, +) -> Result { + let metadata_reader = MetadataReader::new(db)?; + let metadata_writer = MetadataWriter::new(db)?; + let lokad_id_writer = LokadIdHistoryWriter::new(db, LokadIdGroup)?; + let is_enabled_db = metadata_reader + .is_lokad_id_index_enabled()? + .unwrap_or(false); + let mut batch = WriteBatch::default(); + if !is_db_empty { + if enable && !is_enabled_db { + // DB non-empty without LOKAD ID index, but index enabled -> reindex + return Ok(true); + } + if !enable && is_enabled_db { + // Otherwise, the LOKAD ID index has been enabled and now + // specified to be disabled, so we wipe the index. + log!( + "Warning: Wiping existing LOKAD ID index, since \ + -chroniklokadidindex=0\n" + ); + log!( + "You will need to specify -chroniklokadidindex=1 to restore\n" + ); + lokad_id_writer.wipe(&mut batch); + } + } + metadata_writer.update_is_lokad_id_index_enabled(&mut batch, enable)?; + db.write_batch(batch)?; + Ok(false) +} + impl Node { /// If `result` is [`Err`], logs and aborts the node. 
pub fn ok_or_abort(&self, func_name: &str, result: Result) { @@ -851,6 +1011,7 @@ datadir_net: datadir_net.clone(), wipe_db: false, enable_token_index: false, + enable_lokad_id_index: false, enable_perf_stats: false, tx_num_cache: Default::default(), }; @@ -920,6 +1081,7 @@ datadir_net: dir.path().to_path_buf(), wipe_db: false, enable_token_index: false, + enable_lokad_id_index: false, enable_perf_stats: false, tx_num_cache: Default::default(), }; diff --git a/chronik/chronik-indexer/src/subs.rs b/chronik/chronik-indexer/src/subs.rs --- a/chronik/chronik-indexer/src/subs.rs +++ b/chronik/chronik-indexer/src/subs.rs @@ -6,7 +6,7 @@ use bitcoinsuite_core::{block::BlockHash, tx::Tx}; use chronik_db::{ - groups::{ScriptGroup, TokenIdGroup, TokenIdGroupAux}, + groups::{LokadIdGroup, ScriptGroup, TokenIdGroup, TokenIdGroupAux}, io::BlockHeight, }; use chronik_util::log; @@ -44,6 +44,7 @@ subs_block: broadcast::Sender, subs_script: SubsGroup, subs_token_id: SubsGroup, + subs_lokad_id: SubsGroup, } impl Subs { @@ -53,6 +54,7 @@ subs_block: broadcast::channel(BLOCK_CHANNEL_CAPACITY).0, subs_script: SubsGroup::new(script_group), subs_token_id: SubsGroup::new(TokenIdGroup), + subs_lokad_id: SubsGroup::new(LokadIdGroup), } } @@ -71,6 +73,11 @@ &mut self.subs_token_id } + /// Mutable reference to the LOKAD ID subscribers. + pub fn subs_lokad_id_mut(&mut self) -> &mut SubsGroup { + &mut self.subs_lokad_id + } + /// Send out updates to subscribers for this tx and msg_type. pub fn handle_tx_event( &mut self, @@ -81,6 +88,7 @@ self.subs_script.handle_tx_event(tx, &(), msg_type); self.subs_token_id .handle_tx_event(tx, token_id_aux, msg_type); + self.subs_lokad_id.handle_tx_event(tx, &(), msg_type); } /// Send out msg_type updates for the txs of the block to subscribers. 
@@ -90,7 +98,10 @@ msg_type: TxMsgType, token_id_aux: &TokenIdGroupAux, ) { - if self.subs_script.is_empty() && self.subs_token_id.is_empty() { + if self.subs_script.is_empty() + && self.subs_token_id.is_empty() + && self.subs_lokad_id.is_empty() + { // Short-circuit if no subscriptions return; } @@ -98,6 +109,7 @@ self.subs_script.handle_tx_event(tx, &(), msg_type); self.subs_token_id .handle_tx_event(tx, token_id_aux, msg_type); + self.subs_lokad_id.handle_tx_event(tx, &(), msg_type); } } diff --git a/chronik/chronik-lib/src/bridge.rs b/chronik/chronik-lib/src/bridge.rs --- a/chronik/chronik-lib/src/bridge.rs +++ b/chronik/chronik-lib/src/bridge.rs @@ -77,6 +77,7 @@ datadir_net: params.datadir_net.into(), wipe_db: params.wipe_db, enable_token_index: params.enable_token_index, + enable_lokad_id_index: params.enable_lokad_id_index, enable_perf_stats: params.enable_perf_stats, tx_num_cache: TxNumCacheSettings { bucket_size: params.tx_num_cache.bucket_size, diff --git a/chronik/chronik-lib/src/ffi.rs b/chronik/chronik-lib/src/ffi.rs --- a/chronik/chronik-lib/src/ffi.rs +++ b/chronik/chronik-lib/src/ffi.rs @@ -24,6 +24,8 @@ pub wipe_db: bool, /// Whether Chronik should index SLP/ALP token transactions pub enable_token_index: bool, + /// Whether Chronik should index transactions by LOKAD ID + pub enable_lokad_id_index: bool, /// Whether pausing Chronik indexing is allowed pub is_pause_allowed: bool, /// Whether to output Chronik performance statistics into a perf/ diff --git a/chronik/chronik-proto/proto/chronik.proto b/chronik/chronik-proto/proto/chronik.proto --- a/chronik/chronik-proto/proto/chronik.proto +++ b/chronik/chronik-proto/proto/chronik.proto @@ -408,6 +408,8 @@ WsSubScript script = 3; // Subscription to a token ID WsSubTokenId token_id = 4; + // Subscription to a lokad ID + WsSubLokadId lokad_id = 5; } } @@ -435,6 +437,15 @@ string token_id = 1; } +// Subscription to a LOKAD ID. 
Messages will be sent every time a tx matches the given LOKAD ID
+""" + +from test_framework.address import ( + ADDRESS_ECREG_P2SH_OP_TRUE, + ADDRESS_ECREG_UNSPENDABLE, + P2SH_OP_TRUE, + SCRIPTSIG_OP_TRUE, +) +from test_framework.blocktools import create_block, create_coinbase +from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut +from test_framework.p2p import P2PDataStore +from test_framework.script import ( + OP_EQUAL, + OP_HASH160, + OP_RESERVED, + OP_RETURN, + CScript, + hash160, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, chronik_sub_lokad_id + + +class ChronikLokadIdGroup(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [["-chronik"]] + + def skip_test_if_missing_module(self): + self.skip_if_no_chronik() + + def run_test(self): + from test_framework.chronik.client import pb + + node = self.nodes[0] + peer = node.add_p2p_connection(P2PDataStore()) + chronik = node.get_chronik_client() + ws1 = chronik.ws() + ws2 = chronik.ws() + + mocktime = 1300000000 + node.setmocktime(mocktime) + + coinblockhash = self.generatetoaddress(node, 1, ADDRESS_ECREG_P2SH_OP_TRUE)[0] + coinblock = node.getblock(coinblockhash) + cointx = coinblock["tx"][0] + coinvalue = 5000000000 + + self.generatetoaddress(node, 100, ADDRESS_ECREG_UNSPENDABLE) + + def p2lokad(lokad_id: bytes): + return CScript([lokad_id, OP_EQUAL]) + + def p2sh_lokad(lokad_id: bytes): + return CScript([OP_HASH160, hash160(p2lokad(lokad_id)), OP_EQUAL]) + + def spend_p2lokad(lokad_id: bytes): + return CScript([lokad_id, p2lokad(lokad_id)]) + + def page_txids(txs): + return [tx.txid[::-1].hex() for tx in txs] + + def lokad_id_unconf(lokad_id: bytes): + return page_txids( + chronik.lokad_id(lokad_id.hex()).unconfirmed_txs().ok().txs + ) + + def lokad_id_conf(lokad_id: bytes): + return page_txids(chronik.lokad_id(lokad_id.hex()).confirmed_txs().ok().txs) + + def lokad_id_history(lokad_id: bytes): + return 
page_txids(chronik.lokad_id(lokad_id.hex()).history().ok().txs) + + def ws_msg(txid: str, msg_type): + return pb.WsMsg( + tx=pb.MsgTx( + msg_type=msg_type, + txid=bytes.fromhex(txid)[::-1], + ) + ) + + chronik_sub_lokad_id(ws1, node, b"lok0") + chronik_sub_lokad_id(ws2, node, b"lok1") + chronik_sub_lokad_id(ws2, node, b"lok2") + chronik_sub_lokad_id(ws2, node, b"lok3") + + tx0 = CTransaction() + tx0.vin = [CTxIn(COutPoint(int(cointx, 16), 0), SCRIPTSIG_OP_TRUE)] + tx0.vout = [ + CTxOut(0, CScript([OP_RETURN, b"lok0"])), + CTxOut(10000, p2sh_lokad(b"lok1")), + CTxOut(10000, p2sh_lokad(b"lok2")), + CTxOut(10000, p2sh_lokad(b"lok3")), + CTxOut(coinvalue - 100000, P2SH_OP_TRUE), + ] + tx0.rehash() + chronik.broadcast_tx(tx0.serialize()).ok() + assert_equal(lokad_id_unconf(b"lok0"), [tx0.hash]) + + assert_equal(ws1.recv(), ws_msg(tx0.hash, pb.TX_ADDED_TO_MEMPOOL)) + + node.setmocktime(mocktime + 1) + tx1 = CTransaction() + tx1.vin = [CTxIn(COutPoint(tx0.sha256, 1), spend_p2lokad(b"lok1"))] + tx1.vout = [CTxOut(0, CScript([OP_RETURN, b"lok0", b"x" * 100]))] + tx1.rehash() + chronik.broadcast_tx(tx1.serialize()).ok() + assert_equal(lokad_id_unconf(b"lok0"), [tx0.hash, tx1.hash]) + assert_equal(lokad_id_unconf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_unconf(b"xxxx"), []) + + assert_equal(ws1.recv(), ws_msg(tx1.hash, pb.TX_ADDED_TO_MEMPOOL)) + assert_equal(ws2.recv(), ws_msg(tx1.hash, pb.TX_ADDED_TO_MEMPOOL)) + + # Unsub ws2 from lok2 + chronik_sub_lokad_id(ws2, node, b"lok2", is_unsub=True) + + node.setmocktime(mocktime + 2) + tx2 = CTransaction() + tx2.vin = [CTxIn(COutPoint(tx0.sha256, 2), spend_p2lokad(b"lok2"))] + tx2.vout = [ + CTxOut( + 0, CScript([OP_RETURN, OP_RESERVED, b"lok2__", b"lok0" + b"x" * 100]) + ) + ] + tx2.rehash() + chronik.broadcast_tx(tx2.serialize()).ok() + assert_equal(lokad_id_unconf(b"lok0"), [tx0.hash, tx1.hash, tx2.hash]) + assert_equal(lokad_id_unconf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_unconf(b"lok2"), [tx2.hash]) + + # Only 
sent to ws1, not ws2 + assert_equal(ws1.recv(), ws_msg(tx2.hash, pb.TX_ADDED_TO_MEMPOOL)) + + # Mine tx0, tx1, tx2 + blockhash = self.generatetoaddress(node, 1, ADDRESS_ECREG_P2SH_OP_TRUE)[0] + + assert_equal(lokad_id_conf(b"lok0"), sorted([tx0.hash, tx1.hash, tx2.hash])) + assert_equal(lokad_id_conf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_conf(b"lok2"), [tx2.hash]) + + for txid in sorted([tx0.hash, tx1.hash, tx2.hash]): + assert_equal(ws1.recv(), ws_msg(txid, pb.TX_CONFIRMED)) + assert_equal(ws2.recv(), ws_msg(tx1.hash, pb.TX_CONFIRMED)) + + tx3 = CTransaction() + tx3.vin = [CTxIn(COutPoint(tx0.sha256, 3), spend_p2lokad(b"lok3"))] + tx3.vout = [ + CTxOut(0, CScript([OP_RETURN, OP_RESERVED, b"lok2", b"lok0" + b"x" * 100])) + ] + tx3.rehash() + chronik.broadcast_tx(tx3.serialize()).ok() + + assert_equal(ws1.recv(), ws_msg(tx3.hash, pb.TX_ADDED_TO_MEMPOOL)) + assert_equal(ws2.recv(), ws_msg(tx3.hash, pb.TX_ADDED_TO_MEMPOOL)) + + # Unconfirmed + assert_equal(lokad_id_unconf(b"lok0"), [tx3.hash]) + assert_equal(lokad_id_unconf(b"lok1"), []) + assert_equal(lokad_id_unconf(b"lok2"), [tx3.hash]) + assert_equal(lokad_id_unconf(b"lok3"), [tx3.hash]) + # Confirmed stays unchanged + assert_equal(lokad_id_conf(b"lok0"), sorted([tx0.hash, tx1.hash, tx2.hash])) + assert_equal(lokad_id_conf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_conf(b"lok2"), [tx2.hash]) + assert_equal(lokad_id_conf(b"lok3"), []) + # History + assert_equal( + lokad_id_history(b"lok0"), [tx3.hash, tx2.hash, tx1.hash, tx0.hash] + ) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx3.hash, tx2.hash]) + assert_equal(lokad_id_history(b"lok3"), [tx3.hash]) + + # Mine conflicting tx kicking out tx3 + tx3_conflict = CTransaction() + tx3_conflict.vin = [CTxIn(COutPoint(tx0.sha256, 3), spend_p2lokad(b"lok3"))] + tx3_conflict.vout = [ + CTxOut(0, CScript([OP_RETURN, OP_RESERVED, b"lok4" + b"x" * 100])) + ] + tx3_conflict.rehash() + + block = 
create_block(int(blockhash, 16), create_coinbase(103), mocktime + 100) + block.vtx += [tx3_conflict] + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + peer.send_blocks_and_test([block], node) + + assert_equal(ws1.recv(), ws_msg(tx3.hash, pb.TX_REMOVED_FROM_MEMPOOL)) + assert_equal(ws2.recv(), ws_msg(tx3.hash, pb.TX_REMOVED_FROM_MEMPOOL)) + + assert_equal(ws2.recv(), ws_msg(tx3_conflict.hash, pb.TX_CONFIRMED)) + + # No unconfirmed anymore + assert_equal(lokad_id_unconf(b"lok0"), []) + assert_equal(lokad_id_unconf(b"lok1"), []) + assert_equal(lokad_id_unconf(b"lok2"), []) + assert_equal(lokad_id_unconf(b"lok3"), []) + # Confirmed + assert_equal(lokad_id_conf(b"lok0"), sorted([tx0.hash, tx1.hash, tx2.hash])) + assert_equal(lokad_id_conf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_conf(b"lok2"), [tx2.hash]) + assert_equal(lokad_id_conf(b"lok3"), [tx3_conflict.hash]) + assert_equal(lokad_id_conf(b"lok4"), [tx3_conflict.hash]) + # History + assert_equal(lokad_id_history(b"lok0"), [tx2.hash, tx1.hash, tx0.hash]) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx2.hash]) + assert_equal(lokad_id_history(b"lok3"), [tx3_conflict.hash]) + assert_equal(lokad_id_history(b"lok4"), [tx3_conflict.hash]) + + node.invalidateblock(block.hash) + + assert_equal(ws2.recv(), ws_msg(tx3_conflict.hash, pb.TX_ADDED_TO_MEMPOOL)) + + # Back to unconfirmed + assert_equal(lokad_id_unconf(b"lok0"), []) + assert_equal(lokad_id_unconf(b"lok1"), []) + assert_equal(lokad_id_unconf(b"lok2"), []) + assert_equal(lokad_id_unconf(b"lok3"), [tx3_conflict.hash]) + assert_equal(lokad_id_unconf(b"lok4"), [tx3_conflict.hash]) + # Confirmed + assert_equal(lokad_id_conf(b"lok0"), sorted([tx0.hash, tx1.hash, tx2.hash])) + assert_equal(lokad_id_conf(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_conf(b"lok2"), [tx2.hash]) + assert_equal(lokad_id_conf(b"lok3"), []) + assert_equal(lokad_id_conf(b"lok4"), []) + # History + 
assert_equal(lokad_id_history(b"lok0"), [tx2.hash, tx1.hash, tx0.hash]) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx2.hash]) + assert_equal(lokad_id_history(b"lok3"), [tx3_conflict.hash]) + assert_equal(lokad_id_history(b"lok4"), [tx3_conflict.hash]) + + # Restarting leaves the LOKAD index intact + self.restart_node(0, ["-chronik"]) + assert_equal(lokad_id_history(b"lok0"), [tx2.hash, tx1.hash, tx0.hash]) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx2.hash]) + + # Restarting with index disabled wipes the DB + self.restart_node(0, ["-chronik", "-chroniklokadidindex=0"]) + assert_equal(lokad_id_history(b"lok0"), []) + assert_equal(lokad_id_history(b"lok1"), []) + assert_equal(lokad_id_history(b"lok2"), []) + + # Restarting with chroniklokadidindex=1 reindexes the LOKAD ID index + self.restart_node(0, ["-chronik"]) + assert_equal(lokad_id_history(b"lok0"), [tx2.hash, tx1.hash, tx0.hash]) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx2.hash]) + + # Restarting again still leaves the index intact + self.restart_node(0, ["-chronik", "-chroniklokadidindex=1"]) + assert_equal(lokad_id_history(b"lok0"), [tx2.hash, tx1.hash, tx0.hash]) + assert_equal(lokad_id_history(b"lok1"), [tx1.hash]) + assert_equal(lokad_id_history(b"lok2"), [tx2.hash]) + + # Wipe index again + self.restart_node(0, ["-chronik", "-chroniklokadidindex=0"]) + assert_equal(lokad_id_history(b"lok0"), []) + assert_equal(lokad_id_history(b"lok1"), []) + assert_equal(lokad_id_history(b"lok2"), []) + + +if __name__ == "__main__": + ChronikLokadIdGroup().main() diff --git a/test/functional/test_framework/chronik/client.py b/test/functional/test_framework/chronik/client.py --- a/test/functional/test_framework/chronik/client.py +++ b/test/functional/test_framework/chronik/client.py @@ -108,6 +108,32 @@ return 
self.client._request_get(f"/token-id/{self.token_id}/utxos", pb.Utxos) +class ChronikLokadIdClient: + def __init__(self, client: "ChronikClient", lokad_id: str) -> None: + self.client = client + self.lokad_id = lokad_id + + def confirmed_txs(self, page=None, page_size=None): + query = _page_query_params(page, page_size) + return self.client._request_get( + f"/lokad-id/{self.lokad_id}/confirmed-txs{query}", + pb.TxHistoryPage, + ) + + def history(self, page=None, page_size=None): + query = _page_query_params(page, page_size) + return self.client._request_get( + f"/lokad-id/{self.lokad_id}/history{query}", + pb.TxHistoryPage, + ) + + def unconfirmed_txs(self): + return self.client._request_get( + f"/lokad-id/{self.lokad_id}/unconfirmed-txs", + pb.TxHistoryPage, + ) + + class ChronikWs: def __init__(self, client: "ChronikClient", **kwargs) -> None: self.messages: List[pb.WsMsg] = [] @@ -203,6 +229,13 @@ ) self.send_bytes(sub.SerializeToString()) + def sub_lokad_id(self, lokad_id: bytes, *, is_unsub=False) -> None: + sub = pb.WsSub( + is_unsub=is_unsub, + lokad_id=pb.WsSubLokadId(lokad_id=lokad_id), + ) + self.send_bytes(sub.SerializeToString()) + def close(self): self.ws.close() self.ws_thread.join(self.timeout) @@ -321,6 +354,9 @@ def token_id(self, token_id: str) -> ChronikTokenIdClient: return ChronikTokenIdClient(self, token_id) + def lokad_id(self, lokad_id_hex: str) -> ChronikLokadIdClient: + return ChronikLokadIdClient(self, lokad_id_hex) + def pause(self) -> ChronikResponse: return self._request_get("/pause", pb.Empty) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -664,6 +664,13 @@ ws.sub_token_id(token_id, is_unsub=is_unsub) +def chronik_sub_lokad_id(ws, node, lokad_id: bytes, *, is_unsub=False) -> None: + """Subscribe to LOKAD ID events and make sure the subscription is active before returning""" + subscribe_log = 
"unsubscribe from" if is_unsub else "subscribe to" + with node.assert_debug_log([f"WS {subscribe_log} LOKAD ID {lokad_id.hex()}"]): + ws.sub_lokad_id(lokad_id, is_unsub=is_unsub) + + class TestFrameworkUtil(unittest.TestCase): def test_modinv(self): test_vectors = [