
diff --git a/chronik/chronik-db/src/db.rs b/chronik/chronik-db/src/db.rs
index 308263d85..3a029a3e4 100644
--- a/chronik/chronik-db/src/db.rs
+++ b/chronik/chronik-db/src/db.rs
@@ -1,165 +1,167 @@
// Copyright (c) 2022 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//! Module containing [`Db`] and errors, which encapsulates a database.
//! Read and write operations should exclusively be done with dedicated writers
//! and readers, such as [`crate::io::BlockWriter`].
use std::path::Path;
use abc_rust_error::Result;
pub use rocksdb::WriteBatch;
use rocksdb::{ColumnFamilyDescriptor, IteratorMode};
use thiserror::Error;
use crate::{
groups::ScriptHistoryWriter,
io::{BlockWriter, MetadataWriter, TxWriter},
};
// All column family names used by Chronik should be defined here
/// Column family name for the block data.
pub const CF_BLK: &str = "blk";
/// Column family for the first tx_num of the block. Used to get a list of the
/// txs of the block.
pub const CF_BLK_BY_FIRST_TX: &str = "blk_by_first_tx";
/// Column family for the block height of the first tx_num of that block. Used
/// to get the block height of a tx.
pub const CF_FIRST_TX_BY_BLK: &str = "first_tx_by_blk";
/// Column family to lookup a block by its hash.
pub const CF_LOOKUP_BLK_BY_HASH: &str = "lookup_blk_by_hash";
/// Column family to lookup a tx by its hash.
pub const CF_LOOKUP_TX_BY_HASH: &str = "lookup_tx_by_hash";
/// Column family name for db metadata.
pub const CF_META: &str = "meta";
/// Column family to store tx history by script.
pub const CF_SCRIPT_HISTORY: &str = "script_history";
+/// Column family for utxos by script.
+pub const CF_SCRIPT_UTXO: &str = "script_utxo";
/// Column family for the tx data.
pub const CF_TX: &str = "tx";
pub(crate) type CF = rocksdb::ColumnFamily;
/// Indexer database.
/// Owns the underlying [`rocksdb::DB`] instance.
#[derive(Debug)]
pub struct Db {
db: rocksdb::DB,
cf_names: Vec<String>,
}
/// Errors indicating something went wrong with the database itself.
#[derive(Debug, Error)]
pub enum DbError {
/// Column family requested but not defined during `Db::open`.
#[error("Column family {0} doesn't exist")]
NoSuchColumnFamily(String),
/// Error with RocksDB itself, e.g. db inconsistency.
#[error("RocksDB error: {0}")]
RocksDb(rocksdb::Error),
}
use self::DbError::*;
impl Db {
/// Opens the database under the specified path.
/// Creates the database file and column families if necessary.
pub fn open(path: impl AsRef<Path>) -> Result<Self> {
let mut cfs = Vec::new();
BlockWriter::add_cfs(&mut cfs);
MetadataWriter::add_cfs(&mut cfs);
TxWriter::add_cfs(&mut cfs);
ScriptHistoryWriter::add_cfs(&mut cfs);
Self::open_with_cfs(path, cfs)
}
pub(crate) fn open_with_cfs(
path: impl AsRef<Path>,
cfs: Vec<ColumnFamilyDescriptor>,
) -> Result<Self> {
let db_options = Self::db_options();
let cf_names = cfs.iter().map(|cf| cf.name().to_string()).collect();
let db = rocksdb::DB::open_cf_descriptors(&db_options, path, cfs)
.map_err(RocksDb)?;
Ok(Db { db, cf_names })
}
fn db_options() -> rocksdb::Options {
let mut db_options = rocksdb::Options::default();
db_options.create_if_missing(true);
db_options.create_missing_column_families(true);
db_options
}
/// Destroy the DB, i.e. delete all its associated files.
///
/// According to the RocksDB docs, this differs from removing the dir:
/// DestroyDB() will take care of the case where the RocksDB database is
/// stored in multiple directories. For instance, a single DB can be
/// configured to store its data in multiple directories by specifying
/// different paths to DBOptions::db_paths, DBOptions::db_log_dir, and
/// DBOptions::wal_dir.
pub fn destroy(path: impl AsRef<Path>) -> Result<()> {
let db_options = Self::db_options();
rocksdb::DB::destroy(&db_options, path).map_err(RocksDb)?;
Ok(())
}
/// Return a column family handle with the given name.
pub fn cf(&self, name: &str) -> Result<&CF> {
Ok(self
.db
.cf_handle(name)
.ok_or_else(|| NoSuchColumnFamily(name.to_string()))?)
}
pub(crate) fn get(
&self,
cf: &CF,
key: impl AsRef<[u8]>,
) -> Result<Option<rocksdb::DBPinnableSlice<'_>>> {
Ok(self.db.get_pinned_cf(cf, key).map_err(RocksDb)?)
}
pub(crate) fn iterator_end(
&self,
cf: &CF,
) -> impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>> + '_ {
self.db
.iterator_cf(cf, IteratorMode::End)
.map(|result| Ok(result.map_err(RocksDb)?))
}
pub(crate) fn iterator(
&self,
cf: &CF,
start: &[u8],
direction: rocksdb::Direction,
) -> impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>> + '_ {
self.db
.iterator_cf(cf, IteratorMode::From(start, direction))
.map(|result| Ok(result.map_err(RocksDb)?))
}
/// Writes the batch to the Db atomically.
pub fn write_batch(&self, write_batch: WriteBatch) -> Result<()> {
self.db.write(write_batch).map_err(RocksDb)?;
Ok(())
}
/// Whether the DB is empty, i.e. none of the column families has any data.
///
/// Note: RocksDB requires all column families to be opened, therefore this
/// will always iterate through all column families.
pub fn is_db_empty(&self) -> Result<bool> {
for cf_name in &self.cf_names {
let cf = self.cf(cf_name)?;
let mut cf_iter = self.db.full_iterator_cf(cf, IteratorMode::Start);
if cf_iter.next().is_some() {
return Ok(false);
}
}
Ok(true)
}
}
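
For orientation (not part of the patch), a minimal sketch of how the Db wrapper above is used, assuming chronik_db is available as a dependency and the target path is writable; the function name and keys are made up for illustration:

use abc_rust_error::Result;
use chronik_db::db::{Db, WriteBatch, CF_META};

fn open_and_write(path: &std::path::Path) -> Result<()> {
    // Open (or create) the DB with the default column families.
    let db = Db::open(path)?;
    // Look up a column family handle; errors with NoSuchColumnFamily otherwise.
    let cf_meta = db.cf(CF_META)?;
    // Stage writes in a WriteBatch and commit them atomically.
    let mut batch = WriteBatch::default();
    batch.put_cf(cf_meta, b"example_key", b"example_value");
    db.write_batch(batch)?;
    Ok(())
}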
diff --git a/chronik/chronik-db/src/group.rs b/chronik/chronik-db/src/group.rs
index 70ee80d7a..2b9cccc38 100644
--- a/chronik/chronik-db/src/group.rs
+++ b/chronik/chronik-db/src/group.rs
@@ -1,88 +1,91 @@
// Copyright (c) 2023 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//! Module for [`Group`] and [`GroupQuery`].
use bitcoinsuite_core::tx::Tx;
-use crate::io::GroupHistoryConf;
+use crate::io::{GroupHistoryConf, GroupUtxoConf};
/// Struct giving impls of [`Group`] all the necessary data to determine the
/// member of the group.
#[derive(Clone, Copy, Debug)]
pub struct GroupQuery<'a> {
/// Whether the tx is a coinbase tx.
pub is_coinbase: bool,
/// The transaction that should be grouped.
pub tx: &'a Tx,
}
/// Item returned by [`Group::input_members`] and [`Group::output_members`].
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct MemberItem<M> {
/// Index of the item in the list of inputs/outputs.
pub idx: usize,
/// Member of this item.
pub member: M,
}
/// Groups txs and determines which members they are.
///
/// A member is one instance in a group and can be anything in a tx, e.g. the
/// input and output scripts, the SLP token ID, a SWaP signal, a smart contract
/// etc.
pub trait Group {
/// Iterator over the members found for a given [`GroupQuery`].
type Iter<'a>: IntoIterator<Item = MemberItem<Self::Member<'a>>> + 'a;
/// Member of a group, this is what txs will be grouped by.
///
/// We use a HashMap to group txs, so it must implement [`std::hash::Hash`].
type Member<'a>: std::hash::Hash + Eq;
/// Serialized member, this is what will be used as key in the DB.
/// Normally, this will be a [`Vec<u8>`] or a [`u8`] array or slice.
///
/// We use this to separate the code that groups members from the
/// serialization of members. For example, we can group by raw
/// input/output script bytecode, but then use compressed scripts when
/// adding to the DB. That way, we don't have to run the compression every
/// time we compare/hash elements for grouping.
///
/// Note: For group history, this will be suffixed by a 4-byte page number.
type MemberSer<'a>: AsRef<[u8]> + 'a;
/// Find the group's members in the given query's tx's inputs.
///
/// Note: This is allowed to return a member multiple times per query.
///
/// Note: The returned iterator is allowed to borrow from the query.
fn input_members<'a>(&self, query: GroupQuery<'a>) -> Self::Iter<'a>;
/// Find the group's members in the given query's tx's outputs.
///
/// Note: This is allowed to return a member multiple times per query.
///
/// Note: The returned iterator is allowed to borrow from the query.
fn output_members<'a>(&self, query: GroupQuery<'a>) -> Self::Iter<'a>;
/// Serialize the given member.
fn ser_member<'a>(&self, member: &Self::Member<'a>) -> Self::MemberSer<'a>;
/// The [`GroupHistoryConf`] for this group.
fn tx_history_conf() -> GroupHistoryConf;
+
+ /// The [`GroupUtxoConf`] for this group.
+ fn utxo_conf() -> GroupUtxoConf;
}
/// Helper which returns the `G::Member`s of both inputs and outputs of the
/// group for the tx.
pub fn tx_members_for_group<'a, G: Group>(
group: &G,
query: GroupQuery<'a>,
) -> impl Iterator<Item = G::Member<'a>> {
group
.input_members(query)
.into_iter()
.chain(group.output_members(query))
.map(|item| item.member)
}
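
For orientation (not part of the patch), a hedged sketch of how a caller iterates a tx's members with tx_members_for_group, using the ScriptGroup and mock compression defined in the next file; the function name is made up:

use bitcoinsuite_core::tx::Tx;
use chronik_db::{
    group::{tx_members_for_group, GroupQuery},
    groups::{prefix_mock_compress, ScriptGroup},
};

/// Count the grouped members (input + output scripts) of a non-coinbase tx.
fn count_script_members(tx: &Tx) -> usize {
    let group = ScriptGroup::new(prefix_mock_compress);
    let query = GroupQuery {
        is_coinbase: false,
        tx,
    };
    tx_members_for_group(&group, query).count()
}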
diff --git a/chronik/chronik-db/src/groups/script.rs b/chronik/chronik-db/src/groups/script.rs
index 8c0299181..aa92bc85f 100644
--- a/chronik/chronik-db/src/groups/script.rs
+++ b/chronik/chronik-db/src/groups/script.rs
@@ -1,198 +1,206 @@
// Copyright (c) 2023 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
use bitcoinsuite_core::script::Script;
use crate::{
- db::CF_SCRIPT_HISTORY,
+ db::{CF_SCRIPT_HISTORY, CF_SCRIPT_UTXO},
group::{Group, GroupQuery, MemberItem},
- io::{GroupHistoryConf, GroupHistoryReader, GroupHistoryWriter},
+ io::{
+ GroupHistoryConf, GroupHistoryReader, GroupHistoryWriter, GroupUtxoConf,
+ },
mem::MempoolGroupHistory,
};
/// Index the mempool tx history of scripts
pub type MempoolScriptHistory = MempoolGroupHistory<ScriptGroup>;
/// Index the tx history of scripts in the DB
pub type ScriptHistoryWriter<'a> = GroupHistoryWriter<'a, ScriptGroup>;
/// Read the tx history of scripts in the DB
pub type ScriptHistoryReader<'a> = GroupHistoryReader<'a, ScriptGroup>;
/// Function ptr to compress scripts
pub type FnCompressScript = fn(&Script) -> Vec<u8>;
/// Group txs by input/output scripts.
#[derive(Clone)]
pub struct ScriptGroup {
/// Function to compress scripts.
fn_compress_script: FnCompressScript,
}
impl Group for ScriptGroup {
type Iter<'a> = Vec<MemberItem<&'a Script>>;
type Member<'a> = &'a Script;
type MemberSer<'a> = Vec<u8>;
fn input_members<'a>(&self, query: GroupQuery<'a>) -> Self::Iter<'a> {
if query.is_coinbase {
return vec![];
}
let mut input_scripts = Vec::with_capacity(query.tx.inputs.len());
for (idx, input) in query.tx.inputs.iter().enumerate() {
if let Some(coin) = &input.coin {
input_scripts.push(MemberItem {
idx,
member: &coin.output.script,
});
}
}
input_scripts
}
fn output_members<'a>(&self, query: GroupQuery<'a>) -> Self::Iter<'a> {
let mut output_scripts = Vec::with_capacity(query.tx.outputs.len());
for (idx, output) in query.tx.outputs.iter().enumerate() {
if !output.script.is_opreturn() {
output_scripts.push(MemberItem {
idx,
member: &output.script,
});
}
}
output_scripts
}
fn ser_member<'a>(&self, member: &Self::Member<'a>) -> Self::MemberSer<'a> {
(self.fn_compress_script)(member)
}
fn tx_history_conf() -> GroupHistoryConf {
GroupHistoryConf {
cf_name: CF_SCRIPT_HISTORY,
page_size: 1000,
}
}
+
+ fn utxo_conf() -> GroupUtxoConf {
+ GroupUtxoConf {
+ cf_name: CF_SCRIPT_UTXO,
+ }
+ }
}
impl ScriptGroup {
/// Create a new [`ScriptGroup`].
pub fn new(fn_compress_script: FnCompressScript) -> Self {
ScriptGroup { fn_compress_script }
}
}
impl std::fmt::Debug for ScriptGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ScriptGroup").finish_non_exhaustive()
}
}
/// A mock "compression" that just prefixes with "COMPRESS:".
pub fn prefix_mock_compress(script: &Script) -> Vec<u8> {
[b"COMPRESS:".as_ref(), script.as_ref()].concat()
}
#[cfg(test)]
mod tests {
use bitcoinsuite_core::{
script::Script,
tx::{Coin, Tx, TxId, TxInput, TxMut, TxOutput},
};
use crate::{
group::{tx_members_for_group, Group, GroupQuery, MemberItem},
groups::{prefix_mock_compress, ScriptGroup},
};
#[test]
fn test_script_group() {
let script_group = ScriptGroup::new(prefix_mock_compress);
let tx = Tx::with_txid(
TxId::from([0; 32]),
TxMut {
inputs: [[0x51].as_ref(), &[0x52]]
.into_iter()
.map(|script| TxInput {
coin: Some(Coin {
output: TxOutput {
script: Script::new(script.into()),
..Default::default()
},
..Default::default()
}),
..Default::default()
})
.collect(),
outputs: [[0x53].as_ref(), &[0x51]]
.into_iter()
.map(|script| TxOutput {
script: Script::new(script.into()),
..Default::default()
})
.collect(),
..Default::default()
},
);
let make_script = |script: Vec<u8>| Script::new(script.into());
fn make_member_item(
idx: usize,
script: &Script,
) -> MemberItem<&Script> {
MemberItem {
idx,
member: script,
}
}
let query = GroupQuery {
is_coinbase: false,
tx: &tx,
};
assert_eq!(
tx_members_for_group(&script_group, query).collect::<Vec<_>>(),
vec![
&make_script(vec![0x51]),
&make_script(vec![0x52]),
&make_script(vec![0x53]),
&make_script(vec![0x51]),
],
);
assert_eq!(
script_group.input_members(query),
vec![
make_member_item(0, &make_script(vec![0x51])),
make_member_item(1, &make_script(vec![0x52])),
],
);
assert_eq!(
script_group.output_members(query),
vec![
make_member_item(0, &make_script(vec![0x53])),
make_member_item(1, &make_script(vec![0x51])),
],
);
let query = GroupQuery {
is_coinbase: true,
tx: &tx,
};
assert_eq!(
tx_members_for_group(&script_group, query).collect::<Vec<_>>(),
vec![
&Script::new(vec![0x53].into()),
&Script::new(vec![0x51].into()),
],
);
assert_eq!(script_group.input_members(query), vec![]);
assert_eq!(
script_group.output_members(query),
vec![
make_member_item(0, &make_script(vec![0x53])),
make_member_item(1, &make_script(vec![0x51])),
],
);
assert_eq!(
script_group.ser_member(&&make_script(vec![0x53])),
[b"COMPRESS:".as_ref(), &[0x53]].concat(),
);
}
}
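
Not part of this diff, but for symmetry with the history aliases above, the new UTXO reader/writer could be aliased for scripts the same way; the alias names below are hypothetical:

use chronik_db::{
    groups::ScriptGroup,
    io::{GroupUtxoReader, GroupUtxoWriter},
};

/// Hypothetical alias: write the UTXOs of scripts to the DB.
pub type ScriptUtxoWriter<'a> = GroupUtxoWriter<'a, ScriptGroup>;
/// Hypothetical alias: read the UTXOs of scripts from the DB.
pub type ScriptUtxoReader<'a> = GroupUtxoReader<'a, ScriptGroup>;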
diff --git a/chronik/chronik-db/src/io/group_utxos.rs b/chronik/chronik-db/src/io/group_utxos.rs
new file mode 100644
index 000000000..ddeaadc0c
--- /dev/null
+++ b/chronik/chronik-db/src/io/group_utxos.rs
@@ -0,0 +1,452 @@
+// Copyright (c) 2023 The Bitcoin developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+//! Module for [`GroupUtxoWriter`] and [`GroupUtxoReader`].
+
+use std::{
+ collections::{hash_map::Entry, HashMap},
+ marker::PhantomData,
+};
+
+use abc_rust_error::Result;
+use rocksdb::WriteBatch;
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+
+use crate::{
+ db::{Db, CF},
+ group::{Group, GroupQuery},
+ index_tx::IndexTx,
+ io::TxNum,
+ ser::{db_deserialize, db_serialize},
+};
+
+/// Configuration for group utxos reader/writers.
+#[derive(Clone, Debug, Default, Eq, PartialEq)]
+pub struct GroupUtxoConf {
+ /// Column family to store the group utxos entries.
+ pub cf_name: &'static str,
+}
+
+struct GroupUtxoColumn<'a> {
+ db: &'a Db,
+ cf: &'a CF,
+}
+
+/// Outpoint in the DB, but with [`TxNum`] instead of `TxId` for the txid.
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ Default,
+ Deserialize,
+ Eq,
+ Ord,
+ PartialEq,
+ PartialOrd,
+ Serialize,
+)]
+pub struct UtxoOutpoint {
+ /// [`TxNum`] of tx of the outpoint.
+ pub tx_num: TxNum,
+    /// Index of the output of the tx referenced by the outpoint.
+ pub out_idx: u32,
+}
+
+/// Entry in the UTXO DB for a group.
+#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
+pub struct UtxoEntry {
+ /// Outpoint of the UTXO.
+ pub outpoint: UtxoOutpoint,
+
+ /// Value of the UTXO in satoshis.
+ ///
+ /// This is currently geared towards ScriptGroup, which can fully
+ /// reconstruct the UTXO without reading the full tx from the node
+ /// blockfile, using only the UTXO value.
+ ///
+ /// This may or may not be useful for other groups, but when that time
+ /// arrives, we can make this more generic by e.g. adding a `type
+ /// UtxoPayload: Deserialize + Serialize`.
+ pub value: i64,
+}
+
+/// Write UTXOs of a group to the DB.
+#[derive(Debug)]
+pub struct GroupUtxoWriter<'a, G: Group> {
+ col: GroupUtxoColumn<'a>,
+ group: G,
+}
+
+/// Read UTXOs of a group from the DB.
+#[derive(Debug)]
+pub struct GroupUtxoReader<'a, G: Group> {
+ col: GroupUtxoColumn<'a>,
+ phantom: PhantomData<G>,
+}
+
+/// Errors indicating something went wrong with the UTXO index.
+#[derive(Debug, Error, PartialEq, Eq)]
+pub enum GroupUtxoError {
+ /// UTXO already in the DB
+ #[error(
+ "Duplicate UTXO: {0:?} has been added twice to the member's UTXOs"
+ )]
+ DuplicateUtxo(UtxoEntry),
+
+    /// UTXO not in the DB
+ #[error("UTXO doesn't exist: {0:?} is not in the member's UTXOs")]
+ UtxoDoesntExist(UtxoOutpoint),
+}
+
+use self::GroupUtxoError::*;
+
+impl<'a> GroupUtxoColumn<'a> {
+ fn new(db: &'a Db, conf: &GroupUtxoConf) -> Result<Self> {
+ let cf = db.cf(conf.cf_name)?;
+ Ok(GroupUtxoColumn { db, cf })
+ }
+}
+
+impl<'a, G: Group> GroupUtxoWriter<'a, G> {
+ /// Create a new [`GroupUtxoWriter`].
+ pub fn new(db: &'a Db, group: G) -> Result<Self> {
+ let conf = G::utxo_conf();
+ let col = GroupUtxoColumn::new(db, &conf)?;
+ Ok(GroupUtxoWriter { col, group })
+ }
+
+    /// Apply the txs of a block to the UTXOs in the DB for the group.
+    ///
+    /// Add all the UTXOs created by the outputs of the txs to the DB, and
+    /// remove all the UTXOs spent by the inputs of the txs.
+ pub fn insert<'tx>(
+ &self,
+ batch: &mut WriteBatch,
+ txs: &'tx [IndexTx<'tx>],
+ ) -> Result<()> {
+ let mut updated_utxos =
+ HashMap::<G::Member<'tx>, Vec<UtxoEntry>>::new();
+ for index_tx in txs {
+ let query = GroupQuery {
+ is_coinbase: index_tx.is_coinbase,
+ tx: index_tx.tx,
+ };
+ for item in self.group.output_members(query) {
+ let entries =
+ self.get_or_fetch(&mut updated_utxos, item.member)?;
+ let new_entry = Self::output_utxo(index_tx, item.idx);
+ Self::insert_utxo_entry(new_entry, entries)?;
+ }
+ }
+ for index_tx in txs {
+ if index_tx.is_coinbase {
+ continue;
+ }
+ let query = GroupQuery {
+ is_coinbase: index_tx.is_coinbase,
+ tx: index_tx.tx,
+ };
+ for item in self.group.input_members(query) {
+ let entries =
+ self.get_or_fetch(&mut updated_utxos, item.member)?;
+ let delete_entry = Self::input_utxo(index_tx, item.idx);
+ Self::delete_utxo_entry(&delete_entry.outpoint, entries)?;
+ }
+ }
+ self.update_write_batch(batch, &updated_utxos)?;
+ Ok(())
+ }
+
+    /// Undo the txs of a block on the UTXOs in the DB for the group.
+    ///
+    /// Re-add all the UTXOs spent by the inputs of the txs and remove all the
+    /// UTXOs created by the outputs of the txs.
+ pub fn delete<'tx>(
+ &self,
+ batch: &mut WriteBatch,
+ txs: &'tx [IndexTx<'tx>],
+ ) -> Result<()> {
+ let mut updated_utxos =
+ HashMap::<G::Member<'tx>, Vec<UtxoEntry>>::new();
+ for index_tx in txs {
+ if index_tx.is_coinbase {
+ continue;
+ }
+ let query = GroupQuery {
+ is_coinbase: index_tx.is_coinbase,
+ tx: index_tx.tx,
+ };
+ for item in self.group.input_members(query) {
+ let entries =
+ self.get_or_fetch(&mut updated_utxos, item.member)?;
+ let new_entry = Self::input_utxo(index_tx, item.idx);
+ Self::insert_utxo_entry(new_entry, entries)?;
+ }
+ }
+ for index_tx in txs {
+ let query = GroupQuery {
+ is_coinbase: index_tx.is_coinbase,
+ tx: index_tx.tx,
+ };
+ for item in self.group.output_members(query) {
+ let entries =
+ self.get_or_fetch(&mut updated_utxos, item.member)?;
+ let delete_entry = Self::output_utxo(index_tx, item.idx);
+ Self::delete_utxo_entry(&delete_entry.outpoint, entries)?;
+ }
+ }
+ self.update_write_batch(batch, &updated_utxos)?;
+ Ok(())
+ }
+
+ #[cfg(test)]
+ pub(crate) fn add_cfs(columns: &mut Vec<rocksdb::ColumnFamilyDescriptor>) {
+ columns.push(rocksdb::ColumnFamilyDescriptor::new(
+ G::utxo_conf().cf_name,
+ rocksdb::Options::default(),
+ ));
+ }
+
+ fn output_utxo(index_tx: &IndexTx<'_>, idx: usize) -> UtxoEntry {
+ UtxoEntry {
+ outpoint: UtxoOutpoint {
+ tx_num: index_tx.tx_num,
+ out_idx: idx as u32,
+ },
+ value: index_tx.tx.outputs[idx].value,
+ }
+ }
+
+ fn input_utxo(index_tx: &IndexTx<'_>, idx: usize) -> UtxoEntry {
+ UtxoEntry {
+ outpoint: UtxoOutpoint {
+ tx_num: index_tx.input_nums[idx],
+ out_idx: index_tx.tx.inputs[idx].prev_out.out_idx,
+ },
+ value: index_tx.tx.inputs[idx]
+ .coin
+ .as_ref()
+ .map(|coin| coin.output.value)
+ .unwrap_or_default(),
+ }
+ }
+
+ fn insert_utxo_entry(
+ new_entry: UtxoEntry,
+ entries: &mut Vec<UtxoEntry>,
+ ) -> Result<()> {
+ match entries
+ .binary_search_by_key(&&new_entry.outpoint, |entry| &entry.outpoint)
+ {
+ Ok(_) => return Err(DuplicateUtxo(new_entry).into()),
+ Err(insert_idx) => entries.insert(insert_idx, new_entry),
+ }
+ Ok(())
+ }
+
+ fn delete_utxo_entry(
+ delete_outpoint: &UtxoOutpoint,
+ entries: &mut Vec<UtxoEntry>,
+ ) -> Result<()> {
+ match entries
+ .binary_search_by_key(&delete_outpoint, |entry| &entry.outpoint)
+ {
+ Ok(delete_idx) => entries.remove(delete_idx),
+ Err(_) => return Err(UtxoDoesntExist(*delete_outpoint).into()),
+ };
+ Ok(())
+ }
+
+ fn get_or_fetch<'u, 'tx>(
+ &self,
+ utxos: &'u mut HashMap<G::Member<'tx>, Vec<UtxoEntry>>,
+ member: G::Member<'tx>,
+ ) -> Result<&'u mut Vec<UtxoEntry>> {
+ match utxos.entry(member) {
+ Entry::Occupied(entry) => Ok(entry.into_mut()),
+ Entry::Vacant(entry) => {
+ let member_ser = self.group.ser_member(entry.key());
+ let db_entries =
+ match self.col.db.get(self.col.cf, member_ser.as_ref())? {
+ Some(data) => db_deserialize::<Vec<UtxoEntry>>(&data)?,
+ None => vec![],
+ };
+ Ok(entry.insert(db_entries))
+ }
+ }
+ }
+
+ fn update_write_batch(
+ &self,
+ batch: &mut WriteBatch,
+ utxos: &HashMap<G::Member<'_>, Vec<UtxoEntry>>,
+ ) -> Result<()> {
+ for (member, utxos) in utxos {
+ let member_ser = self.group.ser_member(member);
+ if utxos.is_empty() {
+ batch.delete_cf(self.col.cf, member_ser.as_ref());
+ } else {
+ batch.put_cf(
+ self.col.cf,
+ member_ser.as_ref(),
+ db_serialize(&utxos)?,
+ );
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<'a, G: Group> GroupUtxoReader<'a, G> {
+ /// Create a new [`GroupUtxoReader`].
+ pub fn new(db: &'a Db) -> Result<Self> {
+ let conf = G::utxo_conf();
+ let col = GroupUtxoColumn::new(db, &conf)?;
+ Ok(GroupUtxoReader {
+ col,
+ phantom: PhantomData,
+ })
+ }
+
+ /// Query the UTXOs for the given member.
+ pub fn utxos(&self, member: &[u8]) -> Result<Option<Vec<UtxoEntry>>> {
+ match self.col.db.get(self.col.cf, member)? {
+ Some(entry) => Ok(Some(db_deserialize::<Vec<UtxoEntry>>(&entry)?)),
+ None => Ok(None),
+ }
+ }
+}
+
+impl std::fmt::Debug for GroupUtxoColumn<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "GroupUtxoColumn {{ .. }}")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::cell::RefCell;
+
+ use abc_rust_error::Result;
+ use bitcoinsuite_core::tx::Tx;
+ use rocksdb::WriteBatch;
+
+ use crate::{
+ db::Db,
+ index_tx::prepare_indexed_txs,
+ io::{
+ BlockTxs, GroupUtxoReader, GroupUtxoWriter, TxEntry, TxWriter,
+ UtxoEntry, UtxoOutpoint,
+ },
+ test::{make_inputs_tx, ser_value, ValueGroup},
+ };
+
+ #[test]
+ fn test_value_group_utxos() -> Result<()> {
+ abc_rust_error::install();
+        let tempdir = tempdir::TempDir::new("chronik-db--group_utxos")?;
+ let mut cfs = Vec::new();
+ GroupUtxoWriter::<ValueGroup>::add_cfs(&mut cfs);
+ TxWriter::add_cfs(&mut cfs);
+ let db = Db::open_with_cfs(tempdir.path(), cfs)?;
+ let tx_writer = TxWriter::new(&db)?;
+ let group_writer = GroupUtxoWriter::new(&db, ValueGroup)?;
+ let group_reader = GroupUtxoReader::<ValueGroup>::new(&db)?;
+
+ let block_height = RefCell::new(-1);
+ let txs_batch = |txs: &[Tx]| BlockTxs {
+ txs: txs
+ .iter()
+ .map(|tx| TxEntry {
+ txid: tx.txid(),
+ ..Default::default()
+ })
+ .collect(),
+ block_height: *block_height.borrow(),
+ };
+ let connect_block = |txs: &[Tx]| -> Result<()> {
+ let mut batch = WriteBatch::default();
+ *block_height.borrow_mut() += 1;
+ let first_tx_num = tx_writer.insert(&mut batch, &txs_batch(txs))?;
+ let index_txs = prepare_indexed_txs(&db, first_tx_num, txs)?;
+ group_writer.insert(&mut batch, &index_txs)?;
+ db.write_batch(batch)?;
+ Ok(())
+ };
+ let disconnect_block = |txs: &[Tx]| -> Result<()> {
+ let mut batch = WriteBatch::default();
+ let first_tx_num = tx_writer.delete(&mut batch, &txs_batch(txs))?;
+ let index_txs = prepare_indexed_txs(&db, first_tx_num, txs)?;
+ group_writer.delete(&mut batch, &index_txs)?;
+ db.write_batch(batch)?;
+ *block_height.borrow_mut() -= 1;
+ Ok(())
+ };
+ let utxo = |tx_num, out_idx, value| UtxoEntry {
+ outpoint: UtxoOutpoint { tx_num, out_idx },
+ value,
+ };
+ let read_utxos = |val: i64| group_reader.utxos(&ser_value(val));
+
+ let block0 =
+ vec![make_inputs_tx(0x01, [(0x00, u32::MAX, 0xffff)], [100, 200])];
+ connect_block(&block0)?;
+ assert_eq!(read_utxos(100)?, Some(vec![utxo(0, 0, 100)]));
+ assert_eq!(read_utxos(200)?, Some(vec![utxo(0, 1, 200)]));
+
+ let block1 = vec![
+ make_inputs_tx(0x02, [(0x00, u32::MAX, 0xffff)], [200]),
+ make_inputs_tx(0x03, [(0x01, 0, 100)], [10, 20, 10]),
+ make_inputs_tx(
+ 0x04,
+ [(0x03, 0, 10), (0x01, 1, 200), (0x03, 1, 20)],
+ [200],
+ ),
+ ];
+ connect_block(&block1)?;
+ assert_eq!(read_utxos(10)?, Some(vec![utxo(2, 2, 10)]));
+ assert_eq!(read_utxos(20)?, None);
+ assert_eq!(read_utxos(100)?, None);
+ assert_eq!(
+ read_utxos(200)?,
+ Some(vec![utxo(1, 0, 200), utxo(3, 0, 200)]),
+ );
+
+ disconnect_block(&block1)?;
+ assert_eq!(read_utxos(10)?, None);
+ assert_eq!(read_utxos(20)?, None);
+ assert_eq!(read_utxos(100)?, Some(vec![utxo(0, 0, 100)]));
+ assert_eq!(read_utxos(200)?, Some(vec![utxo(0, 1, 200)]));
+
+ // Reorg block
+ let block1 = vec![
+ make_inputs_tx(0x02, [(0x00, u32::MAX, 0xffff)], [200]),
+ make_inputs_tx(0x10, [(0x01, 0, 100)], [100, 200, 100]),
+ make_inputs_tx(
+ 0x11,
+ [(0x10, 0, 100), (0x10, 1, 200), (0x01, 1, 200)],
+ [200],
+ ),
+ make_inputs_tx(0x12, [(0x11, 0, 200)], [200]),
+ ];
+
+ connect_block(&block1)?;
+ assert_eq!(read_utxos(100)?, Some(vec![utxo(2, 2, 100)]));
+ assert_eq!(
+ read_utxos(200)?,
+ Some(vec![utxo(1, 0, 200), utxo(4, 0, 200)]),
+ );
+
+ disconnect_block(&block1)?;
+ assert_eq!(read_utxos(100)?, Some(vec![utxo(0, 0, 100)]));
+ assert_eq!(read_utxos(200)?, Some(vec![utxo(0, 1, 200)]));
+
+ disconnect_block(&block0)?;
+ assert_eq!(read_utxos(100)?, None);
+ assert_eq!(read_utxos(200)?, None);
+
+ Ok(())
+ }
+}
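
Outside of the test above, a hedged sketch of querying the new reader, assuming the group's UTXO column family was registered when the Db was opened; the function name is made up:

use abc_rust_error::Result;
use chronik_db::{
    db::Db,
    groups::ScriptGroup,
    io::{GroupUtxoReader, UtxoEntry},
};

/// Return the UTXOs of one serialized member (e.g. a compressed script),
/// or an empty list if the member currently has no UTXOs.
fn member_utxos(db: &Db, member_ser: &[u8]) -> Result<Vec<UtxoEntry>> {
    let reader = GroupUtxoReader::<ScriptGroup>::new(db)?;
    Ok(reader.utxos(member_ser)?.unwrap_or_default())
}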
diff --git a/chronik/chronik-db/src/io/mod.rs b/chronik/chronik-db/src/io/mod.rs
index 6615b294b..c1122a6b0 100644
--- a/chronik/chronik-db/src/io/mod.rs
+++ b/chronik/chronik-db/src/io/mod.rs
@@ -1,15 +1,17 @@
// Copyright (c) 2022 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//! Module containing readers and writers for the database used by Chronik.
mod blocks;
mod group_history;
+mod group_utxos;
mod metadata;
mod txs;
pub use self::blocks::*;
pub use self::group_history::*;
+pub use self::group_utxos::*;
pub use self::metadata::*;
pub use self::txs::*;
diff --git a/chronik/chronik-db/src/test/value_group.rs b/chronik/chronik-db/src/test/value_group.rs
index 5fc5e966c..cfa208844 100644
--- a/chronik/chronik-db/src/test/value_group.rs
+++ b/chronik/chronik-db/src/test/value_group.rs
@@ -1,117 +1,123 @@
// Copyright (c) 2023 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
use bitcoinsuite_core::tx::{
Coin, OutPoint, Tx, TxId, TxInput, TxMut, TxOutput,
};
use crate::{
group::{Group, GroupQuery, MemberItem},
- io::GroupHistoryConf,
+ io::{GroupHistoryConf, GroupUtxoConf},
};
/// Index by output/input value. While useless in practice, this makes
/// writing tests very convenient and showcases how Group can be used.
pub(crate) struct ValueGroup;
impl Group for ValueGroup {
type Iter<'a> = Vec<MemberItem<i64>>;
type Member<'a> = i64;
type MemberSer<'a> = [u8; 8];
fn input_members(&self, query: GroupQuery<'_>) -> Self::Iter<'_> {
let mut inputs = Vec::new();
if !query.is_coinbase {
for (idx, input) in query.tx.inputs.iter().enumerate() {
if let Some(coin) = &input.coin {
inputs.push(MemberItem {
idx,
member: coin.output.value,
});
}
}
}
inputs
}
fn output_members(&self, query: GroupQuery<'_>) -> Self::Iter<'_> {
let mut outputs = Vec::new();
for (idx, output) in query.tx.outputs.iter().enumerate() {
outputs.push(MemberItem {
idx,
member: output.value,
});
}
outputs
}
fn ser_member<'a>(&self, value: &i64) -> Self::MemberSer<'a> {
ser_value(*value)
}
fn tx_history_conf() -> GroupHistoryConf {
GroupHistoryConf {
cf_name: "value_history",
page_size: 4,
}
}
+
+ fn utxo_conf() -> GroupUtxoConf {
+ GroupUtxoConf {
+ cf_name: "value_utxo",
+ }
+ }
}
/// Serialize the value as array
pub(crate) fn ser_value(value: i64) -> [u8; 8] {
value.to_be_bytes()
}
/// Make a tx with inputs and outputs having the given values.
/// Also pass _tx_num, which is ignored, but allows tests to document the tx_num
/// of this tx.
pub(crate) fn make_value_tx<const N: usize, const M: usize>(
txid_num: u8,
input_values: [i64; N],
output_values: [i64; M],
) -> Tx {
make_inputs_tx(
txid_num,
input_values.map(|value| (0, 0, value)),
output_values,
)
}
pub(crate) fn make_inputs_tx<const N: usize, const M: usize>(
txid_num: u8,
input_values: [(u8, u32, i64); N],
output_values: [i64; M],
) -> Tx {
Tx::with_txid(
TxId::from([txid_num; 32]),
TxMut {
version: 0,
inputs: input_values
.into_iter()
.map(|(input_txid_num, out_idx, value)| TxInput {
prev_out: OutPoint {
txid: TxId::from([input_txid_num; 32]),
out_idx,
},
coin: Some(Coin {
output: TxOutput {
value,
..Default::default()
},
..Default::default()
}),
..Default::default()
})
.collect(),
outputs: output_values
.into_iter()
.map(|value| TxOutput {
value,
..Default::default()
})
.collect(),
locktime: 0,
},
)
}
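
For orientation, a crate-internal sketch (hedged; it mirrors how the existing group history tests use these helpers) of pairing make_value_tx with ValueGroup:

use crate::{
    group::{Group, GroupQuery},
    test::{make_value_tx, ValueGroup},
};

#[test]
fn sketch_value_group_members() {
    // One input worth 10, two outputs worth 3 and 7.
    let tx = make_value_tx(0x01, [10], [3, 7]);
    let query = GroupQuery {
        is_coinbase: false,
        tx: &tx,
    };
    // Members are the input/output values themselves.
    assert_eq!(ValueGroup.input_members(query)[0].member, 10);
    let output_values: Vec<i64> = ValueGroup
        .output_members(query)
        .into_iter()
        .map(|item| item.member)
        .collect();
    assert_eq!(output_values, vec![3, 7]);
}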
