From 582e265ac3ccaf7ec768fb0950d70ce0529b99a6 Mon Sep 17 00:00:00 2001 From: Kris Nuttycombe Date: Mon, 11 Mar 2024 11:01:53 -0600 Subject: [PATCH] zcash_client_backend: Require the tree state for the start of each scanned range. In order to support constructing the anchor for multiple pools with a common anchor height, we must be able to checkpoint each note commitment tree (and consequently compute the root) at that height. Since we may not have the information in the tree needed to do so, we require that it be provided. As a bonus, this change makes it possible to improve the UX around spendability, because we will no longer require subtree ranges below received notes to be fully scanned; the inserted frontier provides sufficient information to make them spendable. --- Cargo.lock | 10 +- Cargo.toml | 5 + zcash_client_backend/src/data_api.rs | 21 +- zcash_client_backend/src/data_api/chain.rs | 70 +++++- zcash_client_sqlite/Cargo.toml | 2 + zcash_client_sqlite/src/lib.rs | 199 ++++++++++++---- zcash_client_sqlite/src/testing.rs | 260 +++++++++++++++------ 7 files changed, 438 insertions(+), 129 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b671f7ebf6..0811e41d08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1087,8 +1087,7 @@ dependencies = [ [[package]] name = "incrementalmerkletree" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "361c467824d4d9d4f284be4b2608800839419dccc4d4608f28345237fe354623" +source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146" dependencies = [ "either", "proptest", @@ -1476,8 +1475,7 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "orchard" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb255c3ffdccd3c84fe9ebed72aef64fdc72e6a3e4180dd411002d47abaad42" +source = "git+https://github.com/nuttycom/orchard?rev=7ef1feaf1672980095f424be42fd5f79ba01a5aa#7ef1feaf1672980095f424be42fd5f79ba01a5aa" dependencies = [ "aes", "bitvec", @@ -2246,8 +2244,7 @@ dependencies = [ [[package]] name = "shardtree" version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf20c7a2747d9083092e3a3eeb9a7ed75577ae364896bebbc5e0bdcd4e97735" +source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146" dependencies = [ "assert_matches", "bitflags 2.4.1", @@ -3056,6 +3053,7 @@ name = "zcash_client_sqlite" version = "0.9.1" dependencies = [ "assert_matches", + "bls12_381", "bs58", "byteorder", "document-features", diff --git a/Cargo.toml b/Cargo.toml index d90c6d3e04..dc2efce9fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,3 +120,8 @@ zip32 = "0.1" lto = true panic = 'abort' codegen-units = 1 + +[patch.crates-io] +incrementalmerkletree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" } +shardtree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" } +orchard = { git = "https://github.com/nuttycom/orchard", rev = "7ef1feaf1672980095f424be42fd5f79ba01a5aa" } diff --git a/zcash_client_backend/src/data_api.rs b/zcash_client_backend/src/data_api.rs index 5a72ea9309..3c96d2e14e 100644 --- a/zcash_client_backend/src/data_api.rs +++ b/zcash_client_backend/src/data_api.rs @@ -67,7 +67,10 @@ use 
incrementalmerkletree::{frontier::Frontier, Retention}; use secrecy::SecretVec; use shardtree::{error::ShardTreeError, store::ShardStore, ShardTree}; -use self::{chain::CommitmentTreeRoot, scanning::ScanRange}; +use self::{ + chain::{ChainState, CommitmentTreeRoot}, + scanning::ScanRange, +}; use crate::{ address::UnifiedAddress, decrypt::DecryptedOutput, @@ -1260,8 +1263,11 @@ pub trait WalletWrite: WalletRead { /// pertaining to this wallet. /// /// `blocks` must be sequential, in order of increasing block height - fn put_blocks(&mut self, blocks: Vec>) - -> Result<(), Self::Error>; + fn put_blocks( + &mut self, + from_state: &ChainState, + blocks: Vec>, + ) -> Result<(), Self::Error>; /// Updates the wallet's view of the blockchain. /// @@ -1400,9 +1406,11 @@ pub mod testing { }; use super::{ - chain::CommitmentTreeRoot, scanning::ScanRange, AccountBirthday, BlockMetadata, - DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction, - WalletCommitmentTrees, WalletRead, WalletSummary, WalletWrite, SAPLING_SHARD_HEIGHT, + chain::{ChainState, CommitmentTreeRoot}, + scanning::ScanRange, + AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery, + ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary, + WalletWrite, SAPLING_SHARD_HEIGHT, }; #[cfg(feature = "transparent-inputs")] @@ -1633,6 +1641,7 @@ pub mod testing { #[allow(clippy::type_complexity)] fn put_blocks( &mut self, + _from_state: &ChainState, _blocks: Vec>, ) -> Result<(), Self::Error> { Ok(()) diff --git a/zcash_client_backend/src/data_api/chain.rs b/zcash_client_backend/src/data_api/chain.rs index 5cd911c522..965e54db14 100644 --- a/zcash_client_backend/src/data_api/chain.rs +++ b/zcash_client_backend/src/data_api/chain.rs @@ -145,6 +145,7 @@ use std::ops::Range; +use incrementalmerkletree::frontier::Frontier; use subtle::ConditionallySelectable; use zcash_primitives::consensus::{self, BlockHeight}; @@ -278,6 +279,68 @@ impl ScanSummary { } } +/// The final note commitment tree state for each shielded pool, as of a particular block height. +#[derive(Debug, Clone)] +pub struct ChainState { + block_height: BlockHeight, + final_sapling_tree: Frontier, + #[cfg(feature = "orchard")] + final_orchard_tree: + Frontier, +} + +impl ChainState { + /// Construct a new empty chain state. + pub fn empty(block_height: BlockHeight) -> Self { + Self { + block_height, + final_sapling_tree: Frontier::empty(), + #[cfg(feature = "orchard")] + final_orchard_tree: Frontier::empty(), + } + } + + /// Construct a new [`ChainState`] from its constituent parts. + pub fn new( + block_height: BlockHeight, + final_sapling_tree: Frontier, + #[cfg(feature = "orchard")] final_orchard_tree: Frontier< + orchard::tree::MerkleHashOrchard, + { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }, + >, + ) -> Self { + Self { + block_height, + final_sapling_tree, + #[cfg(feature = "orchard")] + final_orchard_tree, + } + } + + /// Returns the block height to which this chain state applies. + pub fn block_height(&self) -> BlockHeight { + self.block_height + } + + /// Returns the frontier of the Sapling note commitment tree as of the end of the block at + /// [`Self::block_height`]. + pub fn final_sapling_tree( + &self, + ) -> &Frontier { + &self.final_sapling_tree + } + + /// Returns the frontier of the Orchard note commitment tree as of the end of the block at + /// [`Self::block_height`]. 
+ #[cfg(feature = "orchard")] + pub fn final_orchard_tree( + &self, + ) -> &Frontier + { + &self.final_orchard_tree + } +} + /// Scans at most `limit` blocks from the provided block source for in order to find transactions /// received by the accounts tracked in the provided wallet database. /// @@ -290,7 +353,7 @@ pub fn scan_cached_blocks( params: &ParamsT, block_source: &BlockSourceT, data_db: &mut DbT, - from_height: BlockHeight, + from_state: &ChainState, limit: usize, ) -> Result> where @@ -299,6 +362,7 @@ where DbT: WalletWrite, ::AccountId: ConditionallySelectable + Default + Send + 'static, { + let from_height = from_state.block_height + 1; // Fetch the UnifiedFullViewingKeys we are tracking let account_ufvks = data_db .get_unified_full_viewing_keys() @@ -392,7 +456,9 @@ where }, )?; - data_db.put_blocks(scanned_blocks).map_err(Error::Wallet)?; + data_db + .put_blocks(from_state, scanned_blocks) + .map_err(Error::Wallet)?; Ok(scan_summary) } diff --git a/zcash_client_sqlite/Cargo.toml b/zcash_client_sqlite/Cargo.toml index b106fa5ccb..2c247b393b 100644 --- a/zcash_client_sqlite/Cargo.toml +++ b/zcash_client_sqlite/Cargo.toml @@ -78,10 +78,12 @@ maybe-rayon.workspace = true [dev-dependencies] assert_matches.workspace = true +bls12_381.workspace = true incrementalmerkletree = { workspace = true, features = ["test-dependencies"] } pasta_curves.workspace = true shardtree = { workspace = true, features = ["legacy-api", "test-dependencies"] } nonempty.workspace = true +orchard = { workspace = true, features = ["test-dependencies"] } proptest.workspace = true rand_chacha.workspace = true rand_core.workspace = true diff --git a/zcash_client_sqlite/src/lib.rs b/zcash_client_sqlite/src/lib.rs index 992ac9533d..38cd6b4fd8 100644 --- a/zcash_client_sqlite/src/lib.rs +++ b/zcash_client_sqlite/src/lib.rs @@ -32,7 +32,7 @@ // Catch documentation errors caused by code changes. #![deny(rustdoc::broken_intra_doc_links)] -use incrementalmerkletree::Position; +use incrementalmerkletree::{Position, Retention}; use maybe_rayon::{ prelude::{IndexedParallelIterator, ParallelIterator}, slice::ParallelSliceMut, @@ -58,7 +58,7 @@ use zcash_client_backend::{ address::UnifiedAddress, data_api::{ self, - chain::{BlockSource, CommitmentTreeRoot}, + chain::{BlockSource, ChainState, CommitmentTreeRoot}, scanning::{ScanPriority, ScanRange}, AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary, @@ -75,7 +75,12 @@ use zcash_client_backend::{ use crate::{error::SqliteClientError, wallet::commitment_tree::SqliteShardStore}; #[cfg(feature = "orchard")] -use zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT; +use { + incrementalmerkletree::frontier::Frontier, + shardtree::store::{Checkpoint, ShardStore}, + std::collections::BTreeMap, + zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT, +}; #[cfg(feature = "transparent-inputs")] use { @@ -92,7 +97,6 @@ use { pub mod chain; pub mod error; - pub mod wallet; use wallet::{ commitment_tree::{self, put_shard_roots}, @@ -535,6 +539,7 @@ impl WalletWrite for WalletDb #[allow(clippy::type_complexity)] fn put_blocks( &mut self, + from_state: &ChainState, blocks: Vec>, ) -> Result<(), Self::Error> { struct BlockPositions { @@ -695,62 +700,168 @@ impl WalletWrite for WalletDb { // Create subtrees from the note commitments in parallel. 
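+ // Chunking the commitments lets the located subtrees be assembled in parallel (via
+ // `maybe-rayon`) before they are inserted sequentially into the on-disk shard store below.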
const CHUNK_SIZE: usize = 1024; - { - let sapling_subtrees = sapling_commitments - .par_chunks_mut(CHUNK_SIZE) - .enumerate() - .filter_map(|(i, chunk)| { - let start = - start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64; - let end = start + chunk.len() as u64; - - shardtree::LocatedTree::from_iter( - start..end, - SAPLING_SHARD_HEIGHT.into(), - chunk.iter_mut().map(|n| n.take().expect("always Some")), - ) + let sapling_subtrees = sapling_commitments + .par_chunks_mut(CHUNK_SIZE) + .enumerate() + .filter_map(|(i, chunk)| { + let start = + start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64; + let end = start + chunk.len() as u64; + + shardtree::LocatedTree::from_iter( + start..end, + SAPLING_SHARD_HEIGHT.into(), + chunk.iter_mut().map(|n| n.take().expect("always Some")), + ) + }) + .map(|res| (res.subtree, res.checkpoints)) + .collect::>(); + + #[cfg(feature = "orchard")] + let orchard_subtrees = orchard_commitments + .par_chunks_mut(CHUNK_SIZE) + .enumerate() + .filter_map(|(i, chunk)| { + let start = + start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64; + let end = start + chunk.len() as u64; + + shardtree::LocatedTree::from_iter( + start..end, + ORCHARD_SHARD_HEIGHT.into(), + chunk.iter_mut().map(|n| n.take().expect("always Some")), + ) + }) + .map(|res| (res.subtree, res.checkpoints)) + .collect::>(); + + // Collect the complete set of Sapling checkpoints + #[cfg(feature = "orchard")] + let sapling_checkpoint_positions: BTreeMap = + dbg!(sapling_subtrees + .iter() + .flat_map(|(_, checkpoints)| checkpoints.iter()) + .map(|(k, v)| (*k, *v)) + .collect()); + + #[cfg(feature = "orchard")] + let orchard_checkpoint_positions: BTreeMap = + dbg!(orchard_subtrees + .iter() + .flat_map(|(_, checkpoints)| checkpoints.iter()) + .map(|(k, v)| (*k, *v)) + .collect()); + + #[cfg(feature = "orchard")] + fn copy_checkpoints( + // The set of checkpoints to copy from + from_checkpoint_positions: &BTreeMap, + // The set of checkpoints to copy into + to_checkpoint_positions: &BTreeMap, + // The frontier whose position will be used when there is no preceding + // checkpoint in to_checkpoint_positions. + state_final_tree: &Frontier, + ) -> Vec<(BlockHeight, Checkpoint)> { + from_checkpoint_positions + .keys() + .flat_map(|from_checkpoint_height| { + to_checkpoint_positions + .range::(..=*from_checkpoint_height) + .last() + .map_or_else( + || { + Some(( + *from_checkpoint_height, + state_final_tree.value().map_or_else( + || Checkpoint::tree_empty(), + |t| Checkpoint::at_position(t.position()), + ), + )) + }, + |(to_prev_height, position)| { + if *to_prev_height < *from_checkpoint_height { + Some(( + *from_checkpoint_height, + Checkpoint::at_position(*position), + )) + } else { + // The checkpoint already exists, so we don't need to + // do anything. 
+ None + } + }, + ) + .into_iter() }) - .map(|res| (res.subtree, res.checkpoints)) - .collect::>(); + .collect::>() + } - // Update the Sapling note commitment tree with all newly read note commitments - let mut sapling_subtrees = sapling_subtrees.into_iter(); - wdb.with_sapling_tree_mut::<_, _, Self::Error>(move |sapling_tree| { - for (tree, checkpoints) in &mut sapling_subtrees { + #[cfg(feature = "orchard")] + let missing_sapling_checkpoints = copy_checkpoints( + &orchard_checkpoint_positions, + &sapling_checkpoint_positions, + from_state.final_sapling_tree(), + ); + #[cfg(feature = "orchard")] + let missing_orchard_checkpoints = copy_checkpoints( + &sapling_checkpoint_positions, + &orchard_checkpoint_positions, + from_state.final_orchard_tree(), + ); + + // Update the Sapling note commitment tree with all newly read note commitments + { + let mut sapling_subtrees_iter = sapling_subtrees.into_iter(); + wdb.with_sapling_tree_mut::<_, _, Self::Error>(|sapling_tree| { + sapling_tree.insert_frontier( + from_state.final_sapling_tree().clone(), + Retention::Checkpoint { + id: from_state.block_height(), + is_marked: false, + }, + )?; + + for (tree, checkpoints) in &mut sapling_subtrees_iter { sapling_tree.insert_tree(tree, checkpoints)?; } + // Ensure we have a Sapling checkpoint for each checkpointed Orchard block height + #[cfg(feature = "orchard")] + for (height, checkpoint) in dbg!(&missing_sapling_checkpoints) { + sapling_tree + .store_mut() + .add_checkpoint(*height, checkpoint.clone()) + .map_err(ShardTreeError::Storage)?; + } + Ok(()) })?; } - // Create subtrees from the note commitments in parallel. + // Update the Orchard note commitment tree with all newly read note commitments #[cfg(feature = "orchard")] { - let orchard_subtrees = orchard_commitments - .par_chunks_mut(CHUNK_SIZE) - .enumerate() - .filter_map(|(i, chunk)| { - let start = - start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64; - let end = start + chunk.len() as u64; - - shardtree::LocatedTree::from_iter( - start..end, - ORCHARD_SHARD_HEIGHT.into(), - chunk.iter_mut().map(|n| n.take().expect("always Some")), - ) - }) - .map(|res| (res.subtree, res.checkpoints)) - .collect::>(); - - // Update the Orchard note commitment tree with all newly read note commitments let mut orchard_subtrees = orchard_subtrees.into_iter(); - wdb.with_orchard_tree_mut::<_, _, Self::Error>(move |orchard_tree| { + wdb.with_orchard_tree_mut::<_, _, Self::Error>(|orchard_tree| { + orchard_tree.insert_frontier( + from_state.final_orchard_tree().clone(), + Retention::Checkpoint { + id: from_state.block_height(), + is_marked: false, + }, + )?; + for (tree, checkpoints) in &mut orchard_subtrees { orchard_tree.insert_tree(tree, checkpoints)?; } + for (height, checkpoint) in dbg!(&missing_orchard_checkpoints) { + orchard_tree + .store_mut() + .add_checkpoint(*height, checkpoint.clone()) + .map_err(ShardTreeError::Storage)?; + } + Ok(()) })?; } diff --git a/zcash_client_sqlite/src/testing.rs b/zcash_client_sqlite/src/testing.rs index 8682e5449b..d051cdc392 100644 --- a/zcash_client_sqlite/src/testing.rs +++ b/zcash_client_sqlite/src/testing.rs @@ -1,6 +1,6 @@ -use std::convert::Infallible; use std::fmt; use std::num::NonZeroU32; +use std::{collections::BTreeMap, convert::Infallible}; #[cfg(feature = "unstable")] use std::fs::File; @@ -45,6 +45,7 @@ use zcash_client_backend::{ zip321, }; use zcash_client_backend::{ + data_api::chain::ChainState, fees::{standard, DustOutputPolicy}, ShieldedProtocol, }; @@ -78,6 +79,7 @@ use super::BlockDb; use 
{ group::ff::{Field, PrimeField}, orchard::note_encryption::{OrchardDomain, OrchardNoteEncryption}, + orchard::tree::MerkleHashOrchard, pasta_curves::pallas, zcash_client_backend::proto::compact_formats::CompactOrchardAction, }; @@ -177,7 +179,8 @@ impl TestBuilder { TestState { cache: self.cache, - latest_cached_block: None, + prior_cached_blocks: BTreeMap::new(), + latest_block_height: None, _data_file: data_file, db_data, test_account, @@ -186,9 +189,10 @@ impl TestBuilder { } } +#[derive(Clone, Debug)] pub(crate) struct CachedBlock { - height: BlockHeight, hash: BlockHash, + chain_state: ChainState, sapling_end_size: u32, orchard_end_size: u32, } @@ -196,44 +200,87 @@ pub(crate) struct CachedBlock { impl CachedBlock { fn none(sapling_activation_height: BlockHeight) -> Self { Self { - height: sapling_activation_height, hash: BlockHash([0; 32]), + chain_state: ChainState::empty(sapling_activation_height), sapling_end_size: 0, orchard_end_size: 0, } } fn at( - height: BlockHeight, hash: BlockHash, - sapling_tree_size: u32, - orchard_tree_size: u32, + chain_state: ChainState, + sapling_end_size: u32, + orchard_end_size: u32, ) -> Self { + assert_eq!( + chain_state.final_sapling_tree().tree_size() as u32, + sapling_end_size + ); + #[cfg(feature = "orchard")] + assert_eq!( + chain_state.final_orchard_tree().tree_size() as u32, + orchard_end_size + ); + Self { - height, hash, - sapling_end_size: sapling_tree_size, - orchard_end_size: orchard_tree_size, + chain_state, + sapling_end_size, + orchard_end_size, } } fn roll_forward(self, cb: &CompactBlock) -> Self { - assert_eq!(self.height + 1, cb.height()); + assert_eq!(self.chain_state.block_height() + 1, cb.height()); + + let sapling_final_tree = cb.vtx.iter().flat_map(|tx| tx.outputs.iter()).fold( + self.chain_state.final_sapling_tree().clone(), + |mut acc, c_out| { + acc.append(sapling::Node::from_cmu(&c_out.cmu().unwrap())); + acc + }, + ); + let sapling_end_size = sapling_final_tree.tree_size() as u32; + + #[cfg(feature = "orchard")] + let orchard_final_tree = cb.vtx.iter().flat_map(|tx| tx.actions.iter()).fold( + self.chain_state.final_orchard_tree().clone(), + |mut acc, c_act| { + acc.append(MerkleHashOrchard::from_cmx(&c_act.cmx().unwrap())); + acc + }, + ); + #[cfg(feature = "orchard")] + let orchard_end_size = orchard_final_tree.tree_size() as u32; + #[cfg(not(feature = "orchard"))] + let orchard_end_size = cb.vtx.iter().fold(self.orchard_end_size, |sz, tx| { + sz + (tx.actions.len() as u32) + }); + Self { - height: cb.height(), hash: cb.hash(), - sapling_end_size: self.sapling_end_size - + cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::(), - orchard_end_size: self.orchard_end_size - + cb.vtx.iter().map(|tx| tx.actions.len() as u32).sum::(), + chain_state: ChainState::new( + cb.height(), + sapling_final_tree, + #[cfg(feature = "orchard")] + orchard_final_tree, + ), + sapling_end_size, + orchard_end_size, } } + + fn height(&self) -> BlockHeight { + self.chain_state.block_height() + } } /// The state for a `zcash_client_sqlite` test. 
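+ ///
+ /// Prior chain states are cached in `prior_cached_blocks`, keyed by the height of the block
+ /// that is built on top of them; `prior_cached_block(h)` therefore returns the chain state as
+ /// of height `h - 1`. `latest_block_height` records the height of the most recently inserted
+ /// compact block.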
pub(crate) struct TestState { cache: Cache, - latest_cached_block: Option, + prior_cached_blocks: BTreeMap, + latest_block_height: Option, _data_file: NamedTempFile, db_data: WalletDb, test_account: Option<( @@ -256,7 +303,9 @@ where } pub(crate) fn latest_cached_block(&self) -> Option<&CachedBlock> { - self.latest_cached_block.as_ref() + self.latest_block_height + .as_ref() + .and_then(|h| self.prior_cached_blocks.get(h)) } /// Creates a fake block at the expected next height containing a single output of the @@ -268,10 +317,10 @@ where value: NonNegativeAmount, ) -> (BlockHeight, Cache::InsertResult, Fvk::Nullifier) { let cached_block = self - .latest_cached_block - .take() + .latest_cached_block() + .cloned() .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let height = cached_block.height() + 1; let (res, nf) = self.generate_block_at( height, @@ -282,11 +331,25 @@ where cached_block.sapling_end_size, cached_block.orchard_end_size, ); - assert!(self.latest_cached_block.is_some()); (height, res, nf) } + fn prior_cached_block(&self, height: BlockHeight) -> CachedBlock { + self.prior_cached_blocks.get(&height).map_or_else( + || { + // Assume that the chain state has not been advanced by any blocks in the + // intervening range. If there is no prior chain state, then that's definitionally + // the empty chain. + self.prior_cached_blocks + .range(..height) + .last() + .map_or_else(|| CachedBlock::none(height - 1), |(_, b)| b.clone()) + }, + |b| b.clone(), + ) + } + /// Creates a fake block with the given height and hash containing a single output of /// the given value, and inserts it into the cache. /// @@ -303,6 +366,58 @@ where initial_sapling_tree_size: u32, initial_orchard_tree_size: u32, ) -> (Cache::InsertResult, Fvk::Nullifier) { + let mut prior_cached_block = self.prior_cached_block(height); + assert!(prior_cached_block.chain_state.block_height() < height); + + // If the block height has increased or the Sapling and/or Orchard tree sizes have changed, + // we need to generate a new prior cached block that the block to be generated can + // successfully chain from, with the provided tree sizes. 
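+ // Only the tree frontiers and sizes matter for chaining here, so the gap between the prior
+ // tree sizes and the provided initial tree sizes is filled with random commitments below.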
+ if prior_cached_block.hash != prev_hash + || prior_cached_block.chain_state.block_height() != height - 1 + || prior_cached_block.sapling_end_size != initial_sapling_tree_size + || prior_cached_block.orchard_end_size != initial_orchard_tree_size + { + assert!(prior_cached_block.chain_state.block_height() < height - 1); + assert!(prior_cached_block.sapling_end_size <= initial_sapling_tree_size); + assert!(prior_cached_block.orchard_end_size <= initial_orchard_tree_size); + + let final_sapling_tree = + (prior_cached_block.sapling_end_size..initial_sapling_tree_size).fold( + prior_cached_block.chain_state.final_sapling_tree().clone(), + |mut acc, _| { + acc.append(sapling::Node::from_scalar(bls12_381::Scalar::random( + &mut self.rng, + ))); + acc + }, + ); + + #[cfg(feature = "orchard")] + let final_orchard_tree = + (prior_cached_block.orchard_end_size..initial_orchard_tree_size).fold( + prior_cached_block.chain_state.final_orchard_tree().clone(), + |mut acc, _| { + acc.append(MerkleHashOrchard::random(&mut self.rng)); + acc + }, + ); + + prior_cached_block = CachedBlock::at( + prev_hash, + ChainState::new( + height - 1, + final_sapling_tree, + #[cfg(feature = "orchard")] + final_orchard_tree, + ), + initial_sapling_tree_size, + initial_orchard_tree_size, + ); + + self.prior_cached_blocks + .insert(height, prior_cached_block.clone()); + } + let (cb, nf) = fake_compact_block( &self.network(), height, @@ -314,17 +429,12 @@ where initial_orchard_tree_size, &mut self.rng, ); - let res = self.cache.insert(&cb); + assert_eq!(cb.height(), height); - self.latest_cached_block = Some( - CachedBlock::at( - height - 1, - cb.hash(), - initial_sapling_tree_size, - initial_orchard_tree_size, - ) - .roll_forward(&cb), - ); + let res = self.cache.insert(&cb); + self.prior_cached_blocks + .insert(cb.height() + 1, prior_cached_block.roll_forward(&cb)); + self.latest_block_height = Some(cb.height()); (res, nf) } @@ -338,27 +448,30 @@ where to: impl Into
, value: NonNegativeAmount, ) -> (BlockHeight, Cache::InsertResult) { - let cached_block = self - .latest_cached_block - .take() + let prior_cached_block = self + .latest_cached_block() + .cloned() .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let height = prior_cached_block.height() + 1; let cb = fake_compact_block_spending( &self.network(), height, - cached_block.hash, + prior_cached_block.hash, note, fvk, to.into(), value, - cached_block.sapling_end_size, - cached_block.orchard_end_size, + prior_cached_block.sapling_end_size, + prior_cached_block.orchard_end_size, &mut self.rng, ); - let res = self.cache.insert(&cb); + assert_eq!(cb.height(), height); - self.latest_cached_block = Some(cached_block.roll_forward(&cb)); + let res = self.cache.insert(&cb); + self.prior_cached_blocks + .insert(cb.height() + 1, prior_cached_block.roll_forward(&cb)); + self.latest_block_height = Some(height); (height, res) } @@ -393,24 +506,27 @@ where tx_index: usize, tx: &Transaction, ) -> (BlockHeight, Cache::InsertResult) { - let cached_block = self - .latest_cached_block - .take() + let prior_cached_block = self + .latest_cached_block() + .cloned() .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let height = prior_cached_block.height() + 1; let cb = fake_compact_block_from_tx( height, - cached_block.hash, + prior_cached_block.hash, tx_index, tx, - cached_block.sapling_end_size, - cached_block.orchard_end_size, + prior_cached_block.sapling_end_size, + prior_cached_block.orchard_end_size, &mut self.rng, ); - let res = self.cache.insert(&cb); + assert_eq!(cb.height(), height); - self.latest_cached_block = Some(cached_block.roll_forward(&cb)); + let res = self.cache.insert(&cb); + self.prior_cached_blocks + .insert(cb.height() + 1, prior_cached_block.roll_forward(&cb)); + self.latest_block_height = Some(height); (height, res) } @@ -438,13 +554,15 @@ where ::Error, >, > { - scan_cached_blocks( + let prior_cached_block = self.prior_cached_block(from_height); + let result = scan_cached_blocks( &self.network(), self.cache.block_source(), &mut self.db_data, - from_height, + &prior_cached_block.chain_state, limit, - ) + ); + result } /// Resets the wallet using a new wallet database but with the same cache of blocks, @@ -455,7 +573,7 @@ where /// Before using any `generate_*` method on the reset state, call `reset_latest_cached_block()`. pub(crate) fn reset(&mut self) -> NamedTempFile { let network = self.network(); - self.latest_cached_block = None; + self.latest_block_height = None; let tf = std::mem::replace(&mut self._data_file, NamedTempFile::new().unwrap()); self.db_data = WalletDb::for_path(self._data_file.path(), network).unwrap(); self.test_account = None; @@ -463,23 +581,23 @@ where tf } - /// Reset the latest cached block to the most recent one in the cache database. - #[allow(dead_code)] - pub(crate) fn reset_latest_cached_block(&mut self) { - self.cache - .block_source() - .with_blocks::<_, Infallible>(None, None, |block: CompactBlock| { - let chain_metadata = block.chain_metadata.unwrap(); - self.latest_cached_block = Some(CachedBlock::at( - BlockHeight::from_u32(block.height.try_into().unwrap()), - BlockHash::from_slice(block.hash.as_slice()), - chain_metadata.sapling_commitment_tree_size, - chain_metadata.orchard_commitment_tree_size, - )); - Ok(()) - }) - .unwrap(); - } + // /// Reset the latest cached block to the most recent one in the cache database. 
+ // #[allow(dead_code)] + // pub(crate) fn reset_latest_cached_block(&mut self) { + // self.cache + // .block_source() + // .with_blocks::<_, Infallible>(None, None, |block: CompactBlock| { + // let chain_metadata = block.chain_metadata.unwrap(); + // self.latest_cached_block = Some(CachedBlock::at( + // BlockHash::from_slice(block.hash.as_slice()), + // BlockHeight::from_u32(block.height.try_into().unwrap()), + // chain_metadata.sapling_commitment_tree_size, + // chain_metadata.orchard_commitment_tree_size, + // )); + // Ok(()) + // }) + // .unwrap(); + // } } impl TestState {
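
For reference, the following is an illustrative sketch, not part of this patch, of how a caller
drives scanning after this change. The helper name `scan_next_range` is hypothetical; the wallet
supplies the `ChainState` for the block immediately preceding the range to be scanned, and
`scan_cached_blocks` forwards it to `WalletWrite::put_blocks` so that each note commitment tree
can be checkpointed at the start of the range.

use subtle::ConditionallySelectable;
use zcash_client_backend::data_api::{
    chain::{error::Error, scan_cached_blocks, BlockSource, ChainState},
    WalletRead, WalletWrite,
};
use zcash_primitives::consensus;

/// Scans at most `limit` blocks following `from_state.block_height()`.
fn scan_next_range<ParamsT, BlockSourceT, DbT>(
    params: &ParamsT,
    block_source: &BlockSourceT,
    data_db: &mut DbT,
    from_state: &ChainState,
    limit: usize,
) -> Result<(), Error<<DbT as WalletRead>::Error, <BlockSourceT as BlockSource>::Error>>
where
    ParamsT: consensus::Parameters + Send + 'static,
    BlockSourceT: BlockSource,
    DbT: WalletWrite,
    <DbT as WalletRead>::AccountId: ConditionallySelectable + Default + Send + 'static,
{
    // `scan_cached_blocks` now begins at `from_state.block_height() + 1`; the frontiers
    // carried by `from_state` let `put_blocks` checkpoint both note commitment trees at the
    // start of the range, so subtree ranges below received notes need not be fully scanned
    // for those notes to become spendable.
    let _summary = scan_cached_blocks(params, block_source, data_db, from_state, limit)?;
    Ok(())
}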