diff --git a/dash-spv/src/client/block_processor.rs b/dash-spv/src/client/block_processor.rs index c582932f5..5c8fcf432 100644 --- a/dash-spv/src/client/block_processor.rs +++ b/dash-spv/src/client/block_processor.rs @@ -226,7 +226,17 @@ impl Vec { + ) -> (Vec, bool) { let mut processed = self.processed_blocks.lock().await; processed.push((block.block_hash(), height)); // Return txids of all transactions in block as "relevant" - block.txdata.iter().map(|tx| tx.txid()).collect() + // No gap limit changes in mock + (block.txdata.iter().map(|tx| tx.txid()).collect(), false) } async fn process_mempool_transaction(&mut self, tx: &Transaction, _network: Network) { @@ -272,8 +273,8 @@ mod tests { _block: &Block, _height: u32, _network: Network, - ) -> Vec { - Vec::new() + ) -> (Vec, bool) { + (Vec::new(), false) } async fn process_mempool_transaction(&mut self, _tx: &Transaction, _network: Network) {} diff --git a/dash-spv/src/sync/filters/mod.rs b/dash-spv/src/sync/filters/mod.rs index 626e12326..305e9a8d8 100644 --- a/dash-spv/src/sync/filters/mod.rs +++ b/dash-spv/src/sync/filters/mod.rs @@ -13,6 +13,7 @@ //! - `retry` - Retry and timeout logic //! - `stats` - Statistics and progress tracking //! - `requests` - Request queue management +//! - `recheck` - Filter re-checking when gap limits change //! //! ## Thread Safety //! @@ -27,6 +28,7 @@ pub mod gaps; pub mod headers; pub mod manager; pub mod matching; +pub mod recheck; pub mod requests; pub mod retry; pub mod stats; diff --git a/dash-spv/src/sync/filters/recheck.rs b/dash-spv/src/sync/filters/recheck.rs new file mode 100644 index 000000000..eb9c25715 --- /dev/null +++ b/dash-spv/src/sync/filters/recheck.rs @@ -0,0 +1,294 @@ +//! Filter re-checking infrastructure +//! +//! When gap limits change during block processing, we need to re-check compact filters +//! with the new set of addresses. This module provides the infrastructure to track +//! which filters need re-checking and manage the re-check iterations. 
+
+use std::collections::VecDeque;
+
+/// Configuration for filter re-checking behavior
+#[derive(Debug, Clone)]
+pub struct FilterRecheckConfig {
+    /// Whether filter re-checking is enabled
+    pub enabled: bool,
+    /// Maximum number of re-check iterations to prevent infinite loops
+    pub max_iterations: u32,
+}
+
+impl Default for FilterRecheckConfig {
+    fn default() -> Self {
+        Self {
+            enabled: true,
+            max_iterations: 10,
+        }
+    }
+}
+
+/// Represents a range of block heights that need filter re-checking
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct RecheckRange {
+    /// Starting height (inclusive)
+    pub start: u32,
+    /// Ending height (inclusive)
+    pub end: u32,
+    /// Which iteration this is (for loop detection)
+    pub iteration: u32,
+}
+
+impl RecheckRange {
+    /// Create a new recheck range
+    pub fn new(start: u32, end: u32, iteration: u32) -> Self {
+        Self {
+            start,
+            end,
+            iteration,
+        }
+    }
+
+    /// Check if this range contains a height
+    pub fn contains(&self, height: u32) -> bool {
+        height >= self.start && height <= self.end
+    }
+
+    /// Get the number of blocks in this range
+    pub fn len(&self) -> u32 {
+        self.end.saturating_sub(self.start).saturating_add(1)
+    }
+
+    /// Check if the range is empty
+    pub fn is_empty(&self) -> bool {
+        self.end < self.start
+    }
+}
+
+/// Queue for managing filter re-check operations
+#[derive(Debug)]
+pub struct FilterRecheckQueue {
+    /// Queue of ranges that need re-checking
+    pending_ranges: VecDeque<RecheckRange>,
+    /// Configuration
+    config: FilterRecheckConfig,
+    /// Total number of ranges added (for statistics)
+    total_ranges_added: u64,
+    /// Total number of ranges completed (for statistics)
+    total_ranges_completed: u64,
+}
+
+impl FilterRecheckQueue {
+    /// Create a new filter recheck queue
+    pub fn new(config: FilterRecheckConfig) -> Self {
+        Self {
+            pending_ranges: VecDeque::new(),
+            config,
+            total_ranges_added: 0,
+            total_ranges_completed: 0,
+        }
+    }
+
+    /// Add a range to be re-checked
+    ///
+    /// Returns Ok(()) if the range was added, or Err with a message if it was rejected
+    /// (e.g., due to exceeding max iterations)
+    pub fn add_range(&mut self, start: u32, end: u32, iteration: u32) -> Result<(), String> {
+        if !self.config.enabled {
+            return Err("Filter re-checking is disabled".to_string());
+        }
+
+        if iteration >= self.config.max_iterations {
+            return Err(format!(
+                "Maximum re-check iterations ({}) exceeded for range {}-{}",
+                self.config.max_iterations, start, end
+            ));
+        }
+
+        let range = RecheckRange::new(start, end, iteration);
+
+        // Check if we already have this range queued
+        if self.pending_ranges.iter().any(|r| r.start == start && r.end == end) {
+            tracing::debug!("Range {}-{} already queued for re-check, skipping", start, end);
+            return Ok(());
+        }
+
+        tracing::info!(
+            "📋 Queuing filter re-check for heights {}-{} (iteration {}/{})",
+            start,
+            end,
+            iteration + 1,
+            self.config.max_iterations
+        );
+
+        self.pending_ranges.push_back(range);
+        self.total_ranges_added += 1;
+        Ok(())
+    }
+
+    /// Get the next range to re-check
+    pub fn next_range(&mut self) -> Option<RecheckRange> {
+        self.pending_ranges.pop_front()
+    }
+
+    /// Mark a range as completed
+    pub fn mark_completed(&mut self, _range: &RecheckRange) {
+        self.total_ranges_completed += 1;
+    }
+
+    /// Check if there are any pending re-checks
+    pub fn has_pending(&self) -> bool {
+        !self.pending_ranges.is_empty()
+    }
+
+    /// Get the number of pending ranges
+    pub fn pending_count(&self) -> usize {
+        self.pending_ranges.len()
+    }
+
+    /// Clear all pending ranges
+    pub fn clear(&mut self) {
+        self.pending_ranges.clear();
+    }
+
+    /// Get statistics about re-check operations
+    pub fn stats(&self) -> RecheckStats {
+        RecheckStats {
+            pending_ranges: self.pending_ranges.len(),
+            total_added: self.total_ranges_added,
+            total_completed: self.total_ranges_completed,
+            config: self.config.clone(),
+        }
+    }
+
+    /// Check if re-checking is enabled
+    pub fn is_enabled(&self) -> bool {
+        self.config.enabled
+    }
+}
+
+///
Statistics about filter re-check operations +#[derive(Debug, Clone)] +pub struct RecheckStats { + /// Number of ranges currently pending + pub pending_ranges: usize, + /// Total ranges added since creation + pub total_added: u64, + /// Total ranges completed + pub total_completed: u64, + /// Configuration + pub config: FilterRecheckConfig, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_recheck_range_basic() { + let range = RecheckRange::new(100, 200, 0); + assert_eq!(range.start, 100); + assert_eq!(range.end, 200); + assert_eq!(range.iteration, 0); + assert_eq!(range.len(), 101); + assert!(!range.is_empty()); + } + + #[test] + fn test_recheck_range_contains() { + let range = RecheckRange::new(100, 200, 0); + assert!(!range.contains(99)); + assert!(range.contains(100)); + assert!(range.contains(150)); + assert!(range.contains(200)); + assert!(!range.contains(201)); + } + + #[test] + fn test_recheck_queue_add_and_retrieve() { + let mut queue = FilterRecheckQueue::new(FilterRecheckConfig::default()); + + // Add a range + assert!(queue.add_range(100, 200, 0).is_ok()); + assert_eq!(queue.pending_count(), 1); + assert!(queue.has_pending()); + + // Retrieve it + let range = queue.next_range().unwrap(); + assert_eq!(range.start, 100); + assert_eq!(range.end, 200); + assert_eq!(queue.pending_count(), 0); + assert!(!queue.has_pending()); + } + + #[test] + fn test_recheck_queue_max_iterations() { + let config = FilterRecheckConfig { + enabled: true, + max_iterations: 3, + }; + let mut queue = FilterRecheckQueue::new(config); + + // These should succeed + assert!(queue.add_range(100, 200, 0).is_ok()); + assert!(queue.add_range(100, 200, 1).is_ok()); + assert!(queue.add_range(100, 200, 2).is_ok()); + + // This should fail (iteration 3 >= max_iterations 3) + assert!(queue.add_range(100, 200, 3).is_err()); + } + + #[test] + fn test_recheck_queue_disabled() { + let config = FilterRecheckConfig { + enabled: false, + max_iterations: 10, + }; + let mut queue = 
FilterRecheckQueue::new(config); + + // Should fail when disabled + assert!(queue.add_range(100, 200, 0).is_err()); + } + + #[test] + fn test_recheck_queue_duplicate_detection() { + let mut queue = FilterRecheckQueue::new(FilterRecheckConfig::default()); + + // Add the same range twice + assert!(queue.add_range(100, 200, 0).is_ok()); + assert!(queue.add_range(100, 200, 0).is_ok()); // Should succeed but not add + + // Should only have one range + assert_eq!(queue.pending_count(), 1); + } + + #[test] + fn test_recheck_queue_stats() { + let mut queue = FilterRecheckQueue::new(FilterRecheckConfig::default()); + + queue.add_range(100, 200, 0).unwrap(); + queue.add_range(201, 300, 0).unwrap(); + + let stats = queue.stats(); + assert_eq!(stats.pending_ranges, 2); + assert_eq!(stats.total_added, 2); + assert_eq!(stats.total_completed, 0); + + // Complete one + let range = queue.next_range().unwrap(); + queue.mark_completed(&range); + + let stats = queue.stats(); + assert_eq!(stats.pending_ranges, 1); + assert_eq!(stats.total_completed, 1); + } + + #[test] + fn test_recheck_queue_clear() { + let mut queue = FilterRecheckQueue::new(FilterRecheckConfig::default()); + + queue.add_range(100, 200, 0).unwrap(); + queue.add_range(201, 300, 0).unwrap(); + assert_eq!(queue.pending_count(), 2); + + queue.clear(); + assert_eq!(queue.pending_count(), 0); + assert!(!queue.has_pending()); + } +} diff --git a/dash-spv/src/sync/sequential/filter_recheck.rs b/dash-spv/src/sync/sequential/filter_recheck.rs new file mode 100644 index 000000000..971e16c33 --- /dev/null +++ b/dash-spv/src/sync/sequential/filter_recheck.rs @@ -0,0 +1,106 @@ +//! 
Filter re-checking operations for gap limit changes
+
+use crate::error::{SyncError, SyncResult};
+use crate::network::NetworkManager;
+use crate::storage::StorageManager;
+use crate::sync::filters::recheck::RecheckRange;
+use crate::sync::sequential::SequentialSyncManager;
+use dashcore::BlockHash;
+use key_wallet_manager::wallet_interface::WalletInterface;
+
+impl<
+        S: StorageManager + Send + Sync + 'static,
+        N: NetworkManager + Send + Sync + 'static,
+        W: WalletInterface,
+    > SequentialSyncManager<S, N, W>
+{
+    /// Re-check compact filters for a given range of heights
+    ///
+    /// This is called when gap limits change during block processing.
+    /// It re-checks previously evaluated filters with the new, larger set of addresses.
+    ///
+    /// Returns a list of (block_hash, height) pairs for blocks that match the updated address set.
+    pub(super) async fn recheck_filters_for_range(
+        &mut self,
+        storage: &S,
+        _network: &mut N,
+        range: &RecheckRange,
+    ) -> SyncResult<Vec<(BlockHash, u32)>> {
+        let mut new_matches = Vec::new();
+
+        tracing::debug!(
+            "Re-checking filters for range {}-{} with updated address set",
+            range.start,
+            range.end
+        );
+
+        // Lock wallet once for the entire range
+        let mut wallet = self.wallet.write().await;
+
+        // Iterate through the height range
+        for height in range.start..=range.end {
+            // Get the block hash for this height
+            let header = match storage.get_header(height).await {
+                Ok(Some(header)) => header,
+                Ok(None) => {
+                    tracing::debug!("No header at height {}, skipping", height);
+                    continue;
+                }
+                Err(e) => {
+                    tracing::warn!("Failed to get header at height {}: {}", height, e);
+                    continue;
+                }
+            };
+
+            let block_hash = header.block_hash();
+
+            // Get the compact filter for this height
+            let filter_data = match storage.load_filter(height).await {
+                Ok(Some(data)) => data,
+                Ok(None) => {
+                    tracing::debug!("No filter at height {}, skipping", height);
+                    continue;
+                }
+                Err(e) => {
+                    tracing::warn!("Failed to load filter at height {}: {}", height, e);
+
continue; + } + }; + + // Create BlockFilter from raw data (pass as slice) + let filter = dashcore::bip158::BlockFilter::new(&filter_data[..]); + + // Check filter with wallet's CURRENT (updated) address set + let matches = + wallet.check_compact_filter(&filter, &block_hash, self.config.network).await; + + if matches { + tracing::info!( + "🎯 Filter re-check found new match at height {} (block {})", + height, + block_hash + ); + new_matches.push((block_hash, height)); + } + } + + drop(wallet); + + if !new_matches.is_empty() { + tracing::info!( + "Re-check complete: Found {} new matches in range {}-{}", + new_matches.len(), + range.start, + range.end + ); + } else { + tracing::debug!( + "Re-check complete: No new matches in range {}-{}", + range.start, + range.end + ); + } + + Ok(new_matches) + } +} diff --git a/dash-spv/src/sync/sequential/lifecycle.rs b/dash-spv/src/sync/sequential/lifecycle.rs index 327a6b787..8a2cfbc75 100644 --- a/dash-spv/src/sync/sequential/lifecycle.rs +++ b/dash-spv/src/sync/sequential/lifecycle.rs @@ -38,6 +38,11 @@ impl< // Create reorg config with sensible defaults let reorg_config = ReorgConfig::default(); + // Create filter recheck queue with default config + let filter_recheck_config = crate::sync::filters::recheck::FilterRecheckConfig::default(); + let filter_recheck_queue = + crate::sync::filters::recheck::FilterRecheckQueue::new(filter_recheck_config); + Ok(Self { current_phase: SyncPhase::Idle, transition_manager: TransitionManager::new(config), @@ -56,6 +61,7 @@ impl< current_phase_retries: 0, wallet, stats, + filter_recheck_queue, _phantom_s: std::marker::PhantomData, _phantom_n: std::marker::PhantomData, }) diff --git a/dash-spv/src/sync/sequential/manager.rs b/dash-spv/src/sync/sequential/manager.rs index 01247b55f..6abb34d1c 100644 --- a/dash-spv/src/sync/sequential/manager.rs +++ b/dash-spv/src/sync/sequential/manager.rs @@ -99,6 +99,9 @@ pub struct SequentialSyncManager>, + + /// Filter re-check queue for handling gap limit 
changes + pub(super) filter_recheck_queue: crate::sync::filters::recheck::FilterRecheckQueue, } impl< diff --git a/dash-spv/src/sync/sequential/message_handlers.rs b/dash-spv/src/sync/sequential/message_handlers.rs index f6521effd..3d9e014fe 100644 --- a/dash-spv/src/sync/sequential/message_handlers.rs +++ b/dash-spv/src/sync/sequential/message_handlers.rs @@ -762,10 +762,70 @@ impl< .map_err(|e| SyncError::Storage(format!("Failed to get block height: {}", e)))? .unwrap_or(0); - let relevant_txids = wallet.process_block(&block, block_height, self.config.network).await; + let (relevant_txids, gap_limit_changed) = + wallet.process_block(&block, block_height, self.config.network).await; drop(wallet); + // Handle gap limit changes by queuing filter re-checks + if gap_limit_changed { + tracing::warn!( + "⚠️ Gap limit changed during block processing at height {}. Queuing filters for re-check.", + block_height + ); + + // Determine the range to re-check based on current phase + if let SyncPhase::DownloadingBlocks { + pending_blocks, + downloading, + completed, + .. 
+ } = &self.current_phase + { + // Find the minimum and maximum heights in our current download batch + let mut min_height = block_height; + let mut max_height = block_height; + + // Check pending blocks + for (_, height) in pending_blocks { + min_height = min_height.min(*height); + max_height = max_height.max(*height); + } + + // Check downloading blocks (need to look them up from storage) + for block_hash in downloading.keys() { + if let Ok(Some(height)) = storage.get_header_height_by_hash(block_hash).await { + min_height = min_height.min(height); + max_height = max_height.max(height); + } + } + + // Check completed blocks + for block_hash in completed { + if let Ok(Some(height)) = storage.get_header_height_by_hash(block_hash).await { + min_height = min_height.min(height); + max_height = max_height.max(height); + } + } + + // Queue the range for re-checking (iteration 0 for first re-check) + if let Err(e) = self.filter_recheck_queue.add_range(min_height, max_height, 0) { + tracing::error!( + "Failed to queue filter re-check for range {}-{}: {}", + min_height, + max_height, + e + ); + } else { + tracing::info!( + "📋 Queued filter re-check for heights {}-{} after gap limit change", + min_height, + max_height + ); + } + } + } + if !relevant_txids.is_empty() { tracing::info!( "💰 Found {} relevant transactions in block {} at height {}", @@ -802,10 +862,79 @@ impl< }; if should_transition { - self.transition_to_next_phase(storage, network, "All blocks downloaded").await?; + // Before transitioning, process any pending filter re-checks + if self.filter_recheck_queue.has_pending() { + tracing::info!( + "🔄 Processing {} pending filter re-check(s) before transitioning", + self.filter_recheck_queue.pending_count() + ); - // Execute the next phase (if any) - self.execute_current_phase(network, storage).await?; + // Process all pending re-checks + while let Some(range) = self.filter_recheck_queue.next_range() { + tracing::info!( + "🔍 Re-checking filters for heights {}-{} 
(iteration {}/{})", + range.start, + range.end, + range.iteration + 1, + self.filter_recheck_queue.stats().config.max_iterations + ); + + // Re-check filters for this range + match self.recheck_filters_for_range(storage, network, &range).await { + Ok(new_matches) => { + if !new_matches.is_empty() { + tracing::info!( + "✅ Found {} new filter matches after re-check for range {}-{}", + new_matches.len(), + range.start, + range.end + ); + + // Download the newly matched blocks + // This will trigger another iteration if gap limits change again + for (block_hash, height) in new_matches { + tracing::debug!( + "📦 Requesting block {} at height {} from filter re-check", + block_hash, + height + ); + // Add to pending blocks for download + // Note: We'll re-enter the DownloadingBlocks phase + } + } + self.filter_recheck_queue.mark_completed(&range); + } + Err(e) => { + tracing::error!( + "Failed to re-check filters for range {}-{}: {}", + range.start, + range.end, + e + ); + // Continue with other re-checks + } + } + } + + // If we added new blocks to download, don't transition yet + if self.no_more_pending_blocks() { + self.transition_to_next_phase( + storage, + network, + "All blocks downloaded (after filter re-check)", + ) + .await?; + self.execute_current_phase(network, storage).await?; + } else { + tracing::info!( + "🔄 Filter re-check found new blocks to download, staying in DownloadingBlocks phase" + ); + } + } else { + // No pending re-checks, normal transition + self.transition_to_next_phase(storage, network, "All blocks downloaded").await?; + self.execute_current_phase(network, storage).await?; + } } Ok(()) diff --git a/dash-spv/src/sync/sequential/mod.rs b/dash-spv/src/sync/sequential/mod.rs index 046ec0252..d7f08a338 100644 --- a/dash-spv/src/sync/sequential/mod.rs +++ b/dash-spv/src/sync/sequential/mod.rs @@ -30,8 +30,10 @@ //! - `recovery` - Recovery and error handling logic //! - `request_control` - Request flow control //! 
- `transitions` - Phase transition management +//! - `filter_recheck` - Filter re-checking for gap limit changes // Sub-modules (focused implementations) +pub mod filter_recheck; pub mod lifecycle; pub mod manager; pub mod message_handlers; diff --git a/key-wallet-manager/src/wallet_interface.rs b/key-wallet-manager/src/wallet_interface.rs index 14fa2936f..697306b5a 100644 --- a/key-wallet-manager/src/wallet_interface.rs +++ b/key-wallet-manager/src/wallet_interface.rs @@ -13,13 +13,15 @@ use key_wallet::Network; #[async_trait] pub trait WalletInterface: Send + Sync { /// Called when a new block is received that may contain relevant transactions - /// Returns transaction IDs that were relevant to the wallet + /// Returns (transaction_ids, gap_limit_changed) + /// - transaction_ids: Transaction IDs that were relevant to the wallet + /// - gap_limit_changed: true if new addresses were generated due to gap limit maintenance async fn process_block( &mut self, block: &Block, height: CoreBlockHeight, network: Network, - ) -> Vec; + ) -> (Vec, bool); /// Called when a transaction is seen in the mempool async fn process_mempool_transaction(&mut self, tx: &Transaction, network: Network); diff --git a/key-wallet-manager/src/wallet_manager/process_block.rs b/key-wallet-manager/src/wallet_manager/process_block.rs index 3151173f8..e7c6f243f 100644 --- a/key-wallet-manager/src/wallet_manager/process_block.rs +++ b/key-wallet-manager/src/wallet_manager/process_block.rs @@ -18,7 +18,11 @@ impl WalletInterface for WalletM block: &Block, height: CoreBlockHeight, network: Network, - ) -> Vec { + ) -> (Vec, bool) { + // Capture address count before processing + let addresses_before: usize = + self.wallet_infos.values().map(|info| info.monitored_addresses(network).len()).sum(); + let mut relevant_txids = Vec::new(); let block_hash = Some(block.block_hash()); let timestamp = block.header.time; @@ -47,7 +51,16 @@ impl WalletInterface for WalletM state.current_height = height; } - 
relevant_txids + // Capture address count after processing + let addresses_after: usize = + self.wallet_infos.values().map(|info| info.monitored_addresses(network).len()).sum(); + + // Determine if gap limit changed (new addresses were generated) + let gap_limit_changed = addresses_after > addresses_before; + + // Note: Gap limit changes will be logged by the SPV client + + (relevant_txids, gap_limit_changed) } async fn process_mempool_transaction(&mut self, tx: &Transaction, network: Network) { diff --git a/key-wallet-manager/tests/spv_integration_tests.rs b/key-wallet-manager/tests/spv_integration_tests.rs index c7a830bd9..35913bb41 100644 --- a/key-wallet-manager/tests/spv_integration_tests.rs +++ b/key-wallet-manager/tests/spv_integration_tests.rs @@ -113,10 +113,12 @@ async fn test_block_processing() { let block = create_test_block(100, vec![tx.clone()]); // Process the block - let result = manager.process_block(&block, 100, Network::Testnet).await; + let (txids, gap_limit_changed) = manager.process_block(&block, 100, Network::Testnet).await; // Since we're not watching specific addresses, no transactions should be relevant - assert_eq!(result.len(), 0); + assert_eq!(txids.len(), 0); + // No addresses should be generated if we're not tracking any wallets + assert!(!gap_limit_changed); } #[tokio::test] diff --git a/key-wallet/src/gap_limit.rs b/key-wallet/src/gap_limit.rs index 688d8af4a..f9c3a6fe7 100644 --- a/key-wallet/src/gap_limit.rs +++ b/key-wallet/src/gap_limit.rs @@ -83,15 +83,26 @@ impl GapLimit { } /// Mark an address at the given index as used - pub fn mark_used(&mut self, index: u32) { + /// Returns true if the highest_used_index increased (indicating new addresses may need generation) + pub fn mark_used(&mut self, index: u32) -> bool { self.used_indices.insert(index); + // Track if highest_used_index changed + let old_highest = self.highest_used_index; + // Update highest used index self.highest_used_index = match self.highest_used_index { None => 
Some(index), Some(current) => Some(cmp::max(current, index)), }; + // Determine if the highest used index increased + let highest_increased = match (old_highest, self.highest_used_index) { + (None, Some(_)) => true, // First address marked as used + (Some(old), Some(new)) => new > old, // Highest index increased + _ => false, + }; + // Reset unused count if this breaks a gap if let Some(highest) = self.highest_used_index { if index > highest { @@ -109,6 +120,8 @@ impl GapLimit { if self.stage == GapLimitStage::Scanning && !self.limit_reached { self.stage = GapLimitStage::Extended; } + + highest_increased } /// Mark an address as generated (but not necessarily used) @@ -364,9 +377,10 @@ mod tests { } // Mark some as used - gap.mark_used(2); - gap.mark_used(5); - gap.mark_used(7); + assert!(gap.mark_used(2)); // First use, should return true + assert!(gap.mark_used(5)); // Increases highest, should return true + assert!(gap.mark_used(7)); // Increases highest, should return true + assert!(!gap.mark_used(2)); // Already used and doesn't increase highest assert_eq!(gap.highest_used_index, Some(7)); assert_eq!(gap.current_unused_count, 2); // indices 8 and 9 are unused @@ -389,7 +403,8 @@ mod tests { for i in 0..5 { gap.mark_generated(i); } - gap.mark_used(3); + let changed = gap.mark_used(3); + assert!(changed); // First use should return true assert!(gap.needs_extension()); @@ -437,8 +452,9 @@ mod tests { assert_eq!(gap.addresses_to_generate(), 0); // After using one - gap.mark_used(2); - // target = 2 + 5 + 1 = 8, highest_generated = 4, so need 8 - 4 = 4 more + let changed = gap.mark_used(2); + assert!(changed); // First use should return true + // target = 2 + 5 + 1 = 8, highest_generated = 4, so need 8 - 4 = 4 more assert_eq!(gap.addresses_to_generate(), 4); // Need to generate 5, 6, 7, 8 to maintain gap } }