@@ -301,28 +301,46 @@ pub fn mark_block_pending_sync(height: u64) -> bool {
         cleanup_pending_sync_blocks();
     }
 
-    // Hard limit: emergency cleanup
+    // Hard limit: emergency cleanup with PRIORITY EVICTION
     if PENDING_SYNC_BLOCKS.len() >= MAX_PENDING_SYNC_BLOCKS {
         let local_height = LOCAL_BLOCKCHAIN_HEIGHT.load(std::sync::atomic::Ordering::Relaxed);
         let mut entries_to_remove: Vec<u64> = Vec::new();
-
-        // Remove entries below local height (already processed)
+
+        // Phase 1: Remove entries below local height (already processed)
         for entry in PENDING_SYNC_BLOCKS.iter() {
             if *entry.key() < local_height.saturating_sub(5) {
                 entries_to_remove.push(*entry.key());
             }
-            if entries_to_remove.len() >= 100 {
-                break;
-            }
         }
-
-        for h in entries_to_remove {
-            PENDING_SYNC_BLOCKS.remove(&h);
+
+        for h in &entries_to_remove {
+            PENDING_SYNC_BLOCKS.remove(h);
         }
-
+
+        // Phase 2: If still full, EVICT FARTHEST blocks from local_height.
+        // Near-height blocks are needed next; far blocks can be re-requested later.
+        // This prevents the deadlock where far blocks occupy all slots and
+        // near blocks (that the node needs to advance) get rejected.
+        if PENDING_SYNC_BLOCKS.len() >= MAX_PENDING_SYNC_BLOCKS {
+            let mut all_heights: Vec<u64> = PENDING_SYNC_BLOCKS.iter()
+                .map(|entry| *entry.key())
+                .collect();
+            // Sort by distance from local_height (farthest first)
+            all_heights.sort_by_key(|h| std::cmp::Reverse(h.abs_diff(local_height)));
+            // Evict top 50% (farthest blocks)
+            let evict_count = all_heights.len() / 2;
+            for h in all_heights.iter().take(evict_count) {
+                PENDING_SYNC_BLOCKS.remove(h);
+            }
+            if crate::node::is_info() {
+                println!("[INFO][SYNC] priority_eviction local_h={} evicted={} remaining={}",
+                    local_height, evict_count, PENDING_SYNC_BLOCKS.len());
+            }
+        }
+
         if PENDING_SYNC_BLOCKS.len() >= MAX_PENDING_SYNC_BLOCKS {
             if crate::node::is_warn() {
-                println!("[WARN][SYNC] queue_full_after_cleanup size={} rejecting={}",
+                println!("[WARN][SYNC] queue_full_after_cleanup size={} rejecting={}",
                     PENDING_SYNC_BLOCKS.len(), height);
             }
             return false;
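The farthest-first ordering that Phase 2 relies on is easy to verify in isolation. Below is a minimal sketch of the same selection logic against a plain Vec<u64> standing in for the DashMap keys; the function name `select_evictions`, the `main` driver, and the sample heights are illustrative, while `abs_diff`, the `Reverse` sort key, and the 50% eviction ratio mirror the diff:

    use std::cmp::Reverse;

    /// Return the heights that a Phase 2-style eviction would remove:
    /// the farthest half of the queue, measured from local_height.
    fn select_evictions(mut heights: Vec<u64>, local_height: u64) -> Vec<u64> {
        // Farthest-first: sort by descending distance from local_height.
        heights.sort_by_key(|h| Reverse(h.abs_diff(local_height)));
        // The first 50% are the far blocks, i.e. the eviction set.
        let evict_count = heights.len() / 2;
        heights.truncate(evict_count);
        heights
    }

    fn main() {
        // Local height 1000; the queue holds two near and two far blocks.
        let evicted = select_evictions(vec![1001, 1002, 5000, 9000], 1000);
        assert_eq!(evicted, vec![9000, 5000]); // far blocks go first
        println!("evicting: {:?}", evicted);
    }

Sorting by descending distance puts the farthest blocks at the front, so taking the first half yields the eviction set while the near-height entries the node needs next survive in the queue.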
@@ -10468,11 +10486,19 @@ impl SimplifiedP2P {
         let parallel_workers: usize = workers;
         let chunk_size_blocks: u64 = chunk_size;
 
-        // v10.1: Download ALL missing blocks in one call.
-        // Wave-based limiting (old WAVE_SIZE=100) was causing our fast sync loop
-        // to need many iterations. Semaphore already limits concurrency.
-        // The caller (fast sync loop) handles re-invocation if network advances.
-        let actual_target = target_height;
+        // v10.3: Download only NEAREST missing blocks, not all at once.
+        // Downloading ALL ranges simultaneously floods PENDING_SYNC_BLOCKS (max 1000)
+        // with far-away blocks, causing backpressure that rejects near-height blocks
+        // the node actually needs next. This creates a sync deadlock where the node
+        // advances ~100 blocks per 60s TTL cycle instead of continuously.
+        //
+        // Solution: limit to the nearest 500 blocks. The caller (fast sync loop) re-invokes
+        // as local_height advances, naturally sliding the window forward.
+        const MAX_SYNC_WINDOW: u64 = 500;
+        let actual_target = std::cmp::min(target_height, current_height + MAX_SYNC_WINDOW);
+
+        // Filter missing_blocks to only include blocks within the window
+        missing_blocks.retain(|h| *h <= actual_target);
 
         if crate::node::is_info() {
             println!("[SYNC] ⚡ Starting parallel sync: {} blocks (target: {}) with {} workers",
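The window arithmetic can be sanity-checked on its own. A minimal sketch follows; the standalone `window` helper, the `main` driver, and the sample heights are illustrative, while `MAX_SYNC_WINDOW`, the `min` clamp, and the `retain` filter mirror the diff:

    const MAX_SYNC_WINDOW: u64 = 500;

    /// Clamp the sync target to a sliding window above current_height and
    /// drop queued requests beyond it, as the diff above does inline.
    fn window(current_height: u64, target_height: u64, missing: &mut Vec<u64>) -> u64 {
        let actual_target = std::cmp::min(target_height, current_height + MAX_SYNC_WINDOW);
        missing.retain(|h| *h <= actual_target);
        actual_target
    }

    fn main() {
        let mut missing = vec![1001, 1250, 1600, 9000];
        let target = window(1000, 10_000, &mut missing);
        assert_eq!(target, 1500);              // clamped to 1000 + 500
        assert_eq!(missing, vec![1001, 1250]); // far blocks deferred to a later pass
        println!("target={} missing={:?}", target, missing);
    }

As local height advances and the fast sync loop re-invokes, `current_height + MAX_SYNC_WINDOW` grows with it, which is the sliding window the comment describes.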