From f8f3c8fe01235756e17aeeee45b0d8434020eb58 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Fri, 13 Mar 2026 14:54:52 +0100 Subject: [PATCH 01/20] feat: add DA inclusion status tracking from ev-node Add L2 Data Availability (DA) inclusion tracking by querying ev-node's Connect RPC StoreService for Celestia DA heights per block. - New block_da_status table with background DA worker (backfill + retry) - ev-node client with auto-detecting proto/JSON Connect RPC modes - API returns da_status on block responses, features flag on /status - Frontend shows DA rows on block detail when da_tracking is enabled - Configurable via EVNODE_URL and DA_WORKER_CONCURRENCY env vars Closes #4 --- .env.example | 7 + CLAUDE.md | 2 + backend/Cargo.toml | 3 + .../crates/atlas-api/src/handlers/blocks.rs | 29 +- .../crates/atlas-api/src/handlers/status.rs | 9 + backend/crates/atlas-api/src/main.rs | 8 + backend/crates/atlas-common/src/types.rs | 12 + backend/crates/atlas-indexer/Cargo.toml | 2 + backend/crates/atlas-indexer/src/config.rs | 9 + backend/crates/atlas-indexer/src/da_worker.rs | 197 +++++++++++ backend/crates/atlas-indexer/src/evnode.rs | 331 ++++++++++++++++++ backend/crates/atlas-indexer/src/main.rs | 20 ++ .../20240108000001_block_da_status.sql | 21 ++ docker-compose.yml | 3 + frontend/src/api/status.ts | 3 + frontend/src/hooks/index.ts | 1 + frontend/src/hooks/useFeatures.ts | 27 ++ frontend/src/hooks/useLatestBlockHeight.ts | 8 +- frontend/src/pages/BlockDetailPage.tsx | 36 +- frontend/src/types/index.ts | 15 + 20 files changed, 738 insertions(+), 5 deletions(-) create mode 100644 backend/crates/atlas-indexer/src/da_worker.rs create mode 100644 backend/crates/atlas-indexer/src/evnode.rs create mode 100644 backend/migrations/20240108000001_block_da_status.sql create mode 100644 frontend/src/hooks/useFeatures.ts diff --git a/.env.example b/.env.example index 4426f24..c4f1cec 100644 --- a/.env.example +++ b/.env.example @@ -15,3 +15,10 
@@ FETCH_WORKERS=10 # Number of blocks to fetch per RPC batch request (reduces HTTP round-trips) RPC_BATCH_SIZE=20 + +# Optional: ev-node Connect RPC URL for L2 DA (Data Availability) inclusion tracking. +# When set, a background worker queries ev-node for Celestia DA heights per block. +# EVNODE_URL=http://localhost:7331 + +# Number of concurrent requests to ev-node for DA status backfill (default: 10) +# DA_WORKER_CONCURRENCY=10 diff --git a/CLAUDE.md b/CLAUDE.md index 4b94f2c..357844c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -105,6 +105,8 @@ Key vars (see `.env.example` for full list): | `BATCH_SIZE` | indexer | `100` | | `FETCH_WORKERS` | indexer | `10` | | `ADMIN_API_KEY` | api | none | +| `EVNODE_URL` | indexer, api | none (DA tracking disabled) | +| `DA_WORKER_CONCURRENCY` | indexer | `10` | ## Running Locally diff --git a/backend/Cargo.toml b/backend/Cargo.toml index fb250d6..e5fd8d4 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -49,6 +49,9 @@ tokio-stream = "0.1" futures = "0.3" async-stream = "0.3" +# Protobuf +prost = "0.13" + # Utilities bigdecimal = { version = "0.4", features = ["serde"] } hex = "0.4" diff --git a/backend/crates/atlas-api/src/handlers/blocks.rs b/backend/crates/atlas-api/src/handlers/blocks.rs index d9a10b8..d500286 100644 --- a/backend/crates/atlas-api/src/handlers/blocks.rs +++ b/backend/crates/atlas-api/src/handlers/blocks.rs @@ -2,11 +2,22 @@ use axum::{ extract::{Path, Query, State}, Json, }; +use serde::Serialize; use std::sync::Arc; use crate::error::ApiResult; use crate::AppState; -use atlas_common::{AtlasError, Block, PaginatedResponse, Pagination, Transaction}; +use atlas_common::{AtlasError, Block, BlockDaStatus, PaginatedResponse, Pagination, Transaction}; + +/// Block response with optional DA status. +/// DA fields are always present in the JSON (null when no data), +/// so the frontend can rely on a stable schema. 
+#[derive(Serialize)] +pub struct BlockResponse { + #[serde(flatten)] + pub block: Block, + pub da_status: Option, +} pub async fn list_blocks( State(state): State>, @@ -48,7 +59,7 @@ pub async fn list_blocks( pub async fn get_block( State(state): State>, Path(number): Path, -) -> ApiResult> { +) -> ApiResult> { let block: Block = sqlx::query_as( "SELECT number, hash, parent_hash, timestamp, gas_used, gas_limit, transaction_count, indexed_at FROM blocks @@ -59,7 +70,19 @@ pub async fn get_block( .await? .ok_or_else(|| AtlasError::NotFound(format!("Block {} not found", number)))?; - Ok(Json(block)) + // Always query DA status — returns None when no row exists (DA worker hasn't checked yet, + // or EVNODE_URL is not configured). The frontend uses the features.da_tracking flag from + // /api/status to decide whether to display DA information. + let da_status: Option = sqlx::query_as( + "SELECT block_number, header_da_height, data_da_height, updated_at + FROM block_da_status + WHERE block_number = $1", + ) + .bind(number) + .fetch_optional(&state.pool) + .await?; + + Ok(Json(BlockResponse { block, da_status })) } pub async fn get_block_transactions( diff --git a/backend/crates/atlas-api/src/handlers/status.rs b/backend/crates/atlas-api/src/handlers/status.rs index 3744536..820e9c8 100644 --- a/backend/crates/atlas-api/src/handlers/status.rs +++ b/backend/crates/atlas-api/src/handlers/status.rs @@ -5,10 +5,16 @@ use std::sync::Arc; use crate::error::ApiResult; use crate::AppState; +#[derive(Serialize)] +pub struct ChainFeatures { + pub da_tracking: bool, +} + #[derive(Serialize)] pub struct ChainStatus { pub block_height: i64, pub indexed_at: String, + pub features: ChainFeatures, } /// GET /api/status - Lightweight endpoint for current chain status @@ -25,5 +31,8 @@ pub async fn get_status(State(state): State>) -> ApiResult, + /// ev-node Connect RPC URL. When set, enables DA tracking features. 
+ pub evnode_url: Option, } #[tokio::main] @@ -57,12 +59,18 @@ async fn main() -> Result<()> { let (block_events_tx, _) = broadcast::channel(1024); + let evnode_url = std::env::var("EVNODE_URL").ok(); + if evnode_url.is_some() { + tracing::info!("DA tracking enabled (EVNODE_URL set)"); + } + let state = Arc::new(AppState { pool: pool.clone(), block_events_tx: block_events_tx.clone(), rpc_url, solc_path, admin_api_key, + evnode_url, }); tokio::spawn(handlers::sse::run_block_event_fanout( diff --git a/backend/crates/atlas-common/src/types.rs b/backend/crates/atlas-common/src/types.rs index a3f9776..886164a 100644 --- a/backend/crates/atlas-common/src/types.rs +++ b/backend/crates/atlas-common/src/types.rs @@ -16,6 +16,18 @@ pub struct Block { pub indexed_at: DateTime, } +/// DA (Data Availability) status for a block on L2 chains using Celestia. +/// Only populated when EVNODE_URL is configured and the DA worker has checked the block. +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +pub struct BlockDaStatus { + pub block_number: i64, + /// Celestia height where the block header was submitted. 0 = pending. + pub header_da_height: i64, + /// Celestia height where the block data was submitted. 0 = pending. 
+ pub data_da_height: i64, + pub updated_at: DateTime, +} + /// Transaction data as stored in the database #[derive(Debug, Clone, Serialize, Deserialize, FromRow)] pub struct Transaction { diff --git a/backend/crates/atlas-indexer/Cargo.toml b/backend/crates/atlas-indexer/Cargo.toml index 1ca4284..4f3b1c5 100644 --- a/backend/crates/atlas-indexer/Cargo.toml +++ b/backend/crates/atlas-indexer/Cargo.toml @@ -24,6 +24,8 @@ bigdecimal = { workspace = true } num-bigint = "0.4" hex = { workspace = true } chrono = { workspace = true } +prost = { workspace = true } +futures = { workspace = true } async-channel = "2.3" governor = "0.6" tokio-postgres = { version = "0.7" } diff --git a/backend/crates/atlas-indexer/src/config.rs b/backend/crates/atlas-indexer/src/config.rs index 806edd4..b74166d 100644 --- a/backend/crates/atlas-indexer/src/config.rs +++ b/backend/crates/atlas-indexer/src/config.rs @@ -15,6 +15,10 @@ pub struct Config { pub metadata_retry_attempts: u32, pub fetch_workers: u32, pub rpc_batch_size: u32, + /// ev-node Connect RPC URL for DA status tracking. None = DA feature disabled. + pub evnode_url: Option, + /// Number of concurrent requests to ev-node for DA status backfill. + pub da_worker_concurrency: u32, } impl Config { @@ -60,6 +64,11 @@ impl Config { .unwrap_or_else(|_| "20".to_string()) .parse() .context("Invalid RPC_BATCH_SIZE")?, + evnode_url: env::var("EVNODE_URL").ok(), + da_worker_concurrency: env::var("DA_WORKER_CONCURRENCY") + .unwrap_or_else(|_| "10".to_string()) + .parse() + .context("Invalid DA_WORKER_CONCURRENCY")?, }) } } diff --git a/backend/crates/atlas-indexer/src/da_worker.rs b/backend/crates/atlas-indexer/src/da_worker.rs new file mode 100644 index 0000000..c41ff9c --- /dev/null +++ b/backend/crates/atlas-indexer/src/da_worker.rs @@ -0,0 +1,197 @@ +//! Background DA (Data Availability) worker for tracking Celestia inclusion status. +//! +//! This worker queries ev-node's Connect RPC service to determine at which Celestia +//! 
height each block's header and data were submitted. +//! +//! ## Two-phase design +//! +//! The worker runs in a loop with two phases: +//! +//! 1. **Backfill** — Discovers blocks in the `blocks` table that are missing from +//! `block_da_status`. Queries ev-node for each and INSERTs the result. +//! **Always inserts a row, even when DA heights are 0** (block not yet included +//! on Celestia). This marks the block as "checked" so the backfill phase won't +//! re-query it on the next cycle. Processes newest blocks first so the UI shows +//! current data immediately. +//! +//! 2. **Update pending** — Finds rows where `header_da_height = 0 OR data_da_height = 0` +//! and re-queries ev-node. Updates with new values when the block has been included. +//! Processes oldest pending blocks first. +//! +//! A block flows: backfill (phase 1) → update-pending (phase 2) → done. + +use anyhow::Result; +use futures::stream::{self, StreamExt}; +use sqlx::PgPool; +use std::time::Duration; + +use crate::evnode::EvnodeClient; + +/// Batch size for DB queries per cycle. +const BATCH_SIZE: i64 = 100; + +/// Sleep between worker cycles. 
+const CYCLE_SLEEP: Duration = Duration::from_secs(2); + +pub struct DaWorker { + pool: PgPool, + client: EvnodeClient, + concurrency: usize, +} + +impl DaWorker { + pub fn new(pool: PgPool, evnode_url: &str, concurrency: u32) -> Result { + Ok(Self { + pool, + client: EvnodeClient::new(evnode_url), + concurrency: concurrency as usize, + }) + } + + pub async fn run(&self) -> Result<()> { + tracing::info!( + "DA worker started (concurrency: {})", + self.concurrency + ); + + loop { + // Phase 1: discover and check new blocks (newest first) + let backfilled = self.backfill_new_blocks().await?; + + // Phase 2: retry blocks pending DA inclusion (oldest first) + let updated = self.update_pending_blocks().await?; + + if backfilled > 0 || updated > 0 { + tracing::info!( + "DA worker cycle: backfilled {}, updated {} pending", + backfilled, + updated + ); + } + + tokio::time::sleep(CYCLE_SLEEP).await; + } + } + + /// Phase 1: Find blocks missing from block_da_status and query ev-node. + /// Returns the number of blocks processed. 
+ async fn backfill_new_blocks(&self) -> Result { + let missing: Vec<(i64,)> = sqlx::query_as( + "SELECT b.number FROM blocks b + LEFT JOIN block_da_status d ON d.block_number = b.number + WHERE d.block_number IS NULL + ORDER BY b.number DESC + LIMIT $1", + ) + .bind(BATCH_SIZE) + .fetch_all(&self.pool) + .await?; + + if missing.is_empty() { + return Ok(0); + } + + let count = missing.len(); + let pool = &self.pool; + let client = &self.client; + + stream::iter(missing) + .map(|(block_number,)| async move { + match client.get_da_status(block_number as u64).await { + Ok((header_da, data_da)) => { + if let Err(e) = sqlx::query( + "INSERT INTO block_da_status (block_number, header_da_height, data_da_height) + VALUES ($1, $2, $3) + ON CONFLICT (block_number) DO UPDATE SET + header_da_height = EXCLUDED.header_da_height, + data_da_height = EXCLUDED.data_da_height, + updated_at = NOW()", + ) + .bind(block_number) + .bind(header_da as i64) + .bind(data_da as i64) + .execute(pool) + .await + { + tracing::warn!( + "Failed to insert DA status for block {}: {}", + block_number, + e + ); + } + } + Err(e) => { + tracing::warn!( + "Failed to fetch DA status for block {}: {}", + block_number, + e + ); + } + } + }) + .buffer_unordered(self.concurrency) + .collect::>() + .await; + + Ok(count) + } + + /// Phase 2: Re-check blocks where DA heights are still 0. + /// Returns the number of blocks processed. 
+ async fn update_pending_blocks(&self) -> Result { + let pending: Vec<(i64,)> = sqlx::query_as( + "SELECT block_number FROM block_da_status + WHERE header_da_height = 0 OR data_da_height = 0 + ORDER BY block_number ASC + LIMIT $1", + ) + .bind(BATCH_SIZE) + .fetch_all(&self.pool) + .await?; + + if pending.is_empty() { + return Ok(0); + } + + let count = pending.len(); + let pool = &self.pool; + let client = &self.client; + + stream::iter(pending) + .map(|(block_number,)| async move { + match client.get_da_status(block_number as u64).await { + Ok((header_da, data_da)) => { + if let Err(e) = sqlx::query( + "UPDATE block_da_status + SET header_da_height = $2, data_da_height = $3, updated_at = NOW() + WHERE block_number = $1", + ) + .bind(block_number) + .bind(header_da as i64) + .bind(data_da as i64) + .execute(pool) + .await + { + tracing::warn!( + "Failed to update DA status for block {}: {}", + block_number, + e + ); + } + } + Err(e) => { + tracing::warn!( + "Failed to fetch DA status for block {}: {}", + block_number, + e + ); + } + } + }) + .buffer_unordered(self.concurrency) + .collect::>() + .await; + + Ok(count) + } +} diff --git a/backend/crates/atlas-indexer/src/evnode.rs b/backend/crates/atlas-indexer/src/evnode.rs new file mode 100644 index 0000000..0e0ff2e --- /dev/null +++ b/backend/crates/atlas-indexer/src/evnode.rs @@ -0,0 +1,331 @@ +//! ev-node Connect RPC client for querying DA (Data Availability) status. +//! +//! ev-node exposes a Connect RPC service (`StoreService`) that provides +//! consensus/DA layer data separate from the standard EVM JSON-RPC API. +//! This module wraps the `GetBlock` RPC to extract DA inclusion heights. +//! +//! Connect RPC supports two serialization modes: +//! - **Protobuf** (`application/proto`) — binary, more efficient +//! - **JSON** (`application/json`) — text, required by some deployments +//! +//! The client auto-detects the correct mode: it starts with protobuf and +//! 
transparently switches to JSON if the server returns 415 Unsupported +//! Media Type. Once switched, all subsequent requests use JSON. + +use anyhow::{bail, Result}; +use prost::Message; +use serde::{Deserialize, Serialize}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::Duration; + +// --------------------------------------------------------------------------- +// Protobuf message types (matching ev-node proto/evnode/v1/state_rpc.proto) +// +// We define only the minimal types needed to decode GetBlockResponse. +// The GetBlockResponse has top-level fields for DA heights (tags 2 and 3), +// so we don't need to navigate into nested Block/Header/Data messages. +// --------------------------------------------------------------------------- + +/// Request message for StoreService.GetBlock. +/// Field 1: block height (uint64). +#[derive(Clone, PartialEq, Message)] +pub struct GetBlockRequest { + #[prost(uint64, tag = "1")] + pub height: u64, +} + +/// Response message for StoreService.GetBlock (minimal). +/// We only decode the DA height fields, ignoring the full Block message. +#[derive(Clone, PartialEq, Message)] +pub struct GetBlockResponse { + // Field 1 (Block) is skipped — we don't need block contents for DA status. + + /// Celestia height where the block header was submitted. + /// 0 means not yet submitted. + #[prost(uint64, tag = "2")] + pub header_da_height: u64, + + /// Celestia height where the block data was submitted. + /// 0 means not yet submitted. 
+ #[prost(uint64, tag = "3")] + pub data_da_height: u64, +} + +// --------------------------------------------------------------------------- +// JSON types for Connect RPC JSON mode +// --------------------------------------------------------------------------- + +#[derive(Serialize)] +struct JsonGetBlockRequest { + height: String, // Connect RPC encodes uint64 as string in JSON +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct JsonGetBlockResponse { + #[serde(default, deserialize_with = "deserialize_u64_string")] + header_da_height: u64, + #[serde(default, deserialize_with = "deserialize_u64_string")] + data_da_height: u64, +} + +/// Connect RPC encodes uint64 as JSON strings (e.g., `"123"` not `123`). +/// This deserializer handles both string and numeric representations. +fn deserialize_u64_string<'de, D>(deserializer: D) -> std::result::Result +where + D: serde::Deserializer<'de>, +{ + use serde::de; + + struct U64Visitor; + impl<'de> de::Visitor<'de> for U64Visitor { + type Value = u64; + fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str("a u64 as a string or number") + } + fn visit_u64(self, v: u64) -> std::result::Result { + Ok(v) + } + fn visit_str(self, v: &str) -> std::result::Result { + v.parse().map_err(de::Error::custom) + } + } + deserializer.deserialize_any(U64Visitor) +} + +/// Retry delays for ev-node RPC calls (in seconds). +const RETRY_DELAYS: &[u64] = &[2, 5, 10, 20, 30]; +const MAX_RETRIES: usize = 10; + +/// Client for ev-node's Connect RPC StoreService. +/// +/// Supports both protobuf and JSON serialization modes. The mode is +/// auto-detected on the first request: if the server rejects protobuf +/// with HTTP 415, the client switches to JSON for all future requests. +pub struct EvnodeClient { + client: reqwest::Client, + base_url: String, + /// When true, use JSON mode instead of protobuf. 
+ use_json: AtomicBool, +} + +impl EvnodeClient { + /// Create a new client pointing at the given ev-node Connect RPC URL. + /// + /// # Arguments + /// * `evnode_url` — Base URL of the ev-node Connect RPC service (e.g., `http://localhost:7331`) + pub fn new(evnode_url: &str) -> Self { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(10)) + .build() + .expect("failed to create HTTP client"); + + Self { + client, + base_url: evnode_url.trim_end_matches('/').to_string(), + use_json: AtomicBool::new(false), + } + } + + /// Fetch DA inclusion heights for a block. + /// + /// Returns `(header_da_height, data_da_height)`. + /// Both are 0 if the block has not yet been submitted to Celestia. + /// + /// Retries with exponential backoff on transient errors. + pub async fn get_da_status(&self, height: u64) -> Result<(u64, u64)> { + let url = format!( + "{}/evnode.v1.StoreService/GetBlock", + self.base_url + ); + + let mut last_error = None; + + for attempt in 0..MAX_RETRIES { + match self.do_request(&url, height).await { + Ok((h, d)) => return Ok((h, d)), + Err(e) => { + let delay = RETRY_DELAYS + .get(attempt) + .copied() + .unwrap_or(*RETRY_DELAYS.last().unwrap()); + + tracing::warn!( + "ev-node GetBlock failed for height {} (attempt {}): {}. Retrying in {}s", + height, + attempt + 1, + e, + delay, + ); + + last_error = Some(e); + tokio::time::sleep(Duration::from_secs(delay)).await; + } + } + } + + bail!( + "ev-node GetBlock failed for height {} after {} retries: {}", + height, + MAX_RETRIES, + last_error.unwrap() + ) + } + + /// Send a Connect RPC request, auto-detecting proto vs JSON mode. + /// + /// On HTTP 415 (Unsupported Media Type) when using protobuf, switches + /// to JSON mode and retries the request immediately. 
+ async fn do_request(&self, url: &str, height: u64) -> Result<(u64, u64)> { + if self.use_json.load(Ordering::Relaxed) { + return self.do_json_request(url, height).await; + } + + // Try protobuf first + let request = GetBlockRequest { height }; + let body = request.encode_to_vec(); + + let response = self + .client + .post(url) + .header("Content-Type", "application/proto") + .body(body) + .send() + .await?; + + // If server requires JSON, switch modes and retry + if response.status() == reqwest::StatusCode::UNSUPPORTED_MEDIA_TYPE { + tracing::info!("ev-node requires JSON mode, switching from protobuf"); + self.use_json.store(true, Ordering::Relaxed); + return self.do_json_request(url, height).await; + } + + if !response.status().is_success() { + bail!( + "HTTP {}: {}", + response.status(), + response.text().await.unwrap_or_default() + ); + } + + let bytes = response.bytes().await?; + let resp = GetBlockResponse::decode(bytes.as_ref())?; + Ok((resp.header_da_height, resp.data_da_height)) + } + + /// Send a Connect RPC request using JSON serialization. 
+ async fn do_json_request(&self, url: &str, height: u64) -> Result<(u64, u64)> { + let request = JsonGetBlockRequest { + height: height.to_string(), + }; + + let response = self + .client + .post(url) + .header("Content-Type", "application/json") + .json(&request) + .send() + .await?; + + if !response.status().is_success() { + bail!( + "HTTP {}: {}", + response.status(), + response.text().await.unwrap_or_default() + ); + } + + let resp: JsonGetBlockResponse = response.json().await?; + Ok((resp.header_da_height, resp.data_da_height)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encode_decode_get_block_request() { + let req = GetBlockRequest { height: 42 }; + let bytes = req.encode_to_vec(); + let decoded = GetBlockRequest::decode(bytes.as_slice()).unwrap(); + assert_eq!(decoded.height, 42); + } + + #[test] + fn encode_decode_get_block_response() { + let resp = GetBlockResponse { + header_da_height: 100, + data_da_height: 200, + }; + let bytes = resp.encode_to_vec(); + let decoded = GetBlockResponse::decode(bytes.as_slice()).unwrap(); + assert_eq!(decoded.header_da_height, 100); + assert_eq!(decoded.data_da_height, 200); + } + + #[test] + fn decode_response_with_zeros() { + let resp = GetBlockResponse { + header_da_height: 0, + data_da_height: 0, + }; + let bytes = resp.encode_to_vec(); + let decoded = GetBlockResponse::decode(bytes.as_slice()).unwrap(); + assert_eq!(decoded.header_da_height, 0); + assert_eq!(decoded.data_da_height, 0); + } + + #[test] + fn decode_empty_response_defaults_to_zeros() { + // An empty protobuf message should decode with default (zero) values + let decoded = GetBlockResponse::decode(&[] as &[u8]).unwrap(); + assert_eq!(decoded.header_da_height, 0); + assert_eq!(decoded.data_da_height, 0); + } + + #[test] + fn client_trims_trailing_slash() { + let client = EvnodeClient::new("http://localhost:7331/"); + assert_eq!(client.base_url, "http://localhost:7331"); + } + + #[test] + fn client_starts_in_proto_mode() { + let 
client = EvnodeClient::new("http://localhost:7331"); + assert!(!client.use_json.load(Ordering::Relaxed)); + } + + #[test] + fn json_request_serializes_height_as_string() { + let req = JsonGetBlockRequest { + height: 42.to_string(), + }; + let json = serde_json::to_string(&req).unwrap(); + assert_eq!(json, r#"{"height":"42"}"#); + } + + #[test] + fn json_response_deserializes_string_heights() { + let json = r#"{"headerDaHeight":"100","dataDaHeight":"200"}"#; + let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + assert_eq!(resp.header_da_height, 100); + assert_eq!(resp.data_da_height, 200); + } + + #[test] + fn json_response_deserializes_numeric_heights() { + let json = r#"{"headerDaHeight":100,"dataDaHeight":200}"#; + let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + assert_eq!(resp.header_da_height, 100); + assert_eq!(resp.data_da_height, 200); + } + + #[test] + fn json_response_defaults_missing_fields_to_zero() { + let json = r#"{}"#; + let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + assert_eq!(resp.header_da_height, 0); + assert_eq!(resp.data_da_height, 0); + } +} diff --git a/backend/crates/atlas-indexer/src/main.rs b/backend/crates/atlas-indexer/src/main.rs index 62451e8..016b728 100644 --- a/backend/crates/atlas-indexer/src/main.rs +++ b/backend/crates/atlas-indexer/src/main.rs @@ -5,6 +5,8 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; mod batch; mod config; mod copy; +mod da_worker; +mod evnode; mod fetcher; mod indexer; mod metadata; @@ -53,6 +55,24 @@ async fn main() -> Result<()> { .await }); + // Start DA worker in background (only if EVNODE_URL is configured) + if let Some(ref evnode_url) = config.evnode_url { + tracing::info!("DA worker enabled, ev-node URL: {}", evnode_url); + let da_pool = pool.clone(); + let da_url = evnode_url.clone(); + let da_concurrency = config.da_worker_concurrency; + tokio::spawn(async move { + run_with_retry(|| async { + let worker 
= + da_worker::DaWorker::new(da_pool.clone(), &da_url, da_concurrency)?; + worker.run().await + }) + .await + }); + } else { + tracing::info!("DA worker disabled (EVNODE_URL not set)"); + } + // Run indexer with retry on failure run_with_retry(|| indexer.run()).await?; diff --git a/backend/migrations/20240108000001_block_da_status.sql b/backend/migrations/20240108000001_block_da_status.sql new file mode 100644 index 0000000..df6bd3f --- /dev/null +++ b/backend/migrations/20240108000001_block_da_status.sql @@ -0,0 +1,21 @@ +-- Block DA (Data Availability) status for L2 chains using Celestia. +-- Only populated when EVNODE_URL is configured and the DA worker is running. +-- +-- The DA worker has two phases: +-- 1. Backfill: discovers blocks missing from this table, queries ev-node, and INSERTs. +-- Always inserts a row even when DA heights are 0 (not yet included on Celestia). +-- This marks the block as "checked" so backfill won't re-query it. +-- 2. Update pending: retries rows where header_da_height = 0 OR data_da_height = 0 +-- until real DA heights are returned by ev-node. + +CREATE TABLE IF NOT EXISTS block_da_status ( + block_number BIGINT PRIMARY KEY, + header_da_height BIGINT NOT NULL DEFAULT 0, + data_da_height BIGINT NOT NULL DEFAULT 0, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Partial index for the DA worker to efficiently find blocks still pending DA inclusion. 
+CREATE INDEX IF NOT EXISTS idx_block_da_status_pending + ON block_da_status (block_number) + WHERE header_da_height = 0 OR data_da_height = 0; diff --git a/docker-compose.yml b/docker-compose.yml index 0e206aa..ef604b8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -30,6 +30,8 @@ services: FETCH_WORKERS: ${FETCH_WORKERS:-10} RPC_REQUESTS_PER_SECOND: ${RPC_REQUESTS_PER_SECOND:-100} RPC_BATCH_SIZE: ${RPC_BATCH_SIZE:-20} + EVNODE_URL: ${EVNODE_URL:-} + DA_WORKER_CONCURRENCY: ${DA_WORKER_CONCURRENCY:-10} RUST_LOG: atlas_indexer=info depends_on: postgres: @@ -44,6 +46,7 @@ services: environment: DATABASE_URL: postgres://atlas:atlas@postgres/atlas RPC_URL: ${RPC_URL} + EVNODE_URL: ${EVNODE_URL:-} API_HOST: 0.0.0.0 API_PORT: 3000 RUST_LOG: atlas_api=info,tower_http=info diff --git a/frontend/src/api/status.ts b/frontend/src/api/status.ts index 4f5df2c..5310c49 100644 --- a/frontend/src/api/status.ts +++ b/frontend/src/api/status.ts @@ -1,8 +1,11 @@ import client from './client'; +import type { ChainFeatures } from '../types'; + export interface StatusResponse { block_height: number; indexed_at: string; // ISO timestamp + features: ChainFeatures; } export async function getStatus(): Promise { diff --git a/frontend/src/hooks/index.ts b/frontend/src/hooks/index.ts index f5bc2c2..621c7df 100644 --- a/frontend/src/hooks/index.ts +++ b/frontend/src/hooks/index.ts @@ -9,3 +9,4 @@ export * from './useTransfers'; export * from './useProxies'; export { default as useEthBalance } from './useEthBalance'; export { default as useEthPrice } from './useEthPrice'; +export { default as useFeatures } from './useFeatures'; diff --git a/frontend/src/hooks/useFeatures.ts b/frontend/src/hooks/useFeatures.ts new file mode 100644 index 0000000..5827e62 --- /dev/null +++ b/frontend/src/hooks/useFeatures.ts @@ -0,0 +1,27 @@ +import { useEffect, useState } from 'react'; +import { getStatus } from '../api/status'; +import type { ChainFeatures } from '../types'; + +const defaultFeatures: 
ChainFeatures = { da_tracking: false }; + +/** + * Fetches chain feature flags from /api/status once on mount. + * Returns the features object (defaults to all disabled until loaded). + */ +export default function useFeatures(): ChainFeatures { + const [features, setFeatures] = useState(defaultFeatures); + + useEffect(() => { + let cancelled = false; + getStatus().then((status) => { + if (!cancelled && status.features) { + setFeatures(status.features); + } + }).catch(() => { + // Silently use defaults on error + }); + return () => { cancelled = true; }; + }, []); + + return features; +} diff --git a/frontend/src/hooks/useLatestBlockHeight.ts b/frontend/src/hooks/useLatestBlockHeight.ts index a0ff29e..70e7af4 100644 --- a/frontend/src/hooks/useLatestBlockHeight.ts +++ b/frontend/src/hooks/useLatestBlockHeight.ts @@ -1,5 +1,6 @@ import { useCallback, useEffect, useRef, useState } from 'react'; import { getStatus } from '../api/status'; +import type { ChainFeatures } from '../types'; export interface SSEState { height: number | null; @@ -13,6 +14,7 @@ export interface LatestHeightState { error: string | null; lastUpdatedAt: number | null; bps: number | null; + features: ChainFeatures | null; } /** @@ -31,6 +33,7 @@ export default function useLatestBlockHeight( const [lastUpdatedAt, setLastUpdatedAt] = useState(null); const fetchingRef = useRef(false); const [bps, setBps] = useState(null); + const [features, setFeatures] = useState(null); const prevSampleRef = useRef<{ h: number; t: number } | null>(null); const alphaRef = useRef(0.25); // smoothing factor for EMA @@ -86,6 +89,9 @@ export default function useLatestBlockHeight( const latestHeight = status?.block_height; if (typeof latestHeight === 'number') { processHeight(latestHeight, false); + if (status.features) { + setFeatures(status.features); + } } else { setHeight(null); } @@ -106,5 +112,5 @@ export default function useLatestBlockHeight( return () => clearInterval(id); }, [pollMs, fetchHeight, sseConnected]); - 
return { height, loading, error, lastUpdatedAt, bps }; + return { height, loading, error, lastUpdatedAt, bps, features }; } diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 67e1de2..27a030e 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -1,14 +1,33 @@ import { useParams, Link } from 'react-router-dom'; -import { useBlock, useBlockTransactions } from '../hooks'; +import { useBlock, useBlockTransactions, useFeatures } from '../hooks'; import { CopyButton, Loading, AddressLink, TxHashLink, StatusBadge } from '../components'; import { formatNumber, formatTimestamp, formatGas, truncateHash, formatTimeAgo, formatEther } from '../utils'; import { useState } from 'react'; import type { ReactNode } from 'react'; +/** Format a DA height as a human-readable status string. */ +function formatDaStatus(daHeight: number): ReactNode { + if (daHeight > 0) { + return ( + + + Included at Celestia height {formatNumber(daHeight)} + + ); + } + return ( + + + Pending + + ); +} + export default function BlockDetailPage() { const { number } = useParams<{ number: string }>(); const blockNumber = number ? parseInt(number, 10) : undefined; const { block, loading: blockLoading, error: blockError } = useBlock(blockNumber); + const features = useFeatures(); const [txPage, setTxPage] = useState(1); const { transactions, pagination, loading } = useBlockTransactions(blockNumber, { page: txPage, limit: 20 }); @@ -44,6 +63,21 @@ export default function BlockDetailPage() { }, { label: 'Gas Used', value: formatGas(block.gas_used.toString()) }, { label: 'Gas Limit', value: formatGas(block.gas_limit.toString()) }, + // DA status rows — only shown when da_tracking feature is enabled + ...(features.da_tracking ? [ + { + label: 'Header DA', + value: block.da_status + ? formatDaStatus(block.da_status.header_da_height) + : Awaiting check..., + }, + { + label: 'Data DA', + value: block.da_status + ? 
formatDaStatus(block.da_status.data_da_height) + : Awaiting check..., + }, + ] as DetailRow[] : []), ] : [ { label: 'Block Height', value: '---' }, { label: 'Timestamp', value: '---' }, diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 23574be..9fb3706 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -8,6 +8,21 @@ export interface Block { gas_limit: number; transaction_count: number; indexed_at: string; + da_status?: BlockDaStatus | null; +} + +// DA (Data Availability) status for L2 blocks using Celestia. +// Only present when the DA worker has checked the block. +export interface BlockDaStatus { + block_number: number; + header_da_height: number; + data_da_height: number; + updated_at: string; +} + +// Chain feature flags returned by /api/status +export interface ChainFeatures { + da_tracking: boolean; } // Transaction types From 8b71f59dc28b10e98a089ee3cdac50d8292d0ab0 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Fri, 13 Mar 2026 15:15:04 +0100 Subject: [PATCH 02/20] fix: default evnode client to JSON mode Protobuf mode was silently decoding DA heights as zeros. JSON mode is universally supported and verified working. --- backend/crates/atlas-indexer/src/evnode.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/backend/crates/atlas-indexer/src/evnode.rs b/backend/crates/atlas-indexer/src/evnode.rs index 0e0ff2e..a6acbbe 100644 --- a/backend/crates/atlas-indexer/src/evnode.rs +++ b/backend/crates/atlas-indexer/src/evnode.rs @@ -123,7 +123,9 @@ impl EvnodeClient { Self { client, base_url: evnode_url.trim_end_matches('/').to_string(), - use_json: AtomicBool::new(false), + // Default to JSON mode — it's universally supported by ev-node + // and avoids protobuf decoding issues with our minimal struct. 
+ use_json: AtomicBool::new(true), } } @@ -291,9 +293,9 @@ mod tests { } #[test] - fn client_starts_in_proto_mode() { + fn client_starts_in_json_mode() { let client = EvnodeClient::new("http://localhost:7331"); - assert!(!client.use_json.load(Ordering::Relaxed)); + assert!(client.use_json.load(Ordering::Relaxed)); } #[test] From c82597fd0635e4b7d3436bec2542a8fb14afd293 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 16:54:19 +0100 Subject: [PATCH 03/20] feat: show DA status in blocks list and simplify detail page display --- .../crates/atlas-api/src/handlers/blocks.rs | 26 +++++++++++++++++-- frontend/src/pages/BlockDetailPage.tsx | 4 +-- frontend/src/pages/BlocksPage.tsx | 20 ++++++++++++-- 3 files changed, 44 insertions(+), 6 deletions(-) diff --git a/backend/crates/atlas-api/src/handlers/blocks.rs b/backend/crates/atlas-api/src/handlers/blocks.rs index d500286..98ec09a 100644 --- a/backend/crates/atlas-api/src/handlers/blocks.rs +++ b/backend/crates/atlas-api/src/handlers/blocks.rs @@ -22,7 +22,7 @@ pub struct BlockResponse { pub async fn list_blocks( State(state): State>, Query(pagination): Query, -) -> ApiResult>> { +) -> ApiResult>> { // Use MAX(number) + 1 instead of COUNT(*) - blocks are sequential so this is accurate // This is ~6500x faster than COUNT(*) on large tables let total: (Option,) = sqlx::query_as("SELECT MAX(number) + 1 FROM blocks") @@ -48,8 +48,30 @@ pub async fn list_blocks( .fetch_all(&state.pool) .await?; + // Batch-fetch DA status for all blocks in this page + let block_numbers: Vec = blocks.iter().map(|b| b.number).collect(); + let da_rows: Vec = sqlx::query_as( + "SELECT block_number, header_da_height, data_da_height, updated_at + FROM block_da_status + WHERE block_number = ANY($1)" + ) + .bind(&block_numbers) + .fetch_all(&state.pool) + .await?; + + let da_map: std::collections::HashMap = + da_rows.into_iter().map(|d| (d.block_number, d)).collect(); + + let responses: Vec = blocks 
+ .into_iter() + .map(|block| { + let da_status = da_map.get(&block.number).cloned(); + BlockResponse { block, da_status } + }) + .collect(); + Ok(Json(PaginatedResponse::new( - blocks, + responses, pagination.page, pagination.limit, total_count, diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 27a030e..62d0edd 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -5,13 +5,13 @@ import { formatNumber, formatTimestamp, formatGas, truncateHash, formatTimeAgo, import { useState } from 'react'; import type { ReactNode } from 'react'; -/** Format a DA height as a human-readable status string. */ +/** Format a DA height as a status indicator. */ function formatDaStatus(daHeight: number): ReactNode { if (daHeight > 0) { return ( - Included at Celestia height {formatNumber(daHeight)} + {formatNumber(daHeight)} ); } diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index e6ae3bc..56acc78 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -1,6 +1,6 @@ import { useContext, useEffect, useMemo, useRef, useState } from 'react'; import { Link, useNavigate } from 'react-router-dom'; -import { useBlocks } from '../hooks'; +import { useBlocks, useFeatures } from '../hooks'; import { CopyButton, Loading } from '../components'; import { formatNumber, formatTimeAgo, formatGas, truncateHash } from '../utils'; import { BlockStatsContext } from '../context/BlockStatsContext'; @@ -16,6 +16,7 @@ export default function BlocksPage() { } }); const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: 20 }); + const features = useFeatures(); const hasLoaded = !loading || pagination !== null; const { latestBlockEvent, sseConnected } = useContext(BlockStatsContext); const [sseBlocks, setSseBlocks] = useState([]); @@ -323,6 +324,9 @@ export default function BlocksPage() { )} + {features.da_tracking && ( 
+ DA + )} @@ -360,7 +364,19 @@ export default function BlocksPage() { {formatGas(block.gas_used.toString())} - + {features.da_tracking && ( + + {block.da_status ? ( + block.da_status.header_da_height > 0 && block.da_status.data_da_height > 0 ? ( + + ) : ( + + ) + ) : ( + + )} + + )} ))} From d8e77fec4184ff5efc2dd9b9ab5415c9a9651053 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 21:39:47 +0100 Subject: [PATCH 04/20] feat: add live DA status SSE, optimize DA worker performance - Add DA SSE: pg_notify from DA worker -> API broadcast -> SSE da_batch events pushed to connected frontends for real-time DA dot updates - DA worker: split budget between backfill (priority) and pending phases, skip sleep when work available, process newest blocks first in both phases - ev-node client: reduce timeout 10s->2s, retries 10->3 with ms-level backoff - Bump DA_WORKER_CONCURRENCY default 10->50 for higher throughput - Frontend: DA dot pulse animation on SSE updates, remove gray "awaiting check" state (show yellow pending instead), batch SSE for efficiency - BlockDetailPage: simplified DA display with live SSE override --- CLAUDE.md | 2 +- backend/crates/atlas-api/src/handlers/sse.rs | 211 +++++++++++++++--- backend/crates/atlas-api/src/main.rs | 10 +- backend/crates/atlas-indexer/src/config.rs | 2 +- backend/crates/atlas-indexer/src/da_worker.rs | 89 ++++++-- backend/crates/atlas-indexer/src/evnode.rs | 19 +- docker-compose.yml | 2 +- frontend/src/components/Layout.tsx | 2 +- frontend/src/context/BlockStatsContext.tsx | 5 +- frontend/src/hooks/useBlockSSE.ts | 28 ++- frontend/src/index.css | 11 + frontend/src/pages/BlockDetailPage.tsx | 50 +++-- frontend/src/pages/BlocksPage.tsx | 76 +++++-- 13 files changed, 411 insertions(+), 96 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 357844c..b04a636 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -106,7 +106,7 @@ Key vars (see `.env.example` for full list): | `FETCH_WORKERS` | 
indexer | `10` | | `ADMIN_API_KEY` | api | none | | `EVNODE_URL` | indexer, api | none (DA tracking disabled) | -| `DA_WORKER_CONCURRENCY` | indexer | `10` | +| `DA_WORKER_CONCURRENCY` | indexer | `50` | ## Running Locally diff --git a/backend/crates/atlas-api/src/handlers/sse.rs b/backend/crates/atlas-api/src/handlers/sse.rs index 9e357b4..d1c24c2 100644 --- a/backend/crates/atlas-api/src/handlers/sse.rs +++ b/backend/crates/atlas-api/src/handlers/sse.rs @@ -11,13 +11,14 @@ use tokio::sync::broadcast; use tokio::time::sleep; use crate::AppState; -use atlas_common::Block; +use atlas_common::{Block, BlockDaStatus}; use sqlx::{postgres::PgListener, PgPool}; use tracing::warn; const BLOCK_COLUMNS: &str = "number, hash, parent_hash, timestamp, gas_used, gas_limit, transaction_count, indexed_at"; const BLOCK_EVENT_CHANNEL: &str = "atlas_new_blocks"; +const DA_EVENT_CHANNEL: &str = "atlas_da_updates"; const FETCH_BATCH_SIZE: i64 = 256; #[derive(Serialize, Debug)] @@ -25,14 +26,28 @@ struct NewBlockEvent { block: Block, } -/// GET /api/events — Server-Sent Events stream for live block updates. +#[derive(Serialize, Debug)] +struct DaUpdateEvent { + block_number: i64, + header_da_height: i64, + data_da_height: i64, +} + +#[derive(Serialize, Debug)] +struct DaBatchEvent { + updates: Vec, +} + +/// GET /api/events — Server-Sent Events stream for live block and DA updates. /// Seeds from the latest indexed block, then requeries the DB for blocks added /// after that point whenever the shared notification fanout emits a wake-up. +/// Also streams DA status updates when the DA worker processes blocks. pub async fn block_events( State(state): State>, ) -> Sse>> { let pool = state.pool.clone(); - let mut rx = state.block_events_tx.subscribe(); + let mut block_rx = state.block_events_tx.subscribe(); + let mut da_rx = state.da_events_tx.subscribe(); let stream = async_stream::stream! 
{ let mut last_block_number: Option = None; @@ -49,33 +64,59 @@ pub async fn block_events( Err(e) => warn!(error = ?e, "sse: failed to fetch initial block"), } - while let Ok(()) | Err(broadcast::error::RecvError::Lagged(_)) = rx.recv().await { - let mut cursor = last_block_number; - - loop { - match fetch_blocks_after(&pool, cursor).await { - Ok(blocks) => { - if blocks.is_empty() { - break; + loop { + tokio::select! { + result = block_rx.recv() => { + match result { + Ok(()) | Err(broadcast::error::RecvError::Lagged(_)) => { + let mut cursor = last_block_number; + loop { + match fetch_blocks_after(&pool, cursor).await { + Ok(blocks) => { + if blocks.is_empty() { + break; + } + let batch_len = blocks.len(); + for block in blocks { + let block_number = block.number; + last_block_number = Some(block_number); + cursor = Some(block_number); + if let Some(event) = block_to_event(block) { + yield Ok(event); + } + } + if batch_len < FETCH_BATCH_SIZE as usize { + break; + } + } + Err(e) => { + warn!(error = ?e, cursor = ?last_block_number, "sse: failed to fetch blocks after wake-up"); + break; + } + } + } } - - let batch_len = blocks.len(); - for block in blocks { - let block_number = block.number; - last_block_number = Some(block_number); - cursor = Some(block_number); - if let Some(event) = block_to_event(block) { - yield Ok(event); + Err(broadcast::error::RecvError::Closed) => break, + } + } + result = da_rx.recv() => { + match result { + Ok(block_numbers) => { + match fetch_da_status(&pool, &block_numbers).await { + Ok(da_rows) => { + if let Some(event) = da_batch_to_event(&da_rows) { + yield Ok(event); + } + } + Err(e) => { + warn!(error = ?e, "sse: failed to fetch DA status for update"); + } } } - - if batch_len < FETCH_BATCH_SIZE as usize { - break; + Err(broadcast::error::RecvError::Lagged(_)) => { + // Missed some DA updates — frontend will catch up on next poll/update } - } - Err(e) => { - warn!(error = ?e, cursor = ?last_block_number, "sse: failed to fetch 
blocks after wake-up"); - break; + Err(broadcast::error::RecvError::Closed) => break, } } } @@ -129,6 +170,46 @@ pub async fn run_block_event_fanout( } } +pub async fn run_da_event_fanout( + database_url: String, + tx: broadcast::Sender>, +) { + loop { + let mut listener = match PgListener::connect(&database_url).await { + Ok(listener) => listener, + Err(e) => { + warn!(error = ?e, "sse: failed to connect DA Postgres listener"); + sleep(Duration::from_secs(1)).await; + continue; + } + }; + + if let Err(e) = listener.listen(DA_EVENT_CHANNEL).await { + warn!(error = ?e, channel = DA_EVENT_CHANNEL, "sse: failed to LISTEN for DA notifications"); + sleep(Duration::from_secs(1)).await; + continue; + } + + loop { + match listener.recv().await { + Ok(notification) => { + if let Ok(block_numbers) = + serde_json::from_str::>(notification.payload()) + { + let _ = tx.send(block_numbers); + } + } + Err(e) => { + warn!(error = ?e, "sse: DA Postgres listener disconnected"); + break; + } + } + } + + sleep(Duration::from_secs(1)).await; + } +} + async fn fetch_latest_block(pool: &PgPool) -> Result, sqlx::Error> { sqlx::query_as(&format!( "SELECT {} FROM blocks ORDER BY number DESC LIMIT 1", @@ -150,6 +231,20 @@ async fn fetch_blocks_after(pool: &PgPool, cursor: Option) -> Result Result, sqlx::Error> { + sqlx::query_as( + "SELECT block_number, header_da_height, data_da_height, updated_at + FROM block_da_status + WHERE block_number = ANY($1)", + ) + .bind(block_numbers) + .fetch_all(pool) + .await +} + fn block_to_event(block: Block) -> Option { let event = NewBlockEvent { block }; serde_json::to_string(&event) @@ -157,6 +252,37 @@ fn block_to_event(block: Block) -> Option { .map(|json| Event::default().event("new_block").data(json)) } +#[cfg(test)] +fn da_to_event(da: &BlockDaStatus) -> Option { + let event = DaUpdateEvent { + block_number: da.block_number, + header_da_height: da.header_da_height, + data_da_height: da.data_da_height, + }; + serde_json::to_string(&event) + .ok() + 
.map(|json| Event::default().event("da_update").data(json)) +} + +fn da_batch_to_event(rows: &[BlockDaStatus]) -> Option { + if rows.is_empty() { + return None; + } + let batch = DaBatchEvent { + updates: rows + .iter() + .map(|da| DaUpdateEvent { + block_number: da.block_number, + header_da_height: da.header_da_height, + data_da_height: da.data_da_height, + }) + .collect(), + }; + serde_json::to_string(&batch) + .ok() + .map(|json| Event::default().event("da_batch").data(json)) +} + #[cfg(test)] mod tests { use super::*; @@ -217,4 +343,37 @@ mod tests { ); } } + + #[test] + fn da_update_event_serializes_correctly() { + let event = DaUpdateEvent { + block_number: 42, + header_da_height: 8448334, + data_da_height: 8448335, + }; + let json = serde_json::to_string(&event).unwrap(); + let v: serde_json::Value = serde_json::from_str(&json).unwrap(); + + assert_eq!(v["block_number"], 42); + assert_eq!(v["header_da_height"], 8448334); + assert_eq!(v["data_da_height"], 8448335); + } + + #[test] + fn da_update_event_contains_all_fields() { + let event = DaUpdateEvent { + block_number: 1, + header_da_height: 0, + data_da_height: 0, + }; + let json = serde_json::to_string(&event).unwrap(); + let v: serde_json::Value = serde_json::from_str(&json).unwrap(); + + for field in ["block_number", "header_da_height", "data_da_height"] { + assert!( + v.get(field).is_some(), + "da_update JSON missing field: {field}" + ); + } + } } diff --git a/backend/crates/atlas-api/src/main.rs b/backend/crates/atlas-api/src/main.rs index 027365b..f096bad 100644 --- a/backend/crates/atlas-api/src/main.rs +++ b/backend/crates/atlas-api/src/main.rs @@ -18,6 +18,7 @@ mod handlers; pub struct AppState { pub pool: PgPool, pub block_events_tx: broadcast::Sender<()>, + pub da_events_tx: broadcast::Sender>, pub rpc_url: String, pub solc_path: String, pub admin_api_key: Option, @@ -58,6 +59,7 @@ async fn main() -> Result<()> { atlas_common::db::run_migrations(&database_url).await?; let (block_events_tx, _) = 
broadcast::channel(1024); + let (da_events_tx, _) = broadcast::channel(256); let evnode_url = std::env::var("EVNODE_URL").ok(); if evnode_url.is_some() { @@ -67,6 +69,7 @@ async fn main() -> Result<()> { let state = Arc::new(AppState { pool: pool.clone(), block_events_tx: block_events_tx.clone(), + da_events_tx: da_events_tx.clone(), rpc_url, solc_path, admin_api_key, @@ -75,10 +78,15 @@ async fn main() -> Result<()> { tokio::spawn(handlers::sse::run_block_event_fanout( database_url.clone(), - pool, + pool.clone(), block_events_tx, )); + tokio::spawn(handlers::sse::run_da_event_fanout( + database_url.clone(), + da_events_tx, + )); + // SSE route — excluded from TimeoutLayer so connections stay alive let sse_routes = Router::new() .route("/api/events", get(handlers::sse::block_events)) diff --git a/backend/crates/atlas-indexer/src/config.rs b/backend/crates/atlas-indexer/src/config.rs index b74166d..0402cf6 100644 --- a/backend/crates/atlas-indexer/src/config.rs +++ b/backend/crates/atlas-indexer/src/config.rs @@ -66,7 +66,7 @@ impl Config { .context("Invalid RPC_BATCH_SIZE")?, evnode_url: env::var("EVNODE_URL").ok(), da_worker_concurrency: env::var("DA_WORKER_CONCURRENCY") - .unwrap_or_else(|_| "10".to_string()) + .unwrap_or_else(|_| "50".to_string()) .parse() .context("Invalid DA_WORKER_CONCURRENCY")?, }) diff --git a/backend/crates/atlas-indexer/src/da_worker.rs b/backend/crates/atlas-indexer/src/da_worker.rs index c41ff9c..a897358 100644 --- a/backend/crates/atlas-indexer/src/da_worker.rs +++ b/backend/crates/atlas-indexer/src/da_worker.rs @@ -5,7 +5,7 @@ //! //! ## Two-phase design //! -//! The worker runs in a loop with two phases: +//! The worker runs in a loop with a fixed RPC budget per cycle (BATCH_SIZE): //! //! 1. **Backfill** — Discovers blocks in the `blocks` table that are missing from //! `block_da_status`. Queries ev-node for each and INSERTs the result. @@ -16,9 +16,16 @@ //! //! 2. 
**Update pending** — Finds rows where `header_da_height = 0 OR data_da_height = 0` //! and re-queries ev-node. Updates with new values when the block has been included. -//! Processes oldest pending blocks first. +//! Processes newest pending blocks first (most relevant to UI users). +//! +//! Both phases share the same per-cycle RPC budget. Backfill runs first and takes +//! what it needs; pending gets the remainder. This ensures new blocks are checked +//! promptly while pending blocks still make progress every cycle. //! //! A block flows: backfill (phase 1) → update-pending (phase 2) → done. +//! +//! After each batch, a PostgreSQL NOTIFY is sent on the `atlas_da_updates` channel +//! so the API's SSE handler can push live DA status changes to connected frontends. use anyhow::Result; use futures::stream::{self, StreamExt}; @@ -27,11 +34,14 @@ use std::time::Duration; use crate::evnode::EvnodeClient; -/// Batch size for DB queries per cycle. +/// Total RPC budget per cycle, split between backfill and pending. const BATCH_SIZE: i64 = 100; -/// Sleep between worker cycles. -const CYCLE_SLEEP: Duration = Duration::from_secs(2); +/// Sleep when idle (no work in either phase). +const IDLE_SLEEP: Duration = Duration::from_millis(500); + +/// PostgreSQL NOTIFY channel for DA status updates. 
+const DA_EVENT_CHANNEL: &str = "atlas_da_updates"; pub struct DaWorker { pool: PgPool, @@ -55,27 +65,50 @@ impl DaWorker { ); loop { - // Phase 1: discover and check new blocks (newest first) - let backfilled = self.backfill_new_blocks().await?; - - // Phase 2: retry blocks pending DA inclusion (oldest first) - let updated = self.update_pending_blocks().await?; - - if backfilled > 0 || updated > 0 { + // Phase 1: backfill gets first pick of the budget + let backfilled = self.backfill_new_blocks(BATCH_SIZE).await?; + + // Phase 2: pending gets whatever budget remains + let remaining = BATCH_SIZE - backfilled as i64; + let updated = if remaining > 0 { + self.update_pending_blocks(remaining).await? + } else { + 0 + }; + + let did_work = backfilled > 0 || updated > 0; + if did_work { tracing::info!( "DA worker cycle: backfilled {}, updated {} pending", backfilled, updated ); + } else { + tokio::time::sleep(IDLE_SLEEP).await; } + } + } - tokio::time::sleep(CYCLE_SLEEP).await; + /// Send a PostgreSQL NOTIFY with the block numbers that were updated, + /// so the API SSE handler can push live updates to frontends. + async fn notify_da_updates(&self, block_numbers: &[i64]) { + if block_numbers.is_empty() { + return; + } + let payload = serde_json::to_string(block_numbers).unwrap_or_default(); + if let Err(e) = sqlx::query("SELECT pg_notify($1, $2)") + .bind(DA_EVENT_CHANNEL) + .bind(&payload) + .execute(&self.pool) + .await + { + tracing::warn!("Failed to send DA update notification: {}", e); } } /// Phase 1: Find blocks missing from block_da_status and query ev-node. /// Returns the number of blocks processed. 
- async fn backfill_new_blocks(&self) -> Result { + async fn backfill_new_blocks(&self, limit: i64) -> Result { let missing: Vec<(i64,)> = sqlx::query_as( "SELECT b.number FROM blocks b LEFT JOIN block_da_status d ON d.block_number = b.number @@ -83,7 +116,7 @@ impl DaWorker { ORDER BY b.number DESC LIMIT $1", ) - .bind(BATCH_SIZE) + .bind(limit) .fetch_all(&self.pool) .await?; @@ -95,7 +128,7 @@ impl DaWorker { let pool = &self.pool; let client = &self.client; - stream::iter(missing) + let results: Vec> = stream::iter(missing) .map(|(block_number,)| async move { match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { @@ -118,7 +151,9 @@ impl DaWorker { block_number, e ); + return None; } + Some(block_number) } Err(e) => { tracing::warn!( @@ -126,26 +161,30 @@ impl DaWorker { block_number, e ); + None } } }) .buffer_unordered(self.concurrency) - .collect::>() + .collect() .await; + let updated_blocks: Vec = results.into_iter().flatten().collect(); + self.notify_da_updates(&updated_blocks).await; + Ok(count) } /// Phase 2: Re-check blocks where DA heights are still 0. /// Returns the number of blocks processed. 
- async fn update_pending_blocks(&self) -> Result { + async fn update_pending_blocks(&self, limit: i64) -> Result { let pending: Vec<(i64,)> = sqlx::query_as( "SELECT block_number FROM block_da_status WHERE header_da_height = 0 OR data_da_height = 0 - ORDER BY block_number ASC + ORDER BY block_number DESC LIMIT $1", ) - .bind(BATCH_SIZE) + .bind(limit) .fetch_all(&self.pool) .await?; @@ -157,7 +196,7 @@ impl DaWorker { let pool = &self.pool; let client = &self.client; - stream::iter(pending) + let results: Vec> = stream::iter(pending) .map(|(block_number,)| async move { match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { @@ -177,7 +216,9 @@ impl DaWorker { block_number, e ); + return None; } + Some(block_number) } Err(e) => { tracing::warn!( @@ -185,13 +226,17 @@ impl DaWorker { block_number, e ); + None } } }) .buffer_unordered(self.concurrency) - .collect::>() + .collect() .await; + let updated_blocks: Vec = results.into_iter().flatten().collect(); + self.notify_da_updates(&updated_blocks).await; + Ok(count) } } diff --git a/backend/crates/atlas-indexer/src/evnode.rs b/backend/crates/atlas-indexer/src/evnode.rs index a6acbbe..6da4ef1 100644 --- a/backend/crates/atlas-indexer/src/evnode.rs +++ b/backend/crates/atlas-indexer/src/evnode.rs @@ -93,9 +93,10 @@ where deserializer.deserialize_any(U64Visitor) } -/// Retry delays for ev-node RPC calls (in seconds). -const RETRY_DELAYS: &[u64] = &[2, 5, 10, 20, 30]; -const MAX_RETRIES: usize = 10; +/// Retry delays for ev-node RPC calls (in milliseconds). +/// Fail fast — the background loop will retry on the next cycle anyway. +const RETRY_DELAYS_MS: &[u64] = &[100, 500, 1000]; +const MAX_RETRIES: usize = 3; /// Client for ev-node's Connect RPC StoreService. 
/// @@ -116,7 +117,7 @@ impl EvnodeClient { /// * `evnode_url` — Base URL of the ev-node Connect RPC service (e.g., `http://localhost:7331`) pub fn new(evnode_url: &str) -> Self { let client = reqwest::Client::builder() - .timeout(Duration::from_secs(10)) + .timeout(Duration::from_secs(2)) .build() .expect("failed to create HTTP client"); @@ -147,21 +148,21 @@ impl EvnodeClient { match self.do_request(&url, height).await { Ok((h, d)) => return Ok((h, d)), Err(e) => { - let delay = RETRY_DELAYS + let delay_ms = RETRY_DELAYS_MS .get(attempt) .copied() - .unwrap_or(*RETRY_DELAYS.last().unwrap()); + .unwrap_or(*RETRY_DELAYS_MS.last().unwrap()); tracing::warn!( - "ev-node GetBlock failed for height {} (attempt {}): {}. Retrying in {}s", + "ev-node GetBlock failed for height {} (attempt {}): {}. Retrying in {}ms", height, attempt + 1, e, - delay, + delay_ms, ); last_error = Some(e); - tokio::time::sleep(Duration::from_secs(delay)).await; + tokio::time::sleep(Duration::from_millis(delay_ms)).await; } } } diff --git a/docker-compose.yml b/docker-compose.yml index ef604b8..0444929 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,7 +31,7 @@ services: RPC_REQUESTS_PER_SECOND: ${RPC_REQUESTS_PER_SECOND:-100} RPC_BATCH_SIZE: ${RPC_BATCH_SIZE:-20} EVNODE_URL: ${EVNODE_URL:-} - DA_WORKER_CONCURRENCY: ${DA_WORKER_CONCURRENCY:-10} + DA_WORKER_CONCURRENCY: ${DA_WORKER_CONCURRENCY:-50} RUST_LOG: atlas_indexer=info depends_on: postgres: diff --git a/frontend/src/components/Layout.tsx b/frontend/src/components/Layout.tsx index 2d3d63b..ac1af50 100644 --- a/frontend/src/components/Layout.tsx +++ b/frontend/src/components/Layout.tsx @@ -269,7 +269,7 @@ export default function Layout() { {/* Main content */}
- +
diff --git a/frontend/src/context/BlockStatsContext.tsx b/frontend/src/context/BlockStatsContext.tsx index e07a305..5939e4e 100644 --- a/frontend/src/context/BlockStatsContext.tsx +++ b/frontend/src/context/BlockStatsContext.tsx @@ -1,10 +1,11 @@ import { createContext } from 'react'; -import type { NewBlockEvent } from '../hooks/useBlockSSE'; +import type { NewBlockEvent, DaUpdateEvent } from '../hooks/useBlockSSE'; export interface BlockStats { bps: number | null; height: number | null; latestBlockEvent: NewBlockEvent | null; + latestDaUpdate: DaUpdateEvent | null; sseConnected: boolean; } @@ -12,6 +13,6 @@ export const BlockStatsContext = createContext({ bps: null, height: null, latestBlockEvent: null, + latestDaUpdate: null, sseConnected: false, }); - diff --git a/frontend/src/hooks/useBlockSSE.ts b/frontend/src/hooks/useBlockSSE.ts index a278133..74625cf 100644 --- a/frontend/src/hooks/useBlockSSE.ts +++ b/frontend/src/hooks/useBlockSSE.ts @@ -6,8 +6,19 @@ export interface NewBlockEvent { block: Block; } +export interface DaUpdateEvent { + block_number: number; + header_da_height: number; + data_da_height: number; +} + +export interface DaBatchEvent { + updates: DaUpdateEvent[]; +} + export interface BlockSSEState { latestBlock: NewBlockEvent | null; + latestDaUpdate: DaUpdateEvent | null; height: number | null; connected: boolean; error: string | null; @@ -68,6 +79,7 @@ function getDrainInterval(baseInterval: number, queueLength: number): number { */ export default function useBlockSSE(): BlockSSEState { const [latestBlock, setLatestBlock] = useState(null); + const [latestDaUpdate, setLatestDaUpdate] = useState(null); const [height, setHeight] = useState(null); const [connected, setConnected] = useState(false); const [error, setError] = useState(null); @@ -171,6 +183,20 @@ export default function useBlockSSE(): BlockSSEState { } }); + es.addEventListener('da_batch', (e: MessageEvent) => { + try { + const data: DaBatchEvent = JSON.parse(e.data); + if 
(data.updates?.length) { + // Apply the last update — the batch is applied all at once in the + // consuming components via the daOverrides map, so we just need to + // trigger a state change. We store the full batch for consumers. + setLatestDaUpdate(data.updates[data.updates.length - 1]); + } + } catch { + // Ignore malformed events + } + }); + es.onerror = (e) => { setConnected(false); setError(`SSE ${e.type || 'error'}; retrying`); @@ -232,5 +258,5 @@ export default function useBlockSSE(): BlockSSEState { }; }, [connect]); - return { latestBlock, height, connected, error, bps }; + return { latestBlock, latestDaUpdate, height, connected, error, bps }; } diff --git a/frontend/src/index.css b/frontend/src/index.css index d6f3cd8..4245ff5 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -168,6 +168,17 @@ 100% { opacity: 0.55; } } +/* DA dot pulse when status changes via SSE */ +.animate-da-pulse { + animation: daPulse 1.5s ease-out; +} + +@keyframes daPulse { + 0% { transform: scale(2.2); opacity: 0.5; box-shadow: 0 0 8px currentColor; } + 40% { transform: scale(1.4); opacity: 0.8; } + 100% { transform: scale(1); opacity: 1; box-shadow: none; } +} + /* Smooth counter appearance on change */ .fade-in-up { animation: fadeInUp 280ms cubic-bezier(0.22, 1, 0.36, 1); diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 62d0edd..3e967d6 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -2,8 +2,10 @@ import { useParams, Link } from 'react-router-dom'; import { useBlock, useBlockTransactions, useFeatures } from '../hooks'; import { CopyButton, Loading, AddressLink, TxHashLink, StatusBadge } from '../components'; import { formatNumber, formatTimestamp, formatGas, truncateHash, formatTimeAgo, formatEther } from '../utils'; -import { useState } from 'react'; +import { useContext, useEffect, useState } from 'react'; import type { ReactNode } from 'react'; +import { 
BlockStatsContext } from '../context/BlockStatsContext'; +import type { BlockDaStatus } from '../types'; /** Format a DA height as a status indicator. */ function formatDaStatus(daHeight: number): ReactNode { @@ -30,6 +32,25 @@ export default function BlockDetailPage() { const features = useFeatures(); const [txPage, setTxPage] = useState(1); const { transactions, pagination, loading } = useBlockTransactions(blockNumber, { page: txPage, limit: 20 }); + const { latestDaUpdate } = useContext(BlockStatsContext); + const [daOverride, setDaOverride] = useState(null); + + // Apply live DA status updates from SSE + useEffect(() => { + if (latestDaUpdate && latestDaUpdate.block_number === blockNumber) { + setDaOverride({ + block_number: latestDaUpdate.block_number, + header_da_height: latestDaUpdate.header_da_height, + data_da_height: latestDaUpdate.data_da_height, + updated_at: new Date().toISOString(), + }); + } + }, [latestDaUpdate, blockNumber]); + + // Reset override when navigating to a different block + useEffect(() => { + setDaOverride(null); + }, [blockNumber]); type DetailRow = { label: string; value: ReactNode; stacked?: boolean }; const details: DetailRow[] = block ? [ @@ -64,20 +85,19 @@ export default function BlockDetailPage() { { label: 'Gas Used', value: formatGas(block.gas_used.toString()) }, { label: 'Gas Limit', value: formatGas(block.gas_limit.toString()) }, // DA status rows — only shown when da_tracking feature is enabled - ...(features.da_tracking ? [ - { - label: 'Header DA', - value: block.da_status - ? formatDaStatus(block.da_status.header_da_height) - : Awaiting check..., - }, - { - label: 'Data DA', - value: block.da_status - ? formatDaStatus(block.da_status.data_da_height) - : Awaiting check..., - }, - ] as DetailRow[] : []), + ...(features.da_tracking ? (() => { + const daStatus = daOverride ?? block.da_status; + return [ + { + label: 'Header DA', + value: formatDaStatus(daStatus?.header_da_height ?? 
0), + }, + { + label: 'Data DA', + value: formatDaStatus(daStatus?.data_da_height ?? 0), + }, + ]; + })() as DetailRow[] : []), ] : [ { label: 'Block Height', value: '---' }, { label: 'Timestamp', value: '---' }, diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 56acc78..7873f96 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -4,6 +4,7 @@ import { useBlocks, useFeatures } from '../hooks'; import { CopyButton, Loading } from '../components'; import { formatNumber, formatTimeAgo, formatGas, truncateHash } from '../utils'; import { BlockStatsContext } from '../context/BlockStatsContext'; +import type { BlockDaStatus } from '../types'; export default function BlocksPage() { const [page, setPage] = useState(1); @@ -15,10 +16,13 @@ export default function BlocksPage() { return true; } }); - const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: 20 }); + const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: 100 }); const features = useFeatures(); const hasLoaded = !loading || pagination !== null; - const { latestBlockEvent, sseConnected } = useContext(BlockStatsContext); + const { latestBlockEvent, latestDaUpdate, sseConnected } = useContext(BlockStatsContext); + const [daOverrides, setDaOverrides] = useState>(new Map()); + const [daHighlight, setDaHighlight] = useState>(new Set()); + const daHighlightTimeoutsRef = useRef>(new Map()); const [sseBlocks, setSseBlocks] = useState([]); const lastSseBlockRef = useRef(null); const ssePrependRafRef = useRef(null); @@ -53,7 +57,7 @@ export default function BlocksPage() { prepend.push(b); } prepend.reverse(); - return [...prepend, ...prev].slice(0, 20); + return [...prepend, ...prev].slice(0, 100); }); ssePrependRafRef.current = null; }); @@ -76,8 +80,45 @@ export default function BlocksPage() { const blocks = useMemo(() => { if (page !== 1 || !sseBlocks.length) return fetchedBlocks; 
const unique = sseBlocks.filter((b) => !fetchedNumberSet.has(b.number)); - return [...unique, ...fetchedBlocks].slice(0, 20); + return [...unique, ...fetchedBlocks].slice(0, 100); }, [fetchedBlocks, fetchedNumberSet, sseBlocks, page]); + + // Apply live DA status updates from SSE (drip-fed one at a time) + useEffect(() => { + if (!latestDaUpdate) return; + const bn = latestDaUpdate.block_number; + setDaOverrides(prev => { + const next = new Map(prev); + next.set(bn, { + block_number: bn, + header_da_height: latestDaUpdate.header_da_height, + data_da_height: latestDaUpdate.data_da_height, + updated_at: new Date().toISOString(), + }); + return next; + }); + // Flash the dot for 1.5s + setDaHighlight(prev => new Set(prev).add(bn)); + const prev = daHighlightTimeoutsRef.current.get(bn); + if (prev !== undefined) clearTimeout(prev); + const t = window.setTimeout(() => { + setDaHighlight(p => { + const next = new Set(p); + next.delete(bn); + return next; + }); + daHighlightTimeoutsRef.current.delete(bn); + }, 1500); + daHighlightTimeoutsRef.current.set(bn, t); + }, [latestDaUpdate]); + + // Clear DA overrides when fetched data refreshes (it now includes the updates) + useEffect(() => { + if (fetchedBlocks.length) { + setDaOverrides(new Map()); + } + }, [fetchedBlocks]); + const navigate = useNavigate(); const [sort, setSort] = useState<{ key: 'number' | 'hash' | 'timestamp' | 'transaction_count' | 'gas_used' | null; direction: 'asc' | 'desc'; }>({ key: null, direction: 'desc' }); const seenBlocksRef = useRef>(new Set()); @@ -219,6 +260,8 @@ export default function BlocksPage() { } for (const [, t] of activeTimeouts) clearTimeout(t); activeTimeouts.clear(); + for (const [, t] of daHighlightTimeoutsRef.current) clearTimeout(t); + daHighlightTimeoutsRef.current.clear(); }; }, []); @@ -364,19 +407,20 @@ export default function BlocksPage() { {formatGas(block.gas_used.toString())} - {features.da_tracking && ( - - {block.da_status ? 
( - block.da_status.header_da_height > 0 && block.da_status.data_da_height > 0 ? ( - + {features.da_tracking && (() => { + const daStatus = daOverrides.get(block.number) ?? block.da_status; + const flash = daHighlight.has(block.number); + const included = daStatus && daStatus.header_da_height > 0 && daStatus.data_da_height > 0; + return ( + + {included ? ( + ) : ( - - ) - ) : ( - - )} - - )} + + )} + + ); + })()} ))} From acb82177690a58dea8960a71686a3ed6c390d836 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:01:14 +0100 Subject: [PATCH 05/20] refactor: simplify ev-node client to JSON-only Remove protobuf support (prost dependency) from the ev-node client. ev-node's Connect RPC supports both JSON and protobuf natively, so the simpler JSON-only path is sufficient. Also removes unused da_to_event. --- backend/Cargo.toml | 3 - backend/crates/atlas-api/src/handlers/sse.rs | 12 -- backend/crates/atlas-indexer/Cargo.toml | 1 - backend/crates/atlas-indexer/src/evnode.rs | 198 +++---------------- 4 files changed, 27 insertions(+), 187 deletions(-) diff --git a/backend/Cargo.toml b/backend/Cargo.toml index e5fd8d4..fb250d6 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -49,9 +49,6 @@ tokio-stream = "0.1" futures = "0.3" async-stream = "0.3" -# Protobuf -prost = "0.13" - # Utilities bigdecimal = { version = "0.4", features = ["serde"] } hex = "0.4" diff --git a/backend/crates/atlas-api/src/handlers/sse.rs b/backend/crates/atlas-api/src/handlers/sse.rs index d1c24c2..fa539f2 100644 --- a/backend/crates/atlas-api/src/handlers/sse.rs +++ b/backend/crates/atlas-api/src/handlers/sse.rs @@ -252,18 +252,6 @@ fn block_to_event(block: Block) -> Option { .map(|json| Event::default().event("new_block").data(json)) } -#[cfg(test)] -fn da_to_event(da: &BlockDaStatus) -> Option { - let event = DaUpdateEvent { - block_number: da.block_number, - header_da_height: da.header_da_height, - data_da_height: da.data_da_height, 
- }; - serde_json::to_string(&event) - .ok() - .map(|json| Event::default().event("da_update").data(json)) -} - fn da_batch_to_event(rows: &[BlockDaStatus]) -> Option { if rows.is_empty() { return None; diff --git a/backend/crates/atlas-indexer/Cargo.toml b/backend/crates/atlas-indexer/Cargo.toml index 4f3b1c5..4f9cf73 100644 --- a/backend/crates/atlas-indexer/Cargo.toml +++ b/backend/crates/atlas-indexer/Cargo.toml @@ -24,7 +24,6 @@ bigdecimal = { workspace = true } num-bigint = "0.4" hex = { workspace = true } chrono = { workspace = true } -prost = { workspace = true } futures = { workspace = true } async-channel = "2.3" governor = "0.6" diff --git a/backend/crates/atlas-indexer/src/evnode.rs b/backend/crates/atlas-indexer/src/evnode.rs index 6da4ef1..0e69a86 100644 --- a/backend/crates/atlas-indexer/src/evnode.rs +++ b/backend/crates/atlas-indexer/src/evnode.rs @@ -4,65 +4,25 @@ //! consensus/DA layer data separate from the standard EVM JSON-RPC API. //! This module wraps the `GetBlock` RPC to extract DA inclusion heights. //! -//! Connect RPC supports two serialization modes: -//! - **Protobuf** (`application/proto`) — binary, more efficient -//! - **JSON** (`application/json`) — text, required by some deployments -//! -//! The client auto-detects the correct mode: it starts with protobuf and -//! transparently switches to JSON if the server returns 415 Unsupported -//! Media Type. Once switched, all subsequent requests use JSON. +//! Uses the Connect RPC JSON codec (`application/json`), which ev-node +//! supports out of the box alongside protobuf. use anyhow::{bail, Result}; -use prost::Message; use serde::{Deserialize, Serialize}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; -// --------------------------------------------------------------------------- -// Protobuf message types (matching ev-node proto/evnode/v1/state_rpc.proto) -// -// We define only the minimal types needed to decode GetBlockResponse. 
-// The GetBlockResponse has top-level fields for DA heights (tags 2 and 3), -// so we don't need to navigate into nested Block/Header/Data messages. -// --------------------------------------------------------------------------- - -/// Request message for StoreService.GetBlock. -/// Field 1: block height (uint64). -#[derive(Clone, PartialEq, Message)] -pub struct GetBlockRequest { - #[prost(uint64, tag = "1")] - pub height: u64, -} - -/// Response message for StoreService.GetBlock (minimal). -/// We only decode the DA height fields, ignoring the full Block message. -#[derive(Clone, PartialEq, Message)] -pub struct GetBlockResponse { - // Field 1 (Block) is skipped — we don't need block contents for DA status. - - /// Celestia height where the block header was submitted. - /// 0 means not yet submitted. - #[prost(uint64, tag = "2")] - pub header_da_height: u64, - - /// Celestia height where the block data was submitted. - /// 0 means not yet submitted. - #[prost(uint64, tag = "3")] - pub data_da_height: u64, -} - -// --------------------------------------------------------------------------- -// JSON types for Connect RPC JSON mode -// --------------------------------------------------------------------------- - +/// Connect RPC JSON request for StoreService.GetBlock. +/// uint64 fields are encoded as strings per Connect RPC convention. #[derive(Serialize)] -struct JsonGetBlockRequest { - height: String, // Connect RPC encodes uint64 as string in JSON +struct GetBlockRequest { + height: String, } +/// Connect RPC JSON response for StoreService.GetBlock. +/// We only extract the DA height fields. 
#[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct JsonGetBlockResponse { +struct GetBlockResponse { #[serde(default, deserialize_with = "deserialize_u64_string")] header_da_height: u64, #[serde(default, deserialize_with = "deserialize_u64_string")] @@ -99,34 +59,23 @@ const RETRY_DELAYS_MS: &[u64] = &[100, 500, 1000]; const MAX_RETRIES: usize = 3; /// Client for ev-node's Connect RPC StoreService. -/// -/// Supports both protobuf and JSON serialization modes. The mode is -/// auto-detected on the first request: if the server rejects protobuf -/// with HTTP 415, the client switches to JSON for all future requests. pub struct EvnodeClient { client: reqwest::Client, - base_url: String, - /// When true, use JSON mode instead of protobuf. - use_json: AtomicBool, + url: String, } impl EvnodeClient { /// Create a new client pointing at the given ev-node Connect RPC URL. - /// - /// # Arguments - /// * `evnode_url` — Base URL of the ev-node Connect RPC service (e.g., `http://localhost:7331`) pub fn new(evnode_url: &str) -> Self { let client = reqwest::Client::builder() .timeout(Duration::from_secs(2)) .build() .expect("failed to create HTTP client"); + let base = evnode_url.trim_end_matches('/'); Self { client, - base_url: evnode_url.trim_end_matches('/').to_string(), - // Default to JSON mode — it's universally supported by ev-node - // and avoids protobuf decoding issues with our minimal struct. - use_json: AtomicBool::new(true), + url: format!("{base}/evnode.v1.StoreService/GetBlock"), } } @@ -135,17 +84,12 @@ impl EvnodeClient { /// Returns `(header_da_height, data_da_height)`. /// Both are 0 if the block has not yet been submitted to Celestia. /// - /// Retries with exponential backoff on transient errors. + /// Retries with backoff on transient errors. 
pub async fn get_da_status(&self, height: u64) -> Result<(u64, u64)> { - let url = format!( - "{}/evnode.v1.StoreService/GetBlock", - self.base_url - ); - let mut last_error = None; for attempt in 0..MAX_RETRIES { - match self.do_request(&url, height).await { + match self.do_request(height).await { Ok((h, d)) => return Ok((h, d)), Err(e) => { let delay_ms = RETRY_DELAYS_MS @@ -175,56 +119,14 @@ impl EvnodeClient { ) } - /// Send a Connect RPC request, auto-detecting proto vs JSON mode. - /// - /// On HTTP 415 (Unsupported Media Type) when using protobuf, switches - /// to JSON mode and retries the request immediately. - async fn do_request(&self, url: &str, height: u64) -> Result<(u64, u64)> { - if self.use_json.load(Ordering::Relaxed) { - return self.do_json_request(url, height).await; - } - - // Try protobuf first - let request = GetBlockRequest { height }; - let body = request.encode_to_vec(); - - let response = self - .client - .post(url) - .header("Content-Type", "application/proto") - .body(body) - .send() - .await?; - - // If server requires JSON, switch modes and retry - if response.status() == reqwest::StatusCode::UNSUPPORTED_MEDIA_TYPE { - tracing::info!("ev-node requires JSON mode, switching from protobuf"); - self.use_json.store(true, Ordering::Relaxed); - return self.do_json_request(url, height).await; - } - - if !response.status().is_success() { - bail!( - "HTTP {}: {}", - response.status(), - response.text().await.unwrap_or_default() - ); - } - - let bytes = response.bytes().await?; - let resp = GetBlockResponse::decode(bytes.as_ref())?; - Ok((resp.header_da_height, resp.data_da_height)) - } - - /// Send a Connect RPC request using JSON serialization. 
- async fn do_json_request(&self, url: &str, height: u64) -> Result<(u64, u64)> { - let request = JsonGetBlockRequest { + async fn do_request(&self, height: u64) -> Result<(u64, u64)> { + let request = GetBlockRequest { height: height.to_string(), }; let response = self .client - .post(url) + .post(&self.url) .header("Content-Type", "application/json") .json(&request) .send() @@ -238,7 +140,7 @@ impl EvnodeClient { ); } - let resp: JsonGetBlockResponse = response.json().await?; + let resp: GetBlockResponse = response.json().await?; Ok((resp.header_da_height, resp.data_da_height)) } } @@ -247,61 +149,15 @@ impl EvnodeClient { mod tests { use super::*; - #[test] - fn encode_decode_get_block_request() { - let req = GetBlockRequest { height: 42 }; - let bytes = req.encode_to_vec(); - let decoded = GetBlockRequest::decode(bytes.as_slice()).unwrap(); - assert_eq!(decoded.height, 42); - } - - #[test] - fn encode_decode_get_block_response() { - let resp = GetBlockResponse { - header_da_height: 100, - data_da_height: 200, - }; - let bytes = resp.encode_to_vec(); - let decoded = GetBlockResponse::decode(bytes.as_slice()).unwrap(); - assert_eq!(decoded.header_da_height, 100); - assert_eq!(decoded.data_da_height, 200); - } - - #[test] - fn decode_response_with_zeros() { - let resp = GetBlockResponse { - header_da_height: 0, - data_da_height: 0, - }; - let bytes = resp.encode_to_vec(); - let decoded = GetBlockResponse::decode(bytes.as_slice()).unwrap(); - assert_eq!(decoded.header_da_height, 0); - assert_eq!(decoded.data_da_height, 0); - } - - #[test] - fn decode_empty_response_defaults_to_zeros() { - // An empty protobuf message should decode with default (zero) values - let decoded = GetBlockResponse::decode(&[] as &[u8]).unwrap(); - assert_eq!(decoded.header_da_height, 0); - assert_eq!(decoded.data_da_height, 0); - } - #[test] fn client_trims_trailing_slash() { let client = EvnodeClient::new("http://localhost:7331/"); - assert_eq!(client.base_url, "http://localhost:7331"); - 
} - - #[test] - fn client_starts_in_json_mode() { - let client = EvnodeClient::new("http://localhost:7331"); - assert!(client.use_json.load(Ordering::Relaxed)); + assert_eq!(client.url, "http://localhost:7331/evnode.v1.StoreService/GetBlock"); } #[test] - fn json_request_serializes_height_as_string() { - let req = JsonGetBlockRequest { + fn request_serializes_height_as_string() { + let req = GetBlockRequest { height: 42.to_string(), }; let json = serde_json::to_string(&req).unwrap(); @@ -309,25 +165,25 @@ mod tests { } #[test] - fn json_response_deserializes_string_heights() { + fn response_deserializes_string_heights() { let json = r#"{"headerDaHeight":"100","dataDaHeight":"200"}"#; - let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + let resp: GetBlockResponse = serde_json::from_str(json).unwrap(); assert_eq!(resp.header_da_height, 100); assert_eq!(resp.data_da_height, 200); } #[test] - fn json_response_deserializes_numeric_heights() { + fn response_deserializes_numeric_heights() { let json = r#"{"headerDaHeight":100,"dataDaHeight":200}"#; - let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + let resp: GetBlockResponse = serde_json::from_str(json).unwrap(); assert_eq!(resp.header_da_height, 100); assert_eq!(resp.data_da_height, 200); } #[test] - fn json_response_defaults_missing_fields_to_zero() { + fn response_defaults_missing_fields_to_zero() { let json = r#"{}"#; - let resp: JsonGetBlockResponse = serde_json::from_str(json).unwrap(); + let resp: GetBlockResponse = serde_json::from_str(json).unwrap(); assert_eq!(resp.header_da_height, 0); assert_eq!(resp.data_da_height, 0); } From 6a67158229c91f82dc2598467ceeb07a1c8bb402 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:02:54 +0100 Subject: [PATCH 06/20] chore: remove bloat serialization field-presence tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove 
new_block_event_contains_all_block_fields and da_update_event_contains_all_fields — they only assert that #[derive(Serialize)] includes struct fields, adding no real value. --- backend/crates/atlas-api/src/handlers/sse.rs | 43 -------------------- 1 file changed, 43 deletions(-) diff --git a/backend/crates/atlas-api/src/handlers/sse.rs b/backend/crates/atlas-api/src/handlers/sse.rs index fa539f2..bf79639 100644 --- a/backend/crates/atlas-api/src/handlers/sse.rs +++ b/backend/crates/atlas-api/src/handlers/sse.rs @@ -306,32 +306,6 @@ mod tests { assert_eq!(v["block"]["transaction_count"], 1); } - #[test] - fn new_block_event_contains_all_block_fields() { - let event = NewBlockEvent { - block: sample_block(1), - }; - let json = serde_json::to_string(&event).unwrap(); - let v: serde_json::Value = serde_json::from_str(&json).unwrap(); - let block = &v["block"]; - - for field in [ - "number", - "hash", - "parent_hash", - "timestamp", - "gas_used", - "gas_limit", - "transaction_count", - "indexed_at", - ] { - assert!( - block.get(field).is_some(), - "block JSON missing field: {field}" - ); - } - } - #[test] fn da_update_event_serializes_correctly() { let event = DaUpdateEvent { @@ -347,21 +321,4 @@ mod tests { assert_eq!(v["data_da_height"], 8448335); } - #[test] - fn da_update_event_contains_all_fields() { - let event = DaUpdateEvent { - block_number: 1, - header_da_height: 0, - data_da_height: 0, - }; - let json = serde_json::to_string(&event).unwrap(); - let v: serde_json::Value = serde_json::from_str(&json).unwrap(); - - for field in ["block_number", "header_da_height", "data_da_height"] { - assert!( - v.get(field).is_some(), - "da_update JSON missing field: {field}" - ); - } - } } From cf12d18a6e66ff4bea235d690bc69b34f3b7fb2f Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:05:02 +0100 Subject: [PATCH 07/20] chore: remove SSE serialization tests These only verified that #[derive(Serialize)] works on plain 
structs. The evnode tests are kept since they cover custom deserializer logic. --- backend/crates/atlas-api/src/handlers/sse.rs | 54 +------------------- 1 file changed, 1 insertion(+), 53 deletions(-) diff --git a/backend/crates/atlas-api/src/handlers/sse.rs b/backend/crates/atlas-api/src/handlers/sse.rs index bf79639..5bf837c 100644 --- a/backend/crates/atlas-api/src/handlers/sse.rs +++ b/backend/crates/atlas-api/src/handlers/sse.rs @@ -269,56 +269,4 @@ fn da_batch_to_event(rows: &[BlockDaStatus]) -> Option { serde_json::to_string(&batch) .ok() .map(|json| Event::default().event("da_batch").data(json)) -} - -#[cfg(test)] -mod tests { - use super::*; - use chrono::Utc; - - fn sample_block(number: i64) -> Block { - Block { - number, - hash: format!("0x{:064x}", number), - parent_hash: format!("0x{:064x}", number.saturating_sub(1)), - timestamp: 1_700_000_000 + number, - gas_used: 21_000, - gas_limit: 30_000_000, - transaction_count: 1, - indexed_at: Utc::now(), - } - } - - #[test] - fn new_block_event_serializes_with_block_wrapper() { - let event = NewBlockEvent { - block: sample_block(42), - }; - let json = serde_json::to_string(&event).unwrap(); - let v: serde_json::Value = serde_json::from_str(&json).unwrap(); - - assert!( - v.get("block").is_some(), - "event JSON must contain a 'block' key" - ); - assert_eq!(v["block"]["number"], 42); - assert_eq!(v["block"]["gas_used"], 21_000); - assert_eq!(v["block"]["transaction_count"], 1); - } - - #[test] - fn da_update_event_serializes_correctly() { - let event = DaUpdateEvent { - block_number: 42, - header_da_height: 8448334, - data_da_height: 8448335, - }; - let json = serde_json::to_string(&event).unwrap(); - let v: serde_json::Value = serde_json::from_str(&json).unwrap(); - - assert_eq!(v["block_number"], 42); - assert_eq!(v["header_da_height"], 8448334); - assert_eq!(v["data_da_height"], 8448335); - } - -} +} \ No newline at end of file From 3e4980f1823f60a77aad34b1d939ea03728bf06f Mon Sep 17 00:00:00 2001 From: 
pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:12:11 +0100 Subject: [PATCH 08/20] fix: resolve CI lint and format failures - Run cargo fmt on all backend crates - Fix react-hooks/set-state-in-effect errors by adding subscribeDa callback pattern to SSE hook (setState in subscription callbacks is allowed, synchronous setState in effect bodies is not) - Fix react-hooks/refs error by keeping daOverrides/daHighlight as state instead of refs - Fix react-hooks/exhaustive-deps by using drainOneRef indirection - Fix ref cleanup warning by capturing daHighlightTimeoutsRef.current in a local variable before the cleanup function - Derive daOverride in BlockDetailPage via useMemo instead of useState + useEffect --- .../crates/atlas-api/src/handlers/blocks.rs | 2 +- backend/crates/atlas-api/src/handlers/sse.rs | 7 +- backend/crates/atlas-indexer/src/da_worker.rs | 5 +- backend/crates/atlas-indexer/src/evnode.rs | 5 +- backend/crates/atlas-indexer/src/main.rs | 3 +- frontend/src/components/Layout.tsx | 2 +- frontend/src/context/BlockStatsContext.tsx | 6 +- frontend/src/hooks/useBlockSSE.ts | 19 +++-- frontend/src/pages/BlockDetailPage.tsx | 18 ++--- frontend/src/pages/BlocksPage.tsx | 71 +++++++++---------- 10 files changed, 70 insertions(+), 68 deletions(-) diff --git a/backend/crates/atlas-api/src/handlers/blocks.rs b/backend/crates/atlas-api/src/handlers/blocks.rs index 98ec09a..affa071 100644 --- a/backend/crates/atlas-api/src/handlers/blocks.rs +++ b/backend/crates/atlas-api/src/handlers/blocks.rs @@ -53,7 +53,7 @@ pub async fn list_blocks( let da_rows: Vec = sqlx::query_as( "SELECT block_number, header_da_height, data_da_height, updated_at FROM block_da_status - WHERE block_number = ANY($1)" + WHERE block_number = ANY($1)", ) .bind(&block_numbers) .fetch_all(&state.pool) diff --git a/backend/crates/atlas-api/src/handlers/sse.rs b/backend/crates/atlas-api/src/handlers/sse.rs index 5bf837c..0fcd2ce 100644 --- 
a/backend/crates/atlas-api/src/handlers/sse.rs +++ b/backend/crates/atlas-api/src/handlers/sse.rs @@ -170,10 +170,7 @@ pub async fn run_block_event_fanout( } } -pub async fn run_da_event_fanout( - database_url: String, - tx: broadcast::Sender>, -) { +pub async fn run_da_event_fanout(database_url: String, tx: broadcast::Sender>) { loop { let mut listener = match PgListener::connect(&database_url).await { Ok(listener) => listener, @@ -269,4 +266,4 @@ fn da_batch_to_event(rows: &[BlockDaStatus]) -> Option { serde_json::to_string(&batch) .ok() .map(|json| Event::default().event("da_batch").data(json)) -} \ No newline at end of file +} diff --git a/backend/crates/atlas-indexer/src/da_worker.rs b/backend/crates/atlas-indexer/src/da_worker.rs index a897358..60afe94 100644 --- a/backend/crates/atlas-indexer/src/da_worker.rs +++ b/backend/crates/atlas-indexer/src/da_worker.rs @@ -59,10 +59,7 @@ impl DaWorker { } pub async fn run(&self) -> Result<()> { - tracing::info!( - "DA worker started (concurrency: {})", - self.concurrency - ); + tracing::info!("DA worker started (concurrency: {})", self.concurrency); loop { // Phase 1: backfill gets first pick of the budget diff --git a/backend/crates/atlas-indexer/src/evnode.rs b/backend/crates/atlas-indexer/src/evnode.rs index 0e69a86..2a37509 100644 --- a/backend/crates/atlas-indexer/src/evnode.rs +++ b/backend/crates/atlas-indexer/src/evnode.rs @@ -152,7 +152,10 @@ mod tests { #[test] fn client_trims_trailing_slash() { let client = EvnodeClient::new("http://localhost:7331/"); - assert_eq!(client.url, "http://localhost:7331/evnode.v1.StoreService/GetBlock"); + assert_eq!( + client.url, + "http://localhost:7331/evnode.v1.StoreService/GetBlock" + ); } #[test] diff --git a/backend/crates/atlas-indexer/src/main.rs b/backend/crates/atlas-indexer/src/main.rs index 016b728..6bf4a8e 100644 --- a/backend/crates/atlas-indexer/src/main.rs +++ b/backend/crates/atlas-indexer/src/main.rs @@ -63,8 +63,7 @@ async fn main() -> Result<()> { let 
da_concurrency = config.da_worker_concurrency; tokio::spawn(async move { run_with_retry(|| async { - let worker = - da_worker::DaWorker::new(da_pool.clone(), &da_url, da_concurrency)?; + let worker = da_worker::DaWorker::new(da_pool.clone(), &da_url, da_concurrency)?; worker.run().await }) .await diff --git a/frontend/src/components/Layout.tsx b/frontend/src/components/Layout.tsx index ac1af50..1763f89 100644 --- a/frontend/src/components/Layout.tsx +++ b/frontend/src/components/Layout.tsx @@ -269,7 +269,7 @@ export default function Layout() { {/* Main content */}
- +
diff --git a/frontend/src/context/BlockStatsContext.tsx b/frontend/src/context/BlockStatsContext.tsx index 5939e4e..382c831 100644 --- a/frontend/src/context/BlockStatsContext.tsx +++ b/frontend/src/context/BlockStatsContext.tsx @@ -1,5 +1,5 @@ import { createContext } from 'react'; -import type { NewBlockEvent, DaUpdateEvent } from '../hooks/useBlockSSE'; +import type { NewBlockEvent, DaUpdateEvent, DaSubscriber } from '../hooks/useBlockSSE'; export interface BlockStats { bps: number | null; @@ -7,12 +7,16 @@ export interface BlockStats { latestBlockEvent: NewBlockEvent | null; latestDaUpdate: DaUpdateEvent | null; sseConnected: boolean; + subscribeDa: (cb: DaSubscriber) => () => void; } +const noopSubscribe = () => () => {}; + export const BlockStatsContext = createContext({ bps: null, height: null, latestBlockEvent: null, latestDaUpdate: null, sseConnected: false, + subscribeDa: noopSubscribe, }); diff --git a/frontend/src/hooks/useBlockSSE.ts b/frontend/src/hooks/useBlockSSE.ts index 74625cf..e9b3262 100644 --- a/frontend/src/hooks/useBlockSSE.ts +++ b/frontend/src/hooks/useBlockSSE.ts @@ -16,6 +16,8 @@ export interface DaBatchEvent { updates: DaUpdateEvent[]; } +export type DaSubscriber = (updates: DaUpdateEvent[]) => void; + export interface BlockSSEState { latestBlock: NewBlockEvent | null; latestDaUpdate: DaUpdateEvent | null; @@ -23,6 +25,7 @@ export interface BlockSSEState { connected: boolean; error: string | null; bps: number | null; + subscribeDa: (cb: DaSubscriber) => () => void; } type BlockLog = { num: number; ts: number }[]; @@ -84,6 +87,11 @@ export default function useBlockSSE(): BlockSSEState { const [connected, setConnected] = useState(false); const [error, setError] = useState(null); const [bps, setBps] = useState(null); + const daSubscribersRef = useRef>(new Set()); + const subscribeDa = useCallback((cb: DaSubscriber) => { + daSubscribersRef.current.add(cb); + return () => { daSubscribersRef.current.delete(cb); }; + }, []); const esRef = 
useRef(null); const reconnectTimeoutRef = useRef(null); const queueRef = useRef([]); @@ -96,6 +104,7 @@ export default function useBlockSSE(): BlockSSEState { const blockLogRef = useRef([]); // Cached drain interval in ms, derived from chain block timestamps const drainIntervalRef = useRef(90); // initial guess ~11 bps + const drainOneRef = useRef<() => void>(() => {}); const scheduleDrain = useCallback((fromBurstStart: boolean) => { if (drainTimerRef.current !== null || queueRef.current.length === 0) return; @@ -127,7 +136,7 @@ export default function useBlockSSE(): BlockSSEState { delay = Math.max(delay, burstLeadIn); } - drainTimerRef.current = window.setTimeout(drainOne, delay); + drainTimerRef.current = window.setTimeout(() => drainOneRef.current(), delay); }, []); // Kick the drain loop when new items arrive. @@ -187,10 +196,8 @@ export default function useBlockSSE(): BlockSSEState { try { const data: DaBatchEvent = JSON.parse(e.data); if (data.updates?.length) { - // Apply the last update — the batch is applied all at once in the - // consuming components via the daOverrides map, so we just need to - // trigger a state change. We store the full batch for consumers. setLatestDaUpdate(data.updates[data.updates.length - 1]); + for (const cb of daSubscribersRef.current) cb(data.updates); } } catch { // Ignore malformed events @@ -214,6 +221,8 @@ export default function useBlockSSE(): BlockSSEState { }, [kickDrain]); // Drain one block from the queue at the chain's natural cadence. + // Assigned to drainOneRef so scheduleDrain can call it without a circular dependency. 
+ drainOneRef.current = drainOne; function drainOne() { const queue = queueRef.current; drainTimerRef.current = null; @@ -258,5 +267,5 @@ export default function useBlockSSE(): BlockSSEState { }; }, [connect]); - return { latestBlock, latestDaUpdate, height, connected, error, bps }; + return { latestBlock, latestDaUpdate, height, connected, error, bps, subscribeDa }; } diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 3e967d6..8addc74 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -2,10 +2,9 @@ import { useParams, Link } from 'react-router-dom'; import { useBlock, useBlockTransactions, useFeatures } from '../hooks'; import { CopyButton, Loading, AddressLink, TxHashLink, StatusBadge } from '../components'; import { formatNumber, formatTimestamp, formatGas, truncateHash, formatTimeAgo, formatEther } from '../utils'; -import { useContext, useEffect, useState } from 'react'; +import { useContext, useMemo, useState } from 'react'; import type { ReactNode } from 'react'; import { BlockStatsContext } from '../context/BlockStatsContext'; -import type { BlockDaStatus } from '../types'; /** Format a DA height as a status indicator. 
*/ function formatDaStatus(daHeight: number): ReactNode { @@ -33,25 +32,20 @@ export default function BlockDetailPage() { const [txPage, setTxPage] = useState(1); const { transactions, pagination, loading } = useBlockTransactions(blockNumber, { page: txPage, limit: 20 }); const { latestDaUpdate } = useContext(BlockStatsContext); - const [daOverride, setDaOverride] = useState(null); - // Apply live DA status updates from SSE - useEffect(() => { + // Derive DA override directly from latest SSE update (no state needed) + const daOverride = useMemo(() => { if (latestDaUpdate && latestDaUpdate.block_number === blockNumber) { - setDaOverride({ + return { block_number: latestDaUpdate.block_number, header_da_height: latestDaUpdate.header_da_height, data_da_height: latestDaUpdate.data_da_height, updated_at: new Date().toISOString(), - }); + }; } + return null; }, [latestDaUpdate, blockNumber]); - // Reset override when navigating to a different block - useEffect(() => { - setDaOverride(null); - }, [blockNumber]); - type DetailRow = { label: string; value: ReactNode; stacked?: boolean }; const details: DetailRow[] = block ? 
[ { label: 'Block Height', value: formatNumber(block.number) }, diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 7873f96..642b215 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -19,7 +19,7 @@ export default function BlocksPage() { const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: 100 }); const features = useFeatures(); const hasLoaded = !loading || pagination !== null; - const { latestBlockEvent, latestDaUpdate, sseConnected } = useContext(BlockStatsContext); + const { latestBlockEvent, sseConnected, subscribeDa } = useContext(BlockStatsContext); const [daOverrides, setDaOverrides] = useState>(new Map()); const [daHighlight, setDaHighlight] = useState>(new Set()); const daHighlightTimeoutsRef = useRef>(new Map()); @@ -83,41 +83,40 @@ export default function BlocksPage() { return [...unique, ...fetchedBlocks].slice(0, 100); }, [fetchedBlocks, fetchedNumberSet, sseBlocks, page]); - // Apply live DA status updates from SSE (drip-fed one at a time) + // Subscribe to DA updates from SSE. setState is called inside the subscription + // callback (not synchronously in the effect body), satisfying react-hooks/set-state-in-effect. 
useEffect(() => { - if (!latestDaUpdate) return; - const bn = latestDaUpdate.block_number; - setDaOverrides(prev => { - const next = new Map(prev); - next.set(bn, { - block_number: bn, - header_da_height: latestDaUpdate.header_da_height, - data_da_height: latestDaUpdate.data_da_height, - updated_at: new Date().toISOString(), - }); - return next; - }); - // Flash the dot for 1.5s - setDaHighlight(prev => new Set(prev).add(bn)); - const prev = daHighlightTimeoutsRef.current.get(bn); - if (prev !== undefined) clearTimeout(prev); - const t = window.setTimeout(() => { - setDaHighlight(p => { - const next = new Set(p); - next.delete(bn); + return subscribeDa((updates) => { + setDaOverrides(prev => { + const next = new Map(prev); + for (const u of updates) { + next.set(u.block_number, { + block_number: u.block_number, + header_da_height: u.header_da_height, + data_da_height: u.data_da_height, + updated_at: new Date().toISOString(), + }); + } return next; }); - daHighlightTimeoutsRef.current.delete(bn); - }, 1500); - daHighlightTimeoutsRef.current.set(bn, t); - }, [latestDaUpdate]); - - // Clear DA overrides when fetched data refreshes (it now includes the updates) - useEffect(() => { - if (fetchedBlocks.length) { - setDaOverrides(new Map()); - } - }, [fetchedBlocks]); + // Flash dots for 1.5s + for (const u of updates) { + const bn = u.block_number; + setDaHighlight(prev => new Set(prev).add(bn)); + const existing = daHighlightTimeoutsRef.current.get(bn); + if (existing !== undefined) clearTimeout(existing); + const t = window.setTimeout(() => { + setDaHighlight(p => { + const next = new Set(p); + next.delete(bn); + return next; + }); + daHighlightTimeoutsRef.current.delete(bn); + }, 1500); + daHighlightTimeoutsRef.current.set(bn, t); + } + }); + }, [subscribeDa]); const navigate = useNavigate(); const [sort, setSort] = useState<{ key: 'number' | 'hash' | 'timestamp' | 'transaction_count' | 'gas_used' | null; direction: 'asc' | 'desc'; }>({ key: null, direction: 'desc' 
}); @@ -126,7 +125,6 @@ export default function BlocksPage() { const [highlightBlocks, setHighlightBlocks] = useState>(new Set()); const timeoutsRef = useRef>(new Map()); const highlightRafRef = useRef(null); - const [, setTick] = useState(0); const handleSort = (key: 'number' | 'hash' | 'timestamp' | 'transaction_count' | 'gas_used') => { setSort((prev) => { @@ -244,6 +242,7 @@ export default function BlocksPage() { // Cleanup on unmount useEffect(() => { const activeTimeouts = timeoutsRef.current; + const activeDaTimeouts = daHighlightTimeoutsRef.current; return () => { if (highlightRafRef.current !== null) { window.cancelAnimationFrame(highlightRafRef.current); @@ -260,8 +259,8 @@ export default function BlocksPage() { } for (const [, t] of activeTimeouts) clearTimeout(t); activeTimeouts.clear(); - for (const [, t] of daHighlightTimeoutsRef.current) clearTimeout(t); - daHighlightTimeoutsRef.current.clear(); + for (const [, t] of activeDaTimeouts) clearTimeout(t); + activeDaTimeouts.clear(); }; }, []); From 905e1ec040b9883b04ef927ce5e8593d08389fca Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:18:40 +0100 Subject: [PATCH 09/20] fix: restore setTick useState declaration removed by linter --- frontend/src/pages/BlocksPage.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 642b215..63b15dc 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -23,6 +23,7 @@ export default function BlocksPage() { const [daOverrides, setDaOverrides] = useState>(new Map()); const [daHighlight, setDaHighlight] = useState>(new Set()); const daHighlightTimeoutsRef = useRef>(new Map()); + const [, setTick] = useState(0); const [sseBlocks, setSseBlocks] = useState([]); const lastSseBlockRef = useRef(null); const ssePrependRafRef = useRef(null); From 3ae6ae790fead1226614af75efeae0966e738458 Mon Sep 17 00:00:00 2001 From: 
pthmas <9058370+pthmas@users.noreply.github.com> Date: Sun, 15 Mar 2026 22:25:32 +0100 Subject: [PATCH 10/20] Fix DA batch handling on block detail page --- frontend/src/pages/BlockDetailPage.tsx | 33 ++++++++++++++++---------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 8addc74..bef758d 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -2,9 +2,10 @@ import { useParams, Link } from 'react-router-dom'; import { useBlock, useBlockTransactions, useFeatures } from '../hooks'; import { CopyButton, Loading, AddressLink, TxHashLink, StatusBadge } from '../components'; import { formatNumber, formatTimestamp, formatGas, truncateHash, formatTimeAgo, formatEther } from '../utils'; -import { useContext, useMemo, useState } from 'react'; +import { useContext, useEffect, useState } from 'react'; import type { ReactNode } from 'react'; import { BlockStatsContext } from '../context/BlockStatsContext'; +import type { BlockDaStatus } from '../types'; /** Format a DA height as a status indicator. 
*/ function formatDaStatus(daHeight: number): ReactNode { @@ -31,20 +32,26 @@ export default function BlockDetailPage() { const features = useFeatures(); const [txPage, setTxPage] = useState(1); const { transactions, pagination, loading } = useBlockTransactions(blockNumber, { page: txPage, limit: 20 }); - const { latestDaUpdate } = useContext(BlockStatsContext); + const { subscribeDa } = useContext(BlockStatsContext); + const [daOverride, setDaOverride] = useState(null); - // Derive DA override directly from latest SSE update (no state needed) - const daOverride = useMemo(() => { - if (latestDaUpdate && latestDaUpdate.block_number === blockNumber) { - return { - block_number: latestDaUpdate.block_number, - header_da_height: latestDaUpdate.header_da_height, - data_da_height: latestDaUpdate.data_da_height, + // Persist DA updates for this block until navigation or a full refetch catches up. + useEffect(() => { + return subscribeDa((updates) => { + const match = updates.find((update) => update.block_number === blockNumber); + if (!match) return; + setDaOverride({ + block_number: match.block_number, + header_da_height: match.header_da_height, + data_da_height: match.data_da_height, updated_at: new Date().toISOString(), - }; - } - return null; - }, [latestDaUpdate, blockNumber]); + }); + }); + }, [subscribeDa, blockNumber]); + + useEffect(() => { + setDaOverride(null); + }, [blockNumber]); type DetailRow = { label: string; value: ReactNode; stacked?: boolean }; const details: DetailRow[] = block ? 
[ From e68246f1e4fab1f2794057d4662268fefbf29218 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Mon, 16 Mar 2026 10:32:25 +0100 Subject: [PATCH 11/20] Fix frontend lint failure on block detail page --- frontend/src/pages/BlockDetailPage.tsx | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index bef758d..9d5f3e7 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -49,9 +49,7 @@ export default function BlockDetailPage() { }); }, [subscribeDa, blockNumber]); - useEffect(() => { - setDaOverride(null); - }, [blockNumber]); + const currentDaOverride = daOverride?.block_number === blockNumber ? daOverride : null; type DetailRow = { label: string; value: ReactNode; stacked?: boolean }; const details: DetailRow[] = block ? [ @@ -87,7 +85,7 @@ export default function BlockDetailPage() { { label: 'Gas Limit', value: formatGas(block.gas_limit.toString()) }, // DA status rows — only shown when da_tracking feature is enabled ...(features.da_tracking ? (() => { - const daStatus = daOverride ?? block.da_status; + const daStatus = currentDaOverride ?? 
block.da_status; return [ { label: 'Header DA', From e409d9bf7df0753263a3755d8e906546d1111293 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 14:51:35 +0100 Subject: [PATCH 12/20] refactor: send DA SSE payloads directly from DA worker --- .../atlas-server/src/api/handlers/sse.rs | 54 ++++++++----------- .../atlas-server/src/api/handlers/status.rs | 4 +- backend/crates/atlas-server/src/api/mod.rs | 3 +- .../atlas-server/src/indexer/da_worker.rs | 41 +++++++++----- .../crates/atlas-server/src/indexer/mod.rs | 2 +- backend/crates/atlas-server/src/main.rs | 2 +- 6 files changed, 56 insertions(+), 50 deletions(-) diff --git a/backend/crates/atlas-server/src/api/handlers/sse.rs b/backend/crates/atlas-server/src/api/handlers/sse.rs index 18dcf7f..648e650 100644 --- a/backend/crates/atlas-server/src/api/handlers/sse.rs +++ b/backend/crates/atlas-server/src/api/handlers/sse.rs @@ -13,7 +13,8 @@ use tokio::sync::broadcast; use crate::api::handlers::get_latest_block; use crate::api::AppState; use crate::head::HeadTracker; -use atlas_common::{Block, BlockDaStatus}; +use crate::indexer::DaSseUpdate; +use atlas_common::Block; use sqlx::PgPool; use tracing::warn; @@ -41,7 +42,7 @@ fn make_event_stream( pool: PgPool, head_tracker: Arc, mut block_rx: broadcast::Receiver<()>, - mut da_rx: broadcast::Receiver>, + mut da_rx: broadcast::Receiver>, ) -> impl Stream> + Send { async_stream::stream! 
{ let mut last_block_number: Option = None; @@ -109,14 +110,9 @@ fn make_event_stream( } result = da_rx.recv() => { match result { - Ok(block_numbers) => { - match fetch_da_status(&pool, &block_numbers).await { - Ok(rows) => { - if let Some(event) = da_batch_to_event(&rows) { - yield Ok(event); - } - } - Err(e) => warn!(error = ?e, "sse: failed to fetch DA status for update"), + Ok(updates) => { + if let Some(event) = da_batch_to_event(&updates) { + yield Ok(event); } } Err(broadcast::error::RecvError::Lagged(_)) => { @@ -158,20 +154,6 @@ where ) } -async fn fetch_da_status( - pool: &PgPool, - block_numbers: &[i64], -) -> Result, sqlx::Error> { - sqlx::query_as( - "SELECT block_number, header_da_height, data_da_height, updated_at - FROM block_da_status - WHERE block_number = ANY($1)", - ) - .bind(block_numbers) - .fetch_all(pool) - .await -} - fn block_to_event(block: Block) -> Option { let event = NewBlockEvent { block }; serde_json::to_string(&event) @@ -179,12 +161,12 @@ fn block_to_event(block: Block) -> Option { .map(|json| Event::default().event("new_block").data(json)) } -fn da_batch_to_event(rows: &[BlockDaStatus]) -> Option { - if rows.is_empty() { +fn da_batch_to_event(updates: &[DaSseUpdate]) -> Option { + if updates.is_empty() { return None; } let batch = DaBatchEvent { - updates: rows + updates: updates .iter() .map(|da| DaUpdateEvent { block_number: da.block_number, @@ -271,10 +253,12 @@ mod tests { #[tokio::test] async fn stream_seeds_from_head_tracker() { let tracker = Arc::new(HeadTracker::empty(10)); - tracker.publish_committed_batch(vec![sample_block(42)]).await; + tracker + .publish_committed_batch(vec![sample_block(42)]) + .await; let (tx, _) = broadcast::channel::<()>(16); - let (da_tx, _) = broadcast::channel::>(16); + let (da_tx, _) = broadcast::channel::>(16); let stream = make_event_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); tokio::pin!(stream); @@ -296,10 +280,12 @@ mod tests { #[tokio::test] async fn 
stream_replays_new_blocks_after_broadcast() { let tracker = Arc::new(HeadTracker::empty(10)); - tracker.publish_committed_batch(vec![sample_block(42)]).await; + tracker + .publish_committed_batch(vec![sample_block(42)]) + .await; let (tx, _) = broadcast::channel::<()>(16); - let (da_tx, _) = broadcast::channel::>(16); + let (da_tx, _) = broadcast::channel::>(16); let stream = make_event_stream( dummy_pool(), tracker.clone(), @@ -314,7 +300,9 @@ mod tests { .unwrap(); // Publish a new block and broadcast - tracker.publish_committed_batch(vec![sample_block(43)]).await; + tracker + .publish_committed_batch(vec![sample_block(43)]) + .await; tx.send(()).unwrap(); let second = tokio::time::timeout(Duration::from_secs(1), stream.next()).await; @@ -337,7 +325,7 @@ mod tests { .await; let (tx, _) = broadcast::channel::<()>(16); - let (da_tx, _) = broadcast::channel::>(16); + let (da_tx, _) = broadcast::channel::>(16); let stream = make_event_stream( dummy_pool(), tracker.clone(), diff --git a/backend/crates/atlas-server/src/api/handlers/status.rs b/backend/crates/atlas-server/src/api/handlers/status.rs index a05b649..3bc4b62 100644 --- a/backend/crates/atlas-server/src/api/handlers/status.rs +++ b/backend/crates/atlas-server/src/api/handlers/status.rs @@ -94,7 +94,9 @@ mod tests { #[tokio::test] async fn status_returns_head_tracker_block() { let tracker = Arc::new(HeadTracker::empty(10)); - tracker.publish_committed_batch(vec![sample_block(42)]).await; + tracker + .publish_committed_batch(vec![sample_block(42)]) + .await; let result = get_status(test_state(tracker)).await; let Json(status) = result.unwrap_or_else(|_| panic!("get_status should not fail")); diff --git a/backend/crates/atlas-server/src/api/mod.rs b/backend/crates/atlas-server/src/api/mod.rs index 281145f..7a401d0 100644 --- a/backend/crates/atlas-server/src/api/mod.rs +++ b/backend/crates/atlas-server/src/api/mod.rs @@ -11,11 +11,12 @@ use tower_http::timeout::TimeoutLayer; use tower_http::trace::TraceLayer; 
use crate::head::HeadTracker; +use crate::indexer::DaSseUpdate; pub struct AppState { pub pool: PgPool, pub block_events_tx: broadcast::Sender<()>, - pub da_events_tx: broadcast::Sender>, + pub da_events_tx: broadcast::Sender>, pub head_tracker: Arc, pub rpc_url: String, pub evnode_url: Option, diff --git a/backend/crates/atlas-server/src/indexer/da_worker.rs b/backend/crates/atlas-server/src/indexer/da_worker.rs index e40b062..dcec72b 100644 --- a/backend/crates/atlas-server/src/indexer/da_worker.rs +++ b/backend/crates/atlas-server/src/indexer/da_worker.rs @@ -41,11 +41,18 @@ const BATCH_SIZE: i64 = 100; /// Sleep when idle (no work in either phase). const IDLE_SLEEP: Duration = Duration::from_millis(500); +#[derive(Clone, Debug)] +pub struct DaSseUpdate { + pub block_number: i64, + pub header_da_height: i64, + pub data_da_height: i64, +} + pub struct DaWorker { pool: PgPool, client: EvnodeClient, concurrency: usize, - da_events_tx: broadcast::Sender>, + da_events_tx: broadcast::Sender>, } impl DaWorker { @@ -53,7 +60,7 @@ impl DaWorker { pool: PgPool, evnode_url: &str, concurrency: u32, - da_events_tx: broadcast::Sender>, + da_events_tx: broadcast::Sender>, ) -> Result { Ok(Self { pool, @@ -92,11 +99,11 @@ impl DaWorker { } /// Notify SSE subscribers of DA status changes via in-process broadcast channel. - fn notify_da_updates(&self, block_numbers: &[i64]) { - if block_numbers.is_empty() { + fn notify_da_updates(&self, updates: &[DaSseUpdate]) { + if updates.is_empty() { return; } - let _ = self.da_events_tx.send(block_numbers.to_vec()); + let _ = self.da_events_tx.send(updates.to_vec()); } /// Phase 1: Find blocks missing from block_da_status and query ev-node. 
@@ -121,7 +128,7 @@ impl DaWorker { let pool = &self.pool; let client = &self.client; - let results: Vec> = stream::iter(missing) + let results: Vec> = stream::iter(missing) .map(|(block_number,)| async move { match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { @@ -146,7 +153,11 @@ impl DaWorker { ); return None; } - Some(block_number) + Some(DaSseUpdate { + block_number, + header_da_height: header_da as i64, + data_da_height: data_da as i64, + }) } Err(e) => { tracing::warn!( @@ -162,8 +173,8 @@ impl DaWorker { .collect() .await; - let updated_blocks: Vec = results.into_iter().flatten().collect(); - self.notify_da_updates(&updated_blocks); + let updates: Vec = results.into_iter().flatten().collect(); + self.notify_da_updates(&updates); Ok(count) } @@ -189,7 +200,7 @@ impl DaWorker { let pool = &self.pool; let client = &self.client; - let results: Vec> = stream::iter(pending) + let results: Vec> = stream::iter(pending) .map(|(block_number,)| async move { match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { @@ -211,7 +222,11 @@ impl DaWorker { ); return None; } - Some(block_number) + Some(DaSseUpdate { + block_number, + header_da_height: header_da as i64, + data_da_height: data_da as i64, + }) } Err(e) => { tracing::warn!( @@ -227,8 +242,8 @@ impl DaWorker { .collect() .await; - let updated_blocks: Vec = results.into_iter().flatten().collect(); - self.notify_da_updates(&updated_blocks); + let updates: Vec = results.into_iter().flatten().collect(); + self.notify_da_updates(&updates); Ok(count) } diff --git a/backend/crates/atlas-server/src/indexer/mod.rs b/backend/crates/atlas-server/src/indexer/mod.rs index 26bc1d0..05bf8a7 100644 --- a/backend/crates/atlas-server/src/indexer/mod.rs +++ b/backend/crates/atlas-server/src/indexer/mod.rs @@ -7,6 +7,6 @@ pub(crate) mod fetcher; pub mod indexer; pub mod metadata; -pub use da_worker::DaWorker; +pub use da_worker::{DaSseUpdate, DaWorker}; pub use 
indexer::Indexer; pub use metadata::MetadataFetcher; diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index 0fa4f03..dba0af5 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -43,7 +43,7 @@ async fn main() -> Result<()> { // Shared broadcast channels for SSE notifications let (block_events_tx, _) = broadcast::channel(1024); - let (da_events_tx, _) = broadcast::channel::>(256); + let (da_events_tx, _) = broadcast::channel::>(256); let head_tracker = Arc::new(if config.reindex { head::HeadTracker::empty(config.sse_replay_buffer_blocks) } else { From 21a4d0ba0130244d40618a37d6dd0ff6c4491fb4 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 15:20:07 +0100 Subject: [PATCH 13/20] fix: use axum 0.8-compatible SSE return type --- .../crates/atlas-server/src/api/handlers/sse.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/backend/crates/atlas-server/src/api/handlers/sse.rs b/backend/crates/atlas-server/src/api/handlers/sse.rs index 648e650..f3bc257 100644 --- a/backend/crates/atlas-server/src/api/handlers/sse.rs +++ b/backend/crates/atlas-server/src/api/handlers/sse.rs @@ -5,7 +5,6 @@ use axum::{ use futures::stream::Stream; use serde::Serialize; use std::convert::Infallible; -use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use tokio::sync::broadcast; @@ -18,8 +17,6 @@ use atlas_common::Block; use sqlx::PgPool; use tracing::warn; -type SseStream = Pin> + Send>>; - #[derive(Serialize, Debug)] struct NewBlockEvent { block: Block, @@ -131,22 +128,13 @@ fn make_event_stream( /// in-memory committed head state, plus DA status update batches. 
pub async fn block_events( State(state): State>, -) -> Sse> { +) -> Sse>> { let stream = make_event_stream( state.pool.clone(), state.head_tracker.clone(), state.block_events_tx.subscribe(), state.da_events_tx.subscribe(), ); - sse_response(stream) -} - -fn sse_response(stream: S) -> Sse> -where - S: Stream> + Send + 'static, -{ - let stream: SseStream = Box::pin(stream); - Sse::new(stream).keep_alive( axum::response::sse::KeepAlive::new() .interval(Duration::from_secs(15)) From 2894a347242653b87d2dfba2b229385b5b88cfd9 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 15:42:28 +0100 Subject: [PATCH 14/20] fix: reduce blocks page size and DA glow transitions --- frontend/src/pages/BlocksPage.tsx | 43 +++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 63b15dc..f27668e 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -6,6 +6,12 @@ import { formatNumber, formatTimeAgo, formatGas, truncateHash } from '../utils'; import { BlockStatsContext } from '../context/BlockStatsContext'; import type { BlockDaStatus } from '../types'; +const BLOCKS_PAGE_SIZE = 20; + +function isDaIncluded(status: Pick | null | undefined): boolean { + return !!status && status.header_da_height > 0 && status.data_da_height > 0; +} + export default function BlocksPage() { const [page, setPage] = useState(1); const [autoRefresh, setAutoRefresh] = useState(() => { @@ -16,13 +22,14 @@ export default function BlocksPage() { return true; } }); - const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: 100 }); + const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: BLOCKS_PAGE_SIZE }); const features = useFeatures(); const hasLoaded = !loading || pagination !== null; const { latestBlockEvent, sseConnected, subscribeDa } = 
useContext(BlockStatsContext); const [daOverrides, setDaOverrides] = useState>(new Map()); const [daHighlight, setDaHighlight] = useState>(new Set()); const daHighlightTimeoutsRef = useRef>(new Map()); + const baseDaIncludedRef = useRef>(new Map()); const [, setTick] = useState(0); const [sseBlocks, setSseBlocks] = useState([]); const lastSseBlockRef = useRef(null); @@ -58,7 +65,7 @@ export default function BlocksPage() { prepend.push(b); } prepend.reverse(); - return [...prepend, ...prev].slice(0, 100); + return [...prepend, ...prev].slice(0, BLOCKS_PAGE_SIZE); }); ssePrependRafRef.current = null; }); @@ -81,28 +88,48 @@ export default function BlocksPage() { const blocks = useMemo(() => { if (page !== 1 || !sseBlocks.length) return fetchedBlocks; const unique = sseBlocks.filter((b) => !fetchedNumberSet.has(b.number)); - return [...unique, ...fetchedBlocks].slice(0, 100); + return [...unique, ...fetchedBlocks].slice(0, BLOCKS_PAGE_SIZE); }, [fetchedBlocks, fetchedNumberSet, sseBlocks, page]); + useEffect(() => { + const next = new Map(); + for (const block of blocks) { + next.set(block.number, isDaIncluded(block.da_status)); + } + baseDaIncludedRef.current = next; + }, [blocks]); + // Subscribe to DA updates from SSE. setState is called inside the subscription // callback (not synchronously in the effect body), satisfying react-hooks/set-state-in-effect. useEffect(() => { return subscribeDa((updates) => { + const transitionedToIncluded = new Set(); setDaOverrides(prev => { const next = new Map(prev); for (const u of updates) { - next.set(u.block_number, { + const prevStatus = prev.get(u.block_number); + const wasIncluded = prevStatus + ? isDaIncluded(prevStatus) + : (baseDaIncludedRef.current.get(u.block_number) ?? 
false); + const nextStatus = { block_number: u.block_number, header_da_height: u.header_da_height, data_da_height: u.data_da_height, updated_at: new Date().toISOString(), + }; + const nowIncluded = isDaIncluded(nextStatus); + if (!wasIncluded && nowIncluded) { + transitionedToIncluded.add(u.block_number); + } + next.set(u.block_number, { + ...nextStatus, }); } return next; }); - // Flash dots for 1.5s - for (const u of updates) { - const bn = u.block_number; + + // Flash dots for 1.5s only when status transitions from pending -> included. + for (const bn of transitionedToIncluded) { setDaHighlight(prev => new Set(prev).add(bn)); const existing = daHighlightTimeoutsRef.current.get(bn); if (existing !== undefined) clearTimeout(existing); @@ -410,7 +437,7 @@ export default function BlocksPage() { {features.da_tracking && (() => { const daStatus = daOverrides.get(block.number) ?? block.da_status; const flash = daHighlight.has(block.number); - const included = daStatus && daStatus.header_da_height > 0 && daStatus.data_da_height > 0; + const included = isDaIncluded(daStatus); return ( {included ? 
( From be051a69bf14769d555cede776f5c9e0526648d9 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 15:56:31 +0100 Subject: [PATCH 15/20] Add explicit DA tracking controls --- .env.example | 12 +- backend/crates/atlas-common/src/types.rs | 2 +- .../atlas-server/src/api/handlers/status.rs | 4 +- backend/crates/atlas-server/src/api/mod.rs | 2 +- backend/crates/atlas-server/src/config.rs | 134 +++++++++++++++++- .../atlas-server/src/indexer/da_worker.rs | 26 +++- backend/crates/atlas-server/src/main.rs | 15 +- .../20240108000001_block_da_status.sql | 2 +- docker-compose.yml | 2 + 9 files changed, 182 insertions(+), 17 deletions(-) diff --git a/.env.example b/.env.example index d1bf8f4..77954b9 100644 --- a/.env.example +++ b/.env.example @@ -27,9 +27,15 @@ RPC_BATCH_SIZE=20 # API_DB_MAX_CONNECTIONS=20 # SSE_REPLAY_BUFFER_BLOCKS=4096 # replay tail used only for active connected clients -# Optional: ev-node Connect RPC URL for L2 DA (Data Availability) inclusion tracking. -# When set, a background worker queries ev-node for Celestia DA heights per block. +# Optional: enable DA (Data Availability) inclusion tracking from ev-node. +# When disabled, EVNODE_URL and DA tuning settings are ignored. +# ENABLE_DA_TRACKING=false + +# ev-node Connect RPC URL used when ENABLE_DA_TRACKING=true. # EVNODE_URL=http://localhost:7331 -# Number of concurrent requests to ev-node for DA status backfill (default: 50) +# Maximum ev-node requests per second used by the DA worker (default: 50) +# DA_RPC_REQUESTS_PER_SECOND=50 + +# Number of concurrent DA requests/workers (default: 50) # DA_WORKER_CONCURRENCY=50 diff --git a/backend/crates/atlas-common/src/types.rs b/backend/crates/atlas-common/src/types.rs index 1eaa959..6edd8a9 100644 --- a/backend/crates/atlas-common/src/types.rs +++ b/backend/crates/atlas-common/src/types.rs @@ -17,7 +17,7 @@ pub struct Block { } /// DA (Data Availability) status for a block on L2 chains using Celestia. 
-/// Only populated when EVNODE_URL is configured and the DA worker has checked the block. +/// Only populated when DA tracking is enabled and the DA worker has checked the block. #[derive(Debug, Clone, Serialize, Deserialize, FromRow)] pub struct BlockDaStatus { pub block_number: i64, diff --git a/backend/crates/atlas-server/src/api/handlers/status.rs b/backend/crates/atlas-server/src/api/handlers/status.rs index 3bc4b62..f5b6c1e 100644 --- a/backend/crates/atlas-server/src/api/handlers/status.rs +++ b/backend/crates/atlas-server/src/api/handlers/status.rs @@ -22,7 +22,7 @@ pub struct ChainStatus { /// Returns in <1ms, optimized for frequent polling pub async fn get_status(State(state): State>) -> ApiResult> { let features = ChainFeatures { - da_tracking: state.evnode_url.is_some(), + da_tracking: state.da_tracking_enabled, }; if let Some(block) = state.head_tracker.latest().await { @@ -87,7 +87,7 @@ mod tests { da_events_tx: da_tx, head_tracker, rpc_url: String::new(), - evnode_url: None, + da_tracking_enabled: false, })) } diff --git a/backend/crates/atlas-server/src/api/mod.rs b/backend/crates/atlas-server/src/api/mod.rs index 7a401d0..077f301 100644 --- a/backend/crates/atlas-server/src/api/mod.rs +++ b/backend/crates/atlas-server/src/api/mod.rs @@ -19,7 +19,7 @@ pub struct AppState { pub da_events_tx: broadcast::Sender>, pub head_tracker: Arc, pub rpc_url: String, - pub evnode_url: Option, + pub da_tracking_enabled: bool, } /// Build the Axum router. 
diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index 846f8ca..755a2a1 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -1,6 +1,9 @@ use anyhow::{bail, Context, Result}; use std::env; +const DEFAULT_DA_WORKER_CONCURRENCY: u32 = 50; +const DEFAULT_DA_RPC_REQUESTS_PER_SECOND: u32 = 50; + #[derive(Debug, Clone)] pub struct Config { // Shared @@ -25,8 +28,10 @@ pub struct Config { pub rpc_batch_size: u32, // DA tracking (optional) + pub da_tracking_enabled: bool, pub evnode_url: Option, pub da_worker_concurrency: u32, + pub da_rpc_requests_per_second: u32, // API-specific pub api_host: String, @@ -47,6 +52,52 @@ impl Config { bail!("SSE_REPLAY_BUFFER_BLOCKS must be between 1 and 100000"); } + let da_tracking_enabled: bool = env::var("ENABLE_DA_TRACKING") + .unwrap_or_else(|_| "false".to_string()) + .parse() + .context("Invalid ENABLE_DA_TRACKING")?; + + let raw_evnode_url = env::var("EVNODE_URL") + .ok() + .map(|url| url.trim().to_string()) + .filter(|url| !url.is_empty()); + + let evnode_url = if da_tracking_enabled { + Some( + raw_evnode_url.ok_or_else(|| { + anyhow::anyhow!("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") + })?, + ) + } else { + None + }; + + let da_worker_concurrency = if da_tracking_enabled { + let value: u32 = env::var("DA_WORKER_CONCURRENCY") + .unwrap_or_else(|_| DEFAULT_DA_WORKER_CONCURRENCY.to_string()) + .parse() + .context("Invalid DA_WORKER_CONCURRENCY")?; + if value == 0 { + bail!("DA_WORKER_CONCURRENCY must be greater than 0"); + } + value + } else { + DEFAULT_DA_WORKER_CONCURRENCY + }; + + let da_rpc_requests_per_second = if da_tracking_enabled { + let value: u32 = env::var("DA_RPC_REQUESTS_PER_SECOND") + .unwrap_or_else(|_| DEFAULT_DA_RPC_REQUESTS_PER_SECOND.to_string()) + .parse() + .context("Invalid DA_RPC_REQUESTS_PER_SECOND")?; + if value == 0 { + bail!("DA_RPC_REQUESTS_PER_SECOND must be greater than 0"); + } + value + 
} else { + DEFAULT_DA_RPC_REQUESTS_PER_SECOND + }; + Ok(Self { database_url: env::var("DATABASE_URL").context("DATABASE_URL must be set")?, rpc_url: env::var("RPC_URL").context("RPC_URL must be set")?, @@ -95,11 +146,10 @@ impl Config { .parse() .context("Invalid RPC_BATCH_SIZE")?, - evnode_url: env::var("EVNODE_URL").ok(), - da_worker_concurrency: env::var("DA_WORKER_CONCURRENCY") - .unwrap_or_else(|_| "50".to_string()) - .parse() - .context("Invalid DA_WORKER_CONCURRENCY")?, + da_tracking_enabled, + evnode_url, + da_worker_concurrency, + da_rpc_requests_per_second, api_host: env::var("API_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()), api_port: env::var("API_PORT") @@ -124,10 +174,18 @@ mod tests { env::set_var("RPC_URL", "http://localhost:8545"); } + fn clear_da_env() { + env::remove_var("ENABLE_DA_TRACKING"); + env::remove_var("EVNODE_URL"); + env::remove_var("DA_WORKER_CONCURRENCY"); + env::remove_var("DA_RPC_REQUESTS_PER_SECOND"); + } + #[test] fn sse_replay_buffer_validation() { let _lock = ENV_LOCK.lock().unwrap(); set_required_env(); + clear_da_env(); // Default env::remove_var("SSE_REPLAY_BUFFER_BLOCKS"); @@ -155,5 +213,71 @@ mod tests { .contains("Invalid SSE_REPLAY_BUFFER_BLOCKS")); env::remove_var("SSE_REPLAY_BUFFER_BLOCKS"); + clear_da_env(); + } + + #[test] + fn da_tracking_is_disabled_by_default_and_ignores_da_specific_env() { + let _lock = ENV_LOCK.lock().unwrap(); + set_required_env(); + clear_da_env(); + + env::set_var("EVNODE_URL", ""); + env::set_var("DA_WORKER_CONCURRENCY", "not-a-number"); + env::set_var("DA_RPC_REQUESTS_PER_SECOND", "not-a-number"); + + let config = Config::from_env().unwrap(); + assert!(!config.da_tracking_enabled); + assert!(config.evnode_url.is_none()); + assert_eq!(config.da_worker_concurrency, DEFAULT_DA_WORKER_CONCURRENCY); + assert_eq!( + config.da_rpc_requests_per_second, + DEFAULT_DA_RPC_REQUESTS_PER_SECOND + ); + + clear_da_env(); + } + + #[test] + fn da_tracking_requires_non_empty_evnode_url_when_enabled() { 
+ let _lock = ENV_LOCK.lock().unwrap(); + set_required_env(); + clear_da_env(); + + env::set_var("ENABLE_DA_TRACKING", "true"); + let err = Config::from_env().unwrap_err(); + assert!( + err.to_string() + .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") + ); + + env::set_var("EVNODE_URL", " "); + let err = Config::from_env().unwrap_err(); + assert!( + err.to_string() + .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") + ); + + clear_da_env(); + } + + #[test] + fn da_tracking_parses_enabled_config() { + let _lock = ENV_LOCK.lock().unwrap(); + set_required_env(); + clear_da_env(); + + env::set_var("ENABLE_DA_TRACKING", "true"); + env::set_var("EVNODE_URL", "http://localhost:7331/"); + env::set_var("DA_WORKER_CONCURRENCY", "12"); + env::set_var("DA_RPC_REQUESTS_PER_SECOND", "34"); + + let config = Config::from_env().unwrap(); + assert!(config.da_tracking_enabled); + assert_eq!(config.evnode_url.as_deref(), Some("http://localhost:7331/")); + assert_eq!(config.da_worker_concurrency, 12); + assert_eq!(config.da_rpc_requests_per_second, 34); + + clear_da_env(); } } diff --git a/backend/crates/atlas-server/src/indexer/da_worker.rs b/backend/crates/atlas-server/src/indexer/da_worker.rs index dcec72b..6bb2f78 100644 --- a/backend/crates/atlas-server/src/indexer/da_worker.rs +++ b/backend/crates/atlas-server/src/indexer/da_worker.rs @@ -29,7 +29,10 @@ use anyhow::Result; use futures::stream::{self, StreamExt}; +use governor::{Quota, RateLimiter}; use sqlx::PgPool; +use std::num::NonZeroU32; +use std::sync::Arc; use std::time::Duration; use tokio::sync::broadcast; @@ -52,6 +55,14 @@ pub struct DaWorker { pool: PgPool, client: EvnodeClient, concurrency: usize, + requests_per_second: u32, + rate_limiter: Arc< + RateLimiter< + governor::state::NotKeyed, + governor::state::InMemoryState, + governor::clock::DefaultClock, + >, + >, da_events_tx: broadcast::Sender>, } @@ -60,18 +71,27 @@ impl DaWorker { pool: PgPool, evnode_url: &str, concurrency: u32, + 
requests_per_second: u32, da_events_tx: broadcast::Sender>, ) -> Result { + let rate = NonZeroU32::new(requests_per_second) + .ok_or_else(|| anyhow::anyhow!("DA_RPC_REQUESTS_PER_SECOND must be greater than 0"))?; Ok(Self { pool, client: EvnodeClient::new(evnode_url), concurrency: concurrency as usize, + requests_per_second, + rate_limiter: Arc::new(RateLimiter::direct(Quota::per_second(rate))), da_events_tx, }) } pub async fn run(&self) -> Result<()> { - tracing::info!("DA worker started (concurrency: {})", self.concurrency); + tracing::info!( + "DA worker started (concurrency: {}, rate_limit: {} req/s)", + self.concurrency, + self.requests_per_second + ); loop { // Phase 1: backfill gets first pick of the budget @@ -127,9 +147,11 @@ impl DaWorker { let count = missing.len(); let pool = &self.pool; let client = &self.client; + let rate_limiter = &self.rate_limiter; let results: Vec> = stream::iter(missing) .map(|(block_number,)| async move { + rate_limiter.until_ready().await; match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { if let Err(e) = sqlx::query( @@ -199,9 +221,11 @@ impl DaWorker { let count = pending.len(); let pool = &self.pool; let client = &self.client; + let rate_limiter = &self.rate_limiter; let results: Vec> = stream::iter(pending) .map(|(block_number,)| async move { + rate_limiter.until_ready().await; match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { if let Err(e) = sqlx::query( diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index dba0af5..144b08b 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -57,7 +57,7 @@ async fn main() -> Result<()> { da_events_tx: da_events_tx.clone(), head_tracker: head_tracker.clone(), rpc_url: config.rpc_url.clone(), - evnode_url: config.evnode_url.clone(), + da_tracking_enabled: config.da_tracking_enabled, }); // Spawn indexer task with retry logic 
@@ -75,12 +75,21 @@ async fn main() -> Result<()> { }); // Spawn DA worker if EVNODE_URL is configured - if let Some(ref evnode_url) = config.evnode_url { - tracing::info!("DA tracking enabled (EVNODE_URL set)"); + if config.da_tracking_enabled { + let evnode_url = config + .evnode_url + .as_deref() + .expect("ENABLE_DA_TRACKING requires EVNODE_URL"); + tracing::info!( + "DA tracking enabled (workers: {}, rate_limit: {} req/s)", + config.da_worker_concurrency, + config.da_rpc_requests_per_second + ); let da_worker = indexer::DaWorker::new( da_pool, evnode_url, config.da_worker_concurrency, + config.da_rpc_requests_per_second, da_events_tx, )?; tokio::spawn(async move { diff --git a/backend/migrations/20240108000001_block_da_status.sql b/backend/migrations/20240108000001_block_da_status.sql index df6bd3f..5be2202 100644 --- a/backend/migrations/20240108000001_block_da_status.sql +++ b/backend/migrations/20240108000001_block_da_status.sql @@ -1,5 +1,5 @@ -- Block DA (Data Availability) status for L2 chains using Celestia. --- Only populated when EVNODE_URL is configured and the DA worker is running. +-- Only populated when ENABLE_DA_TRACKING=true and the DA worker is running. -- -- The DA worker has two phases: -- 1. Backfill: discovers blocks missing from this table, queries ev-node, and INSERTs. 
diff --git a/docker-compose.yml b/docker-compose.yml index a2e4536..c1f96cc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -28,7 +28,9 @@ services: FETCH_WORKERS: ${FETCH_WORKERS:-10} RPC_REQUESTS_PER_SECOND: ${RPC_REQUESTS_PER_SECOND:-100} RPC_BATCH_SIZE: ${RPC_BATCH_SIZE:-20} + ENABLE_DA_TRACKING: ${ENABLE_DA_TRACKING:-false} EVNODE_URL: ${EVNODE_URL:-} + DA_RPC_REQUESTS_PER_SECOND: ${DA_RPC_REQUESTS_PER_SECOND:-50} DA_WORKER_CONCURRENCY: ${DA_WORKER_CONCURRENCY:-50} API_HOST: 0.0.0.0 API_PORT: 3000 From 856a61704077a9cfc05028dad75c72cc27011130 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 16:01:49 +0100 Subject: [PATCH 16/20] Fix local quality issues --- backend/crates/atlas-server/src/config.rs | 22 +++++++++------------- frontend/src/pages/BlocksPage.tsx | 5 ++++- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index 755a2a1..dd44a95 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -63,11 +63,9 @@ impl Config { .filter(|url| !url.is_empty()); let evnode_url = if da_tracking_enabled { - Some( - raw_evnode_url.ok_or_else(|| { - anyhow::anyhow!("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") - })?, - ) + Some(raw_evnode_url.ok_or_else(|| { + anyhow::anyhow!("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") + })?) 
} else { None }; @@ -246,17 +244,15 @@ mod tests { env::set_var("ENABLE_DA_TRACKING", "true"); let err = Config::from_env().unwrap_err(); - assert!( - err.to_string() - .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") - ); + assert!(err + .to_string() + .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true")); env::set_var("EVNODE_URL", " "); let err = Config::from_env().unwrap_err(); - assert!( - err.to_string() - .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") - ); + assert!(err + .to_string() + .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true")); clear_da_env(); } diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index f27668e..56ce739 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -438,10 +438,13 @@ export default function BlocksPage() { const daStatus = daOverrides.get(block.number) ?? block.da_status; const flash = daHighlight.has(block.number); const included = isDaIncluded(daStatus); + const includedTitle = daStatus + ? `Header: ${daStatus.header_da_height}, Data: ${daStatus.data_da_height}` + : 'DA included'; return ( {included ? 
( - + ) : ( )} From 0870e1d464b1018ff8af92bafae28cd776c1e685 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 16:55:18 +0100 Subject: [PATCH 17/20] Address DA review follow-ups --- .env.example | 7 +- CLAUDE.md | 15 +- .../atlas-server/src/api/handlers/sse.rs | 67 +++++++- backend/crates/atlas-server/src/config.rs | 25 ++- .../atlas-server/src/indexer/da_worker.rs | 34 ++-- .../crates/atlas-server/src/indexer/evnode.rs | 32 ++-- backend/crates/atlas-server/src/main.rs | 2 +- frontend/src/components/Layout.tsx | 2 +- frontend/src/context/BlockStatsContext.tsx | 6 +- frontend/src/hooks/useBlockSSE.ts | 18 ++- frontend/src/hooks/useFeatures.ts | 61 +++++-- frontend/src/index.css | 6 +- frontend/src/pages/BlockDetailPage.tsx | 15 +- frontend/src/pages/BlocksPage.tsx | 151 ++++++++++++++---- 14 files changed, 335 insertions(+), 106 deletions(-) diff --git a/.env.example b/.env.example index 77954b9..e203130 100644 --- a/.env.example +++ b/.env.example @@ -28,11 +28,12 @@ RPC_BATCH_SIZE=20 # SSE_REPLAY_BUFFER_BLOCKS=4096 # replay tail used only for active connected clients # Optional: enable DA (Data Availability) inclusion tracking from ev-node. -# When disabled, EVNODE_URL and DA tuning settings are ignored. +# A non-empty EVNODE_URL enables DA tracking automatically. +# ENABLE_DA_TRACKING=true is an optional explicit toggle/validation flag. # ENABLE_DA_TRACKING=false -# ev-node Connect RPC URL used when ENABLE_DA_TRACKING=true. -# EVNODE_URL=http://localhost:7331 +# ev-node URL reachable from the atlas-server process/container when DA tracking is enabled. 
+# EVNODE_URL=http://:7331 # Maximum ev-node requests per second used by the DA worker (default: 50) # DA_RPC_REQUESTS_PER_SECOND=50 diff --git a/CLAUDE.md b/CLAUDE.md index 2807ed3..465d635 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -79,19 +79,20 @@ For large tables (transactions, addresses), use `pg_class.reltuples` instead of pub struct AppState { pub pool: PgPool, // API pool only pub block_events_tx: broadcast::Sender<()>, // shared with indexer - pub da_events_tx: broadcast::Sender>, // shared with DA worker + pub da_events_tx: broadcast::Sender>, // shared with DA worker + pub head_tracker: Arc, pub rpc_url: String, - pub evnode_url: Option, // DA feature flag + pub da_tracking_enabled: bool, } ``` ### DA tracking (optional) -When `EVNODE_URL` is set, a background DA worker queries ev-node for Celestia inclusion heights per block. Updates are pushed to SSE clients via an in-process `broadcast::Sender>` (block numbers that were updated). The SSE handler fetches DA status from the database and streams `da_batch` events. +When DA tracking is enabled, a background DA worker queries ev-node for Celestia inclusion heights per block. Updates are pushed to SSE clients via an in-process `broadcast::Sender>`. The SSE handler streams `da_batch` events for incremental updates and emits `da_resync` when a client falls behind and should refetch visible DA state. ### Frontend API client - Base URL: `/api` (proxied by nginx to `atlas-server:3000`) -- `GET /api/status` → `{ block_height, indexed_at, features: { da_tracking } }` — single key-value lookup from `indexer_state`, sub-ms. Used by the navbar as a polling fallback when SSE is disconnected. -- `GET /api/events` → SSE stream of `new_block` and `da_batch` events. Primary live-update path for navbar counter, blocks page, and DA status. Falls back to `/api/status` polling on disconnect. 
+- `GET /api/status` → `{ block_height, indexed_at, features: { da_tracking } }` — serves from `head_tracker` first and falls back to `indexer_state` when the in-memory head is empty. Used by the navbar as a polling fallback when SSE is disconnected. +- `GET /api/events` → SSE stream of `new_block`, `da_batch`, and `da_resync` events. Primary live-update path for navbar counter, blocks page, and DA status. Falls back to `/api/status` polling on disconnect. ## Important Conventions @@ -118,7 +119,9 @@ Key vars (see `.env.example` for full list): | `ADMIN_API_KEY` | API | none | | `API_HOST` | API | `127.0.0.1` | | `API_PORT` | API | `3000` | -| `EVNODE_URL` | server | none (DA tracking disabled) | +| `ENABLE_DA_TRACKING` | server | `false` | +| `EVNODE_URL` | server | none | +| `DA_RPC_REQUESTS_PER_SECOND` | DA worker | `50` | | `DA_WORKER_CONCURRENCY` | DA worker | `50` | ## Running Locally diff --git a/backend/crates/atlas-server/src/api/handlers/sse.rs b/backend/crates/atlas-server/src/api/handlers/sse.rs index f3bc257..40d7cc1 100644 --- a/backend/crates/atlas-server/src/api/handlers/sse.rs +++ b/backend/crates/atlas-server/src/api/handlers/sse.rs @@ -34,6 +34,11 @@ struct DaBatchEvent { updates: Vec, } +#[derive(Serialize, Debug)] +struct DaResyncEvent { + required: bool, +} + /// Build the SSE stream. Separated from the handler for testability. fn make_event_stream( pool: PgPool, @@ -112,8 +117,14 @@ fn make_event_stream( yield Ok(event); } } - Err(broadcast::error::RecvError::Lagged(_)) => { - // Missed some DA updates; frontend catches up via next fetch/poll. 
+ Err(broadcast::error::RecvError::Lagged(skipped)) => { + warn!( + skipped, + "sse da: client fell behind DA update stream; requesting resync" + ); + if let Some(event) = da_resync_event() { + yield Ok(event); + } } Err(broadcast::error::RecvError::Closed) => break, } @@ -168,6 +179,12 @@ fn da_batch_to_event(updates: &[DaSseUpdate]) -> Option { .map(|json| Event::default().event("da_batch").data(json)) } +fn da_resync_event() -> Option { + serde_json::to_string(&DaResyncEvent { required: true }) + .ok() + .map(|json| Event::default().event("da_resync").data(json)) +} + #[cfg(test)] mod tests { use super::*; @@ -188,6 +205,14 @@ mod tests { } } + fn sample_da_update(block_number: i64) -> DaSseUpdate { + DaSseUpdate { + block_number, + header_da_height: block_number * 10, + data_da_height: block_number * 10 + 1, + } + } + /// Lazy PgPool that never connects — safe for tests that don't hit the DB. fn dummy_pool() -> PgPool { sqlx::postgres::PgPoolOptions::new() @@ -238,6 +263,13 @@ mod tests { } } + #[test] + fn da_resync_event_serializes_with_required_flag() { + let event = da_resync_event().expect("event should serialize"); + let debug = format!("{event:?}"); + assert!(debug.contains("da_resync")); + } + #[tokio::test] async fn stream_seeds_from_head_tracker() { let tracker = Arc::new(HeadTracker::empty(10)); @@ -353,4 +385,35 @@ mod tests { drop(tx); drop(da_tx); } + + #[tokio::test] + async fn stream_emits_da_resync_when_da_updates_lag() { + let tracker = Arc::new(HeadTracker::empty(10)); + tracker + .publish_committed_batch(vec![sample_block(42)]) + .await; + + let (tx, _) = broadcast::channel::<()>(16); + let (da_tx, _) = broadcast::channel::>(1); + let stream = make_event_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); + tokio::pin!(stream); + + let _ = tokio::time::timeout(Duration::from_secs(1), stream.next()) + .await + .unwrap(); + + da_tx.send(vec![sample_da_update(100)]).unwrap(); + da_tx.send(vec![sample_da_update(101)]).unwrap(); + + 
let next = tokio::time::timeout(Duration::from_secs(1), stream.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + let debug = format!("{next:?}"); + assert!(debug.contains("da_resync")); + + drop(tx); + drop(da_tx); + } } diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index dd44a95..8bc78c5 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -52,7 +52,7 @@ impl Config { bail!("SSE_REPLAY_BUFFER_BLOCKS must be between 1 and 100000"); } - let da_tracking_enabled: bool = env::var("ENABLE_DA_TRACKING") + let explicit_da_tracking_enabled: bool = env::var("ENABLE_DA_TRACKING") .unwrap_or_else(|_| "false".to_string()) .parse() .context("Invalid ENABLE_DA_TRACKING")?; @@ -62,9 +62,11 @@ impl Config { .map(|url| url.trim().to_string()) .filter(|url| !url.is_empty()); + let da_tracking_enabled = explicit_da_tracking_enabled || raw_evnode_url.is_some(); + let evnode_url = if da_tracking_enabled { Some(raw_evnode_url.ok_or_else(|| { - anyhow::anyhow!("EVNODE_URL must be set when ENABLE_DA_TRACKING=true") + anyhow::anyhow!("EVNODE_URL must be set when DA tracking is enabled") })?) 
} else { None @@ -246,13 +248,28 @@ mod tests { let err = Config::from_env().unwrap_err(); assert!(err .to_string() - .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true")); + .contains("EVNODE_URL must be set when DA tracking is enabled")); env::set_var("EVNODE_URL", " "); let err = Config::from_env().unwrap_err(); assert!(err .to_string() - .contains("EVNODE_URL must be set when ENABLE_DA_TRACKING=true")); + .contains("EVNODE_URL must be set when DA tracking is enabled")); + + clear_da_env(); + } + + #[test] + fn evnode_url_alone_enables_da_tracking() { + let _lock = ENV_LOCK.lock().unwrap(); + set_required_env(); + clear_da_env(); + + env::set_var("EVNODE_URL", "http://ev-node:7331"); + + let config = Config::from_env().unwrap(); + assert!(config.da_tracking_enabled); + assert_eq!(config.evnode_url.as_deref(), Some("http://ev-node:7331")); clear_da_env(); } diff --git a/backend/crates/atlas-server/src/indexer/da_worker.rs b/backend/crates/atlas-server/src/indexer/da_worker.rs index 6bb2f78..8a6ed42 100644 --- a/backend/crates/atlas-server/src/indexer/da_worker.rs +++ b/backend/crates/atlas-server/src/indexer/da_worker.rs @@ -144,7 +144,6 @@ impl DaWorker { return Ok(0); } - let count = missing.len(); let pool = &self.pool; let client = &self.client; let rate_limiter = &self.rate_limiter; @@ -198,7 +197,7 @@ impl DaWorker { let updates: Vec = results.into_iter().flatten().collect(); self.notify_da_updates(&updates); - Ok(count) + Ok(updates.len()) } /// Phase 2: Re-check blocks where DA heights are still 0. 
@@ -218,7 +217,6 @@ impl DaWorker { return Ok(0); } - let count = pending.len(); let pool = &self.pool; let client = &self.client; let rate_limiter = &self.rate_limiter; @@ -228,10 +226,11 @@ impl DaWorker { rate_limiter.until_ready().await; match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { - if let Err(e) = sqlx::query( + match sqlx::query( "UPDATE block_da_status SET header_da_height = $2, data_da_height = $3, updated_at = NOW() - WHERE block_number = $1", + WHERE block_number = $1 + AND (header_da_height, data_da_height) IS DISTINCT FROM ($2, $3)", ) .bind(block_number) .bind(header_da as i64) @@ -239,18 +238,21 @@ impl DaWorker { .execute(pool) .await { - tracing::warn!( - "Failed to update DA status for block {}: {}", + Ok(result) if result.rows_affected() > 0 => Some(DaSseUpdate { block_number, - e - ); - return None; + header_da_height: header_da as i64, + data_da_height: data_da as i64, + }), + Ok(_) => None, + Err(e) => { + tracing::warn!( + "Failed to update DA status for block {}: {}", + block_number, + e + ); + None + } } - Some(DaSseUpdate { - block_number, - header_da_height: header_da as i64, - data_da_height: data_da as i64, - }) } Err(e) => { tracing::warn!( @@ -269,6 +271,6 @@ impl DaWorker { let updates: Vec = results.into_iter().flatten().collect(); self.notify_da_updates(&updates); - Ok(count) + Ok(updates.len()) } } diff --git a/backend/crates/atlas-server/src/indexer/evnode.rs b/backend/crates/atlas-server/src/indexer/evnode.rs index 2a37509..fb8d51d 100644 --- a/backend/crates/atlas-server/src/indexer/evnode.rs +++ b/backend/crates/atlas-server/src/indexer/evnode.rs @@ -92,27 +92,29 @@ impl EvnodeClient { match self.do_request(height).await { Ok((h, d)) => return Ok((h, d)), Err(e) => { - let delay_ms = RETRY_DELAYS_MS - .get(attempt) - .copied() - .unwrap_or(*RETRY_DELAYS_MS.last().unwrap()); - - tracing::warn!( - "ev-node GetBlock failed for height {} (attempt {}): {}. 
Retrying in {}ms", - height, - attempt + 1, - e, - delay_ms, - ); - last_error = Some(e); - tokio::time::sleep(Duration::from_millis(delay_ms)).await; + if attempt + 1 < MAX_RETRIES { + let delay_ms = RETRY_DELAYS_MS + .get(attempt) + .copied() + .unwrap_or(*RETRY_DELAYS_MS.last().unwrap()); + + tracing::warn!( + "ev-node GetBlock failed for height {} (attempt {}): {}. Retrying in {}ms", + height, + attempt + 1, + last_error.as_ref().unwrap(), + delay_ms, + ); + + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + } } } } bail!( - "ev-node GetBlock failed for height {} after {} retries: {}", + "ev-node GetBlock failed for height {} after {} attempts: {}", height, MAX_RETRIES, last_error.unwrap() diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index 144b08b..99ca111 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -93,7 +93,7 @@ async fn main() -> Result<()> { da_events_tx, )?; tokio::spawn(async move { - if let Err(e) = da_worker.run().await { + if let Err(e) = run_with_retry(|| da_worker.run()).await { tracing::error!("DA worker terminated with error: {}", e); } }); diff --git a/frontend/src/components/Layout.tsx b/frontend/src/components/Layout.tsx index da41b79..9013f42 100644 --- a/frontend/src/components/Layout.tsx +++ b/frontend/src/components/Layout.tsx @@ -191,9 +191,9 @@ export default function Layout() { bps: sse.bps, height: sse.height, latestBlockEvent: sse.latestBlock, - latestDaUpdate: sse.latestDaUpdate, sseConnected: sse.connected, subscribeDa: sse.subscribeDa, + subscribeDaResync: sse.subscribeDaResync, }} > diff --git a/frontend/src/context/BlockStatsContext.tsx b/frontend/src/context/BlockStatsContext.tsx index 382c831..63e7efa 100644 --- a/frontend/src/context/BlockStatsContext.tsx +++ b/frontend/src/context/BlockStatsContext.tsx @@ -1,13 +1,13 @@ import { createContext } from 'react'; -import type { NewBlockEvent, DaUpdateEvent, 
DaSubscriber } from '../hooks/useBlockSSE'; +import type { NewBlockEvent, DaSubscriber, DaResyncSubscriber } from '../hooks/useBlockSSE'; export interface BlockStats { bps: number | null; height: number | null; latestBlockEvent: NewBlockEvent | null; - latestDaUpdate: DaUpdateEvent | null; sseConnected: boolean; subscribeDa: (cb: DaSubscriber) => () => void; + subscribeDaResync: (cb: DaResyncSubscriber) => () => void; } const noopSubscribe = () => () => {}; @@ -16,7 +16,7 @@ export const BlockStatsContext = createContext({ bps: null, height: null, latestBlockEvent: null, - latestDaUpdate: null, sseConnected: false, subscribeDa: noopSubscribe, + subscribeDaResync: noopSubscribe, }); diff --git a/frontend/src/hooks/useBlockSSE.ts b/frontend/src/hooks/useBlockSSE.ts index 1e1683b..299f302 100644 --- a/frontend/src/hooks/useBlockSSE.ts +++ b/frontend/src/hooks/useBlockSSE.ts @@ -18,15 +18,16 @@ export interface DaBatchEvent { } export type DaSubscriber = (updates: DaUpdateEvent[]) => void; +export type DaResyncSubscriber = () => void; export interface BlockSSEState { latestBlock: NewBlockEvent | null; - latestDaUpdate: DaUpdateEvent | null; height: number | null; connected: boolean; error: string | null; bps: number | null; subscribeDa: (cb: DaSubscriber) => () => void; + subscribeDaResync: (cb: DaResyncSubscriber) => () => void; } type BlockLog = { num: number; ts: number }[]; @@ -48,19 +49,25 @@ function computeBpsFromLog(log: BlockLog, minSpan: number, fallbackMinSpan: numb export default function useBlockSSE(): BlockSSEState { const [latestBlock, setLatestBlock] = useState(null); - const [latestDaUpdate, setLatestDaUpdate] = useState(null); const [height, setHeight] = useState(null); const [connected, setConnected] = useState(false); const [error, setError] = useState(null); const [bps, setBps] = useState(null); const daSubscribersRef = useRef>(new Set()); + const daResyncSubscribersRef = useRef>(new Set()); const subscribeDa = useCallback((cb: DaSubscriber) => { 
daSubscribersRef.current.add(cb); return () => { daSubscribersRef.current.delete(cb); }; }, []); + const subscribeDaResync = useCallback((cb: DaResyncSubscriber) => { + daResyncSubscribersRef.current.add(cb); + return () => { + daResyncSubscribersRef.current.delete(cb); + }; + }, []); const esRef = useRef(null); const queueRef = useRef([]); @@ -172,7 +179,6 @@ export default function useBlockSSE(): BlockSSEState { try { const data: DaBatchEvent = JSON.parse(e.data); if (data.updates?.length) { - setLatestDaUpdate(data.updates[data.updates.length - 1]); for (const cb of daSubscribersRef.current) cb(data.updates); } } catch { @@ -180,6 +186,10 @@ export default function useBlockSSE(): BlockSSEState { } }); + es.addEventListener('da_resync', () => { + for (const cb of daResyncSubscribersRef.current) cb(); + }); + es.onerror = (e) => { connectedRef.current = false; setConnected(false); @@ -203,5 +213,5 @@ export default function useBlockSSE(): BlockSSEState { }; }, [connect, stopPolling]); - return { latestBlock, latestDaUpdate, height, connected, error, bps, subscribeDa }; + return { latestBlock, height, connected, error, bps, subscribeDa, subscribeDaResync }; } diff --git a/frontend/src/hooks/useFeatures.ts b/frontend/src/hooks/useFeatures.ts index 5827e62..4a5c40e 100644 --- a/frontend/src/hooks/useFeatures.ts +++ b/frontend/src/hooks/useFeatures.ts @@ -3,24 +3,65 @@ import { getStatus } from '../api/status'; import type { ChainFeatures } from '../types'; const defaultFeatures: ChainFeatures = { da_tracking: false }; +type FeaturesListener = (features: ChainFeatures) => void; + +let cachedFeatures: ChainFeatures | null = null; +let featuresPromise: Promise | null = null; +const listeners = new Set(); + +function notifyFeatures(features: ChainFeatures) { + for (const listener of listeners) { + listener(features); + } +} + +function loadFeatures(): Promise { + if (cachedFeatures) { + return Promise.resolve(cachedFeatures); + } + + if (!featuresPromise) { + 
featuresPromise = getStatus() + .then((status) => status.features ?? defaultFeatures) + .catch(() => defaultFeatures) + .then((features) => { + cachedFeatures = features; + notifyFeatures(features); + return features; + }) + .finally(() => { + featuresPromise = null; + }); + } + + return featuresPromise; +} /** - * Fetches chain feature flags from /api/status once on mount. - * Returns the features object (defaults to all disabled until loaded). + * Returns cached chain feature flags, loading them only once per app session. */ export default function useFeatures(): ChainFeatures { - const [features, setFeatures] = useState(defaultFeatures); + const [features, setFeatures] = useState(cachedFeatures ?? defaultFeatures); useEffect(() => { - let cancelled = false; - getStatus().then((status) => { - if (!cancelled && status.features) { - setFeatures(status.features); + let active = true; + const listener: FeaturesListener = (nextFeatures) => { + if (active) { + setFeatures(nextFeatures); + } + }; + + listeners.add(listener); + void loadFeatures().then((nextFeatures) => { + if (active) { + setFeatures(nextFeatures); } - }).catch(() => { - // Silently use defaults on error }); - return () => { cancelled = true; }; + + return () => { + active = false; + listeners.delete(listener); + }; }, []); return features; diff --git a/frontend/src/index.css b/frontend/src/index.css index 4245ff5..ca46cc6 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -170,11 +170,11 @@ /* DA dot pulse when status changes via SSE */ .animate-da-pulse { - animation: daPulse 1.5s ease-out; + animation: da-pulse 1.5s ease-out; } -@keyframes daPulse { - 0% { transform: scale(2.2); opacity: 0.5; box-shadow: 0 0 8px currentColor; } +@keyframes da-pulse { + 0% { transform: scale(2.2); opacity: 0.5; box-shadow: 0 0 8px currentcolor; } 40% { transform: scale(1.4); opacity: 0.8; } 100% { transform: scale(1); opacity: 1; box-shadow: none; } } diff --git 
a/frontend/src/pages/BlockDetailPage.tsx b/frontend/src/pages/BlockDetailPage.tsx index 9d5f3e7..ede4818 100644 --- a/frontend/src/pages/BlockDetailPage.tsx +++ b/frontend/src/pages/BlockDetailPage.tsx @@ -28,15 +28,16 @@ function formatDaStatus(daHeight: number): ReactNode { export default function BlockDetailPage() { const { number } = useParams<{ number: string }>(); const blockNumber = number ? parseInt(number, 10) : undefined; - const { block, loading: blockLoading, error: blockError } = useBlock(blockNumber); + const { block, loading: blockLoading, error: blockError, refetch: refetchBlock } = useBlock(blockNumber); const features = useFeatures(); const [txPage, setTxPage] = useState(1); const { transactions, pagination, loading } = useBlockTransactions(blockNumber, { page: txPage, limit: 20 }); - const { subscribeDa } = useContext(BlockStatsContext); + const { subscribeDa, subscribeDaResync } = useContext(BlockStatsContext); const [daOverride, setDaOverride] = useState(null); // Persist DA updates for this block until navigation or a full refetch catches up. useEffect(() => { + if (!features.da_tracking) return; return subscribeDa((updates) => { const match = updates.find((update) => update.block_number === blockNumber); if (!match) return; @@ -47,7 +48,15 @@ export default function BlockDetailPage() { updated_at: new Date().toISOString(), }); }); - }, [subscribeDa, blockNumber]); + }, [features.da_tracking, subscribeDa, blockNumber]); + + useEffect(() => { + if (!features.da_tracking) return; + return subscribeDaResync(() => { + setDaOverride(null); + void refetchBlock(); + }); + }, [features.da_tracking, refetchBlock, subscribeDaResync]); const currentDaOverride = daOverride?.block_number === blockNumber ? 
daOverride : null; diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 56ce739..b3ef14d 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -25,11 +25,14 @@ export default function BlocksPage() { const { blocks: fetchedBlocks, pagination, refetch, loading } = useBlocks({ page, limit: BLOCKS_PAGE_SIZE }); const features = useFeatures(); const hasLoaded = !loading || pagination !== null; - const { latestBlockEvent, sseConnected, subscribeDa } = useContext(BlockStatsContext); + const { latestBlockEvent, sseConnected, subscribeDa, subscribeDaResync } = useContext(BlockStatsContext); const [daOverrides, setDaOverrides] = useState>(new Map()); const [daHighlight, setDaHighlight] = useState>(new Set()); + const daOverridesRef = useRef>(new Map()); + const daOverridesSyncRafRef = useRef(null); const daHighlightTimeoutsRef = useRef>(new Map()); const baseDaIncludedRef = useRef>(new Map()); + const visibleDaBlocksRef = useRef>(new Set()); const [, setTick] = useState(0); const [sseBlocks, setSseBlocks] = useState([]); const lastSseBlockRef = useRef(null); @@ -92,59 +95,133 @@ export default function BlocksPage() { }, [fetchedBlocks, fetchedNumberSet, sseBlocks, page]); useEffect(() => { + if (!features.da_tracking) { + baseDaIncludedRef.current = new Map(); + visibleDaBlocksRef.current = new Set(); + if (daOverridesRef.current.size > 0) { + const empty = new Map(); + daOverridesRef.current = empty; + if (daOverridesSyncRafRef.current !== null) { + cancelAnimationFrame(daOverridesSyncRafRef.current); + } + daOverridesSyncRafRef.current = window.requestAnimationFrame(() => { + setDaOverrides(empty); + daOverridesSyncRafRef.current = null; + }); + } + return; + } + + const visible = new Set(); const next = new Map(); for (const block of blocks) { + visible.add(block.number); next.set(block.number, isDaIncluded(block.da_status)); } baseDaIncludedRef.current = next; - }, [blocks]); + 
visibleDaBlocksRef.current = visible; + + let changed = false; + const nextOverrides = new Map(); + for (const [blockNumber, status] of daOverridesRef.current) { + if (!visible.has(blockNumber)) { + changed = true; + continue; + } + nextOverrides.set(blockNumber, status); + } + + if (changed || nextOverrides.size !== daOverridesRef.current.size) { + daOverridesRef.current = nextOverrides; + if (daOverridesSyncRafRef.current !== null) { + cancelAnimationFrame(daOverridesSyncRafRef.current); + } + daOverridesSyncRafRef.current = window.requestAnimationFrame(() => { + setDaOverrides(nextOverrides); + daOverridesSyncRafRef.current = null; + }); + } + }, [blocks, features.da_tracking]); // Subscribe to DA updates from SSE. setState is called inside the subscription // callback (not synchronously in the effect body), satisfying react-hooks/set-state-in-effect. useEffect(() => { + if (!features.da_tracking) return; return subscribeDa((updates) => { - const transitionedToIncluded = new Set(); - setDaOverrides(prev => { - const next = new Map(prev); - for (const u of updates) { - const prevStatus = prev.get(u.block_number); - const wasIncluded = prevStatus - ? isDaIncluded(prevStatus) - : (baseDaIncludedRef.current.get(u.block_number) ?? 
false); - const nextStatus = { - block_number: u.block_number, - header_da_height: u.header_da_height, - data_da_height: u.data_da_height, - updated_at: new Date().toISOString(), - }; - const nowIncluded = isDaIncluded(nextStatus); - if (!wasIncluded && nowIncluded) { - transitionedToIncluded.add(u.block_number); - } - next.set(u.block_number, { - ...nextStatus, - }); + const visible = visibleDaBlocksRef.current; + if (visible.size === 0) return; + + const next = new Map(); + for (const [blockNumber, status] of daOverridesRef.current) { + if (visible.has(blockNumber)) { + next.set(blockNumber, status); } - return next; - }); + } + + const transitionedToIncluded: number[] = []; + let changed = next.size !== daOverridesRef.current.size; + + for (const update of updates) { + if (!visible.has(update.block_number)) continue; + + const prevStatus = next.get(update.block_number); + const wasIncluded = prevStatus + ? isDaIncluded(prevStatus) + : (baseDaIncludedRef.current.get(update.block_number) ?? false); + const nextStatus = { + block_number: update.block_number, + header_da_height: update.header_da_height, + data_da_height: update.data_da_height, + updated_at: new Date().toISOString(), + }; + + if ( + prevStatus?.header_da_height === nextStatus.header_da_height + && prevStatus?.data_da_height === nextStatus.data_da_height + ) { + continue; + } + + if (!wasIncluded && isDaIncluded(nextStatus)) { + transitionedToIncluded.push(update.block_number); + } + + next.set(update.block_number, nextStatus); + changed = true; + } + + if (!changed) return; + + daOverridesRef.current = next; + setDaOverrides(next); // Flash dots for 1.5s only when status transitions from pending -> included. 
- for (const bn of transitionedToIncluded) { - setDaHighlight(prev => new Set(prev).add(bn)); - const existing = daHighlightTimeoutsRef.current.get(bn); + for (const blockNumber of transitionedToIncluded) { + setDaHighlight((prev) => new Set(prev).add(blockNumber)); + const existing = daHighlightTimeoutsRef.current.get(blockNumber); if (existing !== undefined) clearTimeout(existing); const t = window.setTimeout(() => { - setDaHighlight(p => { - const next = new Set(p); - next.delete(bn); - return next; + setDaHighlight((prev) => { + const nextHighlight = new Set(prev); + nextHighlight.delete(blockNumber); + return nextHighlight; }); - daHighlightTimeoutsRef.current.delete(bn); + daHighlightTimeoutsRef.current.delete(blockNumber); }, 1500); - daHighlightTimeoutsRef.current.set(bn, t); + daHighlightTimeoutsRef.current.set(blockNumber, t); } }); - }, [subscribeDa]); + }, [features.da_tracking, subscribeDa]); + + useEffect(() => { + if (!features.da_tracking) return; + return subscribeDaResync(() => { + const empty = new Map(); + daOverridesRef.current = empty; + setDaOverrides(empty); + void refetch(); + }); + }, [features.da_tracking, refetch, subscribeDaResync]); const navigate = useNavigate(); const [sort, setSort] = useState<{ key: 'number' | 'hash' | 'timestamp' | 'transaction_count' | 'gas_used' | null; direction: 'asc' | 'desc'; }>({ key: null, direction: 'desc' }); @@ -276,6 +353,10 @@ export default function BlocksPage() { window.cancelAnimationFrame(highlightRafRef.current); highlightRafRef.current = null; } + if (daOverridesSyncRafRef.current !== null) { + cancelAnimationFrame(daOverridesSyncRafRef.current); + daOverridesSyncRafRef.current = null; + } if (ssePrependRafRef.current !== null) { cancelAnimationFrame(ssePrependRafRef.current); ssePrependRafRef.current = null; From 09897fe310546a33c50ac2da44217ab2a3917758 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:18:50 +0100 Subject: [PATCH 18/20] 
Fix DA worker and buffered SSE updates --- .../atlas-server/src/indexer/da_worker.rs | 175 +++++++++++++----- frontend/src/pages/BlocksPage.tsx | 25 ++- 2 files changed, 152 insertions(+), 48 deletions(-) diff --git a/backend/crates/atlas-server/src/indexer/da_worker.rs b/backend/crates/atlas-server/src/indexer/da_worker.rs index 8a6ed42..17b1fcf 100644 --- a/backend/crates/atlas-server/src/indexer/da_worker.rs +++ b/backend/crates/atlas-server/src/indexer/da_worker.rs @@ -44,6 +44,30 @@ const BATCH_SIZE: i64 = 100; /// Sleep when idle (no work in either phase). const IDLE_SLEEP: Duration = Duration::from_millis(500); +const SELECT_MISSING_BLOCKS_SQL: &str = "SELECT b.number FROM blocks b + LEFT JOIN block_da_status d ON d.block_number = b.number + WHERE d.block_number IS NULL + ORDER BY b.number DESC + LIMIT $1"; + +const INSERT_DA_STATUS_SQL: &str = + "INSERT INTO block_da_status (block_number, header_da_height, data_da_height) + VALUES ($1, $2, $3) + ON CONFLICT (block_number) DO UPDATE SET + header_da_height = EXCLUDED.header_da_height, + data_da_height = EXCLUDED.data_da_height, + updated_at = NOW()"; + +const SELECT_PENDING_BLOCKS_SQL: &str = "SELECT block_number FROM block_da_status + WHERE header_da_height = 0 OR data_da_height = 0 + ORDER BY block_number DESC + LIMIT $1"; + +const UPDATE_PENDING_DA_STATUS_SQL: &str = "UPDATE block_da_status + SET header_da_height = $2, data_da_height = $3, updated_at = NOW() + WHERE block_number = $1 + AND (header_da_height, data_da_height) IS DISTINCT FROM ($2, $3)"; + #[derive(Clone, Debug)] pub struct DaSseUpdate { pub block_number: i64, @@ -74,12 +98,14 @@ impl DaWorker { requests_per_second: u32, da_events_tx: broadcast::Sender>, ) -> Result { + let concurrency = NonZeroU32::new(concurrency) + .ok_or_else(|| anyhow::anyhow!("DA_WORKER_CONCURRENCY must be greater than 0"))?; let rate = NonZeroU32::new(requests_per_second) .ok_or_else(|| anyhow::anyhow!("DA_RPC_REQUESTS_PER_SECOND must be greater than 0"))?; Ok(Self { 
pool, client: EvnodeClient::new(evnode_url), - concurrency: concurrency as usize, + concurrency: concurrency.get() as usize, requests_per_second, rate_limiter: Arc::new(RateLimiter::direct(Quota::per_second(rate))), da_events_tx, @@ -129,16 +155,10 @@ impl DaWorker { /// Phase 1: Find blocks missing from block_da_status and query ev-node. /// Returns the number of blocks processed. async fn backfill_new_blocks(&self, limit: i64) -> Result { - let missing: Vec<(i64,)> = sqlx::query_as( - "SELECT b.number FROM blocks b - LEFT JOIN block_da_status d ON d.block_number = b.number - WHERE d.block_number IS NULL - ORDER BY b.number DESC - LIMIT $1", - ) - .bind(limit) - .fetch_all(&self.pool) - .await?; + let missing: Vec<(i64,)> = sqlx::query_as(SELECT_MISSING_BLOCKS_SQL) + .bind(limit) + .fetch_all(&self.pool) + .await?; if missing.is_empty() { return Ok(0); @@ -153,19 +173,12 @@ impl DaWorker { rate_limiter.until_ready().await; match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { - if let Err(e) = sqlx::query( - "INSERT INTO block_da_status (block_number, header_da_height, data_da_height) - VALUES ($1, $2, $3) - ON CONFLICT (block_number) DO UPDATE SET - header_da_height = EXCLUDED.header_da_height, - data_da_height = EXCLUDED.data_da_height, - updated_at = NOW()", - ) - .bind(block_number) - .bind(header_da as i64) - .bind(data_da as i64) - .execute(pool) - .await + if let Err(e) = sqlx::query(INSERT_DA_STATUS_SQL) + .bind(block_number) + .bind(header_da as i64) + .bind(data_da as i64) + .execute(pool) + .await { tracing::warn!( "Failed to insert DA status for block {}: {}", @@ -203,15 +216,10 @@ impl DaWorker { /// Phase 2: Re-check blocks where DA heights are still 0. /// Returns the number of blocks processed. 
async fn update_pending_blocks(&self, limit: i64) -> Result { - let pending: Vec<(i64,)> = sqlx::query_as( - "SELECT block_number FROM block_da_status - WHERE header_da_height = 0 OR data_da_height = 0 - ORDER BY block_number DESC - LIMIT $1", - ) - .bind(limit) - .fetch_all(&self.pool) - .await?; + let pending: Vec<(i64,)> = sqlx::query_as(SELECT_PENDING_BLOCKS_SQL) + .bind(limit) + .fetch_all(&self.pool) + .await?; if pending.is_empty() { return Ok(0); @@ -226,17 +234,12 @@ impl DaWorker { rate_limiter.until_ready().await; match client.get_da_status(block_number as u64).await { Ok((header_da, data_da)) => { - match sqlx::query( - "UPDATE block_da_status - SET header_da_height = $2, data_da_height = $3, updated_at = NOW() - WHERE block_number = $1 - AND (header_da_height, data_da_height) IS DISTINCT FROM ($2, $3)", - ) - .bind(block_number) - .bind(header_da as i64) - .bind(data_da as i64) - .execute(pool) - .await + match sqlx::query(UPDATE_PENDING_DA_STATUS_SQL) + .bind(block_number) + .bind(header_da as i64) + .bind(data_da as i64) + .execute(pool) + .await { Ok(result) if result.rows_affected() > 0 => Some(DaSseUpdate { block_number, @@ -274,3 +277,87 @@ impl DaWorker { Ok(updates.len()) } } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_pool() -> PgPool { + sqlx::postgres::PgPoolOptions::new() + .connect_lazy("postgres://test@localhost:5432/test") + .expect("lazy pool creation should not fail") + } + + #[tokio::test] + async fn new_rejects_zero_concurrency() { + let (tx, _) = broadcast::channel(1); + let err = DaWorker::new(test_pool(), "http://localhost:7331", 0, 50, tx) + .err() + .expect("zero concurrency should fail"); + + assert!(err + .to_string() + .contains("DA_WORKER_CONCURRENCY must be greater than 0")); + } + + #[tokio::test] + async fn new_rejects_zero_rate_limit() { + let (tx, _) = broadcast::channel(1); + let err = DaWorker::new(test_pool(), "http://localhost:7331", 4, 0, tx) + .err() + .expect("zero rate limit should fail"); + + 
assert!(err + .to_string() + .contains("DA_RPC_REQUESTS_PER_SECOND must be greater than 0")); + } + + #[tokio::test] + async fn notify_da_updates_sends_full_batch() { + let (tx, mut rx) = broadcast::channel(1); + let worker = DaWorker::new(test_pool(), "http://localhost:7331", 4, 50, tx).unwrap(); + let updates = vec![ + DaSseUpdate { + block_number: 10, + header_da_height: 100, + data_da_height: 101, + }, + DaSseUpdate { + block_number: 11, + header_da_height: 110, + data_da_height: 111, + }, + ]; + + worker.notify_da_updates(&updates); + + let received = rx.recv().await.expect("batch should be broadcast"); + assert_eq!(received.len(), 2); + assert_eq!(received[0].block_number, 10); + assert_eq!(received[1].data_da_height, 111); + } + + #[tokio::test] + async fn notify_da_updates_skips_empty_batch() { + let (tx, mut rx) = broadcast::channel(1); + let worker = DaWorker::new(test_pool(), "http://localhost:7331", 4, 50, tx).unwrap(); + + worker.notify_da_updates(&[]); + + let result = tokio::time::timeout(Duration::from_millis(50), rx.recv()).await; + assert!(result.is_err(), "empty batch should not be broadcast"); + } + + #[test] + fn scheduler_queries_prioritize_newest_blocks() { + assert!(SELECT_MISSING_BLOCKS_SQL.contains("ORDER BY b.number DESC")); + assert!(SELECT_PENDING_BLOCKS_SQL.contains("ORDER BY block_number DESC")); + assert!(SELECT_MISSING_BLOCKS_SQL.contains("LIMIT $1")); + assert!(SELECT_PENDING_BLOCKS_SQL.contains("LIMIT $1")); + } + + #[test] + fn pending_update_sql_suppresses_noop_writes() { + assert!(UPDATE_PENDING_DA_STATUS_SQL.contains("IS DISTINCT FROM")); + } +} diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index b3ef14d..3e82e8a 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -33,6 +33,7 @@ export default function BlocksPage() { const daHighlightTimeoutsRef = useRef>(new Map()); const baseDaIncludedRef = useRef>(new Map()); const visibleDaBlocksRef = useRef>(new 
Set()); + const bufferedDaBlocksRef = useRef<Set<number>>(new Set()); const [, setTick] = useState(0); const [sseBlocks, setSseBlocks] = useState([]); const lastSseBlockRef = useRef(null); @@ -55,6 +56,7 @@ export default function BlocksPage() { if (lastSseBlockRef.current != null && block.number <= lastSseBlockRef.current) return; lastSseBlockRef.current = block.number; pendingSseBlocksRef.current.push(block); + bufferedDaBlocksRef.current.add(block.number); if (ssePrependRafRef.current !== null) return; // RAF already scheduled; block is buffered ssePrependRafRef.current = window.requestAnimationFrame(() => { const pending = pendingSseBlocksRef.current; @@ -74,6 +76,19 @@ export default function BlocksPage() { }); }, [latestBlockEvent, page, autoRefresh]); + useEffect(() => { + if (page !== 1 || !autoRefresh) { + bufferedDaBlocksRef.current = new Set(); + return; + } + + const next = new Set(sseBlocks.map((block) => block.number)); + for (const block of pendingSseBlocksRef.current) { + next.add(block.number); + } + bufferedDaBlocksRef.current = next; + }, [autoRefresh, page, sseBlocks]); + // Drop SSE blocks that are now present in fetchedBlocks to avoid duplicates, // but keep any that haven't been fetched yet. 
useEffect(() => { @@ -120,11 +135,12 @@ export default function BlocksPage() { } baseDaIncludedRef.current = next; visibleDaBlocksRef.current = visible; + const buffered = bufferedDaBlocksRef.current; let changed = false; const nextOverrides = new Map(); for (const [blockNumber, status] of daOverridesRef.current) { - if (!visible.has(blockNumber)) { + if (!visible.has(blockNumber) && !buffered.has(blockNumber)) { changed = true; continue; } @@ -149,11 +165,12 @@ export default function BlocksPage() { if (!features.da_tracking) return; return subscribeDa((updates) => { const visible = visibleDaBlocksRef.current; - if (visible.size === 0) return; + const buffered = bufferedDaBlocksRef.current; + if (visible.size === 0 && buffered.size === 0) return; const next = new Map(); for (const [blockNumber, status] of daOverridesRef.current) { - if (visible.has(blockNumber)) { + if (visible.has(blockNumber) || buffered.has(blockNumber)) { next.set(blockNumber, status); } } @@ -162,7 +179,7 @@ export default function BlocksPage() { let changed = next.size !== daOverridesRef.current.size; for (const update of updates) { - if (!visible.has(update.block_number)) continue; + if (!visible.has(update.block_number) && !buffered.has(update.block_number)) continue; const prevStatus = next.get(update.block_number); const wasIncluded = prevStatus From ae1f38c24c94d147cc7a8aca29e29bd7bf8b24f1 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:41:50 +0100 Subject: [PATCH 19/20] Clarify explicit DA tracking enablement --- .env.example | 12 +++-- CLAUDE.md | 2 +- .../atlas-server/src/api/handlers/sse.rs | 45 +++++++++---------- backend/crates/atlas-server/src/config.rs | 17 ++++--- backend/crates/atlas-server/src/main.rs | 4 +- 5 files changed, 39 insertions(+), 41 deletions(-) diff --git a/.env.example b/.env.example index 991e763..161f2aa 100644 --- a/.env.example +++ b/.env.example @@ -31,15 +31,13 @@ RPC_BATCH_SIZE=20 # 
SSE_REPLAY_BUFFER_BLOCKS=4096 # replay tail used only for active connected clients # Optional: enable DA (Data Availability) inclusion tracking from ev-node. -# A non-empty EVNODE_URL enables DA tracking automatically. -# ENABLE_DA_TRACKING=true is an optional explicit toggle/validation flag. -# ENABLE_DA_TRACKING=false +# Set this to true only when you also provide EVNODE_URL below. +ENABLE_DA_TRACKING=false -# ev-node URL reachable from the atlas-server process/container when DA tracking is enabled. +# Required when ENABLE_DA_TRACKING=true. +# Must be reachable from the atlas-server process/container. # EVNODE_URL=http://:7331 -# Maximum ev-node requests per second used by the DA worker (default: 50) +# Optional when ENABLE_DA_TRACKING=true. # DA_RPC_REQUESTS_PER_SECOND=50 - -# Number of concurrent DA requests/workers (default: 50) # DA_WORKER_CONCURRENCY=50 diff --git a/CLAUDE.md b/CLAUDE.md index 084a653..cebbaf7 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -89,7 +89,7 @@ pub struct AppState { ``` ### DA tracking (optional) -When DA tracking is enabled, a background DA worker queries ev-node for Celestia inclusion heights per block. Updates are pushed to SSE clients via an in-process `broadcast::Sender>`. The SSE handler streams `da_batch` events for incremental updates and emits `da_resync` when a client falls behind and should refetch visible DA state. +When `ENABLE_DA_TRACKING=true`, a background DA worker queries ev-node for Celestia inclusion heights per block. `EVNODE_URL` is required only in that mode. Updates are pushed to SSE clients via an in-process `broadcast::Sender>`. The SSE handler streams `da_batch` events for incremental updates and emits `da_resync` when a client falls behind and should refetch visible DA state. 
### Frontend API client - Base URL: `/api` (proxied by nginx to `atlas-server:3000`) diff --git a/backend/crates/atlas-server/src/api/handlers/sse.rs b/backend/crates/atlas-server/src/api/handlers/sse.rs index 17e5b9b..16c60ae 100644 --- a/backend/crates/atlas-server/src/api/handlers/sse.rs +++ b/backend/crates/atlas-server/src/api/handlers/sse.rs @@ -1,12 +1,10 @@ use axum::{ extract::State, response::sse::{Event, Sse}, - response::IntoResponse, }; use futures::stream::Stream; use serde::Serialize; use std::convert::Infallible; -use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use tokio::sync::broadcast; @@ -19,8 +17,6 @@ use atlas_common::Block; use sqlx::PgPool; use tracing::warn; -type SseStream = Pin> + Send>>; - #[derive(Serialize, Debug)] struct NewBlockEvent { block: Block, @@ -43,8 +39,8 @@ struct DaResyncEvent { required: bool, } -/// Build the SSE block stream. Separated from the handler for testability. -fn make_block_stream( +/// Build the SSE stream. Separated from the handler for testability. +fn make_event_stream( pool: PgPool, head_tracker: Arc, mut block_rx: broadcast::Receiver<()>, @@ -139,25 +135,19 @@ fn make_block_stream( } /// GET /api/events — Server-Sent Events stream for live committed block updates. -/// New connections receive the current latest block and then stream forward -/// from in-memory committed head state, plus DA update batches and resync -/// signals for DA consumers. -pub async fn block_events(State(state): State>) -> impl IntoResponse { - let stream = make_block_stream( +/// New connections receive the current latest block and then stream forward from +/// in-memory committed head state, plus DA status update batches. If the DA +/// stream lags, the handler emits `da_resync` so the frontend can refetch the +/// visible DA state instead of silently going stale. 
+pub async fn block_events( + State(state): State>, +) -> Sse>> { + let stream = make_event_stream( state.pool.clone(), state.head_tracker.clone(), state.block_events_tx.subscribe(), state.da_events_tx.subscribe(), ); - sse_response(stream) -} - -fn sse_response(stream: S) -> impl IntoResponse -where - S: Stream> + Send + 'static, -{ - let stream: SseStream = Box::pin(stream); - Sse::new(stream).keep_alive( axum::response::sse::KeepAlive::new() .interval(Duration::from_secs(15)) @@ -291,9 +281,10 @@ mod tests { let (tx, _) = broadcast::channel::<()>(16); let (da_tx, _) = broadcast::channel::>(16); - let stream = make_block_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); + let stream = make_event_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); tokio::pin!(stream); + // Drop sender so loop terminates after the initial seed. drop(tx); drop(da_tx); @@ -317,7 +308,7 @@ mod tests { let (tx, _) = broadcast::channel::<()>(16); let (da_tx, _) = broadcast::channel::>(16); - let stream = make_block_stream( + let stream = make_event_stream( dummy_pool(), tracker.clone(), tx.subscribe(), @@ -325,10 +316,12 @@ mod tests { ); tokio::pin!(stream); + // Consume initial seed. let _ = tokio::time::timeout(Duration::from_secs(1), stream.next()) .await .unwrap(); + // Publish a new block and broadcast. tracker .publish_committed_batch(vec![sample_block(43)]) .await; @@ -347,6 +340,7 @@ mod tests { #[tokio::test] async fn stream_terminates_when_client_behind_tail() { + // Buffer capacity 3: only keeps 3 most recent blocks. 
let tracker = Arc::new(HeadTracker::empty(3)); tracker .publish_committed_batch(vec![sample_block(10), sample_block(11), sample_block(12)]) @@ -354,7 +348,7 @@ mod tests { let (tx, _) = broadcast::channel::<()>(16); let (da_tx, _) = broadcast::channel::>(16); - let stream = make_block_stream( + let stream = make_event_stream( dummy_pool(), tracker.clone(), tx.subscribe(), @@ -362,10 +356,12 @@ mod tests { ); tokio::pin!(stream); + // Consume initial seed (latest = block 12). let _ = tokio::time::timeout(Duration::from_secs(1), stream.next()) .await .unwrap(); + // Advance buffer far ahead: client cursor=12, buffer will be [23,24,25]. tracker .publish_committed_batch(vec![ sample_block(20), @@ -378,6 +374,7 @@ mod tests { .await; tx.send(()).unwrap(); + // Stream should detect behind-tail and terminate. let result = tokio::time::timeout(Duration::from_secs(2), async { while (stream.next().await).is_some() {} }) @@ -400,7 +397,7 @@ mod tests { let (tx, _) = broadcast::channel::<()>(16); let (da_tx, _) = broadcast::channel::>(1); - let stream = make_block_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); + let stream = make_event_stream(dummy_pool(), tracker, tx.subscribe(), da_tx.subscribe()); tokio::pin!(stream); let _ = tokio::time::timeout(Duration::from_secs(1), stream.next()) diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index f3d3cff..f15a934 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -53,7 +53,7 @@ impl Config { bail!("SSE_REPLAY_BUFFER_BLOCKS must be between 1 and 100000"); } - let explicit_da_tracking_enabled: bool = env::var("ENABLE_DA_TRACKING") + let da_tracking_enabled: bool = env::var("ENABLE_DA_TRACKING") .unwrap_or_else(|_| "false".to_string()) .parse() .context("Invalid ENABLE_DA_TRACKING")?; @@ -63,8 +63,6 @@ impl Config { .map(|url| url.trim().to_string()) .filter(|url| !url.is_empty()); - let da_tracking_enabled = 
explicit_da_tracking_enabled || raw_evnode_url.is_some(); - let evnode_url = if da_tracking_enabled { Some(raw_evnode_url.ok_or_else(|| { anyhow::anyhow!("EVNODE_URL must be set when DA tracking is enabled") @@ -224,7 +222,7 @@ mod tests { set_required_env(); clear_da_env(); - env::set_var("EVNODE_URL", ""); + env::set_var("EVNODE_URL", "http://ev-node:7331"); env::set_var("DA_WORKER_CONCURRENCY", "not-a-number"); env::set_var("DA_RPC_REQUESTS_PER_SECOND", "not-a-number"); @@ -262,7 +260,7 @@ mod tests { } #[test] - fn evnode_url_alone_enables_da_tracking() { + fn evnode_url_alone_does_not_enable_da_tracking() { let _lock = ENV_LOCK.lock().unwrap(); set_required_env(); clear_da_env(); @@ -270,8 +268,13 @@ mod tests { env::set_var("EVNODE_URL", "http://ev-node:7331"); let config = Config::from_env().unwrap(); - assert!(config.da_tracking_enabled); - assert_eq!(config.evnode_url.as_deref(), Some("http://ev-node:7331")); + assert!(!config.da_tracking_enabled); + assert!(config.evnode_url.is_none()); + assert_eq!(config.da_worker_concurrency, DEFAULT_DA_WORKER_CONCURRENCY); + assert_eq!( + config.da_rpc_requests_per_second, + DEFAULT_DA_RPC_REQUESTS_PER_SECOND + ); clear_da_env(); } diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index a55351e..b3e69ab 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -106,12 +106,12 @@ async fn main() -> Result<()> { } }); - // Spawn DA worker if EVNODE_URL is configured + // Spawn DA worker when DA tracking is explicitly enabled. 
if config.da_tracking_enabled { let evnode_url = config .evnode_url .as_deref() - .expect("ENABLE_DA_TRACKING requires EVNODE_URL"); + .expect("DA tracking requires EVNODE_URL"); tracing::info!( "DA tracking enabled (workers: {}, rate_limit: {} req/s)", config.da_worker_concurrency, From fadeab2026206e716c5fd3a57c8c82ef02fbe01e Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Thu, 19 Mar 2026 13:34:06 +0100 Subject: [PATCH 20/20] Prevent stale DA override sync frames --- frontend/src/pages/BlocksPage.tsx | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/frontend/src/pages/BlocksPage.tsx b/frontend/src/pages/BlocksPage.tsx index 78e0c2d..567b257 100644 --- a/frontend/src/pages/BlocksPage.tsx +++ b/frontend/src/pages/BlocksPage.tsx @@ -41,6 +41,13 @@ export default function BlocksPage() { const pendingSseBlocksRef = useRef([]); const sseFilterRafRef = useRef(null); + const cancelDaOverridesSync = () => { + if (daOverridesSyncRafRef.current !== null) { + cancelAnimationFrame(daOverridesSyncRafRef.current); + daOverridesSyncRafRef.current = null; + } + }; + // Cache fetched block numbers to avoid recreating Sets on every effect/memo const fetchedNumberSet = useMemo( () => new Set(fetchedBlocks.map((b) => b.number)), @@ -118,9 +125,7 @@ export default function BlocksPage() { if (daOverridesRef.current.size > 0) { const empty = new Map(); daOverridesRef.current = empty; - if (daOverridesSyncRafRef.current !== null) { - cancelAnimationFrame(daOverridesSyncRafRef.current); - } + cancelDaOverridesSync(); daOverridesSyncRafRef.current = window.requestAnimationFrame(() => { setDaOverrides(empty); daOverridesSyncRafRef.current = null; @@ -151,9 +156,7 @@ export default function BlocksPage() { if (changed || nextOverrides.size !== daOverridesRef.current.size) { daOverridesRef.current = nextOverrides; - if (daOverridesSyncRafRef.current !== null) { - cancelAnimationFrame(daOverridesSyncRafRef.current); - } + 
cancelDaOverridesSync(); daOverridesSyncRafRef.current = window.requestAnimationFrame(() => { setDaOverrides(nextOverrides); daOverridesSyncRafRef.current = null; @@ -211,6 +214,7 @@ export default function BlocksPage() { if (!changed) return; + cancelDaOverridesSync(); daOverridesRef.current = next; setDaOverrides(next); @@ -235,6 +239,7 @@ export default function BlocksPage() { useEffect(() => { if (!features.da_tracking) return; return subscribeDaResync(() => { + cancelDaOverridesSync(); const empty = new Map(); daOverridesRef.current = empty; setDaOverrides(empty);