28 changes: 28 additions & 0 deletions block/internal/executing/executor.go
@@ -444,6 +444,34 @@ func (e *Executor) ProduceBlock(ctx context.Context) error {
// Update in-memory state after successful commit
e.setLastState(newState)

// Run height-based pruning of stored block data if enabled. This is a
// best-effort maintenance step and should not cause block production to
// fail, but note that it runs synchronously in the block-production path
// and can add latency when a large range is pruned.
if e.config.Node.PruningEnabled && e.config.Node.PruningKeepRecent > 0 && e.config.Node.PruningInterval > 0 {
if newHeight%e.config.Node.PruningInterval == 0 {
// Compute the prune floor: all heights <= targetHeight are candidates
// for pruning of header/data/signature/index entries.
if newHeight > e.config.Node.PruningKeepRecent {
targetHeight := newHeight - e.config.Node.PruningKeepRecent
if err := e.store.PruneBlocks(e.ctx, targetHeight); err != nil {
e.logger.Error().Err(err).Uint64("target_height", targetHeight).Msg("failed to prune old block data")
}

// If the execution client exposes execution-metadata pruning,
// prune ExecMeta using the same target height. This keeps EVM
// execution metadata aligned with ev-node's block store pruning
// while remaining a no-op for execution environments that don't
// implement ExecMetaPruner (e.g. ABCI-based executors).
if pruner, ok := e.exec.(coreexecutor.ExecMetaPruner); ok {
if err := pruner.PruneExecMeta(e.ctx, targetHeight); err != nil {
e.logger.Error().Err(err).Uint64("target_height", targetHeight).Msg("failed to prune execution metadata")
}
}
}
}
}

// broadcast header and data to P2P network
g, broadcastCtx := errgroup.WithContext(ctx)
g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, header) })
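As a worked example of the gate above (values illustrative, helper hypothetical, not part of this diff): with `pruning_keep_recent=100` and `pruning_interval=10`, a block at height 250 triggers a prune with target height 150, while heights 255 (not a multiple of the interval) and 50 (still inside the keep-recent window) do not.

```go
// Sketch only: the trigger arithmetic from ProduceBlock, extracted into a
// standalone helper for illustration. pruneTarget does not exist in the code.
func pruneTarget(height, keepRecent, interval uint64) (uint64, bool) {
	if keepRecent == 0 || interval == 0 {
		return 0, false // pruning disabled or misconfigured
	}
	if height%interval != 0 || height <= keepRecent {
		return 0, false // not a pruning height, or nothing old enough yet
	}
	// All heights <= the returned target are candidates for pruning.
	return height - keepRecent, true
}

// pruneTarget(250, 100, 10) => (150, true)
// pruneTarget(255, 100, 10) => (0, false)
// pruneTarget(50, 100, 10)  => (0, false)
```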
13 changes: 13 additions & 0 deletions core/execution/execution.go
@@ -161,3 +161,16 @@ type Rollbackable interface {
// Rollback resets the execution layer head to the specified height.
Rollback(ctx context.Context, targetHeight uint64) error
}

// ExecMetaPruner is an optional interface that execution clients can implement
// to support height-based pruning of their execution metadata. This is used by
// EVM-based execution clients to keep ExecMeta consistent with ev-node's
// pruning window while remaining a no-op for execution environments that
// don't persist per-height metadata in ev-node's datastore.
type ExecMetaPruner interface {
// PruneExecMeta should delete execution metadata for all heights up to and
// including the given height. Implementations should be idempotent and track
// their own progress so that repeated calls with the same or decreasing
// heights are cheap no-ops.
PruneExecMeta(ctx context.Context, height uint64) error
}
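A minimal sketch of the contract this interface describes, using a purely hypothetical in-memory implementation (none of these names appear in the codebase):

```go
package example

import (
	"context"
	"sync"
)

// memPruner is a hypothetical illustration of the ExecMetaPruner contract:
// delete metadata for all heights up to and including the given height,
// track progress, and treat repeated calls with the same or a lower height
// as cheap no-ops.
type memPruner struct {
	mu         sync.Mutex
	meta       map[uint64][]byte // per-height execution metadata
	lastPruned uint64
}

func (p *memPruner) PruneExecMeta(ctx context.Context, height uint64) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if height <= p.lastPruned {
		return nil // already covered by an earlier call
	}
	for h := p.lastPruned + 1; h <= height; h++ {
		delete(p.meta, h)
	}
	p.lastPruned = height
	return nil
}
```

Because the interface is optional, the executor feature-detects it with a type assertion (see the `executor.go` hunk above) and skips execution clients that don't implement it.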
17 changes: 17 additions & 0 deletions execution/evm/execution.go
@@ -65,6 +65,12 @@ var _ execution.HeightProvider = (*EngineClient)(nil)
// Ensure EngineClient implements the execution.Rollbackable interface
var _ execution.Rollbackable = (*EngineClient)(nil)

// Ensure EngineClient implements the optional execution.ExecMetaPruner
// interface. This lets ev-node's height-based pruning coordinate pruning of
// EVM ExecMeta with its own block data pruning; execution environments that
// don't implement the interface are simply skipped by the executor.
var _ execution.ExecMetaPruner = (*EngineClient)(nil)

// validatePayloadStatus checks the payload status and returns appropriate errors.
// It implements the Engine API specification's status handling:
// - VALID: Operation succeeded, return nil
@@ -265,6 +271,17 @@ func NewEngineExecutionClient(
}, nil
}

// PruneExecMeta implements execution.ExecMetaPruner by delegating to the
// underlying EVMStore. It is idempotent: repeated calls with the same or a
// lower height are cheap no-ops, since the store tracks its own last-pruned
// height.
func (c *EngineClient) PruneExecMeta(ctx context.Context, height uint64) error {
if c.store == nil {
return nil
}

return c.store.PruneExecMeta(ctx, height)
}

// SetLogger allows callers to attach a structured logger.
func (c *EngineClient) SetLogger(l zerolog.Logger) {
c.logger = l
53 changes: 53 additions & 0 deletions execution/evm/store.go
@@ -15,6 +15,11 @@ import (
// Store prefix for execution/evm data - keeps it isolated from other ev-node data
const evmStorePrefix = "evm/"

// lastPrunedExecMetaKey is the datastore key used to track the highest
// execution height for which ExecMeta has been pruned. All ExecMeta entries
// for heights <= this value are considered pruned.
const lastPrunedExecMetaKey = evmStorePrefix + "last-pruned-execmeta-height"

// ExecMeta stages
const (
ExecStageStarted = "started"
@@ -140,6 +145,54 @@ func (s *EVMStore) SaveExecMeta(ctx context.Context, meta *ExecMeta) error {
return nil
}

// PruneExecMeta removes ExecMeta entries up to and including the given height.
// It is safe to call this multiple times with the same or increasing heights;
// previously pruned ranges will be skipped based on the last-pruned marker.
func (s *EVMStore) PruneExecMeta(ctx context.Context, height uint64) error {
// Load last pruned height, if any.
var lastPruned uint64
data, err := s.db.Get(ctx, ds.NewKey(lastPrunedExecMetaKey))
if err != nil {
if !errors.Is(err, ds.ErrNotFound) {
return fmt.Errorf("failed to get last pruned execmeta height: %w", err)
}
} else if len(data) == 8 {
lastPruned = binary.BigEndian.Uint64(data)
}

// Nothing new to prune.
if height <= lastPruned {
return nil
}

batch, err := s.db.Batch(ctx)
if err != nil {
return fmt.Errorf("failed to create batch for execmeta pruning: %w", err)
}

for h := lastPruned + 1; h <= height; h++ {
key := execMetaKey(h)
if err := batch.Delete(ctx, key); err != nil {
if !errors.Is(err, ds.ErrNotFound) {
return fmt.Errorf("failed to delete exec meta at height %d: %w", h, err)
}
}
}

// Persist updated last pruned height.
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, height)
if err := batch.Put(ctx, ds.NewKey(lastPrunedExecMetaKey), buf); err != nil {
return fmt.Errorf("failed to update last pruned execmeta height: %w", err)
}

if err := batch.Commit(ctx); err != nil {
return fmt.Errorf("failed to commit execmeta pruning batch: %w", err)
}

return nil
}

// Sync ensures all pending writes are flushed to disk.
func (s *EVMStore) Sync(ctx context.Context) error {
return s.db.Sync(ctx, ds.NewKey(evmStorePrefix))
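Two notes on the loop above. First, it addresses each height via `execMetaKey`, which this hunk doesn't show; a plausible shape, stated purely as an assumption about the surrounding file, is a per-height key under the `evm/` prefix:

```go
// Hypothetical: execMetaKey is referenced but not defined in this diff.
// Something along these lines would let the pruning loop delete each
// height's entry directly, without a datastore query.
func execMetaKey(height uint64) ds.Key {
	return ds.NewKey(fmt.Sprintf("%sexecmeta/%d", evmStorePrefix, height))
}
```

Second, when the last-pruned marker is absent (the first call ever), `lastPruned` is zero and the loop walks every height from 1 up to the target in a single batch; on a long-running chain that enables pruning late, that first batch can be very large.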
99 changes: 99 additions & 0 deletions execution/evm/store_test.go
@@ -0,0 +1,99 @@
package evm

import (
"context"
"encoding/binary"
"testing"

ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/require"
)

// newTestDatastore creates an in-memory datastore for testing.
func newTestDatastore(t *testing.T) ds.Batching {
t.Helper()
// Wrap the in-memory MapDatastore to satisfy the Batching interface.
return dssync.MutexWrap(ds.NewMapDatastore())
}

func TestPruneExecMeta_PrunesUpToTargetHeight(t *testing.T) {
t.Parallel()

ctx := context.Background()
db := newTestDatastore(t)
store := NewEVMStore(db)

// Seed ExecMeta entries at heights 1..5
for h := uint64(1); h <= 5; h++ {
meta := &ExecMeta{Height: h}
require.NoError(t, store.SaveExecMeta(ctx, meta))
}

// Sanity: all heights should be present
for h := uint64(1); h <= 5; h++ {
meta, err := store.GetExecMeta(ctx, h)
require.NoError(t, err)
require.NotNil(t, meta)
require.Equal(t, h, meta.Height)
}

// Prune up to height 3
require.NoError(t, store.PruneExecMeta(ctx, 3))

// Heights 1..3 should be gone
for h := uint64(1); h <= 3; h++ {
meta, err := store.GetExecMeta(ctx, h)
require.NoError(t, err)
require.Nil(t, meta)
}

// Heights 4..5 should remain
for h := uint64(4); h <= 5; h++ {
meta, err := store.GetExecMeta(ctx, h)
require.NoError(t, err)
require.NotNil(t, meta)
}

// Re-pruning with the same height should be a no-op
require.NoError(t, store.PruneExecMeta(ctx, 3))
}

func TestPruneExecMeta_TracksLastPrunedHeight(t *testing.T) {
t.Parallel()

ctx := context.Background()
db := newTestDatastore(t)
store := NewEVMStore(db)

// Seed ExecMeta entries at heights 1..5
for h := uint64(1); h <= 5; h++ {
meta := &ExecMeta{Height: h}
require.NoError(t, store.SaveExecMeta(ctx, meta))
}

// First prune up to 2
require.NoError(t, store.PruneExecMeta(ctx, 2))

// Then prune up to 4; heights 3..4 should be deleted in this run
require.NoError(t, store.PruneExecMeta(ctx, 4))

// Verify all heights 1..4 are gone, 5 remains
for h := uint64(1); h <= 4; h++ {
meta, err := store.GetExecMeta(ctx, h)
require.NoError(t, err)
require.Nil(t, meta)
}

meta, err := store.GetExecMeta(ctx, 5)
require.NoError(t, err)
require.NotNil(t, meta)
require.Equal(t, uint64(5), meta.Height)

// Ensure last-pruned marker is set to 4
raw, err := db.Get(ctx, ds.NewKey(lastPrunedExecMetaKey))
require.NoError(t, err)
require.Len(t, raw, 8)
last := binary.BigEndian.Uint64(raw)
require.Equal(t, uint64(4), last)
}
25 changes: 25 additions & 0 deletions pkg/config/config.go
@@ -235,6 +235,13 @@ type NodeConfig struct {
// Readiness / health configuration
ReadinessWindowSeconds uint64 `mapstructure:"readiness_window_seconds" yaml:"readiness_window_seconds" comment:"Time window in seconds used to calculate ReadinessMaxBlocksBehind based on block time. Default: 15 seconds."`
ReadinessMaxBlocksBehind uint64 `mapstructure:"readiness_max_blocks_behind" yaml:"readiness_max_blocks_behind" comment:"How many blocks behind best-known head the node can be and still be considered ready. 0 means must be exactly at head."`

// Pruning configuration
// When enabled, the node will periodically prune old block data (headers, data,
// signatures, and hash index) from the local store while keeping recent history.
PruningEnabled bool `mapstructure:"pruning_enabled" yaml:"pruning_enabled" comment:"Enable height-based pruning of stored block data. When disabled, all blocks are kept (archive mode)."`
PruningKeepRecent uint64 `mapstructure:"pruning_keep_recent" yaml:"pruning_keep_recent" comment:"Number of most recent blocks to retain. Older blocks will have their header/data/signature removed from the local store. Must be > 0 when pruning is enabled."`
PruningInterval uint64 `mapstructure:"pruning_interval" yaml:"pruning_interval" comment:"Run pruning every N blocks. Must be >= 1 when pruning is enabled."`
}

// LogConfig contains all logging configuration parameters
@@ -306,6 +313,20 @@ func (c *Config) Validate() error {
c.Node.LazyBlockInterval.Duration, c.Node.BlockTime.Duration)
}

// Validate pruning configuration
if c.Node.PruningEnabled {
// When pruning is enabled, pruning_interval must be >= 1
if c.Node.PruningInterval == 0 {
return fmt.Errorf("pruning_interval must be >= 1 when pruning is enabled")
}

// When pruning is enabled, keeping 0 blocks is contradictory; use pruning_enabled=false
// for archive mode instead.
if c.Node.PruningKeepRecent == 0 {
return fmt.Errorf("pruning_keep_recent must be > 0 when pruning is enabled; use pruning_enabled=false to keep all blocks")
}
}

return nil
}
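A quick sketch of how these rules behave in practice, starting from `DefaultConfig` (which ships with pruning disabled, per `pkg/config/defaults.go` below); this fragment assumes the rest of the default config validates:

```go
// Illustrative only: exercising the pruning validation rules above.
cfg := config.DefaultConfig()
cfg.Node.PruningEnabled = true
cfg.Node.PruningKeepRecent = 100_000 // retain the most recent 100k blocks
cfg.Node.PruningInterval = 10        // prune every 10th block

err := cfg.Validate() // passes the pruning checks

cfg.Node.PruningInterval = 0
err = cfg.Validate() // "pruning_interval must be >= 1 when pruning is enabled"

cfg.Node.PruningEnabled = false
err = cfg.Validate() // archive mode: pruning fields are not checked
```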

@@ -366,6 +387,10 @@ func AddFlags(cmd *cobra.Command) {
cmd.Flags().Uint64(FlagReadinessWindowSeconds, def.Node.ReadinessWindowSeconds, "time window in seconds for calculating readiness threshold based on block time (default: 15s)")
cmd.Flags().Uint64(FlagReadinessMaxBlocksBehind, def.Node.ReadinessMaxBlocksBehind, "how many blocks behind best-known head the node can be and still be considered ready (0 = must be at head)")
cmd.Flags().Duration(FlagScrapeInterval, def.Node.ScrapeInterval.Duration, "interval at which the reaper polls the execution layer for new transactions")
// Pruning configuration flags
cmd.Flags().Bool(FlagPrefixEvnode+"node.pruning_enabled", def.Node.PruningEnabled, "enable height-based pruning of stored block data (headers, data, signatures, index)")
cmd.Flags().Uint64(FlagPrefixEvnode+"node.pruning_keep_recent", def.Node.PruningKeepRecent, "number of most recent blocks to retain when pruning is enabled (0 = keep all)")
cmd.Flags().Uint64(FlagPrefixEvnode+"node.pruning_interval", def.Node.PruningInterval, "run pruning every N blocks (must be >= 1 when pruning is enabled)")

// Data Availability configuration flags
cmd.Flags().String(FlagDAAddress, def.DA.Address, "DA address (host:port)")
3 changes: 3 additions & 0 deletions pkg/config/defaults.go
@@ -69,6 +69,9 @@ func DefaultConfig() Config {
ReadinessWindowSeconds: defaultReadinessWindowSeconds,
ReadinessMaxBlocksBehind: calculateReadinessMaxBlocksBehind(defaultBlockTime.Duration, defaultReadinessWindowSeconds),
ScrapeInterval: DurationWrapper{1 * time.Second},
PruningEnabled: false,
PruningKeepRecent: 0,
PruningInterval: 0,
},
DA: DAConfig{
Address: "http://localhost:7980",
6 changes: 6 additions & 0 deletions pkg/store/keys.go
@@ -25,6 +25,12 @@ const (
// LastSubmittedHeaderHeightKey is the key used for persisting the last submitted header height in store.
LastSubmittedHeaderHeightKey = "last-submitted-header-height"

// LastPrunedBlockHeightKey is the metadata key used for persisting the last
// pruned block height in the store. All block data (header, data,
// signature, and hash index) for heights <= this value are considered
// pruned and may be missing from the store.
LastPrunedBlockHeightKey = "last-pruned-block-height"

headerPrefix = "h"
dataPrefix = "d"
signaturePrefix = "c"