Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ release/
.DS_Store
build/
cache/
evm_stress
*.iml

# Local .terraform directories
Expand Down
17 changes: 17 additions & 0 deletions app/ante/evm_delivertx.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"github.com/sei-protocol/sei-chain/sei-cosmos/client"
"github.com/sei-protocol/sei-chain/sei-cosmos/crypto/keys/secp256k1"
sdk "github.com/sei-protocol/sei-chain/sei-cosmos/types"
sdkerrors "github.com/sei-protocol/sei-chain/sei-cosmos/types/errors"
upgradekeeper "github.com/sei-protocol/sei-chain/sei-cosmos/x/upgrade/keeper"
"github.com/sei-protocol/sei-chain/x/evm/derived"
evmkeeper "github.com/sei-protocol/sei-chain/x/evm/keeper"
Expand Down Expand Up @@ -46,6 +47,22 @@ func EvmDeliverTxAnte(
}

func EvmDeliverHandleSignatures(ctx sdk.Context, ek *evmkeeper.Keeper, txData ethtx.TxData, chainID *big.Int, msg *evmtypes.MsgEVMTransaction) (common.Address, derived.SignerVersion, error) {
if msg.Derived != nil {
if msg.Derived.PubKey == nil {
return common.Address{}, 0, sdkerrors.ErrInvalidPubKey
}
evmAddr := msg.Derived.SenderEVMAddr
seiAddr := msg.Derived.SenderSeiAddr
version := msg.Derived.Version
if err := AssociateAddress(ctx, ek, evmAddr, seiAddr, msg.Derived.PubKey); err != nil {
return evmAddr, version, err
}
if ek.EthReplayConfig.Enabled {
ek.PrepareReplayedAddr(ctx, evmAddr)
}
return evmAddr, version, nil
}

evmAddr, seiAddr, seiPubkey, version, err := CheckAndDecodeSignature(ctx, txData, chainID, ek.EthBlockTestConfig.Enabled)
if err != nil {
return evmAddr, version, err
Expand Down
36 changes: 35 additions & 1 deletion app/app.go
Original file line number Diff line number Diff line change
Expand Up @@ -1506,6 +1506,35 @@ func (app *App) ProcessTxsSynchronousGiga(ctx sdk.Context, txs [][]byte, typedTx
return txResults
}

// shouldProcessSingleRecipientEVMTransfersSynchronously reports whether every
// transaction in the block is a plain EVM value transfer (no calldata, positive
// value) addressed to one shared recipient, and the block is large enough that
// concurrent execution would be dominated by conflicts on that recipient.
func (app *App) shouldProcessSingleRecipientEVMTransfersSynchronously(typedTxs []sdk.Tx) bool {
	// Below this transaction count the cost of OCC retries is tolerable.
	const minSingleRecipientEVMTransfers = 64

	if len(typedTxs) < minSingleRecipientEVMTransfers {
		return false
	}

	var (
		recipient common.Address
		haveFirst bool
	)
	for _, tx := range typedTxs {
		msg := app.GetEVMMsg(tx)
		if msg == nil {
			return false
		}
		// Any AsTransaction error surfaces as a nil transaction below.
		etx, _ := msg.AsTransaction()
		switch {
		case etx == nil, etx.To() == nil:
			return false
		case len(etx.Data()) != 0, etx.Value().Sign() <= 0:
			return false
		}
		if !haveFirst {
			recipient, haveFirst = *etx.To(), true
			continue
		}
		if recipient != *etx.To() {
			return false
		}
	}

	return true
}

// cacheContext returns a new context based off of the provided context with
// a branched multi-store.
func (app *App) CacheContext(ctx sdk.Context) (sdk.Context, sdk.CacheMultiStore) {
Expand All @@ -1516,14 +1545,19 @@ func (app *App) CacheContext(ctx sdk.Context) (sdk.Context, sdk.CacheMultiStore)

// ExecuteTxsConcurrently calls the appropriate function for processing transacitons
func (app *App) ExecuteTxsConcurrently(ctx sdk.Context, txs [][]byte, typedTxs []sdk.Tx) ([]*abci.ExecTxResult, sdk.Context) {
processSynchronously := app.shouldProcessSingleRecipientEVMTransfersSynchronously(typedTxs)

// Giga only supports synchronous execution for now
if app.GigaExecutorEnabled && app.GigaOCCEnabled {
if app.GigaExecutorEnabled && app.GigaOCCEnabled && !processSynchronously {
return app.ProcessTXsWithOCCGiga(ctx, txs, typedTxs)
} else if app.GigaExecutorEnabled {
return app.ProcessTxsSynchronousGiga(ctx, txs, typedTxs), ctx
} else if !ctx.IsOCCEnabled() {
return app.ProcessTxsSynchronousV2(ctx, txs, typedTxs), ctx
}
if processSynchronously {
return app.ProcessTxsSynchronousV2(ctx, txs, typedTxs), ctx
}

return app.ProcessTXsWithOCCV2(ctx, txs, typedTxs)
}
Expand Down
43 changes: 36 additions & 7 deletions giga/deps/tasks/scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@ package tasks
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"sort"
"strings"
"sync"
"time"

Expand Down Expand Up @@ -113,8 +115,10 @@ type scheduler struct {
executeCh chan func()
validateCh chan func()
metrics *schedulerMetrics
synchronous bool // true if maxIncarnation exceeds threshold
maxIncarnation int // current highest incarnation
synchronous bool // true if maxIncarnation exceeds threshold
maxIncarnation int // current highest incarnation
conflictKeyCounts map[string]int // per-key conflict counts accumulated over the block
conflictKeyMu sync.Mutex
}

// NewScheduler creates a new scheduler
Expand Down Expand Up @@ -166,23 +170,27 @@ func (s *scheduler) DoExecute(work func()) {
s.executeCh <- work
}

func (s *scheduler) findConflicts(task *deliverTxTask) (bool, []int) {
func (s *scheduler) findConflicts(task *deliverTxTask) (bool, []int, []string) {
var conflicts []int
conflictKeys := make([]string, 0, len(s.multiVersionStores))
uniq := make(map[int]struct{})
valid := true
for _, mv := range s.multiVersionStores {
ok, mvConflicts := mv.ValidateTransactionState(task.AbsoluteIndex)
for storeKey, mv := range s.multiVersionStores {
ok, mvConflicts, mvKeys := mv.ValidateTransactionStateWithKeys(task.AbsoluteIndex)
for _, c := range mvConflicts {
if _, ok := uniq[c]; !ok {
conflicts = append(conflicts, c)
uniq[c] = struct{}{}
}
}
for _, k := range mvKeys {
conflictKeys = append(conflictKeys, storeKey.Name()+"/"+k)
}
// any non-ok value makes valid false
valid = valid && ok
}
sort.Ints(conflicts)
return valid, conflicts
return valid, conflicts, conflictKeys
}

func toTasks(reqs []*sdk.DeliverTxEntry) ([]*deliverTxTask, map[int]*deliverTxTask) {
Expand Down Expand Up @@ -284,6 +292,7 @@ func (s *scheduler) ProcessAll(ctx sdk.Context, reqs []*sdk.DeliverTxEntry) ([]t
tasks, tasksMap := toTasks(reqs)
s.allTasks = tasks
s.allTasksMap = tasksMap
s.conflictKeyCounts = make(map[string]int)
s.executeCh = make(chan func(), len(tasks))
s.validateCh = make(chan func(), len(tasks))
defer s.emitMetrics()
Expand Down Expand Up @@ -338,6 +347,21 @@ func (s *scheduler) ProcessAll(ctx sdk.Context, reqs []*sdk.DeliverTxEntry) ([]t
}
s.metrics.maxIncarnation = s.maxIncarnation

if s.metrics.retries > 0 && len(s.conflictKeyCounts) > 0 {
encoded := make(map[string]int, len(s.conflictKeyCounts))
for k, v := range s.conflictKeyCounts {
storeName, rawKey, _ := strings.Cut(k, "/")
var encodedKey string
if rawKey == "globalAccountNumber" {
encodedKey = storeName + "/" + rawKey
} else {
encodedKey = storeName + "/" + hex.EncodeToString([]byte(rawKey))
}
encoded[encodedKey] = v
}
logger.Info("occ scheduler key conflicts", "height", ctx.BlockHeight(), "counts", encoded)
}

logger.Info("occ scheduler", "height", ctx.BlockHeight(), "txs", len(tasks), "latency_ms", time.Since(startTime).Milliseconds(), "retries", s.metrics.retries, "maxIncarnation", s.maxIncarnation, "iterations", iterations, "sync", s.synchronous, "workers", s.workers)

return s.collectResponses(tasks), nil
Expand All @@ -354,9 +378,14 @@ func (s *scheduler) shouldRerun(task *deliverTxTask) bool {
// With the current scheduler, we won't actually get to this step if a previous task has already been determined to be invalid,
// since we choose to fail fast and mark the subsequent tasks as invalid as well.
// TODO: in a future async scheduler that no longer exhaustively validates in order, we may need to carefully handle the `valid=true` with conflicts case
if valid, conflicts := s.findConflicts(task); !valid {
if valid, conflicts, conflictKeys := s.findConflicts(task); !valid {
s.invalidateTask(task)
task.AppendDependencies(conflicts)
s.conflictKeyMu.Lock()
for _, k := range conflictKeys {
s.conflictKeyCounts[k]++
}
s.conflictKeyMu.Unlock()

// if the conflicts are now validated, then rerun this task
if dependenciesValidated(s.allTasksMap, task.Dependencies) {
Expand Down
125 changes: 125 additions & 0 deletions scripts/evm_stress.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
#!/usr/bin/env bash
# Spin up a local seid node, flood it with EVM transfers from N accounts to one
# recipient, and print only branch-specific logs + block time.
#
# Usage: ./scripts/evm_stress.sh
# Run from the repo root.
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Path to the built seid binary (assumes installation under $HOME/go/bin — confirm).
SEID="$HOME/go/bin/seid"
LOG_FILE="/tmp/seid_stress.log"

# Genesis balance per sender account. 10^12 usei ≈ unlimited for any test run.
SENDER_GENESIS_FUNDS="1000000000000"

cd "$REPO_ROOT"

# Terminate the node and the log-tailing pipeline on any exit path.
cleanup() {
echo ""
echo "==> shutting down..."
[ -n "${SEID_PID:-}" ] && kill "$SEID_PID" 2>/dev/null || true
# Kill the entire process group so tail and grep children are also terminated.
# NOTE(review): background jobs in non-interactive bash typically share the
# script's process group, so this group-kill may be a no-op; the pkill at
# startup covers stale tails from prior runs — confirm on target platform.
[ -n "${LOG_PID:-}" ] && kill -- -"$LOG_PID" 2>/dev/null || true
}
trap cleanup EXIT INT TERM

# Kill any tail processes from previous runs that are still watching this log
# file. tail -F re-opens the file by name when it is truncated, so a stale
# tail would re-read the new run's log and emit duplicate lines.
pkill -f "tail.*${LOG_FILE}" 2>/dev/null || true

# ---------------------------------------------------------------------------
# 1. Init chain (no start)
# ---------------------------------------------------------------------------
echo "==> initializing chain..."
NO_RUN=1 ./scripts/initialize_local_chain.sh

# ---------------------------------------------------------------------------
# 2. Bulk-add sender accounts to genesis via direct JSON patch.
# go run -dump-sei-addrs generates all bech32 addresses; Python patches
# genesis.json in one pass (same technique as populate_genesis_accounts.py).
# ---------------------------------------------------------------------------
cat > /tmp/evm_stress_patch_genesis.py << 'PYEOF'
import json, sys

genesis_path = sys.argv[1]
amount = sys.argv[2]
denom = "usei"

addrs = [l.strip() for l in sys.stdin if l.strip()]

with open(genesis_path) as f:
g = json.load(f)

for addr in addrs:
g["app_state"]["auth"]["accounts"].append({
"@type": "/cosmos.auth.v1beta1.BaseAccount",
"address": addr,
"pub_key": None,
"account_number": "0",
"sequence": "0",
})
g["app_state"]["bank"]["balances"].append({
"address": addr,
"coins": [{"denom": denom, "amount": amount}],
})

with open(genesis_path, "w") as f:
json.dump(g, f, separators=(",", ":"))

print(f"Added {len(addrs)} accounts to genesis", file=sys.stderr)
PYEOF

echo "==> patching genesis with sender accounts..."
go run "$REPO_ROOT/scripts/evm_stress/main.go" -dump-sei-addrs \
| python3 /tmp/evm_stress_patch_genesis.py \
"$HOME/.sei/config/genesis.json" "$SENDER_GENESIS_FUNDS"
echo "==> genesis patched"

# ---------------------------------------------------------------------------
# 3. Start seid, capturing all output to log file
# ---------------------------------------------------------------------------
echo "==> starting seid (logs -> $LOG_FILE)..."
mkdir -p /tmp/race
# GORACE routes any data-race reports from a race-enabled build into /tmp/race.
GORACE="log_path=/tmp/race/seid_race" \
"$SEID" start --trace --chain-id sei-chain > "$LOG_FILE" 2>&1 &
SEID_PID=$!
echo "==> seid PID: $SEID_PID"

# ---------------------------------------------------------------------------
# 4. Tail log file, printing only branch-specific messages
# - "occ scheduler key conflicts" from sei-cosmos/tasks/scheduler.go
# - "execution block time" from x/evm/keeper/abci.go
# ---------------------------------------------------------------------------
(
tail -F "$LOG_FILE" 2>/dev/null \
| grep --line-buffered -E \
'"occ scheduler key conflicts"|"execution block time"'
) &
LOG_PID=$!

# ---------------------------------------------------------------------------
# 5. Wait for the EVM RPC to accept connections
# ---------------------------------------------------------------------------
echo "==> waiting for EVM RPC at http://127.0.0.1:8545..."
# Poll once per second for up to 60s; abort the run if the RPC never comes up.
for i in $(seq 1 60); do
if curl -sf -X POST http://127.0.0.1:8545 \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
> /dev/null 2>&1; then
echo "==> EVM RPC ready (after ${i}s)"
break
fi
if [ "$i" -eq 60 ]; then
echo "ERROR: EVM RPC not ready after 60s" >&2
exit 1
fi
sleep 1
done

# ---------------------------------------------------------------------------
# 6. Run the Go load tester
# ---------------------------------------------------------------------------
echo "==> starting EVM transfer stress test (target 500 TPS, 50k unique senders)..."
go run "$REPO_ROOT/scripts/evm_stress/main.go"
Loading
Loading