diff --git a/.ai/EXAMPLES.md b/.ai/EXAMPLES.md new file mode 100644 index 00000000..11f5a16d --- /dev/null +++ b/.ai/EXAMPLES.md @@ -0,0 +1,669 @@ +# Assertoor Playbook Examples + +Real-world patterns extracted from production playbooks. + +--- + +## Example 1: Block Proposal Check + +Verifies every client pair produces at least one block proposal. + +```yaml +id: block-proposal-check +name: "Every client pair proposed a block" +timeout: 20m +config: + validatorPairNames: [] + +tasks: +- name: check_clients_are_healthy + title: "Check if at least one client is ready" + timeout: 5m + config: + minClientCount: 1 + +- name: run_task_matrix + title: "Check block proposals from all client pairs" + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Wait for block proposal from ${validatorPairName}" + timeout: 15m + configVars: + validatorNamePattern: "validatorPairName" + config: + blockCount: 1 +``` + +**Key patterns:** +- Health check gate at the start +- Matrix loop over `validatorPairNames` array +- Concurrent execution for parallel client checking +- `${validatorPairName}` placeholder in title + +--- + +## Example 2: EOA Transaction Testing + +Generates transactions in background, verifies inclusion in foreground. 
+ +```yaml +id: eoa-transactions-test +name: "Every client proposes blocks with EOA transactions" +timeout: 30m +config: + walletPrivkey: "" + validatorPairNames: [] + +tasks: +- name: check_clients_are_healthy + title: "Check client health" + timeout: 5m + config: + minClientCount: 1 + +- name: run_task_background + title: "Generate and verify EOA transactions" + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions + title: "Generate EOA transactions" + config: + childWallets: 100 + limitPending: 100 + limitPerBlock: 10 + limitTotal: 0 + randomTarget: true + legacyTxType: false + configVars: + privateKey: "walletPrivkey" + foregroundTask: + name: run_task_matrix + title: "Check block proposals with transactions" + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Wait for block with >= 5 txs from ${validatorPairName}" + timeout: 20m + configVars: + validatorNamePattern: "validatorPairName" + config: + minTransactionCount: 5 +``` + +**Key patterns:** +- Background task for continuous transaction generation +- `onBackgroundComplete: fail` ensures background keeps running +- `limitTotal: 0` means generate indefinitely +- Foreground verifies inclusion across all client pairs + +--- + +## Example 3: Blob Transaction Testing + +Tests EIP-4844 blob transaction inclusion. 
+ +```yaml +id: blob-transactions-test +name: "Every client proposes blocks with blob transactions" +timeout: 30m +config: + walletPrivkey: "" + validatorPairNames: [] + +tasks: +- name: check_clients_are_healthy + title: "Check client health" + timeout: 5m + config: + minClientCount: 1 + +- name: run_task_background + title: "Generate and verify blob transactions" + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_blob_transactions + title: "Generate blob transactions" + config: + childWallets: 50 + limitPending: 10 + limitPerBlock: 3 + limitTotal: 0 + blobSidecars: 3 + randomTarget: true + configVars: + privateKey: "walletPrivkey" + foregroundTask: + name: run_task_matrix + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Block from ${validatorPairName} with blobs" + timeout: 20m + configVars: + validatorNamePattern: "validatorPairName" + config: + minBlobCount: 1 +``` + +--- + +## Example 4: Validator Lifecycle (Deposit -> Activate -> Exit) + +Complete validator lifecycle test with deposit, activation wait, and exit. 
+ +```yaml +id: validator-lifecycle-test +name: "Validator lifecycle: deposit, activate, exit" +timeout: 2h +config: + walletPrivkey: "" + depositContract: "" + validatorCount: 5 + +tasks: +# Step 1: Generate random mnemonic for new validators +- name: get_random_mnemonic + title: "Generate validator mnemonic" + id: gen_mnemonic + +# Step 2: Derive public keys +- name: get_pubkeys_from_mnemonic + title: "Get validator public keys" + id: gen_pubkeys + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + config: + count: 5 + +# Step 3: Generate deposits +- name: generate_deposits + title: "Submit deposits" + id: deposits + timeout: 30m + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + walletPrivkey: "walletPrivkey" + depositContract: "depositContract" + config: + limitTotal: 5 + limitPerSlot: 2 + indexCount: 5 + depositAmount: 32000000000 + awaitReceipt: true + failOnReject: true + +# Step 4: Wait for deposits to appear on beacon chain +- name: run_task_matrix + title: "Verify deposits included" + configVars: + matrixValues: "tasks.gen_pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: check_consensus_block_proposals + title: "Check deposit for ${validatorPubkey}" + timeout: 10m + configVars: + expectDeposits: "| [.validatorPubkey]" + config: + blockCount: 1 + +# Step 5: Wait for activation +- name: run_task_matrix + title: "Wait for validator activation" + timeout: 60m + configVars: + matrixValues: "tasks.gen_pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: check_consensus_validator_status + title: "Wait for ${validatorPubkey} to activate" + timeout: 55m + configVars: + validatorPubKey: "validatorPubkey" + config: + validatorStatus: + - "active_ongoing" + +# Step 6: Get chain specs for exit eligibility calculation +- name: get_consensus_specs + title: "Get chain specs" + id: get_specs + +# Step 7: Wait for exit eligibility epoch +- 
name: check_consensus_slot_range + title: "Wait for exit eligibility" + configVars: + minEpochNumber: "| (.tasks.get_specs.outputs.specs.SHARD_COMMITTEE_PERIOD | tonumber) + 5" + +# Step 8: Generate voluntary exits +- name: generate_exits + title: "Submit voluntary exits" + timeout: 30m + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + config: + limitTotal: 5 + limitPerSlot: 2 + indexCount: 5 + awaitInclusion: true + +# Step 9: Verify exit status +- name: run_task_matrix + title: "Verify validators exiting" + configVars: + matrixValues: "tasks.gen_pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: check_consensus_validator_status + title: "Verify ${validatorPubkey} is exiting" + timeout: 10m + configVars: + validatorPubKey: "validatorPubkey" + config: + validatorStatus: + - "active_exiting" + - "exited_unslashed" +``` + +--- + +## Example 5: Finality and Health Monitoring + +Continuous monitoring test for network health. 
+ +```yaml +id: network-health-monitor +name: "Network health monitoring" +timeout: 1h +config: {} + +tasks: +- name: run_tasks_concurrent + title: "Monitor network health" + config: + tasks: + - name: check_clients_are_healthy + title: "Client health" + timeout: 55m + config: + minClientCount: 1 + continueOnPass: true + failOnCheckMiss: true + + - name: check_consensus_finality + title: "Finality check" + timeout: 55m + config: + maxUnfinalizedEpochs: 4 + failOnCheckMiss: true + continueOnPass: true + + - name: check_consensus_attestation_stats + title: "Attestation quality" + timeout: 55m + config: + minTargetPercent: 80 + minHeadPercent: 70 + minTotalPercent: 90 + failOnCheckMiss: true + continueOnPass: true + minCheckedEpochs: 3 + + - name: check_consensus_forks + title: "Fork monitoring" + timeout: 55m + config: + maxForkDistance: 2 + continueOnPass: true + + - name: check_consensus_reorgs + title: "Reorg monitoring" + timeout: 55m + config: + maxTotalReorgs: 5 + maxReorgDistance: 3 + continueOnPass: true +``` + +--- + +## Example 6: EIP-7702 Set Code Transactions + +Testing set code (account abstraction) transactions. + +```yaml +id: eip7702-test +name: "EIP-7702 set code transaction test" +timeout: 30m +config: + walletPrivkey: "" + +tasks: +- name: check_clients_are_healthy + title: "Check health" + timeout: 5m + config: + minClientCount: 1 + +# Deploy a contract to use as delegation target +- name: generate_transaction + title: "Deploy delegation contract" + id: deploy + configVars: + privateKey: "walletPrivkey" + config: + contractDeployment: true + callData: "0x6080604052..." 
# Contract bytecode + gasLimit: 500000 + awaitReceipt: true + failOnReject: true + +# Create child wallet for 7702 testing +- name: generate_child_wallet + title: "Create test wallet" + id: test_wallet + configVars: + privateKey: "walletPrivkey" + config: + prefundAmount: "1000000000000000000" # 1 ETH + randomSeed: true + +# Send set code transaction +- name: generate_transaction + title: "Send EIP-7702 set code tx" + id: set_code_tx + configVars: + privateKey: "walletPrivkey" + config: + setCodeTxType: true + targetAddress: "0x0000000000000000000000000000000000000000" + gasLimit: 200000 + awaitReceipt: true + failOnReject: true + authorizations: + - chainId: 0 + nonce: 0 + codeAddress: "tasks.deploy.outputs.contractAddress" + signerPrivkey: "tasks.test_wallet.outputs.childWallet.privateKey" +``` + +--- + +## Example 7: EL-Triggered Withdrawal Requests (EIP-7002) + +```yaml +id: eip7002-withdrawal-test +name: "EL-triggered withdrawal requests" +timeout: 1h +config: + walletPrivkey: "" + validatorMnemonic: "" + +tasks: +- name: check_clients_are_healthy + title: "Check health" + timeout: 5m + config: + minClientCount: 1 + +# Get validator info +- name: get_pubkeys_from_mnemonic + title: "Get validator pubkeys" + id: pubkeys + configVars: + mnemonic: "validatorMnemonic" + config: + count: 2 + +# Submit withdrawal requests via EL +- name: generate_withdrawal_requests + title: "Submit withdrawal requests" + id: withdrawals + timeout: 30m + configVars: + walletPrivkey: "walletPrivkey" + sourceMnemonic: "validatorMnemonic" + config: + limitTotal: 2 + limitPerSlot: 1 + sourceStartIndex: 0 + sourceIndexCount: 2 + withdrawAmount: 0 # 0 = full exit + awaitReceipt: true + failOnReject: true + +# Verify withdrawal requests appear in beacon blocks +- name: run_task_matrix + title: "Verify withdrawal requests in blocks" + configVars: + matrixValues: "tasks.pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: 
check_consensus_block_proposals + title: "Check withdrawal request for ${validatorPubkey}" + timeout: 10m + configVars: + expectWithdrawalRequests: "| [{sourceAddress: .depositorAddress, validatorPubkey: .validatorPubkey, amount: 0}]" + config: + blockCount: 1 +``` + +--- + +## Example 8: Slashing Test + +```yaml +id: slashing-test +name: "Generate and verify slashings" +timeout: 30m +config: + walletPrivkey: "" + slashingMnemonic: "" + +tasks: +- name: check_clients_are_healthy + title: "Check health" + timeout: 5m + config: + minClientCount: 1 + +- name: get_pubkeys_from_mnemonic + title: "Get slashable pubkeys" + id: pubkeys + configVars: + mnemonic: "slashingMnemonic" + config: + count: 2 + +# Run slashing generation + verification in parallel +- name: run_task_background + title: "Slash and verify" + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_slashings + title: "Generate attester slashings" + configVars: + mnemonic: "slashingMnemonic" + config: + slashingType: "attester" + limitPerSlot: 1 + limitTotal: 2 + indexCount: 2 + awaitInclusion: true + foregroundTask: + name: run_task_matrix + configVars: + matrixValues: "tasks.pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: check_consensus_block_proposals + title: "Verify slashing of ${validatorPubkey}" + timeout: 20m + configVars: + expectSlashings: "| [{publicKey: .validatorPubkey, slashingType: \"attester\"}]" + config: + blockCount: 1 +``` + +--- + +## Example 9: Shell Script Integration + +Using `run_shell` for custom logic and variable passing. 
+ +```yaml +id: shell-integration-test +name: "Custom shell logic test" +timeout: 15m +config: + walletPrivkey: "" + +tasks: +# Create wallet and capture details +- name: generate_child_wallet + title: "Create wallet" + id: wallet + configVars: + privateKey: "walletPrivkey" + config: + prefundAmount: "500000000000000000" + randomSeed: true + +# Run shell script with environment variables from task outputs +- name: run_shell + title: "Process wallet data" + config: + envVars: + WALLET_ADDRESS: "tasks.wallet.outputs.childWallet.address" + WALLET_PRIVKEY: "tasks.wallet.outputs.childWallet.privateKey" + command: | + echo "Wallet: $WALLET_ADDRESS" + + # Compute something + RESULT=$(echo "$WALLET_ADDRESS" | cut -c1-10) + + # Pass computed value back to assertoor as task output + echo "::set-output shortAddress $RESULT" + + # Set a JSON variable + echo "::set-json walletInfo {\"address\": \"$WALLET_ADDRESS\", \"short\": \"$RESULT\"}" + + # Set success + exit 0 +``` + +--- + +## Example 10: Conditional Test with Setup Phase + +```yaml +id: conditional-setup-test +name: "Test with optional setup phase" +timeout: 1h +config: + runSetup: true + walletPrivkey: "" + validatorMnemonic: "" + existingPubkeys: [] + +tasks: +# Conditional setup: only run if runSetup is true +- name: run_tasks + title: "Setup phase" + if: "runSetup" + config: + tasks: + - name: get_random_mnemonic + title: "Generate mnemonic" + id: new_mnemonic + + - name: get_pubkeys_from_mnemonic + title: "Derive pubkeys" + id: new_pubkeys + configVars: + mnemonic: "tasks.new_mnemonic.outputs.mnemonic" + config: + count: 5 + + - name: generate_deposits + title: "Submit deposits" + configVars: + mnemonic: "tasks.new_mnemonic.outputs.mnemonic" + walletPrivkey: "walletPrivkey" + config: + limitTotal: 5 + limitPerSlot: 2 + indexCount: 5 + depositAmount: 32000000000 + awaitInclusion: true + +# Use either generated or pre-existing pubkeys +- name: run_shell + title: "Resolve pubkeys" + config: + envVars: + RUN_SETUP: 
"runSetup" + NEW_PUBKEYS: "tasks.new_pubkeys.outputs.pubkeys" + EXISTING: "existingPubkeys" + command: | + if [ "$RUN_SETUP" = "true" ]; then + echo "::set-json activePubkeys $NEW_PUBKEYS" + else + echo "::set-json activePubkeys $EXISTING" + fi + +# Verify validators are active +- name: run_task_matrix + title: "Check validators active" + configVars: + matrixValues: "activePubkeys" + config: + runConcurrent: true + matrixVar: "pubkey" + task: + name: check_consensus_validator_status + title: "Check ${pubkey}" + configVars: + validatorPubKey: "pubkey" + config: + validatorStatus: ["active_ongoing"] +``` + +--- + +## Common Configuration Variables + +These variables are commonly provided by the assertoor coordinator config and passed to tests: + +| Variable | Description | Example | +|----------|-------------|---------| +| `walletPrivkey` | Funded wallet private key for transactions | `"0xdeadbeef..."` | +| `depositContract` | Deposit contract address | `"0x00000000219ab540356cBB839Cbe05303d7705Fa"` | +| `validatorPairNames` | Array of client pair names for matrix tests | `["lighthouse-geth", "prysm-geth"]` | +| `validatorMnemonic` | Mnemonic for validator key derivation | `"abandon abandon..."` | diff --git a/.ai/OVERVIEW.md b/.ai/OVERVIEW.md new file mode 100644 index 00000000..92bada55 --- /dev/null +++ b/.ai/OVERVIEW.md @@ -0,0 +1,206 @@ +# Assertoor System Overview + +## What is Assertoor? + +Assertoor is a comprehensive testing framework for live Ethereum testnets. It orchestrates test scenarios (called "playbooks") that verify the behavior of Ethereum consensus and execution layer clients. Tests are defined in YAML and composed of reusable tasks that check network state, generate transactions, manage validators, and control execution flow. 
+ +## Core Concepts + +### Coordinator +The central orchestrator (`pkg/assertoor/`) that: +- Loads configuration and connects to Ethereum endpoints +- Manages the test registry and schedules test runs +- Maintains the client pool for consensus and execution layers +- Runs the web server (API + frontend) +- Manages the database for test history + +### Tests (Playbooks) +A test is a YAML file that defines: +- **Identity**: id, name, timeout +- **Configuration**: default variables and variable bindings +- **Tasks**: ordered list of tasks to execute +- **Cleanup tasks**: tasks that always run after the test completes + +Tests can be loaded from: +- Inline YAML in the assertoor config +- External YAML files (local or remote URLs) +- The REST API + +### Tasks +Tasks are the atomic units of work. Each task: +- Has a registered type name (e.g., `check_consensus_finality`) +- Accepts configuration parameters +- Has access to a variable scope for reading/writing state +- Can produce output variables accessible by subsequent tasks +- Reports a result: Success, Failure, or None (skipped) + +### Task Categories + +| Category | Purpose | Examples | +|----------|---------|---------| +| **Check** | Verify network state against expected conditions | `check_consensus_finality`, `check_clients_are_healthy` | +| **Generate** | Perform network operations (transactions, validator ops) | `generate_transaction`, `generate_deposits` | +| **Get** | Retrieve data from the network | `get_consensus_specs`, `get_wallet_details` | +| **Flow** | Control task execution order and logic | `run_tasks`, `run_tasks_concurrent`, `run_task_matrix` | +| **Utility** | Shell commands, sleep, mnemonic generation | `run_shell`, `sleep`, `get_random_mnemonic` | + +### Variable System + +The variable system provides hierarchical scoping with jq expression evaluation: + +``` +Global Scope (coordinator config) + -> Test Scope (test config defaults + configVars from parent) + -> Task Scope (inherits test scope) + 
-> Child Task Scope (inherits parent task scope) +``` + +**Key features:** +- Variables flow down through scoping hierarchy +- Child scopes can read parent variables but modifications stay local +- jq expressions can query and transform variables +- Task outputs are isolated in `tasks.<taskId>.outputs` namespace +- `configVars` map config fields to jq queries against the variable scope + +### Client Pool + +Manages connections to Ethereum nodes: +- **Consensus clients**: Beacon API (REST) for chain state, validators, blocks +- **Execution clients**: JSON-RPC for transactions, balances, calls +- Configurable via `endpoints` array with name, URLs, and optional headers +- Client selection via regex patterns (`clientPattern`) +- Built-in health monitoring and sync status tracking + +### Scheduler + +The task scheduler (`pkg/scheduler/`) manages per-test execution: +- Sequential execution of root tasks +- Task state machine: pending -> running -> success/failure/skipped +- Timeout enforcement with context cancellation +- Conditional execution via `if` expressions +- Nested task support (flow tasks create child tasks) +- Result notification channels for inter-task coordination + +## Architecture Diagram + +``` + +------------------+ + | YAML Config | + +--------+---------+ + | + +--------v---------+ + | Coordinator | + +--------+---------+ + | + +------------------+------------------+ + | | | + +-------v------+ +-------v------+ +-------v------+ + | ClientPool | | TestRegistry | | WebServer | + +-------+------+ +-------+------+ +-------+------+ + | | | + +-------v------+ +-------v------+ +-------v------+ + | CL + EL RPCs | | TestRunner | | REST API | + +--------------+ +-------+------+ | + React UI | + | +--------------+ + +-------v------+ + | Scheduler | + +-------+------+ + | + +-------------+-------------+ + | | | + +------v---+ +-----v----+ +-----v----+ + | Check | | Generate | | Flow | + | Tasks | | Tasks | | Tasks | + +----------+ +----------+ +----------+ +``` + +## Data 
Flow + +1. **Configuration loaded** -> Coordinator initializes client pool and test registry +2. **Tests scheduled** -> Test runner creates test instances with variable scopes +3. **Task execution** -> Scheduler processes each task: + a. Evaluate `if` condition (skip if false) + b. Load config (static YAML + dynamic configVars resolution) + c. Execute task logic + d. Record outputs and result +4. **Inter-task communication** -> Via variable scope and task output references +5. **Results persisted** -> Database stores test runs, task states, logs +6. **Results reported** -> Web UI / API / exit code + +## Database Schema + +- `test_runs` - Test execution history with status, timing, config +- `task_states` - Per-task state snapshots +- `task_results` - Task execution results +- `task_logs` - Task log entries +- `test_configs` - Stored test configurations + +Supported engines: SQLite (default, in-memory), PostgreSQL + +## REST API + +Base endpoint configurable via `web.server.host` and `web.server.port`. + +Key endpoints: +- `GET /api/tests` - List all registered tests +- `GET /api/test-runs` - List test run history +- `POST /api/test-runs` - Schedule a new test run +- `GET /api/test-runs/{id}` - Get test run details +- `GET /api/task-descriptors` - List available task types +- `GET /api/logs` - Streaming log output + +Swagger UI available at `/api/docs` when API is enabled. 
+ +## Configuration Reference + +```yaml +coordinator: + maxConcurrentTests: 1 # Max tests running simultaneously + testRetentionTime: 336h # How long to keep test history + +web: + server: + host: "0.0.0.0" + port: 8080 + api: + enabled: true + frontend: + enabled: true + +database: + engine: sqlite # sqlite or pgsql + sqlite: + file: ":memory:?cache=shared" + +endpoints: + - name: "client-1" + executionUrl: "http://localhost:8545" + consensusUrl: "http://localhost:5052" + headers: # Optional custom headers + Authorization: "Bearer token" + +validatorNames: + inventory: # Maps validator indices to names + "0-63": "lighthouse-geth" + "64-127": "prysm-geth" + +globalVars: + walletPrivkey: "0x..." + depositContract: "0x..." + +tests: + - id: my-test + file: "./playbooks/my-test.yaml" + name: "My Test" + timeout: 30m + config: + key: value + schedule: + startup: true # Run on startup + cron: + - "0 */6 * * *" # Run every 6 hours + +externalTests: + - file: "https://example.com/test.yaml" + name: "Remote Test" +``` diff --git a/.ai/PLAYBOOK_AUTHORING.md b/.ai/PLAYBOOK_AUTHORING.md new file mode 100644 index 00000000..074b2f99 --- /dev/null +++ b/.ai/PLAYBOOK_AUTHORING.md @@ -0,0 +1,464 @@ +# Assertoor Playbook Authoring Guide + +## Playbook Structure + +Every test playbook is a YAML file with this structure: + +```yaml +id: unique-test-identifier +name: "Human-Readable Test Name" +timeout: 30m # Max test duration +config: # Default variables for this test + walletPrivkey: "" + validatorPairNames: [] +configVars: # Variables copied from parent scope (jq queries) + someVar: "parentScope.value" +tasks: # Main task list (executed sequentially) + - name: task_type + title: "Description" + config: {} +cleanupTasks: # Always run after test completes (success or failure) + - name: cleanup_task +``` + +## Task Definition + +Each task in the `tasks` array has: + +```yaml +- name: task_type_name # Required: registered task name + title: "Display title" # Required: shown in 
logs/UI + timeout: 5m # Optional: task-specific timeout + id: my_task_id # Optional: ID for referencing outputs + if: "condition_expression" # Optional: jq condition (skip if false) + config: # Task-specific static configuration + param1: value1 + param2: value2 + configVars: # Dynamic variable bindings (key=config field, value=jq query) + param1: "sourceVariable" +``` + +## Variable System + +### Scoping Hierarchy + +``` +Global Variables (coordinator config) + -> Test Scope (test config + configVars) + -> Task Scope (inherited from test/parent task) + -> Child Task Scope (inherited from parent task) +``` + +### Setting Variables + +**Static (in config):** +```yaml +config: + walletPrivkey: "0xabc123..." + depositAmount: 32000000000 +``` + +**Dynamic (via configVars):** +```yaml +configVars: + privateKey: "walletPrivkey" # Simple variable reference + address: "tasks.my_wallet.outputs.childWallet.address" # Task output reference + count: "| .validatorPubkeys | length" # jq expression with pipe +``` + +### Accessing Task Outputs + +Tasks with an `id` field expose their outputs to subsequent tasks: + +```yaml +- name: generate_child_wallet + id: my_wallet + config: + privateKey: "0x..." 
+ +- name: generate_transaction + configVars: + # Access output from task "my_wallet" + privateKey: "tasks.my_wallet.outputs.childWallet.privateKey" +``` + +### Task Status Variables + +Every task exposes status variables accessible via `tasks.<taskId>`: +- `tasks.<taskId>.result` - uint8 (0=None, 1=Success, 2=Failure) +- `tasks.<taskId>.running` - boolean +- `tasks.<taskId>.progress` - float64 (0-100) +- `tasks.<taskId>.outputs` - subscope with all output variables + +### Placeholder Syntax + +In task title values: +- `${varname}` - Simple variable substitution +- `${{.query.path}}` - jq expression evaluation + +```yaml +title: "Check block from ${validatorPairName}" +``` + +### jq Expression Examples + +```yaml +configVars: + # Simple variable reference + privateKey: "walletPrivkey" + + # Navigate nested objects + address: "tasks.wallet_task.outputs.childWallet.address" + + # Array slicing + firstFive: "validatorPubkeys[:5]" + remaining: "validatorPubkeys[5:]" + + # Pipe expressions (prefix with |) + epochCalc: "|(.tasks.info.outputs.validator.validator.activation_epoch | tonumber) + 256" + + # Array construction + expectList: "| [.validatorPubkey]" + + # Complex objects + expectWithdrawals: "| [{publicKey: .validatorPubkey, address: .depositorAddress, minAmount: 31000000000}]" + + # Conditional/filter + activeOnes: "| [.validators[] | select(.status == \"active\")]" + + # Length + count: "| .items | length" +``` + +## Control Flow Patterns + +### Sequential Execution (run_tasks) + +```yaml +- name: run_tasks + title: "Sequential steps" + config: + tasks: + - name: step_one + title: "First step" + config: {} + - name: step_two + title: "Second step" + config: {} +``` + +Options: +- `continueOnFailure: true` - Don't stop on child failure +- `newVariableScope: true` - Isolate variable scope + +### Parallel Execution (run_tasks_concurrent) + +```yaml +- name: run_tasks_concurrent + title: "Parallel checks" + config: + tasks: + - name: check_a + config: {} + - name: check_b + config: {} +``` + +Options: +- 
`successThreshold: 0` - How many must succeed (0=all) +- `failureThreshold: 1` - How many failures before stopping +- `stopOnThreshold: true` - Stop remaining tasks on threshold + +### Matrix/Loop (run_task_matrix) + +Execute a task template for each value in an array: + +```yaml +- name: run_task_matrix + title: "Check all validators" + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" # Variable name for current iteration + task: + name: check_consensus_block_proposals + title: "Check ${validatorPairName}" + configVars: + validatorNamePattern: "validatorPairName" + config: + blockCount: 1 +``` + +Options: +- `runConcurrent: true/false` - Parallel or sequential +- `successThreshold` / `failureThreshold` - Control pass/fail criteria + +### Background Tasks (run_task_background) + +Run a long-running task in background while foreground task checks results: + +```yaml +- name: run_task_background + title: "Generate and verify transactions" + config: + onBackgroundComplete: fail # fail|ignore|succeed|failOrIgnore + backgroundTask: + name: generate_eoa_transactions + config: + limitPerBlock: 10 + limitTotal: 1000 + limitPending: 100 + randomTarget: true + configVars: + privateKey: "walletPrivkey" + foregroundTask: + name: check_consensus_block_proposals + config: + minTransactionCount: 5 +``` + +Options: +- `onBackgroundComplete` - What to do when background finishes +- `exitOnForegroundSuccess/Failure` - Control exit conditions + +### Conditional Execution + +```yaml +- name: some_task + if: "runSetup == true" + config: {} + +- name: other_task + if: "| .useExistingValidators == false" + config: {} +``` + +### Retry Pattern (run_task_options) + +```yaml +- name: run_task_options + config: + retryOnFailure: true + maxRetryCount: 3 + task: + name: flaky_task + config: {} +``` + +Options: +- `retryOnFailure` / `maxRetryCount` - Retry on failure +- `invertResult` / `expectFailure` - Expect task to fail +- 
`ignoreFailure` / `ignoreResult` - Don't propagate failure + +### External Task Files (run_external_tasks) + +```yaml +- name: run_external_tasks + config: + testFile: "./path/to/other-playbook.yaml" + testConfig: + walletPrivkey: "" + testConfigVars: + walletPrivkey: "walletPrivkey" +``` + +## Common Test Patterns + +### Pattern 1: Health Check + Block Proposal Verification + +```yaml +id: block-proposal-check +name: "Every client pair proposed a block" +timeout: 20m +config: + validatorPairNames: [] + +tasks: +- name: check_clients_are_healthy + title: "Wait for healthy clients" + timeout: 5m + config: + minClientCount: 1 + +- name: run_task_matrix + title: "Check proposals from all pairs" + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Wait for block from ${validatorPairName}" + configVars: + validatorNamePattern: "validatorPairName" + config: + blockCount: 1 +``` + +### Pattern 2: Transaction Generation + Inclusion Verification + +```yaml +- name: run_task_background + title: "Generate and verify tx inclusion" + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions + config: + childWallets: 100 + limitPending: 100 + limitPerBlock: 10 + limitTotal: 0 # 0 = unlimited + randomTarget: true + configVars: + privateKey: "walletPrivkey" + foregroundTask: + name: run_task_matrix + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Block from ${validatorPairName} with >= 5 txs" + configVars: + validatorNamePattern: "validatorPairName" + config: + minTransactionCount: 5 +``` + +### Pattern 3: Validator Lifecycle (Deposit -> Activate -> Exit) + +```yaml +# 1. Generate mnemonic for new validators +- name: get_random_mnemonic + id: gen_mnemonic + +# 2. 
Get public keys from mnemonic +- name: get_pubkeys_from_mnemonic + id: gen_pubkeys + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + config: + count: 10 + +# 3. Generate deposits +- name: generate_deposits + id: deposits + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + walletPrivkey: "walletPrivkey" + depositContract: "depositContract" + config: + limitTotal: 10 + limitPerSlot: 2 + indexCount: 10 + depositAmount: 32000000000 + awaitInclusion: true + +# 4. Wait for activation +- name: run_task_matrix + configVars: + matrixValues: "tasks.gen_pubkeys.outputs.pubkeys" + config: + runConcurrent: true + matrixVar: "validatorPubkey" + task: + name: check_consensus_validator_status + title: "Wait for ${validatorPubkey} activation" + configVars: + validatorPubKey: "validatorPubkey" + config: + validatorStatus: ["active_ongoing"] + +# 5. Generate voluntary exits +- name: generate_exits + configVars: + mnemonic: "tasks.gen_mnemonic.outputs.mnemonic" + config: + limitTotal: 10 + limitPerSlot: 2 + indexCount: 10 + awaitInclusion: true +``` + +### Pattern 4: Shell Integration + +```yaml +- name: run_shell + title: "Run custom script" + config: + envVars: + WALLET: "tasks.wallet.outputs.address" + command: | + echo "Processing wallet: $WALLET" + + # Set task output via magic comments + echo "::set-output result success" + + # Set task variable (accessible by subsequent tasks) + echo "::set-var computedValue 42" + + # Set JSON variable + echo '::set-json myObject {"key": "value", "count": 5}' +``` + +### Pattern 5: Finality Monitoring + +```yaml +- name: check_consensus_finality + title: "Wait for finality" + timeout: 30m + config: + minFinalizedEpochs: 1 + maxUnfinalizedEpochs: 4 + failOnCheckMiss: true +``` + +### Pattern 6: Blob Transaction Testing + +```yaml +- name: run_task_background + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_blob_transactions + config: + limitPerBlock: 3 + limitTotal: 0 + 
limitPending: 10 + blobSidecars: 3 + configVars: + privateKey: "walletPrivkey" + foregroundTask: + name: run_task_matrix + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Block from ${validatorPairName} with blobs" + configVars: + validatorNamePattern: "validatorPairName" + config: + minBlobCount: 1 +``` + +## Best Practices + +1. **Always start with a health check** - Use `check_clients_are_healthy` with `minClientCount: 1` before running tests +2. **Use task IDs** for any task whose outputs you need to reference later +3. **Set appropriate timeouts** - Validator operations need longer timeouts (30m+), transaction checks need shorter ones +4. **Use matrix for multi-client testing** - `run_task_matrix` with `runConcurrent: true` over `validatorPairNames` +5. **Background + foreground pattern** for continuous generation with verification +6. **Always include cleanup tasks** for validator lifecycle tests (exit created validators) +7. **Use configVars** for dynamic values, `config` for static values +8. **Descriptive titles** with `${variable}` placeholders for clarity in logs +9. **Chain specs** - Use `get_consensus_specs` to retrieve chain parameters for calculations +10. **Client selection** - Use `clientPattern` regex to target specific clients (e.g., `"lighthouse.*"`) diff --git a/.ai/TASK_REFERENCE.md b/.ai/TASK_REFERENCE.md new file mode 100644 index 00000000..364d04cc --- /dev/null +++ b/.ai/TASK_REFERENCE.md @@ -0,0 +1,1067 @@ +# Assertoor Task Reference + +Complete reference for all assertoor tasks with configuration parameters, output variables, and usage notes. + +--- + +## Flow Control Tasks + +### run_tasks + +Runs child tasks sequentially. Stops on first failure unless configured otherwise. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `tasks` | array | required | List of task definitions to execute sequentially | +| `newVariableScope` | bool | false | Create isolated variable scope for children | +| `continueOnFailure` | bool | false | Continue executing remaining tasks after a failure | +| `invertResult` | bool | false | Swap success/failure result | +| `ignoreResult` | bool | false | Always report success | + +**Outputs:** None + +**Example:** +```yaml +- name: run_tasks + title: "Sequential steps" + config: + continueOnFailure: true + tasks: + - name: step_one + title: "First" + config: {} + - name: step_two + title: "Second" + config: {} +``` + +--- + +### run_tasks_concurrent + +Runs child tasks in parallel. Configurable success/failure thresholds. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `tasks` | array | required | List of task definitions to execute in parallel | +| `newVariableScope` | bool | true | Create isolated variable scope for children | +| `successThreshold` | uint64 | 0 | Number of tasks that must succeed (0 = all) | +| `failureThreshold` | uint64 | 1 | Number of failures before overall failure | +| `stopOnThreshold` | bool | true | Stop remaining tasks when threshold reached | +| `invertResult` | bool | false | Swap success/failure result | +| `ignoreResult` | bool | false | Always report success | + +**Outputs:** None + +**Example:** +```yaml +- name: run_tasks_concurrent + title: "Parallel checks" + config: + successThreshold: 0 + tasks: + - name: check_a + - name: check_b +``` + +--- + +### run_task_matrix + +Runs a task template for each value in an array. Supports concurrent or sequential execution. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `task` | object | required | Task template to execute for each matrix value | +| `matrixVar` | string | "" | Variable name to set for each iteration value | +| `matrixValues` | array | [] | Array of values to iterate over | +| `runConcurrent` | bool | false | Run all iterations in parallel | +| `successThreshold` | uint64 | 0 | Required successes (0 = all) | +| `failureThreshold` | uint64 | 1 | Failure limit before stopping | +| `stopOnThreshold` | bool | true | Stop at threshold | +| `invertResult` | bool | false | Swap success/failure | +| `ignoreResult` | bool | false | Always report success | + +**Outputs:** None + +**Example:** +```yaml +- name: run_task_matrix + title: "Check all validators" + configVars: + matrixValues: "validatorPairNames" + config: + runConcurrent: true + matrixVar: "validatorPairName" + task: + name: check_consensus_block_proposals + title: "Check ${validatorPairName}" + configVars: + validatorNamePattern: "validatorPairName" +``` + +--- + +### run_task_background + +Runs a foreground task and a background task simultaneously. The foreground task determines the overall result. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `foregroundTask` | object | required | Primary task (determines result) | +| `backgroundTask` | object | null | Long-running background task | +| `newVariableScope` | bool | false | Create isolated variable scope | +| `exitOnForegroundSuccess` | bool | false | Stop background when foreground succeeds | +| `exitOnForegroundFailure` | bool | false | Stop background when foreground fails | +| `onBackgroundComplete` | string | "ignore" | Action when background completes: `ignore`, `fail`, `succeed`, `failOrIgnore` | + +**Outputs:** None + +**onBackgroundComplete options:** +- `ignore` - Background completion has no effect +- `fail` - Fail overall task if background completes (useful: background should run forever) +- `succeed` - Succeed overall task if background completes +- `failOrIgnore` - Fail only if background failed, ignore if it succeeded + +**Example:** +```yaml +- name: run_task_background + config: + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions + config: + limitTotal: 0 + foregroundTask: + name: check_consensus_block_proposals + config: + minTransactionCount: 5 +``` + +--- + +### run_task_options + +Wraps a task with retry, result inversion, and failure handling options. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `task` | object | required | Task to execute | +| `newVariableScope` | bool | false | Create isolated variable scope | +| `retryOnFailure` | bool | false | Retry the task on failure | +| `maxRetryCount` | uint | 0 | Maximum retry attempts (0 = unlimited) | +| `invertResult` | bool | false | Swap success/failure | +| `ignoreResult` | bool | false | Always report success | +| `ignoreFailure` | bool | false | Ignore failure result | +| `expectFailure` | bool | false | Alias for invertResult | + +**Outputs:** None + +--- + +### run_external_tasks + +Loads and executes a task list from an external YAML file. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `testFile` | string | required | Path or URL to external test YAML file | +| `testConfig` | map | {} | Static configuration values to pass | +| `testConfigVars` | map | {} | Variable mappings to pass | +| `expectFailure` | bool | false | Expect external test to fail | +| `ignoreFailure` | bool | false | Ignore failures from external test | + +**Outputs:** None + +--- + +## Check Tasks - Consensus Layer + +### check_clients_are_healthy + +Monitors health status of consensus and/or execution clients. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | "" | Regex to select specific clients | +| `pollInterval` | duration | 5s | Interval between health polls | +| `skipConsensusCheck` | bool | false | Skip consensus client checks | +| `skipExecutionCheck` | bool | false | Skip execution client checks | +| `expectUnhealthy` | bool | false | Invert: expect clients to be unhealthy | +| `minClientCount` | int | 0 | Minimum healthy clients required | +| `maxUnhealthyCount` | int | -1 | Max unhealthy clients allowed (-1 = unlimited) | +| `failOnCheckMiss` | bool | false | Fail task when condition not met (vs. keep waiting) | +| `continueOnPass` | bool | false | Keep monitoring after check passes | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `goodClients` | array | Healthy client info objects | +| `failedClients` | array | Unhealthy client info objects | +| `totalCount` | int | Total clients checked | +| `failedCount` | int | Failed client count | +| `goodCount` | int | Healthy client count | + +--- + +### check_consensus_block_proposals + +Monitors consensus blocks for ones matching specific criteria. Waits until enough matching blocks are found. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `checkLookback` | int | 1 | Slots to look back for matching blocks | +| `blockCount` | int | 1 | Number of matching blocks required | +| `payloadTimeout` | int | 12 | Seconds to wait for execution payload | +| `graffitiPattern` | string | "" | Regex for block graffiti | +| `validatorNamePattern` | string | "" | Regex for validator name | +| `extraDataPattern` | string | "" | Regex for execution payload extra data | +| `minAttestationCount` | int | 0 | Min attestations in block | +| `minDepositCount` | int | 0 | Min deposits in block | +| `minExitCount` | int | 0 | Min voluntary exits in block | +| `minSlashingCount` | int | 0 | Min total slashings | +| `minAttesterSlashingCount` | int | 0 | Min attester slashings | +| `minProposerSlashingCount` | int | 0 | Min proposer slashings | +| `minBlsChangeCount` | int | 0 | Min BLS to execution changes | +| `minWithdrawalCount` | int | 0 | Min withdrawals | +| `minTransactionCount` | int | 0 | Min transactions | +| `minBlobCount` | int | 0 | Min blob sidecars | +| `minDepositRequestCount` | int | 0 | Min deposit requests (EIP-6110) | +| `minWithdrawalRequestCount` | int | 0 | Min withdrawal requests (EIP-7002) | +| `minConsolidationRequestCount` | int | 0 | Min consolidation requests (EIP-7251) | +| `expectDeposits` | array[string] | [] | Expected validator pubkeys with deposits | +| `expectExits` | array[string] | [] | Expected validator pubkeys with exits | +| `expectSlashings` | array | [] | Expected slashings [{publicKey, slashingType}] | +| `expectBlsChanges` | array | [] | Expected BLS changes [{publicKey, address}] | +| `expectWithdrawals` | array | [] | Expected withdrawals [{publicKey, address, minAmount, maxAmount}] | +| `expectDepositRequests` | array | [] | Expected deposit requests [{publicKey, withdrawalCredentials, amount}] | +| `expectWithdrawalRequests` | array | [] | Expected withdrawal requests 
[{sourceAddress, validatorPubkey, amount}] | +| `expectConsolidationRequests` | array | [] | Expected consolidations [{sourceAddress, sourcePubkey, targetPubkey}] | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `matchingBlockRoots` | array | Block roots that matched criteria | +| `matchingBlockHeaders` | array | Block headers that matched | +| `matchingBlockBodies` | array | Block bodies that matched | + +--- + +### check_consensus_finality + +Monitors chain finality status. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `minUnfinalizedEpochs` | uint64 | 0 | Min unfinalized epochs required | +| `maxUnfinalizedEpochs` | uint64 | 0 | Max unfinalized epochs allowed | +| `minFinalizedEpochs` | uint64 | 0 | Min finalized epochs required | +| `failOnCheckMiss` | bool | false | Fail on condition miss | +| `continueOnPass` | bool | false | Keep monitoring after pass | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `finalizedEpoch` | uint64 | Latest finalized epoch | +| `finalizedRoot` | string | Finalized checkpoint root hash | +| `unfinalizedEpochs` | uint64 | Epochs since last finalized | + +--- + +### check_consensus_forks + +Monitors for consensus layer forks. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `minCheckEpochCount` | uint64 | 1 | Min epochs to monitor before evaluating | +| `maxForkDistance` | int64 | 1 | Max allowed fork depth in slots | +| `maxForkCount` | uint64 | 0 | Max forks allowed | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `forks` | array | Fork info objects with head slot, root, clients | + +--- + +### check_consensus_reorgs + +Monitors for chain reorganizations. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `minCheckEpochCount` | uint64 | 1 | Min epochs to monitor | +| `maxReorgDistance` | uint64 | 0 | Max reorg depth in slots | +| `maxReorgsPerEpoch` | float64 | 0 | Max average reorgs per epoch | +| `maxTotalReorgs` | uint64 | 0 | Max total reorgs | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** None + +--- + +### check_consensus_sync_status + +Checks consensus clients for sync status. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | "" | Regex for client selection | +| `pollInterval` | duration | 5s | Poll interval | +| `expectSyncing` | bool | false | Expect clients to be syncing | +| `expectOptimistic` | bool | false | Expect optimistic mode | +| `expectMinPercent` | float64 | 100 | Min % of clients matching condition | +| `expectMaxPercent` | float64 | 100 | Max % of clients matching condition | +| `minSlotHeight` | int | 10 | Min slot height before checking | +| `waitForChainProgression` | bool | false | Wait for chain to progress | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `goodClients` | array | Clients meeting sync criteria | +| `failedClients` | array | Clients not meeting criteria | + +--- + +### check_consensus_validator_status + +Checks validator status on the beacon chain. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `validatorPubKey` | string | "" | Validator public key to check | +| `validatorNamePattern` | string | "" | Regex for validator name | +| `validatorIndex` | *uint64 | nil | Validator index to check | +| `validatorStatus` | array[string] | [] | Expected statuses (e.g., `["active_ongoing"]`) | +| `minValidatorBalance` | uint64 | 0 | Min balance in gwei | +| `maxValidatorBalance` | *uint64 | nil | Max balance in gwei | +| `withdrawalCredsPrefix` | string | "" | Expected withdrawal credentials prefix | +| `failOnCheckMiss` | bool | false | Fail on condition miss | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `validator` | object | Full validator information | +| `pubkey` | string | Validator public key | + +**Validator status values:** `pending_initialized`, `pending_queued`, `active_ongoing`, `active_exiting`, `active_slashed`, `exited_unslashed`, `exited_slashed`, `withdrawal_possible`, `withdrawal_done` + +--- + +### check_consensus_builder_status + +Checks builder status on the beacon chain by loading the full beacon state. Builders are a GLOAS-specific concept stored in a separate section of the state. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `builderPubKey` | string | "" | Builder public key to check | +| `builderIndex` | *uint64 | nil | Builder index to check | +| `minBuilderBalance` | uint64 | 0 | Min balance in gwei | +| `maxBuilderBalance` | *uint64 | nil | Max balance in gwei | +| `expectActive` | bool | false | Expect active builder (withdrawable_epoch == FAR_FUTURE) | +| `expectExiting` | bool | false | Expect exiting/exited builder (withdrawable_epoch != FAR_FUTURE) | +| `failOnCheckMiss` | bool | false | Fail on condition miss | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `builder` | object | Full builder information (pubkey, balance, deposit_epoch, withdrawable_epoch) | +| `builderIndex` | number | Builder's index in the builder list | +| `pubkey` | string | Builder public key | + +--- + +### check_consensus_attestation_stats + +Monitors attestation statistics per epoch. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `minTargetPercent` | uint64 | 0 | Min correct target vote % | +| `maxTargetPercent` | uint64 | 100 | Max correct target vote % | +| `minHeadPercent` | uint64 | 0 | Min correct head vote % | +| `maxHeadPercent` | uint64 | 100 | Max correct head vote % | +| `minTotalPercent` | uint64 | 0 | Min total attestation % | +| `maxTotalPercent` | uint64 | 100 | Max total attestation % | +| `failOnCheckMiss` | bool | false | Fail on condition miss | +| `minCheckedEpochs` | uint64 | 1 | Min epochs to check first | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `lastCheckedEpoch` | uint64 | Last epoch checked | +| `validatorCount` | uint64 | Active validator count | +| `validatorBalance` | uint64 | Total effective balance | +| `targetVotes` | uint64 | Correct target votes | +| `targetVotesPercent` | float64 | Target vote percentage | +| `headVotes` | uint64 | Correct head votes | +| `headVotesPercent` | float64 | Head vote percentage | +| `totalVotes` | uint64 | Total attestation votes | +| `totalVotesPercent` | float64 | Total attestation percentage | + +--- + +### check_consensus_proposer_duty + +Checks for upcoming proposer duties for specific validators. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `validatorNamePattern` | string | "" | Regex for validator name | +| `validatorIndex` | *uint64 | nil | Specific validator index | +| `minSlotDistance` | uint64 | 0 | Min slots from current for duty | +| `maxSlotDistance` | uint64 | 0 | Max slots from current for duty | +| `failOnCheckMiss` | bool | false | Fail on condition miss | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** None + +--- + +### check_consensus_slot_range + +Waits for consensus wallclock to reach a specific slot/epoch range. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `minSlotNumber` | uint64 | 0 | Min slot number required | +| `maxSlotNumber` | uint64 | max | Max slot number allowed | +| `minEpochNumber` | uint64 | 0 | Min epoch number required | +| `maxEpochNumber` | uint64 | max | Max epoch number allowed | +| `failIfLower` | bool | false | Fail immediately if below min | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `genesisTime` | int64 | Genesis timestamp (Unix seconds) | +| `currentSlot` | uint64 | Current wallclock slot | +| `currentEpoch` | uint64 | Current wallclock epoch | + +--- + +### check_consensus_identity + +Checks consensus client node identity, including ENR and CGC extraction. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | required | Regex for client selection | +| `pollInterval` | duration | 10s | Poll interval | +| `minClientCount` | int | 1 | Min clients required | +| `maxFailCount` | int | -1 | Max failed clients allowed (-1 = unlimited) | +| `failOnCheckMiss` | bool | false | Fail on miss | +| `expectCgc` | *uint64 | nil | Expected custody group count | +| `minCgc` | *uint64 | nil | Min CGC | +| `maxCgc` | *uint64 | nil | Max CGC | +| `expectEnrField` | map | nil | Expected ENR field values | +| `expectPeerIdPattern` | string | "" | Regex for peer ID | +| `expectP2pAddressCount` | *int | nil | Expected P2P address count | +| `expectP2pAddressMatch` | string | "" | Regex for P2P addresses | +| `expectSeqNumber` | *uint64 | nil | Expected metadata sequence number | +| `minSeqNumber` | *uint64 | nil | Min sequence number | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `matchingClients` | array | Clients that passed checks | +| `failedClients` | array | Clients that failed checks | +| `totalCount` | int | Total clients checked | +| `matchingCount` | int | Clients that passed | +| `failedCount` | int | Clients that failed | + +--- + +## Check Tasks - Execution Layer + +### check_execution_sync_status + +Checks execution clients for sync status. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | "" | Regex for client selection | +| `pollInterval` | duration | 5s | Poll interval | +| `expectSyncing` | bool | false | Expect syncing | +| `expectMinPercent` | float64 | 100 | Min % matching condition | +| `expectMaxPercent` | float64 | 100 | Max % matching condition | +| `minBlockHeight` | int | 10 | Min block height before checking | +| `waitForChainProgression` | bool | false | Wait for chain progress | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `goodClients` | array | Clients meeting criteria | +| `failedClients` | array | Clients not meeting criteria | + +--- + +### check_eth_call + +Executes an eth_call and verifies the response. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `ethCallData` | string | "" | Hex-encoded call data | +| `expectResult` | string | "" | Expected hex result | +| `ignoreResults` | array[string] | [] | Hex results to ignore | +| `callAddress` | string | "" | Target contract address | +| `blockNumber` | uint64 | 0 | Block number (0 = latest) | +| `failOnMismatch` | bool | false | Fail on result mismatch | +| `clientPattern` | string | "" | Regex for client selection | +| `excludeClientPattern` | string | "" | Regex to exclude clients | +| `continueOnPass` | bool | false | Keep monitoring | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `callResult` | string | eth_call result as hex | + +--- + +### check_eth_config + +Verifies all execution clients return matching eth_config (EIP-7910). 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | "" | Regex for client selection | +| `excludeClientPattern` | string | "" | Regex to exclude clients | +| `failOnMismatch` | bool | true | Fail when configs don't match | +| `excludeSyncingClients` | bool | false | Exclude syncing clients | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `ethConfig` | string | eth_config JSON from clients | + +--- + +## Generate Tasks - Transactions + +### generate_transaction + +Sends a single transaction with full control over type and parameters. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `privateKey` | string | required | Wallet private key | +| `legacyTxType` | bool | false | Use legacy transaction type | +| `blobTxType` | bool | false | Use blob transaction (EIP-4844) | +| `setCodeTxType` | bool | false | Use set code transaction (EIP-7702) | +| `blobFeeCap` | *big.Int | nil | Max blob fee cap (wei) | +| `feeCap` | *big.Int | 100 Gwei | Max fee cap (wei) | +| `tipCap` | *big.Int | 1 Gwei | Max priority tip (wei) | +| `gasLimit` | uint64 | 50000 | Gas limit | +| `targetAddress` | string | "" | Target address | +| `randomTarget` | bool | false | Random target address | +| `contractDeployment` | bool | false | Deploy contract | +| `callData` | string | "" | Hex call data | +| `blobData` | string | "" | Hex blob data | +| `blobSidecars` | uint64 | 1 | Number of blob sidecars | +| `randomAmount` | bool | false | Random amount | +| `amount` | *big.Int | 0 | Amount in wei | +| `nonce` | *uint64 | nil | Custom nonce | +| `authorizations` | array | [] | EIP-7702 authorizations [{chainId, nonce, codeAddress, signerPrivkey}] | +| `clientPattern` | string | "" | Regex for client selection | +| `excludeClientPattern` | string | "" | Regex to exclude clients | +| `awaitReceipt` | bool | true | Wait 
for receipt | +| `failOnReject` | bool | false | Fail on rejection | +| `failOnSuccess` | bool | false | Fail on success (negative testing) | +| `expectEvents` | array | [] | Expected events [{topic0, topic1, topic2, data}] | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `transaction` | object | Transaction object | +| `transactionHex` | string | Transaction hex encoding | +| `transactionHash` | string | Transaction hash | +| `contractAddress` | string | Deployed contract address | +| `receipt` | object | Transaction receipt | + +--- + +### generate_eoa_transactions + +Generates multiple EOA transactions continuously. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerBlock` | int | required | Max transactions per block | +| `limitTotal` | int | required | Total limit (0 = unlimited) | +| `limitPending` | int | required | Max pending before waiting | +| `privateKey` | string | required | Wallet private key | +| `childWallets` | uint64 | 0 | Child wallets for parallel sending | +| `walletSeed` | string | "" | Deterministic child wallet seed | +| `refillPendingLimit` | uint64 | 200 | Max pending refill transactions | +| `refillFeeCap` | *big.Int | 500 Gwei | Refill fee cap | +| `refillTipCap` | *big.Int | 1 Gwei | Refill tip cap | +| `refillAmount` | *big.Int | 1 ETH | Refill amount | +| `refillMinBalance` | *big.Int | 0.5 ETH | Min balance before refill | +| `legacyTxType` | bool | false | Legacy transaction type | +| `feeCap` | *big.Int | 100 Gwei | Fee cap | +| `tipCap` | *big.Int | 1 Gwei | Tip cap | +| `gasLimit` | uint64 | 50000 | Gas limit | +| `targetAddress` | string | "" | Target address | +| `randomTarget` | bool | false | Random targets | +| `contractDeployment` | bool | false | Deploy contracts | +| `callData` | string | "" | Call data | +| `randomAmount` | bool | false | Random amounts | +| `amount` | *big.Int | 0 | Amount per transaction | +| 
`awaitReceipt` | bool | false | Wait for receipts | +| `failOnReject` | bool | false | Fail on rejection | +| `failOnSuccess` | bool | false | Fail on success | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | + +**Outputs:** None + +--- + +### generate_blob_transactions + +Generates blob transactions (EIP-4844) continuously. + +**Config:** +Same as `generate_eoa_transactions` plus: +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `blobSidecars` | uint64 | 0 | Blobs per transaction | +| `blobFeeCap` | *big.Int | 10 Gwei | Max blob fee cap | +| `blobData` | string | "" | Hex blob data | +| `legacyBlobTx` | bool | false | Legacy blob format | + +**Outputs:** None + +--- + +## Generate Tasks - Validator Operations + +### generate_deposits + +Generates staking deposits. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerSlot` | int | required | Max deposits per slot | +| `limitTotal` | int | required | Total deposit limit | +| `limitPending` | int | 0 | Max pending deposits | +| `mnemonic` | string | required | Validator key mnemonic | +| `startIndex` | int | 0 | Start index in mnemonic | +| `indexCount` | int | required | Number of validator keys | +| `publicKey` | string | "" | Existing validator pubkey (for top-up) | +| `walletPrivkey` | string | required | Funding wallet private key | +| `depositContract` | string | required | Deposit contract address | +| `depositAmount` | uint64 | 0 | ETH to deposit per validator | +| `depositTxFeeCap` | int64 | 100 Gwei | Deposit tx fee cap | +| `depositTxTipCap` | int64 | 1 Gwei | Deposit tx tip cap | +| `withdrawalCredentials` | string | "" | Custom withdrawal credentials | +| `topUpDeposit` | bool | false | Top up existing validator | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | 
Client exclusion regex | +| `awaitReceipt` | bool | false | Wait for receipts | +| `failOnReject` | bool | false | Fail on rejection | +| `awaitInclusion` | bool | false | Wait for beacon inclusion | + +**Outputs:** None + +--- + +### generate_exits + +Generates voluntary validator exits. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerSlot` | int | required | Max exits per slot | +| `limitTotal` | int | required | Total exit limit | +| `mnemonic` | string | required | Validator key mnemonic | +| `startIndex` | int | 0 | Start index in mnemonic | +| `indexCount` | int | required | Number of validator keys | +| `builderExit` | bool | false | Generate builder exits instead of validator exits (uses BUILDER_INDEX_FLAG) | +| `sendToAllClients` | bool | false | Submit exit to all ready CL clients in parallel (succeeds if any accepts) | +| `exitEpoch` | int64 | -1 | Exit epoch (-1 = current) | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `awaitInclusion` | bool | false | Wait for beacon inclusion | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `exitedValidators` | array | Validator indices submitted for exit | +| `includedExits` | number | Exits included on-chain | + +--- + +### generate_bls_changes + +Generates BLS to execution withdrawal credential changes. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerSlot` | int | required | Max changes per slot | +| `limitTotal` | int | required | Total change limit | +| `mnemonic` | string | required | Validator key mnemonic | +| `startIndex` | int | 0 | Start index | +| `indexCount` | int | required | Number of keys | +| `targetAddress` | string | required | New withdrawal address | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `awaitInclusion` | bool | false | Wait for inclusion | + +**Outputs:** None + +--- + +### generate_slashings + +Generates slashable attestations or proposals. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `slashingType` | string | "attester" | Type: `attester` or `proposer` | +| `limitPerSlot` | int | required | Max slashings per slot | +| `limitTotal` | int | required | Total slashing limit | +| `mnemonic` | string | required | Validator key mnemonic | +| `startIndex` | int | 0 | Start index | +| `indexCount` | int | required | Number of keys | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `awaitInclusion` | bool | false | Wait for inclusion | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `slashedValidators` | array | Validator indices submitted for slashing | +| `includedSlashings` | number | Slashings included on-chain | + +--- + +### generate_withdrawal_requests + +Generates EL-triggered withdrawal requests (EIP-7002). 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerSlot` | int | required | Max requests per slot | +| `limitTotal` | int | required | Total request limit | +| `limitPending` | int | 0 | Max pending requests | +| `sourcePubkey` | string | "" | Single validator pubkey | +| `sourceMnemonic` | string | required | Validator key mnemonic | +| `sourceStartIndex` | int | required | Start index in mnemonic | +| `sourceStartValidatorIndex` | *uint64 | required | Starting validator index | +| `sourceIndexCount` | int | 0 | Number of validators | +| `withdrawAmount` | uint64 | 0 | Gwei to withdraw (0 = full exit) | +| `walletPrivkey` | string | required | Wallet private key | +| `withdrawalContract` | string | 0x00...7002 | Withdrawal contract address | +| `txAmount` | *big.Int | 0.001 ETH | ETH to send with request | +| `txFeeCap` | *big.Int | 100 Gwei | Fee cap | +| `txTipCap` | *big.Int | 1 Gwei | Tip cap | +| `txGasLimit` | uint64 | 200000 | Gas limit | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `awaitReceipt` | bool | false | Wait for receipts | +| `failOnReject` | bool | false | Fail on rejection | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `transactionHashes` | array | Transaction hashes | +| `transactionReceipts` | array | Transaction receipts | + +--- + +### generate_consolidations + +Generates validator consolidation requests (EIP-7251). 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `limitPerSlot` | int | 0 | Max consolidations per slot | +| `limitTotal` | int | required | Total consolidation limit | +| `limitPending` | int | 0 | Max pending consolidations | +| `sourceMnemonic` | string | required | Source validator mnemonic | +| `sourceStartIndex` | int | required | Source start index | +| `sourceStartValidatorIndex` | *uint64 | required | Source validator index | +| `sourceIndexCount` | int | required | Source validator count | +| `targetPublicKey` | string | required | Target validator pubkey | +| `targetValidatorIndex` | *uint64 | required | Target validator index | +| `consolidationEpoch` | *uint64 | nil | Consolidation epoch | +| `walletPrivkey` | string | "" | Wallet private key | +| `consolidationContract` | string | 0x00...7251 | Contract address | +| `txAmount` | *big.Int | 0.5 ETH | ETH to send | +| `txFeeCap` | *big.Int | 100 Gwei | Fee cap | +| `txTipCap` | *big.Int | 1 Gwei | Tip cap | +| `txGasLimit` | uint64 | 200000 | Gas limit | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `awaitReceipt` | bool | false | Wait for receipts | +| `failOnReject` | bool | false | Fail on rejection | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `transactionHashes` | array | Transaction hashes | +| `transactionReceipts` | array | Transaction receipts | + +--- + +### generate_attestations + +Generates custom attestations from derived validator keys. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `mnemonic` | string | required | Validator key mnemonic | +| `startIndex` | int | 0 | Start index | +| `indexCount` | int | required | Number of keys | +| `limitTotal` | int | required | Total attestation limit | +| `limitEpochs` | int | required | Epochs to generate for | +| `clientPattern` | string | "" | Client selection regex | +| `excludeClientPattern` | string | "" | Client exclusion regex | +| `lastEpochAttestations` | bool | false | Reference last epoch | +| `sendAllLastEpoch` | bool | false | All attestations with last epoch data | +| `lateHead` | int | 0 | Slots to delay head vote | +| `randomLateHead` | string | "" | Random delay range (min:max) | +| `lateHeadClusterSize` | int | 0 | Cluster size for shared delay | + +**Outputs:** None + +--- + +## Wallet & Key Tasks + +### generate_child_wallet + +Creates a funded child wallet from a parent wallet. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `privateKey` | string | required | Parent wallet private key | +| `walletSeed` | string | "" | Deterministic seed | +| `randomSeed` | bool | false | Random seed | +| `prefundFeeCap` | *big.Int | nil | Prefund fee cap | +| `prefundTipCap` | *big.Int | nil | Prefund tip cap | +| `prefundAmount` | *big.Int | nil | Amount to transfer | +| `prefundMinBalance` | *big.Int | nil | Min balance trigger | +| `keepFunding` | bool | false | Keep funding loop running | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `childWallet` | object | Wallet info {address, privateKey, balance} | + +--- + +### get_wallet_details + +Retrieves wallet balance and nonce. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `privateKey` | string | "" | Wallet private key | +| `address` | string | "" | Wallet address (alternative) | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `address` | string | Wallet address | +| `balance` | string | Balance in wei | +| `nonce` | uint64 | Current nonce | +| `summary` | object | Summary object | + +--- + +### get_pubkeys_from_mnemonic + +Derives validator public keys from a BIP39 mnemonic. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `mnemonic` | string | required | BIP39 mnemonic | +| `startIndex` | int | 0 | Start index | +| `count` | int | 1 | Number of keys to derive | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `pubkeys` | array | Derived public keys | + +--- + +### get_random_mnemonic + +Generates a random BIP39 mnemonic. + +**Config:** None + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `mnemonic` | string | Generated mnemonic | + +--- + +## Data Retrieval Tasks + +### get_consensus_specs + +Retrieves consensus chain specifications. + +**Config:** None + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `specs` | object | Full chain specs (SECONDS_PER_SLOT, SLOTS_PER_EPOCH, etc.) | + +--- + +### get_consensus_validators + +Retrieves validators matching specified criteria. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `clientPattern` | string | "" | Client selection regex | +| `validatorNamePattern` | string | "" | Validator name regex | +| `validatorStatus` | array[string] | [] | Status filter | +| `minValidatorBalance` | *uint64 | nil | Min balance filter | +| `maxValidatorBalance` | *uint64 | nil | Max balance filter | +| `withdrawalCredsPrefix` | string | "" | Withdrawal creds prefix | +| `minValidatorIndex` | *uint64 | nil | Min validator index | +| `maxValidatorIndex` | *uint64 | nil | Max validator index | +| `maxResults` | int | 100 | Max results | +| `outputFormat` | string | "full" | Format: `full`, `pubkeys`, or `indices` | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `validators` | array | Full validator info (when format=full) | +| `pubkeys` | array | Public keys (when format=pubkeys) | +| `indices` | array | Validator indices (when format=indices) | +| `count` | int | Number of matching validators | + +--- + +### get_execution_block + +Gets the latest execution block header. + +**Config:** None + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `header` | object | Execution block header | + +--- + +## Utility Tasks + +### sleep + +Pauses execution for a specified duration. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `duration` | duration | required | Sleep duration (e.g., "5s", "1m", "1h30m") | + +**Outputs:** None + +--- + +### run_shell + +Executes a shell script with environment variable support. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `shell` | string | "bash" | Shell interpreter | +| `shellArgs` | array[string] | [] | Shell arguments | +| `envVars` | map[string]string | {} | Environment variables (values are configVar-style queries) | +| `command` | string | required | Shell command to execute | + +**Special output patterns in command stdout:** +- `::set-var varName value` - Set variable in task scope +- `::set-json varName {"json": "value"}` - Set JSON variable in task scope +- `::set-output outputName value` - Set task output variable + +**Outputs:** Dynamic (based on `::set-output` commands) + +--- + +### run_command + +Executes a command with arguments. + +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `command` | array[string] | required | Command and arguments | +| `allowed_to_fail` | bool | false | Allow command failure | + +**Outputs:** +| Variable | Type | Description | +|----------|------|-------------| +| `stdout` | string | Combined stdout/stderr | +| `error` | string | Error message if failed | + +--- + +### run_spamoor_scenario + +Runs a spamoor stress testing scenario. 
+ +**Config:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `scenarioName` | string | required | Spamoor scenario name | +| `privateKey` | string | required | Root wallet private key | +| `scenarioYaml` | object | nil | Scenario YAML configuration | + +**Outputs:** None diff --git a/.ai/VARIABLES_AND_EXPRESSIONS.md b/.ai/VARIABLES_AND_EXPRESSIONS.md new file mode 100644 index 00000000..420a91d4 --- /dev/null +++ b/.ai/VARIABLES_AND_EXPRESSIONS.md @@ -0,0 +1,267 @@ +# Variables and Expression System + +## Variable Scoping + +### Scope Hierarchy + +Variables follow a hierarchical scoping model where child scopes inherit from parent scopes: + +``` +Global Scope (coordinator globalVars) + -> Test Scope (created via NewScope from global) + -> Root Task Scope (inherits test scope) + -> Child Task Scope (inherits parent task scope) + -> Grandchild Task Scope (...) +``` + +### Scope Behavior + +- **Reading**: Variables are resolved by checking current scope first, then walking up through parent scopes +- **Writing**: `SetVar()` only modifies the current scope (parent scopes are unaffected) +- **Defaults**: `SetDefaultVar()` sets fallback values with lowest priority +- **Subscopes**: Named namespaces for organizing variables (e.g., `tasks.myTask.outputs`) + +### Resolution Order + +When resolving a variable: +1. Check for subscope match (e.g., `tasks` prefix routes to tasks subscope) +2. Check current scope's variable map +3. Check parent scope (recursively) +4. Check default variable map +5. Return nil if not found + +## Variable Sources + +### 1. Global Variables (assertoor config) + +```yaml +globalVars: + walletPrivkey: "0xdeadbeef..." + depositContract: "0x00000000..." + validatorPairNames: + - "lighthouse-geth" + - "prysm-geth" +``` + +### 2. 
Test Config (static defaults) + +```yaml +# In test YAML +config: + walletPrivkey: "" # Default empty, overridden by global + depositAmount: 32000000000 + useExistingValidators: false +``` + +### 3. Test ConfigVars (dynamic from parent) + +```yaml +# In test YAML +configVars: + walletPrivkey: "walletPrivkey" # Copy from parent scope +``` + +### 4. Task Outputs + +Tasks produce outputs via `ctx.Outputs.SetVar(name, value)`: + +```go +// In task implementation +t.ctx.Outputs.SetVar("address", "0x1234...") +t.ctx.Outputs.SetVar("receipt", receiptObject) +``` + +Outputs are accessible via: +```yaml +configVars: + myAddr: "tasks.taskId.outputs.address" +``` + +### 5. Shell Task Variables + +The `run_shell` task can set variables via special output patterns: + +```bash +# Set a string variable in the task scope +echo "::set-var varName value" + +# Set a JSON variable in the task scope +echo '::set-json varName {"key": "value"}' + +# Set a task output variable +echo "::set-output outputName value" +``` + +## Expression System (jq) + +### Overview + +Assertoor uses the **gojq** library (Go implementation of jq) for expression evaluation. Expressions are used in: +- `configVars` mappings +- `if` conditions on tasks +- Placeholder syntax `${{expression}}` + +### Basic Syntax + +```yaml +configVars: + # Simple variable reference (implicitly becomes .walletPrivkey) + privateKey: "walletPrivkey" + + # Dot-notation navigation + address: "tasks.wallet.outputs.childWallet.address" + + # Explicit jq with pipe prefix + calculation: "| .someValue + 1" +``` + +### Query Normalization + +Queries that don't start with `.` or `|` are automatically prefixed with `.`: +- `"walletPrivkey"` becomes `".walletPrivkey"` +- `"tasks.id.outputs.x"` becomes `".tasks.id.outputs.x"` + +Queries starting with `|` are prefixed with `. `: +- `"| .x + .y"` becomes `". 
| .x + .y"` + +### Common Expressions + +#### Simple References +```yaml +configVars: + privateKey: "walletPrivkey" # -> .walletPrivkey + mnemonic: "tasks.gen.outputs.mnemonic" # -> .tasks.gen.outputs.mnemonic +``` + +#### Array Operations +```yaml +configVars: + firstFive: "validatorPubkeys[:5]" # Array slice (first 5) + afterFive: "validatorPubkeys[5:]" # Array slice (from index 5) + firstOne: "validatorPubkeys[0]" # Single element + count: "| .validatorPubkeys | length" # Array length +``` + +#### Arithmetic +```yaml +configVars: + nextEpoch: "| .currentEpoch + 1" + total: "| (.a | tonumber) + (.b | tonumber)" + # Complex calculation combining multiple task outputs: + targetEpoch: "| (.tasks.info.outputs.validator.validator.activation_epoch | tonumber) + (.tasks.specs.outputs.specs.SHARD_COMMITTEE_PERIOD | tonumber)" +``` + +#### Object Construction +```yaml +configVars: + # Build array of objects + expectExits: "| [.validatorPubkey]" + expectBlsChanges: "| [{publicKey: .validatorPubkey, address: .depositorAddress}]" + expectWithdrawals: "| [{publicKey: .validatorPubkey, address: .address, minAmount: 31000000000}]" +``` + +#### Filtering +```yaml +configVars: + activeValidators: "| [.validators[] | select(.status == \"active_ongoing\")]" + specificEntries: "| [.items[] | select(.key > 5)]" +``` + +#### String Operations +```yaml +configVars: + formatted: "| .prefix + \"-\" + .suffix" +``` + +### If Conditions + +The `if` field on tasks evaluates a jq expression. 
The task is skipped if the result is falsy (false, null, 0, empty string): + +```yaml +# Simple boolean check +- name: some_task + if: "runSetup" + config: {} + +# Comparison +- name: other_task + if: "| .useExistingValidators == false" + config: {} + +# Complex condition +- name: conditional_task + if: "| .tasks.check.result == 1" # Only if check task succeeded + config: {} +``` + +### Placeholder Syntax + +In string values within YAML, two placeholder syntaxes are supported: + +#### Simple: `${varname}` +```yaml +title: "Check block from ${validatorPairName}" +``` +Resolves via `LookupVar(varname)` - simple variable lookup. + +#### Expression: `${{expression}}` +```yaml +title: "Epoch ${{.currentEpoch}} check" +``` +Resolves via `ResolveQuery(expression)` - full jq evaluation. + +## ConfigVars Mechanism + +The `configVars` field maps configuration struct fields to jq queries: + +```yaml +configVars: + fieldName: "jqQuery" +``` + +**Processing:** +1. Each query is evaluated against the current variable scope +2. The result is YAML-marshaled then unmarshaled into the target config field +3. Type conversion happens automatically through YAML serialization +4. This runs AFTER static `config` values are applied (overrides them) + +**Example:** +```yaml +config: + limitTotal: 10 # Static default + privateKey: "" # Placeholder +configVars: + privateKey: "walletPrivkey" # Overrides with variable value + limitTotal: "| .count * 2" # Overrides with computed value +``` + +## Type Generalization + +Before jq evaluation, Go-typed values are "generalized" through YAML marshal/unmarshal: +- Typed slices (`[]string`, `[]int`) become `[]interface{}` +- Struct fields become `map[string]interface{}` +- This ensures consistent jq behavior regardless of Go source types + +## Task Output Namespace + +Each task's outputs are stored in an isolated scope accessible via: +``` +tasks..outputs. 
+``` + +Task status is also available: +``` +tasks..result # 0=None, 1=Success, 2=Failure +tasks..running # bool +tasks..progress # float64 (0-100) +tasks..progressMessage # string +``` + +## Scope Control in Flow Tasks + +Flow tasks like `run_tasks` and `run_tasks_concurrent` accept a `newVariableScope` option: + +- `newVariableScope: false` (default for most) - Child tasks share the parent's scope +- `newVariableScope: true` (default for `run_tasks_concurrent`) - Creates isolated scope + +When `newVariableScope: true`, child task variable changes don't leak to siblings or parent. diff --git a/.gitignore b/.gitignore index 85569477..b3103ec1 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,6 @@ test-*.yaml .hack/devnet/generated-** .hack/devnet/custom-** -CLAUDE.md # Web UI web-ui/node_modules/ diff --git a/.hack/devnet/run.sh b/.hack/devnet/run.sh index 517bbf17..09d0a4f7 100755 --- a/.hack/devnet/run.sh +++ b/.hack/devnet/run.sh @@ -79,6 +79,9 @@ done echo "$config_content" > "${__dir}/generated-assertoor-config.yaml" if [ -f "${__dir}/custom-ai-config.yaml" ]; then + # Remove existing 'ai' object from the generated config file + yq 'del(.ai)' "${__dir}/generated-assertoor-config.yaml" > "${__dir}/generated-assertoor-config.yaml.tmp" && mv "${__dir}/generated-assertoor-config.yaml.tmp" "${__dir}/generated-assertoor-config.yaml" + ai_config_file="${__dir}/custom-ai-config.yaml" cat "$ai_config_file" | envsubst >> "${__dir}/generated-assertoor-config.yaml" fi diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..1f6e6936 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,68 @@ +# Assertoor - Ethereum Network Testing Framework + +## Project Overview + +Assertoor is a full-scale testing orchestrator for live Ethereum testnets, built in Go. It manages test playbooks that verify network behavior by executing task-based workflows against consensus and execution layer clients. 
+ +## Architecture + +``` +main.go -> cmd/root.go -> pkg/assertoor/Coordinator + |- ClientPool (consensus + execution RPC clients) + |- TestRegistry (loads test definitions from YAML) + |- TestRunner (executes tests with task scheduler) + |- Database (SQLite/PostgreSQL persistence) + |- WebServer (REST API + React frontend) + |- EventBus (coordinator-wide events) + |- Spamoor (transaction generation engine) +``` + +## Key Directories + +- `cmd/` - CLI commands (root, tasks, validate) +- `pkg/assertoor/` - Coordinator, config loading, test registry +- `pkg/clients/` - Consensus and execution client pool management +- `pkg/scheduler/` - Task execution engine, state machine, variable scoping +- `pkg/tasks/` - 41+ task implementations (check, generate, flow, utility) +- `pkg/test/` - Test runner, descriptor, lifecycle management +- `pkg/types/` - Core interfaces (Task, Test, Coordinator, Variables) +- `pkg/vars/` - Variable system with jq expression evaluation +- `pkg/txmgr/` - Transaction manager (Spamoor wrapper) +- `pkg/db/` - Database layer with migrations +- `pkg/web/` - REST API with Swagger docs +- `playbooks/` - Pre-built test playbooks organized by network type +- `web-ui/` - React/TypeScript frontend + +## Playbook Authoring + +See `.ai/PLAYBOOK_AUTHORING.md` for complete playbook writing guide. +See `.ai/TASK_REFERENCE.md` for detailed task documentation with all config parameters and outputs. 
+ +## Building + +```bash +make build # Compile binary to bin/assertoor +make test # Run Go tests +make docs # Generate Swagger API docs +make ui # Build web UI +make devnet # Start local devnet +make devnet-run # Run assertoor against local devnet +``` + +## Configuration + +Main config file (YAML): +- `coordinator` - max concurrent tests, retention +- `web` - server host/port, API/frontend toggles +- `endpoints` - array of {name, executionUrl, consensusUrl} +- `validatorNames` - inventory for mapping validator indices to names +- `globalVars` - variables available to all tests +- `tests` / `externalTests` - test definitions or file references + +## Variable System + +- Hierarchical scoping: global -> test -> task -> child task +- jq expression evaluation via `configVars` mappings +- Placeholder syntax: `${varname}` (simple) or `${{.query}}` (jq expression) +- Task outputs accessible via `tasks..outputs.` +- Task status via `tasks..result` (0=None, 1=Success, 2=Failure) diff --git a/go.mod b/go.mod index b9428546..33c45014 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect + github.com/casbin/govaluate v1.8.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/gnark-crypto v0.19.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect @@ -74,7 +75,7 @@ require ( github.com/itchyny/timefmt-go v0.1.7 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -83,7 +84,8 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // 
indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect - github.com/pk910/dynamic-ssz v0.0.5 // indirect + github.com/pk910/dynamic-ssz v1.2.1 // indirect + github.com/pk910/hashtree-bindings v0.0.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect @@ -115,10 +117,11 @@ require ( golang.org/x/tools v0.42.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/protobuf v1.36.11 // indirect - gopkg.in/Knetic/govaluate.v3 v3.0.0 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect modernc.org/libc v1.68.0 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect modernc.org/sqlite v1.46.1 // indirect ) + +replace github.com/attestantio/go-eth2-client => github.com/pk910/go-eth2-client v0.0.0-20260225144847-75b86704f554 diff --git a/go.sum b/go.sum index a954be12..0c7486c7 100644 --- a/go.sum +++ b/go.sum @@ -13,14 +13,14 @@ github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608 github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= -github.com/attestantio/go-eth2-client v0.28.0 h1:2zIIIMPvSD+g6h3TgVXsoda/Yw3e+wjo1e8CZEanORU= -github.com/attestantio/go-eth2-client v0.28.0/go.mod h1:PO9sHFCq+1RiG+Eh3eOR2GYvYV64Qzg7idM3kLgCs5k= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= 
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/casbin/govaluate v1.8.0 h1:1dUaV/I0LFP2tcY1uNQEb6wBCbp8GMTcC/zhwQDWvZo= +github.com/casbin/govaluate v1.8.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -195,8 +195,8 @@ github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4 github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= -github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= -github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -254,8 +254,12 @@ github.com/pion/transport/v2 v2.2.1 h1:7qYnCBlpgSJNYMbLCKuSY9KbQdBFoETvPNETv0y4N github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= github.com/pion/transport/v3 v3.0.1 h1:gDTlPJwROfSfz6QfSi0ZmeCSkFcnWWiiR9ES0ouANiM= 
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pk910/dynamic-ssz v0.0.5 h1:VP9heGYUwzlpyhk28P2nCAzhvGsePJOOOO5vQMDh2qQ= -github.com/pk910/dynamic-ssz v0.0.5/go.mod h1:b6CrLaB2X7pYA+OSEEbkgXDEcRnjLOZIxZTsMuO/Y9c= +github.com/pk910/dynamic-ssz v1.2.1 h1:84eNMiiOYDiNC2Y1m5A/UtIPs6u/9SsvG4RVSBRGE5U= +github.com/pk910/dynamic-ssz v1.2.1/go.mod h1:HXRWLNcgj3DL65Kznrb+RdL3DEKw2JBZ/6crooqGoII= +github.com/pk910/go-eth2-client v0.0.0-20260225144847-75b86704f554 h1:FmusNWzB2XDzRQK1OoLo9XUH/PBatWPkztJOX/Ther4= +github.com/pk910/go-eth2-client v0.0.0-20260225144847-75b86704f554/go.mod h1:8fpxrIBBVbOcVG3vcHe5ubOHIeqW3N5t7kS4oU5EeJU= +github.com/pk910/hashtree-bindings v0.0.1 h1:Sw+UlPlrBle4LUg04kqLFybVQcfmamwKL1QsrR3GU0g= +github.com/pk910/hashtree-bindings v0.0.1/go.mod h1:eayIpxMFkWzMsydESu/5bV8wglZzSE/c9mq6DQdn204= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -415,8 +419,6 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/Knetic/govaluate.v3 v3.0.0 h1:18mUyIt4ZlRlFZAAfVetz4/rzlJs9yhN+U02F4u1AOc= -gopkg.in/Knetic/govaluate.v3 v3.0.0/go.mod h1:csKLBORsPbafmSCGTEh3U7Ozmsuq8ZSIlKk1bcqph0E= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git 
a/pkg/clients/clients.go b/pkg/clients/clients.go index e845612d..4d215bca 100644 --- a/pkg/clients/clients.go +++ b/pkg/clients/clients.go @@ -9,6 +9,7 @@ import ( "runtime/debug" "time" + "github.com/attestantio/go-eth2-client/spec" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/assertoor/pkg/clients/consensus" "github.com/ethpandaops/assertoor/pkg/clients/execution" @@ -123,35 +124,61 @@ func (pool *ClientPool) processConsensusBlockNotification(poolClient *PoolClient } }() - subscription := poolClient.ConsensusClient.SubscribeBlockEvent(100) - defer subscription.Unsubscribe() + blockSubscription := poolClient.ConsensusClient.SubscribeBlockEvent(100) + defer blockSubscription.Unsubscribe() + + payloadSubscription := pool.consensusPool.GetBlockCache().SubscribePayloadEvent(100) + defer payloadSubscription.Unsubscribe() for { select { case <-pool.ctx.Done(): return - case block := <-subscription.Channel(): - versionedBlock := block.AwaitBlock(context.Background(), 2*time.Second) - if versionedBlock == nil { - pool.logger.Warnf("cl/el block notification failed: AwaitBlock timeout (client: %v, slot: %v, root: 0x%x)", poolClient.Config.Name, block.Slot, block.Root) - break - } + case block := <-blockSubscription.Channel(): + pool.notifyELBlockFromBeaconBlock(poolClient, block) + case block := <-payloadSubscription.Channel(): + pool.notifyELBlockFromPayload(poolClient, block) + } + } +} - hash, err := versionedBlock.ExecutionBlockHash() - if err != nil { - pool.logger.Warnf("cl/el block notification failed: %s (client: %v, slot: %v, root: 0x%x)", err, poolClient.Config.Name, block.Slot, block.Root) - break - } +func (pool *ClientPool) notifyELBlockFromBeaconBlock(poolClient *PoolClient, block *consensus.Block) { + versionedBlock := block.AwaitBlock(context.Background(), 2*time.Second) + if versionedBlock == nil { + pool.logger.Warnf("cl/el block notification failed: AwaitBlock timeout (client: %v, slot: %v, root: 0x%x)", poolClient.Config.Name, 
block.Slot, block.Root) + return + } - number, err := versionedBlock.ExecutionBlockNumber() - if err != nil { - pool.logger.Warnf("cl/el block notification failed: %s (client: %v, slot: %v, root: 0x%x)", err, poolClient.Config.Name, block.Slot, block.Root) - break - } + // For gloas+ blocks, EL info comes from the payload, not the block body + if versionedBlock.Version >= spec.DataVersionGloas { + return + } - poolClient.ExecutionClient.NotifyNewBlock(common.Hash(hash), number) - } + hash, err := versionedBlock.ExecutionBlockHash() + if err != nil { + pool.logger.Warnf("cl/el block notification failed: %s (client: %v, slot: %v, root: 0x%x)", err, poolClient.Config.Name, block.Slot, block.Root) + return + } + + number, err := versionedBlock.ExecutionBlockNumber() + if err != nil { + pool.logger.Warnf("cl/el block notification failed: %s (client: %v, slot: %v, root: 0x%x)", err, poolClient.Config.Name, block.Slot, block.Root) + return } + + poolClient.ExecutionClient.NotifyNewBlock(common.Hash(hash), number) +} + +func (pool *ClientPool) notifyELBlockFromPayload(poolClient *PoolClient, block *consensus.Block) { + payload := block.GetPayload() + if payload == nil || payload.Message == nil || payload.Message.Payload == nil { + return + } + + hash := common.Hash(payload.Message.Payload.BlockHash) + number := payload.Message.Payload.BlockNumber + + poolClient.ExecutionClient.NotifyNewBlock(hash, number) } func (pool *ClientPool) GetConsensusPool() *consensus.Pool { diff --git a/pkg/clients/consensus/block.go b/pkg/clients/consensus/block.go index fca3c2ae..d9df0681 100644 --- a/pkg/clients/consensus/block.go +++ b/pkg/clients/consensus/block.go @@ -7,20 +7,24 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" ) type Block struct { - Root phase0.Root - Slot phase0.Slot - headerMutex sync.Mutex - headerChan chan bool - header *phase0.SignedBeaconBlockHeader 
- blockMutex sync.Mutex - blockChan chan bool - block *spec.VersionedSignedBeaconBlock - seenMutex sync.RWMutex - seenMap map[uint16]*Client + Root phase0.Root + Slot phase0.Slot + headerMutex sync.Mutex + headerChan chan bool + header *phase0.SignedBeaconBlockHeader + blockMutex sync.Mutex + blockChan chan bool + block *spec.VersionedSignedBeaconBlock + payloadMutex sync.Mutex + payloadChan chan bool + payload *gloas.SignedExecutionPayloadEnvelope + seenMutex sync.RWMutex + seenMap map[uint16]*Client } func (block *Block) GetSeenBy() []*Client { @@ -112,6 +116,10 @@ func (block *Block) EnsureHeader(loadHeader func() (*phase0.SignedBeaconBlockHea return err } + if header == nil { + return nil + } + block.header = header close(block.headerChan) @@ -135,8 +143,60 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo return false, err } + if blockBody == nil { + return false, nil + } + block.block = blockBody close(block.blockChan) return true, nil } + +// GetPayload returns the execution payload envelope if available. +func (block *Block) GetPayload() *gloas.SignedExecutionPayloadEnvelope { + return block.payload +} + +// AwaitPayload waits for the execution payload envelope to become available. +func (block *Block) AwaitPayload(ctx context.Context, timeout time.Duration) *gloas.SignedExecutionPayloadEnvelope { + if ctx == nil { + ctx = context.Background() + } + + select { + case <-block.payloadChan: + case <-time.After(timeout): + case <-ctx.Done(): + } + + return block.payload +} + +// EnsurePayload loads and sets the execution payload envelope if not already set. 
+func (block *Block) EnsurePayload(loadPayload func() (*gloas.SignedExecutionPayloadEnvelope, error)) (bool, error) { + if block.payload != nil { + return false, nil + } + + block.payloadMutex.Lock() + defer block.payloadMutex.Unlock() + + if block.payload != nil { + return false, nil + } + + payload, err := loadPayload() + if err != nil { + return false, err + } + + if payload == nil { + return false, nil + } + + block.payload = payload + close(block.payloadChan) + + return true, nil +} diff --git a/pkg/clients/consensus/block_utils.go b/pkg/clients/consensus/block_utils.go index 52a352cc..461871f7 100644 --- a/pkg/clients/consensus/block_utils.go +++ b/pkg/clients/consensus/block_utils.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" ) func GetExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) { @@ -23,15 +24,38 @@ func GetExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) { return v.Capella.Message.Body.ExecutionPayload.ExtraData, nil case spec.DataVersionDeneb: if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil { - return nil, errors.New("no denb block") + return nil, errors.New("no deneb block") } return v.Deneb.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no electra block") + } + + return v.Electra.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionFulu: + if v.Fulu == nil || v.Fulu.Message == nil || v.Fulu.Message.Body == nil || v.Fulu.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no fulu block") + } + + return v.Fulu.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionGloas: + return nil, errors.New("gloas extra data is in 
separate payload") default: return nil, errors.New("unknown version") } } +// GetPayloadExtraData returns the extra data from a gloas execution payload envelope. +func GetPayloadExtraData(payload *gloas.SignedExecutionPayloadEnvelope) ([]byte, error) { + if payload == nil || payload.Message == nil || payload.Message.Payload == nil { + return nil, errors.New("no payload") + } + + return payload.Message.Payload.ExtraData, nil +} + func GetBlockBody(v *spec.VersionedSignedBeaconBlock) any { //nolint:exhaustive // ignore switch v.Version { @@ -45,6 +69,12 @@ func GetBlockBody(v *spec.VersionedSignedBeaconBlock) any { return v.Capella case spec.DataVersionDeneb: return v.Deneb + case spec.DataVersionElectra: + return v.Electra + case spec.DataVersionFulu: + return v.Fulu + case spec.DataVersionGloas: + return v.Gloas default: return nil } diff --git a/pkg/clients/consensus/blockcache.go b/pkg/clients/consensus/blockcache.go index fe74bb37..42e7e3e9 100644 --- a/pkg/clients/consensus/blockcache.go +++ b/pkg/clients/consensus/blockcache.go @@ -39,12 +39,17 @@ type BlockCache struct { valsetEpoch phase0.Epoch valsetMap map[phase0.ValidatorIndex]*v1.Validator + builderMutex sync.Mutex + builderEpoch phase0.Epoch + builderSet []*BuilderInfo + blockMutex sync.RWMutex blockSlotMap map[phase0.Slot][]*Block blockRootMap map[phase0.Root]*Block maxSlotIdx int64 blockDispatcher Dispatcher[*Block] + payloadDispatcher Dispatcher[*Block] checkpointDispatcher Dispatcher[*FinalizedCheckpoint] wallclockEpochDispatcher Dispatcher[*ethwallclock.Epoch] wallclockSlotDispatcher Dispatcher[*ethwallclock.Slot] @@ -99,6 +104,14 @@ func (cache *BlockCache) notifyBlockReady(block *Block) { cache.blockDispatcher.Fire(block) } +func (cache *BlockCache) SubscribePayloadEvent(capacity int) *Subscription[*Block] { + return cache.payloadDispatcher.Subscribe(capacity) +} + +func (cache *BlockCache) notifyPayloadReady(block *Block) { + cache.payloadDispatcher.Fire(block) +} + func (cache *BlockCache) 
SetMinFollowDistance(followDistance uint64) { if followDistance > 10000 { followDistance = 10000 @@ -266,11 +279,12 @@ func (cache *BlockCache) AddBlock(root phase0.Root, slot phase0.Slot) (*Block, b } cacheBlock := &Block{ - Root: root, - Slot: slot, - seenMap: make(map[uint16]*Client), - headerChan: make(chan bool), - blockChan: make(chan bool), + Root: root, + Slot: slot, + seenMap: make(map[uint16]*Client), + headerChan: make(chan bool), + blockChan: make(chan bool), + payloadChan: make(chan bool), } cache.blockRootMap[root] = cacheBlock @@ -341,6 +355,7 @@ func (cache *BlockCache) runCacheCleanup(ctx context.Context) { cache.cleanupBlockCache() cache.cleanupValsetCache() + cache.cleanupBuilderCache() } } @@ -366,6 +381,93 @@ func (cache *BlockCache) cleanupBlockCache() { } } +func (cache *BlockCache) getCachedBuilderSet(loadFn func() []*BuilderInfo) []*BuilderInfo { + wallclock := cache.GetWallclock() + + cache.builderMutex.Lock() + defer cache.builderMutex.Unlock() + + epoch := phase0.Epoch(0) + + if wallclock != nil { + _, e, _ := wallclock.Now() + if e.Number() < math.MaxInt64 { + epoch = phase0.Epoch(e.Number()) + } + } + + if cache.builderSet == nil || cache.builderEpoch < epoch { + builderSet := loadFn() + if builderSet != nil { + cache.builderSet = builderSet + cache.builderEpoch = epoch + } + } + + return cache.builderSet +} + +func (cache *BlockCache) SetBuilderSet(builders []*BuilderInfo) { + wallclock := cache.GetWallclock() + + cache.builderMutex.Lock() + defer cache.builderMutex.Unlock() + + epoch := phase0.Epoch(0) + + if wallclock != nil { + _, e, _ := wallclock.Now() + if e.Number() < math.MaxInt64 { + epoch = phase0.Epoch(e.Number()) + } + } + + cache.builderSet = builders + cache.builderEpoch = epoch +} + +func (cache *BlockCache) SetValidatorSet(valset map[phase0.ValidatorIndex]*v1.Validator) { + wallclock := cache.GetWallclock() + + cache.valsetMutex.Lock() + defer cache.valsetMutex.Unlock() + + epoch := phase0.Epoch(0) + + if wallclock != 
nil { + _, e, _ := wallclock.Now() + if e.Number() < math.MaxInt64 { + epoch = phase0.Epoch(e.Number()) + } + } + + cache.valsetMap = valset + cache.valsetEpoch = epoch +} + +func (cache *BlockCache) cleanupBuilderCache() { + if cache.builderSet == nil { + return + } + + wallclock := cache.GetWallclock() + epoch := phase0.Epoch(0) + + if wallclock != nil { + _, e, _ := wallclock.Now() + if e.Number() < math.MaxInt64 { + epoch = phase0.Epoch(e.Number()) + } + } + + cache.builderMutex.Lock() + defer cache.builderMutex.Unlock() + + if epoch-cache.builderEpoch >= 2 { + cache.builderSet = nil + } +} + func (cache *BlockCache) cleanupValsetCache() { if cache.valsetMap == nil { return diff --git a/pkg/clients/consensus/builder.go b/pkg/clients/consensus/builder.go new file mode 100644 index 00000000..664848bc --- /dev/null +++ b/pkg/clients/consensus/builder.go @@ -0,0 +1,22 @@ +package consensus + +import ( + "github.com/attestantio/go-eth2-client/spec/gloas" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +// BuilderIndexFlag is the bit flag that indicates a ValidatorIndex +// should be treated as a BuilderIndex (BUILDER_INDEX_FLAG = 2^40). +const BuilderIndexFlag = uint64(1 << 40) + +// BuilderInfo wraps a gloas.Builder with its index in the builder list. +type BuilderInfo struct { + Index gloas.BuilderIndex + Builder *gloas.Builder +} + +// ConvertBuilderIndexToValidatorIndex returns the ValidatorIndex +// representation of a builder index (builder_index | BUILDER_INDEX_FLAG). 
+func ConvertBuilderIndexToValidatorIndex(builderIndex gloas.BuilderIndex) phase0.ValidatorIndex { + return phase0.ValidatorIndex(uint64(builderIndex) | BuilderIndexFlag) +} diff --git a/pkg/clients/consensus/chainspec.go b/pkg/clients/consensus/chainspec.go index 7de457cf..2004afac 100644 --- a/pkg/clients/consensus/chainspec.go +++ b/pkg/clients/consensus/chainspec.go @@ -25,11 +25,24 @@ type ChainSpec struct { BellatrixForkEpoch uint64 `yaml:"BELLATRIX_FORK_EPOCH"` CappellaForkVersion phase0.Version `yaml:"CAPELLA_FORK_VERSION"` CappellaForkEpoch uint64 `yaml:"CAPELLA_FORK_EPOCH"` + DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH"` + ElectraForkEpoch uint64 `yaml:"ELECTRA_FORK_EPOCH"` + FuluForkEpoch uint64 `yaml:"FULU_FORK_EPOCH"` + GloasForkEpoch uint64 `yaml:"GLOAS_FORK_EPOCH"` SecondsPerSlot time.Duration `yaml:"SECONDS_PER_SLOT"` SlotsPerEpoch uint64 `yaml:"SLOTS_PER_EPOCH"` MaxCommitteesPerSlot uint64 `yaml:"MAX_COMMITTEES_PER_SLOT"` } +// IsGloasActive returns true if the gloas fork is active at the given slot. 
+func (chain *ChainSpec) IsGloasActive(slot phase0.Slot) bool { + if chain.GloasForkEpoch == 0 || chain.SlotsPerEpoch == 0 { + return false + } + + return uint64(slot) >= chain.GloasForkEpoch*chain.SlotsPerEpoch +} + func (chain *ChainSpec) CheckMismatch(chain2 *ChainSpec) []string { mismatches := []string{} diff --git a/pkg/clients/consensus/clientlogic.go b/pkg/clients/consensus/clientlogic.go index 1a8bde2c..9aab87df 100644 --- a/pkg/clients/consensus/clientlogic.go +++ b/pkg/clients/consensus/clientlogic.go @@ -9,6 +9,7 @@ import ( v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/assertoor/pkg/clients/consensus/rpc" ) @@ -137,7 +138,7 @@ func (client *Client) runClientLogic() error { } // start event stream - blockStream := client.rpcClient.NewBlockStream(client.clientCtx, rpc.StreamBlockEvent|rpc.StreamFinalizedEvent) + blockStream := client.rpcClient.NewBlockStream(client.clientCtx, rpc.StreamBlockEvent|rpc.StreamFinalizedEvent|rpc.StreamExecutionPayloadEvent) defer blockStream.Close() // process events @@ -173,6 +174,14 @@ func (client *Client) runClientLogic() error { client.logger.Warnf("failed processing finalized event: %v", err) } } + + case rpc.StreamExecutionPayloadEvent: + if payloadEvent, ok := evt.Data.(*v1.ExecutionPayloadAvailableEvent); ok { + err := client.processPayloadEvent(payloadEvent) + if err != nil { + client.logger.Warnf("failed processing payload event: %v", err) + } + } } client.logger.Tracef("event (%v) processing time: %v ms", evt.Event, time.Since(now).Milliseconds()) @@ -220,6 +229,36 @@ func (client *Client) processFinalizedEvent(evt *v1.FinalizedCheckpointEvent) er return client.setFinalizedHead(evt.Epoch, evt.Block) } +func (client *Client) processPayloadEvent(evt *v1.ExecutionPayloadAvailableEvent) error { + cachedBlock := 
client.pool.blockCache.GetCachedBlockByRoot(evt.BlockRoot) + if cachedBlock == nil { + client.logger.Debugf("received payload event for unknown block [0x%x]", evt.BlockRoot) + return nil + } + + loaded, err := cachedBlock.EnsurePayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(client.clientCtx, 10*time.Second) + defer cancel() + + payload, err := client.rpcClient.GetExecutionPayloadByBlockroot(ctx, evt.BlockRoot) + if err != nil { + return nil, err + } + + return payload, nil + }) + if err != nil { + return fmt.Errorf("could not load payload for block [0x%x]: %w", evt.BlockRoot, err) + } + + if loaded { + client.logger.Infof("received execution payload for block %v [0x%x]", cachedBlock.Slot, evt.BlockRoot) + client.pool.blockCache.notifyPayloadReady(cachedBlock) + } + + return nil +} + func (client *Client) pollClientHead() error { ctx, cancel := context.WithTimeout(client.clientCtx, 10*time.Second) defer cancel() @@ -296,6 +335,12 @@ func (client *Client) processBlock(root phase0.Root, slot phase0.Slot, header *p if loaded { client.pool.blockCache.notifyBlockReady(cachedBlock) + + // For gloas+ blocks, also try to load payload (for polled/backfill blocks) + blockData := cachedBlock.GetBlock() + if blockData != nil && blockData.Version >= spec.DataVersionGloas { + go client.loadBlockPayload(cachedBlock) + } } client.headMutex.Lock() @@ -315,6 +360,29 @@ func (client *Client) processBlock(root phase0.Root, slot phase0.Slot, header *p return nil } +func (client *Client) loadBlockPayload(cachedBlock *Block) { + loaded, err := cachedBlock.EnsurePayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(client.clientCtx, 10*time.Second) + defer cancel() + + payload, err := client.rpcClient.GetExecutionPayloadByBlockroot(ctx, cachedBlock.Root) + if err != nil { + return nil, err + } + + return payload, nil + }) + if err != nil { + client.logger.Warnf("could not load payload for 
block %v [0x%x]: %v", cachedBlock.Slot, cachedBlock.Root, err) + return + } + + if loaded { + client.logger.Debugf("loaded execution payload for block %v [0x%x]", cachedBlock.Slot, cachedBlock.Root) + client.pool.blockCache.notifyPayloadReady(cachedBlock) + } +} + func (client *Client) setFinalizedHead(epoch phase0.Epoch, root phase0.Root) error { client.headMutex.Lock() diff --git a/pkg/clients/consensus/pool.go b/pkg/clients/consensus/pool.go index 56de35fe..663d8603 100644 --- a/pkg/clients/consensus/pool.go +++ b/pkg/clients/consensus/pool.go @@ -7,6 +7,8 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -87,6 +89,120 @@ func (pool *Pool) GetValidatorSet() map[phase0.ValidatorIndex]*v1.Validator { }) } +func (pool *Pool) GetBuilderSet() []*BuilderInfo { + return pool.blockCache.getCachedBuilderSet(func() []*BuilderInfo { + client := pool.GetReadyEndpoint(AnyClient) + if client == nil { + pool.logger.Errorf("could not load builder set: no ready client") + return nil + } + + state, err := client.GetRPCClient().GetState(client.clientCtx, "head") + if err != nil { + pool.logger.Errorf("could not load beacon state for builder set: %v", err) + return nil + } + + if state.Version < spec.DataVersionGloas || state.Gloas == nil { + return nil + } + + // Update validator set cache from the same state + pool.updateValidatorSetFromState(state) + + builders := make([]*BuilderInfo, len(state.Gloas.Builders)) + for i, b := range state.Gloas.Builders { + builders[i] = &BuilderInfo{ + Index: gloas.BuilderIndex(i), + Builder: b, + } + } + + return builders + }) +} + +func (pool *Pool) updateValidatorSetFromState(state *spec.VersionedBeaconState) { + validators, err := state.Validators() + if err != nil || len(validators) == 0 { + return + } + + balances, err := 
state.ValidatorBalances() + if err != nil { + return + } + + currentSlot, err := state.Slot() + if err != nil { + return + } + + specs := pool.blockCache.GetSpecs() + if specs == nil { + return + } + + currentEpoch := phase0.Epoch(uint64(currentSlot) / specs.SlotsPerEpoch) + + valset := make(map[phase0.ValidatorIndex]*v1.Validator, len(validators)) + for i, val := range validators { + idx := phase0.ValidatorIndex(i) + + balance := phase0.Gwei(0) + if i < len(balances) { + balance = balances[i] + } + + valset[idx] = &v1.Validator{ + Index: idx, + Balance: balance, + Status: computeValidatorStatus(val, currentEpoch), + Validator: val, + } + } + + pool.blockCache.SetValidatorSet(valset) +} + +func computeValidatorStatus(val *phase0.Validator, epoch phase0.Epoch) v1.ValidatorState { + farFuture := phase0.Epoch(0xFFFFFFFFFFFFFFFF) + + if val.ActivationEligibilityEpoch == farFuture { + return v1.ValidatorStatePendingInitialized + } + + if val.ActivationEpoch > epoch { + return v1.ValidatorStatePendingQueued + } + + if val.ExitEpoch > epoch { + if val.Slashed { + return v1.ValidatorStateActiveSlashed + } + + if val.ExitEpoch == farFuture { + return v1.ValidatorStateActiveOngoing + } + + return v1.ValidatorStateActiveExiting + } + + if val.WithdrawableEpoch > epoch { + if val.Slashed { + return v1.ValidatorStateExitedSlashed + } + + return v1.ValidatorStateExitedUnslashed + } + + if val.EffectiveBalance != 0 { + return v1.ValidatorStateWithdrawalPossible + } + + return v1.ValidatorStateWithdrawalDone +} + func (pool *Pool) AddEndpoint(endpoint *ClientConfig) (*Client, error) { clientIdx := pool.clientCounter pool.clientCounter++ diff --git a/pkg/clients/consensus/rpc/beaconapi.go b/pkg/clients/consensus/rpc/beaconapi.go index 7a8d005b..c091e78f 100644 --- a/pkg/clients/consensus/rpc/beaconapi.go +++ b/pkg/clients/consensus/rpc/beaconapi.go @@ -16,6 +16,7 @@ import ( "github.com/attestantio/go-eth2-client/http" "github.com/attestantio/go-eth2-client/spec" 
"github.com/attestantio/go-eth2-client/spec/capella" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/rs/zerolog" "github.com/sirupsen/logrus" @@ -338,6 +339,29 @@ func (bc *BeaconClient) GetBlockHeaderBySlot(ctx context.Context, slot phase0.Sl return result.Data, nil } +func (bc *BeaconClient) GetExecutionPayloadByBlockroot(ctx context.Context, blockroot phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + provider, isProvider := bc.clientSvc.(eth2client.ExecutionPayloadProvider) + if !isProvider { + return nil, fmt.Errorf("get execution payload not supported") + } + + result, err := provider.SignedExecutionPayloadEnvelope(ctx, &api.SignedExecutionPayloadEnvelopeOpts{ + Block: fmt.Sprintf("0x%x", blockroot), + Common: api.CommonOpts{ + Timeout: 0, + }, + }) + if err != nil { + if strings.HasPrefix(err.Error(), "GET failed with status 404") { + return nil, nil + } + + return nil, err + } + + return result.Data, nil +} + func (bc *BeaconClient) GetBlockBodyByBlockroot(ctx context.Context, blockroot phase0.Root) (*spec.VersionedSignedBeaconBlock, error) { provider, isProvider := bc.clientSvc.(eth2client.SignedBeaconBlockProvider) if !isProvider { diff --git a/pkg/clients/consensus/rpc/beaconstream.go b/pkg/clients/consensus/rpc/beaconstream.go index 28981e0f..f284a5b7 100644 --- a/pkg/clients/consensus/rpc/beaconstream.go +++ b/pkg/clients/consensus/rpc/beaconstream.go @@ -16,9 +16,10 @@ import ( ) const ( - StreamBlockEvent uint16 = 0x01 - StreamHeadEvent uint16 = 0x02 - StreamFinalizedEvent uint16 = 0x04 + StreamBlockEvent uint16 = 0x01 + StreamHeadEvent uint16 = 0x02 + StreamFinalizedEvent uint16 = 0x04 + StreamExecutionPayloadEvent uint16 = 0x08 ) type BeaconStreamEvent struct { @@ -63,7 +64,16 @@ func (bs *BeaconStream) startStream() { bs.running = false }() - stream := bs.subscribeStream(bs.client.endpoint, bs.events) + // Start advanced stream (execution_payload_available) in a 
separate goroutine + // if requested. This runs independently since CL clients may not support it yet. + if bs.events&StreamExecutionPayloadEvent > 0 { + go bs.runAdvancedStream() + } + + // Basic stream: block, head, finalized_checkpoint + basicEvents := bs.events &^ StreamExecutionPayloadEvent + + stream := bs.subscribeStream(bs.client.endpoint, basicEvents) if stream != nil { defer stream.Close() @@ -94,6 +104,43 @@ func (bs *BeaconStream) startStream() { } } +func (bs *BeaconStream) runAdvancedStream() { + for { + stream := bs.subscribeStream(bs.client.endpoint, StreamExecutionPayloadEvent) + if stream == nil { + return + } + + bs.runAdvancedStreamEvents(stream) + stream.Close() + + select { + case <-bs.ctx.Done(): + return + case <-time.After(5 * time.Second): + } + } +} + +func (bs *BeaconStream) runAdvancedStreamEvents(stream *eventstream.Stream) { + for { + select { + case <-bs.ctx.Done(): + return + case evt := <-stream.Events: + if evt.Event() == "execution_payload_available" { + bs.processExecutionPayloadAvailableEvent(evt) + } + case <-stream.Ready: + // advanced stream ready, nothing to signal + case err := <-stream.Errors: + logger.WithField("client", bs.client.name).Debugf("advanced beacon stream error: %v", err) + + return + } + } +} + func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventstream.Stream { var topics strings.Builder @@ -129,6 +176,16 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst topicsCount++ } + if events&StreamExecutionPayloadEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + + fmt.Fprintf(&topics, "execution_payload_available") + + topicsCount++ + } + if topicsCount == 0 { return nil } @@ -207,6 +264,21 @@ func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { } } +func (bs *BeaconStream) processExecutionPayloadAvailableEvent(evt eventsource.Event) { + var parsed v1.ExecutionPayloadAvailableEvent + + err := 
json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + logger.WithField("client", bs.client.name).Warnf("beacon block stream failed to decode execution_payload_available event: %v", err) + return + } + + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamExecutionPayloadEvent, + Data: &parsed, + } +} + func getRedactedURL(requrl string) string { var logurl string diff --git a/pkg/tasks/check_consensus_block_proposals/config.go b/pkg/tasks/check_consensus_block_proposals/config.go index 5b27dbd9..c9d89ba3 100644 --- a/pkg/tasks/check_consensus_block_proposals/config.go +++ b/pkg/tasks/check_consensus_block_proposals/config.go @@ -5,6 +5,7 @@ import "math/big" type Config struct { CheckLookback int `yaml:"checkLookback" json:"checkLookback" desc:"Number of slots to look back when checking for block proposals."` BlockCount int `yaml:"blockCount" json:"blockCount" desc:"Number of matching blocks required to pass the check."` + PayloadTimeout int `yaml:"payloadTimeout" json:"payloadTimeout" desc:"Timeout in seconds to wait for execution payload (gloas+). 
Default: 12"` GraffitiPattern string `yaml:"graffitiPattern" json:"graffitiPattern" desc:"Regex pattern to match block graffiti."` ValidatorNamePattern string `yaml:"validatorNamePattern" json:"validatorNamePattern" desc:"Regex pattern to match validator names."` ExtraDataPattern string `yaml:"extraDataPattern" json:"extraDataPattern" desc:"Regex pattern to match execution payload extra data."` diff --git a/pkg/tasks/check_consensus_block_proposals/task.go b/pkg/tasks/check_consensus_block_proposals/task.go index 0dc7c98e..cd89da7d 100644 --- a/pkg/tasks/check_consensus_block_proposals/task.go +++ b/pkg/tasks/check_consensus_block_proposals/task.go @@ -10,6 +10,9 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/capella" + "github.com/attestantio/go-eth2-client/spec/electra" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/assertoor/pkg/clients/consensus" @@ -214,6 +217,29 @@ func (t *Task) setMatchingBlocksOutput(blocks []*consensus.Block) { t.ctx.Outputs.SetVar("matchingBlockBodies", blockBodies) } +// needsPayload returns true if any payload-dependent filter is configured. +func (t *Task) needsPayload() bool { + return t.config.ExtraDataPattern != "" || + t.config.MinTransactionCount > 0 || + t.config.MinWithdrawalCount > 0 || + len(t.config.ExpectWithdrawals) > 0 || + t.config.MinDepositRequestCount > 0 || + len(t.config.ExpectDepositRequests) > 0 || + t.config.MinWithdrawalRequestCount > 0 || + len(t.config.ExpectWithdrawalRequests) > 0 || + t.config.MinConsolidationRequestCount > 0 || + len(t.config.ExpectConsolidationRequests) > 0 +} + +// getPayloadTimeout returns the configured payload timeout or the default of 12 seconds. 
+func (t *Task) getPayloadTimeout() time.Duration { + if t.config.PayloadTimeout > 0 { + return time.Duration(t.config.PayloadTimeout) * time.Second + } + + return 12 * time.Second +} + //nolint:gocyclo // ignore func (t *Task) checkBlock(ctx context.Context, block *consensus.Block) bool { blockData := block.AwaitBlock(ctx, 2*time.Second) @@ -222,6 +248,18 @@ func (t *Task) checkBlock(ctx context.Context, block *consensus.Block) bool { return false } + // For gloas+ blocks, load payload if any payload-dependent checks are configured + var payload *gloas.SignedExecutionPayloadEnvelope + + isGloas := blockData.Version >= spec.DataVersionGloas + if isGloas && t.needsPayload() { + payload = block.AwaitPayload(ctx, t.getPayloadTimeout()) + if payload == nil { + t.logger.Warnf("could not fetch payload for gloas block %v [0x%x]", block.Slot, block.Root) + return false + } + } + // check validator name if t.config.ValidatorNamePattern != "" && !t.checkBlockValidatorName(block, blockData) { return false @@ -233,7 +271,7 @@ func (t *Task) checkBlock(ctx context.Context, block *consensus.Block) bool { } // check extra data - if t.config.ExtraDataPattern != "" && !t.checkBlockExtraData(block, blockData) { + if t.config.ExtraDataPattern != "" && !t.checkBlockExtraData(block, blockData, payload) { return false } @@ -273,12 +311,12 @@ func (t *Task) checkBlock(ctx context.Context, block *consensus.Block) bool { } // check withdrawal count - if (t.config.MinWithdrawalCount > 0 || len(t.config.ExpectWithdrawals) > 0) && !t.checkBlockWithdrawals(block, blockData) { + if (t.config.MinWithdrawalCount > 0 || len(t.config.ExpectWithdrawals) > 0) && !t.checkBlockWithdrawals(block, blockData, payload) { return false } // check transaction count - if t.config.MinTransactionCount > 0 && !t.checkBlockTransactions(block, blockData) { + if t.config.MinTransactionCount > 0 && !t.checkBlockTransactions(block, blockData, payload) { return false } @@ -288,17 +326,17 @@ func (t *Task) 
checkBlock(ctx context.Context, block *consensus.Block) bool { } // check deposit request count - if (t.config.MinDepositRequestCount > 0 || len(t.config.ExpectDepositRequests) > 0) && !t.checkBlockDepositRequests(block, blockData) { + if (t.config.MinDepositRequestCount > 0 || len(t.config.ExpectDepositRequests) > 0) && !t.checkBlockDepositRequests(block, blockData, payload) { return false } // check withdrawal request count - if (t.config.MinWithdrawalRequestCount > 0 || len(t.config.ExpectWithdrawalRequests) > 0) && !t.checkBlockWithdrawalRequests(block, blockData) { + if (t.config.MinWithdrawalRequestCount > 0 || len(t.config.ExpectWithdrawalRequests) > 0) && !t.checkBlockWithdrawalRequests(block, blockData, payload) { return false } // check consolidation request count - if (t.config.MinConsolidationRequestCount > 0 || len(t.config.ExpectConsolidationRequests) > 0) && !t.checkBlockConsolidationRequests(block, blockData) { + if (t.config.MinConsolidationRequestCount > 0 || len(t.config.ExpectConsolidationRequests) > 0) && !t.checkBlockConsolidationRequests(block, blockData, payload) { return false } @@ -349,8 +387,17 @@ func (t *Task) checkBlockValidatorName(block *consensus.Block, blockData *spec.V return true } -func (t *Task) checkBlockExtraData(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - extraData, err := consensus.GetExecutionExtraData(blockData) +func (t *Task) checkBlockExtraData(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var extraData []byte + + var err error + + if blockData.Version >= spec.DataVersionGloas { + extraData, err = consensus.GetPayloadExtraData(payload) + } else { + extraData, err = consensus.GetExecutionExtraData(blockData) + } + if err != nil { t.logger.Warnf("could not get extra data for block %v [0x%x]: %v", block.Slot, block.Root, err) return false @@ -636,11 +683,24 @@ func (t *Task) checkBlockBlsChanges(block 
*consensus.Block, blockData *spec.Vers return true } -func (t *Task) checkBlockWithdrawals(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - withdrawals, err := blockData.Withdrawals() - if err != nil { - t.logger.Warnf("could not get withdrawals for block %v [0x%x]: %v", block.Slot, block.Root, err) - return false +func (t *Task) checkBlockWithdrawals(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var withdrawals []*capella.Withdrawal + + if blockData.Version >= spec.DataVersionGloas { + if payload == nil || payload.Message == nil || payload.Message.Payload == nil { + t.logger.Warnf("could not get withdrawals for gloas block %v [0x%x]: no payload", block.Slot, block.Root) + return false + } + + withdrawals = payload.Message.Payload.Withdrawals + } else { + var err error + + withdrawals, err = blockData.Withdrawals() + if err != nil { + t.logger.Warnf("could not get withdrawals for block %v [0x%x]: %v", block.Slot, block.Root, err) + return false + } } if len(withdrawals) < t.config.MinWithdrawalCount { @@ -694,15 +754,28 @@ func (t *Task) checkBlockWithdrawals(block *consensus.Block, blockData *spec.Ver return true } -func (t *Task) checkBlockTransactions(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - transactions, err := blockData.ExecutionTransactions() - if err != nil { - t.logger.Warnf("could not get transactions for block %v [0x%x]: %v", block.Slot, block.Root, err) - return false +func (t *Task) checkBlockTransactions(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var txCount int + + if blockData.Version >= spec.DataVersionGloas { + if payload == nil || payload.Message == nil || payload.Message.Payload == nil { + t.logger.Warnf("could not get transactions for gloas block %v [0x%x]: no payload", block.Slot, block.Root) + return false + } + + txCount = 
len(payload.Message.Payload.Transactions) + } else { + transactions, err := blockData.ExecutionTransactions() + if err != nil { + t.logger.Warnf("could not get transactions for block %v [0x%x]: %v", block.Slot, block.Root, err) + return false + } + + txCount = len(transactions) } - if len(transactions) < t.config.MinTransactionCount { - t.logger.Infof("check failed for block %v [0x%x]: not enough transactions (want: >= %v, have: %v)", block.Slot, block.Root, t.config.MinTransactionCount, len(transactions)) + if txCount < t.config.MinTransactionCount { + t.logger.Infof("check failed for block %v [0x%x]: not enough transactions (want: >= %v, have: %v)", block.Slot, block.Root, t.config.MinTransactionCount, txCount) return false } @@ -724,11 +797,24 @@ func (t *Task) checkBlockBlobs(block *consensus.Block, blockData *spec.Versioned return true } -func (t *Task) checkBlockDepositRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - executionRequests, err := blockData.ExecutionRequests() - if err != nil { - t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) - return false +func (t *Task) checkBlockDepositRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var executionRequests *electra.ExecutionRequests + + if blockData.Version >= spec.DataVersionGloas { + if payload == nil || payload.Message == nil || payload.Message.ExecutionRequests == nil { + t.logger.Warnf("could not get execution requests for gloas block %v [0x%x]: no payload", block.Slot, block.Root) + return false + } + + executionRequests = payload.Message.ExecutionRequests + } else { + var err error + + executionRequests, err = blockData.ExecutionRequests() + if err != nil { + t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) + return false + } } depositRequests := executionRequests.Deposits @@ 
-774,11 +860,24 @@ func (t *Task) checkBlockDepositRequests(block *consensus.Block, blockData *spec return true } -func (t *Task) checkBlockWithdrawalRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - executionRequests, err := blockData.ExecutionRequests() - if err != nil { - t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) - return false +func (t *Task) checkBlockWithdrawalRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var executionRequests *electra.ExecutionRequests + + if blockData.Version >= spec.DataVersionGloas { + if payload == nil || payload.Message == nil || payload.Message.ExecutionRequests == nil { + t.logger.Warnf("could not get execution requests for gloas block %v [0x%x]: no payload", block.Slot, block.Root) + return false + } + + executionRequests = payload.Message.ExecutionRequests + } else { + var err error + + executionRequests, err = blockData.ExecutionRequests() + if err != nil { + t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) + return false + } } withdrawalRequests := executionRequests.Withdrawals @@ -828,11 +927,24 @@ func (t *Task) checkBlockWithdrawalRequests(block *consensus.Block, blockData *s return true } -func (t *Task) checkBlockConsolidationRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock) bool { - executionRequests, err := blockData.ExecutionRequests() - if err != nil { - t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) - return false +func (t *Task) checkBlockConsolidationRequests(block *consensus.Block, blockData *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) bool { + var executionRequests *electra.ExecutionRequests + + if blockData.Version >= spec.DataVersionGloas { + if payload == nil 
|| payload.Message == nil || payload.Message.ExecutionRequests == nil { + t.logger.Warnf("could not get execution requests for gloas block %v [0x%x]: no payload", block.Slot, block.Root) + return false + } + + executionRequests = payload.Message.ExecutionRequests + } else { + var err error + + executionRequests, err = blockData.ExecutionRequests() + if err != nil { + t.logger.Warnf("could not get execution requests for block %v [0x%x]: %v", block.Slot, block.Root, err) + return false + } } consolidationRequests := executionRequests.Consolidations diff --git a/pkg/tasks/check_consensus_builder_status/README.md b/pkg/tasks/check_consensus_builder_status/README.md new file mode 100644 index 00000000..17338b0d --- /dev/null +++ b/pkg/tasks/check_consensus_builder_status/README.md @@ -0,0 +1,62 @@ +## `check_consensus_builder_status` Task + +### Description +The `check_consensus_builder_status` task verifies the status of builders on the consensus chain. Builders are a GLOAS-specific concept stored in a separate section of the beacon state (not the validator set). The task uses a shared builder cache that loads the full beacon state to extract builder information. + +#### Task Behavior +- The task monitors builder status at each epoch. +- By default, the task returns immediately when a matching builder is found that meets all criteria. +- Use `continueOnPass: true` to keep monitoring even after success (useful for tracking status changes). +- The builder cache is shared across tasks and also updates the validator set cache when loading the full beacon state. + +### Configuration Parameters + +- **`builderPubKey`**:\ + The public key of the builder to check. If specified, the task will focus on the builder with this public key. Default: `""`. + +- **`builderIndex`**:\ + The index of a specific builder in the builder list. If set, the task focuses on the builder at this index. If `null`, no filter on builder index is applied. Default: `null`. 
+ +- **`minBuilderBalance`**:\ + The minimum balance (in gwei) the builder must have. Default: `0`. + +- **`maxBuilderBalance`**:\ + The maximum balance (in gwei) the builder may have. Default: `null`. + +- **`expectActive`**:\ + If `true`, expect the builder to be active (withdrawable_epoch == FAR_FUTURE_EPOCH). Default: `false`. + +- **`expectExiting`**:\ + If `true`, expect the builder to be exiting or exited (withdrawable_epoch != FAR_FUTURE_EPOCH). Default: `false`. + +- **`failOnCheckMiss`**:\ + If `false`, the task will continue running and wait for the builder to match the expected status. If `true`, the task will fail immediately upon a status mismatch. Default: `false`. + +- **`continueOnPass`**:\ + If set to `true`, the task continues monitoring builder status even after a matching builder is found. Default: `false`. + +### Defaults + +```yaml +- name: check_consensus_builder_status + config: + builderPubKey: "" + builderIndex: null + minBuilderBalance: 0 + maxBuilderBalance: null + expectActive: false + expectExiting: false + failOnCheckMiss: false + continueOnPass: false +``` + +### Outputs + +- **`builder`**:\ + The builder information object containing pubkey, balance, deposit_epoch, withdrawable_epoch, and other data. + +- **`builderIndex`**:\ + The builder's index in the builder list. + +- **`pubkey`**:\ + The builder's public key as a hex string. 
diff --git a/pkg/tasks/check_consensus_builder_status/config.go b/pkg/tasks/check_consensus_builder_status/config.go new file mode 100644 index 00000000..edb94f93 --- /dev/null +++ b/pkg/tasks/check_consensus_builder_status/config.go @@ -0,0 +1,21 @@ +package checkconsensusbuildersstatus + +type Config struct { + BuilderPubKey string `yaml:"builderPubKey" json:"builderPubKey" desc:"Public key of the builder to check."` + BuilderIndex *uint64 `yaml:"builderIndex" json:"builderIndex" desc:"Index of the builder to check."` + MinBuilderBalance uint64 `yaml:"minBuilderBalance" json:"minBuilderBalance" desc:"Minimum builder balance required (in gwei)."` + MaxBuilderBalance *uint64 `yaml:"maxBuilderBalance" json:"maxBuilderBalance" desc:"Maximum builder balance allowed (in gwei)."` + ExpectExiting bool `yaml:"expectExiting" json:"expectExiting" desc:"If true, expect the builder to have a non-FAR_FUTURE withdrawable epoch (i.e. exiting or exited)."` + ExpectActive bool `yaml:"expectActive" json:"expectActive" desc:"If true, expect the builder to have FAR_FUTURE withdrawable epoch (i.e. 
active)."` + FailOnCheckMiss bool `yaml:"failOnCheckMiss" json:"failOnCheckMiss" desc:"If true, fail the task when builder status check condition is not met."` + ContinueOnPass bool `yaml:"continueOnPass" json:"continueOnPass" desc:"If true, continue monitoring after the check passes instead of completing immediately."` + ClientPattern string `yaml:"clientPattern" json:"clientPattern" desc:"Regex pattern to select specific client endpoints for state queries."` +} + +func DefaultConfig() Config { + return Config{} +} + +func (c *Config) Validate() error { + return nil +} diff --git a/pkg/tasks/check_consensus_builder_status/task.go b/pkg/tasks/check_consensus_builder_status/task.go new file mode 100644 index 00000000..7a14861c --- /dev/null +++ b/pkg/tasks/check_consensus_builder_status/task.go @@ -0,0 +1,220 @@ +package checkconsensusbuildersstatus + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethpandaops/assertoor/pkg/types" + "github.com/ethpandaops/assertoor/pkg/vars" + "github.com/sirupsen/logrus" +) + +var ( + TaskName = "check_consensus_builder_status" + TaskDescriptor = &types.TaskDescriptor{ + Name: TaskName, + Description: "Check builder status on consensus chain by loading the full beacon state.", + Category: "consensus", + Config: DefaultConfig(), + Outputs: []types.TaskOutputDefinition{ + { + Name: "builder", + Type: "object", + Description: "The builder information object.", + }, + { + Name: "builderIndex", + Type: "number", + Description: "The builder's index in the builder list.", + }, + { + Name: "pubkey", + Type: "string", + Description: "The builder's public key.", + }, + }, + NewTask: NewTask, + } +) + +type Task struct { + ctx *types.TaskContext + options *types.TaskOptions + config Config + logger logrus.FieldLogger +} + +func NewTask(ctx *types.TaskContext, options *types.TaskOptions) (types.Task, error) { + return &Task{ + ctx: ctx, + options: options, + logger: 
ctx.Logger.GetLogger(), + }, nil +} + +func (t *Task) Config() interface{} { + return t.config +} + +func (t *Task) Timeout() time.Duration { + return t.options.Timeout.Duration +} + +func (t *Task) LoadConfig() error { + config := DefaultConfig() + + if t.options.Config != nil { + if err := t.options.Config.Unmarshal(&config); err != nil { + return fmt.Errorf("error parsing task config for %v: %w", TaskName, err) + } + } + + err := t.ctx.Vars.ConsumeVars(&config, t.options.ConfigVars) + if err != nil { + return err + } + + if valerr := config.Validate(); valerr != nil { + return valerr + } + + t.config = config + + return nil +} + +func (t *Task) Execute(ctx context.Context) error { + consensusPool := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool() + + wallclockEpochSubscription := consensusPool.GetBlockCache().SubscribeWallclockEpochEvent(10) + defer wallclockEpochSubscription.Unsubscribe() + + checkCount := 0 + + checkCount++ + + if done, err := t.processCheck(ctx, checkCount); done { + return err + } + + for { + select { + case <-wallclockEpochSubscription.Channel(): + checkCount++ + + if done, err := t.processCheck(ctx, checkCount); done { + return err + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (t *Task) processCheck(ctx context.Context, checkCount int) (bool, error) { + checkResult := t.runBuilderStatusCheck(ctx) + + _, epoch, _ := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBlockCache().GetWallclock().Now() + t.logger.Infof("epoch %v check result: %v", epoch.Number(), checkResult) + + switch { + case checkResult: + t.ctx.SetResult(types.TaskResultSuccess) + t.ctx.ReportProgress(100, fmt.Sprintf("Builder status check passed at epoch %d", epoch.Number())) + + if !t.config.ContinueOnPass { + return true, nil + } + + return false, nil + case t.config.FailOnCheckMiss: + t.ctx.SetResult(types.TaskResultFailure) + t.ctx.ReportProgress(0, fmt.Sprintf("Builder status check failed at epoch %d", epoch.Number())) + 
+ return true, fmt.Errorf("builder status check failed at epoch %d", epoch.Number()) + default: + t.ctx.SetResult(types.TaskResultNone) + t.ctx.ReportProgress(0, fmt.Sprintf("Waiting for builder status... (attempt %d)", checkCount)) + + return false, nil + } +} + +func (t *Task) runBuilderStatusCheck(_ context.Context) bool { + builderSet := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBuilderSet() + if len(builderSet) == 0 { + t.logger.Infof("check failed: no builders in builder set") + return false + } + + pubkey := []byte{} + if t.config.BuilderPubKey != "" { + pubkey = common.FromHex(t.config.BuilderPubKey) + } + + for _, info := range builderSet { + builder := info.Builder + + if t.config.BuilderIndex != nil && uint64(info.Index) != *t.config.BuilderIndex { + continue + } + + if t.config.BuilderPubKey != "" && !bytes.Equal(pubkey, builder.PublicKey[:]) { + continue + } + + // Found a matching builder + t.logger.Infof("builder found: index %v, pubkey 0x%x, balance %v, deposit_epoch %v, withdrawable_epoch %v", + info.Index, builder.PublicKey[:], builder.Balance, builder.DepositEpoch, builder.WithdrawableEpoch) + + if body, err := vars.GeneralizeData(builder); err == nil { + t.ctx.Outputs.SetVar("builder", body) + } else { + t.logger.Warnf("failed encoding builder info: %v", err) + } + + t.ctx.Outputs.SetVar("builderIndex", uint64(info.Index)) + t.ctx.Outputs.SetVar("pubkey", fmt.Sprintf("0x%x", builder.PublicKey[:])) + + // FAR_FUTURE_EPOCH sentinel value + farFutureEpoch := uint64(0xFFFFFFFFFFFFFFFF) + + // is_active_builder: deposit_epoch < finalized_epoch AND withdrawable_epoch == FAR_FUTURE + if t.config.ExpectActive { + if uint64(builder.WithdrawableEpoch) != farFutureEpoch { + t.logger.Infof("check failed: expected active builder but withdrawable_epoch is %v", builder.WithdrawableEpoch) + continue + } + + finalizedEpoch, _ := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBlockCache().GetFinalizedCheckpoint() + if 
builder.DepositEpoch >= finalizedEpoch { + t.logger.Infof("check failed: builder deposit not yet finalized (deposit_epoch: %v, finalized_epoch: %v)", builder.DepositEpoch, finalizedEpoch) + continue + } + } + + if t.config.ExpectExiting && uint64(builder.WithdrawableEpoch) == farFutureEpoch { + t.logger.Infof("check failed: expected exiting builder but withdrawable_epoch is FAR_FUTURE") + continue + } + + if t.config.MinBuilderBalance > 0 && uint64(builder.Balance) < t.config.MinBuilderBalance { + t.logger.Infof("check failed: builder balance below minimum: %v", builder.Balance) + continue + } + + if t.config.MaxBuilderBalance != nil && uint64(builder.Balance) > *t.config.MaxBuilderBalance { + t.logger.Infof("check failed: builder balance above maximum: %v", builder.Balance) + continue + } + + return true + } + + t.logger.Infof("check failed: no matching builder found") + + return false +} diff --git a/pkg/tasks/generate_deposits/task.go b/pkg/tasks/generate_deposits/task.go index e1e41003..4e522be7 100644 --- a/pkg/tasks/generate_deposits/task.go +++ b/pkg/tasks/generate_deposits/task.go @@ -13,6 +13,7 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -153,6 +154,14 @@ func (t *Task) Execute(ctx context.Context) error { defer subscription.Unsubscribe() } + // Subscribe early so we don't miss the block containing the deposit. + // With EIP-6110, deposits are included in the same beacon block as the EL transaction. 
+ var inclusionSubscription *consensus.Subscription[*consensus.Block] + if t.config.AwaitInclusion { + inclusionSubscription = t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBlockCache().SubscribeBlockEvent(10) + defer inclusionSubscription.Unsubscribe() + } + var pendingChan chan bool pendingWg := sync.WaitGroup{} @@ -328,7 +337,7 @@ func (t *Task) Execute(ctx context.Context) error { // Await inclusion in beacon blocks if configured if t.config.AwaitInclusion && len(validatorPubkeys) > 0 { - err := t.awaitInclusion(ctx, validatorPubkeys, totalCount) + err := t.awaitInclusion(ctx, inclusionSubscription, validatorPubkeys, totalCount) if err != nil { return err } @@ -337,10 +346,7 @@ func (t *Task) Execute(ctx context.Context) error { return nil } -func (t *Task) awaitInclusion(ctx context.Context, validatorPubkeys []string, totalCount int) error { - blockSubscription := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBlockCache().SubscribeBlockEvent(10) - defer blockSubscription.Unsubscribe() - +func (t *Task) awaitInclusion(ctx context.Context, blockSubscription *consensus.Subscription[*consensus.Block], validatorPubkeys []string, totalCount int) error { // Create a map of pending pubkeys for faster lookup pendingPubkeys := make(map[string]bool, len(validatorPubkeys)) for _, pubkey := range validatorPubkeys { @@ -363,30 +369,57 @@ func (t *Task) awaitInclusion(ctx context.Context, validatorPubkeys []string, to continue } + // Check old-style deposits (pre-Electra) deposits, err := blockData.Deposits() - if err != nil { - t.logger.Warnf("could not get deposits from block %v: %v", block.Slot, err) - continue - } + if err == nil { + for _, deposit := range deposits { + pubkeyStr := deposit.Data.PublicKey.String() + if pendingPubkeys[pubkeyStr] { + delete(pendingPubkeys, pubkeyStr) - for _, deposit := range deposits { - pubkeyStr := deposit.Data.PublicKey.String() - if !pendingPubkeys[pubkeyStr] { - continue + includedCount++ + } } + 
} - delete(pendingPubkeys, pubkeyStr) - - includedCount++ + // Check EIP-6110 deposit requests from execution requests + if blockData.Version >= spec.DataVersionGloas { + // GLOAS: execution requests are in the separate payload envelope + payload := block.AwaitPayload(ctx, 2*time.Second) + if payload != nil && payload.Message != nil && payload.Message.ExecutionRequests != nil { + for _, depositReq := range payload.Message.ExecutionRequests.Deposits { + pubkeyStr := depositReq.Pubkey.String() + if pendingPubkeys[pubkeyStr] { + delete(pendingPubkeys, pubkeyStr) + + includedCount++ + } + } + } + } else { + // Electra: execution requests are in the beacon block body + execRequests, err := blockData.ExecutionRequests() + if err == nil && execRequests != nil { + for _, depositReq := range execRequests.Deposits { + pubkeyStr := depositReq.Pubkey.String() + if pendingPubkeys[pubkeyStr] { + delete(pendingPubkeys, pubkeyStr) + + includedCount++ + } + } + } + } - t.ctx.Outputs.SetVar("includedDeposits", includedCount) - t.logger.Infof("Deposit for validator %s included in block %d (%d/%d)", - pubkeyStr, block.Slot, includedCount, totalCount) + t.ctx.Outputs.SetVar("includedDeposits", includedCount) - // Calculate progress: 50% for generation + 50% for inclusion + if includedCount > 0 { inclusionProgress := float64(includedCount) / float64(totalCount) * 50 t.ctx.ReportProgress(50+inclusionProgress, fmt.Sprintf("Awaiting inclusion: %d/%d deposits included", includedCount, totalCount)) + + t.logger.Infof("deposits included in block %d (%d/%d)", + block.Slot, includedCount, totalCount) } } } diff --git a/pkg/tasks/generate_exits/README.md b/pkg/tasks/generate_exits/README.md index c1652f60..a9e2c184 100644 --- a/pkg/tasks/generate_exits/README.md +++ b/pkg/tasks/generate_exits/README.md @@ -20,6 +20,12 @@ The `generate_exits` task is designed to create and send voluntary exit transact - **`indexCount`**:\ The number of validator keys to generate from the mnemonic, determining how 
many unique exit transactions will be created. +- **`builderExit`**:\ + If set to `true`, generates builder exits instead of validator exits. Builder exits use the `BUILDER_INDEX_FLAG` (2^40) OR'd with the builder index in the `ValidatorIndex` field of the voluntary exit message. The task looks up the pubkey in the shared builder set cache instead of the validator set. Default: `false`. + +- **`sendToAllClients`**:\ + If set to `true`, submits the voluntary exit to all ready consensus clients in parallel instead of just one. Useful when not all clients support a particular exit type (e.g. builder exits). The task succeeds if at least one client accepts the exit. Default: `false`. + - **`exitEpoch`**:\ The exit epoch number set within the exit message. (defaults to head epoch) @@ -52,6 +58,8 @@ Default settings for the `generate_exits` task: mnemonic: "" startIndex: 0 indexCount: 0 + builderExit: false + sendToAllClients: false exitEpoch: -1 clientPattern: "" excludeClientPattern: "" diff --git a/pkg/tasks/generate_exits/config.go b/pkg/tasks/generate_exits/config.go index 6b5fd3a3..15168c85 100644 --- a/pkg/tasks/generate_exits/config.go +++ b/pkg/tasks/generate_exits/config.go @@ -10,6 +10,8 @@ type Config struct { Mnemonic string `yaml:"mnemonic" json:"mnemonic" require:"B" desc:"Mnemonic phrase used to generate validator keys."` StartIndex int `yaml:"startIndex" json:"startIndex" desc:"Index within the mnemonic from which to start generating validator keys."` IndexCount int `yaml:"indexCount" json:"indexCount" require:"A.3" desc:"Number of validator keys to generate from the mnemonic."` + BuilderExit bool `yaml:"builderExit" json:"builderExit" desc:"If true, generate builder exits instead of validator exits. 
Uses BUILDER_INDEX_FLAG in the validator index field."` + SendToAllClients bool `yaml:"sendToAllClients" json:"sendToAllClients" desc:"If true, submit voluntary exits to all ready consensus clients in parallel instead of just one."` ExitEpoch int64 `yaml:"exitEpoch" json:"exitEpoch" desc:"Exit epoch to set in the voluntary exit message (-1 for current epoch)."` ClientPattern string `yaml:"clientPattern" json:"clientPattern" desc:"Regex pattern to select specific client endpoints for submitting operations."` ExcludeClientPattern string `yaml:"excludeClientPattern" json:"excludeClientPattern" desc:"Regex pattern to exclude certain client endpoints."` diff --git a/pkg/tasks/generate_exits/task.go b/pkg/tasks/generate_exits/task.go index 69fb4166..9d97e4f3 100644 --- a/pkg/tasks/generate_exits/task.go +++ b/pkg/tasks/generate_exits/task.go @@ -282,51 +282,91 @@ func (t *Task) generateVoluntaryExit(ctx context.Context, accountIdx uint64, for return 0, fmt.Errorf("failed generating validator key %v: %w", validatorKeyPath, err) } - // select validator - var validator *v1.Validator - validatorPubkey := validatorPrivkey.PublicKey().Marshal() - for _, val := range t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetValidatorSet() { - if bytes.Equal(val.Validator.PublicKey[:], validatorPubkey) { - validator = val - break + + var exitIndex phase0.ValidatorIndex + + if t.config.BuilderExit { + // Look up builder by pubkey in the builder set + builderSet := t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetBuilderSet() + + var found bool + + for _, info := range builderSet { + if bytes.Equal(info.Builder.PublicKey[:], validatorPubkey) { + exitIndex = consensus.ConvertBuilderIndexToValidatorIndex(info.Index) + found = true + + t.logger.Infof("found builder: index %v, flagged index %v", info.Index, exitIndex) + + break + } } - } - // check validator status - if validator == nil { - return 0, fmt.Errorf("validator not found: 0x%x", validatorPubkey) - } + 
if !found { + return 0, fmt.Errorf("builder not found: 0x%x", validatorPubkey) + } + } else { + // Look up validator by pubkey in the validator set + var validator *v1.Validator - if validator.Validator.ExitEpoch != 18446744073709551615 { - return 0, fmt.Errorf("validator %v is already exited", validator.Index) - } + for _, val := range t.ctx.Scheduler.GetServices().ClientPool().GetConsensusPool().GetValidatorSet() { + if bytes.Equal(val.Validator.PublicKey[:], validatorPubkey) { + validator = val + break + } + } + + if validator == nil { + return 0, fmt.Errorf("validator not found: 0x%x", validatorPubkey) + } + + if validator.Validator.ExitEpoch != 18446744073709551615 { + return 0, fmt.Errorf("validator %v is already exited", validator.Index) + } - // select client - var client *consensus.Client + exitIndex = validator.Index + } + // select clients clientPool := t.ctx.Scheduler.GetServices().ClientPool() + + var clients []*consensus.Client + if t.config.ClientPattern == "" && t.config.ExcludeClientPattern == "" { - client = clientPool.GetConsensusPool().GetReadyEndpoint(consensus.AnyClient) + if t.config.SendToAllClients { + for _, c := range clientPool.GetConsensusPool().GetAllEndpoints() { + if clientPool.GetConsensusPool().IsClientReady(c) { + clients = append(clients, c) + } + } + } else { + client := clientPool.GetConsensusPool().GetReadyEndpoint(consensus.AnyClient) + if client != nil { + clients = []*consensus.Client{client} + } + } } else { - clients := clientPool.GetClientsByNamePatterns(t.config.ClientPattern, t.config.ExcludeClientPattern) - if len(clients) == 0 { - return 0, fmt.Errorf("no client found with pattern %v", t.config.ClientPattern) + poolClients := clientPool.GetClientsByNamePatterns(t.config.ClientPattern, t.config.ExcludeClientPattern) + for _, c := range poolClients { + clients = append(clients, c.ConsensusClient) } + } - client = clients[0].ConsensusClient + if len(clients) == 0 { + return 0, fmt.Errorf("no ready client found") } // 
build voluntary exit message specs := clientPool.GetConsensusPool().GetBlockCache().GetSpecs() operation := &phase0.VoluntaryExit{ - ValidatorIndex: validator.Index, + ValidatorIndex: exitIndex, } if t.config.ExitEpoch >= 0 { operation.Epoch = phase0.Epoch(t.config.ExitEpoch) } else { - currentSlot, _ := client.GetLastHead() + currentSlot, _ := clients[0].GetLastHead() operation.Epoch = phase0.Epoch(currentSlot / phase0.Slot(specs.SlotsPerEpoch)) } @@ -357,14 +397,31 @@ func (t *Task) generateVoluntaryExit(ctx context.Context, accountIdx uint64, for signedMsg.Message = operation copy(signedMsg.Signature[:], sig.Serialize()) - t.logger.WithField("client", client.GetName()).Infof("sending voluntary exit for validator %v", validator.Index) + // Submit to all selected clients + var lastErr error - err = client.GetRPCClient().SubmitVoluntaryExits(ctx, &signedMsg) - if err != nil { - return 0, err + successCount := 0 + + for _, client := range clients { + submitErr := client.GetRPCClient().SubmitVoluntaryExits(ctx, &signedMsg) + if submitErr != nil { + t.logger.WithField("client", client.GetName()).Warnf("failed submitting voluntary exit: %v", submitErr) + + lastErr = submitErr + } else { + t.logger.WithField("client", client.GetName()).Infof("sent voluntary exit for index %v (builder: %v)", exitIndex, t.config.BuilderExit) + + successCount++ + } + } + + if successCount == 0 { + return 0, fmt.Errorf("all clients rejected voluntary exit: %w", lastErr) } - return validator.Index, nil + t.logger.Infof("voluntary exit accepted by %d/%d clients", successCount, len(clients)) + + return exitIndex, nil } func (t *Task) mnemonicToSeed(mnemonic string) (seed []byte, err error) { diff --git a/pkg/tasks/tasks.go b/pkg/tasks/tasks.go index 631a533e..c4b9c8b4 100644 --- a/pkg/tasks/tasks.go +++ b/pkg/tasks/tasks.go @@ -6,6 +6,7 @@ import ( checkclientsarehealthy "github.com/ethpandaops/assertoor/pkg/tasks/check_clients_are_healthy" checkconsensusattestationstats 
"github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_attestation_stats" checkconsensusblockproposals "github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_block_proposals" + checkconsensusbuildersstatus "github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_builder_status" checkconsensusfinality "github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_finality" checkconsensusforks "github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_forks" checkconsensusidentity "github.com/ethpandaops/assertoor/pkg/tasks/check_consensus_identity" @@ -50,6 +51,7 @@ var AvailableTaskDescriptors = []*types.TaskDescriptor{ checkclientsarehealthy.TaskDescriptor, checkconsensusattestationstats.TaskDescriptor, checkconsensusblockproposals.TaskDescriptor, + checkconsensusbuildersstatus.TaskDescriptor, checkconsensusfinality.TaskDescriptor, checkconsensusforks.TaskDescriptor, checkconsensusidentity.TaskDescriptor, diff --git a/playbooks/gloas-dev/builder-deposit.yaml b/playbooks/gloas-dev/builder-deposit.yaml new file mode 100644 index 00000000..c0b9573d --- /dev/null +++ b/playbooks/gloas-dev/builder-deposit.yaml @@ -0,0 +1,35 @@ +id: builder-deposit +name: Validator Deposit with 0x03 Builder Credentials +timeout: 30m +config: + walletPrivkey: '' + depositContract: '0x00000000219ab540356cBB839Cbe05303d7705Fa' # Mainnet deposit contract +tasks: + - name: check_clients_are_healthy + title: Check if at least one client is ready + timeout: 5m + config: + minClientCount: 1 + - name: generate_child_wallet + id: test_wallet + title: Generate wallet for deposit operations + config: + walletSeed: builder-deposit-test + prefundMinBalance: 10000000000000000000 + configVars: + privateKey: walletPrivkey + - name: get_random_mnemonic + id: test_mnemonic + title: Generate random mnemonic for test validator + - name: generate_deposits + title: Generate validator deposit with 0x03 credentials + config: + limitTotal: 1 + depositAmount: 5 + awaitReceipt: true + awaitInclusion: 
true + configVars: + walletPrivkey: tasks.test_wallet.outputs.childWallet.privkey + mnemonic: tasks.test_mnemonic.outputs.mnemonic + depositContract: depositContract + withdrawalCredentials: '| "0x03" + ("00" * 11) + (.tasks.test_wallet.outputs.childWallet.address | ltrimstr("0x"))' \ No newline at end of file diff --git a/playbooks/gloas-dev/builder-lifecycle.yaml b/playbooks/gloas-dev/builder-lifecycle.yaml new file mode 100644 index 00000000..28d37df3 --- /dev/null +++ b/playbooks/gloas-dev/builder-lifecycle.yaml @@ -0,0 +1,589 @@ +id: builder-lifecycle +name: "Builder Lifecycle Test (GLOAS)" +timeout: 24h +config: + walletPrivkey: '' + depositContract: '0x00000000219ab540356cBB839Cbe05303d7705Fa' + depositAmount: 5 + +tasks: +- name: check_clients_are_healthy + title: "Check if at least one client is ready" + timeout: 5m + config: + minClientCount: 1 + +# get consensus specs and current slot, then calculate deposit timing +- name: get_consensus_specs + id: get_specs + title: "Get consensus chain specs" + +- name: check_consensus_slot_range + id: current_slot + title: "Get current slot" + +- name: run_shell + id: calc_slots + title: "Calculate deposit timing slots" + config: + envVars: + GLOAS_FORK_EPOCH: "tasks.get_specs.outputs.specs.GLOAS_FORK_EPOCH" + SLOTS_PER_EPOCH: "tasks.get_specs.outputs.specs.SLOTS_PER_EPOCH" + CURRENT_SLOT: "tasks.current_slot.outputs.currentSlot" + command: | + set -e + GLOAS_FORK_EPOCH=$(echo $GLOAS_FORK_EPOCH | jq -r .) + SLOTS_PER_EPOCH=$(echo $SLOTS_PER_EPOCH | jq -r .) + CURRENT_SLOT=$(echo $CURRENT_SLOT | jq -r .) 
+ + GLOAS_ACTIVATION_SLOT=$((GLOAS_FORK_EPOCH * SLOTS_PER_EPOCH)) + + if [ "$CURRENT_SLOT" -lt "$((GLOAS_ACTIVATION_SLOT - 5))" ]; then + # Pre-GLOAS: deposit around GLOAS activation boundary + DEPOSIT1_SLOT=$((GLOAS_ACTIVATION_SLOT - 5)) + DEPOSIT2_SLOT=$((GLOAS_ACTIVATION_SLOT + 5)) + WAIT_EPOCH=$((GLOAS_FORK_EPOCH + 1)) + echo "Mode: pre-GLOAS (deposits around GLOAS activation)" + else + # Post-GLOAS: deposit around next epoch boundary + # Find next epoch boundary at least 10 slots in the future + MIN_FUTURE=$((CURRENT_SLOT + 10)) + TARGET_EPOCH=$(( (MIN_FUTURE + SLOTS_PER_EPOCH - 1) / SLOTS_PER_EPOCH )) + EPOCH_BOUNDARY_SLOT=$((TARGET_EPOCH * SLOTS_PER_EPOCH)) + DEPOSIT1_SLOT=$((EPOCH_BOUNDARY_SLOT - 5)) + DEPOSIT2_SLOT=$((EPOCH_BOUNDARY_SLOT + 5)) + WAIT_EPOCH=$((TARGET_EPOCH + 1)) + echo "Mode: post-GLOAS (deposits around epoch $TARGET_EPOCH boundary)" + fi + + echo "Current slot: $CURRENT_SLOT" + echo "GLOAS activation slot: $GLOAS_ACTIVATION_SLOT" + echo "Deposit 1 slot: $DEPOSIT1_SLOT" + echo "Deposit 2 slot: $DEPOSIT2_SLOT" + echo "Wait epoch: $WAIT_EPOCH" + + echo "::set-output-json deposit1Slot $DEPOSIT1_SLOT" + echo "::set-output-json deposit2Slot $DEPOSIT2_SLOT" + echo "::set-output-json waitEpoch $WAIT_EPOCH" + +# prepare wallet and mnemonic +- name: generate_child_wallet + id: test_wallet + title: "Generate wallet for builder operations" + config: + walletSeed: builder-lifecycle-test + prefundMinBalance: 200000000000000000000 # 200 ETH + configVars: + privateKey: walletPrivkey + +- name: get_random_mnemonic + id: test_mnemonic + title: "Generate mnemonic for builders" + +- name: get_pubkeys_from_mnemonic + id: builder_pubkeys + title: "Get pubkeys for all 10 builder keys" + configVars: + mnemonic: "tasks.test_mnemonic.outputs.mnemonic" + config: + count: 10 + +## +## PHASE 1: Deposit 6 builders (3 before boundary, 3 after) +## + +- name: check_consensus_slot_range + title: "Wait for deposit 1 slot" + timeout: 2h + configVars: + minSlotNumber: 
"tasks.calc_slots.outputs.deposit1Slot" + +- name: generate_deposits + id: deposit_batch1 + title: "Deposit builders 0-2 with 0x03 credentials" + config: + limitTotal: 3 + limitPerSlot: 3 + indexCount: 3 + startIndex: 0 + awaitReceipt: true + awaitInclusion: true + configVars: + walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey" + mnemonic: "tasks.test_mnemonic.outputs.mnemonic" + depositContract: "depositContract" + depositAmount: "depositAmount" + withdrawalCredentials: '| "0x03" + ("00" * 11) + (.tasks.test_wallet.outputs.childWallet.address | ltrimstr("0x"))' + +# Verify batch 1 deposit receipts +- name: run_shell + title: "Verify deposit receipts for builders 0-2" + config: + envVars: + RECEIPTS: "tasks.deposit_batch1.outputs.depositReceipts" + command: | + set -e + COUNT=$(echo "$RECEIPTS" | jq 'length') + FAILED=$(echo "$RECEIPTS" | jq '[.[] | select(.status != "0x1")] | length') + echo "Batch 1: $COUNT receipts, $FAILED failed" + if [ "$FAILED" -gt 0 ]; then + echo "ERROR: some deposits failed" + echo "$RECEIPTS" | jq '.[] | select(.status != "0x1")' + exit 1 + fi + +- name: check_consensus_slot_range + title: "Wait for deposit 2 slot" + timeout: 2h + configVars: + minSlotNumber: "tasks.calc_slots.outputs.deposit2Slot" + +- name: generate_deposits + id: deposit_batch2 + title: "Deposit builders 3-5 with 0x03 credentials" + config: + limitTotal: 3 + limitPerSlot: 3 + indexCount: 3 + startIndex: 3 + awaitReceipt: true + awaitInclusion: true + configVars: + walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey" + mnemonic: "tasks.test_mnemonic.outputs.mnemonic" + depositContract: "depositContract" + depositAmount: "depositAmount" + withdrawalCredentials: '| "0x03" + ("00" * 11) + (.tasks.test_wallet.outputs.childWallet.address | ltrimstr("0x"))' + +# Verify batch 2 deposit receipts +- name: run_shell + title: "Verify deposit receipts for builders 3-5" + config: + envVars: + RECEIPTS: "tasks.deposit_batch2.outputs.depositReceipts" + command: 
      set -e
      COUNT=$(echo "$RECEIPTS" | jq 'length')
      FAILED=$(echo "$RECEIPTS" | jq '[.[] | select(.status != "0x1")] | length')
      echo "Batch 2: $COUNT receipts, $FAILED failed"
      if [ "$FAILED" -gt 0 ]; then
        echo "ERROR: some deposits failed"
        echo "$RECEIPTS" | jq '.[] | select(.status != "0x1")'
        exit 1
      fi

##
## PHASE 2: Wait for all 6 builders to become active
##

- name: check_consensus_slot_range
  title: "Wait for next epoch after deposits"
  timeout: 30m
  configVars:
    minEpochNumber: "tasks.calc_slots.outputs.waitEpoch"

# One matrix cell per builder pubkey; all cells run concurrently.
- name: run_task_matrix
  title: "Wait for all 6 builders to become active"
  timeout: 2h
  configVars:
    matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[:6]"
  config:
    runConcurrent: true
    matrixVar: "builderPubkey"
    task:
      name: check_consensus_builder_status
      title: "Wait for builder ${builderPubkey} to become active"
      config:
        expectActive: true
      configVars:
        builderPubKey: "builderPubkey"

##
## PHASE 3: Exit builders with alternating voluntary / EL-triggered exits.
## Builders 0-2 are spaced 10 slots apart; builders 3-5 are submitted
## back-to-back after the epoch-boundary wait below, so the two groups
## land in different epochs (required for the phase 5/6 index-reuse test).
##

# Record epoch at start of exits (outputs consumed by the epoch-boundary
# check after builder 2's exit)
- name: check_consensus_slot_range
  id: exit_start
  title: "Record epoch at start of exits"

# Builder 0: voluntary exit (fallback to EL).
# continueOnFailure lets the fallback run even if the voluntary exit fails.
- name: run_tasks
  title: "Exit builder 0 (voluntary, EL fallback)"
  config:
    continueOnFailure: true
    tasks:
    - name: generate_exits
      id: vol_exit_0
      title: "Try voluntary exit for builder 0"
      timeout: 2m
      config:
        limitTotal: 1
        limitPerSlot: 1
        indexCount: 1
        startIndex: 0
        builderExit: true
        sendToAllClients: true
        awaitInclusion: true
      configVars:
        mnemonic: "tasks.test_mnemonic.outputs.mnemonic"
    # NOTE(review): the "| ..." prefix marks a runtime expression; this gate
    # presumably reads "voluntary exit did not succeed (result != 1)" —
    # confirm against Assertoor's task-result semantics.
    - name: generate_withdrawal_requests
      title: "Fallback: EL-triggered exit for builder 0"
      if: "| .tasks.vol_exit_0.result != 1"
      config:
        limitTotal: 1
        limitPerSlot: 1
        withdrawAmount: 0
        awaitReceipt: true
        failOnReject: true
      configVars:
        sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[0]"
        walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# Wait 10 slots (first task samples the current slot, second waits past it)
- name: check_consensus_slot_range
  id: post_exit0
  title: "Get current slot after builder 0 exit"
- name: check_consensus_slot_range
  title: "Wait 10 slots"
  timeout: 30m
  configVars:
    minSlotNumber: "| (.tasks.post_exit0.outputs.currentSlot | tonumber) + 10"

# Builder 1: EL-triggered exit (withdrawAmount 0 == full exit)
- name: generate_withdrawal_requests
  title: "Exit builder 1 via EL-triggered exit"
  config:
    limitTotal: 1
    limitPerSlot: 1
    withdrawAmount: 0
    awaitReceipt: true
    failOnReject: true
  configVars:
    sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[1]"
    walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# Wait 10 slots
- name: check_consensus_slot_range
  id: post_exit1
  title: "Get current slot after builder 1 exit"
- name: check_consensus_slot_range
  title: "Wait 10 slots"
  timeout: 30m
  configVars:
    minSlotNumber: "| (.tasks.post_exit1.outputs.currentSlot | tonumber) + 10"

# Builder 2: voluntary exit (fallback to EL)
- name: run_tasks
  title: "Exit builder 2 (voluntary, EL fallback)"
  config:
    continueOnFailure: true
    tasks:
    - name: generate_exits
      id: vol_exit_2
      title: "Try voluntary exit for builder 2"
      # NOTE(review): 1m here vs 2m for builder 0's attempt — confirm the
      # shorter timeout is intentional.
      timeout: 1m
      config:
        limitTotal: 1
        limitPerSlot: 1
        indexCount: 1
        startIndex: 2
        builderExit: true
        sendToAllClients: true
        awaitInclusion: true
      configVars:
        mnemonic: "tasks.test_mnemonic.outputs.mnemonic"
    - name: generate_withdrawal_requests
      title: "Fallback: EL-triggered exit for builder 2"
      if: "| .tasks.vol_exit_2.result != 1"
      config:
        limitTotal: 1
        limitPerSlot: 1
        withdrawAmount: 0
        awaitReceipt: true
        failOnReject: true
      configVars:
        sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[2]"
        walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# After first 3 exits: ensure we cross an epoch boundary before continuing,
# so builders 3-5 exit in a strictly later epoch than builders 0-2.
- name: check_consensus_slot_range
  title: "Ensure exits span at least 2 epochs"
  timeout: 30m
  configVars:
    minEpochNumber: "| (.tasks.exit_start.outputs.currentEpoch | tonumber) + 1"

# Builder 3: EL-triggered exit
- name: generate_withdrawal_requests
  title: "Exit builder 3 via EL-triggered exit"
  config:
    limitTotal: 1
    limitPerSlot: 1
    withdrawAmount: 0
    awaitReceipt: true
    failOnReject: true
  configVars:
    sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[3]"
    walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# Builder 4: voluntary exit (fallback to EL)
- name: run_tasks
  title: "Exit builder 4 (voluntary, EL fallback)"
  config:
    continueOnFailure: true
    tasks:
    - name: generate_exits
      id: vol_exit_4
      title: "Try voluntary exit for builder 4"
      timeout: 1m
      config:
        limitTotal: 1
        limitPerSlot: 1
        indexCount: 1
        startIndex: 4
        builderExit: true
        sendToAllClients: true
        awaitInclusion: true
      configVars:
        mnemonic: "tasks.test_mnemonic.outputs.mnemonic"
    - name: generate_withdrawal_requests
      title: "Fallback: EL-triggered exit for builder 4"
      if: "| .tasks.vol_exit_4.result != 1"
      config:
        limitTotal: 1
        limitPerSlot: 1
        withdrawAmount: 0
        awaitReceipt: true
        failOnReject: true
      configVars:
        sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[4]"
        walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# Builder 5: EL-triggered exit
- name: generate_withdrawal_requests
  title: "Exit builder 5 via EL-triggered exit"
  config:
    limitTotal: 1
    limitPerSlot: 1
    withdrawAmount: 0
    awaitReceipt: true
    failOnReject: true
  configVars:
    sourcePubkey: "tasks.builder_pubkeys.outputs.pubkeys[5]"
    walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

##
## PHASE 4: Wait for builders 0-2 to be fully withdrawn (balance == 0)
## Builders 0-2 exited in epoch N, builders 3-5 exited in epoch N+1
##

# Wrapped in run_task_options with ignoreFailure so a timeout here does not
# fail the playbook (the index-reuse checks in phase 6 are also non-fatal).
- name: run_task_options
  title: "Wait for builders 0-2 to be fully withdrawn (non-fatal)"
  config:
    ignoreFailure: true
    task:
      name: run_task_matrix
      title: "Wait for builders 0-2 to be fully withdrawn"
      timeout: 30m
      configVars:
        matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[:3]"
      config:
        runConcurrent: true
        matrixVar: "builderPubkey"
        task:
          name: check_consensus_builder_status
          title: "Wait for builder ${builderPubkey} to be withdrawn"
          config:
            expectExiting: true
            maxBuilderBalance: 0
          configVars:
            builderPubKey: "builderPubkey"

##
## PHASE 5: Deposit 4 new builders (keys 6-9) to test index reuse
## Builders 0-2 are withdrawn (balance 0) -> indices 0-2 reusable
## Builders 3-5 still exiting (later epoch) -> indices 3-5 NOT reusable
## Expected: key 6->idx 0, key 7->idx 1, key 8->idx 2, key 9->idx 6
##

- name: generate_deposits
  id: deposit_batch3
  title: "Deposit builders 6-9 with 0x03 credentials"
  config:
    limitTotal: 4
    limitPerSlot: 4
    indexCount: 4
    startIndex: 6
    awaitReceipt: true
    awaitInclusion: true
  configVars:
    walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"
    mnemonic: "tasks.test_mnemonic.outputs.mnemonic"
    depositContract: "depositContract"
    depositAmount: "depositAmount"
    # 0x03 prefix + 11 zero bytes + 20-byte wallet address = 32-byte credentials
    withdrawalCredentials: '| "0x03" + ("00" * 11) + (.tasks.test_wallet.outputs.childWallet.address | ltrimstr("0x"))'

# Verify batch 3 deposit receipts (fails the playbook if any receipt has a
# status other than 0x1)
- name: run_shell
  title: "Verify deposit receipts for builders 6-9"
  config:
    envVars:
      RECEIPTS: "tasks.deposit_batch3.outputs.depositReceipts"
    command: |
      set -e
      COUNT=$(echo "$RECEIPTS" | jq 'length')
      FAILED=$(echo "$RECEIPTS" | jq '[.[] | select(.status != "0x1")] | length')
      echo "Batch 3: $COUNT receipts, $FAILED failed"
      if [ "$FAILED" -gt 0 ]; then
        echo "ERROR: some deposits failed"
        echo "$RECEIPTS" | jq '.[] | select(.status != "0x1")'
        exit 1
      fi

# Wait for new builders to become active
- name: run_task_matrix
  title: "Wait for builders 6-9 to become active"
  timeout: 2h
  configVars:
    matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[6:10]"
  config:
    runConcurrent: true
    matrixVar: "builderPubkey"
    task:
      name: check_consensus_builder_status
      title: "Wait for builder ${builderPubkey} to become active"
      config:
        expectActive: true
      configVars:
        builderPubKey: "builderPubkey"

##
## PHASE 6: Verify builder index reuse (non-fatal)
## key 6 -> index 0, key 7 -> index 1, key 8 -> index 2, key 9 -> index 6
##

# continueOnFailure: all four checks run and report even if one misses.
- name: run_tasks
  title: "Verify builder index reuse"
  config:
    continueOnFailure: true
    tasks:
    - name: check_consensus_builder_status
      title: "Verify key 6 reused builder index 0"
      config:
        failOnCheckMiss: true
      configVars:
        builderPubKey: "tasks.builder_pubkeys.outputs.pubkeys[6]"
        builderIndex: "| 0"

    - name: check_consensus_builder_status
      title: "Verify key 7 reused builder index 1"
      config:
        failOnCheckMiss: true
      configVars:
        builderPubKey: "tasks.builder_pubkeys.outputs.pubkeys[7]"
        builderIndex: "| 1"

    - name: check_consensus_builder_status
      title: "Verify key 8 reused builder index 2"
      config:
        failOnCheckMiss: true
      configVars:
        builderPubKey: "tasks.builder_pubkeys.outputs.pubkeys[8]"
        builderIndex: "| 2"

    - name: check_consensus_builder_status
      title: "Verify key 9 got new builder index 6"
      config:
        failOnCheckMiss: true
      configVars:
        builderPubKey: "tasks.builder_pubkeys.outputs.pubkeys[9]"
        builderIndex: "| 6"

##
## PHASE 7: Test invalid withdrawal & consolidation requests on active builders
## These should have no effect but we want to exercise the code paths
##

# Partial withdrawal requests (amount > 0) for active builders - should be no-ops.
# Each request is wrapped in run_task_options/ignoreFailure: a rejection must
# not fail the playbook.
- name: run_task_matrix
  title: "Send partial withdrawal requests for active builders (expect no effect)"
  configVars:
    matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[6:10]"
  config:
    runConcurrent: true
    matrixVar: "builderPubkey"
    task:
      name: run_task_options
      title: "Partial withdrawal request for ${builderPubkey}"
      config:
        ignoreFailure: true
        task:
          name: generate_withdrawal_requests
          title: "Send partial withdrawal for ${builderPubkey}"
          config:
            limitTotal: 1
            limitPerSlot: 1
            withdrawAmount: 1000000000 # 1 ETH in gwei
            awaitReceipt: true
          configVars:
            sourcePubkey: "builderPubkey"
            walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

# Consolidation requests for active builders - should be no-ops.
# [6:9] selects builders 6-8 only; builder 9 is the consolidation target below.
- name: run_task_matrix
  title: "Send consolidation requests for active builders (expect no effect)"
  configVars:
    matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[6:9]"
  config:
    runConcurrent: true
    matrixVar: "builderPubkey"
    task:
      name: run_task_options
      title: "Consolidation request for ${builderPubkey}"
      config:
        ignoreFailure: true
        task:
          name: generate_consolidations
          title: "Send consolidation for ${builderPubkey}"
          config:
            limitTotal: 1
            limitPerSlot: 1
            awaitReceipt: true
          configVars:
            sourcePubkey: "builderPubkey"
            targetPublicKey: "tasks.builder_pubkeys.outputs.pubkeys[9]"
            walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"

##
## PHASE 8: Wait an epoch, then exit all newly deposited builders
##

- name: check_consensus_slot_range
  id: pre_final_exits
  title: "Get current epoch before final exits"
- name: check_consensus_slot_range
  title: "Wait for next epoch"
  timeout: 30m
  configVars:
    minEpochNumber: "| (.tasks.pre_final_exits.outputs.currentEpoch | tonumber) + 1"

# Exit all 4 new builders via EL-triggered exits (sequential: no runConcurrent)
- name: run_task_matrix
  title: "Exit builders 6-9 via EL-triggered exit"
  configVars:
    matrixValues: "tasks.builder_pubkeys.outputs.pubkeys[6:10]"
  config:
    matrixVar: "builderPubkey"
    task:
      name: generate_withdrawal_requests
      title: "EL-triggered exit for ${builderPubkey}"
      config:
        limitTotal: 1
        limitPerSlot: 1
        withdrawAmount: 0
        awaitReceipt: true
        failOnReject: true
      configVars:
        sourcePubkey: "builderPubkey"
        walletPrivkey: "tasks.test_wallet.outputs.childWallet.privkey"