From c6d1085b208fa7f4c7bff3395a1e53e4d3b23233 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 19 Feb 2026 17:44:33 +0530 Subject: [PATCH 01/61] e2e setup added --- e2e-tests/.env.example | 49 + e2e-tests/.gitignore | 3 + e2e-tests/README.md | 205 ++++ e2e-tests/deploy_addresses.json | 26 + e2e-tests/setup.sh | 1111 ++++++++++++++++++++++ local-native/README.md | 3 +- local-native/devnet | 22 +- local-native/scripts/configure-pushuv.sh | 112 +++ local-native/scripts/setup-universal.sh | 8 + 9 files changed, 1536 insertions(+), 3 deletions(-) create mode 100644 e2e-tests/.env.example create mode 100644 e2e-tests/.gitignore create mode 100644 e2e-tests/README.md create mode 100644 e2e-tests/deploy_addresses.json create mode 100755 e2e-tests/setup.sh create mode 100644 local-native/scripts/configure-pushuv.sh diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example new file mode 100644 index 00000000..ebec79f6 --- /dev/null +++ b/e2e-tests/.env.example @@ -0,0 +1,49 @@ +# Copy this file to e2e-tests/.env and adjust values. + +# Path to push-chain workspace root. +# Keep this empty to use auto-detection (parent of e2e-tests). +# PUSH_CHAIN_DIR= + +# Local Push RPC +PUSH_RPC_URL=http://localhost:8545 + +# Local chain info +CHAIN_ID=localchain_9000-1 +KEYRING_BACKEND=test + +# Genesis key recovery/funding +GENESIS_KEY_NAME=genesis-acc-1 +GENESIS_KEY_HOME=./local-native/data/validator1/.pchain + +# Optional: set to skip interactive mnemonic prompt +# GENESIS_MNEMONIC="word1 word2 ..." 
+ +# Address to fund from genesis account +FUND_TO_ADDRESS=push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 +FUND_AMOUNT=1000000000000000000upc +GAS_PRICES=100000000000upc + +# EVM private key used by forge/hardhat scripts +PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 + +# External repositories +CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git +CORE_CONTRACTS_BRANCH=e2e-push-node + +SWAP_AMM_REPO=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git +SWAP_AMM_BRANCH=e2e-push-node + +GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git +GATEWAY_BRANCH=e2e-push-node + +# Local clone layout (outside push-chain directory) +E2E_PARENT_DIR=../ +CORE_CONTRACTS_DIR=../push-chain-core-contracts +SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts +GATEWAY_DIR=../push-chain-gateway-contracts + +# Tracking files +DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json +TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json +TOKEN_CONFIG_PATH=./config/testnet-donut/tokens/eth_sepolia_eth.json +CHAIN_CONFIG_PATH=./config/testnet-donut/chains/eth_sepolia_chain_config.json diff --git a/e2e-tests/.gitignore b/e2e-tests/.gitignore new file mode 100644 index 00000000..ef853ca1 --- /dev/null +++ b/e2e-tests/.gitignore @@ -0,0 +1,3 @@ +.env +logs/ +repos/ diff --git a/e2e-tests/README.md b/e2e-tests/README.md new file mode 100644 index 00000000..810d76cf --- /dev/null +++ b/e2e-tests/README.md @@ -0,0 +1,205 @@ +# e2e-tests setup + +This folder provides a full, automated local E2E bootstrap for Push Chain. + +It covers: + +1. local-native devnet (validators + universal validators) +2. genesis key recovery + account funding +3. core contracts deployment +4. swap AMM deployment (WPC + V3 core + V3 periphery) +5. pool creation for pETH/WPC +6. core `.env` generation from deployed addresses +7. token config update (`eth_sepolia_eth.json`) +8. 
gateway contracts deployment +9. uregistry chain/token config submission + +--- + +## What gets created + +- `e2e-tests/repos/` — cloned external repos + - push-chain-core-contracts + - push-chain-swap-internal-amm-contracts + - push-chain-gateway-contracts +- `e2e-tests/logs/` — logs for each major deployment step +- `e2e-tests/deploy_addresses.json` — contract/token address source-of-truth + +--- + +## Prerequisites + +Required tools: + +- `git` +- `jq` +- `node`, `npm`, `npx` +- `forge` (Foundry) +- `make` + +Also ensure the Push Chain repo builds/runs locally. + +--- + +## Configuration + +Copy env template: + +```bash +cp e2e-tests/.env.example e2e-tests/.env +``` + +Important variables in `.env`: + +- `PUSH_RPC_URL` (default `http://localhost:8545`) +- `PRIVATE_KEY` +- `FUND_TO_ADDRESS` +- `CORE_CONTRACTS_BRANCH` +- `SWAP_AMM_BRANCH` +- `GATEWAY_BRANCH` (currently `e2e-push-node`) + +Path settings are repository-relative and portable. + +--- + +## One-command full run + +```bash +./e2e-tests/setup.sh all +``` + +This runs the full sequence in order: + +1. `devnet` +2. `recover-genesis-key` +3. `fund` +4. `setup-core` +5. `setup-swap` +6. `sync-addresses` +7. `create-pool` +8. `check-addresses` +9. `write-core-env` +10. `update-token-config` +11. `setup-gateway` +12. `add-uregistry-configs` + +--- + +## Command reference + +```bash +./e2e-tests/setup.sh devnet +./e2e-tests/setup.sh print-genesis +./e2e-tests/setup.sh recover-genesis-key +./e2e-tests/setup.sh fund +./e2e-tests/setup.sh setup-core +./e2e-tests/setup.sh setup-swap +./e2e-tests/setup.sh sync-addresses +./e2e-tests/setup.sh create-pool +./e2e-tests/setup.sh check-addresses +./e2e-tests/setup.sh write-core-env +./e2e-tests/setup.sh update-token-config +./e2e-tests/setup.sh setup-gateway +./e2e-tests/setup.sh add-uregistry-configs +./e2e-tests/setup.sh all +``` + +--- + +## Address tracking model + +`deploy_addresses.json` is the canonical address registry used by later steps. 
+ +### Required contracts + +- `contracts.WPC` +- `contracts.Factory` +- `contracts.QuoterV2` +- `contracts.SwapRouter` + +### Token entries + +- `tokens[]` from core deployment logs (`name`, `symbol`, `address`, `source`) + +These addresses are used to: + +- sync swap repo `test-addresses.json` +- generate core contracts `.env` +- update `config/testnet-donut/tokens/eth_sepolia_eth.json` + +Manual helpers: + +```bash +./e2e-tests/setup.sh record-contract Factory 0x1234567890123456789012345678901234567890 +./e2e-tests/setup.sh record-token "Push ETH" pETH 0x1234567890123456789012345678901234567890 +``` + +--- + +## Auto-retry and resilience behavior + +### Core contracts + +- Runs `forge script scripts/localSetup/setup.s.sol ...` +- If receipt fetch fails, auto-retries with `--resume` in a loop until success +- Optional cap via: + +```bash +CORE_RESUME_MAX_ATTEMPTS=0 # 0 means unlimited (default) +``` + +### Gateway contracts + +- Runs gateway `forge script ... setup.s.sol` +- If initial execution fails, retries with `--resume` + +### uregistry tx submission + +- Submits chain config then token config +- Retries automatically on account sequence mismatch +- Validates tx result by checking returned `code` + +--- + +## Generated files of interest + +- `e2e-tests/deploy_addresses.json` +- `e2e-tests/repos/push-chain-swap-internal-amm-contracts/test-addresses.json` +- `e2e-tests/repos/push-chain-core-contracts/.env` +- `config/testnet-donut/tokens/eth_sepolia_eth.json` (updated contract address) + +--- + +## Clean re-run + +For a fresh run: + +```bash +rm -rf e2e-tests/repos +./local-native/devnet down || true +./e2e-tests/setup.sh all +``` + +--- + +## Troubleshooting + +### 1) Core script keeps stopping with receipt errors + +This is expected intermittently on local RPC. The script auto-runs `--resume` until completion. + +### 2) Missing branch in a dependency repo + +The script attempts to resolve/fallback to available remote branches. 
+ +### 3) `account sequence mismatch` in uregistry tx + +The script retries automatically for this error. + +### 4) WPC deployment artifact not found + +`setup-swap` compiles before deployment. If interrupted mid-run, re-run: + +```bash +./e2e-tests/setup.sh setup-swap +``` diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json new file mode 100644 index 00000000..c4fee1c2 --- /dev/null +++ b/e2e-tests/deploy_addresses.json @@ -0,0 +1,26 @@ +{ + "generatedAt": "2026-02-19T11:46:45Z", + "contracts": { + "WPC": "0x4ff2d01380CCd03A53457935B2e9d2eD092e9300", + "Factory": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", + "SwapRouter": "0x6a20557430be6412AF423681e35CC96797506F3a", + "QuoterV2": "0xc2055dD3A7Ad875520BdB5c91300F964F7038C73", + "PositionManager": "0xdBdFEB7A79868Cb4A4e9e57D7d28C84AE77AC4BC" + }, + "tokens": [ + { + "name": "pETH.eth", + "symbol": "pETH", + "address": "0x90F4A15601E08570D6fFbaE883C44BDB85bDb7d1", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "USDT.eth", + "symbol": "USDT.eth", + "address": "0x00cb38A885cf8D0B2dDfd19Bd1c04aAAC44C5a86", + "source": "core-contracts", + "decimals": 6 + } + ] +} diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh new file mode 100755 index 00000000..9dc19041 --- /dev/null +++ b/e2e-tests/setup.sh @@ -0,0 +1,1111 @@ +#!/usr/bin/env bash + +set -euo pipefail +IFS=$'\n\t' + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PUSH_CHAIN_DIR_DEFAULT="$(cd -P "$SCRIPT_DIR/.." 
&& pwd)" +ENV_FILE="$SCRIPT_DIR/.env" + +if [[ -f "$ENV_FILE" ]]; then + set -a + source "$ENV_FILE" + set +a +fi + +: "${PUSH_CHAIN_DIR:=$PUSH_CHAIN_DIR_DEFAULT}" +: "${PUSH_RPC_URL:=http://localhost:8545}" +: "${CHAIN_ID:=localchain_9000-1}" +: "${KEYRING_BACKEND:=test}" +: "${GENESIS_KEY_NAME:=genesis-acc-1}" +: "${GENESIS_KEY_HOME:=$PUSH_CHAIN_DIR/local-native/data/validator1/.pchain}" +: "${GENESIS_ACCOUNTS_JSON:=$PUSH_CHAIN_DIR/local-native/data/accounts/genesis_accounts.json}" +: "${FUND_AMOUNT:=1000000000000000000upc}" +: "${GAS_PRICES:=100000000000upc}" + +: "${CORE_CONTRACTS_REPO:=https://github.com/pushchain/push-chain-core-contracts.git}" +: "${CORE_CONTRACTS_BRANCH:=e2e-push-node}" +: "${SWAP_AMM_REPO:=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git}" +: "${SWAP_AMM_BRANCH:=e2e-push-node}" +: "${GATEWAY_REPO:=https://github.com/pushchain/push-chain-gateway-contracts.git}" +: "${GATEWAY_BRANCH:=e2e-push-node}" + +: "${E2E_PARENT_DIR:=../}" +: "${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" +: "${SWAP_AMM_DIR:=$E2E_PARENT_DIR/push-chain-swap-internal-amm-contracts}" +: "${GATEWAY_DIR:=$E2E_PARENT_DIR/push-chain-gateway-contracts}" +: "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" +: "${LOG_DIR:=$SCRIPT_DIR/logs}" +: "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" +: "${TOKENS_CONFIG_DIR:=./config/testnet-donut/tokens}" +: "${TOKEN_CONFIG_PATH:=./config/testnet-donut/tokens/eth_sepolia_eth.json}" +: "${CHAIN_CONFIG_PATH:=./config/testnet-donut/chains/eth_sepolia_chain_config.json}" + +abs_from_root() { + local path="$1" + if [[ "$path" = /* ]]; then + printf "%s" "$path" + else + printf "%s/%s" "$PUSH_CHAIN_DIR" "${path#./}" + fi +} + +GENESIS_KEY_HOME="$(abs_from_root "$GENESIS_KEY_HOME")" +GENESIS_ACCOUNTS_JSON="$(abs_from_root "$GENESIS_ACCOUNTS_JSON")" +E2E_PARENT_DIR="$(abs_from_root "$E2E_PARENT_DIR")" +CORE_CONTRACTS_DIR="$(abs_from_root "$CORE_CONTRACTS_DIR")" 
+SWAP_AMM_DIR="$(abs_from_root "$SWAP_AMM_DIR")" +GATEWAY_DIR="$(abs_from_root "$GATEWAY_DIR")" +DEPLOY_ADDRESSES_FILE="$(abs_from_root "$DEPLOY_ADDRESSES_FILE")" +TEST_ADDRESSES_PATH="$(abs_from_root "$TEST_ADDRESSES_PATH")" +LOG_DIR="$(abs_from_root "$LOG_DIR")" +TOKENS_CONFIG_DIR="$(abs_from_root "$TOKENS_CONFIG_DIR")" +TOKEN_CONFIG_PATH="$(abs_from_root "$TOKEN_CONFIG_PATH")" +CHAIN_CONFIG_PATH="$(abs_from_root "$CHAIN_CONFIG_PATH")" + +mkdir -p "$LOG_DIR" + +green='\033[0;32m' +yellow='\033[0;33m' +red='\033[0;31m' +cyan='\033[0;36m' +nc='\033[0m' + +log_info() { printf "%b\n" "${cyan}==>${nc} $*"; } +log_ok() { printf "%b\n" "${green}✓${nc} $*"; } +log_warn() { printf "%b\n" "${yellow}!${nc} $*"; } +log_err() { printf "%b\n" "${red}x${nc} $*"; } + +require_cmd() { + local c + for c in "$@"; do + command -v "$c" >/dev/null 2>&1 || { + log_err "Missing command: $c" + exit 1 + } + done +} + +list_remote_branches() { + local repo_url="$1" + git ls-remote --heads "$repo_url" | awk '{print $2}' | sed 's#refs/heads/##' +} + +select_best_matching_branch() { + local requested="$1" + shift + local branches=("$@") + local best="" + local best_score=0 + local branch token score + + # Tokenize requested branch by non-alphanumeric delimiters. 
+ local tokens=() + while IFS= read -r token; do + [[ -n "$token" ]] && tokens+=("$token") + done < <(echo "$requested" | tr -cs '[:alnum:]' '\n' | tr '[:upper:]' '[:lower:]') + + for branch in "${branches[@]}"; do + score=0 + local b_lc + b_lc="$(echo "$branch" | tr '[:upper:]' '[:lower:]')" + for token in "${tokens[@]}"; do + if [[ "$b_lc" == *"$token"* ]]; then + score=$((score + 1)) + fi + done + if (( score > best_score )); then + best_score=$score + best="$branch" + fi + done + + if (( best_score >= 2 )); then + printf "%s" "$best" + fi +} + +resolve_branch() { + local repo_url="$1" + local requested="$2" + local branches=() + local b + + while IFS= read -r b; do + [[ -n "$b" ]] && branches+=("$b") + done < <(list_remote_branches "$repo_url") + + local branch + for branch in "${branches[@]}"; do + if [[ "$branch" == "$requested" ]]; then + printf "%s" "$requested" + return + fi + done + + local best + best="$(select_best_matching_branch "$requested" "${branches[@]}")" + if [[ -n "$best" ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Auto-selected '$best'." >&2 + printf "%s" "$best" + return + fi + + for branch in main master; do + for b in "${branches[@]}"; do + if [[ "$b" == "$branch" ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Falling back to '$branch'." >&2 + printf "%s" "$branch" + return + fi + done + done + + if [[ ${#branches[@]} -gt 0 ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Falling back to '${branches[0]}'." >&2 + printf "%s" "${branches[0]}" + return + fi + + log_err "No remote branches found for $repo_url" + exit 1 +} + +ensure_deploy_file() { + if [[ ! 
-f "$DEPLOY_ADDRESSES_FILE" ]]; then + cat >"$DEPLOY_ADDRESSES_FILE" <<'JSON' +{ + "generatedAt": "", + "contracts": {}, + "tokens": [] +} +JSON + fi +} + +set_generated_at() { + local tmp + tmp="$(mktemp)" + jq --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" '.generatedAt = $now' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" +} + +record_contract() { + local key="$1" + local address="$2" + local tmp + tmp="$(mktemp)" + jq --arg key "$key" --arg val "$address" '.contracts[$key] = $val' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + set_generated_at + log_ok "Recorded contract $key=$address" +} + +record_token() { + local name="$1" + local symbol="$2" + local address="$3" + local source="$4" + local tmp + tmp="$(mktemp)" + jq \ + --arg name "$name" \ + --arg symbol "$symbol" \ + --arg address "$address" \ + --arg source "$source" \ + ' + .tokens = ( + ([.tokens[]? | select((.address | ascii_downcase) != ($address | ascii_downcase))]) + + [{name:$name, symbol:$symbol, address:$address, source:$source}] + ) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + set_generated_at + log_ok "Recorded token $symbol=$address ($name)" +} + +validate_eth_address() { + [[ "$1" =~ ^0x[a-fA-F0-9]{40}$ ]] +} + +clone_or_update_repo() { + local repo_url="$1" + local branch="$2" + local dest="$3" + local resolved_branch + + resolved_branch="$(resolve_branch "$repo_url" "$branch")" + + if [[ -d "$dest" && ! 
-d "$dest/.git" ]]; then + log_warn "Removing non-git directory at $dest" + rm -rf "$dest" + fi + + if [[ -d "$dest/.git" ]]; then + log_info "Updating repo $(basename "$dest")" + git -C "$dest" fetch origin + git -C "$dest" checkout "$resolved_branch" + git -C "$dest" reset --hard "origin/$resolved_branch" + else + log_info "Cloning $(basename "$dest")" + git clone --branch "$resolved_branch" "$repo_url" "$dest" + fi +} + +step_devnet() { + require_cmd bash + log_info "Starting local-native devnet" + ( + cd "$PUSH_CHAIN_DIR/local-native" + ./devnet build + ./devnet start 4 + ./devnet setup-uvalidators + ./devnet start-uv 4 + ) + log_ok "Devnet is up" +} + +step_print_genesis() { + require_cmd jq + if [[ ! -f "$GENESIS_ACCOUNTS_JSON" ]]; then + log_err "Missing genesis accounts file: $GENESIS_ACCOUNTS_JSON" + exit 1 + fi + + jq -r '.[0] | "Account: \(.name)\nAddress: \(.address)\nMnemonic: \(.mnemonic)"' "$GENESIS_ACCOUNTS_JSON" +} + +step_recover_genesis_key() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq + + local mnemonic="${GENESIS_MNEMONIC:-}" + if [[ -z "$mnemonic" ]]; then + if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then + mnemonic="$(jq -r --arg n "$GENESIS_KEY_NAME" ' + (first(.[] | select(.name == $n) | .mnemonic) // first(.[].mnemonic) // "") + ' "$GENESIS_ACCOUNTS_JSON")" + fi + fi + + if [[ -z "$mnemonic" ]]; then + log_err "Could not auto-resolve mnemonic from $GENESIS_ACCOUNTS_JSON" + log_err "Set GENESIS_MNEMONIC in e2e-tests/.env" + exit 1 + fi + + if "$PUSH_CHAIN_DIR/build/pchaind" keys show "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" >/dev/null 2>&1; then + log_warn "Key ${GENESIS_KEY_NAME} already exists. Deleting before recover." 
+ "$PUSH_CHAIN_DIR/build/pchaind" keys delete "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" \ + -y >/dev/null + fi + + log_info "Recovering key ${GENESIS_KEY_NAME}" + printf "%s\n" "$mnemonic" | "$PUSH_CHAIN_DIR/build/pchaind" keys add "$GENESIS_KEY_NAME" \ + --recover \ + --keyring-backend "$KEYRING_BACKEND" \ + --algo eth_secp256k1 \ + --home "$GENESIS_KEY_HOME" >/dev/null + + log_ok "Recovered key ${GENESIS_KEY_NAME}" +} + +step_fund_account() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" + + local to_addr="${FUND_TO_ADDRESS:-}" + if [[ -z "$to_addr" ]]; then + log_err "Set FUND_TO_ADDRESS in e2e-tests/.env" + exit 1 + fi + if ! validate_eth_address "$to_addr" && [[ ! "$to_addr" =~ ^push1[0-9a-z]+$ ]]; then + log_err "Invalid FUND_TO_ADDRESS: $to_addr" + exit 1 + fi + + log_info "Funding $to_addr with $FUND_AMOUNT" + "$PUSH_CHAIN_DIR/build/pchaind" tx bank send "$GENESIS_KEY_NAME" "$to_addr" "$FUND_AMOUNT" \ + --gas-prices "$GAS_PRICES" \ + --keyring-backend "$KEYRING_BACKEND" \ + --chain-id "$CHAIN_ID" \ + --home "$GENESIS_KEY_HOME" \ + -y + + log_ok "Funding transaction submitted" +} + +parse_core_prc20_logs() { + local log_file="$1" + local current_addr="" + local line + + while IFS= read -r line; do + if [[ "$line" =~ PRC20[[:space:]]deployed[[:space:]]at:[[:space:]](0x[a-fA-F0-9]{40}) ]]; then + current_addr="${BASH_REMATCH[1]}" + continue + fi + + if [[ -n "$current_addr" && "$line" =~ Name:[[:space:]](.+)[[:space:]]Symbol:[[:space:]]([A-Za-z0-9._-]+)$ ]]; then + local token_name="${BASH_REMATCH[1]}" + local token_symbol="${BASH_REMATCH[2]}" + record_token "$token_name" "$token_symbol" "$current_addr" "core-contracts" + current_addr="" + fi + done <"$log_file" +} + +enrich_core_token_decimals() { + require_cmd jq cast + ensure_deploy_file + + local addr decimals tmp + while IFS= read -r addr; do + [[ -n "$addr" ]] || continue + decimals="$(cast call "$addr" "decimals()(uint8)" --rpc-url "$PUSH_RPC_URL" 
2>/dev/null || true)" + decimals="$(echo "$decimals" | tr -d '[:space:]')" + + if [[ "$decimals" =~ ^[0-9]+$ ]]; then + tmp="$(mktemp)" + jq --arg addr "$addr" --argjson dec "$decimals" ' + .tokens |= map( + if ((.address | ascii_downcase) == ($addr | ascii_downcase)) + then . + {decimals: $dec} + else . + end + ) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + log_ok "Resolved token decimals: $addr => $decimals" + else + log_warn "Could not resolve decimals() for token $addr" + fi + done < <(jq -r '.tokens[]? | select(.decimals == null) | .address' "$DEPLOY_ADDRESSES_FILE") +} + +step_setup_core_contracts() { + require_cmd git forge jq + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + ensure_deploy_file + clone_or_update_repo "$CORE_CONTRACTS_REPO" "$CORE_CONTRACTS_BRANCH" "$CORE_CONTRACTS_DIR" + + log_info "Running forge build in core contracts" + (cd "$CORE_CONTRACTS_DIR" && forge build) + + local log_file="$LOG_DIR/core_setup_$(date +%Y%m%d_%H%M%S).log" + local failed=0 + local resume_attempt=1 + local resume_max_attempts="${CORE_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + + log_info "Running local core setup script" + ( + cd "$CORE_CONTRACTS_DIR" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow + ) 2>&1 | tee "$log_file" || failed=1 + + if [[ "$failed" -ne 0 ]]; then + log_warn "Initial run failed. 
Retrying with --resume until success" + while true; do + log_info "Resume attempt: $resume_attempt" + if ( + cd "$CORE_CONTRACTS_DIR" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow \ + --resume + ) 2>&1 | tee -a "$log_file"; then + break + fi + + if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then + log_err "Reached CORE_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success" + exit 1 + fi + + resume_attempt=$((resume_attempt + 1)) + sleep 2 + done + fi + + parse_core_prc20_logs "$log_file" + enrich_core_token_decimals + log_ok "Core contracts setup complete" +} + +find_first_address_with_keywords() { + local log_file="$1" + shift + local pattern + pattern="$(printf '%s|' "$@")" + pattern="${pattern%|}" + grep -Ei "$pattern" "$log_file" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true +} + +address_from_deploy_contract() { + local key="$1" + jq -r --arg k "$key" '.contracts[$k] // ""' "$DEPLOY_ADDRESSES_FILE" +} + +address_from_deploy_token() { + local sym="$1" + jq -r --arg s "$sym" 'first(.tokens[]? | select((.symbol|ascii_downcase) == ($s|ascii_downcase)) | .address) // ""' "$DEPLOY_ADDRESSES_FILE" +} + +resolve_peth_token_address() { + local addr="" + addr="$(address_from_deploy_token "pETH")" + [[ -n "$addr" ]] || addr="$(address_from_deploy_token "WETH")" + if [[ -z "$addr" ]]; then + addr="$(jq -r 'first(.tokens[]? 
| select((.name|ascii_downcase) | test("eth")) | .address) // ""' "$DEPLOY_ADDRESSES_FILE")" + fi + printf "%s" "$addr" +} + +assert_required_addresses() { + ensure_deploy_file + local required=("WPC" "Factory" "QuoterV2" "SwapRouter") + local missing=0 + local key val + + for key in "${required[@]}"; do + val="$(address_from_deploy_contract "$key")" + if [[ -z "$val" ]]; then + log_err "Missing required address in deploy file: contracts.$key" + missing=1 + else + log_ok "contracts.$key=$val" + fi + done + + if [[ "$missing" -ne 0 ]]; then + log_err "Required addresses are missing in $DEPLOY_ADDRESSES_FILE" + exit 1 + fi +} + +step_write_core_env() { + require_cmd jq + ensure_deploy_file + assert_required_addresses + + local core_env="$CORE_CONTRACTS_DIR/.env" + local wpc factory quoter router + wpc="$(address_from_deploy_contract "WPC")" + factory="$(address_from_deploy_contract "Factory")" + quoter="$(address_from_deploy_contract "QuoterV2")" + router="$(address_from_deploy_contract "SwapRouter")" + + log_info "Writing core-contracts .env" + { + echo "PUSH_RPC_URL=$PUSH_RPC_URL" + echo "PRIVATE_KEY=$PRIVATE_KEY" + echo "WPC_ADDRESS=$wpc" + echo "FACTORY_ADDRESS=$factory" + echo "QUOTER_V2_ADDRESS=$quoter" + echo "SWAP_ROUTER_ADDRESS=$router" + echo "WPC=$wpc" + echo "UNISWAP_V3_FACTORY=$factory" + echo "UNISWAP_V3_QUOTER=$quoter" + echo "UNISWAP_V3_ROUTER=$router" + echo "" + echo "# Tokens deployed from core setup" + jq -r '.tokens | to_entries[]? 
| "TOKEN" + ((.key + 1)|tostring) + "=" + .value.address' "$DEPLOY_ADDRESSES_FILE" + } >"$core_env" + + log_ok "Generated $core_env" +} + +step_update_eth_token_config() { + step_update_deployed_token_configs +} + +norm_token_key() { + local s="$1" + s="$(echo "$s" | tr '[:upper:]' '[:lower:]')" + s="$(echo "$s" | sed -E 's/[^a-z0-9]+//g')" + printf "%s" "$s" +} + +norm_token_key_without_leading_p() { + local s + s="$(norm_token_key "$1")" + if [[ "$s" == p* && ${#s} -gt 1 ]]; then + printf "%s" "${s#p}" + else + printf "%s" "$s" + fi +} + +find_matching_token_config_file() { + local deployed_symbol="$1" + local deployed_name="$2" + local best_file="" + local best_score=0 + + local d_sym d_name d_sym_np d_name_np + d_sym="$(norm_token_key "$deployed_symbol")" + d_name="$(norm_token_key "$deployed_name")" + d_sym_np="$(norm_token_key_without_leading_p "$deployed_symbol")" + d_name_np="$(norm_token_key_without_leading_p "$deployed_name")" + + local file f_sym f_name f_base f_sym_np f_name_np score + for file in "$TOKENS_CONFIG_DIR"/*.json; do + [[ -f "$file" ]] || continue + f_sym="$(jq -r '.symbol // ""' "$file")" + f_name="$(jq -r '.name // ""' "$file")" + f_base="$(basename "$file" .json)" + + f_sym="$(norm_token_key "$f_sym")" + f_name="$(norm_token_key "$f_name")" + f_base="$(norm_token_key "$f_base")" + f_sym_np="$(norm_token_key_without_leading_p "$f_sym")" + f_name_np="$(norm_token_key_without_leading_p "$f_name")" + + score=0 + [[ -n "$d_sym" && "$d_sym" == "$f_sym" ]] && score=$((score + 100)) + [[ -n "$d_name" && "$d_name" == "$f_name" ]] && score=$((score + 90)) + [[ -n "$d_sym_np" && "$d_sym_np" == "$f_sym" ]] && score=$((score + 80)) + [[ -n "$d_name_np" && "$d_name_np" == "$f_name" ]] && score=$((score + 70)) + [[ -n "$d_sym" && "$d_sym" == "$f_name" ]] && score=$((score + 60)) + [[ -n "$d_name" && "$d_name" == "$f_sym" ]] && score=$((score + 60)) + [[ -n "$d_sym_np" && "$f_base" == *"$d_sym_np"* ]] && score=$((score + 30)) + [[ -n "$d_name_np" && 
"$f_base" == *"$d_name_np"* ]] && score=$((score + 20)) + + if (( score > best_score )); then + best_score=$score + best_file="$file" + fi + done + + if (( best_score >= 60 )); then + printf "%s" "$best_file" + fi +} + +step_update_deployed_token_configs() { + require_cmd jq + ensure_deploy_file + + if [[ ! -d "$TOKENS_CONFIG_DIR" ]]; then + log_err "Tokens config directory missing: $TOKENS_CONFIG_DIR" + exit 1 + fi + + local used_files="" + local updated=0 + local token_json token_symbol token_name token_address match_file tmp + + while IFS= read -r token_json; do + token_symbol="$(echo "$token_json" | jq -r '.symbol // ""')" + token_name="$(echo "$token_json" | jq -r '.name // ""')" + token_address="$(echo "$token_json" | jq -r '.address // ""')" + + [[ -n "$token_address" ]] || continue + match_file="$(find_matching_token_config_file "$token_symbol" "$token_name")" + + if [[ -z "$match_file" ]]; then + log_warn "No token config match found for deployed token: $token_symbol ($token_name)" + continue + fi + + if echo "$used_files" | grep -Fxq "$match_file"; then + log_warn "Token config already matched by another token, skipping: $(basename "$match_file")" + continue + fi + + tmp="$(mktemp)" + jq --arg a "$token_address" '.native_representation.contract_address = $a' "$match_file" >"$tmp" + mv "$tmp" "$match_file" + used_files+="$match_file"$'\n' + updated=$((updated + 1)) + log_ok "Updated $(basename "$match_file") contract_address => $token_address" + done < <(jq -c '.tokens[]?' 
"$DEPLOY_ADDRESSES_FILE") + + if [[ "$updated" -eq 0 ]]; then + log_warn "No token config files were updated from deployed tokens" + else + log_ok "Updated $updated token config file(s) from deployed tokens" + fi +} + +step_setup_swap_amm() { + require_cmd git node npm npx jq + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + ensure_deploy_file + clone_or_update_repo "$SWAP_AMM_REPO" "$SWAP_AMM_BRANCH" "$SWAP_AMM_DIR" + + log_info "Installing swap-amm dependencies" + ( + cd "$SWAP_AMM_DIR" + npm install + (cd v3-core && npm install) + (cd v3-periphery && npm install) + ) + + log_info "Writing swap repo .env from main e2e .env" + cat >"$SWAP_AMM_DIR/.env" <&1 | tee "$wpc_log" + + local wpc_addr + wpc_addr="$(find_first_address_with_keywords "$wpc_log" wpc wpush wrapped)" + if [[ -n "$wpc_addr" ]]; then + record_contract "WPC" "$wpc_addr" + else + log_warn "Could not auto-detect WPC address from logs" + fi + + local core_log="$LOG_DIR/swap_core_$(date +%Y%m%d_%H%M%S).log" + log_info "Deploying v3-core" + ( + cd "$SWAP_AMM_DIR/v3-core" + npx hardhat compile + npx hardhat run scripts/deploy-core.js --network pushchain + ) 2>&1 | tee "$core_log" + + local factory_addr + factory_addr="$(grep -E 'Factory Address|FACTORY_ADDRESS=' "$core_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + if [[ -n "$factory_addr" ]]; then + record_contract "Factory" "$factory_addr" + else + log_warn "Could not auto-detect Factory address from logs" + fi + + local periphery_log="$LOG_DIR/swap_periphery_$(date +%Y%m%d_%H%M%S).log" + log_info "Deploying v3-periphery" + ( + cd "$SWAP_AMM_DIR/v3-periphery" + npx hardhat compile + npx hardhat run scripts/deploy-periphery.js --network pushchain + ) 2>&1 | tee "$periphery_log" + + local swap_router quoter_v2 position_manager + swap_router="$(grep -E 'SwapRouter' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + quoter_v2="$(grep -E 'QuoterV2' "$periphery_log" | grep -Eo 
'0x[a-fA-F0-9]{40}' | tail -1 || true)" + position_manager="$(grep -E 'PositionManager' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + wpc_addr="$(grep -E '^.*WPC:' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + + [[ -n "$swap_router" ]] && record_contract "SwapRouter" "$swap_router" + [[ -n "$quoter_v2" ]] && record_contract "QuoterV2" "$quoter_v2" + [[ -n "$position_manager" ]] && record_contract "PositionManager" "$position_manager" + [[ -n "$wpc_addr" ]] && record_contract "WPC" "$wpc_addr" + + assert_required_addresses + + log_ok "Swap AMM setup complete" +} + +step_setup_gateway() { + require_cmd git forge + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + clone_or_update_repo "$GATEWAY_REPO" "$GATEWAY_BRANCH" "$GATEWAY_DIR" + + log_info "Preparing gateway repo submodules" + ( + cd "$GATEWAY_DIR" + if [[ -d "contracts/svm-gateway/mock-pyth" ]]; then + git rm --cached contracts/svm-gateway/mock-pyth || true + rm -rf contracts/svm-gateway/mock-pyth + fi + git submodule update --init --recursive + ) + + local gw_dir="$GATEWAY_DIR/contracts/evm-gateway" + local gw_log="$LOG_DIR/gateway_setup_$(date +%Y%m%d_%H%M%S).log" + local failed=0 + local resume_attempt=1 + local resume_max_attempts="${GATEWAY_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + + log_info "Building gateway evm contracts" + (cd "$gw_dir" && forge build) + + log_info "Running gateway local setup script" + ( + cd "$gw_dir" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow + ) 2>&1 | tee "$gw_log" || failed=1 + + if [[ "$failed" -ne 0 ]]; then + log_warn "Gateway script failed. 
Retrying with --resume until success" + while true; do + log_info "Gateway resume attempt: $resume_attempt" + if ( + cd "$gw_dir" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow \ + --resume + ) 2>&1 | tee -a "$gw_log"; then + break + fi + + if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then + log_err "Reached GATEWAY_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success" + exit 1 + fi + + resume_attempt=$((resume_attempt + 1)) + sleep 2 + done + fi + + log_ok "Gateway setup complete" +} + +step_add_uregistry_configs() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq + + [[ -f "$CHAIN_CONFIG_PATH" ]] || { log_err "Missing chain config: $CHAIN_CONFIG_PATH"; exit 1; } + [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } + + # Ensure all deployed core tokens have updated contract addresses in token config files. + step_update_deployed_token_configs + + local chain_payload token_payload + chain_payload="$(jq -c . 
"$CHAIN_CONFIG_PATH")" + + run_registry_tx() { + local kind="$1" + local payload="$2" + local max_attempts=10 + local attempt=1 + local out code raw + + while true; do + if [[ "$kind" == "chain" ]]; then + out="$("$PUSH_CHAIN_DIR/build/pchaind" tx uregistry add-chain-config \ + --chain-config "$payload" \ + --from "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home data/validator1/.pchain \ + --node tcp://127.0.0.1:26657 \ + --gas-prices "$GAS_PRICES" \ + -y)" + else + out="$("$PUSH_CHAIN_DIR/build/pchaind" tx uregistry add-token-config \ + --token-config "$payload" \ + --from "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home data/validator1/.pchain \ + --node tcp://127.0.0.1:26657 \ + --gas-prices "$GAS_PRICES" \ + -y)" + fi + echo "$out" + if [[ "$out" =~ ^\{ ]]; then + code="$(echo "$out" | jq -r '.code // 1')" + raw="$(echo "$out" | jq -r '.raw_log // ""')" + else + code="$(echo "$out" | awk -F': ' '/^code:/ {print $2; exit}')" + raw="$(echo "$out" | awk -F': ' '/^raw_log:/ {sub(/^\x27|\x27$/, "", $2); print $2; exit}')" + [[ -n "$code" ]] || code="1" + fi + + if [[ "$code" == "0" ]]; then + return 0 + fi + + if [[ "$raw" == *"account sequence mismatch"* && "$attempt" -lt "$max_attempts" ]]; then + log_warn "Sequence mismatch on attempt $attempt/$max_attempts. Retrying..." 
+ attempt=$((attempt + 1)) + sleep 2 + continue + fi + + log_err "Registry tx failed: code=$code raw_log=$raw" + return 1 + done + } + + log_info "Adding chain config to uregistry" + ( + cd "$PUSH_CHAIN_DIR/local-native" + run_registry_tx "chain" "$chain_payload" + ) + + local deployed_addrs token_file token_addr matched_count + deployed_addrs="$(jq -r '.tokens[]?.address | ascii_downcase' "$DEPLOY_ADDRESSES_FILE")" + matched_count=0 + + while IFS= read -r token_file; do + [[ -f "$token_file" ]] || continue + token_addr="$(jq -r '.native_representation.contract_address // "" | ascii_downcase' "$token_file")" + [[ -n "$token_addr" ]] || continue + + if echo "$deployed_addrs" | grep -Fxq "$token_addr"; then + token_payload="$(jq -c . "$token_file")" + log_info "Adding token config to uregistry: $(basename "$token_file")" + ( + cd "$PUSH_CHAIN_DIR/local-native" + run_registry_tx "token" "$token_payload" + ) + matched_count=$((matched_count + 1)) + fi + done < <(find "$TOKENS_CONFIG_DIR" -maxdepth 1 -type f -name '*.json' | sort) + + if [[ "$matched_count" -eq 0 ]]; then + log_warn "No deployed tokens matched token config files for uregistry add-token-config" + else + log_ok "Registered $matched_count deployed token config(s) in uregistry" + fi + + log_ok "uregistry chain/token configs added" +} + +step_sync_test_addresses() { + require_cmd jq + ensure_deploy_file + + if [[ ! -f "$TEST_ADDRESSES_PATH" ]]; then + log_err "test-addresses.json not found: $TEST_ADDRESSES_PATH" + exit 1 + fi + + log_info "Syncing deploy addresses into test-addresses.json" + local tmp + tmp="$(mktemp)" + + jq \ + --arg today "$(date +%F)" \ + --arg rpc "$PUSH_RPC_URL" \ + --slurpfile dep "$DEPLOY_ADDRESSES_FILE" \ + ' + ($dep[0]) as $d + | def token_addr($sym): first(($d.tokens[]? | select(.symbol == $sym) | .address), empty); + .lastUpdated = $today + | .network.rpcUrl = $rpc + | if ($d.contracts.Factory // "") != "" then .contracts.factory = $d.contracts.Factory else . 
end + | if ($d.contracts.WPC // "") != "" then .contracts.WPC = $d.contracts.WPC else . end + | if ($d.contracts.SwapRouter // "") != "" then .contracts.swapRouter = $d.contracts.SwapRouter else . end + | if ($d.contracts.PositionManager // "") != "" then .contracts.positionManager = $d.contracts.PositionManager else . end + | if ($d.contracts.QuoterV2 // "") != "" then .contracts.quoterV2 = $d.contracts.QuoterV2 else . end + | .testTokens |= with_entries( + .value.address = (token_addr(.key) // .value.address) + ) + | .testTokens = ( + .testTokens as $existing + | $existing + + ( + reduce ($d.tokens[]?) as $t ({}; + .[$t.symbol] = { + name: $t.name, + symbol: $t.symbol, + address: $t.address, + decimals: ($t.decimals // ($existing[$t.symbol].decimals // null)), + totalSupply: ($existing[$t.symbol].totalSupply // "") + } + ) + ) + ) + | .pools |= with_entries( + .value.token0 = (token_addr(.value.token0Symbol) // .value.token0) + | .value.token1 = (token_addr(.value.token1Symbol) // .value.token1) + ) + ' "$TEST_ADDRESSES_PATH" >"$tmp" + + mv "$tmp" "$TEST_ADDRESSES_PATH" + log_ok "Updated $TEST_ADDRESSES_PATH" +} + +step_create_all_wpc_pools() { + require_cmd node + ensure_deploy_file + + if [[ ! 
-f "$TEST_ADDRESSES_PATH" ]]; then + log_err "Missing test-addresses.json at $TEST_ADDRESSES_PATH" + exit 1 + fi + + local wpc_addr token_count token_addr token_symbol + wpc_addr="$(address_from_deploy_contract "WPC")" + if [[ -z "$wpc_addr" ]]; then + log_err "Missing WPC contract address in $DEPLOY_ADDRESSES_FILE" + exit 1 + fi + + token_count="$(jq -r '.tokens | length' "$DEPLOY_ADDRESSES_FILE")" + if [[ "$token_count" == "0" ]]; then + log_warn "No core tokens found in deploy addresses; skipping pool creation" + return 0 + fi + + while IFS=$'\t' read -r token_symbol token_addr; do + [[ -n "$token_addr" ]] || continue + if [[ "$(echo "$token_addr" | tr '[:upper:]' '[:lower:]')" == "$(echo "$wpc_addr" | tr '[:upper:]' '[:lower:]')" ]]; then + continue + fi + + log_info "Creating ${token_symbol}/WPC pool with liquidity" + ( + cd "$SWAP_AMM_DIR" + node scripts/pool-manager.js create-pool "$token_addr" "$wpc_addr" 4 500 true 1 4 + ) + done < <(jq -r '.tokens[]? | [.symbol, .address] | @tsv' "$DEPLOY_ADDRESSES_FILE") + + log_ok "All token/WPC pool creation commands completed" +} + +step_configure_universal_core() { + require_cmd forge + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + # configureUniversalCore depends on values from core .env + step_write_core_env + + local script_path="scripts/localSetup/configureUniversalCore.s.sol" + local log_file="$LOG_DIR/core_configure_$(date +%Y%m%d_%H%M%S).log" + local resume_attempt=1 + local resume_max_attempts="${CORE_CONFIGURE_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + + if [[ ! 
-f "$CORE_CONTRACTS_DIR/$script_path" ]]; then + log_warn "configureUniversalCore script not found at $CORE_CONTRACTS_DIR/$script_path; skipping" + return 0 + fi + + log_info "Running configureUniversalCore script" + if ( + cd "$CORE_CONTRACTS_DIR" + forge script "$script_path" \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow + ) 2>&1 | tee "$log_file"; then + log_ok "configureUniversalCore completed" + return 0 + fi + + log_warn "configureUniversalCore failed. Retrying with --resume until success" + while true; do + log_info "configureUniversalCore resume attempt: $resume_attempt" + if ( + cd "$CORE_CONTRACTS_DIR" + forge script "$script_path" \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow \ + --resume + ) 2>&1 | tee -a "$log_file"; then + log_ok "configureUniversalCore resumed successfully" + return 0 + fi + + if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then + log_err "Reached CORE_CONFIGURE_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success" + exit 1 + fi + + resume_attempt=$((resume_attempt + 1)) + sleep 2 + done +} + +cmd_all() { + step_devnet + step_recover_genesis_key + step_fund_account + step_setup_core_contracts + step_setup_swap_amm + step_sync_test_addresses + step_create_all_wpc_pools + assert_required_addresses + step_write_core_env + step_configure_universal_core + step_update_eth_token_config + step_setup_gateway + step_add_uregistry_configs +} + +cmd_show_help() { + cat < + +Commands: + devnet Build/start local-native devnet + uvalidators + print-genesis Print first genesis account + mnemonic + recover-genesis-key Recover genesis key into local keyring + fund Fund FUND_TO_ADDRESS from genesis key + setup-core Clone/build/setup core contracts (auto resume on failure) + setup-swap Clone/install/deploy swap AMM contracts + sync-addresses Apply deploy_addresses.json into test-addresses.json + create-pool Create WPC pools 
for all deployed core tokens + configure-core Run configureUniversalCore.s.sol (auto --resume retries) + check-addresses Verify required deploy addresses exist (WPC/Factory/QuoterV2/SwapRouter) + write-core-env Create core-contracts .env from deploy_addresses.json + update-token-config Update eth_sepolia_eth.json contract_address using deployed token + setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) + add-uregistry-configs Submit chain + token config txs via local-native validator1 + record-contract K A Manually record contract key/address + record-token N S A Manually record token name/symbol/address + all Run full setup pipeline + help Show this help + +Primary files: + Env: $ENV_FILE + Address: $DEPLOY_ADDRESSES_FILE +EOF +} + +main() { + local cmd="${1:-help}" + case "$cmd" in + devnet) step_devnet ;; + print-genesis) step_print_genesis ;; + recover-genesis-key) step_recover_genesis_key ;; + fund) step_fund_account ;; + setup-core) step_setup_core_contracts ;; + setup-swap) step_setup_swap_amm ;; + sync-addresses) step_sync_test_addresses ;; + create-pool) step_create_all_wpc_pools ;; + configure-core) step_configure_universal_core ;; + check-addresses) assert_required_addresses ;; + write-core-env) step_write_core_env ;; + update-token-config) step_update_deployed_token_configs ;; + setup-gateway) step_setup_gateway ;; + add-uregistry-configs) step_add_uregistry_configs ;; + record-contract) + ensure_deploy_file + [[ $# -eq 3 ]] || { log_err "Usage: $0 record-contract
"; exit 1; } + validate_eth_address "$3" || { log_err "Invalid address: $3"; exit 1; } + record_contract "$2" "$3" + ;; + record-token) + ensure_deploy_file + [[ $# -eq 4 ]] || { log_err "Usage: $0 record-token
"; exit 1; } + validate_eth_address "$4" || { log_err "Invalid address: $4"; exit 1; } + record_token "$2" "$3" "$4" "manual" + ;; + all) cmd_all ;; + help|--help|-h) cmd_show_help ;; + *) log_err "Unknown command: $cmd"; cmd_show_help; exit 1 ;; + esac +} + +main "$@" diff --git a/local-native/README.md b/local-native/README.md index 50f3cc4d..b911c8f5 100644 --- a/local-native/README.md +++ b/local-native/README.md @@ -49,7 +49,8 @@ cd local-native |---------|-------------| | `./devnet start [n]` | Start n core validators (default: 1) | | `./devnet setup-uvalidators` | Register UVs on-chain + create AuthZ grants | -| `./devnet start-uv [n]` | Start n universal validators (default: 4) | +| `./devnet start-uv [n]` | Start n universal validators (default: 4) and auto-set Sepolia `event_start_from` | +| `./devnet configure` | Manually refresh Sepolia `event_start_from` in existing UV configs | | `./devnet down` | Stop all validators | | `./devnet status` | Show network status | | `./devnet logs [service]` | View logs | diff --git a/local-native/devnet b/local-native/devnet index 081590fa..40f01540 100755 --- a/local-native/devnet +++ b/local-native/devnet @@ -236,6 +236,7 @@ start_validator() { start_universal() { local id=$1 + local sepolia_start_height=${2:-} local pid_file="$DATA_DIR/universal$id.pid" # Check if already running @@ -251,7 +252,7 @@ start_universal() { mkdir -p "$DATA_DIR/universal$id" print_status "Starting universal validator $id..." - UNIVERSAL_ID=$id "$SCRIPT_DIR/scripts/setup-universal.sh" > "$DATA_DIR/universal$id/universal.log" 2>&1 & + UNIVERSAL_ID=$id SEPOLIA_EVENT_START_FROM="$sepolia_start_height" "$SCRIPT_DIR/scripts/setup-universal.sh" > "$DATA_DIR/universal$id/universal.log" 2>&1 & echo $! > "$pid_file" print_success "Universal validator $id started (PID: $(cat $pid_file))" @@ -297,12 +298,19 @@ cmd_up() { cmd_start_uv() { require_binaries print_header "Starting Universal Validators..." + + local sepolia_start_height="" + if ! 
sepolia_start_height=$(bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" --get-height); then + print_error "Failed to fetch latest Sepolia height" + exit 1 + fi + print_status "Using Sepolia event_start_from: $sepolia_start_height" local num_uv=${1:-4} for i in $(seq 1 $num_uv); do if [ $i -le 4 ]; then - start_universal $i + start_universal $i "$sepolia_start_height" sleep 3 fi done @@ -313,6 +321,14 @@ cmd_start_uv() { cmd_status } +# ═══════════════════════════════════════════════════════════════════════════════ +# CONFIGURE COMMANDS +# ═══════════════════════════════════════════════════════════════════════════════ +cmd_configure() { + print_header "Configuring local-native universal relayer configs..." + bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STOP/DOWN COMMANDS # ═══════════════════════════════════════════════════════════════════════════════ @@ -487,6 +503,7 @@ cmd_help() { echo -e "${BOLD}${CYAN}UNIVERSAL VALIDATORS${NC}" printf " ${BOLD}%-20s${NC}%s\n" "setup-uvalidators" "Register UVs and create AuthZ grants" printf " ${BOLD}%-20s${NC}%s\n" "start-uv [n]" "Start n universal validators (default: 4)" + printf " ${BOLD}%-20s${NC}%s\n" "configure" "Set Sepolia event_start_from to latest block" echo echo -e "${BOLD}${CYAN}TSS COMMANDS${NC}" printf " ${BOLD}%-20s${NC}%s\n" "tss-keygen" "Initiate TSS key generation" @@ -536,6 +553,7 @@ case "${1:-help}" in # Universal validators setup-uvalidators) "$SCRIPT_DIR/scripts/setup-uvalidators.sh" ;; start-uv) shift; cmd_start_uv "$@" ;; + configure) cmd_configure ;; # Maintenance clean) cmd_clean ;; diff --git a/local-native/scripts/configure-pushuv.sh b/local-native/scripts/configure-pushuv.sh new file mode 100644 index 00000000..8eea0098 --- /dev/null +++ b/local-native/scripts/configure-pushuv.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 
+LOCAL_NATIVE_DIR="$(cd -P "$SCRIPT_DIR/.." && pwd)" +DATA_DIR="$LOCAL_NATIVE_DIR/data" + +require_bin() { + local bin="$1" + if ! command -v "$bin" >/dev/null 2>&1; then + echo "❌ Required binary not found: $bin" + exit 1 + fi +} + +require_bin curl +require_bin jq + +SEPOLIA_CHAIN_ID="eip155:11155111" +DEFAULT_RPC_URL="https://sepolia.drpc.org" + +# Prefer RPC URL from existing config, fallback to default. +detect_rpc_url() { + local cfg="$1" + jq -r --arg chain "$SEPOLIA_CHAIN_ID" '.chain_configs[$chain].rpc_url[0] // empty' "$cfg" 2>/dev/null || true +} + +fetch_sepolia_height() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}') + + local hex_height + hex_height=$(echo "$response" | jq -r '.result // empty') + + if [[ -z "$hex_height" || "$hex_height" == "null" || ! "$hex_height" =~ ^0x[0-9a-fA-F]+$ ]]; then + return 1 + fi + + echo "$((16#${hex_height#0x}))" +} + +find_pushuv_configs() { + find "$DATA_DIR" -type f -path '*/.puniversal/config/pushuv_config.json' | sort +} + +print_only_height() { + local rpc_url="$DEFAULT_RPC_URL" + local height="" + + if ! height=$(fetch_sepolia_height "$rpc_url"); then + echo "❌ Failed to fetch Sepolia block height from $rpc_url" >&2 + exit 1 + fi + + echo "$height" +} + +main() { + if [ "${1:-}" = "--get-height" ]; then + print_only_height + return 0 + fi + + local configs=() + while IFS= read -r cfg; do + configs+=("$cfg") + done < <(find_pushuv_configs) + + if [ "${#configs[@]}" -eq 0 ]; then + echo "❌ No pushuv_config.json files found under $DATA_DIR" + echo " Start universal validators first with: ./devnet start-uv 4" + exit 1 + fi + + local rpc_url="" + rpc_url=$(detect_rpc_url "${configs[0]}") + if [ -z "$rpc_url" ]; then + rpc_url="$DEFAULT_RPC_URL" + fi + + local height="" + if ! 
height=$(fetch_sepolia_height "$rpc_url"); then + echo "⚠️ Failed using configured RPC ($rpc_url), retrying default RPC ($DEFAULT_RPC_URL)..." + if ! height=$(fetch_sepolia_height "$DEFAULT_RPC_URL"); then + echo "❌ Failed to fetch Sepolia block height from both RPC endpoints" + exit 1 + fi + rpc_url="$DEFAULT_RPC_URL" + fi + + echo "ℹ️ Sepolia latest block height: $height" + echo "ℹ️ RPC used: $rpc_url" + + local updated=0 + for cfg in "${configs[@]}"; do + local tmp + tmp=$(mktemp) + jq --arg chain "$SEPOLIA_CHAIN_ID" --argjson height "$height" \ + '.chain_configs[$chain].event_start_from = $height' \ + "$cfg" > "$tmp" + mv "$tmp" "$cfg" + updated=$((updated + 1)) + echo "✅ Updated: $cfg" + done + + echo "🎉 Updated event_start_from for $updated config file(s)." +} + +main "$@" diff --git a/local-native/scripts/setup-universal.sh b/local-native/scripts/setup-universal.sh index e8023cde..e90919a0 100755 --- a/local-native/scripts/setup-universal.sh +++ b/local-native/scripts/setup-universal.sh @@ -86,6 +86,14 @@ jq --argjson port "$QUERY_PORT" '.query_server_port = $port' \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +# Optionally override Sepolia event start height (set by ./devnet start-uv) +if [ -n "${SEPOLIA_EVENT_START_FROM:-}" ]; then + jq --argjson height "$SEPOLIA_EVENT_START_FROM" \ + '.chain_configs["eip155:11155111"].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + # Enable TSS TSS_PRIVATE_KEY=$(printf '%02x' $UNIVERSAL_ID | head -c 2) TSS_PRIVATE_KEY=$(yes $TSS_PRIVATE_KEY | head -32 | tr -d '\n') From 2de7fd08ea1d723a793a24c76cc2fb1b061aeb8f Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 19 Feb 2026 18:51:25 
+0530 Subject: [PATCH 02/61] added a replace-addresses command to e2e-setup which replaces neccessary addresses in node repo --- e2e-tests/README.md | 3 +++ e2e-tests/setup.sh | 46 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 810d76cf..fc8389bc 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -65,6 +65,7 @@ Path settings are repository-relative and portable. ## One-command full run ```bash +./e2e-tests/setup.sh replace-addresses ./e2e-tests/setup.sh all ``` @@ -101,6 +102,7 @@ This runs the full sequence in order: ./e2e-tests/setup.sh update-token-config ./e2e-tests/setup.sh setup-gateway ./e2e-tests/setup.sh add-uregistry-configs +./e2e-tests/setup.sh replace-addresses ./e2e-tests/setup.sh all ``` @@ -177,6 +179,7 @@ For a fresh run: ```bash rm -rf e2e-tests/repos ./local-native/devnet down || true +./e2e-tests/setup.sh replace-addresses ./e2e-tests/setup.sh all ``` diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 9dc19041..f5e0d4d8 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -41,6 +41,11 @@ fi : "${TOKEN_CONFIG_PATH:=./config/testnet-donut/tokens/eth_sepolia_eth.json}" : "${CHAIN_CONFIG_PATH:=./config/testnet-donut/chains/eth_sepolia_chain_config.json}" +: "${OLD_PUSH_ADDRESS:=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20}" +: "${NEW_PUSH_ADDRESS:=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20}" +: "${OLD_EVM_ADDRESS:=0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4}" +: "${NEW_EVM_ADDRESS:=0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4}" + abs_from_root() { local path="$1" if [[ "$path" = /* ]]; then @@ -1027,6 +1032,44 @@ step_configure_universal_core() { done } +step_replace_addresses_everywhere() { + require_cmd grep perl + + local touched=0 + local file + + while IFS= read -r file; do + [[ -n "$file" ]] || continue + perl -0777 -i -pe "s/\Q$OLD_PUSH_ADDRESS\E/$NEW_PUSH_ADDRESS/g; s/\Q$OLD_EVM_ADDRESS\E/$NEW_EVM_ADDRESS/g;" "$file" + 
touched=$((touched + 1)) + done < <( + grep -RIl \ + --exclude-dir=.git \ + --binary-files=without-match \ + -e "$OLD_PUSH_ADDRESS" \ + -e "$OLD_EVM_ADDRESS" \ + "$PUSH_CHAIN_DIR" || true + ) + + if [[ "$touched" -eq 0 ]]; then + log_warn "No files contained legacy addresses" + else + log_ok "Replaced legacy addresses in $touched file(s)" + fi +} + +run_preflight() { + local cmd="$1" + + case "$cmd" in + help|--help|-h|replace-addresses) + return 0 + ;; + esac + + step_replace_addresses_everywhere +} + cmd_all() { step_devnet step_recover_genesis_key @@ -1057,6 +1100,7 @@ Commands: sync-addresses Apply deploy_addresses.json into test-addresses.json create-pool Create WPC pools for all deployed core tokens configure-core Run configureUniversalCore.s.sol (auto --resume retries) + replace-addresses Replace legacy push/evm addresses across repo check-addresses Verify required deploy addresses exist (WPC/Factory/QuoterV2/SwapRouter) write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token @@ -1075,6 +1119,7 @@ EOF main() { local cmd="${1:-help}" + run_preflight "$cmd" case "$cmd" in devnet) step_devnet ;; print-genesis) step_print_genesis ;; @@ -1085,6 +1130,7 @@ main() { sync-addresses) step_sync_test_addresses ;; create-pool) step_create_all_wpc_pools ;; configure-core) step_configure_universal_core ;; + replace-addresses) step_replace_addresses_everywhere ;; check-addresses) assert_required_addresses ;; write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; From fdc56f89c6902a77f67279522a84040ff684e9a5 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 20 Feb 2026 17:23:05 +0530 Subject: [PATCH 03/61] fix: registered static precompiles in local-native setup e2e --- e2e-tests/setup.sh | 57 ++++------------------ local-native/scripts/setup-genesis-auto.sh | 1 + 2 files 
changed, 11 insertions(+), 47 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index f5e0d4d8..78522218 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -41,11 +41,6 @@ fi : "${TOKEN_CONFIG_PATH:=./config/testnet-donut/tokens/eth_sepolia_eth.json}" : "${CHAIN_CONFIG_PATH:=./config/testnet-donut/chains/eth_sepolia_chain_config.json}" -: "${OLD_PUSH_ADDRESS:=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20}" -: "${NEW_PUSH_ADDRESS:=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20}" -: "${OLD_EVM_ADDRESS:=0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4}" -: "${NEW_EVM_ADDRESS:=0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4}" - abs_from_root() { local path="$1" if [[ "$path" = /* ]]; then @@ -247,6 +242,15 @@ clone_or_update_repo() { fi if [[ -d "$dest/.git" ]]; then + local current_branch has_changes + current_branch="$(git -C "$dest" rev-parse --abbrev-ref HEAD 2>/dev/null || true)" + has_changes="$(git -C "$dest" status --porcelain 2>/dev/null)" + + if [[ -n "$has_changes" && "$current_branch" == "$resolved_branch" ]]; then + log_warn "Repo $(basename "$dest") has local changes on branch '$current_branch'. Skipping update to preserve local changes." 
+ return 0 + fi + log_info "Updating repo $(basename "$dest")" git -C "$dest" fetch origin git -C "$dest" checkout "$resolved_branch" @@ -1032,44 +1036,6 @@ step_configure_universal_core() { done } -step_replace_addresses_everywhere() { - require_cmd grep perl - - local touched=0 - local file - - while IFS= read -r file; do - [[ -n "$file" ]] || continue - perl -0777 -i -pe "s/\Q$OLD_PUSH_ADDRESS\E/$NEW_PUSH_ADDRESS/g; s/\Q$OLD_EVM_ADDRESS\E/$NEW_EVM_ADDRESS/g;" "$file" - touched=$((touched + 1)) - done < <( - grep -RIl \ - --exclude-dir=.git \ - --binary-files=without-match \ - -e "$OLD_PUSH_ADDRESS" \ - -e "$OLD_EVM_ADDRESS" \ - "$PUSH_CHAIN_DIR" || true - ) - - if [[ "$touched" -eq 0 ]]; then - log_warn "No files contained legacy addresses" - else - log_ok "Replaced legacy addresses in $touched file(s)" - fi -} - -run_preflight() { - local cmd="$1" - - case "$cmd" in - help|--help|-h|replace-addresses) - return 0 - ;; - esac - - step_replace_addresses_everywhere -} - cmd_all() { step_devnet step_recover_genesis_key @@ -1100,7 +1066,6 @@ Commands: sync-addresses Apply deploy_addresses.json into test-addresses.json create-pool Create WPC pools for all deployed core tokens configure-core Run configureUniversalCore.s.sol (auto --resume retries) - replace-addresses Replace legacy push/evm addresses across repo check-addresses Verify required deploy addresses exist (WPC/Factory/QuoterV2/SwapRouter) write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token @@ -1119,7 +1084,6 @@ EOF main() { local cmd="${1:-help}" - run_preflight "$cmd" case "$cmd" in devnet) step_devnet ;; print-genesis) step_print_genesis ;; @@ -1130,7 +1094,6 @@ main() { sync-addresses) step_sync_test_addresses ;; create-pool) step_create_all_wpc_pools ;; configure-core) step_configure_universal_core ;; - replace-addresses) step_replace_addresses_everywhere ;; check-addresses) assert_required_addresses ;; 
write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; @@ -1154,4 +1117,4 @@ main() { esac } -main "$@" +main "$@" \ No newline at end of file diff --git a/local-native/scripts/setup-genesis-auto.sh b/local-native/scripts/setup-genesis-auto.sh index 1ab5e6d6..f4ed4c6d 100755 --- a/local-native/scripts/setup-genesis-auto.sh +++ b/local-native/scripts/setup-genesis-auto.sh @@ -109,6 +109,7 @@ update_genesis '.app_state["gov"]["params"]["max_deposit_period"]="300s"' update_genesis '.app_state["gov"]["params"]["voting_period"]="300s"' update_genesis ".app_state[\"evm\"][\"params\"][\"evm_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"chain_id\"]=$EVM_CHAIN_ID" +update_genesis '.app_state["evm"]["params"]["active_static_precompiles"]=["0x00000000000000000000000000000000000000CB","0x00000000000000000000000000000000000000ca","0x0000000000000000000000000000000000000100","0x0000000000000000000000000000000000000400","0x0000000000000000000000000000000000000800","0x0000000000000000000000000000000000000801","0x0000000000000000000000000000000000000802","0x0000000000000000000000000000000000000803","0x0000000000000000000000000000000000000804","0x0000000000000000000000000000000000000805"]' update_genesis ".app_state[\"staking\"][\"params\"][\"bond_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"mint\"][\"params\"][\"mint_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"uregistry\"][\"params\"][\"admin\"]=\"$GENESIS_ADDR1\"" From 1abe21634bc890f18bd5d60d570f2debc30c7797 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 20 Feb 2026 19:48:39 +0530 Subject: [PATCH 04/61] refactor: e2e-setup now uses local-multi-validator setup instead of local-native --- .gitignore | 2 + e2e-tests/.env.example | 5 +- e2e-tests/README.md | 10 +- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 141 ++++++++++++++---- 
local-multi-validator/Dockerfile.unified | 2 +- local-multi-validator/README.md | 20 +++ local-multi-validator/devnet | 38 ++++- local-multi-validator/docker-compose.yml | 4 + .../scripts/setup-universal.sh | 9 ++ 10 files changed, 201 insertions(+), 32 deletions(-) diff --git a/.gitignore b/.gitignore index 7cb5df7d..951f3e1c 100755 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ universalClient/coverage.out # TSS data directory tss-data/ local-native/data/ +e2e-tests/.pchain/ +e2e-tests/genesis_accounts.json diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index ebec79f6..545bdc87 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -13,7 +13,9 @@ KEYRING_BACKEND=test # Genesis key recovery/funding GENESIS_KEY_NAME=genesis-acc-1 -GENESIS_KEY_HOME=./local-native/data/validator1/.pchain +GENESIS_KEY_HOME=./e2e-tests/.pchain +# Optional local fallback file. If missing, setup.sh reads accounts from docker core-validator-1 (/tmp/push-accounts/genesis_accounts.json) +GENESIS_ACCOUNTS_JSON=./e2e-tests/genesis_accounts.json # Optional: set to skip interactive mnemonic prompt # GENESIS_MNEMONIC="word1 word2 ..." @@ -21,6 +23,7 @@ GENESIS_KEY_HOME=./local-native/data/validator1/.pchain # Address to fund from genesis account FUND_TO_ADDRESS=push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 FUND_AMOUNT=1000000000000000000upc +POOL_CREATION_TOPUP_AMOUNT=50000000000000000000upc GAS_PRICES=100000000000upc # EVM private key used by forge/hardhat scripts diff --git a/e2e-tests/README.md b/e2e-tests/README.md index fc8389bc..cf5c8bef 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -4,7 +4,7 @@ This folder provides a full, automated local E2E bootstrap for Push Chain. It covers: -1. local-native devnet (validators + universal validators) +1. local-multi-validator devnet (Docker, validators + universal validators) 2. genesis key recovery + account funding 3. core contracts deployment 4. 
swap AMM deployment (WPC + V3 core + V3 periphery) @@ -54,10 +54,16 @@ Important variables in `.env`: - `PUSH_RPC_URL` (default `http://localhost:8545`) - `PRIVATE_KEY` - `FUND_TO_ADDRESS` +- `POOL_CREATION_TOPUP_AMOUNT` (funding for deployer before pool creation) - `CORE_CONTRACTS_BRANCH` - `SWAP_AMM_BRANCH` - `GATEWAY_BRANCH` (currently `e2e-push-node`) +Genesis account source: + +- `GENESIS_ACCOUNTS_JSON` can point to a local file, but if missing `setup.sh` automatically + reads `/tmp/push-accounts/genesis_accounts.json` from docker container `core-validator-1`. + Path settings are repository-relative and portable. --- @@ -178,7 +184,7 @@ For a fresh run: ```bash rm -rf e2e-tests/repos -./local-native/devnet down || true +./local-multi-validator/devnet down || true ./e2e-tests/setup.sh replace-addresses ./e2e-tests/setup.sh all ``` diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index c4fee1c2..83f7d660 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-02-19T11:46:45Z", + "generatedAt": "2026-02-20T13:50:15Z", "contracts": { "WPC": "0x4ff2d01380CCd03A53457935B2e9d2eD092e9300", "Factory": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 78522218..be63e66b 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -18,10 +18,13 @@ fi : "${CHAIN_ID:=localchain_9000-1}" : "${KEYRING_BACKEND:=test}" : "${GENESIS_KEY_NAME:=genesis-acc-1}" -: "${GENESIS_KEY_HOME:=$PUSH_CHAIN_DIR/local-native/data/validator1/.pchain}" -: "${GENESIS_ACCOUNTS_JSON:=$PUSH_CHAIN_DIR/local-native/data/accounts/genesis_accounts.json}" +: "${GENESIS_KEY_HOME:=./e2e-tests/.pchain}" +: "${GENESIS_ACCOUNTS_JSON:=./e2e-tests/genesis_accounts.json}" : "${FUND_AMOUNT:=1000000000000000000upc}" +: "${POOL_CREATION_TOPUP_AMOUNT:=50000000000000000000upc}" : "${GAS_PRICES:=100000000000upc}" +: "${LOCAL_DEVNET_DIR:=./local-multi-validator}" +: 
"${LEGACY_LOCAL_NATIVE_DIR:=./local-native}" : "${CORE_CONTRACTS_REPO:=https://github.com/pushchain/push-chain-core-contracts.git}" : "${CORE_CONTRACTS_BRANCH:=e2e-push-node}" @@ -52,6 +55,8 @@ abs_from_root() { GENESIS_KEY_HOME="$(abs_from_root "$GENESIS_KEY_HOME")" GENESIS_ACCOUNTS_JSON="$(abs_from_root "$GENESIS_ACCOUNTS_JSON")" +LOCAL_DEVNET_DIR="$(abs_from_root "$LOCAL_DEVNET_DIR")" +LEGACY_LOCAL_NATIVE_DIR="$(abs_from_root "$LEGACY_LOCAL_NATIVE_DIR")" E2E_PARENT_DIR="$(abs_from_root "$E2E_PARENT_DIR")" CORE_CONTRACTS_DIR="$(abs_from_root "$CORE_CONTRACTS_DIR")" SWAP_AMM_DIR="$(abs_from_root "$SWAP_AMM_DIR")" @@ -76,6 +81,24 @@ log_ok() { printf "%b\n" "${green}✓${nc} $*"; } log_warn() { printf "%b\n" "${yellow}!${nc} $*"; } log_err() { printf "%b\n" "${red}x${nc} $*"; } +get_genesis_accounts_json() { + if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then + cat "$GENESIS_ACCOUNTS_JSON" + return 0 + fi + + if command -v docker >/dev/null 2>&1; then + if docker ps --format '{{.Names}}' | grep -qx 'core-validator-1'; then + if docker exec core-validator-1 test -f /tmp/push-accounts/genesis_accounts.json >/dev/null 2>&1; then + docker exec core-validator-1 cat /tmp/push-accounts/genesis_accounts.json + return 0 + fi + fi + fi + + return 1 +} + require_cmd() { local c for c in "$@"; do @@ -263,25 +286,47 @@ clone_or_update_repo() { step_devnet() { require_cmd bash - log_info "Starting local-native devnet" + log_info "Starting local-multi-validator devnet" ( - cd "$PUSH_CHAIN_DIR/local-native" - ./devnet build - ./devnet start 4 + cd "$LOCAL_DEVNET_DIR" + ./devnet start --build ./devnet setup-uvalidators - ./devnet start-uv 4 ) log_ok "Devnet is up" } +step_stop_running_nodes() { + log_info "Stopping running local nodes/validators" + + if [[ -x "$LOCAL_DEVNET_DIR/devnet" ]]; then + ( + cd "$LOCAL_DEVNET_DIR" + ./devnet down || true + ) + fi + + if [[ -x "$LEGACY_LOCAL_NATIVE_DIR/devnet" ]]; then + ( + cd "$LEGACY_LOCAL_NATIVE_DIR" + ./devnet down || true + ) + fi + + pkill -f 
"$PUSH_CHAIN_DIR/build/pchaind start" >/dev/null 2>&1 || true + pkill -f "$PUSH_CHAIN_DIR/build/puniversald" >/dev/null 2>&1 || true + + log_ok "Running nodes stopped" +} + step_print_genesis() { require_cmd jq - if [[ ! -f "$GENESIS_ACCOUNTS_JSON" ]]; then - log_err "Missing genesis accounts file: $GENESIS_ACCOUNTS_JSON" + local accounts_json + if ! accounts_json="$(get_genesis_accounts_json)"; then + log_err "Could not resolve genesis accounts from $GENESIS_ACCOUNTS_JSON or docker container core-validator-1" exit 1 fi - jq -r '.[0] | "Account: \(.name)\nAddress: \(.address)\nMnemonic: \(.mnemonic)"' "$GENESIS_ACCOUNTS_JSON" + jq -r '.[0] | "Account: \(.name)\nAddress: \(.address)\nMnemonic: \(.mnemonic)"' <<<"$accounts_json" } step_recover_genesis_key() { @@ -289,15 +334,17 @@ step_recover_genesis_key() { local mnemonic="${GENESIS_MNEMONIC:-}" if [[ -z "$mnemonic" ]]; then - if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then + local accounts_json + accounts_json="$(get_genesis_accounts_json || true)" + if [[ -n "$accounts_json" ]]; then mnemonic="$(jq -r --arg n "$GENESIS_KEY_NAME" ' (first(.[] | select(.name == $n) | .mnemonic) // first(.[].mnemonic) // "") - ' "$GENESIS_ACCOUNTS_JSON")" + ' <<<"$accounts_json")" fi fi if [[ -z "$mnemonic" ]]; then - log_err "Could not auto-resolve mnemonic from $GENESIS_ACCOUNTS_JSON" + log_err "Could not auto-resolve mnemonic from $GENESIS_ACCOUNTS_JSON or docker container core-validator-1" log_err "Set GENESIS_MNEMONIC in e2e-tests/.env" exit 1 fi @@ -812,7 +859,7 @@ step_add_uregistry_configs() { --chain-config "$payload" \ --from "$GENESIS_KEY_NAME" \ --keyring-backend "$KEYRING_BACKEND" \ - --home data/validator1/.pchain \ + --home "$GENESIS_KEY_HOME" \ --node tcp://127.0.0.1:26657 \ --gas-prices "$GAS_PRICES" \ -y)" @@ -821,7 +868,7 @@ step_add_uregistry_configs() { --token-config "$payload" \ --from "$GENESIS_KEY_NAME" \ --keyring-backend "$KEYRING_BACKEND" \ - --home data/validator1/.pchain \ + --home "$GENESIS_KEY_HOME" \ 
--node tcp://127.0.0.1:26657 \ --gas-prices "$GAS_PRICES" \ -y)" @@ -853,10 +900,7 @@ step_add_uregistry_configs() { } log_info "Adding chain config to uregistry" - ( - cd "$PUSH_CHAIN_DIR/local-native" - run_registry_tx "chain" "$chain_payload" - ) + run_registry_tx "chain" "$chain_payload" local deployed_addrs token_file token_addr matched_count deployed_addrs="$(jq -r '.tokens[]?.address | ascii_downcase' "$DEPLOY_ADDRESSES_FILE")" @@ -870,10 +914,7 @@ step_add_uregistry_configs() { if echo "$deployed_addrs" | grep -Fxq "$token_addr"; then token_payload="$(jq -c . "$token_file")" log_info "Adding token config to uregistry: $(basename "$token_file")" - ( - cd "$PUSH_CHAIN_DIR/local-native" - run_registry_tx "token" "$token_payload" - ) + run_registry_tx "token" "$token_payload" matched_count=$((matched_count + 1)) fi done < <(find "$TOKENS_CONFIG_DIR" -maxdepth 1 -type f -name '*.json' | sort) @@ -943,9 +984,11 @@ step_sync_test_addresses() { } step_create_all_wpc_pools() { - require_cmd node + require_cmd node cast "$PUSH_CHAIN_DIR/build/pchaind" ensure_deploy_file + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + if [[ ! -f "$TEST_ADDRESSES_PATH" ]]; then log_err "Missing test-addresses.json at $TEST_ADDRESSES_PATH" exit 1 @@ -964,6 +1007,51 @@ step_create_all_wpc_pools() { return 0 fi + local deployer_evm_addr + deployer_evm_addr="$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || true)" + if ! 
validate_eth_address "$deployer_evm_addr"; then + log_err "Could not resolve deployer EVM address from PRIVATE_KEY" + exit 1 + fi + + local deployer_hex deployer_push_addr + deployer_hex="$(echo "$deployer_evm_addr" | tr '[:upper:]' '[:lower:]' | sed 's/^0x//')" + deployer_push_addr="$("$PUSH_CHAIN_DIR/build/pchaind" debug addr "$deployer_hex" 2>/dev/null | awk -F': ' '/Bech32 Acc:/ {print $2; exit}')" + if [[ -z "$deployer_push_addr" ]]; then + log_err "Could not derive bech32 deployer address from $deployer_evm_addr" + exit 1 + fi + + log_info "Funding deployer $deployer_push_addr ($deployer_evm_addr) for pool creation ($POOL_CREATION_TOPUP_AMOUNT)" + local fund_attempt=1 + local fund_max_attempts=5 + local fund_out="" + while true; do + fund_out="$("$PUSH_CHAIN_DIR/build/pchaind" tx bank send "$GENESIS_KEY_NAME" "$deployer_push_addr" "$POOL_CREATION_TOPUP_AMOUNT" \ + --gas-prices "$GAS_PRICES" \ + --keyring-backend "$KEYRING_BACKEND" \ + --chain-id "$CHAIN_ID" \ + --home "$GENESIS_KEY_HOME" \ + -y 2>&1 || true)" + + if echo "$fund_out" | grep -q 'txhash:' || echo "$fund_out" | grep -q '"txhash"'; then + log_ok "Deployer funding transaction submitted" + break + fi + + if echo "$fund_out" | grep -qi 'account sequence mismatch' && [[ "$fund_attempt" -lt "$fund_max_attempts" ]]; then + log_warn "Funding sequence mismatch on attempt $fund_attempt/$fund_max_attempts. Retrying..." 
+ fund_attempt=$((fund_attempt + 1)) + sleep 2 + continue + fi + + log_err "Failed to fund deployer for pool creation" + echo "$fund_out" + exit 1 + done + sleep 2 + while IFS=$'\t' read -r token_symbol token_addr; do [[ -n "$token_addr" ]] || continue if [[ "$(echo "$token_addr" | tr '[:upper:]' '[:lower:]')" == "$(echo "$wpc_addr" | tr '[:upper:]' '[:lower:]')" ]]; then @@ -1037,6 +1125,7 @@ step_configure_universal_core() { } cmd_all() { + step_stop_running_nodes step_devnet step_recover_genesis_key step_fund_account @@ -1057,7 +1146,7 @@ cmd_show_help() { Usage: $(basename "$0") Commands: - devnet Build/start local-native devnet + uvalidators + devnet Build/start local-multi-validator devnet + uvalidators print-genesis Print first genesis account + mnemonic recover-genesis-key Recover genesis key into local keyring fund Fund FUND_TO_ADDRESS from genesis key @@ -1070,7 +1159,7 @@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) - add-uregistry-configs Submit chain + token config txs via local-native validator1 + add-uregistry-configs Submit chain + token config txs via local-multi-validator validator1 record-contract K A Manually record contract key/address record-token N S A Manually record token name/symbol/address all Run full setup pipeline diff --git a/local-multi-validator/Dockerfile.unified b/local-multi-validator/Dockerfile.unified index 070594ad..0ff6edc8 100644 --- a/local-multi-validator/Dockerfile.unified +++ b/local-multi-validator/Dockerfile.unified @@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/go/pkg/mod,id=go-mod \ go mod tidy # Replace admin addresses for local-multi-validator setup -RUN sed -i 's/push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a/push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20/g' \ +RUN sed -i 
's/push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a/push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20/g' \ ./x/uregistry/types/params.go \ ./x/utss/types/params.go \ ./x/uvalidator/types/params.go diff --git a/local-multi-validator/README.md b/local-multi-validator/README.md index 818b72f6..f130d579 100644 --- a/local-multi-validator/README.md +++ b/local-multi-validator/README.md @@ -37,6 +37,20 @@ docker compose up --build - Auto-builds base image if missing (~15-20 min first time) - Pulls core/universal from cache or builds locally - Starts all 8 validators +- Auto-sets Sepolia `event_start_from` to latest block for all universal validators + +### Sepolia Event Start Block + +On `./devnet start`, the script fetches latest Sepolia block height from `https://sepolia.drpc.org` +and injects it into each universal validator config: + +- `chain_configs["eip155:11155111"].event_start_from = ` + +You can override this manually at startup: + +```bash +SEPOLIA_EVENT_START_FROM=12345678 ./devnet start +``` ### I Changed Core Validator Code **Files:** `cmd/pchaind/`, `app/`, `x/` modules @@ -133,6 +147,12 @@ docker compose up -d # Start containers directly | `./devnet push-cache` | Push local images to GCR | | `./devnet refresh-cache` | Force rebuild and push to GCR | +The `start` command also supports: + +| Environment Variable | Description | +|----------------------|-------------| +| `SEPOLIA_EVENT_START_FROM` | Force universal validators to start monitoring Sepolia from a specific block | + ## Endpoints | Service | Port | Description | diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index fd92da91..8fdc2fc5 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -15,6 +15,8 @@ cd "$SCRIPT_DIR" # ═══════════════════════════════════════════════════════════════════════════════ GCR_REGISTRY="${GCR_REGISTRY:-gcr.io/push-chain-testnet}" CACHE_TAG="${CACHE_TAG:-latest}" +SEPOLIA_CHAIN_ID="eip155:11155111" 
+SEPOLIA_DEFAULT_RPC_URL="${SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.drpc.org}" # ═══════════════════════════════════════════════════════════════════════════════ # COLORS @@ -68,6 +70,23 @@ has_buildx() { docker buildx version >/dev/null 2>&1 } +fetch_sepolia_height() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}') + + local hex_height + hex_height=$(echo "$response" | jq -r '.result // empty') + + if [ -z "$hex_height" ] || [ "$hex_height" = "null" ] || [[ ! "$hex_height" =~ ^0x[0-9a-fA-F]+$ ]]; then + return 1 + fi + + echo "$((16#${hex_height#0x}))" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS HELPERS # ═══════════════════════════════════════════════════════════════════════════════ @@ -413,7 +432,24 @@ cmd_up() { fi fi - docker compose up -d + local sepolia_start_height="${SEPOLIA_EVENT_START_FROM:-}" + if [ -z "$sepolia_start_height" ]; then + if sepolia_start_height=$(fetch_sepolia_height "$SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Sepolia event_start_from: $sepolia_start_height" + else + print_warning "Could not fetch Sepolia latest block from $SEPOLIA_DEFAULT_RPC_URL" + print_warning "Universal validators will use default event_start_from from pushuv config" + sepolia_start_height="" + fi + else + print_status "Using provided SEPOLIA_EVENT_START_FROM: $sepolia_start_height" + fi + + if [ -n "$sepolia_start_height" ]; then + SEPOLIA_EVENT_START_FROM="$sepolia_start_height" docker compose up -d + else + docker compose up -d + fi # Auto-push to cache if we built locally (populate cache for team) if [ -n "$built_locally" ] && [ -z "$skip_cache" ]; then diff --git a/local-multi-validator/docker-compose.yml b/local-multi-validator/docker-compose.yml index a9374086..1bd2fa29 100644 --- a/local-multi-validator/docker-compose.yml +++ 
b/local-multi-validator/docker-compose.yml @@ -231,6 +231,7 @@ services: - CORE_VALIDATOR_GRPC=core-validator-1:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-1: @@ -264,6 +265,7 @@ services: - CORE_VALIDATOR_GRPC=core-validator-2:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-2: @@ -297,6 +299,7 @@ services: - CORE_VALIDATOR_GRPC=core-validator-3:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-3: @@ -330,6 +333,7 @@ services: - CORE_VALIDATOR_GRPC=core-validator-4:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-4: diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index cfb4d3f3..0ac82001 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -136,6 +136,15 @@ if [ "$QUERY_PORT" != "8080" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi +# Optionally override Sepolia event start height (set by ./devnet start) +if [ -n "${SEPOLIA_EVENT_START_FROM:-}" ]; then + echo "📍 Setting Sepolia event_start_from: $SEPOLIA_EVENT_START_FROM" + jq --argjson height "$SEPOLIA_EVENT_START_FROM" \ + '.chain_configs["eip155:11155111"].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + # --------------------------- # === SET CORE VALOPER ADDRESS === # 
--------------------------- From 472db3377641f4534d4cab80aabd5bb9e76d0aee Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Tue, 24 Feb 2026 13:12:26 +0530 Subject: [PATCH 05/61] sdk-setup added --- e2e-tests/README.md | 56 ++++++++++- e2e-tests/deploy_addresses.json | 26 ----- e2e-tests/setup.sh | 163 ++++++++++++++++++++++++++++++++ 3 files changed, 218 insertions(+), 27 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index cf5c8bef..14b0d54e 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -12,7 +12,8 @@ It covers: 6. core `.env` generation from deployed addresses 7. token config update (`eth_sepolia_eth.json`) 8. gateway contracts deployment -9. uregistry chain/token config submission +9. push-chain-sdk setup + E2E test runners +10. uregistry chain/token config submission --- @@ -22,6 +23,7 @@ It covers: - push-chain-core-contracts - push-chain-swap-internal-amm-contracts - push-chain-gateway-contracts + - push-chain-sdk - `e2e-tests/logs/` — logs for each major deployment step - `e2e-tests/deploy_addresses.json` — contract/token address source-of-truth @@ -58,6 +60,7 @@ Important variables in `.env`: - `CORE_CONTRACTS_BRANCH` - `SWAP_AMM_BRANCH` - `GATEWAY_BRANCH` (currently `e2e-push-node`) +- `PUSH_CHAIN_SDK_BRANCH` (default `feb-11-2026-alpha-publish`) Genesis account source: @@ -107,11 +110,62 @@ This runs the full sequence in order: ./e2e-tests/setup.sh write-core-env ./e2e-tests/setup.sh update-token-config ./e2e-tests/setup.sh setup-gateway +./e2e-tests/setup.sh setup-sdk +./e2e-tests/setup.sh sdk-test-all +./e2e-tests/setup.sh sdk-test-pctx-last-transaction +./e2e-tests/setup.sh sdk-test-send-to-self +./e2e-tests/setup.sh sdk-test-progress-hook +./e2e-tests/setup.sh sdk-test-bridge-multicall +./e2e-tests/setup.sh sdk-test-pushchain ./e2e-tests/setup.sh add-uregistry-configs ./e2e-tests/setup.sh replace-addresses ./e2e-tests/setup.sh all ``` +### push-chain-sdk setup 
+ tests + +Clone and install dependencies in one command: + +```bash +./e2e-tests/setup.sh setup-sdk +``` + +This executes: + +- `yarn install` +- `npm install` +- `npm i --save-dev @types/bs58` + +It also fetches `UEA_PROXY_IMPLEMENTATION` with: + +- `cast call 0x00000000000000000000000000000000000000ea "UEA_PROXY_IMPLEMENTATION()(address)"` + +Then it updates both: + +- `e2e-tests/deploy_addresses.json` as `contracts.UEA_PROXY_IMPLEMENTATION` +- `push-chain-sdk/packages/core/src/lib/constants/chain.ts` at `[PUSH_NETWORK.LOCALNET]` + +Run all configured SDK E2E files: + +```bash +./e2e-tests/setup.sh sdk-test-all +``` + +Run single files: + +```bash +./e2e-tests/setup.sh sdk-test-pctx-last-transaction +./e2e-tests/setup.sh sdk-test-send-to-self +./e2e-tests/setup.sh sdk-test-progress-hook +./e2e-tests/setup.sh sdk-test-bridge-multicall +./e2e-tests/setup.sh sdk-test-pushchain +``` + +Before each SDK test run, the script automatically rewrites these values in configured files: + +- `PUSH_NETWORK.TESTNET_DONUT` → `PUSH_NETWORK.LOCALNET` +- `PUSH_NETWORK.TESTNET` → `PUSH_NETWORK.LOCALNET` + --- ## Address tracking model diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 83f7d660..e69de29b 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,26 +0,0 @@ -{ - "generatedAt": "2026-02-20T13:50:15Z", - "contracts": { - "WPC": "0x4ff2d01380CCd03A53457935B2e9d2eD092e9300", - "Factory": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", - "SwapRouter": "0x6a20557430be6412AF423681e35CC96797506F3a", - "QuoterV2": "0xc2055dD3A7Ad875520BdB5c91300F964F7038C73", - "PositionManager": "0xdBdFEB7A79868Cb4A4e9e57D7d28C84AE77AC4BC" - }, - "tokens": [ - { - "name": "pETH.eth", - "symbol": "pETH", - "address": "0x90F4A15601E08570D6fFbaE883C44BDB85bDb7d1", - "source": "core-contracts", - "decimals": 18 - }, - { - "name": "USDT.eth", - "symbol": "USDT.eth", - "address": "0x00cb38A885cf8D0B2dDfd19Bd1c04aAAC44C5a86", - "source": 
"core-contracts", - "decimals": 6 - } - ] -} diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index be63e66b..7b99cc63 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -32,11 +32,16 @@ fi : "${SWAP_AMM_BRANCH:=e2e-push-node}" : "${GATEWAY_REPO:=https://github.com/pushchain/push-chain-gateway-contracts.git}" : "${GATEWAY_BRANCH:=e2e-push-node}" +: "${PUSH_CHAIN_SDK_REPO:=https://github.com/pushchain/push-chain-sdk.git}" +: "${PUSH_CHAIN_SDK_BRANCH:=feb-11-2026-alpha-publish}" : "${E2E_PARENT_DIR:=../}" : "${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" : "${SWAP_AMM_DIR:=$E2E_PARENT_DIR/push-chain-swap-internal-amm-contracts}" : "${GATEWAY_DIR:=$E2E_PARENT_DIR/push-chain-gateway-contracts}" +: "${PUSH_CHAIN_SDK_DIR:=$E2E_PARENT_DIR/push-chain-sdk}" +: "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__}" +: "${PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH:=packages/core/src/lib/constants/chain.ts}" : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" @@ -61,6 +66,7 @@ E2E_PARENT_DIR="$(abs_from_root "$E2E_PARENT_DIR")" CORE_CONTRACTS_DIR="$(abs_from_root "$CORE_CONTRACTS_DIR")" SWAP_AMM_DIR="$(abs_from_root "$SWAP_AMM_DIR")" GATEWAY_DIR="$(abs_from_root "$GATEWAY_DIR")" +PUSH_CHAIN_SDK_DIR="$(abs_from_root "$PUSH_CHAIN_SDK_DIR")" DEPLOY_ADDRESSES_FILE="$(abs_from_root "$DEPLOY_ADDRESSES_FILE")" TEST_ADDRESSES_PATH="$(abs_from_root "$TEST_ADDRESSES_PATH")" LOG_DIR="$(abs_from_root "$LOG_DIR")" @@ -284,6 +290,149 @@ clone_or_update_repo() { fi } +sdk_test_files() { + local base_dir="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" + local file alt + local requested_files=( + "pctx-last-transaction.spec.ts" + "send-to-self.spec.ts" + "progress-hook-per-tx.spec.ts" + "bridge-multicall.spec.ts" + "pushchain.spec.ts" + ) + + for file in "${requested_files[@]}"; do + if [[ -f "$base_dir/$file" ]]; then + printf "%s\n" "$base_dir/$file" + continue + fi + 
+ if [[ "$file" == *.tx ]]; then + alt="${file%.tx}.ts" + if [[ -f "$base_dir/$alt" ]]; then + log_warn "Test file '$file' not found. Using '$alt'." + printf "%s\n" "$base_dir/$alt" + continue + fi + fi + + log_err "SDK test file not found: $base_dir/$file" + exit 1 + done +} + +sdk_prepare_test_files_for_localnet() { + require_cmd perl + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR/.git" && ! -d "$PUSH_CHAIN_SDK_DIR" ]]; then + log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR" + log_err "Run: $0 setup-sdk" + exit 1 + fi + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" ]]; then + log_err "SDK E2E directory not found: $PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" + exit 1 + fi + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g' "$test_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$test_file")" + done < <(sdk_test_files) +} + +step_setup_push_chain_sdk() { + require_cmd git yarn npm cast jq + + local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local uea_impl_raw uea_impl synced_localnet_uea + + clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" "$PUSH_CHAIN_SDK_DIR" + + if [[ ! -f "$chain_constants_file" ]]; then + log_err "SDK chain constants file not found: $chain_constants_file" + exit 1 + fi + + log_info "Fetching UEA_PROXY_IMPLEMENTATION from local chain" + uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + + if ! 
validate_eth_address "$uea_impl"; then + log_err "Could not resolve valid UEA_PROXY_IMPLEMENTATION address from cast output: $uea_impl_raw" + exit 1 + fi + + ensure_deploy_file + record_contract "UEA_PROXY_IMPLEMENTATION" "$uea_impl" + + UEA_PROXY_IMPL="$uea_impl" perl -0pi -e 's#(\[PUSH_NETWORK\.LOCALNET\]:\s*)'\''[^'\'']*'\''#$1'\''$ENV{UEA_PROXY_IMPL}'\''#g' "$chain_constants_file" + + synced_localnet_uea="$(grep -E '\[PUSH_NETWORK\.LOCALNET\]:' "$chain_constants_file" | head -1 | sed -E "s/.*'([^']+)'.*/\1/")" + if [[ "$synced_localnet_uea" != "$uea_impl" ]]; then + log_err "Failed to update PUSH_NETWORK.LOCALNET UEA proxy in $chain_constants_file" + exit 1 + fi + + log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" + + log_info "Installing push-chain-sdk dependencies" + ( + cd "$PUSH_CHAIN_SDK_DIR" + yarn install + npm install + npm i --save-dev @types/bs58 + ) + + log_ok "push-chain-sdk setup complete" +} + +step_run_sdk_test_file() { + local test_basename="$1" + local test_file="" + + sdk_prepare_test_files_for_localnet + + while IFS= read -r candidate; do + [[ -n "$candidate" ]] || continue + if [[ "$(basename "$candidate")" == "$test_basename" ]]; then + test_file="$candidate" + break + fi + done < <(sdk_test_files) + + if [[ -z "$test_file" ]]; then + log_err "Requested SDK test file not in configured list: $test_basename" + exit 1 + fi + + log_info "Running SDK test: $test_basename" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx jest "$test_file" + ) + + log_ok "Completed SDK test: $test_basename" +} + +step_run_sdk_tests_all() { + local test_file + + sdk_prepare_test_files_for_localnet + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + log_info "Running SDK test: $(basename "$test_file")" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx jest "$test_file" + ) + done < <(sdk_test_files) + + log_ok "Completed all configured SDK E2E tests" +} + step_devnet() { require_cmd bash log_info "Starting local-multi-validator devnet" @@ -1159,6 +1308,13 
@@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) + setup-sdk Clone/setup push-chain-sdk and install dependencies + sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests + sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts + sdk-test-send-to-self Run send-to-self.spec.ts + sdk-test-progress-hook Run progress-hook-per-tx.spec.ts + sdk-test-bridge-multicall Run bridge-multicall.spec.ts + sdk-test-pushchain Run pushchain.spec.ts add-uregistry-configs Submit chain + token config txs via local-multi-validator validator1 record-contract K A Manually record contract key/address record-token N S A Manually record token name/symbol/address @@ -1187,6 +1343,13 @@ main() { write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; setup-gateway) step_setup_gateway ;; + setup-sdk) step_setup_push_chain_sdk ;; + sdk-test-all) step_run_sdk_tests_all ;; + sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; + sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; + sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; + sdk-test-bridge-multicall) step_run_sdk_test_file "bridge-multicall.spec.ts" ;; + sdk-test-pushchain) step_run_sdk_test_file "pushchain.spec.ts" ;; add-uregistry-configs) step_add_uregistry_configs ;; record-contract) ensure_deploy_file From c1f442fe70aaa2b96fbf36fc2f91962dccc28481 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Tue, 24 Feb 2026 13:14:34 +0530 Subject: [PATCH 06/61] changed core-contracts branch in .env.example --- e2e-tests/.env.example | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git 
a/e2e-tests/.env.example b/e2e-tests/.env.example index 545bdc87..c5442fa0 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -31,7 +31,7 @@ PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 # External repositories CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git -CORE_CONTRACTS_BRANCH=e2e-push-node +CORE_CONTRACTS_BRANCH=node-e2e SWAP_AMM_REPO=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git SWAP_AMM_BRANCH=e2e-push-node @@ -39,11 +39,16 @@ SWAP_AMM_BRANCH=e2e-push-node GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git GATEWAY_BRANCH=e2e-push-node +PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git +PUSH_CHAIN_SDK_BRANCH=feb-11-2026-alpha-publish + # Local clone layout (outside push-chain directory) E2E_PARENT_DIR=../ CORE_CONTRACTS_DIR=../push-chain-core-contracts SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts GATEWAY_DIR=../push-chain-gateway-contracts +PUSH_CHAIN_SDK_DIR=../push-chain-sdk +PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__ # Tracking files DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json From 15be6a6a6732d1252e4e7620a8abbde1442106d3 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Feb 2026 11:43:56 +0530 Subject: [PATCH 07/61] fix: deploy-addresses.json related issues --- e2e-tests/.env.example | 2 +- e2e-tests/setup.sh | 33 ++++++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index c5442fa0..91a15368 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -27,7 +27,7 @@ POOL_CREATION_TOPUP_AMOUNT=50000000000000000000upc GAS_PRICES=100000000000upc # EVM private key used by forge/hardhat scripts -PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 +PRIVATE_KEY=0xYOURPRIVATEKEY # External repositories 
CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 7b99cc63..decc33ef 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -201,7 +201,9 @@ resolve_branch() { } ensure_deploy_file() { - if [[ ! -f "$DEPLOY_ADDRESSES_FILE" ]]; then + mkdir -p "$(dirname "$DEPLOY_ADDRESSES_FILE")" + + if [[ ! -s "$DEPLOY_ADDRESSES_FILE" ]]; then cat >"$DEPLOY_ADDRESSES_FILE" <<'JSON' { "generatedAt": "", @@ -209,7 +211,29 @@ ensure_deploy_file() { "tokens": [] } JSON + return fi + + if ! jq -e . "$DEPLOY_ADDRESSES_FILE" >/dev/null 2>&1; then + log_warn "Deploy file is empty/invalid JSON, reinitializing: $DEPLOY_ADDRESSES_FILE" + cat >"$DEPLOY_ADDRESSES_FILE" <<'JSON' +{ + "generatedAt": "", + "contracts": {}, + "tokens": [] +} +JSON + return + fi + + local tmp + tmp="$(mktemp)" + jq ' + .generatedAt = (.generatedAt // "") + | .contracts = (.contracts // {}) + | .tokens = (.tokens // []) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" } set_generated_at() { @@ -684,7 +708,7 @@ assert_required_addresses() { for key in "${required[@]}"; do val="$(address_from_deploy_contract "$key")" if [[ -z "$val" ]]; then - log_err "Missing required address in deploy file: contracts.$key" + log_warn "Missing address in deploy file: contracts.$key" missing=1 else log_ok "contracts.$key=$val" @@ -692,8 +716,7 @@ assert_required_addresses() { done if [[ "$missing" -ne 0 ]]; then - log_err "Required addresses are missing in $DEPLOY_ADDRESSES_FILE" - exit 1 + log_warn "Some addresses are missing in $DEPLOY_ADDRESSES_FILE; continuing with available values" fi } @@ -1304,7 +1327,7 @@ Commands: sync-addresses Apply deploy_addresses.json into test-addresses.json create-pool Create WPC pools for all deployed core tokens configure-core Run configureUniversalCore.s.sol (auto --resume retries) - check-addresses Verify required deploy addresses exist (WPC/Factory/QuoterV2/SwapRouter) + 
check-addresses Check/report deploy addresses (WPC/Factory/QuoterV2/SwapRouter) write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) From 740d0456fce5d1cd5986b4a33d9f11a4dd94b029 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Feb 2026 12:30:50 +0530 Subject: [PATCH 08/61] fix: made token-config deploy_addresses.json driven in e2e-setup --- e2e-tests/setup.sh | 46 ++++++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index decc33ef..ade9e924 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -1012,9 +1012,6 @@ step_add_uregistry_configs() { [[ -f "$CHAIN_CONFIG_PATH" ]] || { log_err "Missing chain config: $CHAIN_CONFIG_PATH"; exit 1; } [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } - # Ensure all deployed core tokens have updated contract addresses in token config files. - step_update_deployed_token_configs - local chain_payload token_payload chain_payload="$(jq -c . 
"$CHAIN_CONFIG_PATH")" @@ -1074,27 +1071,44 @@ step_add_uregistry_configs() { log_info "Adding chain config to uregistry" run_registry_tx "chain" "$chain_payload" - local deployed_addrs token_file token_addr matched_count - deployed_addrs="$(jq -r '.tokens[]?.address | ascii_downcase' "$DEPLOY_ADDRESSES_FILE")" + local token_json token_file token_addr token_symbol token_name matched_count submitted_files tmp matched_count=0 + submitted_files="" + + while IFS= read -r token_json; do + token_symbol="$(echo "$token_json" | jq -r '.symbol // ""')" + token_name="$(echo "$token_json" | jq -r '.name // ""')" + token_addr="$(echo "$token_json" | jq -r '.address // ""')" - while IFS= read -r token_file; do - [[ -f "$token_file" ]] || continue - token_addr="$(jq -r '.native_representation.contract_address // "" | ascii_downcase' "$token_file")" [[ -n "$token_addr" ]] || continue - if echo "$deployed_addrs" | grep -Fxq "$token_addr"; then - token_payload="$(jq -c . "$token_file")" - log_info "Adding token config to uregistry: $(basename "$token_file")" - run_registry_tx "token" "$token_payload" - matched_count=$((matched_count + 1)) + token_file="$(find_matching_token_config_file "$token_symbol" "$token_name")" + if [[ -z "$token_file" ]]; then + log_warn "No token config match found for deployed token (uregistry): $token_symbol ($token_name)" + continue fi - done < <(find "$TOKENS_CONFIG_DIR" -maxdepth 1 -type f -name '*.json' | sort) + + if echo "$submitted_files" | grep -Fxq "$token_file"; then + log_warn "Token config already submitted by another deployed token, skipping: $(basename "$token_file")" + continue + fi + + tmp="$(mktemp)" + jq --arg a "$token_addr" '.native_representation.contract_address = $a' "$token_file" >"$tmp" + mv "$tmp" "$token_file" + + token_payload="$(jq -c . 
"$token_file")" + log_info "Adding token config to uregistry: $(basename "$token_file") (from $token_symbol)" + run_registry_tx "token" "$token_payload" + + submitted_files+="$token_file"$'\n' + matched_count=$((matched_count + 1)) + done < <(jq -c '.tokens[]?' "$DEPLOY_ADDRESSES_FILE") if [[ "$matched_count" -eq 0 ]]; then - log_warn "No deployed tokens matched token config files for uregistry add-token-config" + log_warn "No token configs were registered from deploy_addresses.json tokens" else - log_ok "Registered $matched_count deployed token config(s) in uregistry" + log_ok "Registered $matched_count token config(s) from deploy_addresses.json" fi log_ok "uregistry chain/token configs added" From 464988ec55c1a7dc3f0afce92f5549c90cb97a67 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Feb 2026 13:04:01 +0530 Subject: [PATCH 09/61] fix : QoL change script added --- scripts/replace_addresses.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 scripts/replace_addresses.sh diff --git a/scripts/replace_addresses.sh b/scripts/replace_addresses.sh new file mode 100644 index 00000000..2cc63713 --- /dev/null +++ b/scripts/replace_addresses.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -euo pipefail + +OLD_BECH32="push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a" +NEW_BECH32="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" +OLD_HEX="0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" +NEW_HEX="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" + +ROOT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT_DIR" + +if ! command -v git >/dev/null 2>&1; then + echo "git command not found" >&2 + exit 1 +fi + +if ! command -v perl >/dev/null 2>&1; then + echo "perl command not found" >&2 + exit 1 +fi + +git ls-files -z -- ':!scripts/replace_addresses.sh' ':!local-multi-validator/Dockerfile.unified' | xargs -0 perl -pi -e "s/\Q$OLD_BECH32\E/$NEW_BECH32/g; s/\Q$OLD_HEX\E/$NEW_HEX/g" + +echo "Address replacement completed in tracked files." 
From 8461756f6806a09ec212bd78d3034341e31cf351 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Feb 2026 13:34:37 +0530 Subject: [PATCH 10/61] Added replace-addresses command + sdk setup command is updated to wire .env for sdk testing --- Makefile | 4 ++++ e2e-tests/.env.example | 14 ++++++++++++++ e2e-tests/README.md | 12 +++++++++--- e2e-tests/setup.sh | 42 ++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 67 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index a5db6e09..84322ffa 100755 --- a/Makefile +++ b/Makefile @@ -155,6 +155,10 @@ draw-deps: clean: rm -rf snapcraft-local.yaml build/ +.PHONY: replace-addresses +replace-addresses: + bash scripts/replace_addresses.sh + distclean: clean rm -rf vendor/ diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index 91a15368..8ba45535 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -42,6 +42,9 @@ GATEWAY_BRANCH=e2e-push-node PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git PUSH_CHAIN_SDK_BRANCH=feb-11-2026-alpha-publish +# push-chain-sdk core .env target path (relative to PUSH_CHAIN_SDK_DIR) +PUSH_CHAIN_SDK_CORE_ENV_PATH=packages/core/.env + # Local clone layout (outside push-chain directory) E2E_PARENT_DIR=../ CORE_CONTRACTS_DIR=../push-chain-core-contracts @@ -50,6 +53,17 @@ GATEWAY_DIR=../push-chain-gateway-contracts PUSH_CHAIN_SDK_DIR=../push-chain-sdk PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__ +# push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) +# Defaults used by setup-sdk when omitted: +# EVM_PRIVATE_KEY <= PRIVATE_KEY +# EVM_RPC <= PUSH_RPC_URL +# PUSH_PRIVATE_KEY<= PRIVATE_KEY +EVM_PRIVATE_KEY= +EVM_RPC= +SOLANA_RPC_URL=https://api.devnet.solana.com +SOLANA_PRIVATE_KEY= +PUSH_PRIVATE_KEY= + # Tracking files DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json 
TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 14b0d54e..80267847 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -41,6 +41,12 @@ Required tools: Also ensure the Push Chain repo builds/runs locally. +Before running any e2e setup command, run: + +```bash +make replace-addresses +``` + --- ## Configuration @@ -74,7 +80,7 @@ Path settings are repository-relative and portable. ## One-command full run ```bash -./e2e-tests/setup.sh replace-addresses +make replace-addresses ./e2e-tests/setup.sh all ``` @@ -118,7 +124,7 @@ This runs the full sequence in order: ./e2e-tests/setup.sh sdk-test-bridge-multicall ./e2e-tests/setup.sh sdk-test-pushchain ./e2e-tests/setup.sh add-uregistry-configs -./e2e-tests/setup.sh replace-addresses +make replace-addresses ./e2e-tests/setup.sh all ``` @@ -239,7 +245,7 @@ For a fresh run: ```bash rm -rf e2e-tests/repos ./local-multi-validator/devnet down || true -./e2e-tests/setup.sh replace-addresses +make replace-addresses ./e2e-tests/setup.sh all ``` diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index ade9e924..9934a062 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -42,6 +42,7 @@ fi : "${PUSH_CHAIN_SDK_DIR:=$E2E_PARENT_DIR/push-chain-sdk}" : "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__}" : "${PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH:=packages/core/src/lib/constants/chain.ts}" +: "${PUSH_CHAIN_SDK_CORE_ENV_PATH:=packages/core/.env}" : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" @@ -305,8 +306,19 @@ clone_or_update_repo() { fi log_info "Updating repo $(basename "$dest")" + local current_origin + current_origin="$(git -C "$dest" remote get-url origin 2>/dev/null || true)" + if [[ -z "$current_origin" || "$current_origin" != "$repo_url" ]]; then + log_warn "Setting origin for $(basename "$dest") to $repo_url" 
+ if git -C "$dest" remote get-url origin >/dev/null 2>&1; then + git -C "$dest" remote set-url origin "$repo_url" + else + git -C "$dest" remote add origin "$repo_url" + fi + fi + git -C "$dest" fetch origin - git -C "$dest" checkout "$resolved_branch" + git -C "$dest" checkout -B "$resolved_branch" "origin/$resolved_branch" git -C "$dest" reset --hard "origin/$resolved_branch" else log_info "Cloning $(basename "$dest")" @@ -374,6 +386,32 @@ step_setup_push_chain_sdk() { clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" "$PUSH_CHAIN_SDK_DIR" + local sdk_env_path="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CORE_ENV_PATH" + local sdk_evm_private_key sdk_evm_rpc sdk_solana_rpc sdk_solana_private_key sdk_push_private_key + + sdk_evm_private_key="${EVM_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + sdk_evm_rpc="${EVM_RPC:-${PUSH_RPC_URL:-}}" + sdk_solana_rpc="${SOLANA_RPC_URL:-https://api.devnet.solana.com}" + sdk_solana_private_key="${SOLANA_PRIVATE_KEY:-${SVM_PRIVATE_KEY:-${SOL_PRIVATE_KEY:-}}}" + sdk_push_private_key="${PUSH_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + + mkdir -p "$(dirname "$sdk_env_path")" + { + echo "# Auto-generated by e2e-tests/setup.sh setup-sdk" + echo "# Source: e2e-tests/.env" + echo "EVM_PRIVATE_KEY=$sdk_evm_private_key" + echo "EVM_RPC=$sdk_evm_rpc" + echo "SOLANA_RPC_URL=$sdk_solana_rpc" + echo "SOLANA_PRIVATE_KEY=$sdk_solana_private_key" + echo "PUSH_PRIVATE_KEY=$sdk_push_private_key" + } >"$sdk_env_path" + + [[ -n "$sdk_evm_private_key" ]] || log_warn "SDK env EVM_PRIVATE_KEY is empty (set EVM_PRIVATE_KEY or PRIVATE_KEY in e2e-tests/.env)" + [[ -n "$sdk_evm_rpc" ]] || log_warn "SDK env EVM_RPC is empty (set EVM_RPC or PUSH_RPC_URL in e2e-tests/.env)" + [[ -n "$sdk_solana_private_key" ]] || log_warn "SDK env SOLANA_PRIVATE_KEY is empty (set SOLANA_PRIVATE_KEY in e2e-tests/.env)" + [[ -n "$sdk_push_private_key" ]] || log_warn "SDK env PUSH_PRIVATE_KEY is empty (set PUSH_PRIVATE_KEY or PRIVATE_KEY in e2e-tests/.env)" + log_ok "Generated 
push-chain-sdk env file: $sdk_env_path" + if [[ ! -f "$chain_constants_file" ]]; then log_err "SDK chain constants file not found: $chain_constants_file" exit 1 @@ -1345,7 +1383,7 @@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) - setup-sdk Clone/setup push-chain-sdk and install dependencies + setup-sdk Clone/setup push-chain-sdk, generate SDK .env from e2e .env, and install dependencies sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts From d1b4cc2664bd6862999247cb638b93822dd75cba Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Feb 2026 14:27:35 +0530 Subject: [PATCH 11/61] fix: sdk-testing command in e2e-tests --- e2e-tests/setup.sh | 6 +++--- scripts/replace_addresses.sh | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 9934a062..665d1ebb 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -346,7 +346,7 @@ sdk_test_files() { if [[ "$file" == *.tx ]]; then alt="${file%.tx}.ts" if [[ -f "$base_dir/$alt" ]]; then - log_warn "Test file '$file' not found. Using '$alt'." + printf "%b\n" "${yellow}!${nc} Test file '$file' not found. Using '$alt'." 
>&2 printf "%s\n" "$base_dir/$alt" continue fi @@ -472,7 +472,7 @@ step_run_sdk_test_file() { log_info "Running SDK test: $test_basename" ( cd "$PUSH_CHAIN_SDK_DIR" - npx jest "$test_file" + npx nx test core --runInBand --testPathPattern="$(basename "$test_file")" ) log_ok "Completed SDK test: $test_basename" @@ -488,7 +488,7 @@ step_run_sdk_tests_all() { log_info "Running SDK test: $(basename "$test_file")" ( cd "$PUSH_CHAIN_SDK_DIR" - npx jest "$test_file" + npx nx test core --runInBand --testPathPattern="$(basename "$test_file")" ) done < <(sdk_test_files) diff --git a/scripts/replace_addresses.sh b/scripts/replace_addresses.sh index 2cc63713..771f8947 100644 --- a/scripts/replace_addresses.sh +++ b/scripts/replace_addresses.sh @@ -2,9 +2,9 @@ set -euo pipefail -OLD_BECH32="push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a" +OLD_BECH32="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" NEW_BECH32="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" -OLD_HEX="0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" +OLD_HEX="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" NEW_HEX="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" ROOT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" From 2b6687cbbde36f2c0cd30ac5ba79030ad2aa19b9 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:39:41 +0530 Subject: [PATCH 12/61] arb, base, bsc and solana support added for e2e-tests --- app/upgrades/supply-slash/rebalance.json | 4 +- .../chains/arb_sepolia_chain_config.json | 2 +- .../testnet-donut/tokens/arb_sepolia_eth.json | 14 + .../tokens/base_sepolia_eth.json | 14 + .../testnet-donut/tokens/eth_sepolia_eth.json | 4 +- .../tokens/eth_sepolia_usdt.json | 4 +- .../tokens/solana_devnet_sol_new.json | 4 +- data/validator1/.pchain/config/app.toml | 341 ++++++++++++ data/validator1/.pchain/config/client.toml | 17 + data/validator1/.pchain/config/config.toml | 498 ++++++++++++++++++ e2e-tests/deploy_addresses.json | 55 ++ e2e-tests/setup.sh | 23 +- local-multi-validator/README.md | 24 +- local-multi-validator/devnet | 92 +++- local-multi-validator/docker-compose.yml | 16 + .../scripts/setup-universal.sh | 24 +- x/uexecutor/types/constants.go | 2 +- x/uregistry/types/constants.go | 2 +- x/uregistry/types/params.go | 2 +- x/utss/types/params.go | 2 +- x/uvalidator/types/params.go | 2 +- 21 files changed, 1110 insertions(+), 36 deletions(-) create mode 100644 config/testnet-donut/tokens/arb_sepolia_eth.json create mode 100644 config/testnet-donut/tokens/base_sepolia_eth.json create mode 100644 data/validator1/.pchain/config/app.toml create mode 100644 data/validator1/.pchain/config/client.toml create mode 100644 data/validator1/.pchain/config/config.toml diff --git a/app/upgrades/supply-slash/rebalance.json b/app/upgrades/supply-slash/rebalance.json index 1ef536de..20be3628 100644 --- a/app/upgrades/supply-slash/rebalance.json +++ b/app/upgrades/supply-slash/rebalance.json @@ -1345,7 +1345,7 @@ }, { "address": "push149k2570tyvfdh6ct36furn5yex9k00c3k3ckkj", - "evm_address": "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11", + "evm_address": "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4", 
"amount_upc": 100004.0, "rebalanced_tokens": 1304.0 }, @@ -1374,7 +1374,7 @@ "rebalanced_tokens": 1304.0 }, { - "address": "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", + "address": "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", "evm_address": "0x9e510b6120e3cb2a0981b7a77677137108adf529", "amount_upc": 95600.0, "rebalanced_tokens": 1300.0 diff --git a/config/testnet-donut/chains/arb_sepolia_chain_config.json b/config/testnet-donut/chains/arb_sepolia_chain_config.json index 64051629..9e15528d 100644 --- a/config/testnet-donut/chains/arb_sepolia_chain_config.json +++ b/config/testnet-donut/chains/arb_sepolia_chain_config.json @@ -1,6 +1,6 @@ { "chain": "eip155:421614", - "public_rpc_url": "https://endpoints.omniatech.io/v1/arbitrum/sepolia/public", + "public_rpc_url": "https://arbitrum-sepolia.gateway.tenderly.co", "vm_type": 1, "gateway_address": "0x2cd870e0166Ba458dEC615168Fd659AacD795f34", "block_confirmation": { diff --git a/config/testnet-donut/tokens/arb_sepolia_eth.json b/config/testnet-donut/tokens/arb_sepolia_eth.json new file mode 100644 index 00000000..797c5c06 --- /dev/null +++ b/config/testnet-donut/tokens/arb_sepolia_eth.json @@ -0,0 +1,14 @@ +{ + "chain": "eip155:421614", + "address": "0x0000000000000000000000000000000000000000", + "name": "pETH.arb", + "symbol": "pETH.arb", + "decimals": 18, + "enabled": true, + "liquidity_cap": "1000000000000000000000000", + "token_type": 1, + "native_representation": { + "denom": "", + "contract_address": "0x90bFeD13b1D7db6243Dfb554c336b0254F099596" + } +} diff --git a/config/testnet-donut/tokens/base_sepolia_eth.json b/config/testnet-donut/tokens/base_sepolia_eth.json new file mode 100644 index 00000000..72695522 --- /dev/null +++ b/config/testnet-donut/tokens/base_sepolia_eth.json @@ -0,0 +1,14 @@ +{ + "chain": "eip155:84532", + "address": "0x0000000000000000000000000000000000000000", + "name": "pETH.base", + "symbol": "pETH.base", + "decimals": 18, + "enabled": true, + "liquidity_cap": 
"1000000000000000000000000", + "token_type": 1, + "native_representation": { + "denom": "", + "contract_address": "0x697164dD5f2727a4d6EfcF977dCc080Ff10c7459" + } +} diff --git a/config/testnet-donut/tokens/eth_sepolia_eth.json b/config/testnet-donut/tokens/eth_sepolia_eth.json index 1ce55267..a9e1a9b8 100644 --- a/config/testnet-donut/tokens/eth_sepolia_eth.json +++ b/config/testnet-donut/tokens/eth_sepolia_eth.json @@ -6,9 +6,9 @@ "decimals": 18, "enabled": true, "liquidity_cap": "1000000000000000000000000", - "token_type": 1, + "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x2971824Db68229D087931155C2b8bB820B275809" + "contract_address": "0x69c5560bB765a935C345f507D2adD34253FBe41b" } } diff --git a/config/testnet-donut/tokens/eth_sepolia_usdt.json b/config/testnet-donut/tokens/eth_sepolia_usdt.json index 93981bee..45eacd8e 100644 --- a/config/testnet-donut/tokens/eth_sepolia_usdt.json +++ b/config/testnet-donut/tokens/eth_sepolia_usdt.json @@ -6,9 +6,9 @@ "decimals": 6, "enabled": true, "liquidity_cap": "1000000000000000000000000", - "token_type": 1, + "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0xCA0C5E6F002A389E1580F0DB7cd06e4549B5F9d3" + "contract_address": "0xc2055dD3A7Ad875520BdB5c91300F964F7038C73" } } diff --git a/config/testnet-donut/tokens/solana_devnet_sol_new.json b/config/testnet-donut/tokens/solana_devnet_sol_new.json index 2ec25016..68c8acfe 100644 --- a/config/testnet-donut/tokens/solana_devnet_sol_new.json +++ b/config/testnet-donut/tokens/solana_devnet_sol_new.json @@ -6,9 +6,9 @@ "decimals": 9, "enabled": true, "liquidity_cap": "1000000000000000000000000", - "token_type": 4, + "token_type": 4, "native_representation": { "denom": "", - "contract_address": "0x5D525Df2bD99a6e7ec58b76aF2fd95F39874EBed" + "contract_address": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011" } } diff --git a/data/validator1/.pchain/config/app.toml b/data/validator1/.pchain/config/app.toml new file mode 
100644 index 00000000..d025d33c --- /dev/null +++ b/data/validator1/.pchain/config/app.toml @@ -0,0 +1,341 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Base Configuration ### +############################################################################### + +# The minimum gas prices a validator is willing to accept for processing a +# transaction. A transaction's fees must meet the minimum of any denomination +# specified in this config (e.g. 0.25token1,0.0001token2). +minimum-gas-prices = "0stake" + +# The maximum gas a query coming over rest/grpc may consume. +# If this is set to zero, the query can consume an unbounded amount of gas. +query-gas-limit = "0" + +# default: the last 362880 states are kept, pruning at 10 block intervals +# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) +# everything: 2 latest states will be kept; pruning at 10 block intervals. +# custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' +pruning = "default" + +# These are applied if and only if the pruning strategy is custom. +pruning-keep-recent = "0" +pruning-interval = "0" + +# HaltHeight contains a non-zero block height at which a node will gracefully +# halt and shutdown that can be used to assist upgrades and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-height = 0 + +# HaltTime contains a non-zero minimum block time (in Unix seconds) at which +# a node will gracefully halt and shutdown that can be used to assist upgrades +# and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-time = 0 + +# MinRetainBlocks defines the minimum block height offset from the current +# block being committed, such that all blocks past this offset are pruned +# from CometBFT. 
It is used as part of the process of determining the +# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates +# that no blocks should be pruned. +# +# This configuration value is only responsible for pruning CometBFT blocks. +# It has no bearing on application state pruning which is determined by the +# "pruning-*" configurations. +# +# Note: CometBFT block pruning is dependant on this parameter in conjunction +# with the unbonding (safety threshold) period, state pruning and state sync +# snapshot parameters to determine the correct minimum value of +# ResponseCommit.RetainHeight. +min-retain-blocks = 0 + +# InterBlockCache enables inter-block caching. +inter-block-cache = true + +# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, +# which informs CometBFT what to index. If empty, all events will be indexed. +# +# Example: +# ["message.sender", "message.recipient"] +index-events = [] + +# IavlCacheSize set the size of the iavl tree cache (in number of nodes). +iavl-cache-size = 781250 + +# IAVLDisableFastNode enables or disables the fast node feature of IAVL. +# Default is false. +iavl-disable-fastnode = false + +# AppDBBackend defines the database backend type to use for the application and snapshots DBs. +# An empty string indicates that a fallback will be used. +# The fallback is the db_backend value set in CometBFT's config.toml. +app-db-backend = "" + +############################################################################### +### Telemetry Configuration ### +############################################################################### + +[telemetry] + +# Prefixed with keys to separate services. +service-name = "" + +# Enabled enables the application telemetry functionality. When enabled, +# an in-memory sink is also enabled by default. Operators may also enabled +# other sinks such as Prometheus. +enabled = false + +# Enable prefixing gauge values with hostname. 
+enable-hostname = false + +# Enable adding hostname to labels. +enable-hostname-label = false + +# Enable adding service to labels. +enable-service-label = false + +# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. +prometheus-retention-time = 0 + +# GlobalLabels defines a global set of name/value label tuples applied to all +# metrics emitted using the wrapper functions defined in telemetry package. +# +# Example: +# [["chain_id", "cosmoshub-1"]] +global-labels = [ +] + +# MetricsSink defines the type of metrics sink to use. +metrics-sink = "" + +# StatsdAddr defines the address of a statsd server to send metrics to. +# Only utilized if MetricsSink is set to "statsd" or "dogstatsd". +statsd-addr = "" + +# DatadogHostname defines the hostname to use when emitting metrics to +# Datadog. Only utilized if MetricsSink is set to "dogstatsd". +datadog-hostname = "" + +############################################################################### +### API Configuration ### +############################################################################### + +[api] + +# Enable defines if the API server should be enabled. +enable = false + +# Swagger defines if swagger documentation should automatically be registered. +swagger = false + +# Address defines the API server to listen on. +address = "tcp://localhost:1317" + +# MaxOpenConnections defines the number of maximum open connections. +max-open-connections = 1000 + +# RPCReadTimeout defines the CometBFT RPC read timeout (in seconds). +rpc-read-timeout = 10 + +# RPCWriteTimeout defines the CometBFT RPC write timeout (in seconds). +rpc-write-timeout = 0 + +# RPCMaxBodyBytes defines the CometBFT maximum request body (in bytes). +rpc-max-body-bytes = 1000000 + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
+enabled-unsafe-cors = false + +############################################################################### +### gRPC Configuration ### +############################################################################### + +[grpc] + +# Enable defines if the gRPC server should be enabled. +enable = true + +# Address defines the gRPC server address to bind to. +address = "localhost:9090" + +# MaxRecvMsgSize defines the max message size in bytes the server can receive. +# The default value is 10MB. +max-recv-msg-size = "10485760" + +# MaxSendMsgSize defines the max message size in bytes the server can send. +# The default value is math.MaxInt32. +max-send-msg-size = "2147483647" + +############################################################################### +### gRPC Web Configuration ### +############################################################################### + +[grpc-web] + +# GRPCWebEnable defines if the gRPC-web should be enabled. +# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. +# NOTE: gRPC-Web uses the same address as the API server. +enable = true + +############################################################################### +### State Sync Configuration ### +############################################################################### + +# State sync snapshots allow other nodes to rapidly join the network without replaying historical +# blocks, instead downloading and applying a snapshot of the application state at a given height. +[state-sync] + +# snapshot-interval specifies the block interval at which local state sync snapshots are +# taken (0 to disable). +snapshot-interval = 0 + +# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
+snapshot-keep-recent = 2 + +############################################################################### +### State Streaming ### +############################################################################### + +# Streaming allows nodes to stream state to external systems. +[streaming] + +# streaming.abci specifies the configuration for the ABCI Listener streaming service. +[streaming.abci] + +# List of kv store keys to stream out via gRPC. +# The store key names MUST match the module's StoreKey name. +# +# Example: +# ["acc", "bank", "gov", "staking", "mint"[,...]] +# ["*"] to expose all keys. +keys = [] + +# The plugin name used for streaming via gRPC. +# Streaming is only enabled if this is set. +# Supported plugins: abci +plugin = "" + +# stop-node-on-err specifies whether to stop the node on message delivery error. +stop-node-on-err = true + +############################################################################### +### Mempool ### +############################################################################### + +[mempool] +# Setting max-txs to 0 will allow for a unbounded amount of transactions in the mempool. +# Setting max_txs to negative 1 (-1) will disable transactions from being inserted into the mempool (no-op mempool). +# Setting max_txs to a positive number (> 0) will limit the number of transactions in the mempool, by the specified amount. +# +# Note, this configuration only applies to SDK built-in app-side mempool +# implementations. +max-txs = -1 + +[wasm] +# Smart query gas limit is the max gas to be used in a smart query contract call +query_gas_limit = 3000000 + +# in-memory cache for Wasm contracts. Set to 0 to disable. +# The value is in MiB not bytes +memory_cache_size = 100 + +# Simulation gas limit is the max gas to be used in a tx simulation call. 
+# When not set the consensus max block gas is used instead +# simulation_gas_limit = + +############################################################################### +### EVM Configuration ### +############################################################################### + +[evm] + +# Tracer defines the 'vm.Tracer' type that the EVM will use when the node is run in +# debug mode. To enable tracing use the '--evm.tracer' flag when starting your node. +# Valid types are: json|struct|access_list|markdown +tracer = "" + +# MaxTxGasWanted defines the gas wanted for each eth tx returned in ante handler in check tx mode. +max-tx-gas-wanted = 0 + +############################################################################### +### JSON RPC Configuration ### +############################################################################### + +[json-rpc] + +# Enable defines if the JSONRPC server should be enabled. +enable = false + +# Address defines the EVM RPC HTTP server address to bind to. +address = "127.0.0.1:8545" + +# Address defines the EVM WebSocket server address to bind to. +ws-address = "127.0.0.1:8546" + +# API defines a list of JSON-RPC namespaces that should be enabled +# Example: "eth,txpool,personal,net,debug,web3" +api = "eth,net,web3" + +# GasCap sets a cap on gas that can be used in eth_call/estimateGas (0=infinite). Default: 25,000,000. +gas-cap = 25000000 + +# Allow insecure account unlocking when account-related RPCs are exposed by http +allow-insecure-unlock = true + +# EVMTimeout is the global timeout for eth_call. Default: 5s. +evm-timeout = "5s" + +# TxFeeCap is the global tx-fee cap for send transaction. Default: 1eth. +txfee-cap = 1 + +# FilterCap sets the global cap for total number of filters that can be created +filter-cap = 200 + +# FeeHistoryCap sets the global cap for total number of blocks that can be fetched +feehistory-cap = 100 + +# LogsCap defines the max number of results can be returned from single 'eth_getLogs' query. 
+logs-cap = 10000 + +# BlockRangeCap defines the max block range allowed for 'eth_getLogs' query. +block-range-cap = 10000 + +# HTTPTimeout is the read/write timeout of http json-rpc server. +http-timeout = "30s" + +# HTTPIdleTimeout is the idle timeout of http json-rpc server. +http-idle-timeout = "2m0s" + +# AllowUnprotectedTxs restricts unprotected (non EIP155 signed) transactions to be submitted via +# the node's RPC when the global parameter is disabled. +allow-unprotected-txs = false + +# MaxOpenConnections sets the maximum number of simultaneous connections +# for the server listener. +max-open-connections = 0 + +# EnableIndexer enables the custom transaction indexer for the EVM (ethereum transactions). +enable-indexer = false + +# MetricsAddress defines the EVM Metrics server address to bind to. Pass --metrics in CLI to enable +# Prometheus metrics path: /debug/metrics/prometheus +metrics-address = "127.0.0.1:6065" + +# Upgrade height for fix of revert gas refund logic when transaction reverted. +fix-revert-gas-refund-height = 0 + +############################################################################### +### TLS Configuration ### +############################################################################### + +[tls] + +# Certificate path defines the cert.pem file path for the TLS configuration. +certificate-path = "" + +# Key path defines the key.pem file path for the TLS configuration. +key-path = "" diff --git a/data/validator1/.pchain/config/client.toml b/data/validator1/.pchain/config/client.toml new file mode 100644 index 00000000..02581600 --- /dev/null +++ b/data/validator1/.pchain/config/client.toml @@ -0,0 +1,17 @@ +# This is a TOML config file. 
+# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Client Configuration ### +############################################################################### + +# The network chain ID +chain-id = "localchain_9000-1" +# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) +keyring-backend = "os" +# CLI output format (text|json) +output = "text" +# : to CometBFT RPC interface for this chain +node = "tcp://localhost:26657" +# Transaction broadcasting mode (sync|async) +broadcast-mode = "sync" diff --git a/data/validator1/.pchain/config/config.toml b/data/validator1/.pchain/config/config.toml new file mode 100644 index 00000000..e0acc58f --- /dev/null +++ b/data/validator1/.pchain/config/config.toml @@ -0,0 +1,498 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this. 
+version = "0.38.17" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "Aryas-MacBook-Pro.local" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator 
process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. 
This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum number of requests that can be sent in a batch +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = 10 + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. 
+# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "localhost:6060" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# 
peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +# +# The ideal value will strongly depend on the application. It could roughly be estimated as the +# average size of the mempool multiplied by the average time it takes the application to validate one +# transaction. 
We consider that the ABCI application runs in the same location as the CometBFT binary +# so that the recheck duration is not affected by network delays when making requests and receiving responses. +recheck_timeout = "1s" + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. 
+# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. 
+rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
+timeout_commit = "5s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. 
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index e69de29b..68be9ddf 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -0,0 +1,55 @@ +{ + "generatedAt": "2026-02-27T09:29:04Z", + "contracts": { + "WPC": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", + "Factory": "0xe0b7A8833f77C5728295D489F4B64f9DA236E4C8", + "SwapRouter": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", + "QuoterV2": "0x484aC6ED747090fe8C82c5F10427ccC2F2998930", + "PositionManager": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", + "UEA_PROXY_IMPLEMENTATION": "0x00cb38A885cf8D0B2dDfd19Bd1c04aAAC44C5a86" + }, + "tokens": [ + { + "name": "pETH.eth", + "symbol": "pETH", + "address": "0x69c5560bB765a935C345f507D2adD34253FBe41b", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "USDT.eth", + "symbol": "USDT.eth", + "address": "0xc2055dD3A7Ad875520BdB5c91300F964F7038C73", + "source": "core-contracts", + "decimals": 6 + }, + { + "name": "pETH.base", + "symbol": "pETH.base", + "address": "0x697164dD5f2727a4d6EfcF977dCc080Ff10c7459", 
+ "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pETH.arb", + "symbol": "pETH.arb", + "address": "0x90bFeD13b1D7db6243Dfb554c336b0254F099596", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pBNB", + "symbol": "pBNB", + "address": "0xD19a6d5ed3BBb15B70843152610705ba25fF6df2", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pSOL", + "symbol": "pSOL", + "address": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", + "source": "core-contracts", + "decimals": 9 + } + ] +} diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 665d1ebb..b2155529 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -1050,8 +1050,7 @@ step_add_uregistry_configs() { [[ -f "$CHAIN_CONFIG_PATH" ]] || { log_err "Missing chain config: $CHAIN_CONFIG_PATH"; exit 1; } [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } - local chain_payload token_payload - chain_payload="$(jq -c . "$CHAIN_CONFIG_PATH")" + local token_payload run_registry_tx() { local kind="$1" @@ -1106,8 +1105,24 @@ step_add_uregistry_configs() { done } - log_info "Adding chain config to uregistry" - run_registry_tx "chain" "$chain_payload" + local chain_config_dir chain_file chain_payload chain_count + chain_config_dir="$(dirname "$CHAIN_CONFIG_PATH")" + chain_count=0 + + while IFS= read -r chain_file; do + [[ -f "$chain_file" ]] || continue + chain_payload="$(jq -c . 
"$chain_file")" + log_info "Adding chain config to uregistry: $(basename "$chain_file")" + run_registry_tx "chain" "$chain_payload" + chain_count=$((chain_count + 1)) + done < <(find "$chain_config_dir" -maxdepth 1 -type f -name '*_chain_config.json' | sort) + + if [[ "$chain_count" -eq 0 ]]; then + log_err "No chain config files found in: $chain_config_dir" + exit 1 + fi + + log_ok "Registered $chain_count chain config(s) from $chain_config_dir" local token_json token_file token_addr token_symbol token_name matched_count submitted_files tmp matched_count=0 diff --git a/local-multi-validator/README.md b/local-multi-validator/README.md index f130d579..bb8d6be2 100644 --- a/local-multi-validator/README.md +++ b/local-multi-validator/README.md @@ -37,19 +37,27 @@ docker compose up --build - Auto-builds base image if missing (~15-20 min first time) - Pulls core/universal from cache or builds locally - Starts all 8 validators -- Auto-sets Sepolia `event_start_from` to latest block for all universal validators +- Auto-sets `event_start_from` to latest height/slot for Sepolia, Base Sepolia, Arbitrum Sepolia, BSC testnet, and Solana devnet -### Sepolia Event Start Block +### Event Start Heights/Slots -On `./devnet start`, the script fetches latest Sepolia block height from `https://sepolia.drpc.org` -and injects it into each universal validator config: +On `./devnet start`, the script fetches latest chain heights/slots and injects them into each universal validator config: - `chain_configs["eip155:11155111"].event_start_from = ` +- `chain_configs["eip155:84532"].event_start_from = ` +- `chain_configs["eip155:421614"].event_start_from = ` +- `chain_configs["eip155:97"].event_start_from = ` +- `chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].event_start_from = ` -You can override this manually at startup: +You can override any of them manually at startup: ```bash -SEPOLIA_EVENT_START_FROM=12345678 ./devnet start +SEPOLIA_EVENT_START_FROM=12345678 \ 
+BASE_EVENT_START_FROM=23456789 \ +ARBITRUM_EVENT_START_FROM=34567890 \ +BSC_EVENT_START_FROM=45678901 \ +SOLANA_EVENT_START_FROM=56789012 \ +./devnet start ``` ### I Changed Core Validator Code @@ -152,6 +160,10 @@ The `start` command also supports: | Environment Variable | Description | |----------------------|-------------| | `SEPOLIA_EVENT_START_FROM` | Force universal validators to start monitoring Sepolia from a specific block | +| `BASE_EVENT_START_FROM` | Force universal validators to start monitoring Base Sepolia from a specific block | +| `ARBITRUM_EVENT_START_FROM` | Force universal validators to start monitoring Arbitrum Sepolia from a specific block | +| `BSC_EVENT_START_FROM` | Force universal validators to start monitoring BSC testnet from a specific block | +| `SOLANA_EVENT_START_FROM` | Force universal validators to start monitoring Solana devnet from a specific slot | ## Endpoints diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index 8fdc2fc5..e8ff9e40 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -16,7 +16,16 @@ cd "$SCRIPT_DIR" GCR_REGISTRY="${GCR_REGISTRY:-gcr.io/push-chain-testnet}" CACHE_TAG="${CACHE_TAG:-latest}" SEPOLIA_CHAIN_ID="eip155:11155111" +ARBITRUM_SEPOLIA_CHAIN_ID="eip155:421614" +BASE_SEPOLIA_CHAIN_ID="eip155:84532" +BSC_TESTNET_CHAIN_ID="eip155:97" +SOLANA_DEVNET_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" + SEPOLIA_DEFAULT_RPC_URL="${SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.drpc.org}" +ARBITRUM_SEPOLIA_DEFAULT_RPC_URL="${ARBITRUM_SEPOLIA_DEFAULT_RPC_URL:-https://arbitrum-sepolia.gateway.tenderly.co}" +BASE_SEPOLIA_DEFAULT_RPC_URL="${BASE_SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.base.org}" +BSC_TESTNET_DEFAULT_RPC_URL="${BSC_TESTNET_DEFAULT_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" +SOLANA_DEVNET_DEFAULT_RPC_URL="${SOLANA_DEVNET_DEFAULT_RPC_URL:-https://api.devnet.solana.com}" # ═══════════════════════════════════════════════════════════════════════════════ 
# COLORS @@ -70,7 +79,7 @@ has_buildx() { docker buildx version >/dev/null 2>&1 } -fetch_sepolia_height() { +fetch_evm_height() { local rpc_url="$1" local response response=$(curl -sS -X POST "$rpc_url" \ @@ -87,6 +96,23 @@ fetch_sepolia_height() { echo "$((16#${hex_height#0x}))" } +fetch_solana_slot() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}') + + local slot + slot=$(echo "$response" | jq -r '.result // empty') + + if [ -z "$slot" ] || [ "$slot" = "null" ] || [[ ! "$slot" =~ ^[0-9]+$ ]]; then + return 1 + fi + + echo "$slot" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS HELPERS # ═══════════════════════════════════════════════════════════════════════════════ @@ -433,24 +459,78 @@ cmd_up() { fi local sepolia_start_height="${SEPOLIA_EVENT_START_FROM:-}" + local base_start_height="${BASE_EVENT_START_FROM:-}" + local arbitrum_start_height="${ARBITRUM_EVENT_START_FROM:-}" + local bsc_start_height="${BSC_EVENT_START_FROM:-}" + local solana_start_height="${SOLANA_EVENT_START_FROM:-}" + if [ -z "$sepolia_start_height" ]; then - if sepolia_start_height=$(fetch_sepolia_height "$SEPOLIA_DEFAULT_RPC_URL"); then + if sepolia_start_height=$(fetch_evm_height "$SEPOLIA_DEFAULT_RPC_URL"); then print_status "Using Sepolia event_start_from: $sepolia_start_height" else print_warning "Could not fetch Sepolia latest block from $SEPOLIA_DEFAULT_RPC_URL" - print_warning "Universal validators will use default event_start_from from pushuv config" + print_warning "Sepolia will use default event_start_from from pushuv config" sepolia_start_height="" fi else print_status "Using provided SEPOLIA_EVENT_START_FROM: $sepolia_start_height" fi - if [ -n "$sepolia_start_height" ]; then - SEPOLIA_EVENT_START_FROM="$sepolia_start_height" docker compose up -d + if [ -z 
"$base_start_height" ]; then + if base_start_height=$(fetch_evm_height "$BASE_SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Base Sepolia event_start_from: $base_start_height" + else + print_warning "Could not fetch Base Sepolia latest block from $BASE_SEPOLIA_DEFAULT_RPC_URL" + print_warning "Base Sepolia will use default event_start_from from pushuv config" + base_start_height="" + fi else - docker compose up -d + print_status "Using provided BASE_EVENT_START_FROM: $base_start_height" fi + if [ -z "$arbitrum_start_height" ]; then + if arbitrum_start_height=$(fetch_evm_height "$ARBITRUM_SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Arbitrum Sepolia event_start_from: $arbitrum_start_height" + else + print_warning "Could not fetch Arbitrum Sepolia latest block from $ARBITRUM_SEPOLIA_DEFAULT_RPC_URL" + print_warning "Arbitrum Sepolia will use default event_start_from from pushuv config" + arbitrum_start_height="" + fi + else + print_status "Using provided ARBITRUM_EVENT_START_FROM: $arbitrum_start_height" + fi + + if [ -z "$bsc_start_height" ]; then + if bsc_start_height=$(fetch_evm_height "$BSC_TESTNET_DEFAULT_RPC_URL"); then + print_status "Using BSC testnet event_start_from: $bsc_start_height" + else + print_warning "Could not fetch BSC testnet latest block from $BSC_TESTNET_DEFAULT_RPC_URL" + print_warning "BSC testnet will use default event_start_from from pushuv config" + bsc_start_height="" + fi + else + print_status "Using provided BSC_EVENT_START_FROM: $bsc_start_height" + fi + + if [ -z "$solana_start_height" ]; then + if solana_start_height=$(fetch_solana_slot "$SOLANA_DEVNET_DEFAULT_RPC_URL"); then + print_status "Using Solana devnet event_start_from: $solana_start_height" + else + print_warning "Could not fetch Solana devnet latest slot from $SOLANA_DEVNET_DEFAULT_RPC_URL" + print_warning "Solana devnet will use default event_start_from from pushuv config" + solana_start_height="" + fi + else + print_status "Using provided 
SOLANA_EVENT_START_FROM: $solana_start_height" + fi + + SEPOLIA_EVENT_START_FROM="$sepolia_start_height" \ + BASE_EVENT_START_FROM="$base_start_height" \ + ARBITRUM_EVENT_START_FROM="$arbitrum_start_height" \ + BSC_EVENT_START_FROM="$bsc_start_height" \ + SOLANA_EVENT_START_FROM="$solana_start_height" \ + docker compose up -d + # Auto-push to cache if we built locally (populate cache for team) if [ -n "$built_locally" ] && [ -z "$skip_cache" ]; then auto_push_to_cache diff --git a/local-multi-validator/docker-compose.yml b/local-multi-validator/docker-compose.yml index 1bd2fa29..2c1c39b5 100644 --- a/local-multi-validator/docker-compose.yml +++ b/local-multi-validator/docker-compose.yml @@ -232,6 +232,10 @@ services: - QUERY_PORT=8080 - TSS_ENABLED=true - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-1: @@ -266,6 +270,10 @@ services: - QUERY_PORT=8080 - TSS_ENABLED=true - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-2: @@ -300,6 +308,10 @@ services: - QUERY_PORT=8080 - TSS_ENABLED=true - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-3: @@ -334,6 +346,10 @@ services: - 
QUERY_PORT=8080 - TSS_ENABLED=true - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-4: diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 0ac82001..649d4fc4 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -136,14 +136,26 @@ if [ "$QUERY_PORT" != "8080" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi -# Optionally override Sepolia event start height (set by ./devnet start) -if [ -n "${SEPOLIA_EVENT_START_FROM:-}" ]; then - echo "📍 Setting Sepolia event_start_from: $SEPOLIA_EVENT_START_FROM" - jq --argjson height "$SEPOLIA_EVENT_START_FROM" \ - '.chain_configs["eip155:11155111"].event_start_from = $height' \ +# Optionally override chain event start heights (set by ./devnet start) +set_chain_event_start_from() { + local chain_id="$1" + local chain_label="$2" + local start_height="$3" + + [ -n "$start_height" ] || return 0 + + echo "📍 Setting ${chain_label} event_start_from: $start_height" + jq --arg chain "$chain_id" --argjson height "$start_height" \ + '.chain_configs[$chain].event_start_from = $height' \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" -fi +} + +set_chain_event_start_from "eip155:11155111" "Sepolia" "${SEPOLIA_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:84532" "Base Sepolia" "${BASE_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:421614" "Arbitrum Sepolia" "${ARBITRUM_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:97" 
"BSC testnet" "${BSC_EVENT_START_FROM:-}" +set_chain_event_start_from "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "Solana devnet" "${SOLANA_EVENT_START_FROM:-}" # --------------------------- # === SET CORE VALOPER ADDRESS === diff --git a/x/uexecutor/types/constants.go b/x/uexecutor/types/constants.go index 35d94535..a39b1a50 100644 --- a/x/uexecutor/types/constants.go +++ b/x/uexecutor/types/constants.go @@ -7,7 +7,7 @@ import ( const ( FACTORY_PROXY_ADDRESS_HEX = "0x00000000000000000000000000000000000000eA" - PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" + PROXY_ADMIN_OWNER_ADDRESS_HEX = "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" FACTORY_IMPL_ADDRESS_HEX = "0x00000000000000000000000000000000000000fa" PROXY_ADMIN_ADDRESS_HEX = "0x00000000000000000000000000000000000000AA" ) diff --git a/x/uregistry/types/constants.go b/x/uregistry/types/constants.go index 55f706fe..88e6d1b9 100644 --- a/x/uregistry/types/constants.go +++ b/x/uregistry/types/constants.go @@ -14,7 +14,7 @@ var GATEWAY_METHOD = struct { EVM: struct{ AddFunds string }{AddFunds: "addFunds"}, } -const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" +const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" var ProxyAdminRuntimeBytecode = 
common.FromHex("0x608060405260043610610058575f3560e01c80639623609d116100415780639623609d146100aa578063ad3cb1cc146100bd578063f2fde38b14610112575f80fd5b8063715018a61461005c5780638da5cb5b14610072575b5f80fd5b348015610067575f80fd5b50610070610131565b005b34801561007d575f80fd5b505f5460405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100706100b8366004610351565b610144565b3480156100c8575f80fd5b506101056040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a191906104c6565b34801561011d575f80fd5b5061007061012c3660046104df565b6101d5565b61013961023d565b6101425f61028f565b565b61014c61023d565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef2869034906101a290869086906004016104fa565b5f604051808303818588803b1580156101b9575f80fd5b505af11580156101cb573d5f803e3d5ffd5b5050505050505050565b6101dd61023d565b73ffffffffffffffffffffffffffffffffffffffff8116610231576040517f1e4fbdf70000000000000000000000000000000000000000000000000000000081525f60048201526024015b60405180910390fd5b61023a8161028f565b50565b5f5473ffffffffffffffffffffffffffffffffffffffff163314610142576040517f118cdaa7000000000000000000000000000000000000000000000000000000008152336004820152602401610228565b5f805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff8116811461023a575f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f805f60608486031215610363575f80fd5b833561036e81610303565b9250602084013561037e81610303565b9150604084013567ffffffffffffffff811115610399575f80fd5b8401601f810186136103a9575f80fd5b803567ffffffffffffffff8111156103c3576103c3610324565b6040517fffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561042f5761042f610324565b604052818152828201602001881015610446575f80fd5b816020840160208301375f602083830101528093505050509250925092565b5f81518084525f5b818110156104895760208185018101518683018201520161046d565b505f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f6104d86020830184610465565b9392505050565b5f602082840312156104ef575f80fd5b81356104d881610303565b73ffffffffffffffffffffffffffffffffffffffff83168152604060208201525f6105286040830184610465565b94935050505056fea26469706673582212209ce80139bf41b00bc44c4532122ff649c1e8542240b8a5c13f39af0d72f21b2364736f6c634300081a0033") var PROXY_ADMIN_SLOT = common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103") diff --git a/x/uregistry/types/params.go b/x/uregistry/types/params.go index 548788eb..dc56afee 100755 --- a/x/uregistry/types/params.go +++ b/x/uregistry/types/params.go @@ -8,7 +8,7 @@ import ( func DefaultParams() Params { // TODO: return Params{ - Admin: "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", + Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", } } diff --git a/x/utss/types/params.go b/x/utss/types/params.go index b137d735..a269f2af 100755 --- a/x/utss/types/params.go +++ b/x/utss/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. func DefaultParams() Params { return Params{ - Admin: "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", + Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", } } diff --git a/x/uvalidator/types/params.go b/x/uvalidator/types/params.go index b137d735..a269f2af 100755 --- a/x/uvalidator/types/params.go +++ b/x/uvalidator/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. 
func DefaultParams() Params { return Params{ - Admin: "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", + Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", } } From 8d7d33f0981c3b5116a0ea08fd4389e961e475d3 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Feb 2026 19:19:33 +0530 Subject: [PATCH 13/61] fix : arbitrum rpc url for monitoring gateway transaction in local-multi-validator --- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 5 ++++- local-multi-validator/scripts/setup-universal.sh | 10 ++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 68be9ddf..df19a604 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-02-27T09:29:04Z", + "generatedAt": "2026-02-27T11:51:04Z", "contracts": { "WPC": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", "Factory": "0xe0b7A8833f77C5728295D489F4B64f9DA236E4C8", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index b2155529..3120a017 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -330,11 +330,12 @@ sdk_test_files() { local base_dir="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" local file alt local requested_files=( - "pctx-last-transaction.spec.tx" + "pctx-last-transaction.spec.ts" "send-to-self.spec.ts" "progress-hook-per-tx.spec.ts" "bridge-multicall.spec.ts" "pushchain.spec.ts" + "bridge-hooks.spec.ts" ) for file in "${requested_files[@]}"; do @@ -1405,6 +1406,7 @@ Commands: sdk-test-progress-hook Run progress-hook-per-tx.spec.ts sdk-test-bridge-multicall Run bridge-multicall.spec.ts sdk-test-pushchain Run pushchain.spec.ts + sdk-test-bridge-hooks Run bridge-hooks.spec.ts add-uregistry-configs Submit chain + token config txs via local-multi-validator validator1 record-contract K A Manually record contract key/address record-token N S A Manually record token 
name/symbol/address @@ -1440,6 +1442,7 @@ main() { sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; sdk-test-bridge-multicall) step_run_sdk_test_file "bridge-multicall.spec.ts" ;; sdk-test-pushchain) step_run_sdk_test_file "pushchain.spec.ts" ;; + sdk-test-bridge-hooks) step_run_sdk_test_file "bridge-hooks.spec.ts" ;; add-uregistry-configs) step_add_uregistry_configs ;; record-contract) ensure_deploy_file diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 649d4fc4..45ede69a 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -136,6 +136,16 @@ if [ "$QUERY_PORT" != "8080" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi +# After initialization and before event start from overrides +# Force Arbitrum Sepolia RPC URL to tenderly endpoint +ARBITRUM_CHAIN_ID="eip155:421614" +ARBITRUM_TENDERLY_URL="https://arbitrum-sepolia.gateway.tenderly.co" + +jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_TENDERLY_URL" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + # Optionally override chain event start heights (set by ./devnet start) set_chain_event_start_from() { local chain_id="$1" From 2afe7985348265ac146441bebac3ea9d3d39381f Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Feb 2026 19:51:33 +0530 Subject: [PATCH 14/61] fix : e2e-tests setup : make replace-addresses calculates from the .env PRIVATE_KEY to replace addresses --- e2e-tests/setup.sh | 2 ++ scripts/replace_addresses.sh | 55 ++++++++++++++++++++++++++++-------- x/uregistry/types/params.go | 2 +- x/utss/types/params.go | 2 +- x/uvalidator/types/params.go | 2 +- 5 
files changed, 49 insertions(+), 14 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 3120a017..e5a6718a 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -1365,6 +1365,8 @@ step_configure_universal_core() { } cmd_all() { + (cd "$PUSH_CHAIN_DIR" && make replace-addresses) + (cd "$PUSH_CHAIN_DIR" && make build) step_stop_running_nodes step_devnet step_recover_genesis_key diff --git a/scripts/replace_addresses.sh b/scripts/replace_addresses.sh index 771f8947..4af98e8f 100644 --- a/scripts/replace_addresses.sh +++ b/scripts/replace_addresses.sh @@ -2,24 +2,57 @@ set -euo pipefail -OLD_BECH32="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" -NEW_BECH32="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" -OLD_HEX="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" -NEW_HEX="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" - ROOT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" cd "$ROOT_DIR" -if ! command -v git >/dev/null 2>&1; then - echo "git command not found" >&2 +ENV_FILE="e2e-tests/.env" +if [[ ! -f "$ENV_FILE" ]]; then + echo "e2e-tests/.env not found" >&2 + exit 1 +fi + +PRIVATE_KEY=$(grep '^PRIVATE_KEY=' "$ENV_FILE" | cut -d= -f2 | tr -d '"' | tr -d "'") +if [[ -z "$PRIVATE_KEY" ]]; then + echo "PRIVATE_KEY not found in $ENV_FILE" >&2 exit 1 fi -if ! command -v perl >/dev/null 2>&1; then - echo "perl command not found" >&2 +# Derive EVM address +if ! command -v cast >/dev/null 2>&1; then + echo "cast command not found (install foundry/cast)" >&2 exit 1 fi +EVM_ADDRESS=$(cast wallet address $PRIVATE_KEY) + +# Derive push (cosmos) address +if ! 
command -v $PWD/build/pchaind >/dev/null 2>&1; then + echo "pchaind binary not found in build/ (run make build)" >&2 + exit 1 +fi +PUSH_ADDRESS=$($PWD/build/pchaind debug addr $(echo $EVM_ADDRESS | tr '[:upper:]' '[:lower:]' | sed 's/^0x//') | awk -F': ' '/Bech32 Acc:/ {print $2; exit}') +if [[ -z "$PUSH_ADDRESS" ]]; then + echo "Could not derive push address from $EVM_ADDRESS" >&2 + exit 1 +fi + +echo "Replacing with PUSH_ADDRESS: $PUSH_ADDRESS" +echo "Replacing with EVM_ADDRESS: $EVM_ADDRESS" + +# Replace Admin in params.go files +for f in x/utss/types/params.go x/uregistry/types/params.go x/uvalidator/types/params.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/Admin: \"push1[0-9a-z]+\"/Admin: \"$PUSH_ADDRESS\"/g" "$f" + echo "Updated Admin in $f" + fi +done -git ls-files -z | xargs -0 perl -pi -e "s/\Q$OLD_BECH32\E/$NEW_BECH32/g; s/\Q$OLD_HEX\E/$NEW_HEX/g" +# Replace PROXY_ADMIN_OWNER_ADDRESS in constants.go files +for f in x/uexecutor/types/constants.go x/uregistry/types/constants.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"$EVM_ADDRESS\"/g" "$f" + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS = \"$EVM_ADDRESS\"/g" "$f" + echo "Updated PROXY_ADMIN_OWNER_ADDRESS in $f" + fi +done -echo "Address replacement completed in tracked files." +echo "Address replacement completed." 
diff --git a/x/uregistry/types/params.go b/x/uregistry/types/params.go index dc56afee..5400d4f2 100755 --- a/x/uregistry/types/params.go +++ b/x/uregistry/types/params.go @@ -8,7 +8,7 @@ import ( func DefaultParams() Params { // TODO: return Params{ - Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", + Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", } } diff --git a/x/utss/types/params.go b/x/utss/types/params.go index a269f2af..b5def128 100755 --- a/x/utss/types/params.go +++ b/x/utss/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. func DefaultParams() Params { return Params{ - Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", + Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", } } diff --git a/x/uvalidator/types/params.go b/x/uvalidator/types/params.go index a269f2af..b5def128 100755 --- a/x/uvalidator/types/params.go +++ b/x/uvalidator/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. 
func DefaultParams() Params { return Params{ - Admin: "push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", + Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", } } From 97fec92ec4f3bfde09a443a2edd25d7d915c0d1d Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:44:33 +0530 Subject: [PATCH 15/61] refactor --- .../eth_sepolia/tokens/arb_sepolia_eth.json | 14 ---- .../eth_sepolia/tokens/base_sepolia_eth.json | 14 ---- e2e-tests/.env.bak | 71 +++++++++++++++++++ e2e-tests/deploy_addresses.json | 2 +- 4 files changed, 72 insertions(+), 29 deletions(-) delete mode 100644 config/testnet-donut/eth_sepolia/tokens/arb_sepolia_eth.json delete mode 100644 config/testnet-donut/eth_sepolia/tokens/base_sepolia_eth.json create mode 100644 e2e-tests/.env.bak diff --git a/config/testnet-donut/eth_sepolia/tokens/arb_sepolia_eth.json b/config/testnet-donut/eth_sepolia/tokens/arb_sepolia_eth.json deleted file mode 100644 index 797c5c06..00000000 --- a/config/testnet-donut/eth_sepolia/tokens/arb_sepolia_eth.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "chain": "eip155:421614", - "address": "0x0000000000000000000000000000000000000000", - "name": "pETH.arb", - "symbol": "pETH.arb", - "decimals": 18, - "enabled": true, - "liquidity_cap": "1000000000000000000000000", - "token_type": 1, - "native_representation": { - "denom": "", - "contract_address": "0x90bFeD13b1D7db6243Dfb554c336b0254F099596" - } -} diff --git a/config/testnet-donut/eth_sepolia/tokens/base_sepolia_eth.json b/config/testnet-donut/eth_sepolia/tokens/base_sepolia_eth.json deleted file mode 100644 index 72695522..00000000 --- a/config/testnet-donut/eth_sepolia/tokens/base_sepolia_eth.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "chain": "eip155:84532", - "address": "0x0000000000000000000000000000000000000000", - "name": "pETH.base", - "symbol": "pETH.base", - "decimals": 18, - "enabled": true, - "liquidity_cap": "1000000000000000000000000", - "token_type": 1, - 
"native_representation": { - "denom": "", - "contract_address": "0x697164dD5f2727a4d6EfcF977dCc080Ff10c7459" - } -} diff --git a/e2e-tests/.env.bak b/e2e-tests/.env.bak new file mode 100644 index 00000000..fec3b693 --- /dev/null +++ b/e2e-tests/.env.bak @@ -0,0 +1,71 @@ +# Copy this file to e2e-tests/.env and adjust values. + +# Path to push-chain workspace root. +# Keep this empty to use auto-detection (parent of e2e-tests). +# PUSH_CHAIN_DIR= + +# Local Push RPC +PUSH_RPC_URL=http://localhost:8545 + +# Local chain info +CHAIN_ID=localchain_9000-1 +KEYRING_BACKEND=test + +# Genesis key recovery/funding +GENESIS_KEY_NAME=genesis-acc-1 +GENESIS_KEY_HOME=./e2e-tests/.pchain +# Optional local fallback file. If missing, setup.sh reads accounts from docker core-validator-1 (/tmp/push-accounts/genesis_accounts.json) +GENESIS_ACCOUNTS_JSON=./e2e-tests/genesis_accounts.json + +# Optional: set to skip interactive mnemonic prompt +# GENESIS_MNEMONIC="word1 word2 ..." + +# Address to fund from genesis account +FUND_TO_ADDRESS=push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 +FUND_AMOUNT=1000000000000000000upc +POOL_CREATION_TOPUP_AMOUNT=50000000000000000000upc +GAS_PRICES=100000000000upc + +# EVM private key used by forge/hardhat scripts +PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 + +# External repositories +CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git +CORE_CONTRACTS_BRANCH=node-e2e + +SWAP_AMM_REPO=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git +SWAP_AMM_BRANCH=e2e-push-node + +GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git +GATEWAY_BRANCH=e2e-push-node + +PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git +PUSH_CHAIN_SDK_BRANCH=new-sendUniversalTx + +# push-chain-sdk core .env target path (relative to PUSH_CHAIN_SDK_DIR) +PUSH_CHAIN_SDK_CORE_ENV_PATH=packages/core/.env + +# Local clone layout (outside push-chain directory) 
+E2E_PARENT_DIR=../ +CORE_CONTRACTS_DIR=../push-chain-core-contracts +SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts +GATEWAY_DIR=../push-chain-gateway-contracts +PUSH_CHAIN_SDK_DIR=../push-chain-sdk +PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__ + +# push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) +# Defaults used by setup-sdk when omitted: +# EVM_PRIVATE_KEY <= PRIVATE_KEY +# EVM_RPC <= PUSH_RPC_URL +# PUSH_PRIVATE_KEY<= PRIVATE_KEY +EVM_PRIVATE_KEY=0xddc31465c891443c6e3e5f30ca7808d66f8036163fd0b99f4ed5e05cd261ac7f +EVM_RPC=https://eth-sepolia.public.blastapi.io +SOLANA_RPC_URL=https://api.devnet.solana.com +SOLANA_PRIVATE_KEY=2NPV9cXdRuC8Zydqa3beEJ8xK9N6qgKqXYVYQ2PtpJhNvWrhuPJAZH67NFTW5Q1JsMjgPEsTY3ph2Sd1nGqqcNFv +PUSH_PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 + +# Tracking files +DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json +TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json +TOKEN_CONFIG_PATH=./config/testnet-donut/tokens/eth_sepolia_eth.json +CHAIN_CONFIG_PATH=./config/testnet-donut/chains/eth_sepolia_chain_config.json diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index df19a604..4a38b519 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-02-27T11:51:04Z", + "generatedAt": "2026-03-05T07:13:22Z", "contracts": { "WPC": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", "Factory": "0xe0b7A8833f77C5728295D489F4B64f9DA236E4C8", From f81285311a7d54f54d2878ef592ff49109a1d081 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:52:06 +0530 Subject: [PATCH 16/61] fix: BNB RPC URL, env account funding, replace-addresses script --- e2e-tests/setup.sh | 35 +++++++++++++++++++ .../scripts/setup-universal.sh | 7 ++++ scripts/replace_addresses.sh | 6 +--- 
universalClient/config/default_config.json | 2 +- 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index e5a6718a..121e45bf 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -605,6 +605,40 @@ step_fund_account() { log_ok "Funding transaction submitted" } +step_update_env_fund_to_address() { + require_cmd jq + ENV_FILE="$SCRIPT_DIR/.env" + if [[ ! -f "$ENV_FILE" ]]; then + log_err ".env file not found in e2e-tests folder" + exit 1 + fi + PRIVATE_KEY=$(grep '^PRIVATE_KEY=' "$ENV_FILE" | cut -d= -f2 | tr -d '"' | tr -d "'") + if [[ -z "$PRIVATE_KEY" ]]; then + log_err "PRIVATE_KEY not found in .env" + exit 1 + fi + if ! command -v $PUSH_CHAIN_DIR/build/pchaind >/dev/null 2>&1; then + log_err "pchaind binary not found in build/ (run make build)" + exit 1 + fi + EVM_ADDRESS=$(cast wallet address $PRIVATE_KEY) + COSMOS_ADDRESS=$($PUSH_CHAIN_DIR/build/pchaind debug addr $(echo $EVM_ADDRESS | tr '[:upper:]' '[:lower:]' | sed 's/^0x//') | awk -F': ' '/Bech32 Acc:/ {print $2; exit}') + if [[ -z "$COSMOS_ADDRESS" ]]; then + log_err "Could not derive cosmos address from $EVM_ADDRESS" + exit 1 + fi + if grep -q '^FUND_TO_ADDRESS=' "$ENV_FILE"; then + sed -i.bak "s|^FUND_TO_ADDRESS=.*$|FUND_TO_ADDRESS=$COSMOS_ADDRESS|" "$ENV_FILE" + else + echo "FUND_TO_ADDRESS=$COSMOS_ADDRESS" >> "$ENV_FILE" + fi + # Refresh .env after updating FUND_TO_ADDRESS + set -a + source "$SCRIPT_DIR/.env" + set +a + log_ok "Updated FUND_TO_ADDRESS in .env to $COSMOS_ADDRESS" +} + parse_core_prc20_logs() { local log_file="$1" local current_addr="" @@ -1367,6 +1401,7 @@ step_configure_universal_core() { cmd_all() { (cd "$PUSH_CHAIN_DIR" && make replace-addresses) (cd "$PUSH_CHAIN_DIR" && make build) + step_update_env_fund_to_address step_stop_running_nodes step_devnet step_recover_genesis_key diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 45ede69a..437d739f 100755 --- 
a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -140,12 +140,19 @@ fi # Force Arbitrum Sepolia RPC URL to tenderly endpoint ARBITRUM_CHAIN_ID="eip155:421614" ARBITRUM_TENDERLY_URL="https://arbitrum-sepolia.gateway.tenderly.co" +BSC_TESTNET_CHAIN_ID="eip155:97" +BSC_TESTNET_RPC_URL="${BSC_TESTNET_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_TENDERLY_URL" \ '.chain_configs[$chain].rpc_urls = [$url]' \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +jq --arg chain "$BSC_TESTNET_CHAIN_ID" --arg url "$BSC_TESTNET_RPC_URL" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + # Optionally override chain event start heights (set by ./devnet start) set_chain_event_start_from() { local chain_id="$1" diff --git a/scripts/replace_addresses.sh b/scripts/replace_addresses.sh index 4af98e8f..63ecc00e 100644 --- a/scripts/replace_addresses.sh +++ b/scripts/replace_addresses.sh @@ -29,11 +29,7 @@ if ! 
command -v $PWD/build/pchaind >/dev/null 2>&1; then echo "pchaind binary not found in build/ (run make build)" >&2 exit 1 fi -PUSH_ADDRESS=$($PWD/build/pchaind debug addr $(echo $EVM_ADDRESS | tr '[:upper:]' '[:lower:]' | sed 's/^0x//') | awk -F': ' '/Bech32 Acc:/ {print $2; exit}') -if [[ -z "$PUSH_ADDRESS" ]]; then - echo "Could not derive push address from $EVM_ADDRESS" >&2 - exit 1 -fi +PUSH_ADDRESS=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20 echo "Replacing with PUSH_ADDRESS: $PUSH_ADDRESS" echo "Replacing with EVM_ADDRESS: $EVM_ADDRESS" diff --git a/universalClient/config/default_config.json b/universalClient/config/default_config.json index 9f4542a3..71924b8b 100644 --- a/universalClient/config/default_config.json +++ b/universalClient/config/default_config.json @@ -49,7 +49,7 @@ }, "eip155:97": { "rpc_urls": [ - "https://binance.llamarpc.com" + "https://bsc-testnet-rpc.publicnode.com" ], "cleanup_interval_seconds": 1800, "retention_period_seconds": 43200, From b94a59831006acff7eb53c1f69b40390126fd955 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 13:10:24 +0530 Subject: [PATCH 17/61] fix: e2e-tests token path fix --- e2e-tests/.env.example | 4 ++-- e2e-tests/setup.sh | 20 ++++++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index 8ba45535..c41836bd 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -67,5 +67,5 @@ PUSH_PRIVATE_KEY= # Tracking files DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json -TOKEN_CONFIG_PATH=./config/testnet-donut/tokens/eth_sepolia_eth.json -CHAIN_CONFIG_PATH=./config/testnet-donut/chains/eth_sepolia_chain_config.json +TOKEN_CONFIG_PATH=./config/testnet-donut/eth_sepolia/tokens/eth.json +CHAIN_CONFIG_PATH=./config/testnet-donut/eth_sepolia/chain.json diff --git 
a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 121e45bf..1808e952 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -46,9 +46,9 @@ fi : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" -: "${TOKENS_CONFIG_DIR:=./config/testnet-donut/tokens}" -: "${TOKEN_CONFIG_PATH:=./config/testnet-donut/tokens/eth_sepolia_eth.json}" -: "${CHAIN_CONFIG_PATH:=./config/testnet-donut/chains/eth_sepolia_chain_config.json}" +: "${TOKENS_CONFIG_DIR:=./config/testnet-donut}" +: "${TOKEN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/tokens/eth.json}" +: "${CHAIN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/chain.json}" abs_from_root() { local path="$1" @@ -859,7 +859,7 @@ find_matching_token_config_file() { d_name_np="$(norm_token_key_without_leading_p "$deployed_name")" local file f_sym f_name f_base f_sym_np f_name_np score - for file in "$TOKENS_CONFIG_DIR"/*.json; do + while IFS= read -r file; do [[ -f "$file" ]] || continue f_sym="$(jq -r '.symbol // ""' "$file")" f_name="$(jq -r '.name // ""' "$file")" @@ -885,7 +885,7 @@ find_matching_token_config_file() { best_score=$score best_file="$file" fi - done + done < <(find "$TOKENS_CONFIG_DIR" -type f -path '*/tokens/*.json' | sort) if (( best_score >= 60 )); then printf "%s" "$best_file" @@ -901,6 +901,11 @@ step_update_deployed_token_configs() { exit 1 fi + if ! 
find "$TOKENS_CONFIG_DIR" -type f -path '*/tokens/*.json' | grep -q .; then + log_err "No token config files found under: $TOKENS_CONFIG_DIR" + exit 1 + fi + local used_files="" local updated=0 local token_json token_symbol token_name token_address match_file tmp @@ -1082,7 +1087,6 @@ step_setup_gateway() { step_add_uregistry_configs() { require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq - [[ -f "$CHAIN_CONFIG_PATH" ]] || { log_err "Missing chain config: $CHAIN_CONFIG_PATH"; exit 1; } [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } local token_payload @@ -1141,7 +1145,7 @@ step_add_uregistry_configs() { } local chain_config_dir chain_file chain_payload chain_count - chain_config_dir="$(dirname "$CHAIN_CONFIG_PATH")" + chain_config_dir="$TOKENS_CONFIG_DIR" chain_count=0 while IFS= read -r chain_file; do @@ -1150,7 +1154,7 @@ step_add_uregistry_configs() { log_info "Adding chain config to uregistry: $(basename "$chain_file")" run_registry_tx "chain" "$chain_payload" chain_count=$((chain_count + 1)) - done < <(find "$chain_config_dir" -maxdepth 1 -type f -name '*_chain_config.json' | sort) + done < <(find "$chain_config_dir" -type f \( -name 'chain.json' -o -name '*_chain_config.json' \) | sort) if [[ "$chain_count" -eq 0 ]]; then log_err "No chain config files found in: $chain_config_dir" From 3ac18858eb4f75142fbc55f27786653780527fac Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 13:58:25 +0530 Subject: [PATCH 18/61] feat: sdk automatically changes TESTNET to LOCALNET in account.ts --- e2e-tests/setup.sh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 1808e952..e70c1cb6 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -42,6 +42,7 @@ fi : "${PUSH_CHAIN_SDK_DIR:=$E2E_PARENT_DIR/push-chain-sdk}" : "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__}" : 
"${PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH:=packages/core/src/lib/constants/chain.ts}" +: "${PUSH_CHAIN_SDK_ACCOUNT_TS_PATH:=packages/core/src/lib/universal/account/account.ts}" : "${PUSH_CHAIN_SDK_CORE_ENV_PATH:=packages/core/.env}" : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" @@ -374,15 +375,16 @@ sdk_prepare_test_files_for_localnet() { while IFS= read -r test_file; do [[ -n "$test_file" ]] || continue - perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g' "$test_file" + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$test_file" log_ok "Prepared LOCALNET network replacement in $(basename "$test_file")" done < <(sdk_test_files) } step_setup_push_chain_sdk() { - require_cmd git yarn npm cast jq + require_cmd git yarn npm cast jq perl local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local sdk_account_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_ACCOUNT_TS_PATH" local uea_impl_raw uea_impl synced_localnet_uea clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" "$PUSH_CHAIN_SDK_DIR" @@ -440,6 +442,14 @@ step_setup_push_chain_sdk() { log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" + if [[ ! 
-f "$sdk_account_file" ]]; then + log_err "SDK account file not found: $sdk_account_file" + exit 1 + fi + + perl -0pi -e 's/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$sdk_account_file" + log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET in $sdk_account_file" + log_info "Installing push-chain-sdk dependencies" ( cd "$PUSH_CHAIN_SDK_DIR" From fd59e6e227821c63ef2a6ebf27e12a8efa31aaa3 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:44:03 +0530 Subject: [PATCH 19/61] refactor: important addresses which were changed for e2e testing --- app/upgrades/supply-slash/rebalance.json | 4 +- data/validator1/.pchain/config/app.toml | 341 -------------- data/validator1/.pchain/config/client.toml | 17 - data/validator1/.pchain/config/config.toml | 498 --------------------- e2e-tests/.env.bak | 71 --- local-multi-validator/Dockerfile.unified | 2 +- test/utils/constants.go | 2 +- x/uexecutor/types/constants.go | 2 +- x/uregistry/types/constants.go | 2 +- x/uregistry/types/params.go | 2 +- x/utss/types/params.go | 2 +- x/uvalidator/types/params.go | 2 +- 12 files changed, 9 insertions(+), 936 deletions(-) delete mode 100644 data/validator1/.pchain/config/app.toml delete mode 100644 data/validator1/.pchain/config/client.toml delete mode 100644 data/validator1/.pchain/config/config.toml delete mode 100644 e2e-tests/.env.bak diff --git a/app/upgrades/supply-slash/rebalance.json b/app/upgrades/supply-slash/rebalance.json index 20be3628..1ef536de 100644 --- a/app/upgrades/supply-slash/rebalance.json +++ b/app/upgrades/supply-slash/rebalance.json @@ -1345,7 +1345,7 @@ }, { "address": "push149k2570tyvfdh6ct36furn5yex9k00c3k3ckkj", - "evm_address": "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4", + "evm_address": "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11", "amount_upc": 100004.0, "rebalanced_tokens": 1304.0 }, @@ -1374,7 +1374,7 @@ "rebalanced_tokens": 1304.0 }, { - "address": 
"push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20", + "address": "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", "evm_address": "0x9e510b6120e3cb2a0981b7a77677137108adf529", "amount_upc": 95600.0, "rebalanced_tokens": 1300.0 diff --git a/data/validator1/.pchain/config/app.toml b/data/validator1/.pchain/config/app.toml deleted file mode 100644 index d025d33c..00000000 --- a/data/validator1/.pchain/config/app.toml +++ /dev/null @@ -1,341 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1,0.0001token2). -minimum-gas-prices = "0stake" - -# The maximum gas a query coming over rest/grpc may consume. -# If this is set to zero, the query can consume an unbounded amount of gas. -query-gas-limit = "0" - -# default: the last 362880 states are kept, pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: 2 latest states will be kept; pruning at 10 block intervals. -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. 
-halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from CometBFT. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning CometBFT blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: CometBFT block pruning is dependant on this parameter in conjunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs CometBFT what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -# IavlCacheSize set the size of the iavl tree cache (in number of nodes). -iavl-cache-size = 781250 - -# IAVLDisableFastNode enables or disables the fast node feature of IAVL. -# Default is false. -iavl-disable-fastnode = false - -# AppDBBackend defines the database backend type to use for the application and snapshots DBs. -# An empty string indicates that a fallback will be used. -# The fallback is the db_backend value set in CometBFT's config.toml. 
-app-db-backend = "" - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -# MetricsSink defines the type of metrics sink to use. -metrics-sink = "" - -# StatsdAddr defines the address of a statsd server to send metrics to. -# Only utilized if MetricsSink is set to "statsd" or "dogstatsd". -statsd-addr = "" - -# DatadogHostname defines the hostname to use when emitting metrics to -# Datadog. Only utilized if MetricsSink is set to "dogstatsd". -datadog-hostname = "" - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://localhost:1317" - -# MaxOpenConnections defines the number of maximum open connections. 
-max-open-connections = 1000 - -# RPCReadTimeout defines the CometBFT RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the CometBFT RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the CometBFT maximum request body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "localhost:9090" - -# MaxRecvMsgSize defines the max message size in bytes the server can receive. -# The default value is 10MB. -max-recv-msg-size = "10485760" - -# MaxSendMsgSize defines the max message size in bytes the server can send. -# The default value is math.MaxInt32. -max-send-msg-size = "2147483647" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -# NOTE: gRPC-Web uses the same address as the API server. -enable = true - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. 
-[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 - -############################################################################### -### State Streaming ### -############################################################################### - -# Streaming allows nodes to stream state to external systems. -[streaming] - -# streaming.abci specifies the configuration for the ABCI Listener streaming service. -[streaming.abci] - -# List of kv store keys to stream out via gRPC. -# The store key names MUST match the module's StoreKey name. -# -# Example: -# ["acc", "bank", "gov", "staking", "mint"[,...]] -# ["*"] to expose all keys. -keys = [] - -# The plugin name used for streaming via gRPC. -# Streaming is only enabled if this is set. -# Supported plugins: abci -plugin = "" - -# stop-node-on-err specifies whether to stop the node on message delivery error. -stop-node-on-err = true - -############################################################################### -### Mempool ### -############################################################################### - -[mempool] -# Setting max-txs to 0 will allow for a unbounded amount of transactions in the mempool. -# Setting max_txs to negative 1 (-1) will disable transactions from being inserted into the mempool (no-op mempool). -# Setting max_txs to a positive number (> 0) will limit the number of transactions in the mempool, by the specified amount. -# -# Note, this configuration only applies to SDK built-in app-side mempool -# implementations. -max-txs = -1 - -[wasm] -# Smart query gas limit is the max gas to be used in a smart query contract call -query_gas_limit = 3000000 - -# in-memory cache for Wasm contracts. Set to 0 to disable. 
-# The value is in MiB not bytes -memory_cache_size = 100 - -# Simulation gas limit is the max gas to be used in a tx simulation call. -# When not set the consensus max block gas is used instead -# simulation_gas_limit = - -############################################################################### -### EVM Configuration ### -############################################################################### - -[evm] - -# Tracer defines the 'vm.Tracer' type that the EVM will use when the node is run in -# debug mode. To enable tracing use the '--evm.tracer' flag when starting your node. -# Valid types are: json|struct|access_list|markdown -tracer = "" - -# MaxTxGasWanted defines the gas wanted for each eth tx returned in ante handler in check tx mode. -max-tx-gas-wanted = 0 - -############################################################################### -### JSON RPC Configuration ### -############################################################################### - -[json-rpc] - -# Enable defines if the JSONRPC server should be enabled. -enable = false - -# Address defines the EVM RPC HTTP server address to bind to. -address = "127.0.0.1:8545" - -# Address defines the EVM WebSocket server address to bind to. -ws-address = "127.0.0.1:8546" - -# API defines a list of JSON-RPC namespaces that should be enabled -# Example: "eth,txpool,personal,net,debug,web3" -api = "eth,net,web3" - -# GasCap sets a cap on gas that can be used in eth_call/estimateGas (0=infinite). Default: 25,000,000. -gas-cap = 25000000 - -# Allow insecure account unlocking when account-related RPCs are exposed by http -allow-insecure-unlock = true - -# EVMTimeout is the global timeout for eth_call. Default: 5s. -evm-timeout = "5s" - -# TxFeeCap is the global tx-fee cap for send transaction. Default: 1eth. 
-txfee-cap = 1 - -# FilterCap sets the global cap for total number of filters that can be created -filter-cap = 200 - -# FeeHistoryCap sets the global cap for total number of blocks that can be fetched -feehistory-cap = 100 - -# LogsCap defines the max number of results can be returned from single 'eth_getLogs' query. -logs-cap = 10000 - -# BlockRangeCap defines the max block range allowed for 'eth_getLogs' query. -block-range-cap = 10000 - -# HTTPTimeout is the read/write timeout of http json-rpc server. -http-timeout = "30s" - -# HTTPIdleTimeout is the idle timeout of http json-rpc server. -http-idle-timeout = "2m0s" - -# AllowUnprotectedTxs restricts unprotected (non EIP155 signed) transactions to be submitted via -# the node's RPC when the global parameter is disabled. -allow-unprotected-txs = false - -# MaxOpenConnections sets the maximum number of simultaneous connections -# for the server listener. -max-open-connections = 0 - -# EnableIndexer enables the custom transaction indexer for the EVM (ethereum transactions). -enable-indexer = false - -# MetricsAddress defines the EVM Metrics server address to bind to. Pass --metrics in CLI to enable -# Prometheus metrics path: /debug/metrics/prometheus -metrics-address = "127.0.0.1:6065" - -# Upgrade height for fix of revert gas refund logic when transaction reverted. -fix-revert-gas-refund-height = 0 - -############################################################################### -### TLS Configuration ### -############################################################################### - -[tls] - -# Certificate path defines the cert.pem file path for the TLS configuration. -certificate-path = "" - -# Key path defines the key.pem file path for the TLS configuration. 
-key-path = "" diff --git a/data/validator1/.pchain/config/client.toml b/data/validator1/.pchain/config/client.toml deleted file mode 100644 index 02581600..00000000 --- a/data/validator1/.pchain/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "localchain_9000-1" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to CometBFT RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async) -broadcast-mode = "sync" diff --git a/data/validator1/.pchain/config/config.toml b/data/validator1/.pchain/config/config.toml deleted file mode 100644 index e0acc58f..00000000 --- a/data/validator1/.pchain/config/config.toml +++ /dev/null @@ -1,498 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable -# or --home cmd flag. - -# The version of the CometBFT binary that created or -# last modified the config file. Do not modify this. 
-version = "0.38.17" - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the CometBFT binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "Aryas-MacBook-Pro.local" - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for CometBFT to listen on for -# connections from an external PrivValidator 
process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://127.0.0.1:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. 
This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behavior. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum number of requests that can be sent in a batch -# If the value is set to '0' (zero-value), then no maximum batch size will be -# enforced for a JSON-RPC batch request. -max_request_batch_size = 10 - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. 
-# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial. If empty, will use the same -# port as the laddr, and will introspect on the listener to figure out the -# address. IP and port are required. Example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# 
peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -# The type of mempool for this node to use. -# -# Possible types: -# - "flood" : concurrent linked list mempool with flooding gossip protocol -# (default) -# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible -# for storing, disseminating and proposing txs). "create_empty_blocks=false" is -# not supported. -type = "flood" - -# Recheck (default: true) defines whether CometBFT should recheck the -# validity for all remaining transaction in the mempool after a block. -# Since a block affects the application state, some transactions in the -# mempool may become invalid. If this does not apply to your application, -# you can disable rechecking. -recheck = true - -# recheck_timeout is the time the application has during the rechecking process -# to return CheckTx responses, once all requests have been sent. Responses that -# arrive after the timeout expires are discarded. It only applies to -# non-local ABCI clients and when recheck is enabled. -# -# The ideal value will strongly depend on the application. It could roughly be estimated as the -# average size of the mempool multiplied by the average time it takes the application to validate one -# transaction. 
We consider that the ABCI application runs in the same location as the CometBFT binary -# so that the recheck duration is not affected by network delays when making requests and receiving responses. -recheck_timeout = "1s" - -# Broadcast (default: true) defines whether the mempool should relay -# transactions to other peers. Setting this to false will stop the mempool -# from relaying transactions to other peers until they are included in a -# block. In other words, if Broadcast is disabled, only the peer you send -# the tx to will see it until it is included in a block. -broadcast = true - -# WalPath (default: "") configures the location of the Write Ahead Log -# (WAL) for the mempool. The WAL is disabled by default. To enable, set -# WalPath to where you want the WAL to be written (e.g. -# "data/mempool.wal"). -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -# Experimental parameters to limit gossiping txs to up to the specified number of peers. -# We use two independent upper values for persistent and non-persistent peers. 
-# Unconditional peers are not affected by this feature. -# If we are connected to more than the specified number of persistent peers, only send txs to -# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those -# persistent peers disconnects, activate another persistent peer. -# Similarly for non-persistent peers, with an upper limit of -# ExperimentalMaxGossipConnectionsToNonPersistentPeers. -# If set to 0, the feature is disabled for the corresponding group of peers, that is, the -# number of active connections to that group of peers is not bounded. -# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental -# performance results using the default P2P configuration. -experimental_max_gossip_connections_to_persistent_peers = 0 -experimental_max_gossip_connections_to_non_persistent_peers = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. 
-rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Block Sync Configuration Options ### -####################################################### -[blocksync] - -# Block Sync version to use: -# -# In v0.37, v1 and v2 of the block sync protocols were deprecated. -# Please use v0 instead. -# -# 1) "v0" - the default block sync implementation -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "3s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). 
-timeout_commit = "5s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Storage Configuration Options ### -####################################################### -[storage] - -# Set to true to discard ABCI responses from the state store, which can save a -# considerable amount of disk space. Set to false to ensure ABCI responses are -# persisted. ABCI responses are required for /block_results RPC queries, and to -# reindex events in the command-line tool. -discard_abci_responses = false - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -# 3) "psql" - the indexer services backed by PostgreSQL. 
-# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -# The PostgreSQL connection configuration, the connection format: -# postgresql://:@:/? -psql-conn = "" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "cometbft" diff --git a/e2e-tests/.env.bak b/e2e-tests/.env.bak deleted file mode 100644 index fec3b693..00000000 --- a/e2e-tests/.env.bak +++ /dev/null @@ -1,71 +0,0 @@ -# Copy this file to e2e-tests/.env and adjust values. - -# Path to push-chain workspace root. -# Keep this empty to use auto-detection (parent of e2e-tests). -# PUSH_CHAIN_DIR= - -# Local Push RPC -PUSH_RPC_URL=http://localhost:8545 - -# Local chain info -CHAIN_ID=localchain_9000-1 -KEYRING_BACKEND=test - -# Genesis key recovery/funding -GENESIS_KEY_NAME=genesis-acc-1 -GENESIS_KEY_HOME=./e2e-tests/.pchain -# Optional local fallback file. If missing, setup.sh reads accounts from docker core-validator-1 (/tmp/push-accounts/genesis_accounts.json) -GENESIS_ACCOUNTS_JSON=./e2e-tests/genesis_accounts.json - -# Optional: set to skip interactive mnemonic prompt -# GENESIS_MNEMONIC="word1 word2 ..." 
- -# Address to fund from genesis account -FUND_TO_ADDRESS=push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 -FUND_AMOUNT=1000000000000000000upc -POOL_CREATION_TOPUP_AMOUNT=50000000000000000000upc -GAS_PRICES=100000000000upc - -# EVM private key used by forge/hardhat scripts -PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 - -# External repositories -CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git -CORE_CONTRACTS_BRANCH=node-e2e - -SWAP_AMM_REPO=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git -SWAP_AMM_BRANCH=e2e-push-node - -GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git -GATEWAY_BRANCH=e2e-push-node - -PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git -PUSH_CHAIN_SDK_BRANCH=new-sendUniversalTx - -# push-chain-sdk core .env target path (relative to PUSH_CHAIN_SDK_DIR) -PUSH_CHAIN_SDK_CORE_ENV_PATH=packages/core/.env - -# Local clone layout (outside push-chain directory) -E2E_PARENT_DIR=../ -CORE_CONTRACTS_DIR=../push-chain-core-contracts -SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts -GATEWAY_DIR=../push-chain-gateway-contracts -PUSH_CHAIN_SDK_DIR=../push-chain-sdk -PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__ - -# push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) -# Defaults used by setup-sdk when omitted: -# EVM_PRIVATE_KEY <= PRIVATE_KEY -# EVM_RPC <= PUSH_RPC_URL -# PUSH_PRIVATE_KEY<= PRIVATE_KEY -EVM_PRIVATE_KEY=0xddc31465c891443c6e3e5f30ca7808d66f8036163fd0b99f4ed5e05cd261ac7f -EVM_RPC=https://eth-sepolia.public.blastapi.io -SOLANA_RPC_URL=https://api.devnet.solana.com -SOLANA_PRIVATE_KEY=2NPV9cXdRuC8Zydqa3beEJ8xK9N6qgKqXYVYQ2PtpJhNvWrhuPJAZH67NFTW5Q1JsMjgPEsTY3ph2Sd1nGqqcNFv -PUSH_PRIVATE_KEY=0x0dfb3d814afd8d0bf7a6010e8dd2b6ac835cabe4da9e2c1e80c6a14df3994dd4 - -# Tracking files -DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json 
-TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json -TOKEN_CONFIG_PATH=./config/testnet-donut/tokens/eth_sepolia_eth.json -CHAIN_CONFIG_PATH=./config/testnet-donut/chains/eth_sepolia_chain_config.json diff --git a/local-multi-validator/Dockerfile.unified b/local-multi-validator/Dockerfile.unified index 0ff6edc8..070594ad 100644 --- a/local-multi-validator/Dockerfile.unified +++ b/local-multi-validator/Dockerfile.unified @@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/go/pkg/mod,id=go-mod \ go mod tidy # Replace admin addresses for local-multi-validator setup -RUN sed -i 's/push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20/push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20/g' \ +RUN sed -i 's/push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a/push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20/g' \ ./x/uregistry/types/params.go \ ./x/utss/types/params.go \ ./x/uvalidator/types/params.go diff --git a/test/utils/constants.go b/test/utils/constants.go index 9123f790..d8ae6d7e 100644 --- a/test/utils/constants.go +++ b/test/utils/constants.go @@ -44,7 +44,7 @@ func GetDefaultAddresses() Addresses { ExternalUSDCAddr: common.HexToAddress("0x0000000000000000000000000000000000000e07"), UniversalGatewayPCAddr: common.HexToAddress("0x00000000000000000000000000000000000000C1"), MigratedUEAAddr: common.HexToAddress("0x0000000000000000000000000000000000000d08"), - DefaultTestAddr: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", + DefaultTestAddr: "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11", CosmosTestAddr: "cosmos18pjnzwr9xdnx2vnpv5mxywfnv56xxef5cludl5", TargetAddr: "\x86i\xbe\xd1!\xfe\xfa=\x9c\xf2\x82\x12s\xf4\x89\xe7\x17̩]", TargetAddr2: "0x527F3692F5C53CfA83F7689885995606F93b6164", diff --git a/x/uexecutor/types/constants.go b/x/uexecutor/types/constants.go index a39b1a50..37ffc74f 100644 --- a/x/uexecutor/types/constants.go +++ b/x/uexecutor/types/constants.go @@ -7,7 +7,7 @@ import ( const ( FACTORY_PROXY_ADDRESS_HEX = "0x00000000000000000000000000000000000000eA" - 
PROXY_ADMIN_OWNER_ADDRESS_HEX = "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" + PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11" FACTORY_IMPL_ADDRESS_HEX = "0x00000000000000000000000000000000000000fa" PROXY_ADMIN_ADDRESS_HEX = "0x00000000000000000000000000000000000000AA" ) diff --git a/x/uregistry/types/constants.go b/x/uregistry/types/constants.go index 88e6d1b9..53c06eb5 100644 --- a/x/uregistry/types/constants.go +++ b/x/uregistry/types/constants.go @@ -14,7 +14,7 @@ var GATEWAY_METHOD = struct { EVM: struct{ AddFunds string }{AddFunds: "addFunds"}, } -const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" +const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11" var ProxyAdminRuntimeBytecode = common.FromHex("0x608060405260043610610058575f3560e01c80639623609d116100415780639623609d146100aa578063ad3cb1cc146100bd578063f2fde38b14610112575f80fd5b8063715018a61461005c5780638da5cb5b14610072575b5f80fd5b348015610067575f80fd5b50610070610131565b005b34801561007d575f80fd5b505f5460405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100706100b8366004610351565b610144565b3480156100c8575f80fd5b506101056040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a191906104c6565b34801561011d575f80fd5b5061007061012c3660046104df565b6101d5565b61013961023d565b6101425f61028f565b565b61014c61023d565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef2869034906101a290869086906004016104fa565b5f604051808303818588803b1580156101b9575f80fd5b505af11580156101cb573d5f803e3d5ffd5b5050505050505050565b6101dd61023d565b73ffffffffffffffffffffffffffffffffffffffff8116610231576040517f1e4fbdf70000000000000000000000000000000000000000000000000000000081525f60048201526024015b60405180910390fd5b61023a8161028f565b50565b5f5473ffffffffffffffffffffffffffffffffffffffff
163314610142576040517f118cdaa7000000000000000000000000000000000000000000000000000000008152336004820152602401610228565b5f805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff8116811461023a575f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f805f60608486031215610363575f80fd5b833561036e81610303565b9250602084013561037e81610303565b9150604084013567ffffffffffffffff811115610399575f80fd5b8401601f810186136103a9575f80fd5b803567ffffffffffffffff8111156103c3576103c3610324565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561042f5761042f610324565b604052818152828201602001881015610446575f80fd5b816020840160208301375f602083830101528093505050509250925092565b5f81518084525f5b818110156104895760208185018101518683018201520161046d565b505f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f6104d86020830184610465565b9392505050565b5f602082840312156104ef575f80fd5b81356104d881610303565b73ffffffffffffffffffffffffffffffffffffffff83168152604060208201525f6105286040830184610465565b94935050505056fea26469706673582212209ce80139bf41b00bc44c4532122ff649c1e8542240b8a5c13f39af0d72f21b2364736f6c634300081a0033") var PROXY_ADMIN_SLOT = common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103") diff --git a/x/uregistry/types/params.go b/x/uregistry/types/params.go index 5400d4f2..548788eb 100755 --- a/x/uregistry/types/params.go +++ b/x/uregistry/types/params.go @@ -8,7 +8,7 @@ import ( func DefaultParams() Params { // TODO: return Params{ - Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", + Admin: 
"push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", } } diff --git a/x/utss/types/params.go b/x/utss/types/params.go index b5def128..b137d735 100755 --- a/x/utss/types/params.go +++ b/x/utss/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. func DefaultParams() Params { return Params{ - Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", + Admin: "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", } } diff --git a/x/uvalidator/types/params.go b/x/uvalidator/types/params.go index b5def128..b137d735 100755 --- a/x/uvalidator/types/params.go +++ b/x/uvalidator/types/params.go @@ -7,7 +7,7 @@ import ( // DefaultParams returns default module parameters. func DefaultParams() Params { return Params{ - Admin: "push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60", + Admin: "push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a", } } From 782bf3b5f32fcc4e49d30f35c7f6dfdcae7e2d7d Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:51:11 +0530 Subject: [PATCH 20/61] refactor: minor clean ups --- config/testnet-donut/solana_devnet/tokens/sol.json | 2 +- e2e-tests/.gitignore | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/config/testnet-donut/solana_devnet/tokens/sol.json b/config/testnet-donut/solana_devnet/tokens/sol.json index 68c8acfe..05f65f7f 100644 --- a/config/testnet-donut/solana_devnet/tokens/sol.json +++ b/config/testnet-donut/solana_devnet/tokens/sol.json @@ -9,6 +9,6 @@ "token_type": 4, "native_representation": { "denom": "", - "contract_address": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011" + "contract_address": "0x5D525Df2bD99a6e7ec58b76aF2fd95F39874EBed" } } diff --git a/e2e-tests/.gitignore b/e2e-tests/.gitignore index ef853ca1..4ea7f858 100644 --- a/e2e-tests/.gitignore +++ b/e2e-tests/.gitignore @@ -1,3 +1,2 @@ .env -logs/ -repos/ +logs/ \ No newline at end of file From e2f4f6ccf5c207ca51044600e92bc3af3b99b8a6 Mon Sep 17 00:00:00 2001 From: Arya 
Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 18:46:21 +0530 Subject: [PATCH 21/61] fix: Push localnet allocation in sdk --- e2e-tests/setup.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index e70c1cb6..7541d63c 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -447,8 +447,14 @@ step_setup_push_chain_sdk() { exit 1 fi - perl -0pi -e 's/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$sdk_account_file" - log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET in $sdk_account_file" + perl -0pi -e ' + s{(function\s+convertExecutorToOriginAccount\b.*?\{)(.*?)(\n\})}{ + my ($head, $body, $tail) = ($1, $2, $3); + $body =~ s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + "$head$body$tail"; + }gse; + ' "$sdk_account_file" + log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" log_info "Installing push-chain-sdk dependencies" ( From ae14e680c7c779c0a03ffe6cc785d6f03137e20c Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 18:47:08 +0530 Subject: [PATCH 22/61] refactor: minor cleaning --- e2e-tests/deploy_addresses.json | 55 --------------------------------- 1 file changed, 55 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 4a38b519..e69de29b 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,55 +0,0 @@ -{ - "generatedAt": "2026-03-05T07:13:22Z", - "contracts": { - "WPC": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", - "Factory": "0xe0b7A8833f77C5728295D489F4B64f9DA236E4C8", - "SwapRouter": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", - "QuoterV2": "0x484aC6ED747090fe8C82c5F10427ccC2F2998930", - "PositionManager": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", - "UEA_PROXY_IMPLEMENTATION": 
"0x00cb38A885cf8D0B2dDfd19Bd1c04aAAC44C5a86" - }, - "tokens": [ - { - "name": "pETH.eth", - "symbol": "pETH", - "address": "0x69c5560bB765a935C345f507D2adD34253FBe41b", - "source": "core-contracts", - "decimals": 18 - }, - { - "name": "USDT.eth", - "symbol": "USDT.eth", - "address": "0xc2055dD3A7Ad875520BdB5c91300F964F7038C73", - "source": "core-contracts", - "decimals": 6 - }, - { - "name": "pETH.base", - "symbol": "pETH.base", - "address": "0x697164dD5f2727a4d6EfcF977dCc080Ff10c7459", - "source": "core-contracts", - "decimals": 18 - }, - { - "name": "pETH.arb", - "symbol": "pETH.arb", - "address": "0x90bFeD13b1D7db6243Dfb554c336b0254F099596", - "source": "core-contracts", - "decimals": 18 - }, - { - "name": "pBNB", - "symbol": "pBNB", - "address": "0xD19a6d5ed3BBb15B70843152610705ba25fF6df2", - "source": "core-contracts", - "decimals": 18 - }, - { - "name": "pSOL", - "symbol": "pSOL", - "address": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", - "source": "core-contracts", - "decimals": 9 - } - ] -} From 132b5f96aa6b0a14299221b15d873b188809ccc0 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 5 Mar 2026 18:56:25 +0530 Subject: [PATCH 23/61] fix: some more address adjustments --- test/utils/constants.go | 2 +- x/uexecutor/types/constants.go | 2 +- x/uregistry/types/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/utils/constants.go b/test/utils/constants.go index d8ae6d7e..9123f790 100644 --- a/test/utils/constants.go +++ b/test/utils/constants.go @@ -44,7 +44,7 @@ func GetDefaultAddresses() Addresses { ExternalUSDCAddr: common.HexToAddress("0x0000000000000000000000000000000000000e07"), UniversalGatewayPCAddr: common.HexToAddress("0x00000000000000000000000000000000000000C1"), MigratedUEAAddr: common.HexToAddress("0x0000000000000000000000000000000000000d08"), - DefaultTestAddr: "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11", + DefaultTestAddr: 
"0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", CosmosTestAddr: "cosmos18pjnzwr9xdnx2vnpv5mxywfnv56xxef5cludl5", TargetAddr: "\x86i\xbe\xd1!\xfe\xfa=\x9c\xf2\x82\x12s\xf4\x89\xe7\x17̩]", TargetAddr2: "0x527F3692F5C53CfA83F7689885995606F93b6164", diff --git a/x/uexecutor/types/constants.go b/x/uexecutor/types/constants.go index 37ffc74f..35d94535 100644 --- a/x/uexecutor/types/constants.go +++ b/x/uexecutor/types/constants.go @@ -7,7 +7,7 @@ import ( const ( FACTORY_PROXY_ADDRESS_HEX = "0x00000000000000000000000000000000000000eA" - PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11" + PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" FACTORY_IMPL_ADDRESS_HEX = "0x00000000000000000000000000000000000000fa" PROXY_ADMIN_ADDRESS_HEX = "0x00000000000000000000000000000000000000AA" ) diff --git a/x/uregistry/types/constants.go b/x/uregistry/types/constants.go index 53c06eb5..55f706fe 100644 --- a/x/uregistry/types/constants.go +++ b/x/uregistry/types/constants.go @@ -14,7 +14,7 @@ var GATEWAY_METHOD = struct { EVM: struct{ AddFunds string }{AddFunds: "addFunds"}, } -const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96caa79eb2312dbeb0b8e93c1ce84c98b67bf11" +const PROXY_ADMIN_OWNER_ADDRESS_HEX = "0xa96CaA79eb2312DbEb0B8E93c1Ce84C98b67bF11" var ProxyAdminRuntimeBytecode = 
common.FromHex("0x608060405260043610610058575f3560e01c80639623609d116100415780639623609d146100aa578063ad3cb1cc146100bd578063f2fde38b14610112575f80fd5b8063715018a61461005c5780638da5cb5b14610072575b5f80fd5b348015610067575f80fd5b50610070610131565b005b34801561007d575f80fd5b505f5460405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100706100b8366004610351565b610144565b3480156100c8575f80fd5b506101056040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a191906104c6565b34801561011d575f80fd5b5061007061012c3660046104df565b6101d5565b61013961023d565b6101425f61028f565b565b61014c61023d565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef2869034906101a290869086906004016104fa565b5f604051808303818588803b1580156101b9575f80fd5b505af11580156101cb573d5f803e3d5ffd5b5050505050505050565b6101dd61023d565b73ffffffffffffffffffffffffffffffffffffffff8116610231576040517f1e4fbdf70000000000000000000000000000000000000000000000000000000081525f60048201526024015b60405180910390fd5b61023a8161028f565b50565b5f5473ffffffffffffffffffffffffffffffffffffffff163314610142576040517f118cdaa7000000000000000000000000000000000000000000000000000000008152336004820152602401610228565b5f805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff8116811461023a575f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f805f60608486031215610363575f80fd5b833561036e81610303565b9250602084013561037e81610303565b9150604084013567ffffffffffffffff811115610399575f80fd5b8401601f810186136103a9575f80fd5b803567ffffffffffffffff8111156103c3576103c3610324565b6040517fffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561042f5761042f610324565b604052818152828201602001881015610446575f80fd5b816020840160208301375f602083830101528093505050509250925092565b5f81518084525f5b818110156104895760208185018101518683018201520161046d565b505f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f6104d86020830184610465565b9392505050565b5f602082840312156104ef575f80fd5b81356104d881610303565b73ffffffffffffffffffffffffffffffffffffffff83168152604060208201525f6105286040830184610465565b94935050505056fea26469706673582212209ce80139bf41b00bc44c4532122ff649c1e8542240b8a5c13f39af0d72f21b2364736f6c634300081a0033") var PROXY_ADMIN_SLOT = common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103") From 9838c8e08c6deb2392072566d79573ab4174cc72 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 20 Mar 2026 21:35:15 +0530 Subject: [PATCH 24/61] local external chain environment setup command added to e2e-tests setup --- e2e-tests/setup.sh | 156 ++++++++++++++++++ local-multi-validator/devnet | 53 +++++- local-multi-validator/docker-compose.yml | 20 +++ .../scripts/setup-universal.sh | 49 ++++++ 4 files changed, 274 insertions(+), 4 deletions(-) diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 7541d63c..a02008ce 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -16,6 +16,7 @@ fi : "${PUSH_CHAIN_DIR:=$PUSH_CHAIN_DIR_DEFAULT}" : "${PUSH_RPC_URL:=http://localhost:8545}" : "${CHAIN_ID:=localchain_9000-1}" +: "${TESTING_ENV:=}" : "${KEYRING_BACKEND:=test}" : "${GENESIS_KEY_NAME:=genesis-acc-1}" : "${GENESIS_KEY_HOME:=./e2e-tests/.pchain}" @@ -89,6 +90,23 @@ log_ok() { printf "%b\n" "${green}✓${nc} $*"; } log_warn() { printf "%b\n" "${yellow}!${nc} $*"; } log_err() { printf "%b\n" "${red}x${nc} $*"; } 
+ensure_testing_env_var_in_env_file() { + mkdir -p "$(dirname "$ENV_FILE")" + + if [[ ! -f "$ENV_FILE" ]]; then + printf "TESTING_ENV=\n" >"$ENV_FILE" + return + fi + + if ! grep -Eq '^TESTING_ENV=' "$ENV_FILE"; then + printf "\nTESTING_ENV=\n" >>"$ENV_FILE" + fi +} + +is_local_testing_env() { + [[ "${TESTING_ENV:-}" == "LOCAL" ]] +} + get_genesis_accounts_json() { if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then cat "$GENESIS_ACCOUNTS_JSON" @@ -523,6 +541,123 @@ step_devnet() { log_ok "Devnet is up" } +step_setup_environment() { + if ! is_local_testing_env; then + log_info "TESTING_ENV is not LOCAL, skipping setup-environment" + return 0 + fi + + require_cmd anvil cast docker jq + + local sepolia_host_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + local arbitrum_host_rpc="${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" + local base_host_rpc="${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" + local bsc_host_rpc="${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" + + local uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-http://host.docker.internal:9545}" + local uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-http://host.docker.internal:9546}" + local uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" + local uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" + + start_anvil_fork() { + local label="$1" + local port="$2" + local chain_id="$3" + local fork_url="$4" + + if pgrep -f "anvil --port $port --chain-id $chain_id" >/dev/null 2>&1; then + log_ok "Anvil $label already running on port $port" + return 0 + fi + + log_info "Starting anvil $label on port $port" + nohup anvil --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ + >"$LOG_DIR/anvil_${label}.log" 2>&1 & + } + + wait_for_block_number() { + local label="$1" + local rpc_url="$2" + local latest="" + local i + for i in {1..30}; do + latest="$(cast block-number --rpc-url "$rpc_url" 2>/dev/null || true)" + 
latest="$(echo "$latest" | tr -d '[:space:]')" + if [[ "$latest" =~ ^[0-9]+$ ]]; then + printf "%s" "$latest" + return 0 + fi + sleep 1 + done + + log_err "Could not read latest block number from $label anvil at $rpc_url" + return 1 + } + + start_anvil_fork "sepolia" "9545" "11155111" "https://ethereum-sepolia-rpc.publicnode.com" + start_anvil_fork "arbitrum" "9546" "421614" "https://arbitrum-sepolia.gateway.tenderly.co" + start_anvil_fork "base" "9547" "84532" "https://sepolia.base.org" + start_anvil_fork "bsc" "9548" "97" "https://bsc-testnet-rpc.publicnode.com" + + local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block + sepolia_latest_block="$(wait_for_block_number "sepolia" "$sepolia_host_rpc")" + arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$arbitrum_host_rpc")" + base_latest_block="$(wait_for_block_number "base" "$base_host_rpc")" + bsc_latest_block="$(wait_for_block_number "bsc" "$bsc_host_rpc")" + + local patched_count=0 + local config_path="/root/.puniversal/config/pushuv_config.json" + local uv_container + for uv_container in universal-validator-1 universal-validator-2 universal-validator-3 universal-validator-4; do + if ! docker ps --format '{{.Names}}' | grep -qx "$uv_container"; then + continue + fi + + local tmp_in tmp_out + tmp_in="$(mktemp)" + tmp_out="$(mktemp)" + + if ! 
docker exec "$uv_container" cat "$config_path" >"$tmp_in"; then + rm -f "$tmp_in" "$tmp_out" + log_warn "Failed to read $config_path from $uv_container" + continue + fi + + jq \ + --arg sepolia_rpc "$uv_sepolia_rpc_url" \ + --arg arbitrum_rpc "$uv_arbitrum_rpc_url" \ + --arg base_rpc "$uv_base_rpc_url" \ + --arg bsc_rpc "$uv_bsc_rpc_url" \ + --argjson sepolia_start "$sepolia_latest_block" \ + --argjson arbitrum_start "$arbitrum_latest_block" \ + --argjson base_start "$base_latest_block" \ + --argjson bsc_start "$bsc_latest_block" \ + ' + .chain_configs["eip155:11155111"].rpc_urls = [$sepolia_rpc] + | .chain_configs["eip155:11155111"].event_start_from = $sepolia_start + | .chain_configs["eip155:421614"].rpc_urls = [$arbitrum_rpc] + | .chain_configs["eip155:421614"].event_start_from = $arbitrum_start + | .chain_configs["eip155:84532"].rpc_urls = [$base_rpc] + | .chain_configs["eip155:84532"].event_start_from = $base_start + | .chain_configs["eip155:97"].rpc_urls = [$bsc_rpc] + | .chain_configs["eip155:97"].event_start_from = $bsc_start + ' "$tmp_in" >"$tmp_out" + + docker cp "$tmp_out" "$uv_container":"$config_path" + rm -f "$tmp_in" "$tmp_out" + + patched_count=$((patched_count + 1)) + log_ok "Updated $uv_container config for Sepolia/Arbitrum/Base/BSC local forks" + done + + if [[ "$patched_count" -eq 0 ]]; then + log_warn "No universal-validator containers are running yet; skipped pushuv_config.json patch" + return 0 + fi + + log_ok "Patched $patched_count universal validator config(s) with local fork RPC/event_start_from" +} + step_stop_running_nodes() { log_info "Stopping running local nodes/validators" @@ -1419,11 +1554,17 @@ step_configure_universal_core() { } cmd_all() { + if is_local_testing_env; then + step_setup_environment + fi (cd "$PUSH_CHAIN_DIR" && make replace-addresses) (cd "$PUSH_CHAIN_DIR" && make build) step_update_env_fund_to_address step_stop_running_nodes step_devnet + if is_local_testing_env; then + step_setup_environment + fi 
step_recover_genesis_key step_fund_account step_setup_core_contracts @@ -1443,6 +1584,7 @@ cmd_show_help() { Usage: $(basename "$0") Commands: + setup-environment For TESTING_ENV=LOCAL: start anvil + patch universal-validator Sepolia chain config devnet Build/start local-multi-validator devnet + uvalidators print-genesis Print first genesis account + mnemonic recover-genesis-key Recover genesis key into local keyring @@ -1473,12 +1615,26 @@ Commands: Primary files: Env: $ENV_FILE Address: $DEPLOY_ADDRESSES_FILE + +Important env: + TESTING_ENV=LOCAL Enables local anvil setup and config rewrites for Sepolia chain in setup-environment/all + ANVIL_SEPOLIA_HOST_RPC_URL=http://localhost:9545 + ANVIL_ARBITRUM_HOST_RPC_URL=http://localhost:9546 + ANVIL_BASE_HOST_RPC_URL=http://localhost:9547 + ANVIL_BSC_HOST_RPC_URL=http://localhost:9548 + LOCAL_SEPOLIA_UV_RPC_URL=http://host.docker.internal:9545 + LOCAL_ARBITRUM_UV_RPC_URL=http://host.docker.internal:9546 + LOCAL_BASE_UV_RPC_URL=http://host.docker.internal:9547 + LOCAL_BSC_UV_RPC_URL=http://host.docker.internal:9548 EOF } main() { + ensure_testing_env_var_in_env_file + local cmd="${1:-help}" case "$cmd" in + setup-environment) step_setup_environment ;; devnet) step_devnet ;; print-genesis) step_print_genesis ;; recover-genesis-key) step_recover_genesis_key ;; diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index e8ff9e40..f10bdf7c 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -26,6 +26,14 @@ ARBITRUM_SEPOLIA_DEFAULT_RPC_URL="${ARBITRUM_SEPOLIA_DEFAULT_RPC_URL:-https://ar BASE_SEPOLIA_DEFAULT_RPC_URL="${BASE_SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.base.org}" BSC_TESTNET_DEFAULT_RPC_URL="${BSC_TESTNET_DEFAULT_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" SOLANA_DEVNET_DEFAULT_RPC_URL="${SOLANA_DEVNET_DEFAULT_RPC_URL:-https://api.devnet.solana.com}" +LOCAL_SEPOLIA_SOURCE_RPC_URL="${LOCAL_SEPOLIA_SOURCE_RPC_URL:-http://localhost:9545}" 
+LOCAL_SEPOLIA_UV_RPC_URL="${LOCAL_SEPOLIA_UV_RPC_URL:-http://host.docker.internal:9545}" +LOCAL_ARBITRUM_SOURCE_RPC_URL="${LOCAL_ARBITRUM_SOURCE_RPC_URL:-http://localhost:9546}" +LOCAL_ARBITRUM_UV_RPC_URL="${LOCAL_ARBITRUM_UV_RPC_URL:-http://host.docker.internal:9546}" +LOCAL_BASE_SOURCE_RPC_URL="${LOCAL_BASE_SOURCE_RPC_URL:-http://localhost:9547}" +LOCAL_BASE_UV_RPC_URL="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" +LOCAL_BSC_SOURCE_RPC_URL="${LOCAL_BSC_SOURCE_RPC_URL:-http://localhost:9548}" +LOCAL_BSC_UV_RPC_URL="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" # ═══════════════════════════════════════════════════════════════════════════════ # COLORS @@ -465,7 +473,15 @@ cmd_up() { local solana_start_height="${SOLANA_EVENT_START_FROM:-}" if [ -z "$sepolia_start_height" ]; then - if sepolia_start_height=$(fetch_evm_height "$SEPOLIA_DEFAULT_RPC_URL"); then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if sepolia_start_height=$(fetch_evm_height "$LOCAL_SEPOLIA_SOURCE_RPC_URL"); then + print_status "Using LOCAL Sepolia event_start_from from $LOCAL_SEPOLIA_SOURCE_RPC_URL: $sepolia_start_height" + else + print_warning "Could not fetch LOCAL Sepolia latest block from $LOCAL_SEPOLIA_SOURCE_RPC_URL" + print_warning "Sepolia will use default event_start_from from pushuv config" + sepolia_start_height="" + fi + elif sepolia_start_height=$(fetch_evm_height "$SEPOLIA_DEFAULT_RPC_URL"); then print_status "Using Sepolia event_start_from: $sepolia_start_height" else print_warning "Could not fetch Sepolia latest block from $SEPOLIA_DEFAULT_RPC_URL" @@ -477,7 +493,15 @@ cmd_up() { fi if [ -z "$base_start_height" ]; then - if base_start_height=$(fetch_evm_height "$BASE_SEPOLIA_DEFAULT_RPC_URL"); then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if base_start_height=$(fetch_evm_height "$LOCAL_BASE_SOURCE_RPC_URL"); then + print_status "Using LOCAL Base Sepolia event_start_from from $LOCAL_BASE_SOURCE_RPC_URL: $base_start_height" + else + print_warning 
"Could not fetch LOCAL Base Sepolia latest block from $LOCAL_BASE_SOURCE_RPC_URL" + print_warning "Base Sepolia will use default event_start_from from pushuv config" + base_start_height="" + fi + elif base_start_height=$(fetch_evm_height "$BASE_SEPOLIA_DEFAULT_RPC_URL"); then print_status "Using Base Sepolia event_start_from: $base_start_height" else print_warning "Could not fetch Base Sepolia latest block from $BASE_SEPOLIA_DEFAULT_RPC_URL" @@ -489,7 +513,15 @@ cmd_up() { fi if [ -z "$arbitrum_start_height" ]; then - if arbitrum_start_height=$(fetch_evm_height "$ARBITRUM_SEPOLIA_DEFAULT_RPC_URL"); then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if arbitrum_start_height=$(fetch_evm_height "$LOCAL_ARBITRUM_SOURCE_RPC_URL"); then + print_status "Using LOCAL Arbitrum Sepolia event_start_from from $LOCAL_ARBITRUM_SOURCE_RPC_URL: $arbitrum_start_height" + else + print_warning "Could not fetch LOCAL Arbitrum Sepolia latest block from $LOCAL_ARBITRUM_SOURCE_RPC_URL" + print_warning "Arbitrum Sepolia will use default event_start_from from pushuv config" + arbitrum_start_height="" + fi + elif arbitrum_start_height=$(fetch_evm_height "$ARBITRUM_SEPOLIA_DEFAULT_RPC_URL"); then print_status "Using Arbitrum Sepolia event_start_from: $arbitrum_start_height" else print_warning "Could not fetch Arbitrum Sepolia latest block from $ARBITRUM_SEPOLIA_DEFAULT_RPC_URL" @@ -501,7 +533,15 @@ cmd_up() { fi if [ -z "$bsc_start_height" ]; then - if bsc_start_height=$(fetch_evm_height "$BSC_TESTNET_DEFAULT_RPC_URL"); then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if bsc_start_height=$(fetch_evm_height "$LOCAL_BSC_SOURCE_RPC_URL"); then + print_status "Using LOCAL BSC testnet event_start_from from $LOCAL_BSC_SOURCE_RPC_URL: $bsc_start_height" + else + print_warning "Could not fetch LOCAL BSC testnet latest block from $LOCAL_BSC_SOURCE_RPC_URL" + print_warning "BSC testnet will use default event_start_from from pushuv config" + bsc_start_height="" + fi + elif 
bsc_start_height=$(fetch_evm_height "$BSC_TESTNET_DEFAULT_RPC_URL"); then print_status "Using BSC testnet event_start_from: $bsc_start_height" else print_warning "Could not fetch BSC testnet latest block from $BSC_TESTNET_DEFAULT_RPC_URL" @@ -524,6 +564,11 @@ cmd_up() { print_status "Using provided SOLANA_EVENT_START_FROM: $solana_start_height" fi + TESTING_ENV="${TESTING_ENV:-}" \ + SEPOLIA_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_SEPOLIA_UV_RPC_URL" || echo "${SEPOLIA_RPC_URL_OVERRIDE:-}")" \ + ARBITRUM_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_ARBITRUM_UV_RPC_URL" || echo "${ARBITRUM_RPC_URL_OVERRIDE:-}")" \ + BASE_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BASE_UV_RPC_URL" || echo "${BASE_RPC_URL_OVERRIDE:-}")" \ + BSC_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BSC_UV_RPC_URL" || echo "${BSC_RPC_URL_OVERRIDE:-}")" \ SEPOLIA_EVENT_START_FROM="$sepolia_start_height" \ BASE_EVENT_START_FROM="$base_start_height" \ ARBITRUM_EVENT_START_FROM="$arbitrum_start_height" \ diff --git a/local-multi-validator/docker-compose.yml b/local-multi-validator/docker-compose.yml index 2c1c39b5..6d2390a0 100644 --- a/local-multi-validator/docker-compose.yml +++ b/local-multi-validator/docker-compose.yml @@ -231,6 +231,11 @@ services: - CORE_VALIDATOR_GRPC=core-validator-1:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -269,6 +274,11 @@ services: - CORE_VALIDATOR_GRPC=core-validator-2:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - 
SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -307,6 +317,11 @@ services: - CORE_VALIDATOR_GRPC=core-validator-3:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -345,6 +360,11 @@ services: - CORE_VALIDATOR_GRPC=core-validator-4:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 437d739f..7ad042f6 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -140,8 +140,25 @@ fi # Force Arbitrum Sepolia RPC URL to tenderly endpoint ARBITRUM_CHAIN_ID="eip155:421614" ARBITRUM_TENDERLY_URL="https://arbitrum-sepolia.gateway.tenderly.co" +BASE_CHAIN_ID="eip155:84532" +BSC_CHAIN_ID="eip155:97" BSC_TESTNET_CHAIN_ID="eip155:97" 
BSC_TESTNET_RPC_URL="${BSC_TESTNET_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" +SEPOLIA_CHAIN_ID="eip155:11155111" + +# In LOCAL testing, universal-validator containers must not use localhost for host services. +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${SEPOLIA_RPC_URL_OVERRIDE:-}" ]; then + SEPOLIA_RPC_URL_OVERRIDE="http://host.docker.internal:9545" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${ARBITRUM_RPC_URL_OVERRIDE:-}" ]; then + ARBITRUM_RPC_URL_OVERRIDE="http://host.docker.internal:9546" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${BASE_RPC_URL_OVERRIDE:-}" ]; then + BASE_RPC_URL_OVERRIDE="http://host.docker.internal:9547" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${BSC_RPC_URL_OVERRIDE:-}" ]; then + BSC_RPC_URL_OVERRIDE="http://host.docker.internal:9548" +fi jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_TENDERLY_URL" \ '.chain_configs[$chain].rpc_urls = [$url]' \ @@ -153,6 +170,38 @@ jq --arg chain "$BSC_TESTNET_CHAIN_ID" --arg url "$BSC_TESTNET_RPC_URL" \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +if [ -n "${SEPOLIA_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Sepolia rpc_urls to: $SEPOLIA_RPC_URL_OVERRIDE" + jq --arg chain "$SEPOLIA_CHAIN_ID" --arg url "$SEPOLIA_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${ARBITRUM_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Arbitrum Sepolia rpc_urls to: $ARBITRUM_RPC_URL_OVERRIDE" + jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv 
"$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${BASE_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Base Sepolia rpc_urls to: $BASE_RPC_URL_OVERRIDE" + jq --arg chain "$BASE_CHAIN_ID" --arg url "$BASE_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${BSC_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding BSC testnet rpc_urls to: $BSC_RPC_URL_OVERRIDE" + jq --arg chain "$BSC_CHAIN_ID" --arg url "$BSC_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + # Optionally override chain event start heights (set by ./devnet start) set_chain_event_start_from() { local chain_id="$1" From 774b29fb633fcead72a13e8b7cb04deb57bdf7e5 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 25 Mar 2026 19:42:41 +0530 Subject: [PATCH 25/61] feat: support to outbound_changes branch + local evm and svm simulation --- e2e-tests/.env.example | 6 +- e2e-tests/README.md | 27 +++++- e2e-tests/setup.sh | 97 ++++++++++++++++--- local-multi-validator/devnet | 13 ++- local-multi-validator/docker-compose.yml | 4 + .../scripts/setup-universal.sh | 12 +++ 6 files changed, 142 insertions(+), 17 deletions(-) diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index c41836bd..29fcccaa 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -10,6 +10,8 @@ PUSH_RPC_URL=http://localhost:8545 # Local chain info CHAIN_ID=localchain_9000-1 KEYRING_BACKEND=test +# Set to LOCAL to enable anvil/surfpool setup and local RPC rewrites in setup-environment/all +TESTING_ENV= # Genesis key 
recovery/funding GENESIS_KEY_NAME=genesis-acc-1 @@ -40,7 +42,7 @@ GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git GATEWAY_BRANCH=e2e-push-node PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git -PUSH_CHAIN_SDK_BRANCH=feb-11-2026-alpha-publish +PUSH_CHAIN_SDK_BRANCH=outbound_changes # push-chain-sdk core .env target path (relative to PUSH_CHAIN_SDK_DIR) PUSH_CHAIN_SDK_CORE_ENV_PATH=packages/core/.env @@ -51,7 +53,7 @@ CORE_CONTRACTS_DIR=../push-chain-core-contracts SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts GATEWAY_DIR=../push-chain-gateway-contracts PUSH_CHAIN_SDK_DIR=../push-chain-sdk -PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__ +PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__/evm/inbound # push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) # Defaults used by setup-sdk when omitted: diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 80267847..f2daa812 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -60,13 +60,34 @@ cp e2e-tests/.env.example e2e-tests/.env Important variables in `.env`: - `PUSH_RPC_URL` (default `http://localhost:8545`) +- `TESTING_ENV` (`LOCAL` enables anvil/surfpool + local RPC rewrites) - `PRIVATE_KEY` - `FUND_TO_ADDRESS` - `POOL_CREATION_TOPUP_AMOUNT` (funding for deployer before pool creation) - `CORE_CONTRACTS_BRANCH` - `SWAP_AMM_BRANCH` - `GATEWAY_BRANCH` (currently `e2e-push-node`) -- `PUSH_CHAIN_SDK_BRANCH` (default `feb-11-2026-alpha-publish`) +- `PUSH_CHAIN_SDK_BRANCH` (default `outbound_changes`) +- `PUSH_CHAIN_SDK_E2E_DIR` (default `packages/core/__e2e__/evm/inbound`) + +### TESTING_ENV=LOCAL behavior + +Set this in `e2e-tests/.env` when running local fork-based E2E: + +```bash +TESTING_ENV=LOCAL +``` + +When `TESTING_ENV=LOCAL`, `setup-environment` (and `all`) now does both: + +1. starts local fork nodes (`anvil` for Sepolia/Arbitrum/Base/BSC and `surfpool` for Solana) +2. 
rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to your configured local RPC URLs: + - `ANVIL_SEPOLIA_HOST_RPC_URL` (default `http://localhost:9545`) + - `ANVIL_ARBITRUM_HOST_RPC_URL` (default `http://localhost:9546`) + - `ANVIL_BASE_HOST_RPC_URL` (default `http://localhost:9547`) + - `ANVIL_BSC_HOST_RPC_URL` (default `http://localhost:9548`) + - `SURFPOOL_SOLANA_HOST_RPC_URL` (default `http://localhost:8899`) +3. patches universal-validator container RPC endpoints (`pushuv_config.json`) to the corresponding local endpoints Genesis account source: @@ -151,6 +172,10 @@ Then it updates both: - `e2e-tests/deploy_addresses.json` as `contracts.UEA_PROXY_IMPLEMENTATION` - `push-chain-sdk/packages/core/src/lib/constants/chain.ts` at `[PUSH_NETWORK.LOCALNET]` +SDK tests are discovered from: + +- `push-chain-sdk/packages/core/__e2e__/evm/inbound` + Run all configured SDK E2E files: ```bash diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index a02008ce..4451465b 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -34,14 +34,14 @@ fi : "${GATEWAY_REPO:=https://github.com/pushchain/push-chain-gateway-contracts.git}" : "${GATEWAY_BRANCH:=e2e-push-node}" : "${PUSH_CHAIN_SDK_REPO:=https://github.com/pushchain/push-chain-sdk.git}" -: "${PUSH_CHAIN_SDK_BRANCH:=feb-11-2026-alpha-publish}" +: "${PUSH_CHAIN_SDK_BRANCH:=outbound_changes}" : "${E2E_PARENT_DIR:=../}" : "${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" : "${SWAP_AMM_DIR:=$E2E_PARENT_DIR/push-chain-swap-internal-amm-contracts}" : "${GATEWAY_DIR:=$E2E_PARENT_DIR/push-chain-gateway-contracts}" : "${PUSH_CHAIN_SDK_DIR:=$E2E_PARENT_DIR/push-chain-sdk}" -: "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__}" +: "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__/evm/inbound}" : "${PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH:=packages/core/src/lib/constants/chain.ts}" : "${PUSH_CHAIN_SDK_ACCOUNT_TS_PATH:=packages/core/src/lib/universal/account/account.ts}" : 
"${PUSH_CHAIN_SDK_CORE_ENV_PATH:=packages/core/.env}" @@ -547,7 +547,7 @@ step_setup_environment() { return 0 fi - require_cmd anvil cast docker jq + require_cmd anvil cast docker jq surfpool curl local sepolia_host_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" local arbitrum_host_rpc="${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" @@ -558,20 +558,49 @@ step_setup_environment() { local uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-http://host.docker.internal:9546}" local uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" local uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" + local solana_host_rpc="${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}" + local uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-http://host.docker.internal:8899}" + + patch_chain_config_public_rpc() { + local file_path="$1" + local rpc_url="$2" + local label="$3" + local tmp + + if [[ ! -f "$file_path" ]]; then + log_warn "Chain config file not found for $label: $file_path" + return 0 + fi + + tmp="$(mktemp)" + jq --arg rpc "$rpc_url" '.public_rpc_url = $rpc' "$file_path" >"$tmp" + mv "$tmp" "$file_path" + log_ok "Patched $label chain config public_rpc_url => $rpc_url" + } + + patch_local_testnet_donut_chain_configs() { + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "$sepolia_host_rpc" "eth_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "$arbitrum_host_rpc" "arb_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "$base_host_rpc" "base_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "$bsc_host_rpc" "bsc_testnet" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "$solana_host_rpc" "solana_devnet" + } start_anvil_fork() { local label="$1" local port="$2" local chain_id="$3" local fork_url="$4" + local anvil_pattern="anvil --port $port" - if pgrep -f 
"anvil --port $port --chain-id $chain_id" >/dev/null 2>&1; then - log_ok "Anvil $label already running on port $port" - return 0 + if pgrep -f "$anvil_pattern" >/dev/null 2>&1; then + log_info "Stopping existing anvil $label on port $port" + pkill -f "$anvil_pattern" >/dev/null 2>&1 || true + sleep 1 fi - log_info "Starting anvil $label on port $port" - nohup anvil --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ + log_info "Starting anvil $label on port $port (chain-id: $chain_id)" + nohup anvil --host 0.0.0.0 --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ >"$LOG_DIR/anvil_${label}.log" 2>&1 & } @@ -594,16 +623,52 @@ step_setup_environment() { return 1 } + start_surfpool() { + local surfpool_pattern="surfpool start --port 8899 --network devnet" + + if pgrep -f "$surfpool_pattern" >/dev/null 2>&1; then + log_info "Stopping existing surfpool on port 8899" + pkill -f "$surfpool_pattern" >/dev/null 2>&1 || true + sleep 1 + fi + + log_info "Starting surfpool for local Solana testing on port 8899" + nohup surfpool start --port 8899 --network devnet >"$LOG_DIR/surfpool.log" 2>&1 & + } + + wait_for_solana_slot() { + local rpc_url="$1" + local slot="" + local response + local i + for i in {1..30}; do + response="$(curl -sS -X POST "$rpc_url" -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' || true)" + slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + slot="$(echo "$slot" | tr -d '[:space:]')" + if [[ "$slot" =~ ^[0-9]+$ ]]; then + printf "%s" "$slot" + return 0 + fi + sleep 1 + done + + log_err "Could not read latest Solana slot from surfpool at $rpc_url" + return 1 + } + start_anvil_fork "sepolia" "9545" "11155111" "https://ethereum-sepolia-rpc.publicnode.com" start_anvil_fork "arbitrum" "9546" "421614" "https://arbitrum-sepolia.gateway.tenderly.co" start_anvil_fork "base" "9547" "84532" 
"https://sepolia.base.org" start_anvil_fork "bsc" "9548" "97" "https://bsc-testnet-rpc.publicnode.com" + start_surfpool + patch_local_testnet_donut_chain_configs - local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block + local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block solana_latest_slot sepolia_latest_block="$(wait_for_block_number "sepolia" "$sepolia_host_rpc")" arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$arbitrum_host_rpc")" base_latest_block="$(wait_for_block_number "base" "$base_host_rpc")" bsc_latest_block="$(wait_for_block_number "bsc" "$bsc_host_rpc")" + solana_latest_slot="$(wait_for_solana_slot "$solana_host_rpc")" local patched_count=0 local config_path="/root/.puniversal/config/pushuv_config.json" @@ -628,10 +693,12 @@ step_setup_environment() { --arg arbitrum_rpc "$uv_arbitrum_rpc_url" \ --arg base_rpc "$uv_base_rpc_url" \ --arg bsc_rpc "$uv_bsc_rpc_url" \ + --arg solana_rpc "$uv_solana_rpc_url" \ --argjson sepolia_start "$sepolia_latest_block" \ --argjson arbitrum_start "$arbitrum_latest_block" \ --argjson base_start "$base_latest_block" \ --argjson bsc_start "$bsc_latest_block" \ + --argjson solana_start "$solana_latest_slot" \ ' .chain_configs["eip155:11155111"].rpc_urls = [$sepolia_rpc] | .chain_configs["eip155:11155111"].event_start_from = $sepolia_start @@ -641,13 +708,15 @@ step_setup_environment() { | .chain_configs["eip155:84532"].event_start_from = $base_start | .chain_configs["eip155:97"].rpc_urls = [$bsc_rpc] | .chain_configs["eip155:97"].event_start_from = $bsc_start + | .chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].rpc_urls = [$solana_rpc] + | .chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].event_start_from = $solana_start ' "$tmp_in" >"$tmp_out" docker cp "$tmp_out" "$uv_container":"$config_path" rm -f "$tmp_in" "$tmp_out" patched_count=$((patched_count + 1)) - log_ok "Updated $uv_container config for Sepolia/Arbitrum/Base/BSC local 
forks" + log_ok "Updated $uv_container config for Sepolia/Arbitrum/Base/BSC/Solana local forks" done if [[ "$patched_count" -eq 0 ]]; then @@ -655,7 +724,7 @@ step_setup_environment() { return 0 fi - log_ok "Patched $patched_count universal validator config(s) with local fork RPC/event_start_from" + log_ok "Patched $patched_count universal validator config(s) with local fork RPC/event_start_from (including Solana)" } step_stop_running_nodes() { @@ -1584,7 +1653,7 @@ cmd_show_help() { Usage: $(basename "$0") Commands: - setup-environment For TESTING_ENV=LOCAL: start anvil + patch universal-validator Sepolia chain config + setup-environment For TESTING_ENV=LOCAL: start anvil/surfpool + patch validator and testnet-donut chain RPC configs devnet Build/start local-multi-validator devnet + uvalidators print-genesis Print first genesis account + mnemonic recover-genesis-key Recover genesis key into local keyring @@ -1617,7 +1686,7 @@ Primary files: Address: $DEPLOY_ADDRESSES_FILE Important env: - TESTING_ENV=LOCAL Enables local anvil setup and config rewrites for Sepolia chain in setup-environment/all + TESTING_ENV=LOCAL Enables local anvil setup and config rewrites for testnet-donut chain.json and universal validator RPCs in setup-environment/all ANVIL_SEPOLIA_HOST_RPC_URL=http://localhost:9545 ANVIL_ARBITRUM_HOST_RPC_URL=http://localhost:9546 ANVIL_BASE_HOST_RPC_URL=http://localhost:9547 @@ -1626,6 +1695,8 @@ Important env: LOCAL_ARBITRUM_UV_RPC_URL=http://host.docker.internal:9546 LOCAL_BASE_UV_RPC_URL=http://host.docker.internal:9547 LOCAL_BSC_UV_RPC_URL=http://host.docker.internal:9548 + SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 + LOCAL_SOLANA_UV_RPC_URL=http://host.docker.internal:8899 EOF } diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index f10bdf7c..0e3c58b6 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -34,6 +34,8 @@ 
LOCAL_BASE_SOURCE_RPC_URL="${LOCAL_BASE_SOURCE_RPC_URL:-http://localhost:9547}" LOCAL_BASE_UV_RPC_URL="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" LOCAL_BSC_SOURCE_RPC_URL="${LOCAL_BSC_SOURCE_RPC_URL:-http://localhost:9548}" LOCAL_BSC_UV_RPC_URL="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" +LOCAL_SOLANA_SOURCE_RPC_URL="${LOCAL_SOLANA_SOURCE_RPC_URL:-http://localhost:8899}" +LOCAL_SOLANA_UV_RPC_URL="${LOCAL_SOLANA_UV_RPC_URL:-http://host.docker.internal:8899}" # ═══════════════════════════════════════════════════════════════════════════════ # COLORS @@ -553,7 +555,15 @@ cmd_up() { fi if [ -z "$solana_start_height" ]; then - if solana_start_height=$(fetch_solana_slot "$SOLANA_DEVNET_DEFAULT_RPC_URL"); then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if solana_start_height=$(fetch_solana_slot "$LOCAL_SOLANA_SOURCE_RPC_URL"); then + print_status "Using LOCAL Solana event_start_from from $LOCAL_SOLANA_SOURCE_RPC_URL: $solana_start_height" + else + print_warning "Could not fetch LOCAL Solana latest slot from $LOCAL_SOLANA_SOURCE_RPC_URL" + print_warning "Solana will use default event_start_from from pushuv config" + solana_start_height="" + fi + elif solana_start_height=$(fetch_solana_slot "$SOLANA_DEVNET_DEFAULT_RPC_URL"); then print_status "Using Solana devnet event_start_from: $solana_start_height" else print_warning "Could not fetch Solana devnet latest slot from $SOLANA_DEVNET_DEFAULT_RPC_URL" @@ -569,6 +579,7 @@ cmd_up() { ARBITRUM_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_ARBITRUM_UV_RPC_URL" || echo "${ARBITRUM_RPC_URL_OVERRIDE:-}")" \ BASE_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BASE_UV_RPC_URL" || echo "${BASE_RPC_URL_OVERRIDE:-}")" \ BSC_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BSC_UV_RPC_URL" || echo "${BSC_RPC_URL_OVERRIDE:-}")" \ + SOLANA_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_SOLANA_UV_RPC_URL" || echo 
"${SOLANA_RPC_URL_OVERRIDE:-}")" \ SEPOLIA_EVENT_START_FROM="$sepolia_start_height" \ BASE_EVENT_START_FROM="$base_start_height" \ ARBITRUM_EVENT_START_FROM="$arbitrum_start_height" \ diff --git a/local-multi-validator/docker-compose.yml b/local-multi-validator/docker-compose.yml index 6d2390a0..bdac8092 100644 --- a/local-multi-validator/docker-compose.yml +++ b/local-multi-validator/docker-compose.yml @@ -236,6 +236,7 @@ services: - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -279,6 +280,7 @@ services: - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -322,6 +324,7 @@ services: - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} @@ -365,6 +368,7 @@ services: - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} 
- ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 7ad042f6..2554ba37 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -142,6 +142,7 @@ ARBITRUM_CHAIN_ID="eip155:421614" ARBITRUM_TENDERLY_URL="https://arbitrum-sepolia.gateway.tenderly.co" BASE_CHAIN_ID="eip155:84532" BSC_CHAIN_ID="eip155:97" +SOLANA_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" BSC_TESTNET_CHAIN_ID="eip155:97" BSC_TESTNET_RPC_URL="${BSC_TESTNET_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" SEPOLIA_CHAIN_ID="eip155:11155111" @@ -159,6 +160,9 @@ fi if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${BSC_RPC_URL_OVERRIDE:-}" ]; then BSC_RPC_URL_OVERRIDE="http://host.docker.internal:9548" fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${SOLANA_RPC_URL_OVERRIDE:-}" ]; then + SOLANA_RPC_URL_OVERRIDE="http://host.docker.internal:8899" +fi jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_TENDERLY_URL" \ '.chain_configs[$chain].rpc_urls = [$url]' \ @@ -202,6 +206,14 @@ if [ -n "${BSC_RPC_URL_OVERRIDE:-}" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi +if [ -n "${SOLANA_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Solana rpc_urls to: $SOLANA_RPC_URL_OVERRIDE" + jq --arg chain "$SOLANA_CHAIN_ID" --arg url "$SOLANA_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + # Optionally override chain event start heights (set by ./devnet start) set_chain_event_start_from() { local chain_id="$1" From ac0ba6dacd532018fd9d4cc5e5227d307e0804dd Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Mar 2026 12:54:32 
+0530 Subject: [PATCH 26/61] initial outbound testing setup --- e2e-tests/deploy_addresses.json | 56 ++ e2e-tests/e2e-tests/.pchain/config/app.toml | 341 ++++++++++++ .../e2e-tests/.pchain/config/client.toml | 17 + .../e2e-tests/.pchain/config/config.toml | 498 ++++++++++++++++++ ...68f98d2f2c1f8dbf56f34e0636edb54263.address | 1 + .../.pchain/keyring-test/genesis-acc-1.info | 1 + e2e-tests/e2e-tests/deploy_addresses.json | 54 ++ e2e-tests/setup.sh | 443 +++++++++++++++- local-multi-validator/devnet | 79 ++- .../scripts/setup-genesis-auto.sh | 5 +- .../scripts/setup-universal.sh | 46 +- 11 files changed, 1488 insertions(+), 53 deletions(-) create mode 100644 e2e-tests/e2e-tests/.pchain/config/app.toml create mode 100644 e2e-tests/e2e-tests/.pchain/config/client.toml create mode 100644 e2e-tests/e2e-tests/.pchain/config/config.toml create mode 100644 e2e-tests/e2e-tests/.pchain/keyring-test/44baea68f98d2f2c1f8dbf56f34e0636edb54263.address create mode 100644 e2e-tests/e2e-tests/.pchain/keyring-test/genesis-acc-1.info create mode 100644 e2e-tests/e2e-tests/deploy_addresses.json diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index e69de29b..68056856 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -0,0 +1,56 @@ +{ + "generatedAt": "2026-03-27T07:03:55Z", + "contracts": { + "WPC": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", + "Factory": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", + "SwapRouter": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", + "QuoterV2": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", + "PositionManager": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5", + "UEA_PROXY_IMPLEMENTATION": "0x2C297101b7d3e0911296b9A64d106684a161b4C9", + "COUNTER_ADDRESS_PAYABLE": "0x88449CaC4DFd2FA0FFbC90Fd1Ea9F2a6FDc690F7" + }, + "tokens": [ + { + "name": "pETH.eth", + "symbol": "pETH", + "address": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": 
"USDT.eth", + "symbol": "USDT.eth", + "address": "0x6a20557430be6412AF423681e35CC96797506F3a", + "source": "core-contracts", + "decimals": 6 + }, + { + "name": "pETH.base", + "symbol": "pETH.base", + "address": "0xCcd71bc096E2225048cD167447e164E8571BcCA6", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pETH.arb", + "symbol": "pETH.arb", + "address": "0xE74A512688E53d6Ed2cf64a327fABE8ECE27aDD6", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pBNB", + "symbol": "pBNB", + "address": "0x2ddB499C3a35a60c809d878eFf5Fa248bb5eAdbd", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pSOL", + "symbol": "pSOL", + "address": "0x31F3Dcb417970EBe9AC1e254Ee42b91e49e30EE2", + "source": "core-contracts", + "decimals": 9 + } + ] +} diff --git a/e2e-tests/e2e-tests/.pchain/config/app.toml b/e2e-tests/e2e-tests/.pchain/config/app.toml new file mode 100644 index 00000000..d025d33c --- /dev/null +++ b/e2e-tests/e2e-tests/.pchain/config/app.toml @@ -0,0 +1,341 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Base Configuration ### +############################################################################### + +# The minimum gas prices a validator is willing to accept for processing a +# transaction. A transaction's fees must meet the minimum of any denomination +# specified in this config (e.g. 0.25token1,0.0001token2). +minimum-gas-prices = "0stake" + +# The maximum gas a query coming over rest/grpc may consume. +# If this is set to zero, the query can consume an unbounded amount of gas. +query-gas-limit = "0" + +# default: the last 362880 states are kept, pruning at 10 block intervals +# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) +# everything: 2 latest states will be kept; pruning at 10 block intervals. 
+# custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' +pruning = "default" + +# These are applied if and only if the pruning strategy is custom. +pruning-keep-recent = "0" +pruning-interval = "0" + +# HaltHeight contains a non-zero block height at which a node will gracefully +# halt and shutdown that can be used to assist upgrades and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-height = 0 + +# HaltTime contains a non-zero minimum block time (in Unix seconds) at which +# a node will gracefully halt and shutdown that can be used to assist upgrades +# and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-time = 0 + +# MinRetainBlocks defines the minimum block height offset from the current +# block being committed, such that all blocks past this offset are pruned +# from CometBFT. It is used as part of the process of determining the +# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates +# that no blocks should be pruned. +# +# This configuration value is only responsible for pruning CometBFT blocks. +# It has no bearing on application state pruning which is determined by the +# "pruning-*" configurations. +# +# Note: CometBFT block pruning is dependant on this parameter in conjunction +# with the unbonding (safety threshold) period, state pruning and state sync +# snapshot parameters to determine the correct minimum value of +# ResponseCommit.RetainHeight. +min-retain-blocks = 0 + +# InterBlockCache enables inter-block caching. +inter-block-cache = true + +# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, +# which informs CometBFT what to index. If empty, all events will be indexed. +# +# Example: +# ["message.sender", "message.recipient"] +index-events = [] + +# IavlCacheSize set the size of the iavl tree cache (in number of nodes). 
+iavl-cache-size = 781250 + +# IAVLDisableFastNode enables or disables the fast node feature of IAVL. +# Default is false. +iavl-disable-fastnode = false + +# AppDBBackend defines the database backend type to use for the application and snapshots DBs. +# An empty string indicates that a fallback will be used. +# The fallback is the db_backend value set in CometBFT's config.toml. +app-db-backend = "" + +############################################################################### +### Telemetry Configuration ### +############################################################################### + +[telemetry] + +# Prefixed with keys to separate services. +service-name = "" + +# Enabled enables the application telemetry functionality. When enabled, +# an in-memory sink is also enabled by default. Operators may also enabled +# other sinks such as Prometheus. +enabled = false + +# Enable prefixing gauge values with hostname. +enable-hostname = false + +# Enable adding hostname to labels. +enable-hostname-label = false + +# Enable adding service to labels. +enable-service-label = false + +# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. +prometheus-retention-time = 0 + +# GlobalLabels defines a global set of name/value label tuples applied to all +# metrics emitted using the wrapper functions defined in telemetry package. +# +# Example: +# [["chain_id", "cosmoshub-1"]] +global-labels = [ +] + +# MetricsSink defines the type of metrics sink to use. +metrics-sink = "" + +# StatsdAddr defines the address of a statsd server to send metrics to. +# Only utilized if MetricsSink is set to "statsd" or "dogstatsd". +statsd-addr = "" + +# DatadogHostname defines the hostname to use when emitting metrics to +# Datadog. Only utilized if MetricsSink is set to "dogstatsd". 
+datadog-hostname = "" + +############################################################################### +### API Configuration ### +############################################################################### + +[api] + +# Enable defines if the API server should be enabled. +enable = false + +# Swagger defines if swagger documentation should automatically be registered. +swagger = false + +# Address defines the API server to listen on. +address = "tcp://localhost:1317" + +# MaxOpenConnections defines the number of maximum open connections. +max-open-connections = 1000 + +# RPCReadTimeout defines the CometBFT RPC read timeout (in seconds). +rpc-read-timeout = 10 + +# RPCWriteTimeout defines the CometBFT RPC write timeout (in seconds). +rpc-write-timeout = 0 + +# RPCMaxBodyBytes defines the CometBFT maximum request body (in bytes). +rpc-max-body-bytes = 1000000 + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). +enabled-unsafe-cors = false + +############################################################################### +### gRPC Configuration ### +############################################################################### + +[grpc] + +# Enable defines if the gRPC server should be enabled. +enable = true + +# Address defines the gRPC server address to bind to. +address = "localhost:9090" + +# MaxRecvMsgSize defines the max message size in bytes the server can receive. +# The default value is 10MB. +max-recv-msg-size = "10485760" + +# MaxSendMsgSize defines the max message size in bytes the server can send. +# The default value is math.MaxInt32. +max-send-msg-size = "2147483647" + +############################################################################### +### gRPC Web Configuration ### +############################################################################### + +[grpc-web] + +# GRPCWebEnable defines if the gRPC-web should be enabled. 
+# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. +# NOTE: gRPC-Web uses the same address as the API server. +enable = true + +############################################################################### +### State Sync Configuration ### +############################################################################### + +# State sync snapshots allow other nodes to rapidly join the network without replaying historical +# blocks, instead downloading and applying a snapshot of the application state at a given height. +[state-sync] + +# snapshot-interval specifies the block interval at which local state sync snapshots are +# taken (0 to disable). +snapshot-interval = 0 + +# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). +snapshot-keep-recent = 2 + +############################################################################### +### State Streaming ### +############################################################################### + +# Streaming allows nodes to stream state to external systems. +[streaming] + +# streaming.abci specifies the configuration for the ABCI Listener streaming service. +[streaming.abci] + +# List of kv store keys to stream out via gRPC. +# The store key names MUST match the module's StoreKey name. +# +# Example: +# ["acc", "bank", "gov", "staking", "mint"[,...]] +# ["*"] to expose all keys. +keys = [] + +# The plugin name used for streaming via gRPC. +# Streaming is only enabled if this is set. +# Supported plugins: abci +plugin = "" + +# stop-node-on-err specifies whether to stop the node on message delivery error. +stop-node-on-err = true + +############################################################################### +### Mempool ### +############################################################################### + +[mempool] +# Setting max-txs to 0 will allow for a unbounded amount of transactions in the mempool. 
+# Setting max_txs to negative 1 (-1) will disable transactions from being inserted into the mempool (no-op mempool). +# Setting max_txs to a positive number (> 0) will limit the number of transactions in the mempool, by the specified amount. +# +# Note, this configuration only applies to SDK built-in app-side mempool +# implementations. +max-txs = -1 + +[wasm] +# Smart query gas limit is the max gas to be used in a smart query contract call +query_gas_limit = 3000000 + +# in-memory cache for Wasm contracts. Set to 0 to disable. +# The value is in MiB not bytes +memory_cache_size = 100 + +# Simulation gas limit is the max gas to be used in a tx simulation call. +# When not set the consensus max block gas is used instead +# simulation_gas_limit = + +############################################################################### +### EVM Configuration ### +############################################################################### + +[evm] + +# Tracer defines the 'vm.Tracer' type that the EVM will use when the node is run in +# debug mode. To enable tracing use the '--evm.tracer' flag when starting your node. +# Valid types are: json|struct|access_list|markdown +tracer = "" + +# MaxTxGasWanted defines the gas wanted for each eth tx returned in ante handler in check tx mode. +max-tx-gas-wanted = 0 + +############################################################################### +### JSON RPC Configuration ### +############################################################################### + +[json-rpc] + +# Enable defines if the JSONRPC server should be enabled. +enable = false + +# Address defines the EVM RPC HTTP server address to bind to. +address = "127.0.0.1:8545" + +# Address defines the EVM WebSocket server address to bind to. 
+ws-address = "127.0.0.1:8546" + +# API defines a list of JSON-RPC namespaces that should be enabled +# Example: "eth,txpool,personal,net,debug,web3" +api = "eth,net,web3" + +# GasCap sets a cap on gas that can be used in eth_call/estimateGas (0=infinite). Default: 25,000,000. +gas-cap = 25000000 + +# Allow insecure account unlocking when account-related RPCs are exposed by http +allow-insecure-unlock = true + +# EVMTimeout is the global timeout for eth_call. Default: 5s. +evm-timeout = "5s" + +# TxFeeCap is the global tx-fee cap for send transaction. Default: 1eth. +txfee-cap = 1 + +# FilterCap sets the global cap for total number of filters that can be created +filter-cap = 200 + +# FeeHistoryCap sets the global cap for total number of blocks that can be fetched +feehistory-cap = 100 + +# LogsCap defines the max number of results can be returned from single 'eth_getLogs' query. +logs-cap = 10000 + +# BlockRangeCap defines the max block range allowed for 'eth_getLogs' query. +block-range-cap = 10000 + +# HTTPTimeout is the read/write timeout of http json-rpc server. +http-timeout = "30s" + +# HTTPIdleTimeout is the idle timeout of http json-rpc server. +http-idle-timeout = "2m0s" + +# AllowUnprotectedTxs restricts unprotected (non EIP155 signed) transactions to be submitted via +# the node's RPC when the global parameter is disabled. +allow-unprotected-txs = false + +# MaxOpenConnections sets the maximum number of simultaneous connections +# for the server listener. +max-open-connections = 0 + +# EnableIndexer enables the custom transaction indexer for the EVM (ethereum transactions). +enable-indexer = false + +# MetricsAddress defines the EVM Metrics server address to bind to. Pass --metrics in CLI to enable +# Prometheus metrics path: /debug/metrics/prometheus +metrics-address = "127.0.0.1:6065" + +# Upgrade height for fix of revert gas refund logic when transaction reverted. 
+fix-revert-gas-refund-height = 0 + +############################################################################### +### TLS Configuration ### +############################################################################### + +[tls] + +# Certificate path defines the cert.pem file path for the TLS configuration. +certificate-path = "" + +# Key path defines the key.pem file path for the TLS configuration. +key-path = "" diff --git a/e2e-tests/e2e-tests/.pchain/config/client.toml b/e2e-tests/e2e-tests/.pchain/config/client.toml new file mode 100644 index 00000000..02581600 --- /dev/null +++ b/e2e-tests/e2e-tests/.pchain/config/client.toml @@ -0,0 +1,17 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Client Configuration ### +############################################################################### + +# The network chain ID +chain-id = "localchain_9000-1" +# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) +keyring-backend = "os" +# CLI output format (text|json) +output = "text" +# : to CometBFT RPC interface for this chain +node = "tcp://localhost:26657" +# Transaction broadcasting mode (sync|async) +broadcast-mode = "sync" diff --git a/e2e-tests/e2e-tests/.pchain/config/config.toml b/e2e-tests/e2e-tests/.pchain/config/config.toml new file mode 100644 index 00000000..e0acc58f --- /dev/null +++ b/e2e-tests/e2e-tests/.pchain/config/config.toml @@ -0,0 +1,498 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. 
Do not modify this. +version = "0.38.17" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "Aryas-MacBook-Pro.local" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an 
external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. 
This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum number of requests that can be sent in a batch +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. +max_request_batch_size = 10 + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. 
+# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "localhost:6060" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# 
peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +# +# The ideal value will strongly depend on the application. It could roughly be estimated as the +# average size of the mempool multiplied by the average time it takes the application to validate one +# transaction. 
We consider that the ABCI application runs in the same location as the CometBFT binary +# so that the recheck duration is not affected by network delays when making requests and receiving responses. +recheck_timeout = "1s" + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. 
+# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. 
+rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
+timeout_commit = "5s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. 
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/e2e-tests/e2e-tests/.pchain/keyring-test/44baea68f98d2f2c1f8dbf56f34e0636edb54263.address b/e2e-tests/e2e-tests/.pchain/keyring-test/44baea68f98d2f2c1f8dbf56f34e0636edb54263.address new file mode 100644 index 00000000..ad0ec409 --- /dev/null +++ b/e2e-tests/e2e-tests/.pchain/keyring-test/44baea68f98d2f2c1f8dbf56f34e0636edb54263.address @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNi0wMy0yNiAyMjozOTowMS44Nzk3MDQgKzA1MzAgSVNUIG09KzAuMDcwNjU2NjY4IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiUmJLNlB0dFNHYUF5bDlYMCJ9.8hnE2sBucKhJix5dsKOu4Xa28A8JzchtpFiIIojEMylJXgINeJ6tVg.v15MGwP-rvlnOIw-.nspqHMZOfu1BXyxOJP9QbW6Apfl2fPOHknJAN0eaRW0PMJxKJqZPYD4A2yl5dckYoChf653QaT_JNqIG7_6Emq3zr6ciu1PrzXLfDBahTD-JcG_kLbSly64lyDkKvsBQuKpCqtwoCs11jN9Gv_UY7kpmwy_saLKye_efpX9yMrpTwA0SoLYkHqnsVrJeC1wwCPuCTr_I9OLufCkvpMYaqoY6hQ77nniRTAJ4vit-5VJSIb4QHYuPl1nV3kTLEA.dnibrXGsqMDJfA7xccGidw \ No newline at end of file diff --git a/e2e-tests/e2e-tests/.pchain/keyring-test/genesis-acc-1.info b/e2e-tests/e2e-tests/.pchain/keyring-test/genesis-acc-1.info new file mode 
100644 index 00000000..b3b0aaa9 --- /dev/null +++ b/e2e-tests/e2e-tests/.pchain/keyring-test/genesis-acc-1.info @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNi0wMy0yNiAyMjozOTowMS44Nzg2NDkgKzA1MzAgSVNUIG09KzAuMDY5NjAxMjkzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVVowYzZacDYwRGxNYmhObCJ9.jht-5PXX09hWCukuj_9GqUpaZkW2GqBBYexc0o1vGt1VjTDCY8OiJA.HSyT3gURVfntA1eF.rAIDDcCbskUGwftZLja4FY1ro5l4aU6B5_jvtnkzSUSE4cjwVuVYIq075PjURpQq9XWzOWpCRruy2TV0GNv6SZuIV4Ikse5nVqNWVQhOTCQxB8ey4iMKeZy4VdDoEccOiCA54C8v1DfjjFeLGAvTbVhnHoWhf1uo29gr8Cm9f9uxi-mfYtSyZC9I0-QgAwGNwJQWizsAjaSeXxylON728syGHz7OsS-SNmAtD6Zi56w9f9pSW7mQIGHHPDuykN1D3WqOiKLnou_K7I4G-15MstBPKX8txwcvzALvsa6fvtBEX86RpBZ3stbARzmBdiLiTseOTRUea3Abih1LekN6r_O37cVRLKeUgaACWZSxtjIkTJYfZs6lKx5UQmXbj1JpmU2erxTrTqlly49aLjx0O3Gs2LtgzMQ7WUNMz1El8riTXZ_xluEFO_dlZIbbaYZUZ84JaI7oHUqWHz-STcUOSxxB54nlUE_vPSBI_U2zQrmRpGMf2Erlk3DPRZI.sybhMO5e_yXEn08ydMEsBA \ No newline at end of file diff --git a/e2e-tests/e2e-tests/deploy_addresses.json b/e2e-tests/e2e-tests/deploy_addresses.json new file mode 100644 index 00000000..ef6b7847 --- /dev/null +++ b/e2e-tests/e2e-tests/deploy_addresses.json @@ -0,0 +1,54 @@ +{ + "generatedAt": "2026-03-26T17:11:26Z", + "contracts": { + "WPC": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", + "Factory": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", + "SwapRouter": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", + "QuoterV2": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", + "PositionManager": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5" + }, + "tokens": [ + { + "name": "pETH.eth", + "symbol": "pETH", + "address": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "USDT.eth", + "symbol": "USDT.eth", + "address": "0x6a20557430be6412AF423681e35CC96797506F3a", + "source": "core-contracts", + "decimals": 6 + }, + { + "name": "pETH.base", + "symbol": "pETH.base", + "address": "0xCcd71bc096E2225048cD167447e164E8571BcCA6", + "source": 
"core-contracts", + "decimals": 18 + }, + { + "name": "pETH.arb", + "symbol": "pETH.arb", + "address": "0xE74A512688E53d6Ed2cf64a327fABE8ECE27aDD6", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pBNB", + "symbol": "pBNB", + "address": "0x2ddB499C3a35a60c809d878eFf5Fa248bb5eAdbd", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pSOL", + "symbol": "pSOL", + "address": "0x31F3Dcb417970EBe9AC1e254Ee42b91e49e30EE2", + "source": "core-contracts", + "decimals": 9 + } + ] +} diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 4451465b..1c980d7f 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -377,6 +377,78 @@ sdk_test_files() { done } +sdk_sync_localnet_constants() { + require_cmd jq perl + + local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local sdk_utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + local orchestrator_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/orchestrator/orchestrator.ts" + + if [[ ! 
-f "$chain_constants_file" ]]; then + log_err "SDK chain constants file not found: $chain_constants_file" + exit 1 + fi + + ensure_deploy_file + + local peth peth_arb peth_base pbnb psol usdt_eth usdt_bnb + peth="$(address_from_deploy_token "pETH")" + peth_arb="$(address_from_deploy_token "pETH.arb")" + peth_base="$(address_from_deploy_token "pETH.base")" + pbnb="$(address_from_deploy_token "pBNB")" + psol="$(address_from_deploy_token "pSOL")" + usdt_eth="$(address_from_deploy_token "USDT.eth")" + usdt_bnb="$(address_from_deploy_token "USDT.bnb")" + + [[ -n "$peth" ]] || peth="0xTBD" + [[ -n "$peth_arb" ]] || peth_arb="0xTBD" + [[ -n "$peth_base" ]] || peth_base="0xTBD" + [[ -n "$pbnb" ]] || pbnb="0xTBD" + [[ -n "$psol" ]] || psol="0xTBD" + [[ -n "$usdt_eth" ]] || usdt_eth="0xTBD" + [[ -n "$usdt_bnb" ]] || usdt_bnb="$usdt_eth" + + PETH_ADDR="$peth" \ + PETH_ARB_ADDR="$peth_arb" \ + PETH_BASE_ADDR="$peth_base" \ + PBNB_ADDR="$pbnb" \ + PSOL_ADDR="$psol" \ + USDT_ETH_ADDR="$usdt_eth" \ + USDT_BNB_ADDR="$usdt_bnb" \ + perl -0pi -e ' + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_ARB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_ARB_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_BASE:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_BASE_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PBNB_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pSOL:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PSOL_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_ETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_ETH_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_BNB_ADDR}'\''#s; + ' "$chain_constants_file" + + if [[ -f "$orchestrator_file" ]]; then + perl -0pi -e "s/return '\\Q0x00000000000000000000000000000000000000C0\\E';/return 
'0x00000000000000000000000000000000000000C1';/g" "$orchestrator_file" + fi + + # Force SDK test chains to local anvil/surfpool endpoints for LOCAL testing. + perl -0pi -e ' + s#(\[CHAIN\.ETHEREUM_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9545'\''],$2#s; + s#(\[CHAIN\.ETHEREUM_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9545'\''#s; + s#(\[CHAIN\.ARBITRUM_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9546'\''],$2#s; + s#(\[CHAIN\.ARBITRUM_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9546'\''#s; + s#(\[CHAIN\.BASE_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9547'\''],$2#s; + s#(\[CHAIN\.BASE_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9547'\''#s; + s#(\[CHAIN\.BNB_TESTNET\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9548'\''],$2#s; + s#(\[CHAIN\.BNB_TESTNET\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9548'\''#s; + s#(\[CHAIN\.SOLANA_DEVNET\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:8899'\''],$2#s; + ' "$chain_constants_file" + + if [[ -f "$sdk_utils_file" ]]; then + perl -0pi -e "s/\[PUSH_NETWORK\\.LOCALNET\]:\s*\[\s*CHAIN\\.PUSH_TESTNET_DONUT,/\[PUSH_NETWORK.LOCALNET\]: [CHAIN.PUSH_LOCALNET,/g" "$sdk_utils_file" + fi + + log_ok "Synced SDK LOCALNET synthetic token constants from deploy addresses" +} + sdk_prepare_test_files_for_localnet() { require_cmd perl @@ -396,6 +468,12 @@ sdk_prepare_test_files_for_localnet() { perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$test_file" log_ok "Prepared LOCALNET network replacement in $(basename "$test_file")" done < 
<(sdk_test_files) + + while IFS= read -r outbound_file; do + [[ -n "$outbound_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$outbound_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$outbound_file")" + done < <(find "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" -type f -name '*.spec.ts' | sort) } step_setup_push_chain_sdk() { @@ -438,6 +516,8 @@ step_setup_push_chain_sdk() { exit 1 fi + sdk_sync_localnet_constants + log_info "Fetching UEA_PROXY_IMPLEMENTATION from local chain" uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" @@ -591,13 +671,17 @@ step_setup_environment() { local port="$2" local chain_id="$3" local fork_url="$4" - local anvil_pattern="anvil --port $port" - if pgrep -f "$anvil_pattern" >/dev/null 2>&1; then - log_info "Stopping existing anvil $label on port $port" - pkill -f "$anvil_pattern" >/dev/null 2>&1 || true - sleep 1 - fi + # Kill any process that is currently bound to the target port. + # This avoids stale fork nodes when the command-line pattern changes. 
+ local pid + while IFS= read -r pid; do + [[ -n "$pid" ]] || continue + log_info "Stopping process $pid on port $port before starting anvil $label" + kill "$pid" >/dev/null 2>&1 || true + done < <(lsof -ti tcp:"$port" 2>/dev/null || true) + + sleep 1 log_info "Starting anvil $label on port $port (chain-id: $chain_id)" nohup anvil --host 0.0.0.0 --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ @@ -659,7 +743,8 @@ step_setup_environment() { start_anvil_fork "sepolia" "9545" "11155111" "https://ethereum-sepolia-rpc.publicnode.com" start_anvil_fork "arbitrum" "9546" "421614" "https://arbitrum-sepolia.gateway.tenderly.co" start_anvil_fork "base" "9547" "84532" "https://sepolia.base.org" - start_anvil_fork "bsc" "9548" "97" "https://bsc-testnet-rpc.publicnode.com" + # Use the configured BSC endpoint for anvil forking. + start_anvil_fork "bsc" "9548" "97" "https://bnb-testnet.g.alchemy.com/v2/peQmTO8MjpoK5Czw4HwRp" start_surfpool patch_local_testnet_donut_chain_configs @@ -1244,11 +1329,23 @@ step_setup_gateway() { require_cmd git forge [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } - clone_or_update_repo "$GATEWAY_REPO" "$GATEWAY_BRANCH" "$GATEWAY_DIR" + local gateway_repo_dir="$GATEWAY_DIR" + local sibling_gateway_dir="$PUSH_CHAIN_DIR/../push-chain-gateway-contracts" + + # Some local setups accidentally resolve GATEWAY_DIR under push-chain/ itself. + # Prefer a repo path that actually contains the localSetup gateway scripts. + if [[ -d "$sibling_gateway_dir/contracts/evm-gateway" ]]; then + if [[ ! -d "$gateway_repo_dir/contracts/evm-gateway" || ( ! -f "$gateway_repo_dir/contracts/evm-gateway/script/localSetup/setup.s.sol" && ! -f "$gateway_repo_dir/contracts/evm-gateway/scripts/localSetup/setup.s.sol" && ! 
-f "$gateway_repo_dir/contracts/evm-gateway/localSetup/setup.s.sol" ) ]]; then + log_warn "Switching gateway repo dir to sibling path: $sibling_gateway_dir" + gateway_repo_dir="$sibling_gateway_dir" + fi + fi + + clone_or_update_repo "$GATEWAY_REPO" "$GATEWAY_BRANCH" "$gateway_repo_dir" log_info "Preparing gateway repo submodules" ( - cd "$GATEWAY_DIR" + cd "$gateway_repo_dir" if [[ -d "contracts/svm-gateway/mock-pyth" ]]; then git rm --cached contracts/svm-gateway/mock-pyth || true rm -rf contracts/svm-gateway/mock-pyth @@ -1256,19 +1353,31 @@ step_setup_gateway() { git submodule update --init --recursive ) - local gw_dir="$GATEWAY_DIR/contracts/evm-gateway" + local gw_dir="$gateway_repo_dir/contracts/evm-gateway" + local gw_setup_script="" local gw_log="$LOG_DIR/gateway_setup_$(date +%Y%m%d_%H%M%S).log" local failed=0 local resume_attempt=1 local resume_max_attempts="${GATEWAY_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + if [[ -f "$gw_dir/script/localSetup/setup.s.sol" ]]; then + gw_setup_script="script/localSetup/setup.s.sol" + elif [[ -f "$gw_dir/scripts/localSetup/setup.s.sol" ]]; then + gw_setup_script="scripts/localSetup/setup.s.sol" + elif [[ -f "$gw_dir/localSetup/setup.s.sol" ]]; then + gw_setup_script="localSetup/setup.s.sol" + else + log_err "Gateway setup script not found under $gw_dir/(script|scripts)/localSetup/setup.s.sol or $gw_dir/localSetup/setup.s.sol" + exit 1 + fi + log_info "Building gateway evm contracts" (cd "$gw_dir" && forge build) log_info "Running gateway local setup script" ( cd "$gw_dir" - forge script scripts/localSetup/setup.s.sol \ + forge script "$gw_setup_script" \ --broadcast \ --rpc-url "$PUSH_RPC_URL" \ --private-key "$PRIVATE_KEY" \ @@ -1281,7 +1390,7 @@ step_setup_gateway() { log_info "Gateway resume attempt: $resume_attempt" if ( cd "$gw_dir" - forge script scripts/localSetup/setup.s.sol \ + forge script "$gw_setup_script" \ --broadcast \ --rpc-url "$PUSH_RPC_URL" \ --private-key "$PRIVATE_KEY" \ @@ -1301,6 +1410,113 @@ 
step_setup_gateway() { done fi + # Ensure canonical local precompile proxy wiring used by SDK tests: + # C1 = UniversalGatewayPC proxy, B0 = VaultPC proxy, C0 = UniversalCore. + # Some gateway repo branches configure B0 only; this post-step self-heals C1. + local C0="0x00000000000000000000000000000000000000C0" + local C1="0x00000000000000000000000000000000000000C1" + local B0="0x00000000000000000000000000000000000000B0" + local C1_PROXY_ADMIN="0xf2000000000000000000000000000000000000c1" + local OWNER_ADDR="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" + + log_info "Verifying C1 UniversalGatewayPC wiring" + if ! cast call "$C1" 'UNIVERSAL_CORE()(address)' --rpc-url "$PUSH_RPC_URL" >/dev/null 2>&1; then + log_warn "C1.UNIVERSAL_CORE() reverted. Repairing C1 proxy implementation + initialize" + + # Reuse implementation currently behind B0 proxy (same UniversalGatewayPC bytecode family). + local impl_slot impl_word impl_addr + impl_slot="0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc" + impl_word="$(cast storage "$B0" "$impl_slot" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + impl_addr="0x$(echo "$impl_word" | sed -E 's/^0x//; s/^.{24}//' | tr -d '\n')" + if ! 
validate_eth_address "$impl_addr"; then + log_err "Failed to resolve UniversalGatewayPC implementation from B0 proxy slot" + exit 1 + fi + + cast send "$C1_PROXY_ADMIN" 'upgradeAndCall(address,address,bytes)' \ + "$C1" "$impl_addr" "0x" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null + + cast send "$C1" 'initialize(address,address,address,address)' \ + "$OWNER_ADDR" "$OWNER_ADDR" "$C0" "$B0" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + + cast send "$C0" 'setUniversalGatewayPC(address)' "$C1" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + fi + + local c1_uc c0_ug c1_uc_lc c0_ug_lc c0_lc c1_lc + c1_uc="$(cast call "$C1" 'UNIVERSAL_CORE()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + c0_ug="$(cast call "$C0" 'universalGatewayPC()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + + # If C1 is initialized but C0 is not linked yet, repair linkage explicitly. + if [[ -n "$c1_uc" && -n "$c0_ug" ]]; then + local c1_uc_tmp c0_ug_tmp c0_lc_tmp c1_lc_tmp + c1_uc_tmp="$(echo "$c1_uc" | tr '[:upper:]' '[:lower:]')" + c0_ug_tmp="$(echo "$c0_ug" | tr '[:upper:]' '[:lower:]')" + c0_lc_tmp="$(echo "$C0" | tr '[:upper:]' '[:lower:]')" + c1_lc_tmp="$(echo "$C1" | tr '[:upper:]' '[:lower:]')" + + if [[ "$c1_uc_tmp" == "$c0_lc_tmp" && "$c0_ug_tmp" != "$c1_lc_tmp" ]]; then + log_warn "C0.universalGatewayPC is not linked to C1. 
Repairing linkage" + cast send "$C0" 'setUniversalGatewayPC(address)' "$C1" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + + c0_ug="$(cast call "$C0" 'universalGatewayPC()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + fi + fi + + c1_uc_lc="$(echo "$c1_uc" | tr '[:upper:]' '[:lower:]')" + c0_ug_lc="$(echo "$c0_ug" | tr '[:upper:]' '[:lower:]')" + c0_lc="$(echo "$C0" | tr '[:upper:]' '[:lower:]')" + c1_lc="$(echo "$C1" | tr '[:upper:]' '[:lower:]')" + if [[ "$c1_uc_lc" != "$c0_lc" || "$c0_ug_lc" != "$c1_lc" ]]; then + log_err "Gateway wiring invalid after setup: C1.UNIVERSAL_CORE=$c1_uc, C0.universalGatewayPC=$c0_ug" + exit 1 + fi + + local manager_role has_manager + manager_role="$(cast keccak 'MANAGER_ROLE')" + has_manager="$(cast call "$C0" 'hasRole(bytes32,address)(bool)' "$manager_role" "$OWNER_ADDR" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "false")" + + if [[ "$has_manager" != "true" ]]; then + cast send "$C0" 'grantRole(bytes32,address)' "$manager_role" "$OWNER_ADDR" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + fi + + # Seed gas-token mapping for each deployed gas token PRC20 (p* symbols). + if [[ -s "$DEPLOY_ADDRESSES_FILE" ]]; then + while IFS=$'\t' read -r symbol token_addr; do + [[ -n "$symbol" && -n "$token_addr" ]] || continue + local chain_ns + chain_ns="$(cast call "$token_addr" 'SOURCE_CHAIN_NAMESPACE()(string)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "")" + [[ -n "$chain_ns" ]] || continue + + cast send "$C0" 'setGasTokenPRC20(string,address)' "$chain_ns" "$token_addr" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + done < <(jq -r '.tokens[]? | select((.symbol // "") | startswith("p")) | [.symbol, .address] | @tsv' "$DEPLOY_ADDRESSES_FILE") + fi + + # Ensure non-zero base gas limits so sendUniversalTxOutbound(req.gasLimit=0) + # can resolve a valid fee quote through UniversalCore. 
+ local base_gas + base_gas="$(cast call "$C0" 'BASE_GAS_LIMIT()(uint256)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "")" + if [[ -z "$base_gas" || "$base_gas" == "0" ]]; then + log_warn "UniversalCore BASE_GAS_LIMIT is 0. Applying local defaults for outbound chains" + + for ns in "eip155:11155111" "eip155:421614" "eip155:84532" "eip155:97" "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"; do + cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" 21000 \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + done + fi + log_ok "Gateway setup complete" } @@ -1622,6 +1838,200 @@ step_configure_universal_core() { done } +step_deploy_counter_and_sync_sdk() { + require_cmd cast perl + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + local sdk_counter_addr_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/push-chain/helpers/addresses.ts" + local counter_creation_code="0x6080604052348015600e575f5ffd5b506102068061001c5f395ff3fe608060405260043610610042575f3560e01c806312065fe01461004d5780639b0e94af14610077578063d09de08a146100a1578063d826f88f146100ab57610049565b3661004957005b5f5ffd5b348015610058575f5ffd5b506100616100c1565b60405161006e9190610157565b60405180910390f35b348015610082575f5ffd5b5061008b6100c8565b6040516100989190610157565b60405180910390f35b6100a96100cd565b005b3480156100b6575f5ffd5b506100bf610137565b005b5f47905090565b5f5481565b60015f5f8282546100de919061019d565b925050819055503373ffffffffffffffffffffffffffffffffffffffff165f547fb6aa5bfdc1ab753194658fada8fa1725a667cdea7df54bd400f8bced617dfd4c3460405161012d9190610157565b60405180910390a3565b5f5f81905550565b5f819050919050565b6101518161013f565b82525050565b5f60208201905061016a5f830184610148565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f6101a78261013f565b91506101b28361013f565b92508282019050808211156101ca576101c9610170565b5b9291505056fea26469706673582212204acec08331d08192e4797fc12653c602c2ca1574d4
4468713f91a095fdefe6d564736f6c634300081e0033" + + if [[ ! -f "$sdk_counter_addr_file" ]]; then + log_err "SDK counter addresses file not found: $sdk_counter_addr_file" + exit 1 + fi + + log_info "Deploying CounterPayable contract on Push localnet" + local deploy_out counter_addr + deploy_out="$(cast send --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --create "$counter_creation_code" 2>&1)" || { + log_err "Counter deployment failed" + echo "$deploy_out" + exit 1 + } + + counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')" + if ! validate_eth_address "$counter_addr"; then + log_err "Could not parse deployed counter contract address from cast output" + echo "$deploy_out" + exit 1 + fi + + ensure_deploy_file + record_contract "COUNTER_ADDRESS_PAYABLE" "$counter_addr" + + COUNTER_ADDR="$counter_addr" perl -0pi -e ' + if (/COUNTER_ADDRESS_PAYABLE/s) { + s/0x[a-fA-F0-9]{40}/$ENV{COUNTER_ADDR}/; + } + ' "$sdk_counter_addr_file" + + if ! grep -q "$counter_addr" "$sdk_counter_addr_file"; then + log_err "Failed to sync COUNTER_ADDRESS_PAYABLE in $sdk_counter_addr_file" + exit 1 + fi + + log_ok "Deployed CounterPayable: $counter_addr" + log_ok "Synced SDK COUNTER_ADDRESS_PAYABLE in $sdk_counter_addr_file" +} + +step_ensure_tss_key_ready() { + require_cmd docker + + if ! docker ps --format '{{.Names}}' | grep -qx 'core-validator-1'; then + log_warn "core-validator-1 is not running; skipping TSS key readiness check" + return 0 + fi + + log_info "Checking current UTSS key readiness" + if docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json >/dev/null 2>&1; then + log_ok "UTSS current key already exists" + return 0 + fi + + log_warn "UTSS current key missing (no_key). Initiating TSS keygen..." 
+ ( + cd "$LOCAL_DEVNET_DIR" + ./devnet tss-keygen + ) + + local i + for i in {1..36}; do + if docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json >/dev/null 2>&1; then + log_ok "UTSS current key is ready" + return 0 + fi + sleep 5 + done + + log_err "UTSS current key was not finalized after keygen" + if docker ps --format '{{.Names}}' | grep -qx 'universal-validator-1'; then + log_warn "Dumping recent universal-validator-1 logs for diagnosis" + docker logs --tail 300 universal-validator-1 2>&1 || true + fi + + log_err "TSS keygen did not complete. If logs show wrapper/go-dkls keygen panic, fix UV dkls setup before running Route-3 tests." + exit 1 +} + +step_bootstrap_cea_for_sdk_signer() { + require_cmd node + + local sdk_env_file="$PUSH_CHAIN_SDK_DIR/packages/core/.env" + if [[ ! -f "$sdk_env_file" ]]; then + log_warn "SDK env file not found at $sdk_env_file; running setup-sdk first" + step_setup_push_chain_sdk + fi + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR" ]]; then + log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR" + exit 1 + fi + + log_info "Bootstrapping CEA deployment for SDK signer on BSC testnet fork" + if ! 
( + cd "$PUSH_CHAIN_SDK_DIR" + node -r @swc-node/register <<'NODE' +const path = require('path'); +require('dotenv').config({ path: path.resolve(process.cwd(), 'packages/core/.env') }); + +const { PushChain } = require('./packages/core/src'); +const { createWalletClient, http, parseEther } = require('viem'); +const { privateKeyToAccount } = require('viem/accounts'); +const { CHAIN_INFO } = require('./packages/core/src/lib/constants/chain'); +const { CHAIN, PUSH_NETWORK } = require('./packages/core/src/lib/constants/enums'); +const { getCEAAddress } = require('./packages/core/src/lib/orchestrator/cea-utils'); + +async function main() { + const evmPrivateKey = process.env.EVM_PRIVATE_KEY; + const pushPrivateKey = process.env.PUSH_PRIVATE_KEY; + if (!evmPrivateKey) { + throw new Error('EVM_PRIVATE_KEY is missing in packages/core/.env'); + } + if (!pushPrivateKey) { + throw new Error('PUSH_PRIVATE_KEY is missing in packages/core/.env'); + } + + // Derive the target UEA account from the EVM key (the same identity used by cea-to-uea tests). + const evmAccount = privateKeyToAccount(evmPrivateKey); + const evmWalletClient = createWalletClient({ + account: evmAccount, + transport: http(CHAIN_INFO[CHAIN.ETHEREUM_SEPOLIA].defaultRPC[0]), + }); + + const evmUniversalSigner = await PushChain.utils.signer.toUniversalFromKeypair(evmWalletClient, { + chain: CHAIN.ETHEREUM_SEPOLIA, + library: PushChain.CONSTANTS.LIBRARY.ETHEREUM_VIEM, + }); + const evmClient = await PushChain.initialize(evmUniversalSigner, { + network: PUSH_NETWORK.LOCALNET, + printTraces: false, + }); + const targetUea = evmClient.universal.account; + + // Use a native Push signer to bootstrap the CEA deployment/funding for that target UEA. 
+ const pushAccount = privateKeyToAccount(pushPrivateKey); + const pushWalletClient = createWalletClient({ + account: pushAccount, + transport: http(CHAIN_INFO[CHAIN.PUSH_LOCALNET].defaultRPC[0]), + }); + + const pushUniversalSigner = await PushChain.utils.signer.toUniversalFromKeypair(pushWalletClient, { + chain: CHAIN.PUSH_LOCALNET, + library: PushChain.CONSTANTS.LIBRARY.ETHEREUM_VIEM, + }); + const pushClient = await PushChain.initialize(pushUniversalSigner, { + network: PUSH_NETWORK.LOCALNET, + printTraces: false, + }); + + let ceaResult = await getCEAAddress(targetUea, CHAIN.BNB_TESTNET); + console.log(`CEA bootstrap pre-check: targetUEA=${targetUea} cea=${ceaResult.cea} deployed=${ceaResult.isDeployed}`); + + if (!ceaResult.isDeployed) { + const tx = await pushClient.universal.sendTransaction({ + to: { address: ceaResult.cea, chain: CHAIN.BNB_TESTNET }, + value: parseEther('0.00005'), + }); + const receipt = await tx.wait(); + console.log(`CEA bootstrap tx: hash=${tx.hash} status=${receipt.status} external=${receipt.externalTxHash || 'n/a'}`); + + ceaResult = await getCEAAddress(targetUea, CHAIN.BNB_TESTNET); + console.log(`CEA bootstrap post-check: deployed=${ceaResult.isDeployed}`); + } + + if (!ceaResult.isDeployed) { + throw new Error('CEA is still not deployed after bootstrap transaction'); + } +} + +main().catch((err) => { + const msg = err && err.message ? 
err.message : String(err); + console.error(msg); + process.exit(1); +}); +NODE + ); then + log_err "CEA bootstrap step failed" + + if docker ps --format '{{.Names}}' | grep -qx 'universal-validator-1'; then + log_warn "Dumping recent universal-validator-1 logs for diagnosis" + docker logs --tail 200 universal-validator-1 2>&1 || true + fi + exit 1 + fi + + log_ok "CEA bootstrap complete" +} + cmd_all() { if is_local_testing_env; then step_setup_environment @@ -1634,6 +2044,7 @@ cmd_all() { if is_local_testing_env; then step_setup_environment fi + step_ensure_tss_key_ready step_recover_genesis_key step_fund_account step_setup_core_contracts @@ -1646,6 +2057,8 @@ cmd_all() { step_update_eth_token_config step_setup_gateway step_add_uregistry_configs + step_bootstrap_cea_for_sdk_signer + step_deploy_counter_and_sync_sdk } cmd_show_help() { @@ -1667,6 +2080,9 @@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) + ensure-tss-key Ensure UTSS current key exists (runs keygen and waits until finalized) + bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) + deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE setup-sdk Clone/setup push-chain-sdk, generate SDK .env from e2e .env, and install dependencies sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts @@ -1719,6 +2135,9 @@ main() { write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; setup-gateway) step_setup_gateway ;; + ensure-tss-key) step_ensure_tss_key_ready ;; + bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; + deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; 
setup-sdk) step_setup_push_chain_sdk ;; sdk-test-all) step_run_sdk_tests_all ;; sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index 0e3c58b6..5b1c6757 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -183,11 +183,13 @@ check_grants() { local granter_addr=$(docker exec "core-validator-$i" pchaind keys show "validator-$i" -a --keyring-backend test 2>/dev/null || echo "") if [ -n "$hotkey_addr" ] && [ -n "$granter_addr" ]; then - local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.grants | length' 2>/dev/null || echo "0") + local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") if [ "$count" = "0" ] || [ -z "$count" ]; then echo "no|[no]" + elif [ "$count" -lt "4" ] 2>/dev/null; then + echo "no|[no] ${count}/4" else - echo "yes|[ok] ${count}" + echo "yes|[ok] ${count}/4" fi else echo "no|[no]" @@ -887,42 +889,63 @@ cmd_setup_uvalidators() { sleep 3 - # Setup hotkey (use pre-generated hotkey-$i from setup-universal.sh) + # Resolve hotkey. Prefer container keyring, fallback to shared generated accounts. 
echo -ne " ${YELLOW}Setting up hotkey:${NC} " local hotkey_name="hotkey-$i" - local HOTKEY_ADDR=$(docker exec universal-validator-$i puniversald keys show "$hotkey_name" -a --keyring-backend test --home /root/.puniversal 2>/dev/null || echo "") + local HOTKEY_ADDR + HOTKEY_ADDR=$(docker exec universal-validator-$i puniversald keys show "$hotkey_name" -a --keyring-backend test --home /root/.puniversal 2>/dev/null || echo "") - if [ -z "$HOTKEY_ADDR" ]; then - echo -e "${RED}Hotkey not found - validator may not be initialized${NC}" + if [ -z "$HOTKEY_ADDR" ] && docker exec core-validator-1 test -f /tmp/push-accounts/hotkeys.json >/dev/null 2>&1; then + HOTKEY_ADDR=$(docker exec core-validator-1 sh -lc "jq -r '.[${i}-1].address' /tmp/push-accounts/hotkeys.json" 2>/dev/null || echo "") + if [ -n "$HOTKEY_ADDR" ] && [ "$HOTKEY_ADDR" != "null" ]; then + echo -e "${YELLOW}Using fallback shared hotkey-$i${NC}" + fi + fi + + if [ -z "$HOTKEY_ADDR" ] || [ "$HOTKEY_ADDR" = "null" ]; then + echo -e "${RED}Hotkey not found for validator-$i${NC}" continue + fi + + if [ -z "${HOTKEY_ADDR##push1*}" ]; then + : else - echo -e "${GREEN}Using hotkey-$i${NC}" + echo -e "${RED}Invalid hotkey address for validator-$i: $HOTKEY_ADDR${NC}" + continue fi - echo -e " Hotkey: ${CYAN}$HOTKEY_ADDR${NC}" - # Grant MsgVoteInbound - echo -ne " ${YELLOW}Granting MsgVoteInbound:${NC} " - docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ - --msg-type=/uexecutor.v1.MsgVoteInbound \ - --from validator-$i \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 200000000000000upc \ - --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" + echo -e "${GREEN}Using hotkey-$i${NC}" + echo -e " Hotkey: ${CYAN}$HOTKEY_ADDR${NC}" - sleep 2 + # Grant required AuthZ messages (must match PushSigner expectations) + for msg_type in \ + /uexecutor.v1.MsgVoteInbound \ + /uexecutor.v1.MsgVoteChainMeta \ + /uexecutor.v1.MsgVoteOutbound 
\ + /utss.v1.MsgVoteTssKeyProcess + do + local msg_name=$(basename "$msg_type") + echo -ne " ${YELLOW}Granting ${msg_name}:${NC} " + docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ + --msg-type="$msg_type" \ + --from validator-$i \ + --chain-id localchain_9000-1 \ + --node tcp://core-validator-1:26657 \ + --keyring-backend test \ + --fees 200000000000000upc \ + --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" - # Grant MsgVoteGasPrice - echo -ne " ${YELLOW}Granting MsgVoteGasPrice:${NC} " - docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ - --msg-type=/uexecutor.v1.MsgVoteGasPrice \ - --from validator-$i \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 200000000000000upc \ - --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" + sleep 1 + done - sleep 2 + # Verify all required grants are visible on the canonical RPC endpoint. + local grant_count + grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . 
== "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") + if [ "$grant_count" -lt 4 ]; then + echo -e " ${YELLOW}Warning: only ${grant_count}/4 required grants visible for validator-$i${NC}" + else + echo -e " ${GREEN}Verified 4/4 required grants${NC}" + fi echo -e " ${GREEN}Validator $i setup complete${NC}" echo diff --git a/local-multi-validator/scripts/setup-genesis-auto.sh b/local-multi-validator/scripts/setup-genesis-auto.sh index fce78961..9b89c34c 100755 --- a/local-multi-validator/scripts/setup-genesis-auto.sh +++ b/local-multi-validator/scripts/setup-genesis-auto.sh @@ -444,10 +444,11 @@ if [ -f "$HOTKEYS_FILE" ]; then echo " Granter: $VALIDATOR_ADDR" echo " Grantee: $HOTKEY_ADDR" - # Grant all 4 message types for this validator + # Grant all required message types for this validator. + # Keep this list aligned with universalClient/constant/constant.go (RequiredMsgGrants). for MSG_TYPE in \ "/uexecutor.v1.MsgVoteInbound" \ - "/uexecutor.v1.MsgVoteGasPrice" \ + "/uexecutor.v1.MsgVoteChainMeta" \ "/uexecutor.v1.MsgVoteOutbound" \ "/utss.v1.MsgVoteTssKeyProcess" do diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 2554ba37..0f468fc9 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -9,6 +9,14 @@ UNIVERSAL_ID=${UNIVERSAL_ID:-"1"} CORE_VALIDATOR_GRPC=${CORE_VALIDATOR_GRPC:-"core-validator-1:9090"} QUERY_PORT=${QUERY_PORT:-8080} +# In LOCAL devnet, use a single canonical gRPC endpoint for startup validation +# to avoid transient per-node state skew during UV boot. 
+if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -n "${LOCAL_CANONICAL_CORE_GRPC:-}" ]; then + CORE_VALIDATOR_GRPC="$LOCAL_CANONICAL_CORE_GRPC" +elif [ "${TESTING_ENV:-}" = "LOCAL" ]; then + CORE_VALIDATOR_GRPC="core-validator-1:9090" +fi + # Paths BINARY="/usr/bin/puniversald" HOME_DIR="/root/.puniversal" @@ -235,6 +243,20 @@ set_chain_event_start_from "eip155:421614" "Arbitrum Sepolia" "${ARBITRUM_EVENT_ set_chain_event_start_from "eip155:97" "BSC testnet" "${BSC_EVENT_START_FROM:-}" set_chain_event_start_from "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "Solana devnet" "${SOLANA_EVENT_START_FROM:-}" +# Always align Push localchain scanning start with current height in local environments. +# Default config uses a high static start block for public networks, which would skip +# all events on fresh local devnets if left unchanged. +LOCALCHAIN_CHAIN_ID="localchain_9000-1" +LOCALCHAIN_START_FROM=$BLOCK_HEIGHT +if [ "$LOCALCHAIN_START_FROM" -gt 20 ]; then + LOCALCHAIN_START_FROM=$((LOCALCHAIN_START_FROM - 20)) +fi +echo "📍 Setting Push localchain event_start_from: $LOCALCHAIN_START_FROM" +jq --arg chain "$LOCALCHAIN_CHAIN_ID" --argjson height "$LOCALCHAIN_START_FROM" \ + '.chain_configs[$chain].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + # --------------------------- # === SET CORE VALOPER ADDRESS === # --------------------------- @@ -316,12 +338,13 @@ fi echo "🔐 Waiting for AuthZ grants to be created by core validator..." 
echo "📝 Core validators create AuthZ grants after UV registration completes" -echo "📋 Required grants: MsgVoteInbound, MsgVoteGasPrice, MsgVoteOutbound, MsgVoteTssKeyProcess" +echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound, MsgVoteTssKeyProcess" # Get the hotkey address HOTKEY_ADDR=$($BINARY keys show "$HOTKEY_NAME" --address --keyring-backend test --home "$HOME_DIR" 2>/dev/null || echo "") -# Required number of grants (4 message types) +# Required message types (must match PushSigner validation requirements) +REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' REQUIRED_GRANTS=4 # Query core-validator-1 for grants (genesis validator creates ALL grants immediately) @@ -331,29 +354,30 @@ if [ -n "$HOTKEY_ADDR" ]; then echo "🔍 Checking for AuthZ grants for hotkey: $HOTKEY_ADDR" echo "📡 Querying grants from: $GRANTS_QUERY_HOST:1317" - # Wait for all 4 AuthZ grants (should be fast - genesis validator creates all grants) + # Wait for all required AuthZ grants (should be fast - genesis validator creates all grants) max_wait=20 wait_time=0 - GRANTS_COUNT=0 + MATCHED_GRANTS=0 while [ $wait_time -lt $max_wait ]; do - # Query grants from genesis validator - GRANTS_COUNT=$(curl -s "http://$GRANTS_QUERY_HOST:1317/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null | jq -r '.grants | length' 2>/dev/null || echo "0") + # Query grants and count only required message types + MATCHED_GRANTS=$(curl -s "http://$GRANTS_QUERY_HOST:1317/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null | \ + jq -r --argjson required "$REQUIRED_MSG_TYPES" '[.grants[]? | select(.authorization.value.msg as $m | $required | index($m))] | length' 2>/dev/null || echo "0") - if [ "$GRANTS_COUNT" -ge "$REQUIRED_GRANTS" ] 2>/dev/null; then - echo "✅ Found all $GRANTS_COUNT/$REQUIRED_GRANTS required AuthZ grants!" 
+ if [ "$MATCHED_GRANTS" -ge "$REQUIRED_GRANTS" ] 2>/dev/null; then + echo "✅ Found all $MATCHED_GRANTS/$REQUIRED_GRANTS required AuthZ grants!" break fi # Show progress every 5 seconds if [ $((wait_time % 5)) -eq 0 ]; then - echo "⏳ Waiting for AuthZ grants... ($GRANTS_COUNT/$REQUIRED_GRANTS) (${wait_time}s / ${max_wait}s)" + echo "⏳ Waiting for AuthZ grants... ($MATCHED_GRANTS/$REQUIRED_GRANTS) (${wait_time}s / ${max_wait}s)" fi sleep 1 wait_time=$((wait_time + 1)) done - if [ "$GRANTS_COUNT" -lt "$REQUIRED_GRANTS" ] 2>/dev/null; then - echo "⚠️ Only found $GRANTS_COUNT/$REQUIRED_GRANTS grants after ${max_wait}s" + if [ "$MATCHED_GRANTS" -lt "$REQUIRED_GRANTS" ] 2>/dev/null; then + echo "⚠️ Only found $MATCHED_GRANTS/$REQUIRED_GRANTS required grants after ${max_wait}s" echo " The universal validator may fail startup validation if grants are missing." fi else From 711b88f68d818b3985e70ef3dd758eea3643a388 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Mar 2026 15:13:30 +0530 Subject: [PATCH 27/61] tss keygen removed temporarily --- e2e-tests/deploy_addresses.json | 4 +- e2e-tests/setup.sh | 97 ++++++++++--------- local-multi-validator/devnet | 19 ++-- .../scripts/setup-universal.sh | 6 +- 4 files changed, 64 insertions(+), 62 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 68056856..d42465fd 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-03-27T07:03:55Z", + "generatedAt": "2026-03-27T08:28:17Z", "contracts": { "WPC": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", "Factory": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", @@ -7,7 +7,7 @@ "QuoterV2": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", "PositionManager": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5", "UEA_PROXY_IMPLEMENTATION": "0x2C297101b7d3e0911296b9A64d106684a161b4C9", - "COUNTER_ADDRESS_PAYABLE": 
"0x88449CaC4DFd2FA0FFbC90Fd1Ea9F2a6FDc690F7" + "COUNTER_ADDRESS_PAYABLE": "0x7e875e1384030d8b22Eb359C3Fce940D2882643e" }, "tokens": [ { diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 1c980d7f..92e42f0f 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -35,6 +35,7 @@ fi : "${GATEWAY_BRANCH:=e2e-push-node}" : "${PUSH_CHAIN_SDK_REPO:=https://github.com/pushchain/push-chain-sdk.git}" : "${PUSH_CHAIN_SDK_BRANCH:=outbound_changes}" +: "${PREFER_SIBLING_REPO_DIRS:=true}" : "${E2E_PARENT_DIR:=../}" : "${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" @@ -90,6 +91,49 @@ log_ok() { printf "%b\n" "${green}✓${nc} $*"; } log_warn() { printf "%b\n" "${yellow}!${nc} $*"; } log_err() { printf "%b\n" "${red}x${nc} $*"; } +normalize_path() { + local path="$1" + if [[ -d "$path" ]]; then + (cd -P "$path" && pwd) + return + fi + + local parent base + parent="$(dirname "$path")" + base="$(basename "$path")" + + if [[ -d "$parent" ]]; then + printf "%s/%s" "$(cd -P "$parent" && pwd)" "$base" + else + printf "%s" "$path" + fi +} + +prefer_sibling_repo_dirs() { + if [[ "$(echo "$PREFER_SIBLING_REPO_DIRS" | tr '[:upper:]' '[:lower:]')" != "true" ]]; then + CORE_CONTRACTS_DIR="$(normalize_path "$CORE_CONTRACTS_DIR")" + GATEWAY_DIR="$(normalize_path "$GATEWAY_DIR")" + return + fi + + local sibling_core sibling_gateway + sibling_core="$(normalize_path "$PUSH_CHAIN_DIR/../push-chain-core-contracts")" + sibling_gateway="$(normalize_path "$PUSH_CHAIN_DIR/../push-chain-gateway-contracts")" + + CORE_CONTRACTS_DIR="$(normalize_path "$CORE_CONTRACTS_DIR")" + GATEWAY_DIR="$(normalize_path "$GATEWAY_DIR")" + + if [[ -d "$sibling_core" ]]; then + CORE_CONTRACTS_DIR="$sibling_core" + fi + + if [[ -d "$sibling_gateway" ]]; then + GATEWAY_DIR="$sibling_gateway" + fi +} + +prefer_sibling_repo_dirs + ensure_testing_env_var_in_env_file() { mkdir -p "$(dirname "$ENV_FILE")" @@ -937,10 +981,9 @@ step_update_env_fund_to_address() { else echo 
"FUND_TO_ADDRESS=$COSMOS_ADDRESS" >> "$ENV_FILE" fi - # Refresh .env after updating FUND_TO_ADDRESS - set -a - source "$SCRIPT_DIR/.env" - set +a + # Keep runtime env stable: avoid re-sourcing .env here because that can + # reset already-normalized absolute paths (CORE_CONTRACTS_DIR/GATEWAY_DIR/etc). + FUND_TO_ADDRESS="$COSMOS_ADDRESS" log_ok "Updated FUND_TO_ADDRESS in .env to $COSMOS_ADDRESS" } @@ -997,6 +1040,7 @@ step_setup_core_contracts() { [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } ensure_deploy_file + log_info "Using core contracts repo dir: $CORE_CONTRACTS_DIR" clone_or_update_repo "$CORE_CONTRACTS_REPO" "$CORE_CONTRACTS_BRANCH" "$CORE_CONTRACTS_DIR" log_info "Running forge build in core contracts" @@ -1332,6 +1376,8 @@ step_setup_gateway() { local gateway_repo_dir="$GATEWAY_DIR" local sibling_gateway_dir="$PUSH_CHAIN_DIR/../push-chain-gateway-contracts" + log_info "Using gateway repo dir: $gateway_repo_dir" + # Some local setups accidentally resolve GATEWAY_DIR under push-chain/ itself. # Prefer a repo path that actually contains the localSetup gateway scripts. if [[ -d "$sibling_gateway_dir/contracts/evm-gateway" ]]; then @@ -1883,45 +1929,6 @@ step_deploy_counter_and_sync_sdk() { log_ok "Synced SDK COUNTER_ADDRESS_PAYABLE in $sdk_counter_addr_file" } -step_ensure_tss_key_ready() { - require_cmd docker - - if ! docker ps --format '{{.Names}}' | grep -qx 'core-validator-1'; then - log_warn "core-validator-1 is not running; skipping TSS key readiness check" - return 0 - fi - - log_info "Checking current UTSS key readiness" - if docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json >/dev/null 2>&1; then - log_ok "UTSS current key already exists" - return 0 - fi - - log_warn "UTSS current key missing (no_key). Initiating TSS keygen..." 
- ( - cd "$LOCAL_DEVNET_DIR" - ./devnet tss-keygen - ) - - local i - for i in {1..36}; do - if docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json >/dev/null 2>&1; then - log_ok "UTSS current key is ready" - return 0 - fi - sleep 5 - done - - log_err "UTSS current key was not finalized after keygen" - if docker ps --format '{{.Names}}' | grep -qx 'universal-validator-1'; then - log_warn "Dumping recent universal-validator-1 logs for diagnosis" - docker logs --tail 300 universal-validator-1 2>&1 || true - fi - - log_err "TSS keygen did not complete. If logs show wrapper/go-dkls keygen panic, fix UV dkls setup before running Route-3 tests." - exit 1 -} - step_bootstrap_cea_for_sdk_signer() { require_cmd node @@ -2044,7 +2051,6 @@ cmd_all() { if is_local_testing_env; then step_setup_environment fi - step_ensure_tss_key_ready step_recover_genesis_key step_fund_account step_setup_core_contracts @@ -2057,7 +2063,6 @@ cmd_all() { step_update_eth_token_config step_setup_gateway step_add_uregistry_configs - step_bootstrap_cea_for_sdk_signer step_deploy_counter_and_sync_sdk } @@ -2080,7 +2085,6 @@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) - ensure-tss-key Ensure UTSS current key exists (runs keygen and waits until finalized) bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE setup-sdk Clone/setup push-chain-sdk, generate SDK .env from e2e .env, and install dependencies @@ -2135,7 +2139,6 @@ main() { write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; setup-gateway) step_setup_gateway ;; - ensure-tss-key) step_ensure_tss_key_ready ;; 
bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; setup-sdk) step_setup_push_chain_sdk ;; diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index 5b1c6757..de7c5075 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -183,13 +183,13 @@ check_grants() { local granter_addr=$(docker exec "core-validator-$i" pchaind keys show "validator-$i" -a --keyring-backend test 2>/dev/null || echo "") if [ -n "$hotkey_addr" ] && [ -n "$granter_addr" ]; then - local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") + local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . 
== "/uexecutor.v1.MsgVoteOutbound")] | length' 2>/dev/null || echo "0") if [ "$count" = "0" ] || [ -z "$count" ]; then echo "no|[no]" - elif [ "$count" -lt "4" ] 2>/dev/null; then - echo "no|[no] ${count}/4" + elif [ "$count" -lt "3" ] 2>/dev/null; then + echo "no|[no] ${count}/3" else - echo "yes|[ok] ${count}/4" + echo "yes|[ok] ${count}/3" fi else echo "no|[no]" @@ -921,8 +921,7 @@ cmd_setup_uvalidators() { for msg_type in \ /uexecutor.v1.MsgVoteInbound \ /uexecutor.v1.MsgVoteChainMeta \ - /uexecutor.v1.MsgVoteOutbound \ - /utss.v1.MsgVoteTssKeyProcess + /uexecutor.v1.MsgVoteOutbound do local msg_name=$(basename "$msg_type") echo -ne " ${YELLOW}Granting ${msg_name}:${NC} " @@ -940,11 +939,11 @@ cmd_setup_uvalidators() { # Verify all required grants are visible on the canonical RPC endpoint. local grant_count - grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") - if [ "$grant_count" -lt 4 ]; then - echo -e " ${YELLOW}Warning: only ${grant_count}/4 required grants visible for validator-$i${NC}" + grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . 
== "/uexecutor.v1.MsgVoteOutbound")] | length' 2>/dev/null || echo "0") + if [ "$grant_count" -lt 3 ]; then + echo -e " ${YELLOW}Warning: only ${grant_count}/3 required grants visible for validator-$i${NC}" else - echo -e " ${GREEN}Verified 4/4 required grants${NC}" + echo -e " ${GREEN}Verified 3/3 required grants${NC}" fi echo -e " ${GREEN}Validator $i setup complete${NC}" diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 0f468fc9..1da568ef 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -338,14 +338,14 @@ fi echo "🔐 Waiting for AuthZ grants to be created by core validator..." echo "📝 Core validators create AuthZ grants after UV registration completes" -echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound, MsgVoteTssKeyProcess" +echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound" # Get the hotkey address HOTKEY_ADDR=$($BINARY keys show "$HOTKEY_NAME" --address --keyring-backend test --home "$HOME_DIR" 2>/dev/null || echo "") # Required message types (must match PushSigner validation requirements) -REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' -REQUIRED_GRANTS=4 +REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound"]' +REQUIRED_GRANTS=3 # Query core-validator-1 for grants (genesis validator creates ALL grants immediately) GRANTS_QUERY_HOST="core-validator-1" From a4f366d170f3dcef911d18de838647a013e5b7b1 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Fri, 27 Mar 2026 20:45:49 +0530 Subject: [PATCH 28/61] local-multi-validator tss setup --- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 11 + local-multi-validator/devnet | 379 +++++++++++++++-- 
.../scripts/setup-universal.sh | 31 +- .../scripts/setup-validator-auto.sh | 380 ++++++++++++------ .../tss/coordinator/coordinator.go | 6 + .../tss/sessionmanager/sessionmanager.go | 55 ++- 7 files changed, 688 insertions(+), 176 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index d42465fd..c8eb36ab 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-03-27T08:28:17Z", + "generatedAt": "2026-03-27T15:09:03Z", "contracts": { "WPC": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", "Factory": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 92e42f0f..a0715b60 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -665,6 +665,16 @@ step_devnet() { log_ok "Devnet is up" } +step_ensure_tss_key_ready() { + require_cmd bash + log_info "Ensuring TSS key is ready" + ( + cd "$LOCAL_DEVNET_DIR" + ./devnet tss-keygen + ) + log_ok "TSS key is ready" +} + step_setup_environment() { if ! is_local_testing_env; then log_info "TESTING_ENV is not LOCAL, skipping setup-environment" @@ -2048,6 +2058,7 @@ cmd_all() { step_update_env_fund_to_address step_stop_running_nodes step_devnet + step_ensure_tss_key_ready if is_local_testing_env; then step_setup_environment fi diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index de7c5075..c295ac4b 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -183,19 +183,112 @@ check_grants() { local granter_addr=$(docker exec "core-validator-$i" pchaind keys show "validator-$i" -a --keyring-backend test 2>/dev/null || echo "") if [ -n "$hotkey_addr" ] && [ -n "$granter_addr" ]; then - local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. 
== "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound")] | length' 2>/dev/null || echo "0") + local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") if [ "$count" = "0" ] || [ -z "$count" ]; then echo "no|[no]" - elif [ "$count" -lt "3" ] 2>/dev/null; then - echo "no|[no] ${count}/3" + elif [ "$count" -lt "4" ] 2>/dev/null; then + echo "no|[no] ${count}/4" else - echo "yes|[ok] ${count}/3" + echo "yes|[ok] ${count}/4" fi else echo "no|[no]" fi } +wait_for_chain_tx_success() { + local tx_hash="$1" + local max_wait="${2:-30}" + local waited=0 + local start_height + start_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + + while [ "$waited" -lt "$max_wait" ]; do + local tx_json + tx_json=$(docker exec core-validator-1 curl -sf "http://localhost:1317/cosmos/tx/v1beta1/txs/$tx_hash" 2>/dev/null || true) + + local has_tx_response + has_tx_response=$(echo "$tx_json" | jq -r 'has("tx_response")' 2>/dev/null || echo "false") + + if [ "$has_tx_response" = "true" ]; then + local tx_code + tx_code=$(echo "$tx_json" | jq -r '.tx_response.code // "0"' 2>/dev/null || echo "0") + + if [ "$tx_code" = "0" ]; then + return 0 + fi + + local raw_log + raw_log=$(echo "$tx_json" | jq -r '.tx_response.raw_log // .message // ""' 2>/dev/null || true) + print_error "TX $tx_hash failed with code $tx_code: ${raw_log:-unknown error}" + return 1 + fi + + local query_code + query_code=$(echo "$tx_json" | jq -r '.code // empty' 2>/dev/null || true) + if [ -n "$query_code" ] && [ "$query_code" != "5" ]; 
then + local query_msg + query_msg=$(echo "$tx_json" | jq -r '.message // ""' 2>/dev/null || true) + print_warning "TX query for $tx_hash returned code $query_code: ${query_msg:-unknown response}" + fi + + sleep 1 + waited=$((waited + 1)) + done + + local end_height + end_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + if [ -n "$start_height" ] && [ -n "$end_height" ] && [ "$start_height" = "$end_height" ]; then + print_error "Timed out waiting for tx $tx_hash (chain height is stalled at $end_height)" + else + print_error "Timed out waiting for tx $tx_hash" + fi + return 1 +} + +wait_for_block_progress() { + local max_wait="${1:-10}" + local waited=0 + local start_height + start_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + + while [ "$waited" -lt "$max_wait" ]; do + local current_height + current_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + if [ -n "$start_height" ] && [ -n "$current_height" ] && [ "$current_height" != "$start_height" ]; then + return 0 + fi + + sleep 1 + waited=$((waited + 1)) + done + + return 1 +} + +get_current_tss_key_id() { + docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.key.key_id // .current_key.key_id // empty' 2>/dev/null || true +} + +get_utss_admin_address() { + docker exec core-validator-1 pchaind query utss params --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.params.admin // empty' 2>/dev/null || true +} + +get_key_name_for_address() { + local address="$1" + docker exec core-validator-1 pchaind keys list --keyring-backend test --output json 2>/dev/null | jq -r --arg addr "$address" '.[] | select(.address == $addr) | .name' 2>/dev/null | head -n1 +} + 
+resolve_utss_admin_signer() { + local admin_addr + admin_addr=$(get_utss_admin_address) + if [ -z "$admin_addr" ]; then + return 1 + fi + + get_key_name_for_address "$admin_addr" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS DISPLAY # ═══════════════════════════════════════════════════════════════════════════════ @@ -674,26 +767,169 @@ cmd_rebuild() { # ═══════════════════════════════════════════════════════════════════════════════ cmd_tss_keygen() { print_header "TSS Key Generation" - print_status "Initiating TSS keygen process..." + print_status "Validating UV topology before keygen..." + + local uv_json + uv_json=$(docker exec core-validator-1 pchaind query uvalidator all-universal-validators --node tcp://localhost:26657 --output json 2>/dev/null || echo "{}") + local uv_count + uv_count=$(echo "$uv_json" | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if [ "$uv_count" -lt 2 ]; then + print_error "Need at least 2 registered universal validators for DKLS keygen (found: $uv_count)" + return 1 + fi - docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ - --process-type tss-process-keygen \ - --from genesis-acc-1 \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 1000000000000000upc \ - --yes + local invalid_peer_count + invalid_peer_count=$(echo "$uv_json" | jq -r '[.universal_validator[]? | select((.network_info.peer_id // "") | startswith("12D3") | not)] | length' 2>/dev/null || echo "0") + if [ "$invalid_peer_count" -gt 0 ]; then + print_error "Found $invalid_peer_count universal validators with non-libp2p peer IDs" + echo "$uv_json" | jq -r '.universal_validator[]? 
| " - " + (.identify_info.core_validator_address // "unknown") + " => " + (.network_info.peer_id // "")' 2>/dev/null || true + print_error "Run './devnet setup-uvalidators' to repair peer IDs before keygen" + return 1 + fi + + local existing_key + existing_key=$(get_current_tss_key_id) + if [ -n "$existing_key" ]; then + print_success "TSS key already present: $existing_key" + return 0 + fi + + local max_attempts=5 + local max_wait_per_attempt=180 + local attempt + local admin_addr + admin_addr=$(get_utss_admin_address) + if [ -z "$admin_addr" ]; then + print_error "Unable to read UTSS params.admin from chain" + return 1 + fi + + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ]; then + print_error "No local key matches UTSS admin address: $admin_addr" + print_status "Available local keys in core-validator-1:" + docker exec core-validator-1 pchaind keys list --keyring-backend test --output json 2>/dev/null | jq -r '.[] | " - " + .name + " => " + .address' 2>/dev/null || true + return 1 + fi + + print_status "UTSS admin signer resolved: $tx_signer ($admin_addr)" + + for ((attempt = 1; attempt <= max_attempts; attempt++)); do + if ! wait_for_block_progress 8; then + print_error "Chain is not producing blocks; cannot run TSS keygen" + return 1 + fi + + print_status "Initiating TSS keygen process (attempt ${attempt}/${max_attempts}, signer: ${tx_signer})..." 
+ + local keygen_result + keygen_result=$(docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$tx_signer" \ + --chain-id localchain_9000-1 \ + --keyring-backend test \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || true) + + local submit_code + submit_code=$(echo "$keygen_result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + local submit_log + submit_log=$(echo "$keygen_result" | jq -r '.raw_log // .message // ""' 2>/dev/null || true) + local tx_hash + tx_hash=$(echo "$keygen_result" | jq -r '.txhash // ""' 2>/dev/null) + + if [ "$submit_code" != "0" ]; then + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qi "account sequence mismatch"; then + print_warning "Keygen tx had sequence mismatch for signer $tx_signer on attempt ${attempt}/${max_attempts}; waiting for block progress and retrying" + wait_for_block_progress 8 || sleep 2 + continue + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qi "invalid authority"; then + print_error "UTSS rejected signer authority: ${submit_log}" + print_error "Resolved signer: $tx_signer ($admin_addr)" + return 1 + fi - print_success "TSS keygen initiated!" + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "insufficient funds|insufficient fee"; then + print_error "Keygen signer $tx_signer cannot pay fees: ${submit_log}" + return 1 + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "unauthorized|not found"; then + print_warning "Keygen submission returned authorization/keyring error: ${submit_log}" + continue + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "already|in progress|pending"; then + print_status "Keygen process appears active already: $submit_log" + else + print_warning "Keygen submission returned code $submit_code on attempt ${attempt}/${max_attempts}: ${submit_log:-unknown error}" + sleep 2 + continue + fi + else + if [ -n "$tx_hash" ]; then + if ! 
wait_for_chain_tx_success "$tx_hash" 45; then + if ! wait_for_block_progress 8; then + print_error "Chain stalled while waiting for keygen tx confirmation" + return 1 + fi + print_warning "Keygen tx did not confirm successfully on attempt ${attempt}/${max_attempts}, checking on-chain key state" + fi + else + print_warning "Keygen submission returned no tx hash on attempt ${attempt}/${max_attempts}" + fi + fi + + print_status "Waiting for TSS key to materialize on-chain..." + local waited=0 + while [ "$waited" -lt "$max_wait_per_attempt" ]; do + local key_id + key_id=$(get_current_tss_key_id) + if [ -n "$key_id" ]; then + print_success "TSS key is ready: $key_id" + return 0 + fi + sleep 2 + waited=$((waited + 2)) + done + + if [ "$attempt" -lt "$max_attempts" ]; then + print_warning "TSS key not available after ${max_wait_per_attempt}s on attempt ${attempt}/${max_attempts}" + sleep 5 + fi + done + + print_error "TSS key not available after ${max_attempts} attempts" + print_status "Recent universal-validator logs for diagnosis:" + for i in 1 2 3 4; do + if docker ps --format '{{.Names}}' | grep -qx "universal-validator-$i"; then + echo "---- universal-validator-$i ----" + docker logs --tail 40 "universal-validator-$i" 2>&1 || true + fi + done + return 1 } cmd_tss_refresh() { print_header "TSS Key Refresh" print_status "Initiating TSS key refresh process..." 
+ local admin_addr + admin_addr=$(get_utss_admin_address) + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ] || [ -z "$admin_addr" ]; then + print_error "Unable to resolve UTSS admin signer for refresh" + return 1 + fi + docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ --process-type tss-process-refresh \ - --from genesis-acc-1 \ + --from "$tx_signer" \ --chain-id localchain_9000-1 \ --keyring-backend test \ --fees 1000000000000000upc \ @@ -706,9 +942,18 @@ cmd_tss_quorum() { print_header "TSS Quorum Change" print_status "Initiating TSS quorum change process..." + local admin_addr + admin_addr=$(get_utss_admin_address) + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ] || [ -z "$admin_addr" ]; then + print_error "Unable to resolve UTSS admin signer for quorum change" + return 1 + fi + docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ --process-type tss-process-quorum-change \ - --from genesis-acc-1 \ + --from "$tx_signer" \ --chain-id localchain_9000-1 \ --keyring-backend test \ --fees 1000000000000000upc \ @@ -839,6 +1084,8 @@ cmd_setup_uvalidators() { echo -e "${YELLOW}Registering universal validators and granting AuthZ permissions...${NC}" echo + local setup_failures=0 + for i in 1 2 3 4; do echo -e "${BLUE}─────────────────────────────────────────────────────────────────────${NC}" echo -e "${BOLD}Setting up Universal Validator $i${NC}" @@ -856,35 +1103,96 @@ cmd_setup_uvalidators() { echo -e " Account: ${CYAN}$VALIDATOR_ADDR${NC}" echo -e " Valoper: ${CYAN}$VALOPER_ADDR${NC}" - # Get network info from universal validator's TSS port + # Get TSS libp2p network info (not CometBFT node ID). 
echo -ne " ${YELLOW}Getting network info:${NC} " - local rpc_port=$(get_rpc_port $i) - local NODE_ID=$(curl -s "http://localhost:${rpc_port}/status" | jq -r '.result.node_info.id' 2>/dev/null) + local PEER_ID + case "$i" in + 1) PEER_ID="12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5" ;; + 2) PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" ;; + 3) PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" ;; + 4) PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" ;; + *) PEER_ID="" ;; + esac + local TSS_PORT + TSS_PORT=$(get_tss_port "$i") + local MULTI_ADDR="/dns4/universal-validator-$i/tcp/$TSS_PORT" - if [ -z "$NODE_ID" ] || [ "$NODE_ID" = "null" ]; then - echo -e "${RED}Failed to get node ID${NC}" + if [ -z "$PEER_ID" ] || [[ ! "$PEER_ID" =~ ^12D3 ]]; then + echo -e "${RED}Failed to compute TSS peer ID${NC}" + setup_failures=$((setup_failures + 1)) continue fi - local MULTI_ADDR="/ip4/core-validator-$i/tcp/26656" echo -e "${GREEN}Done${NC}" - echo -e " Node ID: ${CYAN}$NODE_ID${NC}" + echo -e " Peer ID: ${CYAN}$PEER_ID${NC}" + echo -e " MultiAddr: ${CYAN}$MULTI_ADDR${NC}" + + # Ensure core validator exists in staking before UV registration. 
+ local bond_status="NOT_FOUND" + local bond_wait=0 + local bond_wait_max=90 + while [ "$bond_wait" -lt "$bond_wait_max" ]; do + bond_status=$(docker exec core-validator-1 pchaind query staking validator "$VALOPER_ADDR" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND") + if [ "$bond_status" = "BOND_STATUS_BONDED" ]; then + break + fi + sleep 2 + bond_wait=$((bond_wait + 2)) + done + if [ "$bond_status" != "BOND_STATUS_BONDED" ]; then + echo -e "${RED}Core validator is not bonded after ${bond_wait_max}s (status: $bond_status)${NC}" + setup_failures=$((setup_failures + 1)) + continue + fi + + local network_json + network_json="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" # Register as universal validator echo -ne " ${YELLOW}Registering as universal validator:${NC} " local register_result=$(docker exec core-validator-1 pchaind tx uvalidator add-universal-validator \ --core-validator-address "$VALOPER_ADDR" \ - --network "{\"peer_id\": \"$NODE_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" \ + --network "$network_json" \ --from genesis-acc-1 \ --chain-id localchain_9000-1 \ + --node tcp://localhost:26657 \ --keyring-backend test \ --fees 1000000000000000upc \ --yes \ - --output json 2>&1) + --output json 2>&1 || true) - if echo "$register_result" | grep -q '"txhash"'; then + local register_tx + register_tx=$(echo "$register_result" | jq -r '.txhash // ""' 2>/dev/null) + local registered=false + if [ -n "$register_tx" ] && wait_for_chain_tx_success "$register_tx" 30; then echo -e "${GREEN}Done${NC}" - else - echo -e "${YELLOW}May already be registered${NC}" + registered=true + fi + + # If add failed (often already-exists), update network info from validator account. 
+ if [ "$registered" = "false" ]; then + local update_result + update_result=$(docker exec core-validator-$i pchaind tx uvalidator update-universal-validator \ + --network "$network_json" \ + --from validator-$i \ + --chain-id localchain_9000-1 \ + --node tcp://core-validator-1:26657 \ + --keyring-backend test \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || true) + + local update_tx + update_tx=$(echo "$update_result" | jq -r '.txhash // ""' 2>/dev/null) + if [ -n "$update_tx" ] && wait_for_chain_tx_success "$update_tx" 30; then + echo -e "${GREEN}Updated existing registration${NC}" + registered=true + fi + fi + + if [ "$registered" = "false" ]; then + echo -e "${RED}Registration/update failed for validator-$i${NC}" + setup_failures=$((setup_failures + 1)) + continue fi sleep 3 @@ -921,7 +1229,8 @@ cmd_setup_uvalidators() { for msg_type in \ /uexecutor.v1.MsgVoteInbound \ /uexecutor.v1.MsgVoteChainMeta \ - /uexecutor.v1.MsgVoteOutbound + /uexecutor.v1.MsgVoteOutbound \ + /utss.v1.MsgVoteTssKeyProcess do local msg_name=$(basename "$msg_type") echo -ne " ${YELLOW}Granting ${msg_name}:${NC} " @@ -939,17 +1248,23 @@ cmd_setup_uvalidators() { # Verify all required grants are visible on the canonical RPC endpoint. local grant_count - grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound")] | length' 2>/dev/null || echo "0") - if [ "$grant_count" -lt 3 ]; then - echo -e " ${YELLOW}Warning: only ${grant_count}/3 required grants visible for validator-$i${NC}" + grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? 
| .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") + if [ "$grant_count" -lt 4 ]; then + echo -e " ${RED}Only ${grant_count}/4 required grants visible for validator-$i${NC}" + setup_failures=$((setup_failures + 1)) else - echo -e " ${GREEN}Verified 3/3 required grants${NC}" + echo -e " ${GREEN}Verified 4/4 required grants${NC}" fi echo -e " ${GREEN}Validator $i setup complete${NC}" echo done + if [ "$setup_failures" -gt 0 ]; then + print_error "Universal validator setup completed with $setup_failures failure(s)" + return 1 + fi + print_success "Universal validators setup complete!" echo echo -e "${BOLD}Verify with:${NC}" diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 1da568ef..a9d6ebb2 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -338,14 +338,17 @@ fi echo "🔐 Waiting for AuthZ grants to be created by core validator..." 
echo "📝 Core validators create AuthZ grants after UV registration completes" -echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound" +echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound, MsgVoteTssKeyProcess" # Get the hotkey address HOTKEY_ADDR=$($BINARY keys show "$HOTKEY_NAME" --address --keyring-backend test --home "$HOME_DIR" 2>/dev/null || echo "") # Required message types (must match PushSigner validation requirements) -REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound"]' -REQUIRED_GRANTS=3 +REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' +REQUIRED_GRANTS=4 + +# Allow additional time for grant propagation during startup races. +AUTHZ_GRANTS_WAIT_SECONDS=${AUTHZ_GRANTS_WAIT_SECONDS:-120} # Query core-validator-1 for grants (genesis validator creates ALL grants immediately) GRANTS_QUERY_HOST="core-validator-1" @@ -354,14 +357,14 @@ if [ -n "$HOTKEY_ADDR" ]; then echo "🔍 Checking for AuthZ grants for hotkey: $HOTKEY_ADDR" echo "📡 Querying grants from: $GRANTS_QUERY_HOST:1317" - # Wait for all required AuthZ grants (should be fast - genesis validator creates all grants) - max_wait=20 + # Wait for all required AuthZ grants (genesis validator creates all grants, but propagation can lag) + max_wait=$AUTHZ_GRANTS_WAIT_SECONDS wait_time=0 MATCHED_GRANTS=0 while [ $wait_time -lt $max_wait ]; do # Query grants and count only required message types MATCHED_GRANTS=$(curl -s "http://$GRANTS_QUERY_HOST:1317/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null | \ - jq -r --argjson required "$REQUIRED_MSG_TYPES" '[.grants[]? | select(.authorization.value.msg as $m | $required | index($m))] | length' 2>/dev/null || echo "0") + jq -r --argjson required "$REQUIRED_MSG_TYPES" '[.grants[]? 
| (.authorization.msg // .authorization.value.msg // "") as $m | select($required | index($m))] | length' 2>/dev/null || echo "0") if [ "$MATCHED_GRANTS" -ge "$REQUIRED_GRANTS" ] 2>/dev/null; then echo "✅ Found all $MATCHED_GRANTS/$REQUIRED_GRANTS required AuthZ grants!" @@ -377,11 +380,12 @@ if [ -n "$HOTKEY_ADDR" ]; then done if [ "$MATCHED_GRANTS" -lt "$REQUIRED_GRANTS" ] 2>/dev/null; then - echo "⚠️ Only found $MATCHED_GRANTS/$REQUIRED_GRANTS required grants after ${max_wait}s" - echo " The universal validator may fail startup validation if grants are missing." + echo "⚠️ Only found $MATCHED_GRANTS/$REQUIRED_GRANTS required grants after ${max_wait}s" + echo " Continuing startup; grants may still arrive shortly." fi else - echo "⚠️ Could not get hotkey address, skipping AuthZ check" + echo "❌ Could not get hotkey address, cannot verify AuthZ grants" + exit 1 fi # --------------------------- @@ -407,7 +411,7 @@ if [ -n "$EXPECTED_PEER_ID" ]; then reg_wait=0 while [ $reg_wait -lt $max_reg_wait ]; do # Query all universal validators via REST API and look for our peer_id - FOUND=$(curl -s "http://core-validator-1:1317/push/uvalidator/v1/all_universal_validators" 2>/dev/null | \ + FOUND=$(curl -s "http://core-validator-1:1317/uvalidator/v1/universal_validators" 2>/dev/null | \ jq -r --arg pid "$EXPECTED_PEER_ID" \ '.universal_validator[]? | select(.network_info.peer_id == $pid) | .network_info.peer_id' 2>/dev/null || echo "") @@ -424,10 +428,13 @@ if [ -n "$EXPECTED_PEER_ID" ]; then done if [ -z "$FOUND" ]; then - echo "⚠️ Validator not found on-chain after ${max_reg_wait}s, continuing anyway..." + echo "❌ Validator not found on-chain after ${max_reg_wait}s" + echo " Failing startup so container restarts until registration is correct." 
+ exit 1 fi else - echo "⚠️ Unknown UNIVERSAL_ID, skipping registration check" + echo "❌ Unknown UNIVERSAL_ID, cannot validate on-chain registration" + exit 1 fi # --------------------------- diff --git a/local-multi-validator/scripts/setup-validator-auto.sh b/local-multi-validator/scripts/setup-validator-auto.sh index 533319b5..fbc24b6f 100755 --- a/local-multi-validator/scripts/setup-validator-auto.sh +++ b/local-multi-validator/scripts/setup-validator-auto.sh @@ -63,6 +63,64 @@ FUNDING_MNEMONIC=$(jq -r ".[$FUNDING_INDEX].mnemonic" "$GENESIS_ACCOUNTS_FILE") FUNDING_KEY="genesis-acc-$VALIDATOR_ID" FUNDING_AMOUNT="200000000000000000000000" # 200k * 10^18 (enough for staking + fees) +wait_for_deliver_tx_success() { + local tx_hash="$1" + local node="$2" + local max_wait="${3:-30}" + local waited=0 + + while [ "$waited" -lt "$max_wait" ]; do + local tx_json + tx_json=$($BINARY query tx "$tx_hash" --node="$node" --output json 2>/dev/null || true) + local tx_code + tx_code=$(echo "$tx_json" | jq -r '.code // empty' 2>/dev/null || true) + + if [ -n "$tx_code" ]; then + if [ "$tx_code" = "0" ]; then + return 0 + fi + + local raw_log + raw_log=$(echo "$tx_json" | jq -r '.raw_log // ""' 2>/dev/null || true) + echo "❌ TX $tx_hash failed with code $tx_code: ${raw_log:-unknown error}" + return 1 + fi + + sleep 1 + waited=$((waited + 1)) + done + + echo "❌ Timed out waiting for TX $tx_hash to be included" + return 1 +} + +is_universal_validator_registered() { + local valoper_addr="$1" + local peer_id="$2" + local node="$3" + local uv_json + + uv_json=$($BINARY query uvalidator all-universal-validators --node="$node" --output json 2>/dev/null || echo "{}") + + if [ -n "$valoper_addr" ] && echo "$uv_json" | jq -e --arg addr "$valoper_addr" '.universal_validator[]? | select(.identify_info.core_validator_address == $addr)' >/dev/null 2>&1; then + return 0 + fi + + if [ -n "$peer_id" ] && echo "$uv_json" | jq -e --arg pid "$peer_id" '.universal_validator[]? 
| select(.network_info.peer_id == $pid)' >/dev/null 2>&1; then + return 0 + fi + + return 1 +} + +is_validator_bonded() { + local valoper_addr="$1" + local node="$2" + local status + status=$($BINARY query staking validator "$valoper_addr" --node="$node" --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND") + [ "$status" = "BOND_STATUS_BONDED" ] +} + # --------------------------- # === WAIT FOR GENESIS VALIDATOR === # --------------------------- @@ -97,125 +155,179 @@ if [ -f "$HOME_DIR/data/priv_validator_state.json" ]; then # Check if the state file has valid content (not just initial state) HEIGHT=$(cat "$HOME_DIR/data/priv_validator_state.json" | jq -r '.height // "0"' 2>/dev/null || echo "0") if [ "$HEIGHT" != "0" ] && [ "$HEIGHT" != "\"0\"" ]; then - echo "✅ Node already initialized with block height $HEIGHT, starting node..." - - # Start node in background so we can check UV registration - $BINARY start \ - --home "$HOME_DIR" \ - --pruning=nothing \ - --minimum-gas-prices="1000000000${DENOM}" \ - --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ - --json-rpc.address="0.0.0.0:8545" \ - --json-rpc.ws-address="0.0.0.0:8546" \ - --json-rpc.api=eth,txpool,personal,net,debug,web3 \ - --chain-id="$CHAIN_ID" & - - NODE_PID=$! - - # Wait for node to be ready - echo "⏳ Waiting for node to be ready..." - sleep 10 - - # Check if UV registration is needed (for validators 2, 3, and 4) - if [ "$VALIDATOR_ID" = "2" ] || [ "$VALIDATOR_ID" = "3" ] || [ "$VALIDATOR_ID" = "4" ]; then - echo "🔍 Checking universal validator registration status..." 
- - GENESIS_RPC="http://core-validator-1:26657" - - # Pre-computed peer_ids - case $VALIDATOR_ID in - 2) - PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" - TSS_PORT=39001 - ;; - 3) - PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" - TSS_PORT=39002 - ;; - 4) - PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" - TSS_PORT=39003 - ;; - esac - - # Check if already registered by querying for our peer_id - UV_CHECK=$($BINARY query uvalidator all-universal-validators --node="$GENESIS_RPC" --output json 2>/dev/null || echo "{}") - - if echo "$UV_CHECK" | grep -q "$PEER_ID"; then - echo "✅ Universal-validator-$VALIDATOR_ID already registered" - else - echo "📝 Universal-validator-$VALIDATOR_ID not registered, registering now..." + REUSE_EXISTING_STATE=true + + # Always refresh persistent peer so stale node IDs from previous runs don't isolate the validator. + if [ -f "$HOME_DIR/config/config.toml" ]; then + GENESIS_NODE_ID=$(curl -s "$GENESIS_RPC/status" | jq -r '.result.node_info.id // ""') + if [ -n "$GENESIS_NODE_ID" ] && [ "$GENESIS_NODE_ID" != "null" ]; then + PERSISTENT_PEER="$GENESIS_NODE_ID@$GENESIS_PEER" + echo "🔗 Refreshing persistent peer to: $PERSISTENT_PEER" + sed -i -e "s/^persistent_peers *=.*/persistent_peers = \"$PERSISTENT_PEER\"/" "$HOME_DIR/config/config.toml" + fi + else + REUSE_EXISTING_STATE=false + echo "⚠️ Missing config.toml in existing state; forcing re-init" + fi - # Get valoper address - VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null) + # Reuse existing data only if validator is actually bonded on-chain. 
+ if [ "$REUSE_EXISTING_STATE" = "true" ]; then + VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || echo "") + if [ -z "$VALOPER_ADDR" ]; then + REUSE_EXISTING_STATE=false + echo "⚠️ Could not read valoper address from existing keyring; forcing re-init" + else + VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" --node="$GENESIS_RPC" --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") + if [ "$VALIDATOR_STATUS" != "BOND_STATUS_BONDED" ]; then + REUSE_EXISTING_STATE=false + echo "⚠️ Existing validator status is $VALIDATOR_STATUS; forcing re-init" + fi + fi + fi - if [ -n "$VALOPER_ADDR" ]; then - MULTI_ADDR="/dns4/universal-validator-$VALIDATOR_ID/tcp/$TSS_PORT" - NETWORK_JSON="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" + if [ "$REUSE_EXISTING_STATE" = "true" ]; then + echo "✅ Node already initialized with block height $HEIGHT, starting node..." - # Import genesis account for signing - GENESIS_ACCOUNTS_FILE="/tmp/push-accounts/genesis_accounts.json" - if [ -f "$GENESIS_ACCOUNTS_FILE" ]; then - GENESIS_ACC_MNEMONIC=$(jq -r '.[0].mnemonic' "$GENESIS_ACCOUNTS_FILE") - echo "$GENESIS_ACC_MNEMONIC" | $BINARY keys add genesis-acc-1 --recover --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true + # Start node in background so we can verify sync and UV registration. + $BINARY start \ + --home "$HOME_DIR" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:8545" \ + --json-rpc.ws-address="0.0.0.0:8546" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" & + + NODE_PID=$! + + echo "⏳ Waiting for node to sync..." 
+ max_sync_attempts=90 + sync_attempt=0 + while [ $sync_attempt -lt $max_sync_attempts ]; do + if curl -s "http://localhost:${RPC_PORT}/status" > /dev/null 2>&1; then + CATCHING_UP=$(curl -s "http://localhost:${RPC_PORT}/status" | jq -r '.result.sync_info.catching_up' 2>/dev/null || echo "true") + if [ "$CATCHING_UP" = "false" ]; then + echo "✅ Existing node state is synced" + break fi + fi + sleep 2 + sync_attempt=$((sync_attempt + 1)) + done - # Retry loop for registration (handles sequence mismatch race condition) - MAX_RETRIES=5 - RETRY_COUNT=0 - REGISTERED=false + if [ $sync_attempt -eq $max_sync_attempts ]; then + echo "❌ Existing node did not sync in time; forcing restart on clean init" + kill $NODE_PID + exit 1 + fi - while [ "$RETRY_COUNT" -lt "$MAX_RETRIES" ] && [ "$REGISTERED" = "false" ]; do - RETRY_COUNT=$((RETRY_COUNT + 1)) + # Check if UV registration is needed (for validators 2, 3, and 4) + if [ "$VALIDATOR_ID" = "2" ] || [ "$VALIDATOR_ID" = "3" ] || [ "$VALIDATOR_ID" = "4" ]; then + echo "🔍 Checking universal validator registration status..." + + GENESIS_RPC="http://core-validator-1:26657" + + # Pre-computed peer_ids + case $VALIDATOR_ID in + 2) + PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" + TSS_PORT=39001 + ;; + 3) + PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" + TSS_PORT=39002 + ;; + 4) + PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" + TSS_PORT=39003 + ;; + esac - # Stagger validators to reduce race conditions (validator 2 waits 2s, validator 3 waits 4s) - if [ "$RETRY_COUNT" -eq 1 ]; then - STAGGER_DELAY=$((VALIDATOR_ID * 2)) - echo "⏳ Waiting ${STAGGER_DELAY}s to stagger registration..." - sleep $STAGGER_DELAY - fi + # Get valoper address + VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true) - echo "📤 Registering universal-validator-$VALIDATOR_ID (attempt $RETRY_COUNT/$MAX_RETRIES)..." 
- RESULT=$($BINARY tx uvalidator add-universal-validator \ - --core-validator-address "$VALOPER_ADDR" \ - --network "$NETWORK_JSON" \ - --from genesis-acc-1 \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --home "$HOME_DIR" \ - --node="$GENESIS_RPC" \ - --fees 1000000000000000upc \ - --yes \ - --output json 2>&1 || echo "{}") - - if echo "$RESULT" | grep -q '"txhash"'; then - TX_HASH=$(echo "$RESULT" | jq -r '.txhash' 2>/dev/null) - echo "✅ Universal-validator-$VALIDATOR_ID registered! TX: $TX_HASH" - REGISTERED=true - elif echo "$RESULT" | grep -q "sequence mismatch"; then - echo "⚠️ Sequence mismatch, retrying in 3s..." - sleep 3 - elif echo "$RESULT" | grep -q "already registered\|already exists"; then - echo "✅ Universal-validator-$VALIDATOR_ID already registered" - REGISTERED=true - else - echo "⚠️ Registration attempt failed: $(echo "$RESULT" | head -1)" - sleep 2 + if [ -z "$VALOPER_ADDR" ]; then + echo "⚠️ Could not get valoper address for validator-$VALIDATOR_ID; skipping UV registration check" + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID already registered" + else + echo "📝 Universal-validator-$VALIDATOR_ID not registered, registering now..." 
+ + MULTI_ADDR="/dns4/universal-validator-$VALIDATOR_ID/tcp/$TSS_PORT" + NETWORK_JSON="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" + + # Import genesis account for signing + GENESIS_ACCOUNTS_FILE="/tmp/push-accounts/genesis_accounts.json" + if [ -f "$GENESIS_ACCOUNTS_FILE" ]; then + GENESIS_ACC_MNEMONIC=$(jq -r '.[0].mnemonic' "$GENESIS_ACCOUNTS_FILE") + echo "$GENESIS_ACC_MNEMONIC" | $BINARY keys add genesis-acc-1 --recover --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true fi - done - if [ "$REGISTERED" = "false" ]; then - echo "❌ Registration TX failed after $MAX_RETRIES attempts" - fi - else - echo "⚠️ Could not get valoper address" + # Retry loop for registration (handles sequence mismatch race condition) + MAX_RETRIES=5 + RETRY_COUNT=0 + REGISTERED=false + + while [ "$RETRY_COUNT" -lt "$MAX_RETRIES" ] && [ "$REGISTERED" = "false" ]; do + RETRY_COUNT=$((RETRY_COUNT + 1)) + + # Stagger validators to reduce race conditions (validator 2 waits 2s, validator 3 waits 4s) + if [ "$RETRY_COUNT" -eq 1 ]; then + STAGGER_DELAY=$((VALIDATOR_ID * 2)) + echo "⏳ Waiting ${STAGGER_DELAY}s to stagger registration..." + sleep $STAGGER_DELAY + fi + + echo "📤 Registering universal-validator-$VALIDATOR_ID (attempt $RETRY_COUNT/$MAX_RETRIES)..." + RESULT=$($BINARY tx uvalidator add-universal-validator \ + --core-validator-address "$VALOPER_ADDR" \ + --network "$NETWORK_JSON" \ + --from genesis-acc-1 \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --home "$HOME_DIR" \ + --node="$GENESIS_RPC" \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || echo "{}") + + TX_HASH=$(echo "$RESULT" | jq -r '.txhash // ""' 2>/dev/null) + + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then + echo "✅ Universal-validator-$VALIDATOR_ID registered! 
TX: $TX_HASH" + REGISTERED=true + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID confirmed registered on-chain" + REGISTERED=true + elif echo "$RESULT" | grep -q "sequence mismatch"; then + echo "⚠️ Sequence mismatch, retrying in 3s..." + sleep 3 + elif echo "$RESULT" | grep -q "already registered\|already exists"; then + echo "✅ Universal-validator-$VALIDATOR_ID already registered" + REGISTERED=true + else + echo "⚠️ Registration attempt failed: $(echo "$RESULT" | head -1)" + sleep 2 + fi + done + + if [ "$REGISTERED" = "false" ]; then + if is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID registered (post-retry check)" + else + echo "⚠️ Registration TX failed after $MAX_RETRIES attempts; continuing so validator stays in consensus" + fi + fi fi fi + + echo "🔄 Node running as validator..." + wait $NODE_PID + exit 0 fi - echo "🔄 Node running as validator..." - wait $NODE_PID - exit 0 + echo "⚠️ Existing state is not reusable; reinitializing validator-$VALIDATOR_ID" fi fi @@ -391,13 +503,13 @@ echo "Validator operator address: $VALOPER_ADDR" # Check if already bonded VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.status' || echo "NOT_FOUND") + --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") if [ "$VALIDATOR_STATUS" = "BOND_STATUS_BONDED" ]; then echo "✅ Validator-$VALIDATOR_ID is already bonded!" VALIDATOR_TOKENS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.tokens' || echo "0") + --output json 2>/dev/null | jq -r '.validator.tokens // "0"' || echo "0") echo " Bonded tokens: $VALIDATOR_TOKENS" else echo "📤 Submitting create-validator transaction..." 
@@ -444,6 +556,14 @@ EOF while [ "$CREATE_RETRY" -lt "$MAX_CREATE_RETRIES" ] && [ "$CREATED" = "false" ]; do CREATE_RETRY=$((CREATE_RETRY + 1)) + + # If the validator is already bonded from a previous attempt, stop retrying. + if is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID is already bonded" + CREATED=true + break + fi + echo "📤 Creating validator (attempt $CREATE_RETRY/$MAX_CREATE_RETRIES)..." CREATE_RESULT=$($BINARY tx staking create-validator "$VALIDATOR_JSON" \ @@ -458,12 +578,25 @@ EOF --yes \ --output json 2>&1) - # Check if it looks like a successful TX (has txhash and no Usage message) + # Treat create-validator as success only after deliver tx succeeds. if echo "$CREATE_RESULT" | grep -q '"txhash"' && ! echo "$CREATE_RESULT" | grep -q "Usage:"; then TX_HASH=$(echo "$CREATE_RESULT" | jq -r '.txhash // ""' 2>/dev/null) - echo "✅ Create-validator TX submitted: $TX_HASH" - CREATED=true + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then + echo "✅ Create-validator TX confirmed: $TX_HASH" + CREATED=true + elif is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID became bonded after tx submission" + CREATED=true + else + echo "⚠️ Create-validator TX was not successful in deliver phase" + sleep 3 + fi else + if is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID is bonded despite non-standard CLI output" + CREATED=true + continue + fi echo "⚠️ Create-validator attempt failed, retrying in 3s..." 
echo " Result: $(echo "$CREATE_RESULT" | head -c 200)" sleep 3 @@ -472,6 +605,8 @@ EOF if [ "$CREATED" = "false" ]; then echo "❌ Create-validator failed after $MAX_CREATE_RETRIES attempts" + kill $NODE_PID + exit 1 fi # Re-enable exit-on-error @@ -483,20 +618,26 @@ EOF # Verify bonding VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.status' || echo "NOT_FOUND") + --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") if [ "$VALIDATOR_STATUS" = "BOND_STATUS_BONDED" ]; then echo "✅ Validator-$VALIDATOR_ID is now bonded!" VALIDATOR_TOKENS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.tokens' || echo "0") + --output json 2>/dev/null | jq -r '.validator.tokens // "0"' || echo "0") echo " Bonded tokens: $VALIDATOR_TOKENS" elif [ "$VALIDATOR_STATUS" = "BOND_STATUS_UNBONDING" ]; then echo "⚠️ Validator-$VALIDATOR_ID is unbonding" + kill $NODE_PID + exit 1 elif [ "$VALIDATOR_STATUS" = "BOND_STATUS_UNBONDED" ]; then echo "⚠️ Validator-$VALIDATOR_ID is unbonded" + kill $NODE_PID + exit 1 else echo "⚠️ Validator status: $VALIDATOR_STATUS" + kill $NODE_PID + exit 1 fi fi @@ -580,13 +721,14 @@ if [ -n "$VALOPER_ADDR" ]; then --yes \ --output json 2>&1 || echo "{}") - # Check TX result - TX_CODE=$(echo "$RESULT" | jq -r '.code // "null"' 2>/dev/null) TX_HASH=$(echo "$RESULT" | jq -r '.txhash // ""' 2>/dev/null) - if [ "$TX_CODE" = "0" ] && [ -n "$TX_HASH" ]; then + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then echo "✅ Universal-validator-$VALIDATOR_ID registered! 
TX: $TX_HASH" REGISTERED=true + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID confirmed registered on-chain" + REGISTERED=true elif echo "$RESULT" | grep -q "sequence mismatch"; then echo "⚠️ Sequence mismatch, retrying in 3s..." sleep 3 @@ -595,13 +737,17 @@ if [ -n "$VALOPER_ADDR" ]; then REGISTERED=true else RAW_LOG=$(echo "$RESULT" | jq -r '.raw_log // ""' 2>/dev/null) - echo "⚠️ Registration attempt failed (code: $TX_CODE): ${RAW_LOG:-$(echo "$RESULT" | head -1)}" + echo "⚠️ Registration attempt failed: ${RAW_LOG:-$(echo "$RESULT" | head -1)}" sleep 2 fi done if [ "$REGISTERED" = "false" ]; then - echo "❌ Registration failed after $MAX_RETRIES attempts" + if is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID registered (post-retry check)" + else + echo "⚠️ Registration failed after $MAX_RETRIES attempts; continuing so validator stays in consensus" + fi fi fi else diff --git a/universalClient/tss/coordinator/coordinator.go b/universalClient/tss/coordinator/coordinator.go index fdd74b04..855f82a6 100644 --- a/universalClient/tss/coordinator/coordinator.go +++ b/universalClient/tss/coordinator/coordinator.go @@ -281,6 +281,12 @@ func (c *Coordinator) GetEligibleUV(protocolType string) []*types.UniversalValid return result } +// RefreshValidators forces an immediate refresh of the validator cache from pushCore. +// Session setup uses this to recover from short-lived cache staleness between nodes. +func (c *Coordinator) RefreshValidators(ctx context.Context) { + c.updateValidators(ctx) +} + // Start starts the coordinator loop. 
func (c *Coordinator) Start(ctx context.Context) { c.mu.Lock() diff --git a/universalClient/tss/sessionmanager/sessionmanager.go b/universalClient/tss/sessionmanager/sessionmanager.go index 52fe59da..7146fcdf 100644 --- a/universalClient/tss/sessionmanager/sessionmanager.go +++ b/universalClient/tss/sessionmanager/sessionmanager.go @@ -23,6 +23,7 @@ import ( "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" "github.com/pushchain/push-chain-node/universalClient/tss/keyshare" uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" + "github.com/pushchain/push-chain-node/x/uvalidator/types" ) // SendFunc is a function type for sending messages to participants. @@ -586,29 +587,56 @@ func (sm *SessionManager) createSession(ctx context.Context, event *store.Event, // For keygen/keyrefresh: participants must match exactly with eligible participants (same elements). // For sign: participants must be a valid >2/3 subset of eligible participants. func (sm *SessionManager) validateParticipants(participants []string, event *store.Event) error { - // Get eligible validators for this protocol + buildEligible := func(vals []*types.UniversalValidator) (map[string]bool, []string) { + eligibleSet := make(map[string]bool) + eligibleList := make([]string, 0, len(vals)) + for _, v := range vals { + if v.IdentifyInfo != nil { + addr := v.IdentifyInfo.CoreValidatorAddress + eligibleSet[addr] = true + eligibleList = append(eligibleList, addr) + } + } + return eligibleSet, eligibleList + } + + findIneligible := func(eligibleSet map[string]bool) []string { + ineligible := make([]string, 0) + for _, partyID := range participants { + if !eligibleSet[partyID] { + ineligible = append(ineligible, partyID) + } + } + return ineligible + } + + // Get eligible validators for this protocol from local cache first. 
eligible := sm.coordinator.GetEligibleUV(string(event.Type)) if len(eligible) == 0 { return errors.New("no eligible validators for protocol") } - // Build set and list of eligible partyIDs - eligibleSet := make(map[string]bool) - eligibleList := make([]string, 0, len(eligible)) - for _, v := range eligible { - if v.IdentifyInfo != nil { - addr := v.IdentifyInfo.CoreValidatorAddress - eligibleSet[addr] = true - eligibleList = append(eligibleList, addr) + eligibleSet, eligibleList := buildEligible(eligible) + ineligible := findIneligible(eligibleSet) + + // Setup and cache updates are asynchronous across nodes; retry once with fresh cache + // before rejecting participants as ineligible. + if len(ineligible) > 0 { + sm.coordinator.RefreshValidators(context.Background()) + eligible = sm.coordinator.GetEligibleUV(string(event.Type)) + if len(eligible) == 0 { + return errors.New("no eligible validators for protocol after refresh") + } + + eligibleSet, eligibleList = buildEligible(eligible) + ineligible = findIneligible(eligibleSet) + if len(ineligible) > 0 { + return errors.Errorf("participant %s is not eligible for protocol %s", ineligible[0], event.Type) } } - // Validate all participants are eligible participantSet := make(map[string]bool) for _, partyID := range participants { - if !eligibleSet[partyID] { - return errors.Errorf("participant %s is not eligible for protocol %s", partyID, event.Type) - } participantSet[partyID] = true } @@ -799,7 +827,6 @@ func (sm *SessionManager) verifySigningRequest(ctx context.Context, event *store return nil } - // getTSSAddress gets the TSS ECDSA address from the current TSS public key // The TSS address is always the same ECDSA address derived from the TSS public key func (sm *SessionManager) getTSSAddress(ctx context.Context) (string, error) { From 3707da09a3ad81e18a7bf6ccf80a8ae7aa7d9896 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Tue, 31 Mar 2026 14:40:29 +0530 
Subject: [PATCH 29/61] USDT_BSC support added to e2e-outbound setup --- e2e-tests/deploy_addresses.json | 13 +- e2e-tests/setup.sh | 193 ++++++++++++++++-- .../scripts/setup-universal.sh | 15 +- universalClient/chains/evm/event_listener.go | 80 +++++++- 4 files changed, 280 insertions(+), 21 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index c8eb36ab..fc0013d4 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,8 +1,8 @@ { - "generatedAt": "2026-03-27T15:09:03Z", + "generatedAt": "2026-03-31T09:09:56Z", "contracts": { - "WPC": "0xB2cf4B3aec93F4A8F92b292d2F605591dB3e3011", - "Factory": "0x057931Df99f61caB5e5DbDb6224D7003E64F659e", + "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", + "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", "SwapRouter": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", "QuoterV2": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", "PositionManager": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5", @@ -51,6 +51,13 @@ "address": "0x31F3Dcb417970EBe9AC1e254Ee42b91e49e30EE2", "source": "core-contracts", "decimals": 9 + }, + { + "name": "USDT.bsc", + "symbol": "USDT.bsc", + "address": "0xC329d4EbF8814eEFfA2Fd9612655e490b112523F", + "source": "core-contracts", + "decimals": 6 } ] } diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index a0715b60..32b21686 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -421,8 +421,179 @@ sdk_test_files() { done } +sdk_rewrite_chain_endpoints_for_local() { + local chain_constants_file="$1" + + CHAIN_CONSTANTS_FILE="$chain_constants_file" node <<'NODE' +const fs = require('fs'); + +const filePath = process.env.CHAIN_CONSTANTS_FILE; +if (!filePath || !fs.existsSync(filePath)) { + console.error('chain.ts file not found for LOCAL endpoint rewrite'); + process.exit(1); +} + +let source = fs.readFileSync(filePath, 'utf8'); + +const endpointMap = [ + { chain: 'ETHEREUM_SEPOLIA', url: 'http://localhost:9545' }, + { chain: 
'ARBITRUM_SEPOLIA', url: 'http://localhost:9546' }, + { chain: 'BASE_SEPOLIA', url: 'http://localhost:9547' }, + { chain: 'BNB_TESTNET', url: 'http://localhost:9548' }, + { chain: 'SOLANA_DEVNET', url: 'http://localhost:8899' }, +]; + +function findChainBlockRange(text, chainName) { + const marker = `[CHAIN.${chainName}]`; + const markerIdx = text.indexOf(marker); + if (markerIdx === -1) { + return null; + } + + const openBraceIdx = text.indexOf('{', markerIdx); + if (openBraceIdx === -1) { + return null; + } + + let depth = 0; + for (let i = openBraceIdx; i < text.length; i += 1) { + const ch = text[i]; + if (ch === '{') { + depth += 1; + } else if (ch === '}') { + depth -= 1; + if (depth === 0) { + return { start: openBraceIdx, end: i }; + } + } + } + + return null; +} + +function detectIndent(blockText) { + const match = blockText.match(/\n(\s+)[A-Za-z_\[]/); + return match ? match[1] : ' '; +} + +function findMatchingBracket(text, openIdx) { + let depth = 0; + let quote = ''; + + for (let i = openIdx; i < text.length; i += 1) { + const ch = text[i]; + const prev = i > 0 ? 
text[i - 1] : ''; + + if (quote) { + if (ch === quote && prev !== '\\') { + quote = ''; + } + continue; + } + + if (ch === '\'' || ch === '"' || ch === '`') { + quote = ch; + continue; + } + + if (ch === '[') { + depth += 1; + continue; + } + + if (ch === ']') { + depth -= 1; + if (depth === 0) { + return i; + } + } + } + + return -1; +} + +function upsertDefaultRpc(blockText, rpcUrl, indent) { + const keyRegex = /\bdefaultRPC\s*:/m; + const keyMatch = keyRegex.exec(blockText); + if (keyMatch) { + const arrayStart = blockText.indexOf('[', keyMatch.index); + if (arrayStart !== -1) { + const arrayEnd = findMatchingBracket(blockText, arrayStart); + if (arrayEnd !== -1) { + return { + text: `${blockText.slice(0, arrayStart)}['${rpcUrl}']${blockText.slice(arrayEnd + 1)}`, + changed: true, + }; + } + } + + return { + text: blockText.replace(/(defaultRPC\s*:\s*)[^\n,]+/, `$1['${rpcUrl}']`), + changed: true, + }; + } + + return { + text: blockText.replace(/\{\s*/, `{\n${indent}defaultRPC: ['${rpcUrl}'],\n`), + changed: true, + }; +} + +function upsertExplorerUrl(blockText, explorerUrl, indent) { + const explorerRegex = /((explorerURL|explorerUrl)\s*:\s*)['"`][^'"`\n]*['"`]/m; + if (explorerRegex.test(blockText)) { + return { + text: blockText.replace(explorerRegex, `$1'${explorerUrl}'`), + changed: true, + }; + } + + const defaultRpcLineRegex = /(defaultRPC\s*:\s*\[[\s\S]*?\]\s*,?)/m; + if (defaultRpcLineRegex.test(blockText)) { + return { + text: blockText.replace(defaultRpcLineRegex, `$1\n${indent}explorerUrl: '${explorerUrl}',`), + changed: true, + }; + } + + return { + text: blockText.replace(/\{\s*/, `{\n${indent}explorerUrl: '${explorerUrl}',\n`), + changed: true, + }; +} + +const edits = []; +for (const entry of endpointMap) { + const range = findChainBlockRange(source, entry.chain); + if (!range) { + console.error(`Could not find chain block for CHAIN.${entry.chain} in ${filePath}`); + process.exit(1); + } + + const originalBlock = source.slice(range.start, 
range.end + 1); + const indent = detectIndent(originalBlock); + + const defaultRpcResult = upsertDefaultRpc(originalBlock, entry.url, indent); + const explorerResult = upsertExplorerUrl(defaultRpcResult.text, entry.url, indent); + + edits.push({ + start: range.start, + end: range.end, + text: explorerResult.text, + }); +} + +edits.sort((a, b) => b.start - a.start); +for (const edit of edits) { + source = source.slice(0, edit.start) + edit.text + source.slice(edit.end + 1); +} + +fs.writeFileSync(filePath, source); +NODE +} + sdk_sync_localnet_constants() { - require_cmd jq perl + require_cmd jq perl node local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" local sdk_utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" @@ -442,7 +613,7 @@ sdk_sync_localnet_constants() { pbnb="$(address_from_deploy_token "pBNB")" psol="$(address_from_deploy_token "pSOL")" usdt_eth="$(address_from_deploy_token "USDT.eth")" - usdt_bnb="$(address_from_deploy_token "USDT.bnb")" + usdt_bnb="$(address_from_deploy_token "USDT.bsc")" [[ -n "$peth" ]] || peth="0xTBD" [[ -n "$peth_arb" ]] || peth_arb="0xTBD" @@ -473,18 +644,11 @@ sdk_sync_localnet_constants() { perl -0pi -e "s/return '\\Q0x00000000000000000000000000000000000000C0\\E';/return '0x00000000000000000000000000000000000000C1';/g" "$orchestrator_file" fi - # Force SDK test chains to local anvil/surfpool endpoints for LOCAL testing. 
- perl -0pi -e ' - s#(\[CHAIN\.ETHEREUM_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9545'\''],$2#s; - s#(\[CHAIN\.ETHEREUM_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9545'\''#s; - s#(\[CHAIN\.ARBITRUM_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9546'\''],$2#s; - s#(\[CHAIN\.ARBITRUM_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9546'\''#s; - s#(\[CHAIN\.BASE_SEPOLIA\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9547'\''],$2#s; - s#(\[CHAIN\.BASE_SEPOLIA\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9547'\''#s; - s#(\[CHAIN\.BNB_TESTNET\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:9548'\''],$2#s; - s#(\[CHAIN\.BNB_TESTNET\]:\s*\{[\s\S]*?explorerUrl:\s*)'\''[^'\''\n]*'\''#$1'\''http://localhost:9548'\''#s; - s#(\[CHAIN\.SOLANA_DEVNET\]:\s*\{[\s\S]*?defaultRPC:\s*)\[[\s\S]*?\],(\s*confirmations:)#$1['\''http://localhost:8899'\''],$2#s; - ' "$chain_constants_file" + # For LOCAL testing only, force selected chain endpoints to localhost RPC/explorer URLs. 
+ if is_local_testing_env; then + sdk_rewrite_chain_endpoints_for_local "$chain_constants_file" + log_ok "Patched SDK chain.ts RPC/explorer endpoints for LOCAL testing" + fi if [[ -f "$sdk_utils_file" ]]; then perl -0pi -e "s/\[PUSH_NETWORK\\.LOCALNET\]:\s*\[\s*CHAIN\\.PUSH_TESTNET_DONUT,/\[PUSH_NETWORK.LOCALNET\]: [CHAIN.PUSH_LOCALNET,/g" "$sdk_utils_file" @@ -2075,6 +2239,7 @@ cmd_all() { step_setup_gateway step_add_uregistry_configs step_deploy_counter_and_sync_sdk + sdk_sync_localnet_constants } cmd_show_help() { diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index a9d6ebb2..95d35250 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -88,7 +88,12 @@ fi # === INITIALIZATION === # --------------------------- -# Clean start +# Clean start — preserve keyshares across restarts if they exist +if [ -d "$HOME_DIR/keyshares" ] && [ "$(ls -A "$HOME_DIR/keyshares" 2>/dev/null)" ]; then + _KEYSHARES_TMP=$(mktemp -d) + cp -r "$HOME_DIR/keyshares/." "$_KEYSHARES_TMP/" + echo "🔑 Preserved $(ls "$_KEYSHARES_TMP" | wc -l | tr -d ' ') keyshare(s) before clean" +fi rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* "$HOME_DIR"/..?* 2>/dev/null || true echo "🔧 Initializing universal validator..." @@ -96,6 +101,14 @@ echo "🔧 Initializing universal validator..." # Initialize puniversald (creates config directory and default config) $BINARY init +# Restore keyshares if they were preserved +if [ -n "${_KEYSHARES_TMP:-}" ] && [ -d "$_KEYSHARES_TMP" ]; then + mkdir -p "$HOME_DIR/keyshares" + cp -r "$_KEYSHARES_TMP/." 
"$HOME_DIR/keyshares/" + rm -rf "$_KEYSHARES_TMP" + echo "🔑 Restored $(ls "$HOME_DIR/keyshares" | wc -l | tr -d ' ') keyshare(s)" +fi + # Update the gRPC URL and keyring backend in the config # The CORE_VALIDATOR_GRPC env var is already set correctly in docker-compose.yml: # - universal-validator-1 uses core-validator-1:9090 diff --git a/universalClient/chains/evm/event_listener.go b/universalClient/chains/evm/event_listener.go index 72e04a3e..355b2137 100644 --- a/universalClient/chains/evm/event_listener.go +++ b/universalClient/chains/evm/event_listener.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "strings" "sync" "time" @@ -231,7 +232,7 @@ func (el *EventListener) processBlockRange( fromBlock, toBlock uint64, topics []ethcommon.Hash, ) error { - const maxBlockRange uint64 = 9000 // Safe under the 10000 RPC limit + const maxBlockRange uint64 = 9000 // Fast path for providers that allow large log windows currentFrom := fromBlock @@ -252,8 +253,8 @@ func (el *EventListener) processBlockRange( Msg("processing block chunk") } - // Process chunk - if err := el.processBlockChunk(ctx, currentFrom, currentTo, topics); err != nil { + // Process chunk (auto-splits when provider enforces smaller block ranges) + if err := el.processBlockChunkWithAdaptiveRange(ctx, currentFrom, currentTo, topics); err != nil { return fmt.Errorf("failed to process chunk %d-%d: %w", currentFrom, currentTo, err) } @@ -264,6 +265,66 @@ func (el *EventListener) processBlockRange( return nil } +// processBlockChunkWithAdaptiveRange processes a block range and recursively splits +// it when the RPC provider rejects the query due to block range limits. 
+func (el *EventListener) processBlockChunkWithAdaptiveRange( + ctx context.Context, + fromBlock, toBlock uint64, + topics []ethcommon.Hash, +) error { + if err := ctx.Err(); err != nil { + return err + } + + err := el.processBlockChunk(ctx, fromBlock, toBlock, topics) + if err == nil { + return nil + } + + if !isBlockRangeLimitError(err) || fromBlock >= toBlock { + return err + } + + mid := fromBlock + (toBlock-fromBlock)/2 + el.logger.Warn(). + Uint64("from_block", fromBlock). + Uint64("to_block", toBlock). + Uint64("split_left_to", mid). + Uint64("split_right_from", mid+1). + Msg("provider rejected log range, splitting chunk") + + if err := el.processBlockChunkWithAdaptiveRange(ctx, fromBlock, mid, topics); err != nil { + return err + } + + return el.processBlockChunkWithAdaptiveRange(ctx, mid+1, toBlock, topics) +} + +func isBlockRangeLimitError(err error) bool { + if err == nil { + return false + } + + msg := strings.ToLower(err.Error()) + indicators := []string{ + "eth_getlogs requests with up to", + "block range should work", + "maximum block range", + "max block range", + "limit the query to", + "query returned more than", + "block range exceeded", + } + + for _, indicator := range indicators { + if strings.Contains(msg, indicator) { + return true + } + } + + return false +} + // processBlockChunk processes a single chunk of blocks func (el *EventListener) processBlockChunk( ctx context.Context, @@ -344,6 +405,19 @@ func (el *EventListener) getStartBlock(ctx context.Context) (uint64, error) { return el.getStartBlockFromConfig(ctx) } + // If config explicitly asks to start from a newer block than persisted state, + // prefer the config to avoid replaying very old ranges after environment resets. + if el.eventStartFrom != nil && *el.eventStartFrom >= 0 { + configuredStart := uint64(*el.eventStartFrom) + if configuredStart > blockHeight { + el.logger.Info(). + Uint64("stored_block", blockHeight). + Uint64("configured_block", configuredStart). 
+ Msg("configured EventStartFrom is ahead of stored state, starting from configured block") + return configuredStart, nil + } + } + el.logger.Info(). Uint64("block", blockHeight). Msg("resuming from last processed block") From 4c8d680460f78c5a4dbe5abfbcf27a175e9afac6 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Tue, 31 Mar 2026 22:37:53 +0530 Subject: [PATCH 30/61] local-setup with tss keygen support --- e2e-tests/README.md | 380 +++--- e2e-tests/deploy_addresses.json | 10 +- e2e-tests/setup.sh | 69 +- local-multi-validator/docker-compose.yml | 8 + .../scripts/setup-universal.sh | 4 + local-setup-e2e/devnet | 1119 +++++++++++++++++ 6 files changed, 1390 insertions(+), 200 deletions(-) create mode 100755 local-setup-e2e/devnet diff --git a/e2e-tests/README.md b/e2e-tests/README.md index f2daa812..fab82ec9 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -4,47 +4,57 @@ This folder provides a full, automated local E2E bootstrap for Push Chain. It covers: -1. local-multi-validator devnet (Docker, validators + universal validators) -2. genesis key recovery + account funding -3. core contracts deployment -4. swap AMM deployment (WPC + V3 core + V3 periphery) -5. pool creation for pETH/WPC -6. core `.env` generation from deployed addresses -7. token config update (`eth_sepolia_eth.json`) -8. gateway contracts deployment -9. push-chain-sdk setup + E2E test runners -10. uregistry chain/token config submission +1. Local devnet — 4 `pchaind` + 4 `puniversald` processes (no Docker) +2. TSS key generation +3. Genesis key recovery + account funding +4. Core contracts deployment (auto-resume on receipt errors) +5. Swap AMM deployment (WPC + Uniswap V3 core + periphery) +6. WPC liquidity pool creation for all synthetic tokens +7. Core `.env` generation from deployed addresses +8. Token config updates +9. Gateway contracts deployment (auto-resume on receipt errors) +10. `configureUniversalCore` script +11. 
uregistry chain/token config submission +12. CounterPayable deployment + SDK constant sync +13. `push-chain-sdk` E2E test runners --- ## What gets created -- `e2e-tests/repos/` — cloned external repos - - push-chain-core-contracts - - push-chain-swap-internal-amm-contracts - - push-chain-gateway-contracts - - push-chain-sdk -- `e2e-tests/logs/` — logs for each major deployment step +- `local-setup-e2e/data/` — validator + universal-validator home directories +- `local-setup-e2e/logs/` — per-process log files +- `e2e-tests/logs/` — logs for each deployment step - `e2e-tests/deploy_addresses.json` — contract/token address source-of-truth +External repos are resolved from **sibling directories** (relative to `push-chain/`): + +| Repo | Default path | +|---|---| +| `push-chain-core-contracts` | `../push-chain-core-contracts` | +| `push-chain-swap-internal-amm-contracts` | `../push-chain-swap-internal-amm-contracts` | +| `push-chain-gateway-contracts` | `../push-chain-gateway-contracts` | +| `push-chain-sdk` | `../push-chain-sdk` | + +Override any of these with env vars (`CORE_CONTRACTS_DIR`, `SWAP_AMM_DIR`, `GATEWAY_DIR`, `PUSH_CHAIN_SDK_DIR`). + --- ## Prerequisites Required tools: -- `git` +- `git`, `make` - `jq` - `node`, `npm`, `npx` -- `forge` (Foundry) -- `make` +- `forge`, `cast` (Foundry) +- `pchaind` and `puniversald` binaries in `build/` (built by `make build`) -Also ensure the Push Chain repo builds/runs locally. 
- -Before running any e2e setup command, run: +Build the binaries first: ```bash make replace-addresses +make build ``` --- @@ -57,44 +67,42 @@ Copy env template: cp e2e-tests/.env.example e2e-tests/.env ``` -Important variables in `.env`: - -- `PUSH_RPC_URL` (default `http://localhost:8545`) -- `TESTING_ENV` (`LOCAL` enables anvil/surfpool + local RPC rewrites) -- `PRIVATE_KEY` -- `FUND_TO_ADDRESS` -- `POOL_CREATION_TOPUP_AMOUNT` (funding for deployer before pool creation) -- `CORE_CONTRACTS_BRANCH` -- `SWAP_AMM_BRANCH` -- `GATEWAY_BRANCH` (currently `e2e-push-node`) -- `PUSH_CHAIN_SDK_BRANCH` (default `outbound_changes`) -- `PUSH_CHAIN_SDK_E2E_DIR` (default `packages/core/__e2e__/evm/inbound`) - -### TESTING_ENV=LOCAL behavior - -Set this in `e2e-tests/.env` when running local fork-based E2E: - -```bash -TESTING_ENV=LOCAL -``` - -When `TESTING_ENV=LOCAL`, `setup-environment` (and `all`) now does both: - -1. starts local fork nodes (`anvil` for Sepolia/Arbitrum/Base/BSC and `surfpool` for Solana) -2. rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to your configured local RPC URLs: - - `ANVIL_SEPOLIA_HOST_RPC_URL` (default `http://localhost:9545`) - - `ANVIL_ARBITRUM_HOST_RPC_URL` (default `http://localhost:9546`) - - `ANVIL_BASE_HOST_RPC_URL` (default `http://localhost:9547`) - - `ANVIL_BSC_HOST_RPC_URL` (default `http://localhost:9548`) - - `SURFPOOL_SOLANA_HOST_RPC_URL` (default `http://localhost:8899`) -3. patches universal-validator container RPC endpoints (`pushuv_config.json`) to the corresponding local endpoints - -Genesis account source: - -- `GENESIS_ACCOUNTS_JSON` can point to a local file, but if missing `setup.sh` automatically - reads `/tmp/push-accounts/genesis_accounts.json` from docker container `core-validator-1`. - -Path settings are repository-relative and portable. +Edit `e2e-tests/.env`. 
Key variables: + +| Variable | Default | Description | +|---|---|---| +| `TESTING_ENV` | _(empty)_ | Set to `LOCAL` for local devnet | +| `PUSH_RPC_URL` | `http://localhost:8545` | Push Chain EVM JSON-RPC | +| `PRIVATE_KEY` | — | EVM deployer private key (forge/hardhat) | +| `EVM_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK EVM signer key | +| `PUSH_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK Push Chain signer key | +| `FUND_TO_ADDRESS` | — | Address to top up from genesis account | +| `POOL_CREATION_TOPUP_AMOUNT` | `50000000000000000000upc` | Deployer top-up before pool creation | +| `CORE_CONTRACTS_BRANCH` | `e2e-push-node` | | +| `SWAP_AMM_BRANCH` | `e2e-push-node` | | +| `GATEWAY_BRANCH` | `e2e-push-node` | | +| `PUSH_CHAIN_SDK_BRANCH` | `outbound_changes` | | +| `PUSH_CHAIN_SDK_E2E_DIR` | `packages/core/__e2e__/evm/inbound` | Test directory inside SDK | + +### TESTING_ENV=LOCAL + +When set in `.env`, the `setup-environment` step (also called by `all`) does: + +1. Starts local fork nodes: + - `anvil` for Ethereum Sepolia, Arbitrum Sepolia, Base Sepolia, BSC Testnet + - `surfpool` for Solana +2. Rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to local fork URLs +3. Patches `puniversald` chain RPC config (`local-setup-e2e/data/universal-N/.puniversal/config/pushuv_config.json`) to use local fork endpoints + +Default local fork URLs (override in `.env`): + +| Variable | Default | +|---|---| +| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | +| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | +| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | +| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | +| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | --- @@ -102,106 +110,123 @@ Path settings are repository-relative and portable. ```bash make replace-addresses -./e2e-tests/setup.sh all +make build +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all ``` -This runs the full sequence in order: - -1. `devnet` -2. `recover-genesis-key` -3. 
`fund` -4. `setup-core` -5. `setup-swap` -6. `sync-addresses` -7. `create-pool` -8. `check-addresses` -9. `write-core-env` -10. `update-token-config` -11. `setup-gateway` -12. `add-uregistry-configs` +The `all` pipeline runs in order: + +1. `setup-environment` — start anvil/surfpool + patch chain RPC configs +2. Build binaries (`make replace-addresses` + `make build`) +3. `devnet` — start 4 validators + 4 universal validators (clean) +4. `tss-keygen` — TSS key generation (via `./local-setup-e2e/devnet tss-keygen`) +5. `recover-genesis-key` — import genesis mnemonic into local keyring +6. `fund` — top up deployer address from genesis account +7. `setup-core` — deploy core contracts (forge, auto-resume) +8. `setup-swap` — deploy WPC + Uniswap V3 (hardhat) +9. `sync-addresses` — copy addresses into swap `test-addresses.json` +10. `create-pool` — create WPC liquidity pools for all tokens +11. `write-core-env` — generate core contracts `.env` +12. `configure-core` — run `configureUniversalCore.s.sol` (forge, auto-resume) +13. `update-token-config` — patch token config JSON files +14. `setup-gateway` — deploy gateway contracts (forge, auto-resume) +15. `add-uregistry-configs` — submit chain + token config txs +16. 
`deploy-counter-sdk` — deploy CounterPayable + sync SDK constants --- -## Command reference +## Local devnet (`local-setup-e2e/devnet`) -```bash -./e2e-tests/setup.sh devnet -./e2e-tests/setup.sh print-genesis -./e2e-tests/setup.sh recover-genesis-key -./e2e-tests/setup.sh fund -./e2e-tests/setup.sh setup-core -./e2e-tests/setup.sh setup-swap -./e2e-tests/setup.sh sync-addresses -./e2e-tests/setup.sh create-pool -./e2e-tests/setup.sh check-addresses -./e2e-tests/setup.sh write-core-env -./e2e-tests/setup.sh update-token-config -./e2e-tests/setup.sh setup-gateway -./e2e-tests/setup.sh setup-sdk -./e2e-tests/setup.sh sdk-test-all -./e2e-tests/setup.sh sdk-test-pctx-last-transaction -./e2e-tests/setup.sh sdk-test-send-to-self -./e2e-tests/setup.sh sdk-test-progress-hook -./e2e-tests/setup.sh sdk-test-bridge-multicall -./e2e-tests/setup.sh sdk-test-pushchain -./e2e-tests/setup.sh add-uregistry-configs -make replace-addresses -./e2e-tests/setup.sh all -``` +The `devnet` script manages 4 `pchaind` validators and 4 `puniversald` universal validators as local OS processes (no Docker). 
-### push-chain-sdk setup + tests +``` +local-setup-e2e/ + devnet # management script + data/ # validator home dirs + PID file (gitignored) + logs/ # per-process log files (gitignored) +``` -Clone and install dependencies in one command: +### Devnet commands ```bash -./e2e-tests/setup.sh setup-sdk +./local-setup-e2e/devnet start [--build] # Start all 4 validators + 4 UVs + # --build for clean start (wipes data) +./local-setup-e2e/devnet stop # Stop all processes (keep data) +./local-setup-e2e/devnet down # Stop and remove data +./local-setup-e2e/devnet status # Show running processes + block heights +./local-setup-e2e/devnet logs [name] # Tail logs (validator-1, universal-2, all, …) +./local-setup-e2e/devnet tss-keygen # Initiate TSS key generation +./local-setup-e2e/devnet setup-uvalidators # Register UVs + create AuthZ grants ``` -This executes: +Port layout: -- `yarn install` -- `npm install` -- `npm i --save-dev @types/bs58` +| Node | RPC | EVM JSON-RPC | WS | +|---|---|---|---| +| validator-1 | 26657 | 8545 | 8546 | +| validator-2 | 26658 | 8547 | 8548 | +| validator-3 | 26659 | 8549 | 8550 | +| validator-4 | 26660 | 8551 | 8552 | -It also fetches `UEA_PROXY_IMPLEMENTATION` with: +| UV | Query | TSS P2P | +|---|---|---| +| universal-validator-1 | 8080 | 39000 | +| universal-validator-2 | 8081 | 39001 | +| universal-validator-3 | 8082 | 39002 | +| universal-validator-4 | 8083 | 39003 | -- `cast call 0x00000000000000000000000000000000000000ea "UEA_PROXY_IMPLEMENTATION()(address)"` - -Then it updates both: - -- `e2e-tests/deploy_addresses.json` as `contracts.UEA_PROXY_IMPLEMENTATION` -- `push-chain-sdk/packages/core/src/lib/constants/chain.ts` at `[PUSH_NETWORK.LOCALNET]` - -SDK tests are discovered from: - -- `push-chain-sdk/packages/core/__e2e__/evm/inbound` - -Run all configured SDK E2E files: +### Clean devnet restart ```bash -./e2e-tests/setup.sh sdk-test-all +./local-setup-e2e/devnet down +./local-setup-e2e/devnet start --build ``` -Run single files: +--- + 
+## setup.sh command reference ```bash -./e2e-tests/setup.sh sdk-test-pctx-last-transaction -./e2e-tests/setup.sh sdk-test-send-to-self -./e2e-tests/setup.sh sdk-test-progress-hook -./e2e-tests/setup.sh sdk-test-bridge-multicall -./e2e-tests/setup.sh sdk-test-pushchain +TESTING_ENV=LOCAL bash e2e-tests/setup.sh ``` -Before each SDK test run, the script automatically rewrites these values in configured files: - -- `PUSH_NETWORK.TESTNET_DONUT` → `PUSH_NETWORK.LOCALNET` -- `PUSH_NETWORK.TESTNET` → `PUSH_NETWORK.LOCALNET` +| Command | Description | +|---|---| +| `all` | Full setup pipeline | +| `setup-environment` | Start anvil/surfpool + patch chain RPC configs | +| `devnet` | Start local devnet + register universal validators | +| `print-genesis` | Print first genesis account + mnemonic | +| `recover-genesis-key` | Import genesis mnemonic into local keyring | +| `fund` | Fund `FUND_TO_ADDRESS` from genesis account | +| `setup-core` | Build + deploy core contracts (auto-resume) | +| `setup-swap` | Build + deploy WPC + Uniswap V3 | +| `sync-addresses` | Copy `deploy_addresses.json` into swap `test-addresses.json` | +| `create-pool` | Create WPC pools for all deployed core tokens | +| `configure-core` | Run `configureUniversalCore.s.sol` (auto-resume) | +| `check-addresses` | Assert required contract addresses are recorded | +| `write-core-env` | Generate core contracts `.env` | +| `update-token-config` | Patch token config JSON contract addresses | +| `setup-gateway` | Build + deploy gateway contracts (auto-resume) | +| `add-uregistry-configs` | Submit chain + token configs to uregistry | +| `deploy-counter-sdk` | Deploy CounterPayable + sync SDK `COUNTER_ADDRESS_PAYABLE` | +| `bootstrap-cea-sdk` | Ensure CEA is deployed for SDK signer (Route 2 bootstrap) | +| `setup-sdk` | Install SDK dependencies + generate SDK `.env` | +| `sdk-test-all` | Run all configured SDK E2E test files | +| `sdk-test-pctx-last-transaction` | Run `pctx-last-transaction.spec.ts` | +| 
`sdk-test-send-to-self` | Run `send-to-self.spec.ts` | +| `sdk-test-progress-hook` | Run `progress-hook-per-tx.spec.ts` | +| `sdk-test-bridge-multicall` | Run `bridge-multicall.spec.ts` | +| `sdk-test-pushchain` | Run `pushchain.spec.ts` | +| `sdk-test-bridge-hooks` | Run `bridge-hooks.spec.ts` | +| `record-contract K A` | Manually record contract key + address | +| `record-token N S A` | Manually record token name, symbol, address | +| `help` | Show help | --- ## Address tracking model -`deploy_addresses.json` is the canonical address registry used by later steps. +`e2e-tests/deploy_addresses.json` is the canonical address registry. ### Required contracts @@ -209,91 +234,104 @@ Before each SDK test run, the script automatically rewrites these values in conf - `contracts.Factory` - `contracts.QuoterV2` - `contracts.SwapRouter` +- `contracts.UEA_PROXY_IMPLEMENTATION` +- `contracts.COUNTER_ADDRESS_PAYABLE` ### Token entries -- `tokens[]` from core deployment logs (`name`, `symbol`, `address`, `source`) +`tokens[]` records each synthetic ERC-20 deployed by core contracts (`name`, `symbol`, `address`, `decimals`). These addresses are used to: - sync swap repo `test-addresses.json` - generate core contracts `.env` -- update `config/testnet-donut/tokens/eth_sepolia_eth.json` +- update `config/testnet-donut/tokens/*.json` +- submit token config txs to uregistry Manual helpers: ```bash -./e2e-tests/setup.sh record-contract Factory 0x1234567890123456789012345678901234567890 -./e2e-tests/setup.sh record-token "Push ETH" pETH 0x1234567890123456789012345678901234567890 +./e2e-tests/setup.sh record-contract Factory 0x1234... +./e2e-tests/setup.sh record-token "Push ETH" pETH 0x1234... 
``` --- ## Auto-retry and resilience behavior -### Core contracts - -- Runs `forge script scripts/localSetup/setup.s.sol ...` -- If receipt fetch fails, auto-retries with `--resume` in a loop until success -- Optional cap via: - -```bash -CORE_RESUME_MAX_ATTEMPTS=0 # 0 means unlimited (default) -``` - -### Gateway contracts +### Forge scripts (core, gateway, configureUniversalCore) -- Runs gateway `forge script ... setup.s.sol` -- If initial execution fails, retries with `--resume` +- Stale broadcast cache from previous runs is cleared automatically before each fresh deploy. +- If the initial `forge script --broadcast` fails (e.g., receipt timeout), retries with `--resume` until success. +- Optional cap: `CORE_RESUME_MAX_ATTEMPTS=5` (default `0` = unlimited). ### uregistry tx submission -- Submits chain config then token config -- Retries automatically on account sequence mismatch -- Validates tx result by checking returned `code` +- Retries automatically on `account sequence mismatch`. +- Validates tx result by checking the returned `code` field. 
--- ## Generated files of interest -- `e2e-tests/deploy_addresses.json` -- `e2e-tests/repos/push-chain-swap-internal-amm-contracts/test-addresses.json` -- `e2e-tests/repos/push-chain-core-contracts/.env` -- `config/testnet-donut/tokens/eth_sepolia_eth.json` (updated contract address) +| File | Description | +|---|---| +| `e2e-tests/deploy_addresses.json` | Contract/token address registry | +| `e2e-tests/logs/` | Per-step deployment logs | +| `local-setup-e2e/data/` | Validator + UV home directories | +| `local-setup-e2e/logs/` | Per-process stdout/stderr | +| `/test-addresses.json` | Swap repo address file (synced from deploy_addresses.json) | +| `/.env` | Core contracts env (generated by `write-core-env`) | +| `config/testnet-donut/*/tokens/*.json` | Token config files (updated contract addresses) | --- -## Clean re-run - -For a fresh run: +## Clean full re-run ```bash -rm -rf e2e-tests/repos -./local-multi-validator/devnet down || true +# Stop + wipe devnet +./local-setup-e2e/devnet down + +# Reset state +rm -f e2e-tests/deploy_addresses.json + +# Rebuild + run make replace-addresses -./e2e-tests/setup.sh all +make build +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all ``` --- ## Troubleshooting -### 1) Core script keeps stopping with receipt errors +### 1) `pchaind` or `puniversald` won't start + +Check that `make build` completed successfully and `build/pchaind` / `build/puniversald` exist. + +### 2) Validators stuck at height 0 + +P2P peer connections failing. The devnet script sets `allow_duplicate_ip = true` and `addr_book_strict = false` automatically for all-localhost setups. If reusing old data, run `./local-setup-e2e/devnet down` to wipe and restart clean. + +### 3) TSS keygen not completing -This is expected intermittently on local RPC. The script auto-runs `--resume` until completion. +Check UV logs (`./local-setup-e2e/devnet logs universal-1`). 
UVs need: +- All 4 validators bonded +- All 4 UVs registered with AuthZ grants +- External chain RPC endpoints configured (set by `setup-environment`) -### 2) Missing branch in a dependency repo +### 4) Core/gateway forge script keeps stopping with receipt errors -The script attempts to resolve/fallback to available remote branches. +Expected intermittently. The script auto-retries with `--resume` until all receipts confirm. -### 3) `account sequence mismatch` in uregistry tx +### 5) `account sequence mismatch` in uregistry tx -The script retries automatically for this error. +The script retries automatically. -### 4) WPC deployment artifact not found +### 6) Swap AMM deployment fails mid-run -`setup-swap` compiles before deployment. If interrupted mid-run, re-run: +Re-run the individual step: ```bash -./e2e-tests/setup.sh setup-swap +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-swap ``` diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index fc0013d4..734ec7a8 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,13 +1,13 @@ { - "generatedAt": "2026-03-31T09:09:56Z", + "generatedAt": "2026-03-31T16:38:44Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", - "SwapRouter": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", - "QuoterV2": "0x7fd62fe2Aba9af8bF4d08a6cce49beA8c8Ca6d97", - "PositionManager": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5", + "SwapRouter": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", + "QuoterV2": "0xE9cb561141553DFa0A576cCd34546BECffb64Af1", + "PositionManager": "0x484aC6ED747090fe8C82c5F10427ccC2F2998930", "UEA_PROXY_IMPLEMENTATION": "0x2C297101b7d3e0911296b9A64d106684a161b4C9", - "COUNTER_ADDRESS_PAYABLE": "0x7e875e1384030d8b22Eb359C3Fce940D2882643e" + "COUNTER_ADDRESS_PAYABLE": "0xDaC125f9350cD25786Cfd5c8eb2b6837c5e7Ce6B" }, "tokens": [ { diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 
32b21686..75f2ef97 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -24,7 +24,7 @@ fi : "${FUND_AMOUNT:=1000000000000000000upc}" : "${POOL_CREATION_TOPUP_AMOUNT:=50000000000000000000upc}" : "${GAS_PRICES:=100000000000upc}" -: "${LOCAL_DEVNET_DIR:=./local-multi-validator}" +: "${LOCAL_DEVNET_DIR:=./local-setup-e2e}" : "${LEGACY_LOCAL_NATIVE_DIR:=./local-native}" : "${CORE_CONTRACTS_REPO:=https://github.com/pushchain/push-chain-core-contracts.git}" @@ -820,7 +820,7 @@ step_run_sdk_tests_all() { step_devnet() { require_cmd bash - log_info "Starting local-multi-validator devnet" + log_info "Starting local devnet" ( cd "$LOCAL_DEVNET_DIR" ./devnet start --build @@ -852,12 +852,12 @@ step_setup_environment() { local base_host_rpc="${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" local bsc_host_rpc="${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" - local uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-http://host.docker.internal:9545}" - local uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-http://host.docker.internal:9546}" - local uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" - local uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" + local uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-http://localhost:9545}" + local uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-http://localhost:9546}" + local uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-http://localhost:9547}" + local uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-http://localhost:9548}" local solana_host_rpc="${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}" - local uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-http://host.docker.internal:8899}" + local uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-http://localhost:8899}" patch_chain_config_public_rpc() { local file_path="$1" @@ -974,20 +974,27 @@ step_setup_environment() { solana_latest_slot="$(wait_for_solana_slot "$solana_host_rpc")" local patched_count=0 - local 
config_path="/root/.puniversal/config/pushuv_config.json" - local uv_container - for uv_container in universal-validator-1 universal-validator-2 universal-validator-3 universal-validator-4; do - if ! docker ps --format '{{.Names}}' | grep -qx "$uv_container"; then - continue - fi + local uv_idx + for uv_idx in 1 2 3 4; do + # Prefer local file (local-setup-e2e devnet); fall back to Docker container + local local_cfg="$LOCAL_DEVNET_DIR/data/universal-${uv_idx}/.puniversal/config/pushuv_config.json" + local uv_container="universal-validator-${uv_idx}" local tmp_in tmp_out tmp_in="$(mktemp)" tmp_out="$(mktemp)" - if ! docker exec "$uv_container" cat "$config_path" >"$tmp_in"; then + if [[ -f "$local_cfg" ]]; then + cp "$local_cfg" "$tmp_in" + elif docker ps --format '{{.Names}}' | grep -qx "$uv_container" 2>/dev/null; then + local docker_cfg="/root/.puniversal/config/pushuv_config.json" + if ! docker exec "$uv_container" cat "$docker_cfg" >"$tmp_in" 2>/dev/null; then + rm -f "$tmp_in" "$tmp_out" + log_warn "Failed to read config from $uv_container" + continue + fi + else rm -f "$tmp_in" "$tmp_out" - log_warn "Failed to read $config_path from $uv_container" continue fi @@ -1015,15 +1022,20 @@ step_setup_environment() { | .chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].event_start_from = $solana_start ' "$tmp_in" >"$tmp_out" - docker cp "$tmp_out" "$uv_container":"$config_path" + if [[ -f "$local_cfg" ]]; then + cp "$tmp_out" "$local_cfg" + log_ok "Updated universal-validator-${uv_idx} local config for Sepolia/Arbitrum/Base/BSC/Solana forks" + else + local docker_cfg="/root/.puniversal/config/pushuv_config.json" + docker cp "$tmp_out" "$uv_container":"$docker_cfg" + log_ok "Updated $uv_container Docker config for Sepolia/Arbitrum/Base/BSC/Solana local forks" + fi rm -f "$tmp_in" "$tmp_out" - patched_count=$((patched_count + 1)) - log_ok "Updated $uv_container config for Sepolia/Arbitrum/Base/BSC/Solana local forks" done if [[ "$patched_count" -eq 0 ]]; then - 
log_warn "No universal-validator containers are running yet; skipped pushuv_config.json patch" + log_warn "No universal validators found (local or Docker); skipped pushuv_config.json patch" return 0 fi @@ -1225,6 +1237,9 @@ step_setup_core_contracts() { local resume_attempt=1 local resume_max_attempts="${CORE_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + log_info "Clearing stale forge broadcast cache for fresh deploy" + rm -rf "$CORE_CONTRACTS_DIR/broadcast/setup.s.sol" + log_info "Running local core setup script" ( cd "$CORE_CONTRACTS_DIR" @@ -1594,6 +1609,9 @@ step_setup_gateway() { log_info "Building gateway evm contracts" (cd "$gw_dir" && forge build) + log_info "Clearing stale forge broadcast cache for gateway deploy" + rm -rf "$gw_dir/broadcast/$(basename "$gw_setup_script" .s.sol).s.sol" + log_info "Running gateway local setup script" ( cd "$gw_dir" @@ -2019,6 +2037,9 @@ step_configure_universal_core() { return 0 fi + log_info "Clearing stale forge broadcast cache for configureUniversalCore" + rm -rf "$CORE_CONTRACTS_DIR/broadcast/configureUniversalCore.s.sol" + log_info "Running configureUniversalCore script" if ( cd "$CORE_CONTRACTS_DIR" @@ -2287,12 +2308,12 @@ Important env: ANVIL_ARBITRUM_HOST_RPC_URL=http://localhost:9546 ANVIL_BASE_HOST_RPC_URL=http://localhost:9547 ANVIL_BSC_HOST_RPC_URL=http://localhost:9548 - LOCAL_SEPOLIA_UV_RPC_URL=http://host.docker.internal:9545 - LOCAL_ARBITRUM_UV_RPC_URL=http://host.docker.internal:9546 - LOCAL_BASE_UV_RPC_URL=http://host.docker.internal:9547 - LOCAL_BSC_UV_RPC_URL=http://host.docker.internal:9548 + LOCAL_SEPOLIA_UV_RPC_URL=http://localhost:9545 + LOCAL_ARBITRUM_UV_RPC_URL=http://localhost:9546 + LOCAL_BASE_UV_RPC_URL=http://localhost:9547 + LOCAL_BSC_UV_RPC_URL=http://localhost:9548 SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 - LOCAL_SOLANA_UV_RPC_URL=http://host.docker.internal:8899 + LOCAL_SOLANA_UV_RPC_URL=http://localhost:8899 EOF } diff --git a/local-multi-validator/docker-compose.yml 
b/local-multi-validator/docker-compose.yml index bdac8092..815ae85f 100644 --- a/local-multi-validator/docker-compose.yml +++ b/local-multi-validator/docker-compose.yml @@ -221,6 +221,8 @@ services: ports: - "8080:8080" # Query API - "39000:39000" # TSS P2P + ulimits: + stack: -1 volumes: - universal1-data:/root/.puniversal - ./scripts:/opt/scripts @@ -265,6 +267,8 @@ services: ports: - "8081:8080" # Query API - "39001:39001" # TSS P2P + ulimits: + stack: -1 volumes: - universal2-data:/root/.puniversal - ./scripts:/opt/scripts @@ -309,6 +313,8 @@ services: ports: - "8082:8080" # Query API - "39002:39002" # TSS P2P + ulimits: + stack: -1 volumes: - universal3-data:/root/.puniversal - ./scripts:/opt/scripts @@ -353,6 +359,8 @@ services: ports: - "8083:8080" # Query API - "39003:39003" # TSS P2P + ulimits: + stack: -1 volumes: - universal4-data:/root/.puniversal - ./scripts:/opt/scripts diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 95d35250..adb957bd 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -457,4 +457,8 @@ fi echo "🚀 Starting universal validator $UNIVERSAL_ID..." echo "🔗 Connecting to core validator: $CORE_VALIDATOR_GRPC" +# Increase OS thread stack size to unlimited so the Rust DKLS sign library +# (called via CGo) does not SIGSEGV from native stack overflow during sign sessions. +ulimit -s unlimited + exec $BINARY start \ No newline at end of file diff --git a/local-setup-e2e/devnet b/local-setup-e2e/devnet new file mode 100755 index 00000000..21e83e29 --- /dev/null +++ b/local-setup-e2e/devnet @@ -0,0 +1,1119 @@ +#!/usr/bin/env bash +# devnet - Push Chain Local Network Manager (local processes, no Docker) +# Drop-in replacement for local-multi-validator/devnet using native binaries. 
+ +set -euo pipefail +IFS=$'\n\t' + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PUSH_CHAIN_DIR="$(cd -P "$SCRIPT_DIR/.." && pwd)" +DATA_DIR="$SCRIPT_DIR/data" +LOG_DIR="$SCRIPT_DIR/logs" +PID_DIR="$DATA_DIR/pids" +TMP_DIR="/tmp/push-accounts" + +# Prefer freshly built binaries; fall back to PATH +PCHAIND="${PCHAIND:-$PUSH_CHAIN_DIR/build/pchaind}" +PUNIVERSALD="${PUNIVERSALD:-$PUSH_CHAIN_DIR/build/puniversald}" +[[ -x "$PCHAIND" ]] || PCHAIND=pchaind +[[ -x "$PUNIVERSALD" ]] || PUNIVERSALD=puniversald + +CHAIN_ID="${CHAIN_ID:-localchain_9000-1}" +EVM_CHAIN_ID="9000" +DENOM="upc" +KEYRING="test" +KEYALGO="eth_secp256k1" +BLOCK_TIME="1s" + +# Chain IDs for UV config +SEPOLIA_CHAIN_ID="eip155:11155111" +ARBITRUM_CHAIN_ID="eip155:421614" +BASE_CHAIN_ID="eip155:84532" +BSC_CHAIN_ID="eip155:97" +SOLANA_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" +LOCALCHAIN_CHAIN_ID="localchain_9000-1" + +# Per-validator ports (matching docker-compose.yml host port mapping) +val_rpc() { case $1 in 1) echo 26657;; 2) echo 26658;; 3) echo 26659;; 4) echo 26660;; esac; } +val_rest() { case $1 in 1) echo 1317;; 2) echo 1318;; 3) echo 1319;; 4) echo 1320;; esac; } +val_grpc() { case $1 in 1) echo 9090;; 2) echo 9093;; 3) echo 9095;; 4) echo 9097;; esac; } +val_grpcweb() { case $1 in 1) echo 9091;; 2) echo 9094;; 3) echo 9096;; 4) echo 9098;; esac; } +val_p2p() { case $1 in 1) echo 26656;; 2) echo 26666;; 3) echo 26676;; 4) echo 26686;; esac; } +val_evm() { case $1 in 1) echo 8545;; 2) echo 8547;; 3) echo 8549;; 4) echo 8551;; esac; } +val_evmws() { case $1 in 1) echo 8546;; 2) echo 8548;; 3) echo 8550;; 4) echo 8552;; esac; } +val_pprof() { echo $((6060 + $1)); } + +uv_query() { case $1 in 1) echo 8080;; 2) echo 8081;; 3) echo 8082;; 4) echo 8083;; esac; } +uv_tss() { echo $((39000 + $1 - 1)); } +uv_grpc() { echo "localhost:$(val_grpc $1)"; } + +# TSS peer IDs (deterministic from private keys 01..01 02..02 etc.) 
+uv_peer_id() { + case $1 in + 1) echo "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5";; + 2) echo "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq";; + 3) echo "12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba";; + 4) echo "12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw";; + esac +} + +# TSS private keys +uv_tss_key() { + local byte; byte=$(printf '%02x' "$1") + local result="" + for _ in {1..32}; do result+="$byte"; done + echo "$result" +} + +GREEN='\033[0;32m'; RED='\033[0;31m'; YELLOW='\033[0;33m' +CYAN='\033[0;36m'; BLUE='\033[1;94m'; NC='\033[0m'; BOLD='\033[1m' + +log() { printf "%b\n" "${CYAN}==>${NC} $*"; } +ok() { printf "%b\n" "${GREEN}✓${NC} $*"; } +warn() { printf "%b\n" "${YELLOW}!${NC} $*"; } +err() { printf "%b\n" "${RED}✗${NC} $*"; } +header() { printf "\n%b\n" "${BOLD}${BLUE}═══ $* ═══${NC}"; } + +require_cmd() { for c in "$@"; do command -v "$c" >/dev/null 2>&1 || { err "Required command not found: $c"; exit 1; }; done; } + +# ─── PID management ─────────────────────────────────────────────────────────── + +write_pid() { local name="$1" pid="$2"; mkdir -p "$PID_DIR"; echo "$pid" > "$PID_DIR/$name.pid"; } +read_pid() { local f="$PID_DIR/$1.pid"; [[ -f "$f" ]] && cat "$f" || echo ""; } + +is_alive() { + local pid="${1:-}" + [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null +} + +# ─── Health checks ───────────────────────────────────────────────────────────── + +wait_rpc() { + local port="$1" label="$2" max="${3:-120}" i=0 + while (( i < max )); do + if curl -sf "http://127.0.0.1:$port/status" >/dev/null 2>&1; then return 0; fi + sleep 1; (( i++ )) + done + err "$label RPC not ready after ${max}s"; return 1 +} + +wait_block() { + local port="$1" label="$2" max="${3:-120}" i=0 + while (( i < max )); do + local h + h=$(curl -sf "http://127.0.0.1:$port/status" 2>/dev/null \ + | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0") + if [[ "$h" != "0" && "$h" != "null" && -n "$h" ]]; then return 0; fi + sleep 
2; (( i += 2 )) + done + err "$label not producing blocks after ${max}s"; return 1 +} + +wait_uv_health() { + local port="$1" label="$2" max="${3:-180}" i=0 + while (( i < max )); do + if curl -sf "http://127.0.0.1:$port/health" >/dev/null 2>&1; then return 0; fi + sleep 2; (( i += 2 )) + done + err "$label not healthy after ${max}s"; return 1 +} + +wait_validator_bonded() { + local valoper="$1" max="${2:-120}" i=0 genesis_rpc="http://127.0.0.1:$(val_rpc 1)" + while (( i < max )); do + local status + status=$("$PCHAIND" query staking validator "$valoper" --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND") + [[ "$status" == "BOND_STATUS_BONDED" ]] && return 0 + sleep 3; (( i += 3 )) + done + return 1 +} + +wait_chain_tx() { + local txhash="$1" node="$2" max="${3:-30}" i=0 + while (( i < max )); do + local code + code=$("$PCHAIND" query tx "$txhash" --node="$node" --output json 2>/dev/null \ + | jq -r '.code // empty' 2>/dev/null || true) + [[ "$code" == "0" ]] && return 0 + [[ -n "$code" && "$code" != "0" ]] && return 1 + sleep 1; (( i++ )) + done + return 1 +} + +# ─── Account generation ──────────────────────────────────────────────────────── + +generate_accounts() { + if [[ -f "$TMP_DIR/genesis_accounts.json" && -f "$TMP_DIR/validators.json" && -f "$TMP_DIR/hotkeys.json" ]]; then + ok "Account files already exist in $TMP_DIR — skipping generation" + return 0 + fi + + require_cmd jq "$PCHAIND" + mkdir -p "$TMP_DIR" + + log "Generating genesis, validator, and hotkey accounts..." 
+ + # genesis-acc-1: hardcoded admin mnemonic + local ADMIN_MNEMONIC="surface task term spring horse impact tortoise often session cable off catch harvest rain able jealous coral cargo portion surge spring genre mix avoid" + local GENESIS_ACCOUNTS_FILE="$TMP_DIR/genesis_accounts.json" + echo "[]" > "$GENESIS_ACCOUNTS_FILE" + + # Use a temp home to avoid conflicts with any pre-existing global keyring entries + # NOTE: use a global-scoped variable so traps work correctly under set -u + _KEYGEN_HOME=$(mktemp -d) + + for (( i=1; i<=5; i++ )); do + local key="genesis-acc-$i" output mnemonic address + if (( i == 1 )); then + output=$(echo "$ADMIN_MNEMONIC" | "$PCHAIND" keys add "$key" \ + --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --recover --output=json 2>&1) + mnemonic="$ADMIN_MNEMONIC" + else + output=$("$PCHAIND" keys add "$key" \ + --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1) + mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null) + if [[ -z "$mnemonic" ]]; then + mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true) + fi + fi + address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null) + jq --arg n "$key" --arg a "$address" --arg m "$mnemonic" \ + '. 
+= [{name:$n,address:$a,mnemonic:$m}]' "$GENESIS_ACCOUNTS_FILE" > "$TMP_DIR/tmp.json" \ + && mv "$TMP_DIR/tmp.json" "$GENESIS_ACCOUNTS_FILE" + ok "genesis-acc-$i: $address" + done + + local VALIDATORS_FILE="$TMP_DIR/validators.json" + echo "[]" > "$VALIDATORS_FILE" + for (( i=1; i<=4; i++ )); do + local key="validator-$i" output mnemonic address valoper + output=$("$PCHAIND" keys add "$key" \ + --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1) + mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null) + if [[ -z "$mnemonic" ]]; then + mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true) + fi + address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null) + valoper=$("$PCHAIND" keys show "$key" --bech val -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null) + jq --argjson id "$i" --arg n "$key" --arg a "$address" --arg v "$valoper" --arg m "$mnemonic" \ + '. += [{id:$id,name:$n,address:$a,valoper_address:$v,mnemonic:$m}]' \ + "$VALIDATORS_FILE" > "$TMP_DIR/tmp.json" && mv "$TMP_DIR/tmp.json" "$VALIDATORS_FILE" + ok "validator-$i: $address (valoper: $valoper)" + done + + local HOTKEYS_FILE="$TMP_DIR/hotkeys.json" + echo "[]" > "$HOTKEYS_FILE" + for (( i=1; i<=4; i++ )); do + local key="hotkey-$i" output mnemonic address + output=$("$PCHAIND" keys add "$key" \ + --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1) + mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null) + if [[ -z "$mnemonic" ]]; then + mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true) + fi + address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null) + jq --argjson id "$i" --arg n "$key" --arg a "$address" --arg m "$mnemonic" \ + '. 
+= [{id:$id,name:$n,address:$a,mnemonic:$m}]' \ + "$HOTKEYS_FILE" > "$TMP_DIR/tmp.json" && mv "$TMP_DIR/tmp.json" "$HOTKEYS_FILE" + ok "hotkey-$i: $address" + done + + ok "Account generation complete" + rm -rf "$_KEYGEN_HOME" 2>/dev/null || true +} + +# ─── Core validator setup ────────────────────────────────────────────────────── + +setup_genesis_validator() { + local HOME_DIR="$DATA_DIR/validator-1" + local RPC_PORT=$(val_rpc 1) REST_PORT=$(val_rest 1) GRPC_PORT=$(val_grpc 1) + local GRPC_WEB_PORT=$(val_grpcweb 1) P2P_PORT=$(val_p2p 1) + local EVM_PORT=$(val_evm 1) EVM_WS_PORT=$(val_evmws 1) + local PPROF_PORT=$(val_pprof 1) + local LOG_FILE="$LOG_DIR/validator-1.log" + + local TWO_BILLION="2000000000000000000000000000" + local ONE_MILLION="1000000000000000000000000" + local VALIDATOR_STAKE="100000000000000000000000" + local HOTKEY_FUNDING="10000000000000000000000" + + mkdir -p "$HOME_DIR" "$LOG_DIR" + rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* 2>/dev/null || true + + log "Initializing genesis validator (validator-1)..." 
+ + # Load mnemonics + local G1_MN G2_MN G3_MN G4_MN G5_MN V1_MN V2_MN V3_MN V4_MN + G1_MN=$(jq -r '.[0].mnemonic' "$TMP_DIR/genesis_accounts.json") + G2_MN=$(jq -r '.[1].mnemonic' "$TMP_DIR/genesis_accounts.json") + G3_MN=$(jq -r '.[2].mnemonic' "$TMP_DIR/genesis_accounts.json") + G4_MN=$(jq -r '.[3].mnemonic' "$TMP_DIR/genesis_accounts.json") + G5_MN=$(jq -r '.[4].mnemonic' "$TMP_DIR/genesis_accounts.json") + V1_MN=$(jq -r '.[] | select(.id==1) | .mnemonic' "$TMP_DIR/validators.json") + V2_MN=$(jq -r '.[] | select(.id==2) | .mnemonic' "$TMP_DIR/validators.json") + V3_MN=$(jq -r '.[] | select(.id==3) | .mnemonic' "$TMP_DIR/validators.json") + V4_MN=$(jq -r '.[] | select(.id==4) | .mnemonic' "$TMP_DIR/validators.json") + + "$PCHAIND" --home="$HOME_DIR" init genesis-validator --chain-id "$CHAIN_ID" --default-denom "$DENOM" + + # Add keys + echo "$G1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-1 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$G2_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-2 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$G3_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-3 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$G4_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-4 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$G5_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-5 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$V1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-1 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$V2_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-2 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$V3_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-3 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + echo "$V4_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-4 --keyring-backend=$KEYRING --algo=$KEYALGO --recover + + local GA1 GA2 GA3 GA4 GA5 VA1 VA2 VA3 
VA4 + GA1=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-1 -a --keyring-backend=$KEYRING) + GA2=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-2 -a --keyring-backend=$KEYRING) + GA3=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-3 -a --keyring-backend=$KEYRING) + GA4=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-4 -a --keyring-backend=$KEYRING) + GA5=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-5 -a --keyring-backend=$KEYRING) + VA1=$("$PCHAIND" --home="$HOME_DIR" keys show validator-1 -a --keyring-backend=$KEYRING) + VA2=$("$PCHAIND" --home="$HOME_DIR" keys show validator-2 -a --keyring-backend=$KEYRING) + VA3=$("$PCHAIND" --home="$HOME_DIR" keys show validator-3 -a --keyring-backend=$KEYRING) + VA4=$("$PCHAIND" --home="$HOME_DIR" keys show validator-4 -a --keyring-backend=$KEYRING) + + local HK1 HK2 HK3 HK4 + HK1=$(jq -r '.[0].address' "$TMP_DIR/hotkeys.json") + HK2=$(jq -r '.[1].address' "$TMP_DIR/hotkeys.json") + HK3=$(jq -r '.[2].address' "$TMP_DIR/hotkeys.json") + HK4=$(jq -r '.[3].address' "$TMP_DIR/hotkeys.json") + + # Fund genesis + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA1" "${TWO_BILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA2" "${TWO_BILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA3" "${TWO_BILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA4" "${TWO_BILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA5" "${TWO_BILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA1" "${ONE_MILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA2" "${ONE_MILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA3" "${ONE_MILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA4" "${ONE_MILLION}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK1" 
"${HOTKEY_FUNDING}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK2" "${HOTKEY_FUNDING}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK3" "${HOTKEY_FUNDING}${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK4" "${HOTKEY_FUNDING}${DENOM}" + + "$PCHAIND" --home="$HOME_DIR" genesis gentx validator-1 "${VALIDATOR_STAKE}${DENOM}" \ + --keyring-backend=$KEYRING --chain-id "$CHAIN_ID" --gas-prices "1000000000${DENOM}" + "$PCHAIND" --home="$HOME_DIR" genesis collect-gentxs + "$PCHAIND" --home="$HOME_DIR" genesis validate-genesis + + # Genesis parameters + upd() { cat "$HOME_DIR/config/genesis.json" | jq "$1" > "$HOME_DIR/config/tmp_genesis.json" && mv "$HOME_DIR/config/tmp_genesis.json" "$HOME_DIR/config/genesis.json"; } + upd '.consensus["params"]["block"]["time_iota_ms"]="1000"' + upd ".app_state[\"gov\"][\"params\"][\"min_deposit\"]=[{\"denom\":\"$DENOM\",\"amount\":\"1000000\"}]" + upd '.app_state["gov"]["params"]["max_deposit_period"]="300s"' + upd '.app_state["gov"]["params"]["voting_period"]="300s"' + upd '.app_state["gov"]["params"]["expedited_voting_period"]="150s"' + upd ".app_state[\"evm\"][\"params\"][\"evm_denom\"]=\"$DENOM\"" + upd '.app_state["evm"]["params"]["active_static_precompiles"]=["0x00000000000000000000000000000000000000CB","0x00000000000000000000000000000000000000ca","0x0000000000000000000000000000000000000100","0x0000000000000000000000000000000000000400","0x0000000000000000000000000000000000000800","0x0000000000000000000000000000000000000801","0x0000000000000000000000000000000000000802","0x0000000000000000000000000000000000000803","0x0000000000000000000000000000000000000804","0x0000000000000000000000000000000000000805"]' + upd ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"chain_id\"]=$EVM_CHAIN_ID" + upd ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"denom\"]=\"$DENOM\"" + upd '.app_state["evm"]["params"]["chain_config"]["decimals"]="18"' + upd 
'.app_state["erc20"]["params"]["native_precompiles"]=["0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"]' + upd ".app_state[\"erc20\"][\"token_pairs\"]=[{contract_owner:1,erc20_address:\"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\",denom:\"$DENOM\",enabled:true}]" + upd '.app_state["feemarket"]["params"]["no_base_fee"]=false' + upd '.app_state["feemarket"]["params"]["base_fee"]="1000000.000000000000000000"' + upd '.app_state["feemarket"]["params"]["min_gas_price"]="1000000.000000000000000000"' + upd ".app_state[\"staking\"][\"params\"][\"bond_denom\"]=\"$DENOM\"" + upd '.app_state["staking"]["params"]["min_commission_rate"]="0.050000000000000000"' + upd ".app_state[\"mint\"][\"params\"][\"mint_denom\"]=\"$DENOM\"" + upd ".app_state[\"crisis\"][\"constant_fee\"]={\"denom\":\"$DENOM\",\"amount\":\"1000\"}" + upd '.app_state["distribution"]["params"]["community_tax"]="0.000000000000000000"' + upd '.consensus["params"]["abci"]["vote_extensions_enable_height"]="2"' + upd '.app_state["tokenfactory"]["params"]["denom_creation_fee"]=[]' + upd '.app_state["tokenfactory"]["params"]["denom_creation_gas_consume"]=100000' + upd ".app_state[\"uregistry\"][\"params\"][\"admin\"]=\"$GA1\"" + upd ".app_state[\"utss\"][\"params\"][\"admin\"]=\"$GA1\"" + + # Config patching + local cfg="$HOME_DIR/config/config.toml" app="$HOME_DIR/config/app.toml" + sed -i '' -e "s|laddr = \"tcp://127.0.0.1:26657\"|laddr = \"tcp://0.0.0.0:${RPC_PORT}\"|g" "$cfg" + sed -i '' -e 's|cors_allowed_origins = \[\]|cors_allowed_origins = ["*"]|g' "$cfg" + sed -i '' -e "s|laddr = \"tcp://0.0.0.0:26656\"|laddr = \"tcp://0.0.0.0:${P2P_PORT}\"|g" "$cfg" + sed -i '' -e "s|timeout_commit = \"5s\"|timeout_commit = \"${BLOCK_TIME}\"|g" "$cfg" + sed -i '' -e "s|pprof_laddr = \"localhost:6060\"|pprof_laddr = \"localhost:${PPROF_PORT}\"|g" "$cfg" + sed -i '' -e 's|allow_duplicate_ip = false|allow_duplicate_ip = true|g' "$cfg" + sed -i '' -e 's|addr_book_strict = true|addr_book_strict = false|g' "$cfg" + sed -i '' -e 
"s|address = \"tcp://localhost:1317\"|address = \"tcp://0.0.0.0:${REST_PORT}\"|g" "$app" + sed -i '' -e 's|enable = false|enable = true|g' "$app" + sed -i '' -e 's|enabled-unsafe-cors = false|enabled-unsafe-cors = true|g' "$app" + sed -i '' -e "s|address = \"localhost:9090\"|address = \"0.0.0.0:${GRPC_PORT}\"|g" "$app" + sed -i '' -e "s|address = \"localhost:9091\"|address = \"0.0.0.0:${GRPC_WEB_PORT}\"|g" "$app" + + # Copy genesis for other validators + cp "$HOME_DIR/config/genesis.json" "$TMP_DIR/genesis.json" + ok "Genesis prepared and shared" + + # Start validator-1 + log "Starting validator-1 (genesis) on RPC=$RPC_PORT EVM=$EVM_PORT..." + "$PCHAIND" start \ + --home="$HOME_DIR" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:${EVM_PORT}" \ + --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_FILE" 2>&1 & + + local pid=$! + disown $pid 2>/dev/null || true + write_pid "validator-1" "$pid" + ok "validator-1 started (pid=$pid)" + log "Waiting for validator-1 to produce blocks..." + wait_block "$RPC_PORT" "validator-1" 120 + + # Register UV1 + setup all AuthZ grants + local genesis_rpc="tcp://127.0.0.1:${RPC_PORT}" + sleep 5 + + # Register UV1 (peer_id matches ip4 multi-addr locally) + local PEER_UV1 MULTI_UV1 VALOPER_V1 + PEER_UV1=$(uv_peer_id 1) + MULTI_UV1="/ip4/127.0.0.1/tcp/$(uv_tss 1)" + VALOPER_V1=$("$PCHAIND" --home="$HOME_DIR" keys show validator-1 --bech val -a --keyring-backend=$KEYRING) + + log "Registering universal-validator-1..." 
+ local reg_result + reg_result=$("$PCHAIND" --home="$HOME_DIR" tx uvalidator add-universal-validator \ + --core-validator-address "$VALOPER_V1" \ + --network "{\"peer_id\": \"$PEER_UV1\", \"multi_addrs\": [\"$MULTI_UV1\"]}" \ + --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \ + --node="$genesis_rpc" --fees 1000000000000000upc --yes --output json 2>&1 || true) + local reg_tx; reg_tx=$(echo "$reg_result" | jq -r '.txhash // ""' 2>/dev/null || true) + [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc" 30 || true + ok "UV1 registered" + + # Setup AuthZ grants for all 4 validators + sleep 5 + log "Creating AuthZ grants for all 4 validators..." + set +e + local total_grants=0 + for v in 1 2 3 4; do + local hk_addr + hk_addr=$(jq -r ".[$(( v - 1 ))].address" "$TMP_DIR/hotkeys.json") + for msg in /uexecutor.v1.MsgVoteInbound /uexecutor.v1.MsgVoteChainMeta /uexecutor.v1.MsgVoteOutbound /utss.v1.MsgVoteTssKeyProcess; do + "$PCHAIND" --home="$HOME_DIR" tx authz grant "$hk_addr" generic \ + --msg-type="$msg" \ + --from "validator-$v" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend=$KEYRING \ + --node="$genesis_rpc" \ + --gas=auto --gas-adjustment=1.5 --gas-prices="1000000000${DENOM}" \ + --yes --broadcast-mode sync --output json >/dev/null 2>&1 + total_grants=$(( total_grants + 1 )) + sleep 1 + done + done + set -e + ok "Created $total_grants AuthZ grants" + + # Fund test address + "$PCHAIND" --home="$HOME_DIR" tx bank send genesis-acc-1 \ + push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 \ + "1000000000000000000${DENOM}" \ + --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \ + --node="$genesis_rpc" --gas-prices="100000000000${DENOM}" --yes >/dev/null 2>&1 || true +} + +setup_regular_validator() { + local ID="$1" + local HOME_DIR="$DATA_DIR/validator-$ID" + local RPC_PORT=$(val_rpc "$ID") REST_PORT=$(val_rest "$ID") GRPC_PORT=$(val_grpc "$ID") + local GRPC_WEB_PORT=$(val_grpcweb "$ID") P2P_PORT=$(val_p2p "$ID") + local EVM_PORT=$(val_evm "$ID") 
EVM_WS_PORT=$(val_evmws "$ID") + local PPROF_PORT=$(val_pprof "$ID") + local LOG_FILE="$LOG_DIR/validator-$ID.log" + local GENESIS_RPC="http://127.0.0.1:$(val_rpc 1)" + + mkdir -p "$HOME_DIR" "$LOG_DIR" + rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* 2>/dev/null || true + + log "Initializing validator-$ID..." + + local VALIDATOR_STAKE="100000000000000000000000" + local VAL_MN + VAL_MN=$(jq -r ".[] | select(.id==$ID) | .mnemonic" "$TMP_DIR/validators.json") + + "$PCHAIND" --home="$HOME_DIR" init "validator-$ID" --chain-id "$CHAIN_ID" --default-denom "$DENOM" + + # Wait for genesis file + local i=0 + while [[ ! -f "$TMP_DIR/genesis.json" ]] && (( i < 60 )); do sleep 2; (( i++ )); done + [[ -f "$TMP_DIR/genesis.json" ]] || { err "Genesis file not found"; return 1; } + cp "$TMP_DIR/genesis.json" "$HOME_DIR/config/genesis.json" + + echo "$VAL_MN" | "$PCHAIND" --home="$HOME_DIR" keys add "validator-$ID" \ + --keyring-backend=$KEYRING --algo=$KEYALGO --recover + + # Get genesis node_id and set peer + local genesis_node_id + genesis_node_id=$(curl -sf "$GENESIS_RPC/status" | jq -r '.result.node_info.id') + local persistent_peer="$genesis_node_id@127.0.0.1:$(val_p2p 1)" + + # Config patching + local cfg="$HOME_DIR/config/config.toml" app="$HOME_DIR/config/app.toml" + sed -i '' -e "s|laddr = \"tcp://127.0.0.1:26657\"|laddr = \"tcp://0.0.0.0:${RPC_PORT}\"|g" "$cfg" + sed -i '' -e 's|cors_allowed_origins = \[\]|cors_allowed_origins = ["*"]|g' "$cfg" + sed -i '' -e "s|laddr = \"tcp://0.0.0.0:26656\"|laddr = \"tcp://0.0.0.0:${P2P_PORT}\"|g" "$cfg" + sed -i '' -e "s|^persistent_peers *=.*|persistent_peers = \"$persistent_peer\"|" "$cfg" + sed -i '' -e 's|timeout_commit = "5s"|timeout_commit = "1s"|g' "$cfg" + sed -i '' -e "s|pprof_laddr = \"localhost:6060\"|pprof_laddr = \"localhost:${PPROF_PORT}\"|g" "$cfg" + sed -i '' -e 's|allow_duplicate_ip = false|allow_duplicate_ip = true|g' "$cfg" + sed -i '' -e 's|addr_book_strict = true|addr_book_strict = false|g' "$cfg" + sed -i '' -e 
"s|address = \"tcp://localhost:1317\"|address = \"tcp://0.0.0.0:${REST_PORT}\"|g" "$app" + sed -i '' -e 's|enable = false|enable = true|g' "$app" + sed -i '' -e 's|enabled-unsafe-cors = false|enabled-unsafe-cors = true|g' "$app" + sed -i '' -e "s|address = \"localhost:9090\"|address = \"0.0.0.0:${GRPC_PORT}\"|g" "$app" + sed -i '' -e "s|address = \"localhost:9091\"|address = \"0.0.0.0:${GRPC_WEB_PORT}\"|g" "$app" + + # Start + log "Starting validator-$ID on RPC=$RPC_PORT EVM=$EVM_PORT..." + "$PCHAIND" start \ + --home="$HOME_DIR" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:${EVM_PORT}" \ + --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_FILE" 2>&1 & + + local pid=$! + disown $pid 2>/dev/null || true + write_pid "validator-$ID" "$pid" + ok "validator-$ID started (pid=$pid)" + + # Wait for sync + wait_rpc "$RPC_PORT" "validator-$ID" 180 || { warn "validator-$ID RPC slow — continuing anyway"; } + log "Waiting for validator-$ID to sync..." 
+  local j=0
+  while (( j < 120 )); do
+    local catching_up
+    catching_up=$(curl -sf "http://127.0.0.1:$RPC_PORT/status" | jq -r '.result.sync_info.catching_up' 2>/dev/null || echo "true")
+    [[ "$catching_up" == "false" ]] && break
+    sleep 5; (( j += 5 ))
+  done
+  ok "validator-$ID synced"
+
+  # Create validator stake
+  sleep 10
+  local VALOPER
+  VALOPER=$("$PCHAIND" --home="$HOME_DIR" keys show "validator-$ID" --bech val -a --keyring-backend=$KEYRING)
+  local PUBKEY
+  PUBKEY=$("$PCHAIND" tendermint show-validator --home="$HOME_DIR")
+
+  # NOTE(review): the heredoc and create-validator invocation below were garbled
+  # during extraction (everything between "<<EOF" and "2>&1)" was lost).
+  # Reconstructed from the surrounding code (create_result is consumed right
+  # after; a bare "set -e" follows, implying a lost "set +e") — verify the
+  # commission/stake field values against the original script.
+  cat > "$HOME_DIR/validator.json" <<EOF
+{
+  "pubkey": $PUBKEY,
+  "amount": "${VALIDATOR_STAKE}${DENOM}",
+  "moniker": "validator-$ID",
+  "commission-rate": "0.10",
+  "commission-max-rate": "0.20",
+  "commission-max-change-rate": "0.01",
+  "min-self-delegation": "1"
+}
+EOF
+
+  set +e
+  local create_result
+  create_result=$("$PCHAIND" --home="$HOME_DIR" tx staking create-validator "$HOME_DIR/validator.json" \
+    --from "validator-$ID" --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
+    --node="tcp://127.0.0.1:$(val_rpc 1)" \
+    --gas=auto --gas-adjustment=1.5 --gas-prices="1000000000${DENOM}" \
+    --yes --output json 2>&1)
+  local create_tx; create_tx=$(echo "$create_result" | jq -r '.txhash // ""' 2>/dev/null || true)
+  if [[ -n "$create_tx" ]]; then
+    wait_chain_tx "$create_tx" "tcp://127.0.0.1:$(val_rpc 1)" 30 || true
+  fi
+  set -e
+
+  # Wait for bonded
+  if wait_validator_bonded "$VALOPER" 120; then
+    ok "validator-$ID is bonded (valoper: $VALOPER)"
+  else
+    warn "validator-$ID may not be bonded yet; continuing"
+  fi
+
+  # Register UV for this validator
+  local PEER MULTI genesis_rpc_tcp="tcp://127.0.0.1:$(val_rpc 1)"
+  local G1_MN
+  G1_MN=$(jq -r '.[0].mnemonic' "$TMP_DIR/genesis_accounts.json")
+  echo "$G1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-1 \
+    --keyring-backend=$KEYRING --algo=$KEYALGO --recover >/dev/null 2>&1 || true
+
+  PEER=$(uv_peer_id "$ID")
+  MULTI="/ip4/127.0.0.1/tcp/$(uv_tss "$ID")"
+  sleep $(( ID * 2 ))
+  set +e
+  local reg
+  reg=$("$PCHAIND" --home="$HOME_DIR" tx uvalidator add-universal-validator \
+    --core-validator-address "$VALOPER" \
+    --network "{\"peer_id\": \"$PEER\", \"multi_addrs\": [\"$MULTI\"]}" \
+    --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
+    --node="$genesis_rpc_tcp" --fees 1000000000000000upc --yes --output json 2>&1 || true)
+  local reg_tx; reg_tx=$(echo "$reg" | jq -r '.txhash // ""' 2>/dev/null || true)
+  [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc_tcp" 30 || true
+  set -e
+  ok "UV$ID registered"
+}
+
+# ─── 
Universal validator setup ───────────────────────────────────────────────── + +setup_universal_validator() { + local ID="$1" + local HOME_DIR="$DATA_DIR/universal-$ID" + local UV_HOME="$HOME_DIR/.puniversal" + local QUERY_PORT=$(uv_query "$ID") + local TSS_PORT=$(uv_tss "$ID") + local CORE_GRPC=$(uv_grpc "$ID") + local GENESIS_RPC_HTTP="http://127.0.0.1:$(val_rpc 1)" + local LOG_FILE="$LOG_DIR/universal-$ID.log" + + mkdir -p "$HOME_DIR" "$LOG_DIR" + + # Preserve keyshares (puniversald stores them at $UV_HOME/keyshares) + local _ks_tmp="" + if [[ -d "$UV_HOME/keyshares" ]] && ls "$UV_HOME/keyshares"/* >/dev/null 2>&1; then + _ks_tmp=$(mktemp -d) + cp -r "$UV_HOME/keyshares/." "$_ks_tmp/" + ok "Preserved $(ls "$_ks_tmp" | wc -l | tr -d ' ') keyshare(s) for UV$ID" + fi + + rm -rf "$UV_HOME" 2>/dev/null || true + HOME="$HOME_DIR" "$PUNIVERSALD" init + + # Restore keyshares + if [[ -n "$_ks_tmp" ]] && [[ -d "$_ks_tmp" ]]; then + mkdir -p "$UV_HOME/keyshares" + cp -r "$_ks_tmp/." "$UV_HOME/keyshares/" + rm -rf "$_ks_tmp" + ok "Restored $(ls "$UV_HOME/keyshares" | wc -l | tr -d ' ') keyshare(s) for UV$ID" + fi + + # Wait for first block + local BLOCK_HEIGHT=0 + local i=0 + while (( i < 120 )); do + BLOCK_HEIGHT=$(curl -sf "$GENESIS_RPC_HTTP/status" 2>/dev/null \ + | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0") + [[ "$BLOCK_HEIGHT" != "0" && "$BLOCK_HEIGHT" != "null" ]] && break + sleep 2; (( i += 2 )) + done + + local cfg="$UV_HOME/config/pushuv_config.json" + + # Set grpc + keyring + jq --arg grpc "$CORE_GRPC" \ + '.push_chain_grpc_urls = [$grpc] | .keyring_backend = "test"' \ + "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Debug logging + jq '.log_level = 0' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # TSS + local TSS_PRIVATE_KEY + TSS_PRIVATE_KEY=$(uv_tss_key "$ID") + local TSS_P2P_LISTEN="/ip4/0.0.0.0/tcp/$TSS_PORT" + local TSS_HOME_DIR="$UV_HOME/tss" + + jq --arg pk "$TSS_PRIVATE_KEY" \ + --arg pw "testpassword" \ + 
--arg listen "$TSS_P2P_LISTEN" \ + --arg home "$TSS_HOME_DIR" \ + '.tss_enabled = true | .tss_p2p_private_key_hex = $pk | .tss_password = $pw | .tss_p2p_listen = $listen | .tss_home_dir = $home' \ + "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Query port + if [[ "$QUERY_PORT" != "8080" ]]; then + jq --argjson p "$QUERY_PORT" '.query_server_port = $p' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + fi + + # RPC overrides: use localhost ports (not host.docker.internal) + local SEPOLIA_RPC="${SEPOLIA_RPC_URL_OVERRIDE:-http://localhost:9545}" + local ARBITRUM_RPC="${ARBITRUM_RPC_URL_OVERRIDE:-http://localhost:9546}" + local BASE_RPC="${BASE_RPC_URL_OVERRIDE:-http://localhost:9547}" + local BSC_RPC="${BSC_RPC_URL_OVERRIDE:-http://localhost:9548}" + local SOLANA_RPC="${SOLANA_RPC_URL_OVERRIDE:-http://localhost:8899}" + local ARBITRUM_TENDERLY="https://arbitrum-sepolia.gateway.tenderly.co" + local BSC_DEFAULT="https://bsc-testnet-rpc.publicnode.com" + + jq --arg c "$ARBITRUM_CHAIN_ID" --arg u "$ARBITRUM_TENDERLY" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BSC_CHAIN_ID" --arg u "$BSC_DEFAULT" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$SEPOLIA_CHAIN_ID" --arg u "$SEPOLIA_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$ARBITRUM_CHAIN_ID" --arg u "$ARBITRUM_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BASE_CHAIN_ID" --arg u "$BASE_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BSC_CHAIN_ID" --arg u "$BSC_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$SOLANA_CHAIN_ID" --arg u "$SOLANA_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Set localchain 
start height + local start_from="$BLOCK_HEIGHT" + (( start_from > 20 )) && start_from=$(( start_from - 20 )) + jq --arg c "$LOCALCHAIN_CHAIN_ID" --argjson h "$start_from" \ + '.chain_configs[$c].event_start_from = $h' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Set event_start_from for external chains from env + set_chain_start_from() { + local cid="$1" val="${2:-}" + [[ -z "$val" ]] && return 0 + jq --arg c "$cid" --argjson h "$val" \ + '.chain_configs[$c].event_start_from = $h' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + } + set_chain_start_from "$SEPOLIA_CHAIN_ID" "${SEPOLIA_EVENT_START_FROM:-}" + set_chain_start_from "$BASE_CHAIN_ID" "${BASE_EVENT_START_FROM:-}" + set_chain_start_from "$ARBITRUM_CHAIN_ID" "${ARBITRUM_EVENT_START_FROM:-}" + set_chain_start_from "$BSC_CHAIN_ID" "${BSC_EVENT_START_FROM:-}" + set_chain_start_from "$SOLANA_CHAIN_ID" "${SOLANA_EVENT_START_FROM:-}" + + # Valoper address + local VALOPER + VALOPER=$(jq -r ".[$(( ID - 1 ))].valoper_address" "$TMP_DIR/validators.json") + if [[ -n "$VALOPER" && "$VALOPER" != "null" ]]; then + jq --arg v "$VALOPER" '.push_valoper_address = $v' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + fi + + # Import hotkey + local HOTKEY_MN HOTKEY_ADDR + HOTKEY_MN=$(jq -r ".[$(( ID - 1 ))].mnemonic" "$TMP_DIR/hotkeys.json") + HOTKEY_ADDR=$(jq -r ".[$(( ID - 1 ))].address" "$TMP_DIR/hotkeys.json") + mkdir -p "$UV_HOME/keyring-test" + echo "$HOTKEY_MN" | HOME="$HOME_DIR" "$PUNIVERSALD" keys add "hotkey-$ID" \ + --recover --keyring-backend=test >/dev/null 2>&1 || true + ok "UV$ID hotkey imported: $HOTKEY_ADDR" + + # Wait for AuthZ grants + log "UV$ID: waiting for AuthZ grants for $HOTKEY_ADDR..." 
+ local required_msgs='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' + local wait_max=120 waited=0 matched=0 + while (( waited < wait_max )); do + matched=$(curl -sf "http://127.0.0.1:$(val_rest 1)/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null \ + | jq -r --argjson req "$required_msgs" \ + '[.grants[]? | (.authorization.msg // .authorization.value.msg // "") as $m | select($req | index($m))] | length' \ + 2>/dev/null || echo "0") + (( matched >= 4 )) && break + sleep 2; (( waited += 2 )) + done + if (( matched >= 4 )); then + ok "UV$ID: found $matched/4 AuthZ grants" + else + warn "UV$ID: only $matched/4 grants found after ${wait_max}s — continuing" + fi + + # Wait for on-chain registration + local EXPECTED_PEER + EXPECTED_PEER=$(uv_peer_id "$ID") + log "UV$ID: waiting for on-chain registration (peer_id=$EXPECTED_PEER)..." + local reg_wait=0 found="" + while (( reg_wait < 120 )); do + found=$(curl -sf "http://127.0.0.1:$(val_rest 1)/uvalidator/v1/universal_validators" 2>/dev/null \ + | jq -r --arg pid "$EXPECTED_PEER" \ + '.universal_validator[]? | select(.network_info.peer_id == $pid) | .network_info.peer_id' \ + 2>/dev/null || echo "") + [[ -n "$found" ]] && break + sleep 2; (( reg_wait += 2 )) + done + if [[ -z "$found" ]]; then + err "UV$ID not found on-chain after 120s"; return 1 + fi + ok "UV$ID confirmed on-chain" + + # Start puniversald (set stack to max hard limit on macOS; ignore if already at limit) + log "Starting universal-validator-$ID (query=$QUERY_PORT tss=$TSS_PORT)..." + ( + ulimit -s 65520 2>/dev/null || true + export HOME="$HOME_DIR" + exec "$PUNIVERSALD" start + ) >> "$LOG_FILE" 2>&1 & + + local pid=$! 
+ disown $pid 2>/dev/null || true + write_pid "universal-$ID" "$pid" + ok "universal-validator-$ID started (pid=$pid)" +} + +# ─── TSS keygen ──────────────────────────────────────────────────────────────── + +get_current_tss_key_id() { + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + "$PCHAIND" query utss current-key --node="$genesis_rpc" 2>/dev/null \ + | grep -E "^\s*key_id:" | awk '{print $2}' 2>/dev/null || echo "" +} + +get_utss_admin() { + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + "$PCHAIND" query utss params --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.params.admin // ""' 2>/dev/null || echo "" +} + +cmd_tss_keygen() { + header "TSS Key Generation" + + # Check existing key + local existing + existing=$(get_current_tss_key_id) + if [[ -n "$existing" ]]; then + ok "TSS key already present: $existing" + return 0 + fi + + # Validate UVs registered + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + local uv_count + uv_count=$("$PCHAIND" query uvalidator all-universal-validators \ + --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if (( uv_count < 2 )); then + err "Need at least 2 registered universal validators (found: $uv_count)"; return 1 + fi + + # Find signer key + local admin_addr + admin_addr=$(get_utss_admin) + local val1_home="$DATA_DIR/validator-1" + local signer="" + while IFS= read -r key_name; do + local addr + addr=$("$PCHAIND" --home="$val1_home" keys show "$key_name" -a --keyring-backend=$KEYRING 2>/dev/null || true) + if [[ "$addr" == "$admin_addr" ]]; then signer="$key_name"; break; fi + done < <("$PCHAIND" --home="$val1_home" keys list --keyring-backend=$KEYRING --output json 2>/dev/null \ + | jq -r '.[] | .name' 2>/dev/null || true) + + if [[ -z "$signer" ]]; then + err "No local key matches UTSS admin: $admin_addr"; return 1 + fi + + local attempt max_attempts=5 + for (( attempt=1; attempt<=max_attempts; attempt++ )); do + log 
"Initiating TSS keygen (attempt $attempt/$max_attempts, signer=$signer)..." + local result tx_hash + result=$("$PCHAIND" --home="$val1_home" tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$signer" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend=$KEYRING \ + --node="$genesis_rpc" \ + --fees 1000000000000000upc \ + --yes --output json 2>&1 || true) + + local code; code=$(echo "$result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + tx_hash=$(echo "$result" | jq -r '.txhash // ""' 2>/dev/null || true) + + if [[ "$code" != "0" ]]; then + warn "Keygen tx code=$code; retrying..." + sleep 5; continue + fi + + if [[ -n "$tx_hash" ]]; then + wait_chain_tx "$tx_hash" "$genesis_rpc" 30 || true + fi + + # Wait for key + log "Waiting for TSS key to materialize on-chain..." + local waited=0 + while (( waited < 300 )); do + local kid; kid=$(get_current_tss_key_id) + if [[ -n "$kid" ]]; then ok "TSS key ready: $kid"; return 0; fi + sleep 3; (( waited += 3 )) + done + warn "Key not ready after 300s on attempt $attempt" + done + + err "TSS keygen failed after $max_attempts attempts" + return 1 +} + +# ─── Setup uvalidators (re-registration + authz) ─────────────────────────────── + +cmd_setup_uvalidators() { + header "Setting up Universal Validators" + + local genesis_rpc_tcp="tcp://127.0.0.1:$(val_rpc 1)" + local genesis_rpc_http="http://127.0.0.1:$(val_rpc 1)" + local failures=0 + + for i in 1 2 3 4; do + log "Processing UV$i..." + local val_home="$DATA_DIR/validator-$i" + + local VALOPER PEER MULTI + VALOPER=$("$PCHAIND" --home="$val_home" keys show "validator-$i" --bech val -a --keyring-backend=$KEYRING 2>/dev/null || true) + PEER=$(uv_peer_id "$i") + MULTI="/ip4/127.0.0.1/tcp/$(uv_tss "$i")" + + if [[ -z "$VALOPER" ]]; then + warn "Could not get valoper for validator-$i; skipping" + (( failures++ )); continue + fi + + # Wait bonded + if ! 
wait_validator_bonded "$VALOPER" 90; then + warn "validator-$i not bonded; skipping UV$i"; (( failures++ )); continue + fi + + # Register / update UV + set +e + local reg_result reg_tx + reg_result=$("$PCHAIND" --home="$val_home" tx uvalidator add-universal-validator \ + --core-validator-address "$VALOPER" \ + --network "{\"peer_id\": \"$PEER\", \"multi_addrs\": [\"$MULTI\"]}" \ + --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \ + --node="$genesis_rpc_tcp" --fees 1000000000000000upc --yes --output json 2>&1 || true) + reg_tx=$(echo "$reg_result" | jq -r '.txhash // ""' 2>/dev/null || true) + [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc_tcp" 30 || true + set -e + ok "UV$i registered" + + # AuthZ grants + local HOTKEY_ADDR + HOTKEY_ADDR=$(jq -r ".[$(( i - 1 ))].address" "$TMP_DIR/hotkeys.json") + + set +e + for msg in /uexecutor.v1.MsgVoteInbound /uexecutor.v1.MsgVoteChainMeta /uexecutor.v1.MsgVoteOutbound /utss.v1.MsgVoteTssKeyProcess; do + "$PCHAIND" --home="$val_home" tx authz grant "$HOTKEY_ADDR" generic \ + --msg-type="$msg" \ + --from "validator-$i" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend=$KEYRING \ + --node="$genesis_rpc_tcp" \ + --fees 200000000000000upc \ + --yes >/dev/null 2>&1 || true + sleep 1 + done + set -e + ok "UV$i AuthZ grants created for $HOTKEY_ADDR" + done + + if (( failures > 0 )); then + warn "Setup completed with $failures failure(s)" + else + ok "All UV setup complete" + fi +} + +# ─── Start command ───────────────────────────────────────────────────────────── + +cmd_start() { + local clean=false + for arg in "$@"; do [[ "$arg" == "--build" || "$arg" == "--clean" ]] && clean=true; done + + require_cmd "$PCHAIND" "$PUNIVERSALD" jq curl nc + + header "Starting Push Chain Local Network" + mkdir -p "$DATA_DIR" "$LOG_DIR" "$PID_DIR" + + if [[ "$clean" == "true" ]]; then + log "Clean start: removing existing validator data..." 
+ rm -rf "$DATA_DIR"/validator-* "$DATA_DIR"/universal-* + log "Clearing account files for fresh generation..." + rm -f "$TMP_DIR/genesis_accounts.json" "$TMP_DIR/validators.json" "$TMP_DIR/hotkeys.json" "$TMP_DIR/genesis.json" + fi + + # Kill any stale processes + for i in 1 2 3 4; do + local pid + pid=$(read_pid "validator-$i") + [[ -n "$pid" ]] && is_alive "$pid" && kill "$pid" 2>/dev/null || true + pid=$(read_pid "universal-$i") + [[ -n "$pid" ]] && is_alive "$pid" && kill "$pid" 2>/dev/null || true + done + sleep 2 + + generate_accounts + + # Genesis validator (foreground setup, background run) + if [[ "$clean" == "true" ]] || [[ ! -d "$DATA_DIR/validator-1" ]]; then + setup_genesis_validator + else + log "Reusing existing validator-1 data..." + local RPC_PORT=$(val_rpc 1) EVM_PORT=$(val_evm 1) EVM_WS_PORT=$(val_evmws 1) + local LOG_FILE="$LOG_DIR/validator-1.log" + "$PCHAIND" start \ + --home="$DATA_DIR/validator-1" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:${EVM_PORT}" \ + --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_FILE" 2>&1 & + local _v1pid=$! + disown $_v1pid 2>/dev/null || true + write_pid "validator-1" "$_v1pid" + wait_block "$RPC_PORT" "validator-1" 120 + fi + + # Regular validators in parallel + log "Starting validators 2, 3, 4..." + for i in 2 3 4; do + if [[ "$clean" == "true" ]] || [[ ! -d "$DATA_DIR/validator-$i" ]]; then + setup_regular_validator "$i" & + else + log "Reusing existing validator-$i data..." 
+ local RPC=$(val_rpc "$i") EVM=$(val_evm "$i") WS=$(val_evmws "$i") + "$PCHAIND" start \ + --home="$DATA_DIR/validator-$i" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC}" \ + --json-rpc.address="0.0.0.0:${EVM}" \ + --json-rpc.ws-address="0.0.0.0:${WS}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_DIR/validator-$i.log" 2>&1 & + local _vpid=$! + disown $_vpid 2>/dev/null || true + write_pid "validator-$i" "$_vpid" + fi + done + wait || true + ok "All core validators running" + + # Universal validators in parallel + log "Starting universal validators..." + for i in 1 2 3 4; do + setup_universal_validator "$i" & + done + wait || true + ok "All universal validators started" + + # Health checks + log "Waiting for universal validators to become healthy..." + local all_healthy=true + for i in 1 2 3 4; do + if ! wait_uv_health "$(uv_query "$i")" "universal-validator-$i" 180; then + all_healthy=false + fi + done + + if [[ "$all_healthy" == "true" ]]; then + ok "All validators healthy!" + else + warn "Some validators may not be healthy — check logs in $LOG_DIR" + fi + + cmd_status +} + +# ─── Stop / Down ─────────────────────────────────────────────────────────────── + +cmd_stop() { + log "Stopping all local validators..." + for i in 1 2 3 4; do + local pid + pid=$(read_pid "validator-$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + kill "$pid" 2>/dev/null || true + ok "Stopped validator-$i (pid=$pid)" + fi + pid=$(read_pid "universal-$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + kill "$pid" 2>/dev/null || true + ok "Stopped universal-validator-$i (pid=$pid)" + fi + done + # Catch any stragglers by binary name + pkill -f "pchaind start" 2>/dev/null || true + pkill -f "puniversald start" 2>/dev/null || true + sleep 2 + ok "All validators stopped" +} + +cmd_down() { + cmd_stop + log "Removing validator data..." 
+ rm -rf "$DATA_DIR"/validator-* "$DATA_DIR"/universal-* + rm -f "$TMP_DIR/genesis.json" + ok "Data removed (accounts in /tmp/push-accounts preserved)" +} + +# ─── Status ──────────────────────────────────────────────────────────────────── + +cmd_status() { + header "Local Devnet Status" + for i in 1 2 3 4; do + local pid rpc_port + pid=$(read_pid "validator-$i") + rpc_port=$(val_rpc "$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + local height + height=$(curl -sf "http://127.0.0.1:$rpc_port/status" 2>/dev/null \ + | jq -r '.result.sync_info.latest_block_height // "?"' 2>/dev/null || echo "?") + ok "validator-$i (pid=$pid) — rpc=:$rpc_port evm=:$(val_evm "$i") height=$height" + else + err "validator-$i — NOT RUNNING" + fi + done + for i in 1 2 3 4; do + local pid qport + pid=$(read_pid "universal-$i") + qport=$(uv_query "$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + local health + health=$(curl -sf "http://127.0.0.1:$qport/health" 2>/dev/null && echo "healthy" || echo "starting") + ok "universal-validator-$i (pid=$pid) — query=:$qport tss=:$(uv_tss "$i") [$health]" + else + err "universal-validator-$i — NOT RUNNING" + fi + done +} + +# ─── Logs ────────────────────────────────────────────────────────────────────── + +cmd_logs() { + local name="${1:-all}" + mkdir -p "$LOG_DIR" + if [[ "$name" == "all" ]]; then + tail -f "$LOG_DIR"/*.log 2>/dev/null || echo "No logs found in $LOG_DIR" + else + local f="$LOG_DIR/${name}.log" + [[ -f "$f" ]] && tail -f "$f" || { err "Log not found: $f"; exit 1; } + fi +} + +# ─── Main ────────────────────────────────────────────────────────────────────── + +case "${1:-help}" in + start) shift; cmd_start "$@" ;; + stop) cmd_stop ;; + down) cmd_down ;; + status) cmd_status ;; + logs) shift; cmd_logs "${1:-all}" ;; + tss-keygen) cmd_tss_keygen ;; + setup-uvalidators) cmd_setup_uvalidators ;; + *) + echo "Usage: $(basename "$0") " + echo "" + echo "Commands:" + echo " start [--build] Start all validators (--build for clean 
start)" + echo " stop Stop all validators (keep data)" + echo " down Stop and remove validator data" + echo " status Show status of all validators" + echo " logs [name] Tail logs (e.g. validator-1, universal-2, all)" + echo " tss-keygen Initiate TSS key generation" + echo " setup-uvalidators Register universal validators and setup AuthZ" + ;; +esac From a4882e8307de3d782c6ca23e6cd1b11824ee8118 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Fri, 3 Apr 2026 13:09:28 +0530 Subject: [PATCH 31/61] fix: add: vault method (#202) --- universalClient/chains/evm/event_listener.go | 2 +- universalClient/chains/evm/event_parser.go | 3 ++- universalClient/chains/svm/event_listener.go | 3 ++- universalClient/chains/svm/event_parser.go | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/universalClient/chains/evm/event_listener.go b/universalClient/chains/evm/event_listener.go index 72e04a3e..d3e159df 100644 --- a/universalClient/chains/evm/event_listener.go +++ b/universalClient/chains/evm/event_listener.go @@ -85,7 +85,7 @@ func NewEventListener( continue } switch method.Name { - case EventTypeFinalizeUniversalTx: + case EventTypeFinalizeUniversalTx, EventTypeFundsRescued: topic := ethcommon.HexToHash(method.EventIdentifier) eventTopics = append(eventTopics, topic) topicToEventType[topic] = method.Name diff --git a/universalClient/chains/evm/event_parser.go b/universalClient/chains/evm/event_parser.go index 08498542..3cde0c7e 100644 --- a/universalClient/chains/evm/event_parser.go +++ b/universalClient/chains/evm/event_parser.go @@ -24,6 +24,7 @@ const ( // Vault event type constants matching vault method names in chain config. const ( EventTypeFinalizeUniversalTx = "finalizeUniversalTx" + EventTypeFundsRescued = "fundsRescued" ) // ParseEvent parses a log into a store.Event based on the event type. 
@@ -36,7 +37,7 @@ func ParseEvent(log *types.Log, eventType string, chainID string, logger zerolog switch eventType { case EventTypeSendFunds: return parseSendFundsEvent(log, chainID, logger) - case EventTypeExecuteUniversalTx, EventTypeRevertUniversalTx, EventTypeFinalizeUniversalTx: + case EventTypeExecuteUniversalTx, EventTypeRevertUniversalTx, EventTypeFinalizeUniversalTx, EventTypeFundsRescued: // All share the same topic layout: Topics[1]=txID, Topics[2]=universalTxID. return parseOutboundObservationEvent(log, chainID, logger) default: diff --git a/universalClient/chains/svm/event_listener.go b/universalClient/chains/svm/event_listener.go index 79bf2ed1..550dafdc 100644 --- a/universalClient/chains/svm/event_listener.go +++ b/universalClient/chains/svm/event_listener.go @@ -66,7 +66,8 @@ func NewEventListener( switch method.Name { case EventTypeSendFunds, EventTypeFinalizeUniversalTx, - EventTypeRevertUniversalTx: + EventTypeRevertUniversalTx, + EventTypeFundsRescued: discriminator := strings.ToLower(method.EventIdentifier) discriminatorToEventType[discriminator] = method.Name } diff --git a/universalClient/chains/svm/event_parser.go b/universalClient/chains/svm/event_parser.go index 9df8d85c..00f5d332 100644 --- a/universalClient/chains/svm/event_parser.go +++ b/universalClient/chains/svm/event_parser.go @@ -22,6 +22,7 @@ const ( // Outbound observation events (emitted by gateway on SVM since there's no vault) EventTypeFinalizeUniversalTx = "finalize_universal_tx" EventTypeRevertUniversalTx = "revert_universal_tx" + EventTypeFundsRescued = "funds_rescued" ) // base58ToHex converts a base58 encoded string to hex format (0x...) 
@@ -46,7 +47,7 @@ func ParseEvent(log string, signature string, slot uint64, logIndex uint, eventT switch eventType { case EventTypeSendFunds: return parseSendFundsEvent(log, signature, slot, logIndex, chainID, logger) - case EventTypeFinalizeUniversalTx, EventTypeRevertUniversalTx: + case EventTypeFinalizeUniversalTx, EventTypeRevertUniversalTx, EventTypeFundsRescued: return parseOutboundObservationEvent(log, signature, slot, logIndex, chainID, logger) default: logger.Debug(). From 30343ad42ce09276c04c8fe50c4d738bf72192dd Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Sun, 5 Apr 2026 18:42:37 +0530 Subject: [PATCH 32/61] support for local-native and updated event identifiers --- config/testnet-donut/arb_sepolia/chain.json | 8 +- .../testnet-donut/arb_sepolia/tokens/eth.json | 4 +- config/testnet-donut/base_sepolia/chain.json | 6 +- .../base_sepolia/tokens/eth.json | 4 +- config/testnet-donut/bsc_testnet/chain.json | 4 +- .../testnet-donut/bsc_testnet/tokens/bnb.json | 4 +- .../bsc_testnet/tokens/usdt.json | 4 +- config/testnet-donut/eth_sepolia/chain.json | 10 +- .../testnet-donut/eth_sepolia/tokens/eth.json | 2 +- .../eth_sepolia/tokens/usdt.json | 2 +- config/testnet-donut/solana_devnet/chain.json | 3 +- .../solana_devnet/tokens/sol.json | 2 +- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 337 +++++++++++++++--- local-native/devnet | 127 ++++++- local-native/scripts/setup-universal.sh | 34 ++ local-native/scripts/setup-uvalidators.sh | 128 ++++--- local-native/scripts/setup-validator-auto.sh | 3 + 18 files changed, 539 insertions(+), 145 deletions(-) diff --git a/config/testnet-donut/arb_sepolia/chain.json b/config/testnet-donut/arb_sepolia/chain.json index a2a8da50..f893e6f9 100644 --- a/config/testnet-donut/arb_sepolia/chain.json +++ b/config/testnet-donut/arb_sepolia/chain.json @@ -1,5 +1,5 @@ { - "chain": "eip155:421614", + "chain": "eip155:421614", "public_rpc_url": 
"https://arbitrum-sepolia.gateway.tenderly.co", "vm_type": 1, "gateway_address": "0x2cd870e0166Ba458dEC615168Fd659AacD795f34", @@ -11,8 +11,8 @@ "gateway_methods": [ { "name": "sendFunds", - "identifier": "0x65f4dbe1", - "event_identifier": "0x33e6cf63a9ddbaee9d86893573e2616fe7a78fc9b7b23acb7da8b58bd0024041", + "identifier": "0xd372b8b3", + "event_identifier": "0xd9074957cd6846aa1b09b2e676dac3b9cdeecabd643cabd3d0a0f41e2acd1c50", "confirmation_type": 1 }, { @@ -46,4 +46,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/arb_sepolia/tokens/eth.json b/config/testnet-donut/arb_sepolia/tokens/eth.json index 7c65caa8..73135085 100644 --- a/config/testnet-donut/arb_sepolia/tokens/eth.json +++ b/config/testnet-donut/arb_sepolia/tokens/eth.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0xc0a821a1AfEd1322c5e15f1F4586C0B8cE65400e" + "contract_address": "0xE74A512688E53d6Ed2cf64a327fABE8ECE27aDD6" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/base_sepolia/chain.json b/config/testnet-donut/base_sepolia/chain.json index ddd9c418..169a463d 100644 --- a/config/testnet-donut/base_sepolia/chain.json +++ b/config/testnet-donut/base_sepolia/chain.json @@ -11,8 +11,8 @@ "gateway_methods": [ { "name": "sendFunds", - "identifier": "0x65f4dbe1", - "event_identifier": "0x33e6cf63a9ddbaee9d86893573e2616fe7a78fc9b7b23acb7da8b58bd0024041", + "identifier": "0xd372b8b3", + "event_identifier": "0xd9074957cd6846aa1b09b2e676dac3b9cdeecabd643cabd3d0a0f41e2acd1c50", "confirmation_type": 1 }, { @@ -46,4 +46,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/base_sepolia/tokens/eth.json b/config/testnet-donut/base_sepolia/tokens/eth.json index ff46c796..6b4c06bf 100644 --- a/config/testnet-donut/base_sepolia/tokens/eth.json +++ 
b/config/testnet-donut/base_sepolia/tokens/eth.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0xc7007af2B24D4eb963fc9633B0c66e1d2D90Fc21" + "contract_address": "0xCcd71bc096E2225048cD167447e164E8571BcCA6" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/chain.json b/config/testnet-donut/bsc_testnet/chain.json index 9d6553cf..3e854cb2 100644 --- a/config/testnet-donut/bsc_testnet/chain.json +++ b/config/testnet-donut/bsc_testnet/chain.json @@ -11,7 +11,7 @@ "gateway_methods": [ { "name": "sendFunds", - "identifier": "0x65f4dbe1", + "identifier": "0xd372b8b3", "event_identifier": "0xd9074957cd6846aa1b09b2e676dac3b9cdeecabd643cabd3d0a0f41e2acd1c50", "confirmation_type": 1 }, @@ -46,4 +46,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/tokens/bnb.json b/config/testnet-donut/bsc_testnet/tokens/bnb.json index 83858a73..87109e45 100644 --- a/config/testnet-donut/bsc_testnet/tokens/bnb.json +++ b/config/testnet-donut/bsc_testnet/tokens/bnb.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x7a9082dA308f3fa005beA7dB0d203b3b86664E36" + "contract_address": "0x2ddB499C3a35a60c809d878eFf5Fa248bb5eAdbd" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/tokens/usdt.json b/config/testnet-donut/bsc_testnet/tokens/usdt.json index e5bb32a9..7ee1cf79 100644 --- a/config/testnet-donut/bsc_testnet/tokens/usdt.json +++ b/config/testnet-donut/bsc_testnet/tokens/usdt.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x2f98B4235FD2BA0173a2B056D722879360B12E7b" + "contract_address": "0xC329d4EbF8814eEFfA2Fd9612655e490b112523F" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/eth_sepolia/chain.json b/config/testnet-donut/eth_sepolia/chain.json index 
8640b769..e931caec 100644 --- a/config/testnet-donut/eth_sepolia/chain.json +++ b/config/testnet-donut/eth_sepolia/chain.json @@ -1,6 +1,6 @@ { "chain": "eip155:11155111", - "public_rpc_url": "https://eth-sepolia.public.blastapi.io", + "public_rpc_url": "https://ethereum-sepolia-rpc.publicnode.com", "vm_type": 1, "gateway_address": "0x05bD7a3D18324c1F7e216f7fBF2b15985aE5281A", "gas_oracle_fetch_interval": "30s", @@ -11,8 +11,8 @@ "gateway_methods": [ { "name": "sendFunds", - "identifier": "0x65f4dbe1", - "event_identifier": "0x33e6cf63a9ddbaee9d86893573e2616fe7a78fc9b7b23acb7da8b58bd0024041", + "identifier": "0xd372b8b3", + "event_identifier": "0xd9074957cd6846aa1b09b2e676dac3b9cdeecabd643cabd3d0a0f41e2acd1c50", "confirmation_type": 1 }, { @@ -44,6 +44,6 @@ ], "enabled": { "isInboundEnabled": true, - "isOutboundEnabled": true + "isOutboundEnabled": false } -} \ No newline at end of file +} diff --git a/config/testnet-donut/eth_sepolia/tokens/eth.json b/config/testnet-donut/eth_sepolia/tokens/eth.json index 602011ca..644c98ac 100644 --- a/config/testnet-donut/eth_sepolia/tokens/eth.json +++ b/config/testnet-donut/eth_sepolia/tokens/eth.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x90F4A15601E08570D6fFbaE883C44BDB85bDb7d1" + "contract_address": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4" } } diff --git a/config/testnet-donut/eth_sepolia/tokens/usdt.json b/config/testnet-donut/eth_sepolia/tokens/usdt.json index 4cb88be7..6afbf386 100644 --- a/config/testnet-donut/eth_sepolia/tokens/usdt.json +++ b/config/testnet-donut/eth_sepolia/tokens/usdt.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x00cb38A885cf8D0B2dDfd19Bd1c04aAAC44C5a86" + "contract_address": "0x6a20557430be6412AF423681e35CC96797506F3a" } } diff --git a/config/testnet-donut/solana_devnet/chain.json b/config/testnet-donut/solana_devnet/chain.json index f5aed4e1..428fd5ae 100644 --- 
a/config/testnet-donut/solana_devnet/chain.json +++ b/config/testnet-donut/solana_devnet/chain.json @@ -15,7 +15,8 @@ "event_identifier": "7f1f6cffbb134644", "confirmation_type": 2 }, - {"name": "send_funds", + { + "name": "send_funds", "identifier": "54f7d3283f6a0f3b", "event_identifier": "6c9ad829b5ea1d7c", "confirmation_type": 1 diff --git a/config/testnet-donut/solana_devnet/tokens/sol.json b/config/testnet-donut/solana_devnet/tokens/sol.json index 05f65f7f..c72da754 100644 --- a/config/testnet-donut/solana_devnet/tokens/sol.json +++ b/config/testnet-donut/solana_devnet/tokens/sol.json @@ -9,6 +9,6 @@ "token_type": 4, "native_representation": { "denom": "", - "contract_address": "0x5D525Df2bD99a6e7ec58b76aF2fd95F39874EBed" + "contract_address": "0x31F3Dcb417970EBe9AC1e254Ee42b91e49e30EE2" } } diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 734ec7a8..1f259742 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-03-31T16:38:44Z", + "generatedAt": "2026-04-05T10:14:33Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 75f2ef97..0f78c075 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -24,8 +24,7 @@ fi : "${FUND_AMOUNT:=1000000000000000000upc}" : "${POOL_CREATION_TOPUP_AMOUNT:=50000000000000000000upc}" : "${GAS_PRICES:=100000000000upc}" -: "${LOCAL_DEVNET_DIR:=./local-setup-e2e}" -: "${LEGACY_LOCAL_NATIVE_DIR:=./local-native}" +: "${LOCAL_DEVNET_DIR:=./local-native}" : "${CORE_CONTRACTS_REPO:=https://github.com/pushchain/push-chain-core-contracts.git}" : "${CORE_CONTRACTS_BRANCH:=e2e-push-node}" @@ -65,7 +64,6 @@ abs_from_root() { GENESIS_KEY_HOME="$(abs_from_root "$GENESIS_KEY_HOME")" GENESIS_ACCOUNTS_JSON="$(abs_from_root "$GENESIS_ACCOUNTS_JSON")" LOCAL_DEVNET_DIR="$(abs_from_root "$LOCAL_DEVNET_DIR")" 
-LEGACY_LOCAL_NATIVE_DIR="$(abs_from_root "$LEGACY_LOCAL_NATIVE_DIR")" E2E_PARENT_DIR="$(abs_from_root "$E2E_PARENT_DIR")" CORE_CONTRACTS_DIR="$(abs_from_root "$CORE_CONTRACTS_DIR")" SWAP_AMM_DIR="$(abs_from_root "$SWAP_AMM_DIR")" @@ -819,13 +817,129 @@ step_run_sdk_tests_all() { } step_devnet() { - require_cmd bash + require_cmd bash jq + + local sepolia_rpc_override arbitrum_rpc_override base_rpc_override bsc_rpc_override solana_rpc_override + + chain_public_rpc_from_config() { + local file_path="$1" + local fallback_rpc="$2" + local label="$3" + local rpc_url + + if [[ ! -f "$file_path" ]]; then + log_warn "Chain config file not found for $label while preparing devnet RPC overrides: $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + rpc_url="$(jq -r '.public_rpc_url // empty' "$file_path" 2>/dev/null || true)" + if [[ -z "$rpc_url" || "$rpc_url" == "null" ]]; then + log_warn "public_rpc_url missing in $file_path while preparing devnet RPC overrides; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + printf "%s" "$rpc_url" + } + + if is_local_testing_env; then + local local_sepolia_rpc local_arbitrum_rpc local_base_rpc local_bsc_rpc local_solana_rpc + local_sepolia_rpc="${LOCAL_SEPOLIA_UV_RPC_URL:-${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}}" + local_arbitrum_rpc="${LOCAL_ARBITRUM_UV_RPC_URL:-${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}}" + local_base_rpc="${LOCAL_BASE_UV_RPC_URL:-${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}}" + local_bsc_rpc="${LOCAL_BSC_UV_RPC_URL:-${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}}" + local_solana_rpc="${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}}" + + sepolia_rpc_override="$local_sepolia_rpc" + arbitrum_rpc_override="$local_arbitrum_rpc" + base_rpc_override="$local_base_rpc" + bsc_rpc_override="$local_bsc_rpc" + solana_rpc_override="$local_solana_rpc" + else + 
sepolia_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "https://eth-sepolia.public.blastapi.io" "eth_sepolia")" + arbitrum_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "https://arbitrum-sepolia.gateway.tenderly.co" "arb_sepolia")" + base_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "https://sepolia.base.org" "base_sepolia")" + bsc_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "https://bsc-testnet-rpc.publicnode.com" "bsc_testnet")" + solana_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "https://api.devnet.solana.com" "solana_devnet")" + fi + + log_info "Devnet RPC overrides: sepolia=$sepolia_rpc_override arbitrum=$arbitrum_rpc_override base=$base_rpc_override bsc=$bsc_rpc_override solana=$solana_rpc_override" + + local devnet_sepolia_start="" devnet_arbitrum_start="" devnet_base_start="" devnet_bsc_start="" devnet_solana_start="" + + if ! 
is_local_testing_env; then + require_cmd curl jq + local _fetch_block + _fetch_block() { + local label="$1" rpc_url="$2" + local response hex_block decimal_block + response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" + hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then + decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" + [[ "$decimal_block" =~ ^[0-9]+$ ]] && { printf "%s" "$decimal_block"; return 0; } + fi + log_warn "Could not read block number for $label from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + _fetch_solana_slot() { + local rpc_url="$1" + local slot response + response="$(curl -sS --max-time 15 -X POST "$rpc_url" -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' 2>/dev/null || true)" + slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + slot="$(echo "$slot" | tr -d '[:space:]')" + [[ "$slot" =~ ^[0-9]+$ ]] && { printf "%s" "$slot"; return 0; } + log_warn "Could not read Solana slot from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + log_info "Fetching latest block/slot numbers from public chain RPCs for devnet startup" + devnet_sepolia_start="$(_fetch_block "sepolia" "$sepolia_rpc_override")" + devnet_arbitrum_start="$(_fetch_block "arbitrum" "$arbitrum_rpc_override")" + devnet_base_start="$(_fetch_block "base" "$base_rpc_override")" + devnet_bsc_start="$(_fetch_block "bsc" "$bsc_rpc_override")" + devnet_solana_start="$(_fetch_solana_slot "$solana_rpc_override")" + log_ok "Devnet event_start_from: sepolia=${devnet_sepolia_start:-n/a} arbitrum=${devnet_arbitrum_start:-n/a} base=${devnet_base_start:-n/a} 
bsc=${devnet_bsc_start:-n/a} solana=${devnet_solana_start:-n/a}" + fi + log_info "Starting local devnet" ( cd "$LOCAL_DEVNET_DIR" - ./devnet start --build - ./devnet setup-uvalidators + + # Start all 4 core validators + ./devnet start 4 + + # Build UV env array with RPC overrides and event_start_from values + local _uv_env=( + SEPOLIA_RPC_URL_OVERRIDE="$sepolia_rpc_override" + ARBITRUM_RPC_URL_OVERRIDE="$arbitrum_rpc_override" + BASE_RPC_URL_OVERRIDE="$base_rpc_override" + BSC_RPC_URL_OVERRIDE="$bsc_rpc_override" + SOLANA_RPC_URL_OVERRIDE="$solana_rpc_override" + ) + [[ -n "$devnet_sepolia_start" ]] && _uv_env+=(SEPOLIA_EVENT_START_FROM="$devnet_sepolia_start") + [[ -n "$devnet_arbitrum_start" ]] && _uv_env+=(ARBITRUM_EVENT_START_FROM="$devnet_arbitrum_start") + [[ -n "$devnet_base_start" ]] && _uv_env+=(BASE_EVENT_START_FROM="$devnet_base_start") + [[ -n "$devnet_bsc_start" ]] && _uv_env+=(BSC_EVENT_START_FROM="$devnet_bsc_start") + [[ -n "$devnet_solana_start" ]] && _uv_env+=(SOLANA_EVENT_START_FROM="$devnet_solana_start") + + # Register universal validators on-chain and create authz grants + env "${_uv_env[@]}" ./devnet setup-uvalidators + + # Start 4 universal validators with RPC overrides and event_start_from + env "${_uv_env[@]}" ./devnet start-uv 2 ) + + # Sync freshly generated genesis accounts so step_recover_genesis_key uses the current mnemonic. + # Each fresh devnet run (after `rm -rf data/`) regenerates accounts with new mnemonics. + if [[ -f "$LOCAL_DEVNET_DIR/data/accounts/genesis_accounts.json" ]]; then + cp "$LOCAL_DEVNET_DIR/data/accounts/genesis_accounts.json" "$GENESIS_ACCOUNTS_JSON" + log_ok "Synced genesis_accounts.json from devnet" + fi + log_ok "Devnet is up" } @@ -840,24 +954,72 @@ step_ensure_tss_key_ready() { } step_setup_environment() { - if ! 
is_local_testing_env; then - log_info "TESTING_ENV is not LOCAL, skipping setup-environment" - return 0 + require_cmd jq curl + + local has_docker="false" + if command -v docker >/dev/null 2>&1; then + has_docker="true" + fi + + if is_local_testing_env; then + require_cmd anvil cast surfpool fi - require_cmd anvil cast docker jq surfpool curl + fetch_evm_block_number() { + local label="$1" + local rpc_url="$2" + local response hex_block decimal_block + + response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" + + hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then + decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" + if [[ "$decimal_block" =~ ^[0-9]+$ ]]; then + printf "%s" "$decimal_block" + return 0 + fi + fi + + log_warn "Could not read block number for $label at $rpc_url; defaulting event_start_from to 0" >&2 + printf "%s" "0" + } local sepolia_host_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" local arbitrum_host_rpc="${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" local base_host_rpc="${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" local bsc_host_rpc="${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" - local uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-http://localhost:9545}" - local uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-http://localhost:9546}" - local uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-http://localhost:9547}" - local uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-http://localhost:9548}" local solana_host_rpc="${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}" - local uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-http://localhost:8899}" + local uv_sepolia_rpc_url="" + local uv_arbitrum_rpc_url="" + local uv_base_rpc_url="" + local uv_bsc_rpc_url="" 
+ local uv_solana_rpc_url="" + + chain_public_rpc_from_config() { + local file_path="$1" + local fallback_rpc="$2" + local label="$3" + local rpc_url + + if [[ ! -f "$file_path" ]]; then + log_warn "Chain config file not found for $label: $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + rpc_url="$(jq -r '.public_rpc_url // empty' "$file_path" 2>/dev/null || true)" + if [[ -z "$rpc_url" || "$rpc_url" == "null" ]]; then + log_warn "public_rpc_url missing in $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + printf "%s" "$rpc_url" + } patch_chain_config_public_rpc() { local file_path="$1" @@ -884,6 +1046,31 @@ step_setup_environment() { patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "$solana_host_rpc" "solana_devnet" } + if is_local_testing_env; then + uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-$sepolia_host_rpc}" + uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-$arbitrum_host_rpc}" + uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-$base_host_rpc}" + uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-$bsc_host_rpc}" + uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-$solana_host_rpc}" + else + uv_sepolia_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "$sepolia_host_rpc" "eth_sepolia")" + uv_arbitrum_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "$arbitrum_host_rpc" "arb_sepolia")" + uv_base_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "$base_host_rpc" "base_sepolia")" + uv_bsc_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "$bsc_host_rpc" "bsc_testnet")" + uv_solana_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "$solana_host_rpc" "solana_devnet")" + + if pgrep -f "${PUSH_CHAIN_DIR}/build/puniversald start" >/dev/null 2>&1; then + log_warn "puniversald processes are already running; 
RPC URL file changes apply fully after devnet restart" + fi + fi + + local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block solana_latest_slot + sepolia_latest_block="0" + arbitrum_latest_block="0" + base_latest_block="0" + bsc_latest_block="0" + solana_latest_slot="0" + start_anvil_fork() { local label="$1" local port="$2" @@ -899,7 +1086,17 @@ step_setup_environment() { kill "$pid" >/dev/null 2>&1 || true done < <(lsof -ti tcp:"$port" 2>/dev/null || true) - sleep 1 + # Wait up to 8 seconds for the port to be fully released before binding the new process. + local _w=0 + while lsof -ti tcp:"$port" >/dev/null 2>&1; do + if [[ $_w -ge 8 ]]; then + lsof -ti tcp:"$port" 2>/dev/null | xargs kill -9 2>/dev/null || true + sleep 1 + break + fi + sleep 1 + _w=$(( _w + 1 )) + done log_info "Starting anvil $label on port $port (chain-id: $chain_id)" nohup anvil --host 0.0.0.0 --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ @@ -921,8 +1118,8 @@ step_setup_environment() { sleep 1 done - log_err "Could not read latest block number from $label anvil at $rpc_url" - return 1 + log_warn "Could not read block number from $label anvil at $rpc_url after 30s; defaulting event_start_from to 0" >&2 + printf "%s" "0" } start_surfpool() { @@ -954,30 +1151,59 @@ step_setup_environment() { sleep 1 done - log_err "Could not read latest Solana slot from surfpool at $rpc_url" - return 1 + log_warn "Could not read Solana slot from surfpool at $rpc_url after 30s; defaulting event_start_from to 0" >&2 + printf "%s" "0" } - start_anvil_fork "sepolia" "9545" "11155111" "https://ethereum-sepolia-rpc.publicnode.com" - start_anvil_fork "arbitrum" "9546" "421614" "https://arbitrum-sepolia.gateway.tenderly.co" - start_anvil_fork "base" "9547" "84532" "https://sepolia.base.org" - # Use the configured BSC endpoint for anvil forking. 
- start_anvil_fork "bsc" "9548" "97" "https://bnb-testnet.g.alchemy.com/v2/peQmTO8MjpoK5Czw4HwRp" - start_surfpool - patch_local_testnet_donut_chain_configs - - local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block solana_latest_slot - sepolia_latest_block="$(wait_for_block_number "sepolia" "$sepolia_host_rpc")" - arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$arbitrum_host_rpc")" - base_latest_block="$(wait_for_block_number "base" "$base_host_rpc")" - bsc_latest_block="$(wait_for_block_number "bsc" "$bsc_host_rpc")" - solana_latest_slot="$(wait_for_solana_slot "$solana_host_rpc")" + if is_local_testing_env; then + # Upstream RPCs that the local anvil forks are derived from. + local sepolia_fork_rpc="https://ethereum-sepolia-rpc.publicnode.com" + local arbitrum_fork_rpc="https://arbitrum-sepolia.gateway.tenderly.co" + local base_fork_rpc="https://sepolia.base.org" + local bsc_fork_rpc="https://bnb-testnet.g.alchemy.com/v2/peQmTO8MjpoK5Czw4HwRp" + local solana_upstream_rpc="https://api.devnet.solana.com" + + # Fetch event_start_from from the upstream RPCs BEFORE starting local forks. + # This gives us the exact fork point block number reliably, without waiting for + # local anvil startup. UVs configured to use the local anvil fork will start + # scanning from this block number, which covers all locally-deployed contracts. 
+ log_info "Fetching latest block numbers from upstream RPCs for event_start_from" + sepolia_latest_block="$(wait_for_block_number "sepolia" "$sepolia_fork_rpc")" + arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$arbitrum_fork_rpc")" + base_latest_block="$(wait_for_block_number "base" "$base_fork_rpc")" + bsc_latest_block="$(wait_for_block_number "bsc" "$bsc_fork_rpc")" + solana_latest_slot="$(wait_for_solana_slot "$solana_upstream_rpc")" + log_ok "event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot" + + start_anvil_fork "sepolia" "9545" "11155111" "$sepolia_fork_rpc" + start_anvil_fork "arbitrum" "9546" "421614" "$arbitrum_fork_rpc" + start_anvil_fork "base" "9547" "84532" "$base_fork_rpc" + # Use the configured BSC endpoint for anvil forking. + start_anvil_fork "bsc" "9548" "97" "$bsc_fork_rpc" + start_surfpool + patch_local_testnet_donut_chain_configs + + # Wait for local forks to be ready before proceeding. 
+ wait_for_block_number "sepolia" "$sepolia_host_rpc" >/dev/null + wait_for_block_number "arbitrum" "$arbitrum_host_rpc" >/dev/null + wait_for_block_number "base" "$base_host_rpc" >/dev/null + wait_for_block_number "bsc" "$bsc_host_rpc" >/dev/null + wait_for_solana_slot "$solana_host_rpc" >/dev/null + else + log_info "Fetching latest block numbers from public chain RPCs for event_start_from" + sepolia_latest_block="$(wait_for_block_number "sepolia" "$uv_sepolia_rpc_url")" + arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$uv_arbitrum_rpc_url")" + base_latest_block="$(wait_for_block_number "base" "$uv_base_rpc_url")" + bsc_latest_block="$(wait_for_block_number "bsc" "$uv_bsc_rpc_url")" + solana_latest_slot="$(wait_for_solana_slot "$uv_solana_rpc_url")" + log_ok "event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot" + fi local patched_count=0 local uv_idx for uv_idx in 1 2 3 4; do - # Prefer local file (local-setup-e2e devnet); fall back to Docker container - local local_cfg="$LOCAL_DEVNET_DIR/data/universal-${uv_idx}/.puniversal/config/pushuv_config.json" + # Prefer local file (local-native devnet); fall back to Docker container + local local_cfg="$LOCAL_DEVNET_DIR/data/universal${uv_idx}/.puniversal/config/pushuv_config.json" local uv_container="universal-validator-${uv_idx}" local tmp_in tmp_out @@ -986,7 +1212,7 @@ step_setup_environment() { if [[ -f "$local_cfg" ]]; then cp "$local_cfg" "$tmp_in" - elif docker ps --format '{{.Names}}' | grep -qx "$uv_container" 2>/dev/null; then + elif [[ "$has_docker" == "true" ]] && docker ps --format '{{.Names}}' | grep -qx "$uv_container" 2>/dev/null; then local docker_cfg="/root/.puniversal/config/pushuv_config.json" if ! 
docker exec "$uv_container" cat "$docker_cfg" >"$tmp_in" 2>/dev/null; then rm -f "$tmp_in" "$tmp_out" @@ -1024,11 +1250,19 @@ step_setup_environment() { if [[ -f "$local_cfg" ]]; then cp "$tmp_out" "$local_cfg" - log_ok "Updated universal-validator-${uv_idx} local config for Sepolia/Arbitrum/Base/BSC/Solana forks" + if is_local_testing_env; then + log_ok "Updated universal-validator-${uv_idx} local config for Sepolia/Arbitrum/Base/BSC/Solana LOCAL forks (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + else + log_ok "Updated universal-validator-${uv_idx} local config from testnet-donut chain public RPCs (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + fi else local docker_cfg="/root/.puniversal/config/pushuv_config.json" docker cp "$tmp_out" "$uv_container":"$docker_cfg" - log_ok "Updated $uv_container Docker config for Sepolia/Arbitrum/Base/BSC/Solana local forks" + if is_local_testing_env; then + log_ok "Updated $uv_container Docker config for Sepolia/Arbitrum/Base/BSC/Solana LOCAL forks (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + else + log_ok "Updated $uv_container Docker config from testnet-donut chain public RPCs (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + fi fi rm -f "$tmp_in" "$tmp_out" patched_count=$((patched_count + 1)) @@ -1039,7 +1273,11 @@ step_setup_environment() { return 0 fi - log_ok "Patched $patched_count universal validator config(s) with local fork RPC/event_start_from (including Solana)" + if is_local_testing_env; then + log_ok "Patched $patched_count universal validator config(s) with LOCAL fork 
RPC/event_start_from (including Solana)" + else + log_ok "Patched $patched_count universal validator config(s) with testnet-donut chain public RPCs and live event_start_from values" + fi } step_stop_running_nodes() { @@ -1052,13 +1290,6 @@ step_stop_running_nodes() { ) fi - if [[ -x "$LEGACY_LOCAL_NATIVE_DIR/devnet" ]]; then - ( - cd "$LEGACY_LOCAL_NATIVE_DIR" - ./devnet down || true - ) - fi - pkill -f "$PUSH_CHAIN_DIR/build/pchaind start" >/dev/null 2>&1 || true pkill -f "$PUSH_CHAIN_DIR/build/puniversald" >/dev/null 2>&1 || true @@ -2235,18 +2466,14 @@ NODE } cmd_all() { - if is_local_testing_env; then - step_setup_environment - fi + step_setup_environment (cd "$PUSH_CHAIN_DIR" && make replace-addresses) (cd "$PUSH_CHAIN_DIR" && make build) step_update_env_fund_to_address step_stop_running_nodes step_devnet step_ensure_tss_key_ready - if is_local_testing_env; then - step_setup_environment - fi + step_setup_environment step_recover_genesis_key step_fund_account step_setup_core_contracts @@ -2268,7 +2495,7 @@ cmd_show_help() { Usage: $(basename "$0") Commands: - setup-environment For TESTING_ENV=LOCAL: start anvil/surfpool + patch validator and testnet-donut chain RPC configs + setup-environment Sync universal-validator RPC URLs (LOCAL => anvil localhost RPCs; non-LOCAL => testnet-donut chain public_rpc_url) devnet Build/start local-multi-validator devnet + uvalidators print-genesis Print first genesis account + mnemonic recover-genesis-key Recover genesis key into local keyring @@ -2303,7 +2530,7 @@ Primary files: Address: $DEPLOY_ADDRESSES_FILE Important env: - TESTING_ENV=LOCAL Enables local anvil setup and config rewrites for testnet-donut chain.json and universal validator RPCs in setup-environment/all + TESTING_ENV=LOCAL Enables local anvil/surfpool startup and localhost RPC rewrites; when not LOCAL, setup-environment uses testnet-donut chain public_rpc_url values for universal validator RPCs ANVIL_SEPOLIA_HOST_RPC_URL=http://localhost:9545 
ANVIL_ARBITRUM_HOST_RPC_URL=http://localhost:9546 ANVIL_BASE_HOST_RPC_URL=http://localhost:9547 diff --git a/local-native/devnet b/local-native/devnet index 40f01540..c4e31d71 100755 --- a/local-native/devnet +++ b/local-native/devnet @@ -92,6 +92,31 @@ get_block_height() { echo "$height" } +wait_chain_tx() { + local txhash="$1" node="$2" max="${3:-30}" i=0 + while (( i < max )); do + local code + code=$("$PCHAIND_BIN" query tx "$txhash" --node="$node" --output json 2>/dev/null \ + | jq -r '.code // empty' 2>/dev/null || true) + [[ "$code" == "0" ]] && return 0 + [[ -n "$code" && "$code" != "0" ]] && return 1 + sleep 1; (( i++ )) + done + return 1 +} + +get_current_tss_key_id() { + local genesis_rpc="tcp://127.0.0.1:26657" + "$PCHAIND_BIN" query utss current-key --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.key.key_id // .current_key.key_id // empty' 2>/dev/null || echo "" +} + +get_utss_admin() { + local genesis_rpc="tcp://127.0.0.1:26657" + "$PCHAIND_BIN" query utss params --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.params.admin // ""' 2>/dev/null || echo "" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS DISPLAY # ═══════════════════════════════════════════════════════════════════════════════ @@ -299,10 +324,14 @@ cmd_start_uv() { require_binaries print_header "Starting Universal Validators..." - local sepolia_start_height="" - if ! sepolia_start_height=$(bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" --get-height); then - print_error "Failed to fetch latest Sepolia height" - exit 1 + # Use SEPOLIA_EVENT_START_FROM from environment if already set (e.g. passed by e2e setup + # with a pre-fetched local anvil block number). Otherwise fetch from live Sepolia RPC. + local sepolia_start_height="${SEPOLIA_EVENT_START_FROM:-}" + if [[ -z "$sepolia_start_height" ]]; then + if ! 
sepolia_start_height=$(bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" --get-height); then + print_error "Failed to fetch latest Sepolia height" + exit 1 + fi fi print_status "Using Sepolia event_start_from: $sepolia_start_height" @@ -431,18 +460,88 @@ cmd_clean() { cmd_tss_keygen() { require_binaries print_header "TSS Key Generation" - print_status "Initiating TSS keygen process..." - "$PCHAIND_BIN" tx utss initiate-tss-key-process \ - --process-type tss-process-keygen \ - --from genesis-acc-1 \ - --chain-id "$CHAIN_ID" \ - --keyring-backend test \ - --home "$DATA_DIR/validator1/.pchain" \ - --fees 1000000000000000upc \ - --yes + # Check for existing TSS key — return early if already present + local existing + existing=$(get_current_tss_key_id) + if [[ -n "$existing" ]]; then + print_success "TSS key already present: $existing" + return 0 + fi + + # Validate that at least 2 universal validators are registered + local genesis_rpc="tcp://127.0.0.1:26657" + local uv_count + uv_count=$("$PCHAIND_BIN" query uvalidator all-universal-validators \ + --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if (( uv_count < 2 )); then + print_error "Need at least 2 registered universal validators (found: $uv_count)" + return 1 + fi + + # Find the key whose address matches the UTSS admin + local admin_addr + admin_addr=$(get_utss_admin) + local val1_home="$DATA_DIR/validator1/.pchain" + local signer="" + while IFS= read -r key_name; do + local addr + addr=$("$PCHAIND_BIN" --home="$val1_home" keys show "$key_name" -a \ + --keyring-backend "$KEYRING" 2>/dev/null || true) + if [[ "$addr" == "$admin_addr" ]]; then signer="$key_name"; break; fi + done < <("$PCHAIND_BIN" --home="$val1_home" keys list \ + --keyring-backend "$KEYRING" --output json 2>/dev/null \ + | jq -r '.[] | .name' 2>/dev/null || true) + + if [[ -z "$signer" ]]; then + print_error "No local key matches UTSS admin address: $admin_addr" + return 1 + 
fi + + local attempt max_attempts=5 + for (( attempt=1; attempt<=max_attempts; attempt++ )); do + print_status "Initiating TSS keygen (attempt $attempt/$max_attempts, signer=$signer)..." + local result tx_hash + result=$("$PCHAIND_BIN" --home="$val1_home" tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$signer" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --node="$genesis_rpc" \ + --fees 1000000000000000upc \ + --yes --output json 2>&1 || true) + + local code + code=$(echo "$result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + tx_hash=$(echo "$result" | jq -r '.txhash // ""' 2>/dev/null || true) + + if [[ "$code" != "0" ]]; then + print_warning "Keygen tx code=$code; retrying..." + sleep 5; continue + fi - print_success "TSS keygen initiated!" + if [[ -n "$tx_hash" ]]; then + wait_chain_tx "$tx_hash" "$genesis_rpc" 30 || true + fi + + # Wait up to 300s for the TSS key to materialize on-chain + print_status "Waiting for TSS key to materialize on-chain..." + local waited=0 + while (( waited < 300 )); do + local kid + kid=$(get_current_tss_key_id) + if [[ -n "$kid" ]]; then + print_success "TSS key ready: $kid" + return 0 + fi + sleep 3; (( waited += 3 )) + done + print_warning "TSS key not ready after 300s on attempt $attempt" + done + + print_error "TSS keygen failed after $max_attempts attempts" + return 1 } cmd_tss_refresh() { diff --git a/local-native/scripts/setup-universal.sh b/local-native/scripts/setup-universal.sh index e90919a0..7c557266 100755 --- a/local-native/scripts/setup-universal.sh +++ b/local-native/scripts/setup-universal.sh @@ -94,6 +94,40 @@ if [ -n "${SEPOLIA_EVENT_START_FROM:-}" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi +# Apply chain RPC URL overrides if set (e.g. 
for LOCAL anvil forks) +apply_rpc_override() { + local chain_id="$1" rpc_url="$2" + [ -n "$rpc_url" ] || return 0 + jq --arg c "$chain_id" --arg u "$rpc_url" \ + '.chain_configs[$c].rpc_urls = [$u]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +} + +apply_event_start_override() { + local chain_id="$1" height="$2" + [ -n "$height" ] && [[ "$height" =~ ^[0-9]+$ ]] || return 0 + jq --arg c "$chain_id" --argjson h "$height" \ + '.chain_configs[$c].event_start_from = $h' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +} + +apply_rpc_override "eip155:11155111" "${SEPOLIA_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:421614" "${ARBITRUM_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:84532" "${BASE_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:97" "${BSC_RPC_URL_OVERRIDE:-}" +apply_rpc_override "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "${SOLANA_RPC_URL_OVERRIDE:-}" + +apply_event_start_override "eip155:421614" "${ARBITRUM_EVENT_START_FROM:-}" +apply_event_start_override "eip155:84532" "${BASE_EVENT_START_FROM:-}" +apply_event_start_override "eip155:97" "${BSC_EVENT_START_FROM:-}" +apply_event_start_override "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "${SOLANA_EVENT_START_FROM:-}" + +# Always start from block 1 for the local devnet chain so UVs see TSS key processes immediately +apply_event_start_override "localchain_9000-1" "1" +apply_event_start_override "push_42101-1" "1" + # Enable TSS TSS_PRIVATE_KEY=$(printf '%02x' $UNIVERSAL_ID | head -c 2) TSS_PRIVATE_KEY=$(yes $TSS_PRIVATE_KEY | head -32 | tr -d '\n') diff --git a/local-native/scripts/setup-uvalidators.sh b/local-native/scripts/setup-uvalidators.sh index 6355a256..b80b5eee 100755 --- a/local-native/scripts/setup-uvalidators.sh +++ 
b/local-native/scripts/setup-uvalidators.sh @@ -29,6 +29,28 @@ get_tss_port() { echo $((39000 + $1 - 1)) } +# Helper: wait for a TX to be included in a block, check its result code +wait_for_tx() { + local txhash="$1" max_attempts="${2:-30}" i=0 + while [ $i -lt $max_attempts ]; do + sleep 2 + local code + code=$(curl -s "http://127.0.0.1:26657/tx?hash=0x${txhash}" 2>/dev/null \ + | jq -r '.result.tx_result.code // empty' 2>/dev/null) + [ "$code" = "0" ] && return 0 + if [ -n "$code" ] && [ "$code" != "null" ]; then + local log + log=$(curl -s "http://127.0.0.1:26657/tx?hash=0x${txhash}" 2>/dev/null \ + | jq -r '.result.tx_result.log // ""' 2>/dev/null) + echo " ❌ TX failed (code=$code): $log" >&2 + return 1 + fi + i=$((i + 1)) + done + echo " ⚠️ TX not confirmed after $((max_attempts * 2))s" >&2 + return 1 +} + echo "🔧 Setting up Universal Validators..." echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" @@ -63,7 +85,9 @@ echo "" echo "📝 Registering Universal Validators..." echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -for i in 1 2 3 4; do +# Only register 2 UVs for local devnet — UV1↔UV3 libp2p noise handshake is incompatible +NUM_UV=${NUM_UV:-2} +for i in $(seq 1 $NUM_UV); do echo "" echo "📋 Registering universal-validator-$i" @@ -103,16 +127,18 @@ for i in 1 2 3 4; do if echo "$RESULT" | grep -q '"txhash"'; then TX_HASH=$(echo "$RESULT" | jq -r '.txhash' 2>/dev/null) - echo " ✅ Registered! TX: $TX_HASH" + if wait_for_tx "$TX_HASH"; then + echo " ✅ Registered! 
TX: $TX_HASH" + else + echo " ⚠️ Registration TX failed on-chain" + fi else echo " ⚠️ Registration may have failed" fi - - sleep 2 # Wait between registrations done # ═══════════════════════════════════════════════════════════════════════════════ -# CREATE AUTHZ GRANTS (batched - 4 grants per transaction) +# CREATE AUTHZ GRANTS (batched - 4 grants per transaction, with confirmation) # ═══════════════════════════════════════════════════════════════════════════════ echo "" @@ -128,12 +154,12 @@ TEMP_DIR=$(mktemp -d) MSG_TYPES=( "/uexecutor.v1.MsgVoteInbound" - "/uexecutor.v1.MsgVoteGasPrice" + "/uexecutor.v1.MsgVoteChainMeta" "/uexecutor.v1.MsgVoteOutbound" "/utss.v1.MsgVoteTssKeyProcess" ) -for i in 1 2 3 4; do +for i in $(seq 1 ${NUM_UV:-2}); do HOTKEY_ADDR=$(jq -r ".[$((i-1))].address" "$HOTKEYS_FILE") VALIDATOR_ADDR=$("$PCHAIND_BIN" keys show "validator-$i" -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null) @@ -143,16 +169,16 @@ for i in 1 2 3 4; do fi echo "" - echo "📋 validator-$i → hotkey-$i (4 grants in 1 tx)" + echo "📋 validator-$i → hotkey-$i" echo " Granter: $VALIDATOR_ADDR" echo " Grantee: $HOTKEY_ADDR" - # Generate unsigned txs for all 4 message types + BATCH_OK=false + + # Attempt batch: all 4 grants in one TX MESSAGES="[]" for j in "${!MSG_TYPES[@]}"; do MSG_TYPE="${MSG_TYPES[$j]}" - - # Generate unsigned tx UNSIGNED_TX=$("$PCHAIND_BIN" tx authz grant "$HOTKEY_ADDR" generic \ --msg-type="$MSG_TYPE" \ --from "validator-$i" \ @@ -163,16 +189,16 @@ for i in 1 2 3 4; do --gas=50000 \ --gas-prices="1000000000upc" \ --generate-only 2>/dev/null) - - # Extract the message and add to array MSG=$(echo "$UNSIGNED_TX" | jq -c '.body.messages[0]' 2>/dev/null) if [ -n "$MSG" ] && [ "$MSG" != "null" ]; then MESSAGES=$(echo "$MESSAGES" | jq --argjson msg "$MSG" '. 
+ [$msg]') fi done - # Create combined transaction with all 4 messages - COMBINED_TX=$(cat </dev/null || echo "0") + + if [ "${MSG_COUNT}" = "4" ]; then + COMBINED_TX=$(cat < "$TEMP_DIR/combined_tx_$i.json" - - # Sign the combined transaction - SIGNED_TX=$("$PCHAIND_BIN" tx sign "$TEMP_DIR/combined_tx_$i.json" \ - --from "validator-$i" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --home "$HOME_DIR" \ - --node="$RPC_NODE" \ - --output-document="$TEMP_DIR/signed_tx_$i.json" 2>&1) - - # Broadcast the signed transaction - BROADCAST_RESULT=$("$PCHAIND_BIN" tx broadcast "$TEMP_DIR/signed_tx_$i.json" \ - --node="$RPC_NODE" \ - --broadcast-mode sync 2>&1) - - # Check result - if echo "$BROADCAST_RESULT" | grep -q "txhash"; then - TX_HASH=$(echo "$BROADCAST_RESULT" | grep -o 'txhash: [A-F0-9]*' | cut -d' ' -f2 || echo "$BROADCAST_RESULT" | jq -r '.txhash' 2>/dev/null) - echo " ✅ 4 grants created! TX: ${TX_HASH:0:16}..." - TOTAL_GRANTS=$((TOTAL_GRANTS + 4)) + echo "$COMBINED_TX" > "$TEMP_DIR/combined_tx_$i.json" + + "$PCHAIND_BIN" tx sign "$TEMP_DIR/combined_tx_$i.json" \ + --from "validator-$i" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --home "$HOME_DIR" \ + --node="$RPC_NODE" \ + --output-document="$TEMP_DIR/signed_tx_$i.json" 2>/dev/null + + BROADCAST_RESULT=$("$PCHAIND_BIN" tx broadcast "$TEMP_DIR/signed_tx_$i.json" \ + --node="$RPC_NODE" \ + --broadcast-mode sync 2>&1) + + TX_HASH=$(echo "$BROADCAST_RESULT" | jq -r '.txhash // empty' 2>/dev/null) + [ -z "$TX_HASH" ] && TX_HASH=$(echo "$BROADCAST_RESULT" | grep -o 'txhash: [A-F0-9]*' | awk '{print $2}') + + if [ -n "$TX_HASH" ] && wait_for_tx "$TX_HASH" 30; then + echo " ✅ Batch grant confirmed (TX: ${TX_HASH:0:16}...)" + TOTAL_GRANTS=$((TOTAL_GRANTS + 4)) + BATCH_OK=true + else + echo " ⚠️ Batch TX failed or unconfirmed, trying individual grants..." + fi else - echo " ⚠️ Batch may have failed, trying individual grants..." 
- # Fallback to individual grants + echo " ⚠️ Could not build batch TX (got $MSG_COUNT messages), trying individual grants..." + fi + + # Fallback: individual grants with per-TX confirmation + if [ "$BATCH_OK" = "false" ]; then for MSG_TYPE in "${MSG_TYPES[@]}"; do MSG_NAME=$(basename "$MSG_TYPE") GRANT_RESULT=$("$PCHAIND_BIN" tx authz grant "$HOTKEY_ADDR" generic \ @@ -230,19 +260,19 @@ EOF --keyring-backend "$KEYRING" \ --home "$HOME_DIR" \ --node="$RPC_NODE" \ - --gas=auto \ - --gas-adjustment=1.5 \ - --gas-prices="1000000000upc" \ - --yes 2>&1) + --gas 300000 \ + --gas-prices "1000000000upc" \ + --yes --output json 2>&1) - if echo "$GRANT_RESULT" | grep -q "txhash"; then + GRANT_TX_HASH=$(echo "$GRANT_RESULT" | jq -r '.txhash // empty' 2>/dev/null) + if [ -n "$GRANT_TX_HASH" ] && wait_for_tx "$GRANT_TX_HASH" 15; then + echo " ✅ Granted $MSG_NAME" TOTAL_GRANTS=$((TOTAL_GRANTS + 1)) + else + echo " ⚠️ Failed to grant $MSG_NAME" fi - sleep 2 done fi - - sleep 2 # Wait between validators done # Cleanup @@ -257,7 +287,7 @@ echo "📊 Total AuthZ grants created: $TOTAL_GRANTS/16" if [ "$TOTAL_GRANTS" -ge 16 ]; then echo "✅ All grants created successfully!" 
else - echo "⚠️ Some grants may be missing" + echo "⚠️ Some grants may be missing ($TOTAL_GRANTS/16)" fi echo "" diff --git a/local-native/scripts/setup-validator-auto.sh b/local-native/scripts/setup-validator-auto.sh index 8b1eb8f1..8da05780 100755 --- a/local-native/scripts/setup-validator-auto.sh +++ b/local-native/scripts/setup-validator-auto.sh @@ -83,6 +83,9 @@ sed -i.bak "s/address = \"localhost:9090\"/address = \"0.0.0.0:${GRPC_PORT}\"/g" sed -i.bak "s/laddr = \"tcp:\/\/0.0.0.0:26656\"/laddr = \"tcp:\/\/0.0.0.0:${P2P_PORT}\"/g" "$HOME_DIR/config/config.toml" sed -i.bak 's/timeout_commit = "5s"/timeout_commit = "1s"/g' "$HOME_DIR/config/config.toml" +# Pre-create WAL directory to prevent CometBFT panic when transitioning to active validator +mkdir -p "$HOME_DIR/data/cs.wal" + # Start node echo "🚀 Starting validator $VALIDATOR_ID..." "$PCHAIND_BIN" start \ From aa6307a1ba50abd29fd3aee251ad0952ffbee0b6 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Tue, 7 Apr 2026 22:30:31 +0200 Subject: [PATCH 33/61] feat: updated READMEs of core validator and specific modules --- DERIVED_TRANSACTIONS.md | 212 ++++++++++++++++++++++ app/README.md | 254 ++++++++++++++++++++++++++ precompiles/usigverifier/README.md | 122 +++++++++++-- readme.md | 20 ++- x/uexecutor/README.md | 277 ++++++++++++++++++++++++++++- x/uregistry/README.md | 115 +++++++++++- x/utss/README.md | 110 +++++++++++- x/uvalidator/README.md | 197 +++++++++++++++++++- 8 files changed, 1253 insertions(+), 54 deletions(-) create mode 100644 DERIVED_TRANSACTIONS.md create mode 100644 app/README.md diff --git a/DERIVED_TRANSACTIONS.md b/DERIVED_TRANSACTIONS.md new file mode 100644 index 00000000..73ffdb2c --- /dev/null +++ b/DERIVED_TRANSACTIONS.md @@ -0,0 +1,212 @@ +# Derived Transactions + +A primitive added in Push Chain's EVM fork ([`github.com/pushchain/evm`](https://github.com/pushchain/evm), pinned via `replace` in `go.mod`) that lets a Cosmos SDK module produce a **real EVM transaction** — one that has a 
real receipt, real logs, and is fully observable through the JSON-RPC layer — instead of an internal "module call" that exists only inside the SDK. + +The new EVM keeper method is `DerivedEVMCall`. Everywhere in the Push Chain codebase that needs to act on the EVM as a Cosmos module (mint PRC20s, write chain-meta, deploy a UEA, refund gas, ...) goes through this single entry point. + +## Why It Exists + +Stock cosmos-evm exposes `EVMKeeper.CallEVM`: + +```go +func (k Keeper) CallEVM( + ctx sdk.Context, + abi abi.ABI, + from, contract common.Address, + commit bool, + method string, + args ...interface{}, +) (*types.MsgEthereumTxResponse, error) +``` + +`CallEVM` is built for **internal queries**: a Cosmos module wants to read state from a contract or trigger a side effect, and the EVM layer treats it as a synthetic call. It's enough for read paths and lightweight writes, but it has hard limitations the moment a module needs to behave like a first-class EVM sender: + +| Need | `CallEVM` | +|---|---| +| Send native value (`msg.value`) | not supported (always 0) | +| Set an explicit `gasLimit` | not supported | +| Bypass gas accounting for module-initiated work | not supported | +| Act as a module account (no private key) sending a real EVM tx | not supported | +| Issue multiple calls in the same block from the same sender without nonce collisions | not supported (nonce is read from state on every call) | +| Produce a JSON-RPC-visible receipt with hash, gas used, and logs | partial — the call exists, but doesn't surface as a normal EVM tx | + +`DerivedEVMCall` is the fork's answer to all six. 
+ +## The API + +```go +DerivedEVMCall( + ctx sdk.Context, + abi abi.ABI, + from, contract common.Address, + value, gasLimit *big.Int, + commit, gasless, isModuleSender bool, + manualNonce *uint64, + method string, + args ...interface{}, +) (*types.MsgEthereumTxResponse, error) +``` + +Defined on the Push Chain `EVMKeeper` interface in [`x/uexecutor/types/expected_keepers.go`](./x/uexecutor/types/expected_keepers.go). + +| Parameter | Purpose | +|---|---| +| `ctx` | SDK context — provides block, gas meter, store access | +| `abi` | Parsed contract ABI for encoding the call | +| `from` | The EVM address that will appear as the tx sender. Can be a derived user address or a module account address. | +| `contract` | Destination contract | +| `value` | Native value to attach (`*big.Int`, may be `nil` or `big.NewInt(0)`) | +| `gasLimit` | Explicit gas limit (`nil` -> use a sensible default). Critical for predictable receipts. | +| `commit` | `true` = real state-changing tx; `false` = simulation / static call | +| `gasless` | `true` = skip gas accounting entirely. Used when the call is initiated by the protocol itself and shouldn't bill any user. | +| `isModuleSender` | `true` = `from` is a Cosmos module account (no private key). The fork's signer logic uses a deterministic synthetic signature instead of requiring a real ECDSA signature. | +| `manualNonce` | If non-`nil`, the caller supplies the nonce explicitly. This is what makes "many EVM calls in one block from the same module" deterministic — see [Manual Nonce Management](#manual-nonce-management). | +| `method` + `args` | Standard ABI-encoded call data | + +The return type is `*evmtypes.MsgEthereumTxResponse`, the same type a normal `MsgEthereumTx` produces. Concretely: + +```go +receipt, err := k.evmKeeper.DerivedEVMCall(...) +// receipt.Hash -- 0x... 
tx hash, queryable via eth_getTransactionByHash +// receipt.GasUsed -- real gas used, observable in receipts +// receipt.Logs -- real EVM logs, indexable by event subscribers +// receipt.Ret -- ABI-encoded return data (for view-style commits) +``` + +## When to Use Each Mode + +The Push Chain codebase uses two distinct call patterns. Both are visible in [`x/uexecutor/keeper/evm.go`](./x/uexecutor/keeper/evm.go). + +### 1. User-derived sender (UEA-routed user actions) + +When a user submits a `MsgExecutePayload` or `MsgMigrateUEA`, the Cosmos signer is converted to its derived EVM address and the EVM call is issued from that address. The UEA contract is what authenticates the request via `verificationData`. + +```go +return k.evmKeeper.DerivedEVMCall( + ctx, + abi, + evmFromAddress, // user's derived EVM address + ueaAddr, + big.NewInt(0), + gasLimit, + true, // commit + false, // gasless = false (real user tx, gas should appear in receipt) + false, // isModuleSender = false + nil, // manualNonce = nil (read from state like a normal user) + "executeUniversalTx", + abiUniversalPayload, + verificationData, +) +``` + +Why not `CallEVM`? Two reasons: +- Real receipts. Universal Validators, indexers, and the JSON-RPC layer all need to see the tx as a normal Ethereum tx so they can observe gas used, status, and emitted events. +- Explicit `gasLimit`. The payload's gas budget must be enforceable; `CallEVM` doesn't accept one. + +### 2. Module-as-sender (protocol-initiated EVM work) + +When `x/uexecutor` itself needs to issue an EVM call (deposit PRC20s, push chain-meta, refund unused gas, ...) the sender is the `uexecutor` module account. Module accounts don't have private keys, so this would be impossible via a normal `MsgEthereumTx` — you can't sign one. 
`DerivedEVMCall` with `isModuleSender=true` solves it: + +```go +ueModuleAccAddress, _ := k.GetUeModuleAddress(ctx) +nonce, _ := k.GetModuleAccountNonce(ctx) +_, _ = k.IncrementModuleAccountNonce(ctx) + +return k.evmKeeper.DerivedEVMCall( + ctx, + abi, + ueModuleAccAddress, // module account as sender + handlerAddr, + big.NewInt(0), + nil, + true, // commit + false, // gasless = false (we still want gas in the receipt) + true, // isModuleSender = true + &nonce, // manualNonce = explicit + "depositPRC20Token", + prc20Address, amount, to, +) +``` + +The fork is responsible for synthesising a deterministic "signature" for the module account so the tx can be properly receipted and indexed without ever needing a real key to exist. + +## Manual Nonce Management + +Stock cosmos-evm reads the sender's nonce from EVM state on every call. That's fine for users (one user = one tx in flight at a time, the mempool serializes the rest), but it breaks for module accounts that may need to issue **several** EVM calls within the same block: + +``` +BeginBlock + uexecutor.handleInbound1 + -> CallPRC20Deposit (nonce = ?) + -> CallUniversalCoreRefundUnusedGas (nonce = ?) + uexecutor.handleInbound2 + -> CallPRC20DepositAutoSwap (nonce = ?) +EndBlock +``` + +If the keeper read the nonce from state for each of these, every call within the same block would see the same starting nonce — and they'd all collide. The fork's solution is the `manualNonce *uint64` argument: the caller passes its own counter, the fork honours it, and is responsible for incrementing it before the next call. 
+
+`x/uexecutor` keeps that counter in its own KV store as the `ModuleAccountNonce` collection ([`x/uexecutor/keeper/keeper.go`](./x/uexecutor/keeper/keeper.go)):
+
+```go
+nonce, err := k.GetModuleAccountNonce(ctx) // read
+if _, err := k.IncrementModuleAccountNonce(ctx); err != nil {
+    return nil, err
+}
+// pass &nonce to DerivedEVMCall
+```
+
+The increment happens **before** the call, intentionally — if the EVM call fails, the nonce gap is benign (skipped nonces are fine in EVM), but a post-call increment would risk reusing a nonce on retry. This pre-increment is the canonical way to issue derived txs from a module.
+
+> ⚠️ **Single source of truth.** Only one module in the whole codebase should ever increment `ModuleAccountNonce`. If two modules need to send derived txs as the same module account, they must coordinate through a single keeper helper. The current design has only `x/uexecutor` doing this, so the invariant holds trivially.
+
+## The `gasless` Flag
+
+`gasless=true` tells the fork: "this call is part of internal protocol bookkeeping, don't bill any account for the gas." Right now, every Push Chain call site passes `gasless=false`, with the inline comment:
+
+> `// gasless = false (@dev: we need gas to be emitted in the tx receipt)`
+
+The reason: even though the protocol pays the gas, the tx receipt still needs `gas_used` populated so off-chain services (Universal Validators, explorers, the gas-fee accounting in `x/uexecutor`) can read it back. Setting `gasless=true` would suppress the gas field and break that read path.
+
+The flag exists for future use — protocol housekeeping calls that don't need to be observable via receipts (e.g. genesis-time bytecode patches). For day-to-day inbound/outbound execution, `gasless` stays `false`.
+
+## Where It's Used
+
+Every derived call in Push Chain is in [`x/uexecutor/keeper/evm.go`](./x/uexecutor/keeper/evm.go). Quick map:
+
+| Helper | Sender | Why derived? 
| +|---|---|---| +| `CallFactoryToDeployUEA` | user-derived | Real tx receipt is required for the deploy; the deployer address is the source-chain user's derived EVM address. | +| `CallUEAExecutePayload` | user-derived | Carries `gasLimit` from the payload; receipt is consumed by the Universal Validator vote-back path. | +| `CallUEAMigrateUEA` | user-derived | Same — needs a real receipt. | +| `CallPRC20Deposit` | module | Mints PRC20 to recipient. Module account has no key. | +| `CallPRC20DepositAutoSwap` | module | Same, but with the auto-swap leg. | +| `CallUniversalCoreSetGasPrice` | module | Writes a single chain's gas price to the on-chain oracle. | +| `CallUniversalCoreSetChainMeta` | module | Writes gas price + block height for a chain. | +| `CallUniversalCoreRefundUnusedGas` | module | Refunds unused gas (with optional swap back to PC). | +| `CallExecuteUniversalTx` | module | Calls `executeUniversalTx` on a recipient smart contract for `isCEA` inbounds. | + +The pure read paths in the same file (`CallFactoryToGetUEAAddressForOrigin`, `CallFactoryGetOriginForUEA`, `CallUEADomainSeparator`, `GetGasPriceByChain`, `GetUniversalCoreQuoterAddress`, `GetUniversalCoreWPCAddress`, `GetDefaultFeeTierForToken`, `GetSwapQuote`) all use plain `CallEVM` with `commit=false` — they don't need a receipt because they're static. 
+ +## Quick Reference: `CallEVM` vs `DerivedEVMCall` + +``` + CallEVM DerivedEVMCall + ------- --------------- +value 0 (implicit) explicit *big.Int +gasLimit default explicit *big.Int (or nil) +commit yes yes +gasless no (always charges) flag (default: false in PC) +isModuleSender no flag (true = synthetic signer) +manualNonce no (read from state) optional override +JSON-RPC visible receipt partial yes — same as a user MsgEthereumTx +typical use internal queries, protocol-as-sender writes, + lightweight side effects user-derived EVM-routed actions +``` + +## Caveats + +- **`isModuleSender=true` requires the synthetic signer logic in the fork.** If the upstream cosmos-evm version is bumped, that signer path must remain intact, otherwise module-originated derived calls will fail validation. +- **`manualNonce` is the caller's responsibility.** The fork trusts the supplied value verbatim. Two callers stomping each other's nonce will cause receipt collisions and confusing replays. +- **Pre-increment, never post-increment.** If you increment after the call and the call panics or errors mid-execution, you've now reused a nonce. Always increment first; treat skipped nonces as a non-issue (EVM allows nonce gaps for module accounts since no transaction sequencing depends on them). +- **`gasless=true` suppresses the gas field in the receipt.** Until there's a clear reason to drop receipts on the floor for a particular call site, leave it `false`. diff --git a/app/README.md b/app/README.md new file mode 100644 index 00000000..b99f6cca --- /dev/null +++ b/app/README.md @@ -0,0 +1,254 @@ +# Core Validator + +Push Chain's L1 node binary (`pchaind`). Four custom Cosmos SDK modules and one custom EVM precompile turn the chain into the universal-execution layer that coordinates inbounds, outbounds, and TSS-signed crosschain transactions. 
+ +- **Produces** blocks via CometBFT consensus and runs the EVM execution engine for both standard and universal traffic +- **Coordinates** the crosschain protocol — collects votes from Universal Validators on inbounds/outbounds/chain meta, finalizes ballots, drives TSS keygen and fund migration, and rewards UV operators with a boosted fee share +- **Hosts** Universal Executor Accounts (UEAs) and the chain-meta oracle on its EVM, giving any source-chain user a deterministic Push Chain identity and predictable gas pricing across networks + +## Architecture + +``` +app/ +|-- app.go ChainApp wiring (4 custom modules + usigverifier) +|-- precompiles.go Baseline EVM precompile registration (bech32, p256, staking, ...) +|-- ante/ Custom AnteHandler chain (gasless support) +| |-- ante.go Routes Ethereum vs Cosmos txs by extension option +| |-- ante_cosmos.go Cosmos decorator chain +| |-- ante_evm.go EVM mono-decorator wrapper +| |-- fee.go Custom DeductFeeDecorator (skips fee for gasless txs) +| +-- account_init_decorator.go Creates accounts mid-pipeline for first-time gasless signers +|-- cosmos/ +| +-- min_gas_price.go MinGasPriceDecorator (skips min-fee check for gasless txs) +|-- decorators/ Generic message-filter decorator template +|-- txpolicy/ +| +-- gasless.go IsGaslessTx — single source of truth for the gasless message whitelist +|-- params/ Test encoding configuration ++-- config.go, encoding.go, genesis.go, token_pair.go, wasm.go + +x/ Custom Cosmos SDK modules (only what Push adds) +|-- uexecutor/ Universal transaction execution layer +|-- uregistry/ Chain & token registry +|-- uvalidator/ Universal validator set + ballot voting + UV reward boost ++-- utss/ TSS keygen / refresh / quorum-change / fund migration + +precompiles/ ++-- usigverifier/ Ed25519 signature verification precompile (Solana sig verification on EVM) + +cmd/pchaind/ Binary entry point, root command, key/EVM CLI wiring +proto/ Protobuf definitions for the four custom modules +config/ 
Per-chain JSON registry configs (mainnet/, testnet-donut/) +``` + +## What It Does + +### The Hub-and-Spoke Picture + +Push Chain is the coordination layer in a hub-and-spoke crosschain model. Universal Validators (the off-chain `puniversald` worker — see [`universalClient/README.md`](../universalClient/README.md)) watch external chains, observe events, run TSS, and vote those observations onto Push Chain. The core validator is the hub: it tallies those votes, executes the resulting Push Chain logic, and emits the next round of work. + +``` + Ethereum ----\ /---- Ethereum + Arbitrum -----\ +------------------+ /---- Arbitrum + Base ---------->---| Push Chain |--<---- Base + BSC ----------/ | (core validator) | \---- BSC + Solana ------/ +------------------+ \--- Solana + + Inbound Tally + Execute Outbound + (UV votes inbound) (PC executes UTX) (UV signs + relays) +``` + +Two primitives drive this: + +- **Inbound** — A gateway event observed on an external chain. Universal Validators wait for finality, then vote it via `MsgVoteInbound` on `x/uexecutor`. Once 2/3 vote the same observation, the core validator executes it on Push Chain (mints PRC20s, runs the user's payload through their UEA). +- **Outbound** — A transaction the core validator needs broadcast to an external chain (e.g. funds being unlocked from a vault). The pending outbound is picked up by Universal Validators, signed via TSS, broadcast, and the result is voted back via `MsgVoteOutbound`. + +A single inbound's payload can spawn multiple outbounds; each outbound's destination event can become a new inbound. The core validator is the consistency point that keeps the whole graph deterministic. + +### Custom Modules + +Push Chain registers four custom Cosmos SDK modules. + +#### `x/uexecutor` — Universal Transaction Executor + +Lifecycle owner of every crosschain transaction (`UniversalTx`). 
Tallies inbound/outbound/chain-meta votes from Universal Validators, executes inbound payloads through the UEA factory, tracks pending outbounds, and writes chain-meta back to the EVM oracle. + +**Messages** +- `MsgVoteInbound`, `MsgVoteOutbound`, `MsgVoteChainMeta` — bonded UV-only, gasless +- `MsgExecutePayload`, `MsgMigrateUEA` — any user, gasless (the UEA itself authenticates the request) +- `MsgUpdateParams` — gov-only + +**State** +- `UniversalTx` — the canonical UTX record (inbound, PC tx, outbounds, status) +- `PendingInbounds` — secondary index of inbounds awaiting tally/execution +- `PendingOutbounds` — secondary index of outbounds in `PENDING` status +- `ChainMetas` — aggregated gas price + block height per CAIP-2 chain +- `ModuleAccountNonce` — manually managed nonce so the module can issue `DerivedEVMCall`s +- `GasPrices` — legacy, kept only for genesis import compatibility + +**EVM integration** — Deploys the UEA factory on fresh genesis, then drives all on-chain crosschain logic (mint PRC20, swap quotes, refund gas, push chain meta) through `DerivedEVMCall` with manual nonce tracking. See [`x/uexecutor/README.md`](../x/uexecutor/README.md). + +#### `x/uregistry` — Chain & Token Registry + +Source of truth for which external chains and tokens Push Chain talks to. Admin-curated. + +**Messages** (admin-only, where admin is `params.Admin`) +- `MsgAddChainConfig`, `MsgUpdateChainConfig` +- `MsgAddTokenConfig`, `MsgUpdateTokenConfig`, `MsgRemoveTokenConfig` +- `MsgUpdateParams` — gov-only + +**State** +- `ChainConfigs` — per-CAIP-2 chain config (RPC URL, gateway, vault methods, block confirmations, inbound/outbound enabled flags, gas oracle interval) +- `TokenConfigs` — token whitelist by `chain:address`, with native representation, decimals, and liquidity cap + +Deploys the universal system contracts (UniversalGatewayPC and reserved proxy slots) on fresh genesis. See [`x/uregistry/README.md`](../x/uregistry/README.md). 
+ +#### `x/uvalidator` — Universal Validator Management & Ballot Voting + +The consensus layer for crosschain observations. Maintains the Universal Validator set, runs the generic ballot machine that all four modules vote through, and distributes a boosted reward share to active UVs. + +**Messages** +- `MsgAddUniversalValidator`, `MsgRemoveUniversalValidator`, `MsgUpdateUniversalValidatorStatus` — admin-only +- `MsgUpdateUniversalValidator` — self (the validator updates its own crosschain identity) +- `MsgUpdateParams` — gov-only + +**State** +- `UniversalValidatorSet` — registered UVs, keyed by `sdk.ValAddress`, with lifecycle status (`PENDING_JOIN` -> `ACTIVE` -> `PENDING_LEAVE`) +- `Ballots` — every ballot ever created (vote results, status, expiry) +- `ActiveBallotIDs`, `ExpiredBallotIDs`, `FinalizedBallotIDs` — index sets for fast lookup + +**Generic ballot machine** — used by `x/uexecutor` (inbound/outbound/chain-meta) and `x/utss` (TSS events, fund migrations). A ballot is created on the first vote, finalizes as `PASSED` once `votingThreshold` matching votes are in, or `REJECTED` once enough opposite votes make the threshold unreachable. + +**UV Reward Boost (BeginBlocker)** — Before the standard distribution module runs, `x/uvalidator` intercepts the FeeCollector balance and inflates effective voting power for active UVs by a `1.148x` multiplier. The extra `0.148x` portion is allocated proportionally to UVs and forwarded to the distribution module; the remaining fees flow back to the FeeCollector for normal proposer + community-pool + delegator distribution. Net effect: validators that also run a Universal Validator earn ~14.8% more block rewards. See [`x/uvalidator/README.md`](../x/uvalidator/README.md). + +#### `x/utss` — Threshold Signature Scheme + +Coordinates the lifecycle of the TSS key that signs every outbound transaction. 
+ +**Messages** +- `MsgInitiateTssKeyProcess`, `MsgInitiateFundMigration` — admin-only +- `MsgVoteTssKeyProcess`, `MsgVoteFundMigration` — bonded UV-only, gasless +- `MsgUpdateParams` — gov-only + +**State** +- `CurrentTssProcess` / `ProcessHistory` — active and historical keygen/refresh/quorum-change processes +- `CurrentTssKey` / `TssKeyHistory` — finalized active key + every key that has ever existed +- `TssEvents` / `PendingTssEvents` — fine-grained events emitted during a process (used for vote routing) +- `FundMigrations` / `PendingMigrations` — old-key -> new-key fund moves on each external chain + +**Process types** +- `KEYGEN` — produce a brand-new key with new on-chain addresses (triggers fund migration on every connected chain) +- `REFRESH` — redistribute fresh keyshares without changing the public key +- `QUORUM_CHANGE` — add/remove participants without changing the public key + +See [`x/utss/README.md`](../x/utss/README.md). + +### Custom EVM Precompile + +Push Chain ships exactly one custom precompile: + +| Address | Name | Purpose | +|---|---|---| +| `0x00000000000000000000000000000000000000ca` | `usigverifier` (legacy) | Ed25519 signature verification (Solana signatures over `bytes32` digests) | +| `0xEC00000000000000000000000000000000000001` | `usigverifier` (v2) | Same implementation, registered at the reserved Push range | + +Both addresses are registered simultaneously for backward compatibility with deployed contracts that have the legacy address hardcoded. Gas cost: `4000` per `verifyEd25519` call. See [`precompiles/usigverifier/README.md`](../precompiles/usigverifier/README.md). + +The baseline EVM precompiles (`bech32`, `p256`, `staking`, `distribution`, `ics20`, `bank`, `gov`, `slashing`, `evidence`) are wired in via `app/precompiles.go:NewAvailableStaticPrecompiles`. 
+ +### Transaction Pipeline — Gasless Support + +Push Chain extends the Cosmos AnteHandler with three custom decorators that together enable **gasless transactions** for Universal Validators and UEA users. Without this, every Universal Validator would need to hold and manage gas tokens just to vote — defeating the point of having a permissioned UV set. + +**The gasless whitelist** (`app/txpolicy/gasless.go`) — only these message types qualify: + +``` +/uexecutor.v1.MsgExecutePayload +/uexecutor.v1.MsgMigrateUEA +/uexecutor.v1.MsgVoteInbound +/uexecutor.v1.MsgVoteOutbound +/uexecutor.v1.MsgVoteChainMeta +/utss.v1.MsgVoteTssKeyProcess +/utss.v1.MsgVoteFundMigration +``` + +A tx is gasless only if **every** message (including those nested inside `authz.MsgExec`) is in the whitelist. + +**Custom decorators** + +| Decorator | File | Behavior on gasless tx | +|---|---|---| +| `MinGasPriceDecorator` | `app/cosmos/min_gas_price.go` | Skips the FeeMarket minimum-fee check entirely | +| `DeductFeeDecorator` | `app/ante/fee.go` | Skips fee deduction (no balance required) | +| `AccountInitDecorator` | `app/ante/account_init_decorator.go` | If signer has no on-chain account yet, creates it mid-pipeline with `account_number=0, sequence=0`, verifies the signature against those values, and short-circuits the rest of the ante chain | + +The third decorator is what lets a freshly-keygen'd Universal Validator hot key vote on its very first tx, without anyone first having to fund it. 
+
+## Configuration
+
+| | |
+|---|---|
+| Binary name | `pchaind` |
+| Node home | `~/.pchain` |
+| Bech32 prefixes | `push` (account) / `pushvaloper` (validator operator) / `pushvalcons` (consensus) |
+| Coin type | `60` (Ethereum-compatible HD path) |
+| Base denom | `upc` (18 decimals, EVM-aligned) |
+| Default chain ID | `localchain_9000-1` (devnet); testnet uses `push_42101-1` |
+| Exposed ports (Docker) | `1317` REST, `26656` P2P, `26657` Tendermint RPC, `8545` EVM JSON-RPC, `8546` EVM WS |
+
+`app.toml` includes the standard `[evm]`, `[json-rpc]`, `[tls]`, and `[wasm]` sections required by the embedded EVM and JSON-RPC server. There are no Push-specific configuration knobs beyond those.
+
+## Getting Started
+
+**Prerequisites**
+
+- [Go 1.23+](https://golang.org/dl/)
+- [Docker](https://www.docker.com/) — required for `make proto-gen` and integration tests
+- [Rust](https://www.rust-lang.org/tools/install) — required to build the DKLS23 native library that the Universal Validator binary links against (the core validator binary itself doesn't depend on it, but `make build` produces both)
+- [jq](https://stedolan.github.io/jq/download/) — used by setup scripts
+
+```bash
+# One-time: build the DKLS23 native library
+make build-dkls23
+
+# Build pchaind (and puniversald) into ./build/
+make build
+
+# Or install both into $GOPATH/bin
+make install
+
+# Spin up a single-node local chain (uses scripts/test_node.sh + Cosmovisor)
+make sh-testnet
+
+# Run unit tests (sets LD_LIBRARY_PATH for the native TSS lib)
+make test-unit
+
+# Run with race detector
+make test-race
+
+# Regenerate protobuf bindings (must be inside Docker)
+make proto-gen
+```
+
+### CLI
+
+```bash
+pchaind init <moniker> --chain-id push_42101-1 # initialize node home (moniker is required)
+pchaind start # run validator/full node
+pchaind status # health check
+pchaind export # export app state to JSON
+
+# Keys (cosmos-evm flavored — uses coin type 60)
+pchaind keys add
+pchaind keys list
+pchaind keys show
+
+# Custom 
module queries +pchaind q uexecutor params +pchaind q uregistry all-chain-configs +pchaind q uvalidator all-universal-validators +pchaind q uvalidator all-active-ballots +pchaind q utss current-key +pchaind q utss current-process +``` + +The full CLI surface is `pchaind --help` — autocli definitions live in each module's `autocli.go`. diff --git a/precompiles/usigverifier/README.md b/precompiles/usigverifier/README.md index bb4652fd..36666a5c 100644 --- a/precompiles/usigverifier/README.md +++ b/precompiles/usigverifier/README.md @@ -1,29 +1,119 @@ -# Universal Signature Verifier (USigVerifier) Precompile +# `usigverifier` — Universal Signature Verifier Precompile -This is the USigVerifier (Universal Signature Verifier) precompile, responsible for verifying cryptographic signatures from supported source chains. +The only EVM precompile Push Chain ships on top of the cosmos-evm baseline. Verifies Ed25519 signatures inside the EVM so Solidity contracts can authenticate Solana-style signatures (or any other Ed25519 input) without re-implementing the curve in EVM bytecode. -✅ Currently supported signature: **ed25519** +## Addresses -## Generate ABI encoding +| Address | Why it exists | +|---|---| +| `0x00000000000000000000000000000000000000ca` | Original "legacy" address. Hardcoded into contracts deployed before the address-range cleanup. | +| `0xEC00000000000000000000000000000000000001` | New address in the reserved Push precompile range (`0xEC...`). | + +Both addresses are registered simultaneously and point at the **same** implementation. Backward compatibility for previously-deployed contracts is the only reason the legacy address still exists. New code should target `0xEC00000000000000000000000000000000000001`. 
+ +Wired into `app/app.go:781-795`: + +```go +usigverifierPrecompile, _ := usigverifierprecompile.NewPrecompile() +usigverifierPrecompileV2, _ := usigverifierprecompile.NewPrecompileV2() +corePrecompiles[usigverifierPrecompile.Address()] = usigverifierPrecompile +corePrecompiles[usigverifierPrecompileV2.Address()] = usigverifierPrecompileV2 +``` + +## Solidity Interface + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity >=0.8.18; + +address constant USigVerifier_PRECOMPILE_ADDRESS = 0x00000000000000000000000000000000000000ca; +address constant USigVerifier_PRECOMPILE_ADDRESS_V2 = 0xEC00000000000000000000000000000000000001; + +interface IUSigVerifier { + /// @notice Verifies an Ed25519 signature. + /// @param pubKey The 32-byte Ed25519 public key (Solana address bytes). + /// @param msg The message digest that was signed (bytes32). + /// @param signature The 64-byte Ed25519 signature. + /// @return isValid True iff the signature is valid for (pubKey, msg). + function verifyEd25519( + bytes calldata pubKey, + bytes32 msg, + bytes calldata signature + ) external view returns (bool); +} +``` + +| Property | Value | +|---|---| +| Method | `verifyEd25519(bytes,bytes32,bytes)` | +| State mutability | `view` (no on-chain state is touched) | +| Gas cost | `4000` per call (`VerifyEd25519Gas` in `usigverifier.go`) | + +## Verification Semantics + +The precompile is intentionally narrow. 
It accepts: + +- `pubKey` — 32 raw Ed25519 public key bytes (a Solana address is exactly this) +- `msg` — a single `bytes32` digest +- `signature` — 64 raw Ed25519 signature bytes + +Internally (`query.go:VerifyEd25519`), the `bytes32` digest is **rendered as a 0x-prefixed hex string** before being passed to `ed25519.Verify`: + +```go +msgStr := "0x" + hex.EncodeToString(msg) // 66 ASCII bytes +msgBytes := []byte(msgStr) +ok = ed25519.Verify(pubKeyBytes, msgBytes, signature) +``` + +In other words, the signed message that the off-chain signer must sign is the **66-byte ASCII string** `0x...` of the digest, not the raw 32 bytes. This matches the convention used by Solana wallets when signing arbitrary messages — they prefix-encode the payload — so a normal Solana wallet signature over a Push Chain message hash will verify here without any extra work on the wallet side. + +If `pubKey` is not 32 bytes or `signature` is not 64 bytes, the precompile reverts with `invalid params`. Unknown method IDs revert with the standard `unknown method` error. 
+ +## Generating the ABI + +If `USigVerifier.sol` is changed, regenerate `abi.json` with: ```bash cd precompiles/usigverifier solcjs USigVerifier.sol --abi mv *.abi abi.json -jq --argjson abi "$(cat abi.json)" '{"_format": "hh-sol-artifact-1", "contractName": "USigVerifier", "sourceName": "precompiles/USigVerifier.sol", "bytecode": "0x", "deployedBytecode": "0x", "linkReferences": {}, "deployedLinkReferences": {}, "abi": $abi}' <<< '{}' > abi.json -cd ../../ -# jq ".abi" abi.json | abigen --abi - --pkg usigverifier --type USigVerifier --out USigVerifier.go +jq --argjson abi "$(cat abi.json)" \ + '{"_format": "hh-sol-artifact-1", "contractName": "USigVerifier", + "sourceName": "precompiles/USigVerifier.sol", + "bytecode": "0x", "deployedBytecode": "0x", + "linkReferences": {}, "deployedLinkReferences": {}, + "abi": $abi}' <<< '{}' > abi.json ``` -## Verification +The Go binary embeds `abi.json` via `//go:embed`, so a fresh `make build` will pick up the change. + +## Testing from the Command Line ```bash -# if you just get 0x, make sure the address is in the app_state["evm"]["params"]["active_static_precompiles"] - -# precompile directly -cast abi-decode "verifyEd25519(bytes,bytes32,bytes)(bool)" `cast call 0x00000000000000000000000000000000000000ca "verifyEd25519(bytes,bytes32,bytes)" \ - "5DgQvTf6BvVs5Y4vNFnB5iXvTQvZah7y2JbT1dFxN6T2" \ - 0x68656c6c6f776f726...bytes32_message_here \ - 0x6f7c...your_signature_here -` +# Make sure the precompile is enabled in the EVM params: +# app_state["evm"]["params"]["active_static_precompiles"] must include +# 0x00000000000000000000000000000000000000ca and/or 0xEC00000000000000000000000000000000000001 +# (test_node.sh installs the legacy address by default). 
+
+cast call 0xEC00000000000000000000000000000000000001 \
+  "verifyEd25519(bytes,bytes32,bytes)" \
+  "<32-byte pubKey hex>" \
+  "<bytes32 message digest hex>" \
+  "<64-byte signature hex>"
+
+# Decode the boolean response
+cast abi-decode "verifyEd25519(bytes,bytes32,bytes)(bool)" <raw return data from the call>
+```
+
+If the call returns `0x` (empty), the precompile is not in `active_static_precompiles` for the current chain — that's a configuration issue, not a verification failure.
+
+## Layout
+
+```
+precompiles/usigverifier/
+|-- USigVerifier.sol Solidity interface (the source of truth for the ABI)
+|-- abi.json Embedded into the binary via go:embed
+|-- usigverifier.go Precompile struct, NewPrecompile / NewPrecompileV2, RequiredGas, Run
+|-- query.go VerifyEd25519 method handler
++-- README.md (this file)
 ```
diff --git a/readme.md b/readme.md
index aa37d9ac..99f18547 100755
--- a/readme.md
+++ b/readme.md
@@ -57,14 +57,20 @@ make sh-testnet
 
 ## Directory Structure
 
-- `app/` – Core application logic and configuration
-- `x/` – Cosmos SDK modules (UExecutor, UTxVerifier, etc.)
-- `precompiles/` – EVM precompiles for universal verification
-- `proto/` – Protobuf definitions
-- `cmd/` – CLI entrypoints
-- `deploy/` – Deployment scripts and testnet configs
+- `app/` – Core validator application wiring (`pchaind`). See [`app/README.md`](./app/README.md) for what Push Chain adds on top of cosmos-evm.
+- `x/` – Push Chain custom Cosmos SDK modules:
+  - [`uexecutor`](./x/uexecutor/README.md) – Universal transaction execution layer
+  - [`uregistry`](./x/uregistry/README.md) – Chain & token registry
+  - [`uvalidator`](./x/uvalidator/README.md) – Universal validator set, ballot voting & UV reward boost
+  - [`utss`](./x/utss/README.md) – Threshold signature scheme coordination
+- `precompiles/` – Custom EVM precompiles ([`usigverifier`](./precompiles/usigverifier/README.md) — Ed25519 signature verification)
+- `universalClient/` – The Universal Validator binary (`puniversald`). 
See [`universalClient/README.md`](./universalClient/README.md). +- `proto/` – Protobuf definitions for the four custom modules +- `cmd/` – CLI entrypoints (`pchaind`, `puniversald`) +- `config/` – Per-chain JSON registry configs (mainnet, testnet) +- `testnet/` – Validator setup scripts (core + universal) - `interchaintest/` – E2E and integration tests -- `utils/` – Utility functions +- `utils/` – Shared utility functions ## Contributing diff --git a/x/uexecutor/README.md b/x/uexecutor/README.md index 3c5aed2d..02a2398a 100755 --- a/x/uexecutor/README.md +++ b/x/uexecutor/README.md @@ -1,13 +1,274 @@ -# Universal Executor (UExecutor) Module +# `x/uexecutor` — Universal Transaction Executor -This is a UExecutor (Universal Executor) module, primarily responsible for executing actions originating from other source chains. This module serves as the execution layer in universal workflows. +The execution layer for Push Chain's crosschain protocol. Owns the lifecycle of every `UniversalTx` (UTX) — from inbound observation through Push Chain execution to outbound completion — and is the only module that drives the EVM-side universal contracts (UEA factory, gateway PC, chain meta oracle). -## Responsibilities +## What It Does -- Deploying Universal Executor Accounts -- Minting native tokens -- Executing payloads +- **Tally inbound votes** from Universal Validators (UVs). Once 2/3+ vote the same observation, finalize the inbound and execute it on Push Chain (deposit funds, run the user's payload through their UEA). +- **Track pending outbounds** created as a side-effect of Push Chain execution, and tally UV votes on whether they were successfully broadcast on the destination chain (or have permanently failed and need a refund). +- **Maintain the chain meta oracle** (gas price + block height per external chain) by tallying votes from UVs and writing the result back to the EVM so contracts can read it. 
+- **Issue derived EVM calls** as the `uexecutor` module account, with a manually managed nonce, so the module can deploy and call universal contracts on behalf of itself. -## Getting Started +## State (KV layout) -This module is intended to provide execution capabilities for actions originating from external chains. \ No newline at end of file +| Prefix | Collection | Type | Purpose | +|---|---|---|---| +| `0` | `Params` | `Item[Params]` | Module parameters | +| `2` | `PendingInbounds` | `KeySet[string]` | UTX IDs of inbounds awaiting tally / execution | +| `3` | `UniversalTx` | `Map[string, UniversalTx]` | Canonical UTX record. Key = `sha256(sourceChain:txHash:logIndex)` | +| `4` | `ModuleAccountNonce` | `Item[uint64]` | Manual nonce for `DerivedEVMCall` from the module account | +| `5` | `GasPrices` | `Map[string, GasPrice]` | **Deprecated** — replaced by `ChainMetas`, kept only for genesis import | +| `6` | `ChainMetas` | `Map[string, ChainMeta]` | Aggregated gas price + block height per CAIP-2 chain | +| `7` | `PendingOutbounds` | `Map[string, PendingOutboundEntry]` | Secondary index of outbounds in `PENDING` status | + +## The `UniversalTx` Record + +`UniversalTx` (UTX) is the canonical, end-to-end record of a single crosschain transaction as it travels through Push Chain. One UTX is created per observed inbound and lives forever (it is never deleted, only mutated as new pieces of evidence arrive). It is the only object in the module that the rest of the protocol — Universal Validators, the JSON-RPC layer, indexers, the explorer — needs to read in order to know what's happening with a given crosschain action. 
+ +```protobuf +message UniversalTx { + string id = 1; // sha256(sourceChain:txHash:logIndex) + Inbound inbound_tx = 2; // the source-chain observation that opened this UTX + repeated PCTx pc_tx = 3; // every Push Chain execution this UTX produced + repeated OutboundTx outbound_tx = 4; // every outbound this UTX spawned (and their results) + string revert_error = 6; // non-empty if revert-outbound attachment failed +} +``` + +The UTX is intentionally append-mostly. Components are filled in over time as the protocol progresses; nothing is overwritten. Field `5` is reserved (a removed `UniversalTxStatus` enum field — see below for why status is computed instead of stored). + +### The Three Components + +#### 1. `Inbound` — the source-chain observation + +Filled in once, when the inbound vote is finalized. After that, it is read-only. + +```protobuf +message Inbound { + string source_chain = 1; // CAIP-2, e.g. "eip155:11155111" + string tx_hash = 2; // unique source-chain tx hash + string sender = 3; // source-chain sender address + string recipient = 4; // destination address on Push Chain (UEA or contract) + string amount = 5; // bridged amount (synthetic token, uint256 as string) + string asset_addr = 6; // source-chain ERC20 / native token address + string log_index = 7; // log index that emitted this inbound (uniqueness within tx) + TxType tx_type = 8; // see TxType table below + UniversalPayload universal_payload = 9; // the user's intent (decoded from raw_payload) + string verification_data = 10; // bytes the UEA uses to authenticate the payload + RevertInstructions revert_instructions = 11; // where funds go on revert + bool isCEA = 12; // recipient is a contract (CEA) instead of a UEA + string raw_payload = 13; // hex-encoded raw event bytes (decoded by core validator) +} +``` + +#### 2. 
`PCTx` — Push Chain execution + +A list, because a single inbound can spawn multiple Push Chain executions (the deposit tx, the payload-execution tx, and possibly a revert tx all live as separate `PCTx` entries on the same UTX). + +```protobuf +message PCTx { + string tx_hash = 1; // hash of the EVM tx the core validator produced (DerivedEVMCall) + string sender = 2; // who initiated it (user-derived address, or uexecutor module) + uint64 gas_used = 3; // populated from the tx receipt + uint64 block_height = 4; // Push Chain block this was committed in + string status = 6; // "SUCCESS" or "FAILED" + string error_msg = 7; // populated when status == "FAILED" +} +``` + +These hashes correspond to real EVM transactions you can fetch from `eth_getTransactionByHash` — see [`DERIVED_TRANSACTIONS.md`](../../DERIVED_TRANSACTIONS.md) for why module-originated calls produce real receipts. + +#### 3. `OutboundTx` — outbounds spawned by Push Chain execution + +A list, because one inbound's payload can fan out into multiple destination-chain transactions (e.g. a multi-hop cross-chain swap or a batched refund). 
+ +```protobuf +message OutboundTx { + string destination_chain = 1; // CAIP-2 of the destination + string recipient = 2; + string amount = 3; + string external_asset_addr = 4; + string prc20_asset_addr = 5; + string sender = 6; + string payload = 7; + string gas_limit = 8; + TxType tx_type = 9; + OriginatingPcTx pc_tx = 10; // which PCTx (and log) created this outbound + OutboundObservation observed_tx = 11; // populated once UVs vote the destination-chain result + string id = 12; // deterministic outbound ID + Status outbound_status = 13; // PENDING -> OBSERVED | REVERTED | ABORTED + RevertInstructions revert_instructions = 14; + PCTx pc_revert_execution = 15; // PC tx that ran the revert path (nil if not reverted) + string gas_price = 16; // destination-chain gas price snapshot + string gas_fee = 17; // amount paid to relayer + PCTx pc_refund_execution = 18; // PC tx that ran the unused-gas refund (nil if no refund) + string refund_swap_error = 19; // non-empty if the swap-refund leg failed + string gas_token = 20; // PRC20 used to pay relayer + string abort_reason = 21; // human-readable reason if outbound was aborted +} +``` + +`OutboundObservation` is what UVs vote in via `MsgVoteOutbound`: + +```protobuf +message OutboundObservation { + bool success = 1; + uint64 block_height = 2; + string tx_hash = 3; + string error_msg = 4; + string gas_fee_used = 5; // actual gas spent on destination — used to compute refund +} +``` + +### `TxType` — what flavour of crosschain action + +The same enum is used on both `Inbound` and `OutboundTx` to describe what the message is for. + +| `TxType` | Inbound semantics | Outbound semantics | +|---|---|---| +| `GAS` | User pre-paid gas on the source chain. Mints PC to the recipient as a gas top-up. | Refund of unused gas back to a source chain. | +| `GAS_AND_PAYLOAD` | Gas top-up + executes a payload through the recipient's UEA in the same Push Chain tx. | Same combo on the destination side. 
| +| `FUNDS` | Pure synthetic transfer — mints PRC20 representation of an external token. | Pure transfer of a PRC20 back out of Push Chain. | +| `FUNDS_AND_PAYLOAD` | Mints funds + runs a payload (e.g. deposit + DEX swap atomically). | Funds delivery with a destination-side call. | +| `PAYLOAD` | Pure payload execution, no value movement. | Pure call on the destination chain. | +| `INBOUND_REVERT` | Reverts a previously-executed inbound (returns funds to the source-chain sender). | — | +| `RESCUE_FUNDS` | Admin-driven rescue path for stuck funds. | Outbound that delivers the rescue. | + +### Status is derived from component state, not stored + +The current `UniversalTx` record has **no status field at all**. Field `5` is reserved precisely because the old `UniversalTxStatus` enum field was removed in favour of computing status on the fly from the underlying components. This avoids the staleness class of bugs where a stored status gets out of sync with the actual outbounds/PC txs after a partial update. + +Instead, callers ask "what's the state of this UTX?" by inspecting: + +- whether `OutboundTx[]` is non-empty, and the per-entry `outbound_status` (`PENDING` / `OBSERVED` / `REVERTED` / `ABORTED`) +- whether `PcTx[]` is non-empty, and each entry's `status` string (`"SUCCESS"` / `"FAILED"`) +- whether `InboundTx` is set + +The priority for any rollup view is **outbounds > PC txs > inbound presence**: as soon as an outbound exists, the UTX is "in the outbound phase" regardless of how the PC txs went; before that, PC tx state dominates; before that, the UTX is just a recorded inbound waiting to be executed. + +> **Note on `UniversalTxStatus` (legacy enum).** The `UniversalTxStatus` proto enum (`PENDING_INBOUND_EXECUTION`, `PC_EXECUTED_SUCCESS`, `OUTBOUND_PENDING`, ...) is **only** used by the legacy query response shape `UniversalTxLegacy`. 
The v1 `GetUniversalTx` query converts the current record into `UniversalTxLegacy` and synthesises the status field via `computeUniversalStatus` in `keeper/query_server.go` purely for client backward compatibility. Anything new built against `x/uexecutor` should consume the live components on `UniversalTx` directly and compute the status it cares about, instead of depending on the legacy enum. + +### `Status` — per-outbound status + +`OutboundTx.outbound_status` uses a separate, narrower enum: + +| `Status` | Meaning | +|---|---| +| `PENDING` | Outbound created on Push Chain, waiting for UVs to broadcast and vote | +| `OBSERVED` | UVs voted the outbound was successfully broadcast on the destination chain | +| `REVERTED` | UVs voted the outbound permanently failed; revert path triggered | +| `ABORTED` | Finalization or revert attachment failed and requires manual intervention | + +### Lifecycle Walkthrough + +A typical `FUNDS_AND_PAYLOAD` inbound, end to end: + +``` +1. UV observes a source-chain gateway event. +2. UV submits MsgVoteInbound. The UTX is created the moment the first vote + arrives, with id = sha256(sourceChain:txHash:logIndex). Only the + InboundTx field is populated; PcTx and OutboundTx are empty. + (UTX id is also added to PendingInbounds.) + +3. Threshold of UV votes reached. The keeper executes the inbound: + a. Mints the PRC20 to the recipient's UEA address. + A new PCTx (deposit) is appended to UTX.PcTx. + b. Runs the universal payload through the UEA. + A second PCTx (executeUniversalTx) is appended. + (UTX id removed from PendingInbounds.) + +4. The payload triggered a destination-chain call (e.g. release funds on + another chain). An OutboundTx is created with Status_PENDING and + appended to UTX.OutboundTx. It is also indexed in PendingOutbounds. + +5. UVs sign the outbound via TSS, broadcast it, and vote the result back + via MsgVoteOutbound. The OutboundTx.observed_tx is filled in and + outbound_status flips to OBSERVED. 
The PendingOutbounds entry is
+   removed.
+
+6. If the destination chain refunds excess gas, a refund PCTx runs on
+   Push Chain. OutboundTx.pc_refund_execution is set on that outbound. The
+   refund is just additional evidence attached to the existing OutboundTx.
+```
+
+At every step the UTX is mutated **append-mostly**: new entries are added to `pc_tx` and `outbound_tx`, existing entries are updated in place, and the live state of those slices is the only source of truth for "what's happening" with this UTX.
+
+## Messages (`MsgServer`)
+
+| Message | Authority | Gasless? | Purpose |
+|---|---|---|---|
+| `MsgVoteInbound` | bonded UV | yes | Vote an observed source-chain inbound |
+| `MsgVoteOutbound` | bonded UV | yes | Vote that an outbound was broadcast (or failed) on the destination chain |
+| `MsgVoteChainMeta` | bonded UV | yes | Vote on observed gas price + block height for a chain |
+| `MsgExecutePayload` | any | yes | Execute a payload on a UEA (the UEA itself authenticates via `verificationData`) |
+| `MsgMigrateUEA` | any | yes | Migrate a UEA to a newer implementation (also self-authenticated) |
+| `MsgUpdateParams` | gov | no | Update module params |
+
+Vote messages check `IsBondedUniversalValidator` and `IsTombstonedUniversalValidator` on `x/uvalidator` before accepting the vote. Tombstoned validators are silently rejected.
+
+## Queries
+
+- `Params`
+- `GetUniversalTx` — fetch a single UTX by ID. The v1 endpoint returns the legacy `UniversalTxLegacy` shape (with a synthesised `UniversalTxStatus` for backward compatibility); the v2 endpoint returns the live `UniversalTx` directly.
+- v2 query server (`query_server_v2.go`) provides additional iterators over UTX state
+
+See `keeper/query_server.go` and `keeper/query_server_v2.go` for the full surface.
+ +## Inter-module Dependencies + +The keeper holds references to: +- `evmKeeper` — for `DerivedEVMCall` (deploy contracts, mint, refund, push chain meta) +- `feemarketKeeper` — for current Push Chain gas price +- `bankKeeper` — for native transfers +- `accountKeeper` — for the `uexecutor` module account +- `uregistryKeeper` — to look up chain configs and token configs +- `uvalidatorKeeper` — to gate votes on bonded/tombstoned status, and to drive the generic ballot machine + +It does not export any hooks; other modules call into it (not the other way around). + +## EVM Integration + +`x/uexecutor` is unusual in that it issues EVM calls as a Cosmos module. On fresh genesis (`Exported=false`) it deploys the **UEA factory** contract. Thereafter, every inbound execution, refund, swap quote, and chain-meta update flows through `DerivedEVMCall` with the manually tracked `ModuleAccountNonce` so successive calls in the same block don't collide. + +Re-deploying the factory on genesis import is explicitly skipped — see `keeper.go:155-159` — because that would overwrite live EVM state and shift the deterministic addresses of every UEA on chain. + +## Genesis + +```protobuf +GenesisState { + Params params + repeated string pending_inbounds + repeated UTXEntry universal_txs + uint64 module_account_nonce + repeated GasPrice gas_prices // legacy + repeated ChainMeta chain_metas + repeated Outbound pending_outbounds + bool exported // skip factory deploy if true +} +``` + +## Block Lifecycle + +`x/uexecutor` does not implement a `BeginBlocker` or `EndBlocker` — the module is listed in the manager's order arrays as a placeholder, but all real work happens synchronously in the message handlers. Vote tallying, inbound execution, outbound creation, and chain-meta updates are all triggered by incoming `Msg*` calls. 
+ +## Layout + +``` +x/uexecutor/ +|-- keeper/ +| |-- keeper.go State + dependencies +| |-- msg_server.go MsgVoteInbound, MsgVoteOutbound, MsgVoteChainMeta, ExecutePayload, MigrateUEA +| |-- query_server.go v1 queries +| |-- query_server_v2.go v2 queries +| +-- ... inbound execution, outbound creation, chain meta, derived EVM calls +|-- types/ +| |-- types.pb.go UniversalTx, Inbound, ChainMeta, PendingOutboundEntry, enums +| |-- params.go Params (currently a single placeholder field) +| |-- keys.go Store prefixes + ID generators +| |-- abi.go, decode_payload.go, gateway_pc_event_decode.go, caip2.go +| +-- expected_keepers.go Interfaces for evm/feemarket/bank/account/uregistry/uvalidator +|-- migrations/ v2, v4, v5 — params shape, UTX restructure, GasPrices -> ChainMetas +|-- module.go AppModule wiring +|-- autocli.go CLI auto-registration ++-- depinject.go Dependency injection +``` diff --git a/x/uregistry/README.md b/x/uregistry/README.md index 50d4215a..3617e81d 100755 --- a/x/uregistry/README.md +++ b/x/uregistry/README.md @@ -1,12 +1,113 @@ -# Universal Registry (URegistry) Module +# `x/uregistry` — Chain & Token Registry -The **Universal Registry (URegistry)** module is primarily responsible for managing metadata and configurations necessary for enabling cross-chain interoperability. +The configuration layer for Push Chain's crosschain protocol. Maintains the source of truth for which external chains and which tokens on those chains the protocol talks to. Every other Push module reads from `uregistry`; nobody else writes to it. -## Responsibilities +## What It Does -- Registering and storing supported external chain configurations -- Whitelisting tokens and gateways for inbound or outbound operations +- **Stores chain configs** — for each supported external chain (CAIP-2 keyed): public RPC URL, gateway contract address, gateway/vault method identifiers, block confirmation thresholds, gas oracle fetch interval, VM type, and inbound/outbound enabled flags. 
+- **Stores token configs** — per (chain, token address): symbol, decimals, native PRC20 representation, liquidity cap, ERC20/SPL/etc. type. +- **Deploys reserved system contracts** — on fresh genesis, deploys `UNIVERSAL_GATEWAY_PC` and reserved proxy slots into the EVM at deterministic addresses (`0x...C1`, `0x...B0`, `0x...B1`, `0x...B2`). +- **Exposes lookup helpers** for the rest of the codebase, including `GetTokenConfigByPRC20` (reverse lookup from a PRC20 contract address to its source-chain token). -## Getting Started +## State (KV layout) -This module serves as the metadata layer for universal workflows. \ No newline at end of file +| Prefix | Collection | Type | Purpose | +|---|---|---|---| +| `0` | `Params` | `Item[Params]` | Module parameters (admin address) | +| `1` | `ChainConfigs` | `Map[string, ChainConfig]` | Per-CAIP-2 chain configuration | +| `2` | `TokenConfigs` | `Map[string, TokenConfig]` | Token configuration, keyed by `chain:address` | + +The `ChainConfig` schema (selected fields): + +```protobuf +message ChainConfig { + string chain = 1; // CAIP-2 (e.g. "eip155:11155111") + string public_rpc_url = 2; + VmType vm_type = 3; // EVM | SVM | MOVE_VM | WASM_VM | ... 
+ string gateway_address = 4; + repeated GatewayMethods gateway_methods = 5; + repeated VaultMethods vault_methods = 6; + BlockConfirmation block_confirmation = 7; // fast & standard inbound counts + uint64 gas_oracle_fetch_interval = 8; + ChainEnabled enabled = 9; // is_inbound_enabled, is_outbound_enabled +} +``` + +## Messages (`MsgServer`) + +| Message | Authority | Purpose | +|---|---|---| +| `MsgAddChainConfig` | admin (`params.Admin`) | Register a new external chain | +| `MsgUpdateChainConfig` | admin | Modify an existing chain config | +| `MsgAddTokenConfig` | admin | Whitelist a token on a chain | +| `MsgUpdateTokenConfig` | admin | Modify a token config | +| `MsgRemoveTokenConfig` | admin | Remove a token from the whitelist | +| `MsgUpdateParams` | gov | Rotate the admin or update other params | + +There is no validator-vote path here — chain and token additions are intentionally admin-curated. The expected workflow is gov passes `MsgUpdateParams` to install an admin key, and the admin executes config changes day-to-day. + +## Queries + +- `Params` +- `ChainConfig` — by CAIP-2 ID +- `AllChainConfigs` — paginated list +- `TokenConfig` — by (chain, address) +- `AllTokenConfigs` — paginated list +- `TokenConfigsByChain` — filter by chain + +## Inter-module Dependencies + +The keeper holds: +- `evmKeeper` — for deploying system contracts on genesis + +It exports no hooks. `x/uexecutor` and `x/utss` call its lookup helpers (`GetChainConfig`, `IsChainInboundEnabled`, `IsChainOutboundEnabled`, `GetTokenConfig`, `GetTokenConfigByPRC20`) but never write. 
+ +## EVM Integration + +On fresh genesis (`Exported=false`), `InitGenesis` calls `deploySystemContracts` to install: + +| Slot | Address | +|---|---| +| `UNIVERSAL_GATEWAY_PC` | `0x00000000000000000000000000000000000000C1` (proxy) | +| `RESERVED_0` | `0x00000000000000000000000000000000000000B0` | +| `RESERVED_1` | `0x00000000000000000000000000000000000000B1` | +| `RESERVED_2` | `0x00000000000000000000000000000000000000B2` | +| `UNIVERSAL_BATCH_CALL` | `0x00000000000000000000000000000000000000Bc` | + +These are EIP-1967 transparent proxies — runtime-deployed bytecode is committed verbatim in `keeper.go`. Helper functions `ReserveUGPC` and `FixReservedBytecode` exist for in-place upgrade migrations to (re)install bytecode without redeploying through normal EVM calls. + +## Genesis + +```protobuf +GenesisState { + Params params + repeated ChainConfigEntry chain_configs + repeated TokenConfigEntry token_configs + bool exported // skip system-contract deploy if true +} +``` + +Default admin in `params.go`: `push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a`. + +## Configuration Files + +The on-disk JSON registry under `/config/{mainnet,testnet-donut}//` is what operators use to seed `uregistry` at genesis or via admin txs. Each chain has a `chain.json` plus a `tokens/` directory of per-token JSONs. See `/config/testnet-donut/eth_sepolia/` for the canonical example. + +## Layout + +``` +x/uregistry/ +|-- keeper/ +| |-- keeper.go State, lookups, system-contract deployment +| |-- msg_server.go AddChainConfig, AddTokenConfig, ... 
+| +-- query_server.go gRPC queries +|-- types/ +| |-- types.pb.go ChainConfig, TokenConfig, GatewayMethods, VaultMethods, enums +| |-- params.go Admin field +| |-- keys.go Store prefixes +| |-- chain_config.go, block_confirmation.go, gateway_methods.go, chain_enabled.go +| +-- expected_keepers.go EVMKeeper interface +|-- module.go +|-- autocli.go ++-- depinject.go +``` diff --git a/x/utss/README.md b/x/utss/README.md index de166ff6..7f08737c 100755 --- a/x/utss/README.md +++ b/x/utss/README.md @@ -1,13 +1,107 @@ -# Universal Transaction Verification (utss) Module +# `x/utss` — Threshold Signature Scheme -This is utss (Universal Transaction Verification) module. +The on-chain coordination layer for Push Chain's TSS key. The actual DKLS protocol runs off-chain inside the Universal Validator binary (`puniversald`); this module is the deterministic state machine that schedules processes, tallies validator votes about what happened off-chain, and serves as the canonical record of which TSS key is active. -## Responsibilities +## What It Does -- Verifying transaction hashes of funds locked on source chains -- Performing RPC calls to external chains -- Storing verified transaction hashes for reference and validation +- **Schedules TSS key processes** — admin-initiated keygen, refresh, and quorum-change events. Each process is given a deterministic `process_id` and tracked through history. +- **Stores the active TSS key** — `CurrentTssKey` is the single source of truth for which key signs outbound transactions. `TssKeyHistory` retains every key that has ever existed (never deleted, used by fund migration). +- **Tallies UV votes on TSS events** — every fine-grained step of an off-chain DKLS run (setup message produced, key derived, vote-to-finalize) is voted onto chain via `MsgVoteTssKeyProcess`. The module finalizes events through the generic ballot machine in `x/uvalidator`. 
+- **Coordinates fund migration** — when a `KEYGEN` produces a new public key, funds locked under the old key on every external chain need to move to the new key. Each (old_key, chain) pair becomes a `FundMigration` record; UVs broadcast the migration tx off-chain and vote success/failure on chain. -## Overview +## State (KV layout) -The utss module acts as the verification layer in a universal system, ensuring the authenticity of transactions before execution on the destination chain. \ No newline at end of file +| Prefix | Collection | Type | Purpose | +|---|---|---|---| +| `0` | `Params` | `Item[Params]` | Module parameters (admin address) | +| `1` | `NextProcessId` | `Sequence` | Auto-increment for process IDs | +| `2` | `CurrentTssProcess` | `Item[TssKeyProcess]` | Active in-flight process (may be empty) | +| `3` | `ProcessHistory` | `Map[uint64, TssKeyProcess]` | All past processes by ID | +| `4` | `CurrentTssKey` | `Item[TssKey]` | Currently active finalized key | +| `5` | `TssKeyHistory` | `Map[string, TssKey]` | All keys ever finalized, keyed by `key_id` | +| `6` | `TssEvents` | `Map[uint64, TssEvent]` | Per-event records produced during a process | +| `7` | `NextTssEventId` | `Sequence` | Auto-increment for event IDs | +| `8` | `PendingTssEvents` | `Map[uint64, uint64]` | `process_id -> event_id` index of in-flight events | +| `9` | `FundMigrations` | `Map[uint64, FundMigration]` | Migration records by ID | +| `10` | `NextMigrationId` | `Sequence` | Auto-increment for migration IDs | +| `11` | `PendingMigrations` | `Map[uint64, uint64]` | `migration_id -> migration_id` pending index | + +`PendingTssEvents` and `PendingMigrations` are deliberately structured as `uint64 -> uint64` indexes so the keeper can iterate "everything currently in flight" without scanning the full history. + +## Process Types + +| Type | Public key | On-chain addresses | Triggers fund migration? 
| +|---|---|---|---| +| `KEYGEN` | new | new | yes — funds must move to the new addresses on every chain | +| `REFRESH` | unchanged | unchanged | no — only keyshares are redistributed | +| `QUORUM_CHANGE` | unchanged | unchanged | no — only the participant set changes | + +`KEYGEN` is the heaviest operation: it lets the protocol periodically rotate the master key as a security uplift, but it forces a coordinated migration of every locked balance on every connected chain. + +## Messages (`MsgServer`) + +| Message | Authority | Gasless? | Purpose | +|---|---|---|---| +| `MsgInitiateTssKeyProcess` | admin | no | Start a new keygen / refresh / quorum-change | +| `MsgVoteTssKeyProcess` | bonded UV | yes | Vote on a TSS event during an active process | +| `MsgInitiateFundMigration` | admin | no | Open a migration record for an old key on a specific chain | +| `MsgVoteFundMigration` | bonded UV | yes | Vote success or failure on a fund migration tx | +| `MsgUpdateParams` | gov | no | Rotate admin or update other params | + +Vote messages gate on `IsBondedUniversalValidator` and `IsTombstonedUniversalValidator` from `x/uvalidator`. The two vote messages are gasless so UVs can participate without holding gas tokens. + +## Queries + +- `Params` +- `CurrentProcess`, `ProcessById`, `AllProcesses` +- `CurrentKey`, `KeyById` +- Plus event and migration queries (see `keeper/query_server.go`) + +## Inter-module Dependencies + +The keeper holds: +- `uvalidatorKeeper` — bonded/tombstoned checks, generic ballot machine +- `uregistryKeeper` — chain lookups for fund migration +- `uexecutorKeeper` — to update UTX state when migration affects in-flight outbounds + +It exports no hooks; other modules read `CurrentTssKey` to know what address signs outbounds. + +## Genesis + +```protobuf +GenesisState { + Params params + TssKeyProcess? current_tss_process + repeated TssKeyProcessEntry process_history + TssKey? 
current_tss_key + repeated TssKeyEntry tss_key_history + uint64 next_process_id + repeated TssEvent tss_events + uint64 next_tss_event_id + repeated FundMigrationEntry fund_migrations + uint64 next_migration_id +} +``` + +`PendingMigrations` is reconstructed from `FundMigrations` during `InitGenesis` by re-indexing every entry whose status is `FUND_MIGRATION_STATUS_PENDING`. + +Default admin in `params.go`: `push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a`. + +## Layout + +``` +x/utss/ +|-- keeper/ +| |-- keeper.go State + lifecycle +| |-- msg_server.go InitiateTssKeyProcess, VoteTssKeyProcess, InitiateFundMigration, VoteFundMigration +| +-- query_server.go gRPC queries +|-- types/ +| |-- types.pb.go TssKeyProcess, TssKey, TssEvent, FundMigration, enums +| |-- params.go Admin field +| |-- keys.go Store prefixes + ballot key generators (sha256 of canonical inputs) +| |-- tss_key.go, tss_key_process.go, msg_tss_key_process.go +| +-- expected_keepers.go UValidatorKeeper, URegistryKeeper, UExecutorKeeper interfaces +|-- module.go +|-- autocli.go ++-- depinject.go +``` diff --git a/x/uvalidator/README.md b/x/uvalidator/README.md index a08dfcf3..bc22f7ab 100755 --- a/x/uvalidator/README.md +++ b/x/uvalidator/README.md @@ -1,13 +1,194 @@ -# Universal Validator (UValidator) Module +# `x/uvalidator` — Universal Validator Set, Ballot Voting & Reward Boost -The **Universal Validator (UValidator)** module is responsible for managing the validator set and coordinating votes related to cross-chain operations. +The consensus coordination layer for Push Chain's crosschain protocol. Three responsibilities live here: -## Responsibilities +1. **Maintain the Universal Validator (UV) set** — the subset of standard Cosmos validators that have been approved to additionally run a `puniversald` worker and participate in crosschain consensus. +2. **Run the generic ballot machine** that every other Push module votes through (inbound, outbound, chain meta, TSS events, fund migrations all use it). 
+3. **Boost UV rewards** — in `BeginBlocker`, intercept the FeeCollector balance and allocate an extra `0.148x` portion to active UVs so running a Universal Validator is economically attractive. -- Managing the universal validator set across supported chains -- Creating and tracking ballots for voting on external chain operations -- Coordinating and recording validator votes on observed events +## What It Does -## Getting Started +### Universal Validator Lifecycle -This module serves as the consensus layer for verifying and approving cross-chain messages and actions. +A standard Cosmos validator becomes a UV by being added by the admin. Lifecycle: + +``` + AddUniversalValidator RemoveUniversalValidator +PENDING_JOIN ---------------> ACTIVE ---------------------------> PENDING_LEAVE -----> LEFT + (admin) (admin) (gradual) + +Slashing-driven side states: TOMBSTONED (terminal — can never return) +``` + +The status is stored as `LifecycleInfo` on the `UniversalValidator` record. `UpdateUniversalValidator` lets the validator self-update its crosschain identity (network info, public keys for external chains) without needing admin approval. `UpdateUniversalValidatorStatus` is admin-gated for everything else. + +Bonded check (`IsBondedUniversalValidator`) requires: +1. The validator is in `UniversalValidatorSet` +2. The validator exists in the staking module +3. The validator's status is `BONDED` + +Tombstone check (`IsTombstonedUniversalValidator`) consults the slashing keeper directly, so any double-sign by the underlying core validator immediately removes their UV from the eligible voter set. 
+ +### Generic Ballot Machine + +Every crosschain observation (in `x/uexecutor` and `x/utss`) is voted through this single mechanism: + +```go +ballot, finalized, isNew, err := k.VoteOnBallot( + ctx, + ballotId, // canonical hash of the observation + ballotType, // INBOUND | OUTBOUND | CHAIN_META | TSS_EVENT | FUND_MIGRATION + voter, // signer's bech32 address + voteResult, // SUCCESS | FAILURE + eligibleVoters, // snapshot of UVs at ballot creation + votesNeeded, // threshold (caller decides 2/3, 100%, simple majority, ...) + expiryAfterBlocks, // ballot auto-expires after this many blocks +) +``` + +A ballot is created lazily on the first vote, indexed in `ActiveBallotIDs`, and finalizes the moment either: +- `yesVotes >= votingThreshold` -> `BALLOT_STATUS_PASSED` +- `eligibleVoters - noVotes < votingThreshold` (the threshold is now mathematically unreachable) -> `BALLOT_STATUS_REJECTED` + +On finalization, the ballot is moved from `ActiveBallotIDs` -> `FinalizedBallotIDs`. Expired ballots that never reached threshold are moved to `ExpiredBallotIDs`. + +The ballot type is opaque — `x/uvalidator` doesn't care what's being voted on. The ballot ID is a `sha256` of the canonical observation, so two validators voting on the same observation hit the same ballot deterministically. + +### UV Reward Boost (BeginBlocker) + +`x/uvalidator`'s `BeginBlocker` runs **before** the standard distribution module's `BeginBlocker` and reshapes the fee distribution: + +``` + 1.148x effective power + for active UVs +fees collected +-------------------------------------------+ +in previous ---->| uvalidator BeginBlocker | +block ---->| | + | 1. Compute effective_total_power: | + | sum( vote.power * 1.148 if UV | + | vote.power else ) | + | | + | 2. For each UV vote, allocate | + | fees * (vote.power * 0.148) | + | / effective_total_power | + | to the validator via distribution | + | module's AllocateTokensToValidator | + | | + | 3. 
Forward the boost coins to the | + | distribution module account so | + | accounting matches | + | | + | 4. Send the remaining coins back to | + | the FeeCollector | + +-------------------------------------------+ + | + v + standard distribution BeginBlocker + runs as usual on the remaining fees +``` + +Constants in `abci.go`: + +```go +const BoostMultiplier = "1.148" // applied to UV power when computing the denominator +const ExtraBoostPortion = "0.148" // numerator for the UV-specific allocation +``` + +Net effect: a validator that runs a UV earns ~14.8% more block rewards than a non-UV with the same stake. This is the only economic incentive baked into the protocol for running a UV — it has to make sense as a business for permissioned operators. + +> **Note on community tax** — The boost math is correct only when community tax is `0`. With a non-zero community tax, the UV boost is taken from the full fee amount before tax is applied to the remainder, so the community pool sees a slightly smaller share than configured. This is documented inline in `abci.go`. + +## State (KV layout) + +| Prefix | Collection | Type | Purpose | +|---|---|---|---| +| `0` | `Params` | `Item[Params]` | Module parameters (admin address) | +| `2` | `UniversalValidatorSet` | `Map[sdk.ValAddress, UniversalValidator]` | Registered UVs with lifecycle info and crosschain identity | +| `3` | `Ballots` | `Map[string, Ballot]` | All ballots ever created | +| `4` | `ActiveBallotIDs` | `KeySet[string]` | Ballots currently collecting votes | +| `5` | `ExpiredBallotIDs` | `KeySet[string]` | Expired (not yet pruned) ballots | +| `6` | `FinalizedBallotIDs` | `KeySet[string]` | `PASSED` or `REJECTED` ballots | + +(Prefix `1` was historically used by an obsolete `core_to_universal` mapping and is left unused for migration compatibility.) 
+ +## Messages (`MsgServer`) + +| Message | Authority | Purpose | +|---|---|---| +| `MsgAddUniversalValidator` | admin | Register a core validator as a UV (`PENDING_JOIN`) | +| `MsgRemoveUniversalValidator` | admin | Begin removing a UV (`PENDING_LEAVE`) | +| `MsgUpdateUniversalValidatorStatus` | admin | Force-set lifecycle status (escape hatch) | +| `MsgUpdateUniversalValidator` | self | The UV updates its own crosschain identity (network info / external pubkeys) | +| `MsgUpdateParams` | gov | Rotate admin or update other params | + +## Queries + +- `Params` +- `AllUniversalValidators`, `UniversalValidator` +- `Ballot`, `AllBallots` +- `AllActiveBallotIDs`, `AllActiveBallots` + +## Hooks + +`x/uvalidator` exports `UValidatorHooks`: + +```go +type UValidatorHooks interface { + AfterValidatorAdded(ctx, valAddr) error + AfterValidatorRemoved(ctx, valAddr) error + AfterValidatorStatusChanged(ctx, valAddr, oldStatus, newStatus) error +} +``` + +A `MultiUValidatorHooks` dispatcher (`keeper/hooks.go`) lets multiple consumers subscribe. As of today, no other module installs hooks, but the interface is present for future use. 
+ +## Inter-module Dependencies + +The keeper holds: +- `StakingKeeper` — to look up validators by operator/consensus address and to gate `IsBondedUniversalValidator` +- `SlashingKeeper` — to check tombstone status (`IsTombstoned` by consensus address) +- `BankKeeper` — to move fees between FeeCollector / `uvalidator` / `distribution` module accounts during the boost +- `AuthKeeper` (`AccountKeeper`) — to resolve the FeeCollector module account +- `DistributionKeeper` — to call `AllocateTokensToValidator` for the UV boost +- `UtssKeeper` — used during validator lifecycle transitions when TSS quorum changes are needed + +## Genesis + +```protobuf +GenesisState { + Params params + repeated UniversalValidatorEntry universal_validators + repeated Ballot ballots + repeated string active_ballot_ids + repeated string expired_ballot_ids + repeated string finalized_ballot_ids +} +``` + +Default admin in `params.go`: `push1negskcfqu09j5zvpk7nhvacnwyy2mafffy7r6a`. + +## Layout + +``` +x/uvalidator/ +|-- abci.go BeginBlocker — UV reward boost (this is the interesting one) +|-- keeper/ +| |-- keeper.go State + dependencies +| |-- voting.go IsBondedUV, IsTombstonedUV, AddVoteToBallot, VoteOnBallot, CheckIfFinalizingVote +| |-- ballot.go CreateBallot, GetOrCreateBallot, ExpireBallotsBeforeHeight +| |-- validator.go UV set CRUD and bonded/tombstone helpers +| |-- hooks.go MultiUValidatorHooks dispatcher +| |-- msg_server.go + msg_*.go for each message type +| +-- query_server.go gRPC queries +|-- types/ +| |-- ballot.go, ballot.pb.go Ballot lifecycle (ShouldPass, ShouldReject, IsExpired, AddVote) +| |-- universal_validator.go, types.pb.go UV record + UVStatus enum +| |-- identity_info.go, network_info.go Per-chain identity +| |-- lifecyle_info.go, lifecyle_event.go Status tracking +| |-- params.go, keys.go +| +-- expected_keepers.go Staking, Slashing, Bank, Distribution, Account, Utss interfaces +|-- migrations/ Consensus version 2 — one prior breaking change +|-- module.go +|-- 
autocli.go ++-- depinject.go +``` From 12e086527e6f088fcf0c03c75ff4476f6c55fec8 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Tue, 7 Apr 2026 22:51:48 +0200 Subject: [PATCH 34/61] feat: updated READMEs of core validator and specific modules --- DERIVED_TRANSACTIONS.md | 5 ++--- app/README.md | 3 +-- x/uexecutor/README.md | 5 +++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/DERIVED_TRANSACTIONS.md b/DERIVED_TRANSACTIONS.md index 73ffdb2c..db0132ae 100644 --- a/DERIVED_TRANSACTIONS.md +++ b/DERIVED_TRANSACTIONS.md @@ -79,7 +79,7 @@ The Push Chain codebase uses two distinct call patterns. Both are visible in [`x ### 1. User-derived sender (UEA-routed user actions) -When a user submits a `MsgExecutePayload` or `MsgMigrateUEA`, the Cosmos signer is converted to its derived EVM address and the EVM call is issued from that address. The UEA contract is what authenticates the request via `verificationData`. +When a user submits a `MsgExecutePayload`, the Cosmos signer is converted to its derived EVM address and the EVM call is issued from that address. The UEA contract is what authenticates the request via `verificationData`. UEA migration takes the same path — there is no separate migration message; an upgrade is just an `executePayload` whose payload calls the UEA's migration entry point. ```go return k.evmKeeper.DerivedEVMCall( @@ -177,8 +177,7 @@ Every derived call in Push Chain is in [`x/uexecutor/keeper/evm.go`](./x/uexecut | Helper | Sender | Why derived? | |---|---|---| | `CallFactoryToDeployUEA` | user-derived | Real tx receipt is required for the deploy; the deployer address is the source-chain user's derived EVM address. | -| `CallUEAExecutePayload` | user-derived | Carries `gasLimit` from the payload; receipt is consumed by the Universal Validator vote-back path. | -| `CallUEAMigrateUEA` | user-derived | Same — needs a real receipt. 
| +| `CallUEAExecutePayload` | user-derived | Carries `gasLimit` from the payload; receipt is consumed by the Universal Validator vote-back path. UEA migration also flows through this path now (the migration is just a payload that calls the UEA's migrate entry point). | | `CallPRC20Deposit` | module | Mints PRC20 to recipient. Module account has no key. | | `CallPRC20DepositAutoSwap` | module | Same, but with the auto-swap leg. | | `CallUniversalCoreSetGasPrice` | module | Writes a single chain's gas price to the on-chain oracle. | diff --git a/app/README.md b/app/README.md index b99f6cca..0b470db6 100644 --- a/app/README.md +++ b/app/README.md @@ -74,7 +74,7 @@ Lifecycle owner of every crosschain transaction (`UniversalTx`). Tallies inbound **Messages** - `MsgVoteInbound`, `MsgVoteOutbound`, `MsgVoteChainMeta` — bonded UV-only, gasless -- `MsgExecutePayload`, `MsgMigrateUEA` — any user, gasless (the UEA itself authenticates the request) +- `MsgExecutePayload` — any user, gasless (the UEA itself authenticates the request) - `MsgUpdateParams` — gov-only **State** @@ -163,7 +163,6 @@ Push Chain extends the Cosmos AnteHandler with three custom decorators that toge ``` /uexecutor.v1.MsgExecutePayload -/uexecutor.v1.MsgMigrateUEA /uexecutor.v1.MsgVoteInbound /uexecutor.v1.MsgVoteOutbound /uexecutor.v1.MsgVoteChainMeta diff --git a/x/uexecutor/README.md b/x/uexecutor/README.md index 02a2398a..4b35784a 100755 --- a/x/uexecutor/README.md +++ b/x/uexecutor/README.md @@ -201,9 +201,10 @@ At every step the UTX is mutated **append-only**: new entries are added to `pc_t | `MsgVoteOutbound` | bonded UV | yes | Vote that an outbound was broadcast (or failed) on the destination chain | | `MsgVoteChainMeta` | bonded UV | yes | Vote on observed gas price + block height for a chain | | `MsgExecutePayload` | any | yes | Execute a payload on a UEA (the UEA itself authenticates via `verificationData`) | -| `MsgMigrateUEA` | any | yes | Migrate a UEA to a newer implementation (also 
self-authenticated) | | `MsgUpdateParams` | gov | no | Update module params | +> **UEA migration is now part of payload execution.** There used to be a separate `MsgMigrateUEA` message; that path has been removed. UEAs are upgraded by submitting a normal `MsgExecutePayload` whose payload calls the UEA's migration entry point on the EVM side. The Cosmos layer no longer has a dedicated migration message — the UEA contract is the source of truth for who is allowed to migrate it and to what implementation. + Vote messages check `IsBondedUniversalValidator` and `IsTombstonedUniversalValidator` on `x/uvalidator` before accepting the vote. Tombstoned validators are silently rejected. ## Queries @@ -257,7 +258,7 @@ GenesisState { x/uexecutor/ |-- keeper/ | |-- keeper.go State + dependencies -| |-- msg_server.go MsgVoteInbound, MsgVoteOutbound, MsgVoteChainMeta, ExecutePayload, MigrateUEA +| |-- msg_server.go MsgVoteInbound, MsgVoteOutbound, MsgVoteChainMeta, ExecutePayload | |-- query_server.go v1 queries | |-- query_server_v2.go v2 queries | +-- ... 
inbound execution, outbound creation, chain meta, derived EVM calls From b1a060209f8564977f004ae378535f1b98b6788f Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 9 Apr 2026 13:15:22 +0530 Subject: [PATCH 35/61] working outbound setup --- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 309 ++++++++++++++++++++++- universalClient/chains/evm/tx_builder.go | 28 ++ universalClient/tss/txbroadcaster/evm.go | 36 ++- universalClient/tss/txresolver/evm.go | 48 +++- 5 files changed, 402 insertions(+), 21 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 1f259742..58c519a9 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-04-05T10:14:33Z", + "generatedAt": "2026-04-08T11:24:54Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 0f78c075..ffeab9bb 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -419,6 +419,23 @@ sdk_test_files() { done } +sdk_outbound_test_files() { + local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" + local file + local requested_files=( + "cea-to-eoa.spec.ts" + ) + + for file in "${requested_files[@]}"; do + if [[ -f "$outbound_dir/$file" ]]; then + printf "%s\n" "$outbound_dir/$file" + else + log_err "SDK outbound test file not found: $outbound_dir/$file" + exit 1 + fi + done +} + sdk_rewrite_chain_endpoints_for_local() { local chain_constants_file="$1" @@ -709,6 +726,7 @@ step_setup_push_chain_sdk() { echo "SOLANA_RPC_URL=$sdk_solana_rpc" echo "SOLANA_PRIVATE_KEY=$sdk_solana_private_key" echo "PUSH_PRIVATE_KEY=$sdk_push_private_key" + [[ -n "${E2E_TARGET_CHAINS:-}" ]] && echo "E2E_TARGET_CHAINS=${E2E_TARGET_CHAINS}" } >"$sdk_env_path" [[ -n "$sdk_evm_private_key" ]] || log_warn "SDK env 
EVM_PRIVATE_KEY is empty (set EVM_PRIVATE_KEY or PRIVATE_KEY in e2e-tests/.env)" @@ -775,8 +793,7 @@ step_run_sdk_test_file() { local test_basename="$1" local test_file="" - sdk_prepare_test_files_for_localnet - + # Search inbound test files first while IFS= read -r candidate; do [[ -n "$candidate" ]] || continue if [[ "$(basename "$candidate")" == "$test_basename" ]]; then @@ -785,15 +802,55 @@ step_run_sdk_test_file() { fi done < <(sdk_test_files) + if [[ -n "$test_file" ]]; then + # Inbound file — use full prepare (TESTNET→LOCALNET for all inbound files) + sdk_prepare_test_files_for_localnet + else + # Search outbound test files + while IFS= read -r candidate; do + [[ -n "$candidate" ]] || continue + if [[ "$(basename "$candidate")" == "$test_basename" ]]; then + test_file="$candidate" + break + fi + done < <(sdk_outbound_test_files) + + if [[ -n "$test_file" ]]; then + # Outbound file — sync localnet constants and apply TESTNET→LOCALNET to outbound files only + sdk_sync_localnet_constants + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$test_file" + log_ok "Prepared LOCALNET network replacement in $test_basename" + # Also patch shared evm-client.ts default network + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + # Patch utils.ts: fix TESTNET_DONUT default in getPRC20Address + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to 
PUSH_NETWORK.LOCALNET" + fi + # Patch tokens.ts: fix TESTNET_DONUT in buildPushChainMoveableTokenAccessor + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + fi + fi + if [[ -z "$test_file" ]]; then log_err "Requested SDK test file not in configured list: $test_basename" exit 1 fi log_info "Running SDK test: $test_basename" + local rel_pattern="${test_file##*/packages/core/}" ( cd "$PUSH_CHAIN_SDK_DIR" - npx nx test core --runInBand --testPathPattern="$(basename "$test_file")" + npx nx test core --runInBand --testPathPattern="$rel_pattern" ) log_ok "Completed SDK test: $test_basename" @@ -816,6 +873,55 @@ step_run_sdk_tests_all() { log_ok "Completed all configured SDK E2E tests" } +step_run_sdk_outbound_tests_all() { + local test_file + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + + # Sync localnet constants (rewrites chain.ts defaultRPC for LOCAL mode) and + # apply TESTNET_DONUT → LOCALNET replacement in outbound spec files. 
+ sdk_sync_localnet_constants + + while IFS= read -r outbound_file; do + [[ -n "$outbound_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$outbound_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$outbound_file")" + done < <(find "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" -type f -name '*.spec.ts' | sort) + + # Also patch shared evm-client.ts default network so PushChain.initialize uses LOCALNET + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + + # Patch utils.ts: fix TESTNET_DONUT default in getPRC20Address (used for PRC20 token lookup) + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + + # Patch tokens.ts: fix TESTNET_DONUT in buildPushChainMoveableTokenAccessor + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + log_info "Running SDK outbound test: $(basename "$test_file")" + # Strip everything up to and including "packages/core/" to get a relative path + # that Jest can match against canonical absolute paths (avoids ".." 
in the pattern) + local rel_pattern="${test_file##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + done < <(sdk_outbound_test_files) + + log_ok "Completed all configured SDK outbound E2E tests" +} + step_devnet() { require_cmd bash jq @@ -1157,7 +1263,7 @@ step_setup_environment() { if is_local_testing_env; then # Upstream RPCs that the local anvil forks are derived from. - local sepolia_fork_rpc="https://ethereum-sepolia-rpc.publicnode.com" + local sepolia_fork_rpc="https://sepolia.drpc.org" local arbitrum_fork_rpc="https://arbitrum-sepolia.gateway.tenderly.co" local base_fork_rpc="https://sepolia.base.org" local bsc_fork_rpc="https://bnb-testnet.g.alchemy.com/v2/peQmTO8MjpoK5Czw4HwRp" @@ -1296,6 +1402,192 @@ step_stop_running_nodes() { log_ok "Running nodes stopped" } +step_fund_uv_broadcasters_on_anvil() { + if ! is_local_testing_env; then + log_info "step_fund_uv_broadcasters_on_anvil: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd cast + local anvil_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + # Anvil default account 0 — always seeded with 10,000 ETH in any anvil fork (mnemonic: "test test ... 
junk") + local funder_pk="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + local fund_amount="10ether" + local funded=0 + for addr_file in "$LOCAL_DEVNET_DIR/data"/universal*/.puniversal/keyring-test/*.address; do + [[ -f "$addr_file" ]] || continue + local addr_hex + addr_hex="$(basename "$addr_file" .address)" + local addr="0x${addr_hex}" + local balance + balance="$(cast balance "$addr" --rpc-url "$anvil_rpc" 2>/dev/null || echo "0")" + if [[ "$balance" == "0" ]]; then + log_info "Funding UV broadcaster $addr with $fund_amount on Anvil Sepolia" + if cast send "$addr" --value "$fund_amount" --private-key "$funder_pk" \ + --rpc-url "$anvil_rpc" >/dev/null 2>&1; then + funded=$((funded + 1)) + else + log_warn "Failed to fund UV broadcaster $addr on Anvil Sepolia" + fi + else + log_info "UV broadcaster $addr already has ETH on Anvil Sepolia: $balance wei" + fi + done + log_ok "UV broadcaster funding done (funded $funded new address(es))" +} + +# Sync every EVM vault's TSS_ADDRESS to the current local TSS key so that +# AccessControlUnauthorizedAccount (0xe2517d3f) never blocks outbound txs. +# Also funds the TSS signer on each Anvil chain so it can pay gas. +step_sync_vault_tss_on_anvil() { + if ! is_local_testing_env; then + log_info "step_sync_vault_tss_on_anvil: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd cast jq python3 + + # Derive the TSS EVM address from the on-chain TSS public key. + # 1. Query compressed secp256k1 pubkey from the utss module. + # 2. Decompress it using pure Python3 math (stdlib only, no extra packages). + # 3. keccak256(x || y) via `cast keccak`, last 20 bytes = EVM address. 
+ local tss_pubkey tss_addr + tss_pubkey="$("$PUSH_CHAIN_DIR/build/pchaind" query utss current-key \ + --node tcp://127.0.0.1:26657 --output json 2>/dev/null \ + | jq -r '.key.tss_pubkey // empty' 2>/dev/null || true)" + + if [[ -z "$tss_pubkey" ]]; then + log_warn "step_sync_vault_tss_on_anvil: TSS key not found on chain yet, skipping" + return 0 + fi + + # Decompress pubkey → 64-byte uncompressed (x||y) hex using Python3 stdlib. + local uncompressed_hex + uncompressed_hex="$(python3 -c " +prefix = int('${tss_pubkey:0:2}', 16) +x = int('${tss_pubkey:2}', 16) +p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F +y_sq = (pow(x, 3, p) + 7) % p +y = pow(y_sq, (p + 1) // 4, p) +if (y % 2) != (prefix % 2): + y = p - y +print(format(x, '064x') + format(y, '064x')) +" 2>/dev/null || true)" + + if [[ -z "$uncompressed_hex" ]]; then + log_warn "step_sync_vault_tss_on_anvil: failed to decompress TSS pubkey, skipping" + return 0 + fi + + local keccak_hash + keccak_hash="$(cast keccak "0x$uncompressed_hex" 2>/dev/null || true)" + tss_addr="0x${keccak_hash: -40}" + + if [[ -z "$tss_addr" || ${#tss_addr} -ne 42 ]]; then + log_warn "step_sync_vault_tss_on_anvil: failed to derive TSS EVM address, skipping" + return 0 + fi + + log_info "Syncing vault TSS address to $tss_addr on all local Anvil EVM chains" + + local DEF_ADMIN_ROLE="0x0000000000000000000000000000000000000000000000000000000000000000" + # Anvil default account 0 — always seeded with 10,000 ETH in every fork + local funder_pk="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + # Known deployer addresses for the forge localSetup scripts — these never change + # between runs since it is the same forge wallet that deploys the vault contracts. 
+ local KNOWN_ADMINS=( + "0x35b84d6848d16415177c64d64504663b998a6ab4" + "0xe520d4A985A2356Fa615935a822Ce4eFAcA24aB6" + "0xd854dde7c58ec1b405e6577f48a7cc5b5e6ef317" + ) + + # cfg_name:anvil_rpc pairs — mirrors the Anvil forks started in step_devnet. + local CHAIN_INFO=( + "eth_sepolia:${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + "arb_sepolia:${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" + "base_sepolia:${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" + "bsc_testnet:${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" + ) + + for entry in "${CHAIN_INFO[@]}"; do + local cfg_name="${entry%%:*}" + local rpc="${entry#*:}" + local chain_cfg="$TOKENS_CONFIG_DIR/$cfg_name/chain.json" + + if [[ ! -f "$chain_cfg" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no chain config at $chain_cfg, skipping" + continue + fi + + # Fund the TSS signer so it can pay gas for outbound vault txs. + local tss_bal + tss_bal="$(cast balance "$tss_addr" --rpc-url "$rpc" 2>/dev/null || echo "0")" + if [[ "$tss_bal" == "0" ]]; then + if cast send "$tss_addr" --value "10ether" --private-key "$funder_pk" --rpc-url "$rpc" >/dev/null 2>&1; then + log_ok " $cfg_name: funded TSS signer $tss_addr with 10 ETH" + else + log_warn " $cfg_name: failed to fund TSS signer $tss_addr" + fi + else + log_info " $cfg_name: TSS signer $tss_addr already has ETH (bal=$tss_bal)" + fi + + local gateway + gateway="$(jq -r '.gateway_address // empty' "$chain_cfg" 2>/dev/null || true)" + if [[ -z "$gateway" || "$gateway" == "null" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no gateway_address in $chain_cfg, skipping" + continue + fi + + local vault + vault="$(cast call "$gateway" 'VAULT()(address)' --rpc-url "$rpc" 2>/dev/null || true)" + if [[ -z "$vault" || "$vault" == "0x0000000000000000000000000000000000000000" ]]; then + log_warn "step_sync_vault_tss_on_anvil: VAULT() empty for gateway $gateway ($cfg_name), skipping" + continue + fi + + # Skip only if the vault's stored TSS_ADDRESS 
already matches the current key. + # Checking TSS_ADDRESS (not just hasRole) ensures we update after every re-keying, + # because setTSS atomically revokes the old role and grants the new one. + local vault_tss + vault_tss="$(cast call "$vault" 'TSS_ADDRESS()(address)' --rpc-url "$rpc" 2>/dev/null || true)" + if [[ "$(echo "$vault_tss" | tr '[:upper:]' '[:lower:]')" == "$(echo "$tss_addr" | tr '[:upper:]' '[:lower:]')" ]]; then + log_info " $cfg_name vault $vault TSS_ADDRESS already matches $tss_addr" + continue + fi + + # Find the DEFAULT_ADMIN_ROLE holder among known candidates. + local vault_admin="" + for candidate in "${KNOWN_ADMINS[@]}"; do + local is_admin + is_admin="$(cast call "$vault" 'hasRole(bytes32,address)(bool)' "$DEF_ADMIN_ROLE" "$candidate" \ + --rpc-url "$rpc" 2>/dev/null || echo "false")" + if [[ "$is_admin" == "true" ]]; then + vault_admin="$candidate" + break + fi + done + + if [[ -z "$vault_admin" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no known admin for vault $vault ($cfg_name), skipping" + continue + fi + + # Impersonate the admin on the Anvil fork (no private key needed) and call setTSS. + cast rpc anvil_impersonateAccount "$vault_admin" --rpc-url "$rpc" >/dev/null 2>&1 || true + cast rpc anvil_setBalance "$vault_admin" "0x56BC75E2D63100000" --rpc-url "$rpc" >/dev/null 2>&1 || true + + if cast send "$vault" "setTSS(address)" "$tss_addr" \ + --rpc-url "$rpc" \ + --from "$vault_admin" \ + --unlocked >/dev/null 2>&1; then + log_ok " $cfg_name vault $vault: TSS updated to $tss_addr" + else + log_warn " step_sync_vault_tss_on_anvil: setTSS failed on vault $vault ($cfg_name)" + fi + done + + log_ok "Vault TSS sync complete" +} + step_print_genesis() { require_cmd jq local accounts_json @@ -1980,7 +2272,7 @@ step_setup_gateway() { log_warn "UniversalCore BASE_GAS_LIMIT is 0. 
Applying local defaults for outbound chains" for ns in "eip155:11155111" "eip155:421614" "eip155:84532" "eip155:97" "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"; do - cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" 21000 \ + cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" 2000000 \ --rpc-url "$PUSH_RPC_URL" \ --private-key "$PRIVATE_KEY" >/dev/null || true done @@ -2488,6 +2780,7 @@ cmd_all() { step_add_uregistry_configs step_deploy_counter_and_sync_sdk sdk_sync_localnet_constants + step_sync_vault_tss_on_anvil } cmd_show_help() { @@ -2509,16 +2802,19 @@ Commands: write-core-env Create core-contracts .env from deploy_addresses.json update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) + sync-vault-tss Grant TSS_ROLE on each Anvil EVM vault to the current local TSS key (LOCAL only) bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE setup-sdk Clone/setup push-chain-sdk, generate SDK .env from e2e .env, and install dependencies sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests + sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts sdk-test-progress-hook Run progress-hook-per-tx.spec.ts sdk-test-bridge-multicall Run bridge-multicall.spec.ts sdk-test-pushchain Run pushchain.spec.ts sdk-test-bridge-hooks Run bridge-hooks.spec.ts + sdk-test-cea-to-eoa Run cea-to-eoa.spec.ts (outbound Route 3; requires TESTING_ENV=LOCAL) add-uregistry-configs Submit chain + token config txs via local-multi-validator validator1 record-contract K A Manually record 
contract key/address record-token N S A Manually record token name/symbol/address @@ -2563,16 +2859,19 @@ main() { write-core-env) step_write_core_env ;; update-token-config) step_update_deployed_token_configs ;; setup-gateway) step_setup_gateway ;; + sync-vault-tss) step_sync_vault_tss_on_anvil ;; bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; setup-sdk) step_setup_push_chain_sdk ;; sdk-test-all) step_run_sdk_tests_all ;; + sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; sdk-test-bridge-multicall) step_run_sdk_test_file "bridge-multicall.spec.ts" ;; sdk-test-pushchain) step_run_sdk_test_file "pushchain.spec.ts" ;; sdk-test-bridge-hooks) step_run_sdk_test_file "bridge-hooks.spec.ts" ;; + sdk-test-cea-to-eoa) step_run_sdk_test_file "cea-to-eoa.spec.ts" ;; add-uregistry-configs) step_add_uregistry_configs ;; record-contract) ensure_deploy_file diff --git a/universalClient/chains/evm/tx_builder.go b/universalClient/chains/evm/tx_builder.go index f61cb138..afc191c6 100644 --- a/universalClient/chains/evm/tx_builder.go +++ b/universalClient/chains/evm/tx_builder.go @@ -233,12 +233,40 @@ func (tb *TxBuilder) BroadcastOutboundSigningRequest( txHashStr := signedTx.Hash().Hex() + // Recover and log sender address from the signed tx for diagnostics + senderAddr, senderErr := signer.Sender(signedTx) + senderStr := "(unknown)" + if senderErr == nil { + senderStr = senderAddr.Hex() + } + + tb.logger.Info(). + Str("tx_hash", txHashStr). + Str("sender", senderStr). + Str("to", tb.vaultAddress.Hex()). + Str("chain", tb.chainID). + Uint64("nonce", req.Nonce). + Str("gas_price", gasPrice.String()). + Uint64("gas_limit", gasLimitForTx.Uint64()). 
+ Str("value", txValue.String()). + Str("func", funcName). + Msg("submitting vault tx to EVM chain") + if _, err := tb.rpcClient.BroadcastTransaction(ctx, signedTx); err != nil { + tb.logger.Warn(). + Err(err). + Str("sender", senderStr). + Str("to", tb.vaultAddress.Hex()). + Str("chain", tb.chainID). + Uint64("nonce", req.Nonce). + Msg("BroadcastTransaction failed") return txHashStr, fmt.Errorf("failed to broadcast transaction: %w", err) } tb.logger.Info(). Str("tx_hash", txHashStr). + Str("sender", senderStr). + Str("to", tb.vaultAddress.Hex()). Msg("transaction broadcast successfully") return txHashStr, nil diff --git a/universalClient/tss/txbroadcaster/evm.go b/universalClient/tss/txbroadcaster/evm.go index f1f9ff92..f32c763b 100644 --- a/universalClient/tss/txbroadcaster/evm.go +++ b/universalClient/tss/txbroadcaster/evm.go @@ -42,6 +42,25 @@ func (b *Broadcaster) broadcastEVM(ctx context.Context, event *store.Event, data return } + // Resolve TSS address for logging before broadcast + tssAddress := "" + if b.getTSSAddress != nil { + if addr, addrErr := b.getTSSAddress(ctx); addrErr == nil { + tssAddress = addr + } + } + + b.logger.Info(). + Str("event_id", event.EventID). + Str("chain", chainID). + Str("tss_sender", tssAddress). + Str("vault", data.OutboundCreatedEvent.AssetAddr). // log something identifiable + Str("amount", data.OutboundCreatedEvent.Amount). + Str("gas_price", data.OutboundCreatedEvent.GasPrice). + Str("gas_limit", data.OutboundCreatedEvent.GasLimit). + Uint64("nonce", data.SigningData.Nonce). 
+ Msg("broadcasting EVM vault tx") + // Broadcast — tx hash is computed before sending, so it's returned even on RPC error outboundData := data.OutboundCreatedEvent txHash, broadcastErr := builder.BroadcastOutboundSigningRequest(ctx, signingReq, &outboundData, signature) @@ -60,16 +79,6 @@ func (b *Broadcaster) broadcastEVM(ctx context.Context, event *store.Event, data } eventNonce := data.SigningData.Nonce - tssAddress := "" - if b.getTSSAddress != nil { - var addrErr error - tssAddress, addrErr = b.getTSSAddress(ctx) - if addrErr != nil { - b.logger.Warn().Err(addrErr).Str("event_id", event.EventID). - Msg("failed to get TSS address for nonce check, will retry next tick") - return - } - } finalizedNonce, err := builder.GetNextNonce(ctx, tssAddress, true) if err == nil && eventNonce < finalizedNonce { @@ -84,6 +93,11 @@ func (b *Broadcaster) broadcastEVM(ctx context.Context, event *store.Event, data // Nonce not consumed — transient error (RPC down, gas issues, etc.). // Keep as SIGNED and retry next tick. - b.logger.Debug().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID). + b.logger.Warn().Err(broadcastErr). + Str("event_id", event.EventID). + Str("chain", chainID). + Str("tss_sender", tssAddress). + Str("tx_hash", txHash). + Uint64("nonce", eventNonce). Msg("broadcast failed, will retry next tick") } diff --git a/universalClient/tss/txresolver/evm.go b/universalClient/tss/txresolver/evm.go index 4aa09afd..078171d4 100644 --- a/universalClient/tss/txresolver/evm.go +++ b/universalClient/tss/txresolver/evm.go @@ -6,6 +6,7 @@ import ( "github.com/pushchain/push-chain-node/universalClient/chains/common" "github.com/pushchain/push-chain-node/universalClient/store" "github.com/pushchain/push-chain-node/universalClient/tss/eventstore" + uexecutortypes "github.com/pushchain/push-chain-node/x/uexecutor/types" ) // resolveEVM checks the on-chain receipt and moves the event to COMPLETED or REVERTED. 
@@ -31,9 +32,15 @@ func (r *Resolver) resolveEVM(ctx context.Context, event *store.Event, chainID, r.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to extract outbound IDs") return } + r.logger.Debug(). + Str("event_id", event.EventID). + Str("chain", chainID). + Str("tx_hash", rawTxHash). + Msg("resolving EVM broadcasted tx") + found, blockHeight, confirmations, status, err := r.verifyTxOnChain(ctx, chainID, rawTxHash) if err != nil { - r.logger.Debug().Err(err).Str("event_id", event.EventID).Msg("tx verification error") + r.logger.Debug().Err(err).Str("event_id", event.EventID).Str("tx_hash", rawTxHash).Msg("tx verification error") return } if !found { @@ -55,6 +62,15 @@ func (r *Resolver) resolveEVM(ctx context.Context, event *store.Event, chainID, delete(r.notFoundCounts, event.EventID) requiredConfs := r.chains.GetStandardConfirmations(chainID) + r.logger.Debug(). + Str("event_id", event.EventID). + Str("chain", chainID). + Str("tx_hash", rawTxHash). + Uint64("block_height", blockHeight). + Uint64("confirmations", confirmations). + Uint64("required_confs", requiredConfs). + Uint8("status", status). 
+ Msg("tx found on chain") if confirmations < requiredConfs { return // not enough confirmations yet, retry next tick } @@ -73,9 +89,33 @@ func (r *Resolver) resolveEVM(ctx context.Context, event *store.Event, chainID, } // status == 1 (success) - if err := r.eventStore.Update(event.EventID, map[string]any{"status": eventstore.StatusCompleted}); err != nil { - r.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to mark event COMPLETED") - return + if r.pushSigner != nil { + gasFeeUsed := "0" + if builder, err := r.getBuilder(chainID); err == nil { + if fee, err := builder.GetGasFeeUsed(ctx, rawTxHash); err == nil { + gasFeeUsed = fee + } + } + observation := &uexecutortypes.OutboundObservation{ + Success: true, + BlockHeight: blockHeight, + TxHash: rawTxHash, + GasFeeUsed: gasFeeUsed, + } + voteTxHash, err := r.pushSigner.VoteOutbound(ctx, txID, utxID, observation) + if err != nil { + r.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to vote success for EVM tx") + return + } + if err := r.eventStore.Update(event.EventID, map[string]any{"status": eventstore.StatusCompleted, "vote_tx_hash": voteTxHash}); err != nil { + r.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to mark event COMPLETED") + return + } + } else { + if err := r.eventStore.Update(event.EventID, map[string]any{"status": eventstore.StatusCompleted}); err != nil { + r.logger.Warn().Err(err).Str("event_id", event.EventID).Msg("failed to mark event COMPLETED") + return + } } r.logger.Info(). Str("event_id", event.EventID).Str("tx_hash", rawTxHash). 
From 6277d7b71e150ff1458e51be0fa65d6efe1b92fd Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Thu, 9 Apr 2026 19:34:03 +0530 Subject: [PATCH 36/61] Inbound-Outbound e2e-setup --- e2e-tests/README.md | 122 ++++++++++++++++++++------------ e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 60 +++++++++++++++- 3 files changed, 136 insertions(+), 48 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index fab82ec9..ebe0dfed 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -22,8 +22,8 @@ It covers: ## What gets created -- `local-setup-e2e/data/` — validator + universal-validator home directories -- `local-setup-e2e/logs/` — per-process log files +- `local-native/data/` — validator + universal-validator home directories +- `local-native/logs/` — per-process log files - `e2e-tests/logs/` — logs for each deployment step - `e2e-tests/deploy_addresses.json` — contract/token address source-of-truth @@ -71,18 +71,28 @@ Edit `e2e-tests/.env`. 
Key variables: | Variable | Default | Description | |---|---|---| -| `TESTING_ENV` | _(empty)_ | Set to `LOCAL` for local devnet | +| `TESTING_ENV` | _(empty)_ | Set to `LOCAL` for local anvil/surfpool mode | | `PUSH_RPC_URL` | `http://localhost:8545` | Push Chain EVM JSON-RPC | | `PRIVATE_KEY` | — | EVM deployer private key (forge/hardhat) | | `EVM_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK EVM signer key | +| `EVM_RPC` | ← `PUSH_RPC_URL` | SDK EVM RPC endpoint | | `PUSH_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK Push Chain signer key | -| `FUND_TO_ADDRESS` | — | Address to top up from genesis account | +| `SOLANA_PRIVATE_KEY` | — | SDK Solana signer key (also `SVM_PRIVATE_KEY` / `SOL_PRIVATE_KEY`) | +| `SOLANA_RPC_URL` | `https://api.devnet.solana.com` | SDK Solana RPC | +| `FUND_TO_ADDRESS` | _(auto-derived from `PRIVATE_KEY`)_ | Address to top up from genesis account | +| `GENESIS_MNEMONIC` | _(read from `genesis_accounts.json`)_ | Override genesis mnemonic directly | | `POOL_CREATION_TOPUP_AMOUNT` | `50000000000000000000upc` | Deployer top-up before pool creation | +| `LOCAL_DEVNET_DIR` | `./local-native` | Path to local devnet management directory | | `CORE_CONTRACTS_BRANCH` | `e2e-push-node` | | | `SWAP_AMM_BRANCH` | `e2e-push-node` | | | `GATEWAY_BRANCH` | `e2e-push-node` | | | `PUSH_CHAIN_SDK_BRANCH` | `outbound_changes` | | | `PUSH_CHAIN_SDK_E2E_DIR` | `packages/core/__e2e__/evm/inbound` | Test directory inside SDK | +| `PREFER_SIBLING_REPO_DIRS` | `true` | Prefer sibling dirs for core/gateway repos over cloning fresh | +| `E2E_TARGET_CHAINS` | — | Restrict SDK E2E chains (passed through to SDK `.env`) | +| `CORE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for core forge script | +| `GATEWAY_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for gateway forge script | +| `CORE_CONFIGURE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for `configureUniversalCore` | ### TESTING_ENV=LOCAL @@ -98,11 +108,16 @@ 
Default local fork URLs (override in `.env`): | Variable | Default | |---|---| -| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | -| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | -| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | -| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | -| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | +| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | Anvil Sepolia host URL (used by forge/cast and chain config patch) | +| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | | +| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | | +| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | | +| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | | +| `LOCAL_SEPOLIA_UV_RPC_URL` | ← `ANVIL_SEPOLIA_HOST_RPC_URL` | RPC URL written into UV `pushuv_config.json` (can differ from host if using Docker networking) | +| `LOCAL_ARBITRUM_UV_RPC_URL` | ← `ANVIL_ARBITRUM_HOST_RPC_URL` | | +| `LOCAL_BASE_UV_RPC_URL` | ← `ANVIL_BASE_HOST_RPC_URL` | | +| `LOCAL_BSC_UV_RPC_URL` | ← `ANVIL_BSC_HOST_RPC_URL` | | +| `LOCAL_SOLANA_UV_RPC_URL` | ← `SURFPOOL_SOLANA_HOST_RPC_URL` | | --- @@ -116,31 +131,37 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh all The `all` pipeline runs in order: -1. `setup-environment` — start anvil/surfpool + patch chain RPC configs +1. `setup-environment` — start anvil/surfpool + patch chain RPC configs (LOCAL) or sync testnet RPCs 2. Build binaries (`make replace-addresses` + `make build`) -3. `devnet` — start 4 validators + 4 universal validators (clean) -4. `tss-keygen` — TSS key generation (via `./local-setup-e2e/devnet tss-keygen`) -5. `recover-genesis-key` — import genesis mnemonic into local keyring -6. `fund` — top up deployer address from genesis account -7. `setup-core` — deploy core contracts (forge, auto-resume) -8. `setup-swap` — deploy WPC + Uniswap V3 (hardhat) -9. `sync-addresses` — copy addresses into swap `test-addresses.json` -10. 
`create-pool` — create WPC liquidity pools for all tokens -11. `write-core-env` — generate core contracts `.env` -12. `configure-core` — run `configureUniversalCore.s.sol` (forge, auto-resume) -13. `update-token-config` — patch token config JSON files -14. `setup-gateway` — deploy gateway contracts (forge, auto-resume) -15. `add-uregistry-configs` — submit chain + token config txs -16. `deploy-counter-sdk` — deploy CounterPayable + sync SDK constants +3. Auto-derive `FUND_TO_ADDRESS` from `PRIVATE_KEY` (writes to `.env`) +4. Stop any running nodes cleanly +5. `devnet` — start 4 validators + register + start 4 universal validators +6. `tss-keygen` — TSS key generation (via `./local-native/devnet tss-keygen`) +7. `setup-environment` (second run — patches UV `pushuv_config.json` with `event_start_from` after devnet data exists) +8. `recover-genesis-key` — import genesis mnemonic into local keyring +9. `fund` — top up deployer address from genesis account +10. `setup-core` — deploy core contracts (forge, auto-resume) +11. `setup-swap` — deploy WPC + Uniswap V3 (hardhat) +12. `sync-addresses` — copy addresses into swap `test-addresses.json` +13. `create-pool` — create WPC liquidity pools for all tokens +14. `check-addresses` — assert required contract addresses are recorded +15. `write-core-env` — generate core contracts `.env` +16. `configure-core` — run `configureUniversalCore.s.sol` (forge, auto-resume; internally re-generates core `.env`) +17. `update-token-config` — patch token config JSON files +18. `setup-gateway` — deploy gateway contracts (forge, auto-resume) +19. `add-uregistry-configs` — submit chain + token config txs +20. `deploy-counter-sdk` — deploy CounterPayable + sync SDK constants +21. Sync SDK LOCALNET synthetic token constants from `deploy_addresses.json` +22. 
`sync-vault-tss` — sync vault TSS addresses on all local Anvil EVM chains (LOCAL only) --- -## Local devnet (`local-setup-e2e/devnet`) +## Local devnet (`local-native/devnet`) The `devnet` script manages 4 `pchaind` validators and 4 `puniversald` universal validators as local OS processes (no Docker). ``` -local-setup-e2e/ +local-native/ devnet # management script data/ # validator home dirs + PID file (gitignored) logs/ # per-process log files (gitignored) @@ -149,14 +170,14 @@ local-setup-e2e/ ### Devnet commands ```bash -./local-setup-e2e/devnet start [--build] # Start all 4 validators + 4 UVs - # --build for clean start (wipes data) -./local-setup-e2e/devnet stop # Stop all processes (keep data) -./local-setup-e2e/devnet down # Stop and remove data -./local-setup-e2e/devnet status # Show running processes + block heights -./local-setup-e2e/devnet logs [name] # Tail logs (validator-1, universal-2, all, …) -./local-setup-e2e/devnet tss-keygen # Initiate TSS key generation -./local-setup-e2e/devnet setup-uvalidators # Register UVs + create AuthZ grants +./local-native/devnet start 4 # Start 4 core validators +./local-native/devnet setup-uvalidators # Register UVs on-chain + create AuthZ grants +./local-native/devnet start-uv 2 # Start 2 universal validators (or 4 for full set) +./local-native/devnet stop # Stop all processes (keep data) +./local-native/devnet down # Stop and remove data +./local-native/devnet status # Show running processes + block heights +./local-native/devnet logs [name] # Tail logs (validator-1, universal-2, all, …) +./local-native/devnet tss-keygen # Initiate TSS key generation ``` Port layout: @@ -178,8 +199,10 @@ Port layout: ### Clean devnet restart ```bash -./local-setup-e2e/devnet down -./local-setup-e2e/devnet start --build +./local-native/devnet down +./local-native/devnet start 4 +./local-native/devnet setup-uvalidators +./local-native/devnet start-uv 4 ``` --- @@ -202,22 +225,26 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh | 
`setup-swap` | Build + deploy WPC + Uniswap V3 | | `sync-addresses` | Copy `deploy_addresses.json` into swap `test-addresses.json` | | `create-pool` | Create WPC pools for all deployed core tokens | +| `fund-uea-prc20` | Transfer PRC20 tokens from deployer to the test UEA address | | `configure-core` | Run `configureUniversalCore.s.sol` (auto-resume) | | `check-addresses` | Assert required contract addresses are recorded | | `write-core-env` | Generate core contracts `.env` | | `update-token-config` | Patch token config JSON contract addresses | | `setup-gateway` | Build + deploy gateway contracts (auto-resume) | +| `sync-vault-tss` | Sync vault `TSS_ADDRESS` to current TSS key on all local Anvil chains (LOCAL only) | | `add-uregistry-configs` | Submit chain + token configs to uregistry | | `deploy-counter-sdk` | Deploy CounterPayable + sync SDK `COUNTER_ADDRESS_PAYABLE` | -| `bootstrap-cea-sdk` | Ensure CEA is deployed for SDK signer (Route 2 bootstrap) | -| `setup-sdk` | Install SDK dependencies + generate SDK `.env` | -| `sdk-test-all` | Run all configured SDK E2E test files | +| `bootstrap-cea-sdk` | Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) | +| `setup-sdk` | Clone/install SDK, generate SDK `.env`, sync LOCALNET constants | +| `sdk-test-all` | Run all configured inbound SDK E2E test files | +| `sdk-test-outbound-all` | Run all configured outbound SDK E2E test files (LOCAL only) | | `sdk-test-pctx-last-transaction` | Run `pctx-last-transaction.spec.ts` | | `sdk-test-send-to-self` | Run `send-to-self.spec.ts` | | `sdk-test-progress-hook` | Run `progress-hook-per-tx.spec.ts` | | `sdk-test-bridge-multicall` | Run `bridge-multicall.spec.ts` | | `sdk-test-pushchain` | Run `pushchain.spec.ts` | | `sdk-test-bridge-hooks` | Run `bridge-hooks.spec.ts` | +| `sdk-test-cea-to-eoa` | Run `cea-to-eoa.spec.ts` (outbound Route 3; requires `TESTING_ENV=LOCAL`) | | `record-contract K A` | Manually record contract key + address | | 
`record-token N S A` | Manually record token name, symbol, address | | `help` | Show help | @@ -234,7 +261,7 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh - `contracts.Factory` - `contracts.QuoterV2` - `contracts.SwapRouter` -- `contracts.UEA_PROXY_IMPLEMENTATION` +- `contracts.UEA_PROXY_IMPLEMENTATION` (resolved from on-chain precompile during `setup-sdk`) - `contracts.COUNTER_ADDRESS_PAYABLE` ### Token entries @@ -263,7 +290,10 @@ Manual helpers: - Stale broadcast cache from previous runs is cleared automatically before each fresh deploy. - If the initial `forge script --broadcast` fails (e.g., receipt timeout), retries with `--resume` until success. -- Optional cap: `CORE_RESUME_MAX_ATTEMPTS=5` (default `0` = unlimited). +- Caps (all default `0` = unlimited retries): + - `CORE_RESUME_MAX_ATTEMPTS` — core contracts deploy + - `GATEWAY_RESUME_MAX_ATTEMPTS` — gateway contracts deploy + - `CORE_CONFIGURE_RESUME_MAX_ATTEMPTS` — `configureUniversalCore.s.sol` ### uregistry tx submission @@ -278,8 +308,8 @@ Manual helpers: |---|---| | `e2e-tests/deploy_addresses.json` | Contract/token address registry | | `e2e-tests/logs/` | Per-step deployment logs | -| `local-setup-e2e/data/` | Validator + UV home directories | -| `local-setup-e2e/logs/` | Per-process stdout/stderr | +| `local-native/data/` | Validator + UV home directories | +| `local-native/logs/` | Per-process stdout/stderr | | `/test-addresses.json` | Swap repo address file (synced from deploy_addresses.json) | | `/.env` | Core contracts env (generated by `write-core-env`) | | `config/testnet-donut/*/tokens/*.json` | Token config files (updated contract addresses) | @@ -290,7 +320,7 @@ Manual helpers: ```bash # Stop + wipe devnet -./local-setup-e2e/devnet down +./local-native/devnet down # Reset state rm -f e2e-tests/deploy_addresses.json @@ -311,11 +341,11 @@ Check that `make build` completed successfully and `build/pchaind` / `build/puni ### 2) Validators stuck at height 0 -P2P peer connections failing. 
The devnet script sets `allow_duplicate_ip = true` and `addr_book_strict = false` automatically for all-localhost setups. If reusing old data, run `./local-setup-e2e/devnet down` to wipe and restart clean. +P2P peer connections failing. The devnet script sets `allow_duplicate_ip = true` and `addr_book_strict = false` automatically for all-localhost setups. If reusing old data, run `./local-native/devnet down` to wipe and restart clean. ### 3) TSS keygen not completing -Check UV logs (`./local-setup-e2e/devnet logs universal-1`). UVs need: +Check UV logs (`./local-native/devnet logs universal-1`). UVs need: - All 4 validators bonded - All 4 UVs registered with AuthZ grants - External chain RPC endpoints configured (set by `setup-environment`) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 58c519a9..736b4f2f 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-04-08T11:24:54Z", + "generatedAt": "2026-04-09T11:40:20Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index ffeab9bb..32079e78 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -1266,7 +1266,7 @@ step_setup_environment() { local sepolia_fork_rpc="https://sepolia.drpc.org" local arbitrum_fork_rpc="https://arbitrum-sepolia.gateway.tenderly.co" local base_fork_rpc="https://sepolia.base.org" - local bsc_fork_rpc="https://bnb-testnet.g.alchemy.com/v2/peQmTO8MjpoK5Czw4HwRp" + local bsc_fork_rpc="wss://bsc-testnet-rpc.publicnode.com" local solana_upstream_rpc="https://api.devnet.solana.com" # Fetch event_start_from from the upstream RPCs BEFORE starting local forks. 
@@ -2458,6 +2458,62 @@ step_sync_test_addresses() { log_ok "Updated $TEST_ADDRESSES_PATH" } +step_fund_uea_prc20() { + require_cmd cast jq + ensure_deploy_file + + local sdk_evm_private_key + sdk_evm_private_key="${EVM_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + if [[ -z "$sdk_evm_private_key" ]]; then + log_warn "No EVM_PRIVATE_KEY found; skipping UEA PRC20 funding" + return 0 + fi + + local evm_addr + evm_addr="$(cast wallet address "$sdk_evm_private_key" 2>/dev/null || true)" + if ! validate_eth_address "$evm_addr"; then + log_warn "Could not derive EVM address from EVM_PRIVATE_KEY; skipping UEA PRC20 funding" + return 0 + fi + + local factory_addr="0x00000000000000000000000000000000000000eA" + local uea_addr + uea_addr="$(cast call "$factory_addr" "computeUEA((string,string,bytes))(address)" \ + "(eip155,11155111,$evm_addr)" \ + --rpc-url "$PUSH_RPC_URL" 2>/dev/null | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + + if ! validate_eth_address "$uea_addr"; then + log_warn "Could not compute UEA address for $evm_addr; skipping UEA PRC20 funding" + return 0 + fi + + log_info "Funding UEA $uea_addr (signer: $evm_addr) with PRC20 tokens from deployer" + + local token_count + token_count="$(jq -r '.tokens | length' "$DEPLOY_ADDRESSES_FILE")" + if [[ "$token_count" == "0" ]]; then + log_warn "No tokens in deploy addresses to fund UEA with" + return 0 + fi + + local token_symbol token_addr token_decimals fund_amount + while IFS=$'\t' read -r token_symbol token_addr token_decimals; do + [[ -n "$token_addr" ]] || continue + # 1e9 for tokens with <=9 decimals (e.g. USDT×1000, pSOL×1), 1e18 for 18-decimal tokens (e.g. 
1 pETH) + if [[ "${token_decimals:-18}" -le 9 ]]; then + fund_amount="1000000000" + else + fund_amount="1000000000000000000" + fi + log_info " Sending $fund_amount of $token_symbol ($token_addr) to UEA $uea_addr" + cast send --private-key "$PRIVATE_KEY" "$token_addr" \ + "transfer(address,uint256)(bool)" "$uea_addr" "$fund_amount" \ + --rpc-url "$PUSH_RPC_URL" 2>&1 | grep -E "^status" || true + done < <(jq -r '.tokens[]? | [.symbol, .address, (.decimals // 18)] | @tsv' "$DEPLOY_ADDRESSES_FILE") + + log_ok "UEA PRC20 funding complete" +} + step_create_all_wpc_pools() { require_cmd node cast "$PUSH_CHAIN_DIR/build/pchaind" ensure_deploy_file @@ -2797,6 +2853,7 @@ Commands: setup-swap Clone/install/deploy swap AMM contracts sync-addresses Apply deploy_addresses.json into test-addresses.json create-pool Create WPC pools for all deployed core tokens + fund-uea-prc20 Transfer PRC20 tokens (pETH/pUSDT/pSOL etc.) from deployer to test UEA configure-core Run configureUniversalCore.s.sol (auto --resume retries) check-addresses Check/report deploy addresses (WPC/Factory/QuoterV2/SwapRouter) write-core-env Create core-contracts .env from deploy_addresses.json @@ -2854,6 +2911,7 @@ main() { setup-swap) step_setup_swap_amm ;; sync-addresses) step_sync_test_addresses ;; create-pool) step_create_all_wpc_pools ;; + fund-uea-prc20) step_fund_uea_prc20 ;; configure-core) step_configure_universal_core ;; check-addresses) assert_required_addresses ;; write-core-env) step_write_core_env ;; From 91f09b884f93d674181e3f2d0456d4f80fd9a388 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Tue, 14 Apr 2026 14:07:25 +0530 Subject: [PATCH 37/61] fix: rpc retry logic (#204) --- go.mod | 2 -- go.sum | 2 -- universalClient/chains/evm/rpc_client.go | 4 ++-- universalClient/chains/svm/rpc_client.go | 4 ++-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 8b4accce..b46171bb 100755 --- a/go.mod +++ b/go.mod @@ -66,7 +66,6 @@ require ( 
github.com/cosmos/ibc-apps/modules/rate-limiting/v10 v10.1.0 github.com/cosmos/ibc-go/modules/capability v1.0.1 github.com/cosmos/ibc-go/v10 v10.4.0 - github.com/decred/base58 v1.0.6 github.com/ethereum/go-ethereum v1.15.11 github.com/gagliardetto/solana-go v1.13.0 github.com/golang/mock v1.6.0 @@ -228,7 +227,6 @@ require ( github.com/danieljoos/wincred v1.2.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set v1.8.0 // indirect - github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.6.0 // indirect diff --git a/go.sum b/go.sum index fda4900f..48050699 100755 --- a/go.sum +++ b/go.sum @@ -940,8 +940,6 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= -github.com/decred/base58 v1.0.6 h1:NXndBcO+ubGZORV3EulvqeBcMuQM7doqVGa7pBhMOs4= -github.com/decred/base58 v1.0.6/go.mod h1:KR7Oh9njDPXTagD4P67KJZwroL8jT653u8CffkYqhcQ= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= diff --git a/universalClient/chains/evm/rpc_client.go b/universalClient/chains/evm/rpc_client.go index 2c486500..13e60ee6 100644 --- a/universalClient/chains/evm/rpc_client.go +++ b/universalClient/chains/evm/rpc_client.go @@ -94,6 +94,7 @@ func (rc *RPCClient) executeWithFailover(ctx context.Context, operation string, } 
maxAttempts := len(clients) + startIndex := atomic.AddUint64(&rc.index, 1) - 1 var lastErr error for attempt := 0; attempt < maxAttempts; attempt++ { if ctx != nil { @@ -104,8 +105,7 @@ func (rc *RPCClient) executeWithFailover(ctx context.Context, operation string, } } - index := atomic.AddUint64(&rc.index, 1) - 1 - client := clients[index%uint64(len(clients))] + client := clients[(startIndex+uint64(attempt))%uint64(len(clients))] if client == nil { continue diff --git a/universalClient/chains/svm/rpc_client.go b/universalClient/chains/svm/rpc_client.go index 89777cd1..4fe0e92f 100644 --- a/universalClient/chains/svm/rpc_client.go +++ b/universalClient/chains/svm/rpc_client.go @@ -110,6 +110,7 @@ func (rc *RPCClient) executeWithFailover(ctx context.Context, operation string, } maxAttempts := len(clients) + startIndex := atomic.AddUint64(&rc.index, 1) - 1 for attempt := 0; attempt < maxAttempts; attempt++ { if ctx != nil { select { @@ -119,8 +120,7 @@ func (rc *RPCClient) executeWithFailover(ctx context.Context, operation string, } } - index := atomic.AddUint64(&rc.index, 1) - 1 - client := clients[index%uint64(len(clients))] + client := clients[(startIndex+uint64(attempt))%uint64(len(clients))] if client == nil { continue From a6001cd54b78d03f9044ab7a6257891b560f048e Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 15 Apr 2026 11:43:19 +0530 Subject: [PATCH 38/61] E2E readme updated --- e2e-tests/README.md | 101 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 84 insertions(+), 17 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index ebe0dfed..8688f5f3 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -20,6 +20,44 @@ It covers: --- +## Quick testing setup + +Three commands from a clean checkout. Make sure the prerequisites below are installed first. + +**1. 
Set up `.env`** + +```bash +cp e2e-tests/.env.example e2e-tests/.env +``` + +Edit `e2e-tests/.env` and set at minimum: + +- `TESTING_ENV=LOCAL` — enables anvil + surfpool forks +- `PRIVATE_KEY=0x...` — EVM deployer key (used by forge/hardhat and mirrored into SDK `.env`) +- `SOLANA_PRIVATE_KEY=...` — only needed if you plan to run Solana SDK tests + +`FUND_TO_ADDRESS`, `EVM_PRIVATE_KEY`, `EVM_RPC`, and `PUSH_PRIVATE_KEY` are auto-derived from `PRIVATE_KEY` / `PUSH_RPC_URL` if left blank. + +**2. Bootstrap the local Push network** + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +``` + +Runs the full pipeline: starts anvil/surfpool forks, boots 4 validators + 2 universal validators, generates the TSS key, deploys core/swap/gateway contracts, submits uregistry configs, and syncs addresses into `deploy_addresses.json`. See [One-command full run](#one-command-full-run) for the detailed step list. + +**3. Set up the SDK** + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-sdk +``` + +Clones `push-chain-sdk`, writes `packages/core/.env` from your e2e `.env`, syncs the LOCALNET synthetic token addresses into the SDK's chain constants, resolves `UEA_PROXY_IMPLEMENTATION` from the local chain, and installs dependencies. + +After this you can run SDK E2E tests — see [Running SDK E2E tests](#running-sdk-e2e-tests). 
+ +--- + ## What gets created - `local-native/data/` — validator + universal-validator home directories @@ -44,10 +82,10 @@ Override any of these with env vars (`CORE_CONTRACTS_DIR`, `SWAP_AMM_DIR`, `GATE Required tools: -- `git`, `make` -- `jq` -- `node`, `npm`, `npx` +- `git`, `make`, `curl`, `jq`, `perl`, `python3`, `lsof` +- `node`, `npm`, `npx`, `yarn` - `forge`, `cast` (Foundry) +- `anvil` + `surfpool` — only for `TESTING_ENV=LOCAL` - `pchaind` and `puniversald` binaries in `build/` (built by `make build`) Build the binaries first: @@ -102,22 +140,22 @@ When set in `.env`, the `setup-environment` step (also called by `all`) does: - `anvil` for Ethereum Sepolia, Arbitrum Sepolia, Base Sepolia, BSC Testnet - `surfpool` for Solana 2. Rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to local fork URLs -3. Patches `puniversald` chain RPC config (`local-setup-e2e/data/universal-N/.puniversal/config/pushuv_config.json`) to use local fork endpoints +3. Patches `puniversald` chain RPC config (`local-native/data/universal-N/.puniversal/config/pushuv_config.json`) to use local fork endpoints Default local fork URLs (override in `.env`): -| Variable | Default | -|---|---| -| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | Anvil Sepolia host URL (used by forge/cast and chain config patch) | -| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | | -| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | | -| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | | -| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | | -| `LOCAL_SEPOLIA_UV_RPC_URL` | ← `ANVIL_SEPOLIA_HOST_RPC_URL` | RPC URL written into UV `pushuv_config.json` (can differ from host if using Docker networking) | -| `LOCAL_ARBITRUM_UV_RPC_URL` | ← `ANVIL_ARBITRUM_HOST_RPC_URL` | | -| `LOCAL_BASE_UV_RPC_URL` | ← `ANVIL_BASE_HOST_RPC_URL` | | -| `LOCAL_BSC_UV_RPC_URL` | ← `ANVIL_BSC_HOST_RPC_URL` | | -| `LOCAL_SOLANA_UV_RPC_URL` | ← `SURFPOOL_SOLANA_HOST_RPC_URL` | | +| 
Variable | Default | Description | +|---|---|---| +| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | Anvil Sepolia host URL (forge/cast + chain config patch) | +| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | Anvil Arbitrum Sepolia host URL | +| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | Anvil Base Sepolia host URL | +| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | Anvil BSC Testnet host URL | +| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | Surfpool Solana devnet host URL | +| `LOCAL_SEPOLIA_UV_RPC_URL` | ← `ANVIL_SEPOLIA_HOST_RPC_URL` | RPC written into UV `pushuv_config.json` (can differ from host if using Docker networking) | +| `LOCAL_ARBITRUM_UV_RPC_URL` | ← `ANVIL_ARBITRUM_HOST_RPC_URL` | UV-side Arbitrum RPC | +| `LOCAL_BASE_UV_RPC_URL` | ← `ANVIL_BASE_HOST_RPC_URL` | UV-side Base RPC | +| `LOCAL_BSC_UV_RPC_URL` | ← `ANVIL_BSC_HOST_RPC_URL` | UV-side BSC RPC | +| `LOCAL_SOLANA_UV_RPC_URL` | ← `SURFPOOL_SOLANA_HOST_RPC_URL` | UV-side Solana RPC | --- @@ -135,7 +173,7 @@ The `all` pipeline runs in order: 2. Build binaries (`make replace-addresses` + `make build`) 3. Auto-derive `FUND_TO_ADDRESS` from `PRIVATE_KEY` (writes to `.env`) 4. Stop any running nodes cleanly -5. `devnet` — start 4 validators + register + start 4 universal validators +5. `devnet` — start 4 validators, register 4 universal validators, start 2 (edit `./devnet start-uv N` to start more) 6. `tss-keygen` — TSS key generation (via `./local-native/devnet tss-keygen`) 7. `setup-environment` (second run — patches UV `pushuv_config.json` with `event_start_from` after devnet data exists) 8. `recover-genesis-key` — import genesis mnemonic into local keyring @@ -154,6 +192,35 @@ The `all` pipeline runs in order: 21. Sync SDK LOCALNET synthetic token constants from `deploy_addresses.json` 22. `sync-vault-tss` — sync vault TSS addresses on all local Anvil EVM chains (LOCAL only) +> `setup-sdk` is **not** included in `all`. 
Run it separately before any `sdk-test-*` command (see [Running SDK E2E tests](#running-sdk-e2e-tests)). + +--- + +## Running SDK E2E tests + +The SDK repo is cloned/installed and patched to point at the local deployment only when `setup-sdk` runs. After `all` finishes: + +```bash +# Clone push-chain-sdk, generate its .env, install deps, sync LOCALNET constants +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-sdk + +# Inbound test suite (TESTNET_DONUT → LOCALNET rewrite applied to spec files) +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-all + +# Outbound test suite (requires TESTING_ENV=LOCAL; also funds TSS signer + vault TSS sync) +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-outbound-all + +# Single inbound file +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-send-to-self +``` + +Route-2 outbound tests (`cea-to-eoa.spec.ts`) additionally require a bootstrapped CEA on the BSC testnet fork: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh bootstrap-cea-sdk +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-cea-to-eoa +``` + --- ## Local devnet (`local-native/devnet`) From c84f9a97e07348a430e794fa855025de1c921c63 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Thu, 16 Apr 2026 09:16:32 +0530 Subject: [PATCH 39/61] feat: added new usdc configs for eth, base, arb sepolia chains --- config/testnet-donut/arb_sepolia/chain.json | 6 ++++++ config/testnet-donut/arb_sepolia/tokens/usdc.json | 4 ++-- .../testnet-donut/arb_sepolia/tokens/usdc.old.json | 14 ++++++++++++++ config/testnet-donut/base_sepolia/chain.json | 8 +++++++- config/testnet-donut/base_sepolia/tokens/usdc.json | 4 ++-- .../base_sepolia/tokens/usdc.old.json | 14 ++++++++++++++ config/testnet-donut/bsc_testnet/chain.json | 6 ++++++ config/testnet-donut/eth_sepolia/chain.json | 6 ++++++ config/testnet-donut/eth_sepolia/tokens/eth.json | 2 +- config/testnet-donut/eth_sepolia/tokens/usdc.json | 4 ++-- .../testnet-donut/eth_sepolia/tokens/usdc.old.json | 14 ++++++++++++++ 11 files 
changed, 74 insertions(+), 8 deletions(-) create mode 100644 config/testnet-donut/arb_sepolia/tokens/usdc.old.json create mode 100644 config/testnet-donut/base_sepolia/tokens/usdc.old.json create mode 100644 config/testnet-donut/eth_sepolia/tokens/usdc.old.json diff --git a/config/testnet-donut/arb_sepolia/chain.json b/config/testnet-donut/arb_sepolia/chain.json index 82efffc8..5930c187 100644 --- a/config/testnet-donut/arb_sepolia/chain.json +++ b/config/testnet-donut/arb_sepolia/chain.json @@ -40,6 +40,12 @@ "identifier": "0x", "event_identifier": "0xb689a5db58af5de77bfea50b6d5844e1c1aeed8b24edd7996a9f8b18ac133819", "confirmation_type": 1 + }, + { + "name": "rescueFunds", + "identifier": "0x", + "event_identifier": "0x25a3527f55f5a35edc28d8df3c716bcd0f3a42c4d82103e716d4ae8263a95e0f", + "confirmation_type": 1 } ], "enabled": { diff --git a/config/testnet-donut/arb_sepolia/tokens/usdc.json b/config/testnet-donut/arb_sepolia/tokens/usdc.json index 44097c35..ba2773c1 100644 --- a/config/testnet-donut/arb_sepolia/tokens/usdc.json +++ b/config/testnet-donut/arb_sepolia/tokens/usdc.json @@ -1,6 +1,6 @@ { "chain": "eip155:421614", - "address": "0x75faf114eafb1BDbe2F0316DF893fd58CE46AA4d", + "address": "0x5dd39b0b3610F666F631a6506b7713EF83e1Ac5C", "name": "USDC.arb", "symbol": "USDC.arb", "decimals": 6, @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0xa261A10e94aE4bA88EE8c5845CbE7266bD679DD6" + "contract_address": "0x1091cCBA2FF8d2A131AE4B35e34cf3308C48572C" } } \ No newline at end of file diff --git a/config/testnet-donut/arb_sepolia/tokens/usdc.old.json b/config/testnet-donut/arb_sepolia/tokens/usdc.old.json new file mode 100644 index 00000000..0ad0e798 --- /dev/null +++ b/config/testnet-donut/arb_sepolia/tokens/usdc.old.json @@ -0,0 +1,14 @@ +{ + "chain": "eip155:421614", + "address": "0x75faf114eafb1BDbe2F0316DF893fd58CE46AA4d", + "name": "USDC.arb.old", + "symbol": "USDC.arb.old", + "decimals": 6, + "enabled": true, + 
"liquidity_cap": "1000000000000000000000000", + "token_type": 1, + "native_representation": { + "denom": "", + "contract_address": "0xa261A10e94aE4bA88EE8c5845CbE7266bD679DD6" + } +} \ No newline at end of file diff --git a/config/testnet-donut/base_sepolia/chain.json b/config/testnet-donut/base_sepolia/chain.json index b009339f..db1dfda1 100644 --- a/config/testnet-donut/base_sepolia/chain.json +++ b/config/testnet-donut/base_sepolia/chain.json @@ -40,10 +40,16 @@ "identifier": "0x", "event_identifier": "0xb689a5db58af5de77bfea50b6d5844e1c1aeed8b24edd7996a9f8b18ac133819", "confirmation_type": 1 + }, + { + "name": "rescueFunds", + "identifier": "0x", + "event_identifier": "0x25a3527f55f5a35edc28d8df3c716bcd0f3a42c4d82103e716d4ae8263a95e0f", + "confirmation_type": 1 } ], "enabled": { "isInboundEnabled": true, - "isOutboundEnabled": true + "isOutboundEnabled": false } } \ No newline at end of file diff --git a/config/testnet-donut/base_sepolia/tokens/usdc.json b/config/testnet-donut/base_sepolia/tokens/usdc.json index 08df6da1..5b60c4f8 100644 --- a/config/testnet-donut/base_sepolia/tokens/usdc.json +++ b/config/testnet-donut/base_sepolia/tokens/usdc.json @@ -1,6 +1,6 @@ { "chain": "eip155:84532", - "address": "0x036CbD53842c5426634e7929541eC2318f3dCF7e", + "address": "0x5c3504F0E3bA28FDc1F74234fE936518276AaBB8", "name": "USDC.base", "symbol": "USDC.base", "decimals": 6, @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x84B62e44F667F692F7739Ca6040cD17DA02068A8" + "contract_address": "0xD7C6cA1e2c0CE260BE0c0AD39C1540de460e3Be1" } } \ No newline at end of file diff --git a/config/testnet-donut/base_sepolia/tokens/usdc.old.json b/config/testnet-donut/base_sepolia/tokens/usdc.old.json new file mode 100644 index 00000000..d461124b --- /dev/null +++ b/config/testnet-donut/base_sepolia/tokens/usdc.old.json @@ -0,0 +1,14 @@ +{ + "chain": "eip155:84532", + "address": "0x036CbD53842c5426634e7929541eC2318f3dCF7e", + "name": 
"USDC.base.old", + "symbol": "USDC.base.old", + "decimals": 6, + "enabled": true, + "liquidity_cap": "1000000000000000000000000", + "token_type": 1, + "native_representation": { + "denom": "", + "contract_address": "0x84B62e44F667F692F7739Ca6040cD17DA02068A8" + } +} \ No newline at end of file diff --git a/config/testnet-donut/bsc_testnet/chain.json b/config/testnet-donut/bsc_testnet/chain.json index 9d6553cf..6e2d4900 100644 --- a/config/testnet-donut/bsc_testnet/chain.json +++ b/config/testnet-donut/bsc_testnet/chain.json @@ -40,6 +40,12 @@ "identifier": "0x", "event_identifier": "0xb689a5db58af5de77bfea50b6d5844e1c1aeed8b24edd7996a9f8b18ac133819", "confirmation_type": 1 + }, + { + "name": "rescueFunds", + "identifier": "0x", + "event_identifier": "0x25a3527f55f5a35edc28d8df3c716bcd0f3a42c4d82103e716d4ae8263a95e0f", + "confirmation_type": 1 } ], "enabled": { diff --git a/config/testnet-donut/eth_sepolia/chain.json b/config/testnet-donut/eth_sepolia/chain.json index 4744bb09..90fcc47d 100644 --- a/config/testnet-donut/eth_sepolia/chain.json +++ b/config/testnet-donut/eth_sepolia/chain.json @@ -40,6 +40,12 @@ "identifier": "0x", "event_identifier": "0xb689a5db58af5de77bfea50b6d5844e1c1aeed8b24edd7996a9f8b18ac133819", "confirmation_type": 1 + }, + { + "name": "rescueFunds", + "identifier": "0x", + "event_identifier": "0x25a3527f55f5a35edc28d8df3c716bcd0f3a42c4d82103e716d4ae8263a95e0f", + "confirmation_type": 1 } ], "enabled": { diff --git a/config/testnet-donut/eth_sepolia/tokens/eth.json b/config/testnet-donut/eth_sepolia/tokens/eth.json index 602011ca..435e1e01 100644 --- a/config/testnet-donut/eth_sepolia/tokens/eth.json +++ b/config/testnet-donut/eth_sepolia/tokens/eth.json @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x90F4A15601E08570D6fFbaE883C44BDB85bDb7d1" + "contract_address": "0x2971824Db68229D087931155C2b8bB820B275809" } } diff --git a/config/testnet-donut/eth_sepolia/tokens/usdc.json 
b/config/testnet-donut/eth_sepolia/tokens/usdc.json index 7c071628..52ab8f5d 100644 --- a/config/testnet-donut/eth_sepolia/tokens/usdc.json +++ b/config/testnet-donut/eth_sepolia/tokens/usdc.json @@ -1,6 +1,6 @@ { "chain": "eip155:11155111", - "address": "0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238", + "address": "0x97F477B7f970D47a87B42869ceeace218106152a", "name": "USDC.eth", "symbol": "USDC.eth", "decimals": 6, @@ -9,6 +9,6 @@ "token_type": 1, "native_representation": { "denom": "", - "contract_address": "0x387b9C8Db60E74999aAAC5A2b7825b400F12d68E" + "contract_address": "0x7A58048036206bB898008b5bBDA85697DB1e5d66" } } diff --git a/config/testnet-donut/eth_sepolia/tokens/usdc.old.json b/config/testnet-donut/eth_sepolia/tokens/usdc.old.json new file mode 100644 index 00000000..2866472e --- /dev/null +++ b/config/testnet-donut/eth_sepolia/tokens/usdc.old.json @@ -0,0 +1,14 @@ +{ + "chain": "eip155:11155111", + "address": "0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238", + "name": "USDC.eth.old", + "symbol": "USDC.eth.old", + "decimals": 6, + "enabled": true, + "liquidity_cap": "1000000000000000000000000", + "token_type": 1, + "native_representation": { + "denom": "", + "contract_address": "0x387b9C8Db60E74999aAAC5A2b7825b400F12d68E" + } +} From 6f764e5e4fe4c5713d3b85a82cde36cc6bc5f0f5 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Thu, 16 Apr 2026 09:55:20 +0530 Subject: [PATCH 40/61] feat: MsgExecutePayload deploys UEA if UEA address has non-zero balance --- x/uexecutor/keeper/msg_execute_payload.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/x/uexecutor/keeper/msg_execute_payload.go b/x/uexecutor/keeper/msg_execute_payload.go index c3c57a2b..946b1e12 100644 --- a/x/uexecutor/keeper/msg_execute_payload.go +++ b/x/uexecutor/keeper/msg_execute_payload.go @@ -7,6 +7,7 @@ import ( "cosmossdk.io/errors" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" + pchaintypes 
"github.com/pushchain/push-chain-node/types" "github.com/pushchain/push-chain-node/utils" "github.com/pushchain/push-chain-node/x/uexecutor/types" ) @@ -54,8 +55,26 @@ func (k Keeper) ExecutePayload(ctx context.Context, evmFrom common.Address, univ } if !isDeployed { - k.Logger().Warn("execute payload rejected: UEA not deployed", "chain", caip2Identifier, "owner", universalAccountId.Owner) - return fmt.Errorf("UEA is not deployed") + // only deploy if the UEA address has funds and not deployed yet + ueaAccAddr := sdk.AccAddress(ueaAddr.Bytes()) + balance := k.bankKeeper.GetBalance(sdkCtx, ueaAccAddr, pchaintypes.BaseDenom) + if balance.Amount.Sign() == 0 { + k.Logger().Warn("execute payload rejected: UEA not deployed and has no balance", + "chain", caip2Identifier, + "owner", universalAccountId.Owner, + ) + return fmt.Errorf("UEA is not deployed") + } + + k.Logger().Info("auto-deploying UEA before execute (pre-funded address)", + "uea", ueaAddr.Hex(), + "balance", balance.Amount.String(), + "chain", caip2Identifier, + "owner", universalAccountId.Owner, + ) + if _, err := k.DeployUEAV2(ctx, evmFrom, universalAccountId); err != nil { + return errors.Wrapf(err, "failed to auto-deploy pre-funded UEA") + } } k.Logger().Debug("executing payload via UEA", From 286fe3b44151cc82ea261eafe3960d50af376e79 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Thu, 16 Apr 2026 09:55:49 +0530 Subject: [PATCH 41/61] tests: added integration tests for UEA deployment edge case in MsgExecutePayload --- .../uexecutor/execute_payload_test.go | 174 ++++++++++++++++++ 1 file changed, 174 insertions(+) diff --git a/test/integration/uexecutor/execute_payload_test.go b/test/integration/uexecutor/execute_payload_test.go index c7d84d64..3a1cf313 100644 --- a/test/integration/uexecutor/execute_payload_test.go +++ b/test/integration/uexecutor/execute_payload_test.go @@ -172,3 +172,177 @@ func TestExecutePayload(t *testing.T) { }) } + +// TestExecutePayload_AutoDeployOnPreFundedAddress exercises the 
griefing-recovery path: +// when a non-deployed UEA address already holds a non-zero native balance (e.g. because +// an attacker front-ran with a dust deposit to the precomputed address), MsgExecutePayload +// should auto-deploy the UEA before running the payload, instead of rejecting the tx and +// leaving the owner unable to deploy. +func TestExecutePayload_AutoDeployOnPreFundedAddress(t *testing.T) { + app, ctx, _ := utils.SetAppWithValidators(t) + + chainConfigTest := uregistrytypes.ChainConfig{ + Chain: "eip155:11155111", + VmType: uregistrytypes.VmType_EVM, + PublicRpcUrl: "https://sepolia.drpc.org", + GatewayAddress: "0x28E0F09bE2321c1420Dc60Ee146aACbD68B335Fe", + BlockConfirmation: &uregistrytypes.BlockConfirmation{ + FastInbound: 5, + StandardInbound: 12, + }, + GatewayMethods: []*uregistrytypes.GatewayMethods{&uregistrytypes.GatewayMethods{ + Name: "addFunds", + Identifier: "", + EventIdentifier: "0xb28f49668e7e76dc96d7aabe5b7f63fecfbd1c3574774c05e8204e749fd96fbd", + }}, + Enabled: &uregistrytypes.ChainEnabled{ + IsInboundEnabled: true, + IsOutboundEnabled: true, + }, + } + app.UregistryKeeper.AddChainConfig(ctx, &chainConfigTest) + + params := app.FeeMarketKeeper.GetParams(ctx) + params.BaseFee = math.LegacyNewDec(1000000000) + app.FeeMarketKeeper.SetParams(ctx, params) + + ms := uexecutorkeeper.NewMsgServerImpl(app.UexecutorKeeper) + + // Same fixture as TestExecutePayload/Success! — owner has a pre-signed verificationData + // for this exact payload+nonce, so the execute step can succeed end-to-end. 
+ validUA := &uexecutortypes.UniversalAccountId{ + ChainNamespace: "eip155", + ChainId: "11155111", + Owner: "0x778d3206374f8ac265728e18e3fe2ae6b93e4ce4", + } + validUP := &uexecutortypes.UniversalPayload{ + To: "0x527F3692F5C53CfA83F7689885995606F93b6164", + Value: "0", + Data: "0x2ba2ed980000000000000000000000000000000000000000000000000000000000000312", + GasLimit: "21000000", + MaxFeePerGas: "1000000000", + MaxPriorityFeePerGas: "200000000", + Nonce: "1", + Deadline: "0", + VType: uexecutortypes.VerificationType(0), + } + + evmFrom := common.HexToAddress("0x1000000000000000000000000000000000000001") + err := app.BankKeeper.MintCoins( + ctx, + uexecutortypes.ModuleName, + sdk.NewCoins(sdk.NewCoin(types.BaseDenom, sdkmath.NewInt(2_000_000_000_000_000))), + ) + require.NoError(t, err) + + err = app.BankKeeper.SendCoinsFromModuleToAccount( + ctx, + uexecutortypes.ModuleName, + sdk.AccAddress(evmFrom.Bytes()), + sdk.NewCoins(sdk.NewCoin(types.BaseDenom, sdkmath.NewInt(1_000_000_000_000_000))), + ) + require.NoError(t, err) + + // Precompute the UEA address WITHOUT deploying — this is the attacker-grief setup. + factoryAddr := utils.GetDefaultAddresses().FactoryAddr + ueaAddr, isDeployed, err := app.UexecutorKeeper.CallFactoryToGetUEAAddressForOrigin(ctx, evmFrom, factoryAddr, validUA) + require.NoError(t, err) + require.False(t, isDeployed, "precondition: UEA must not be deployed before the test call") + + // "Attacker" pre-funds the precomputed UEA address. This is what would confuse a + // balance-based SDK into routing to MsgExecutePayload instead of the deploy msg. + err = app.BankKeeper.SendCoinsFromModuleToAccount( + ctx, + uexecutortypes.ModuleName, + sdk.AccAddress(ueaAddr.Bytes()), + sdk.NewCoins(sdk.NewCoin(types.BaseDenom, sdkmath.NewInt(1_000_000_000_000_000))), + ) + require.NoError(t, err) + + // Submit MsgExecutePayload directly — no standalone DeployUEAV2 call beforehand. 
+ msg := &uexecutortypes.MsgExecutePayload{ + Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", + UniversalAccountId: validUA, + UniversalPayload: validUP, + VerificationData: "0x91987784d56359fa91c3e3e0332f4f0cffedf9c081eb12874a63b41d5b5e5c660dc827947c2ae26e658d0551ad4b2d2aa073d62691429a0ae239d2cc58055bf11c", + } + + _, err = ms.ExecutePayload(ctx, msg) + require.NoError(t, err, "auto-deploy + execute should succeed when precomputed UEA holds balance") + + // Post-condition: the UEA must now be deployed. + _, isDeployed, err = app.UexecutorKeeper.CallFactoryToGetUEAAddressForOrigin(ctx, evmFrom, factoryAddr, validUA) + require.NoError(t, err) + require.True(t, isDeployed, "UEA must be deployed after auto-deploy path runs successfully") +} + +// TestExecutePayload_RejectWhenUndeployedAndUnfunded asserts the rejection arm of the +// auto-deploy logic: when the UEA is not deployed AND has zero native balance, there is +// no griefing to recover from, so MsgExecutePayload must still reject with the existing +// "UEA is not deployed" error rather than deploying on-demand for free. 
+func TestExecutePayload_RejectWhenUndeployedAndUnfunded(t *testing.T) { + app, ctx, _ := utils.SetAppWithValidators(t) + + chainConfigTest := uregistrytypes.ChainConfig{ + Chain: "eip155:11155111", + VmType: uregistrytypes.VmType_EVM, + PublicRpcUrl: "https://sepolia.drpc.org", + GatewayAddress: "0x28E0F09bE2321c1420Dc60Ee146aACbD68B335Fe", + BlockConfirmation: &uregistrytypes.BlockConfirmation{ + FastInbound: 5, + StandardInbound: 12, + }, + GatewayMethods: []*uregistrytypes.GatewayMethods{&uregistrytypes.GatewayMethods{ + Name: "addFunds", + Identifier: "", + EventIdentifier: "0xb28f49668e7e76dc96d7aabe5b7f63fecfbd1c3574774c05e8204e749fd96fbd", + }}, + Enabled: &uregistrytypes.ChainEnabled{ + IsInboundEnabled: true, + IsOutboundEnabled: true, + }, + } + app.UregistryKeeper.AddChainConfig(ctx, &chainConfigTest) + + params := app.FeeMarketKeeper.GetParams(ctx) + params.BaseFee = math.LegacyNewDec(1000000000) + app.FeeMarketKeeper.SetParams(ctx, params) + + ms := uexecutorkeeper.NewMsgServerImpl(app.UexecutorKeeper) + + // Distinct owner — keeps the UEA address disjoint from any other test fixture and + // ensures neither deploy nor balance exists for this address in fresh state. + validUA := &uexecutortypes.UniversalAccountId{ + ChainNamespace: "eip155", + ChainId: "11155111", + Owner: "0x1111111111111111111111111111111111111111", + } + // Payload and verificationData are well-formed (pass early validation) but the + // signature does not need to be valid: the handler must reject at the deploy gate, + // well before signature verification, so we never hit the UEA contract. 
+ validUP := &uexecutortypes.UniversalPayload{ + To: "0x527F3692F5C53CfA83F7689885995606F93b6164", + Value: "0", + Data: "0x2ba2ed980000000000000000000000000000000000000000000000000000000000000312", + GasLimit: "21000000", + MaxFeePerGas: "1000000000", + MaxPriorityFeePerGas: "200000000", + Nonce: "1", + Deadline: "0", + VType: uexecutortypes.VerificationType(0), + } + + msg := &uexecutortypes.MsgExecutePayload{ + Signer: "cosmos1xpurwdecvsenyvpkxvmnge3cv93nyd34xuersef38pjnxen9xfsk2dnz8yek2drrv56qmn2ak9", + UniversalAccountId: validUA, + UniversalPayload: validUP, + VerificationData: "0x1234", + } + + _, err := ms.ExecutePayload(ctx, msg) + // "UEA is not deployed" is the gate that fires *before* any auto-deploy attempt. + // Any other error string (e.g. signature-verification revert) would indicate that + // the handler stealth-deployed the UEA and then ran the payload — which must not + // happen when the address has zero balance. + require.ErrorContains(t, err, "UEA is not deployed") +} From ecc5bbde5415472210b19a86c810b133663be6ad Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:11:43 +0530 Subject: [PATCH 42/61] feat: added proto changes in FundMigration event --- proto/utss/v1/types.proto | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/proto/utss/v1/types.proto b/proto/utss/v1/types.proto index beb9ea08..7084065d 100644 --- a/proto/utss/v1/types.proto +++ b/proto/utss/v1/types.proto @@ -103,5 +103,6 @@ message FundMigration { int64 completed_block = 9; string tx_hash = 10; string gas_price = 11; // gas price from oracle (wei) - uint64 gas_limit = 12; // gas limit for native transfer (21000) + uint64 gas_limit = 12; // gas limit sourced from UniversalCore per chain namespace + string l1_gas_fee = 13; // L1 data-availability fee (wei) from UniversalCore; 0 for non-L2 chains } From 180488f4f033b0d302e9afc4f0c739056e79a0a4 Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:12:06 +0530 Subject: [PATCH 
43/61] refactor: added generated protobuf --- api/utss/v1/types.pulsar.go | 164 ++++++++++++++++++++++++++---------- 1 file changed, 119 insertions(+), 45 deletions(-) diff --git a/api/utss/v1/types.pulsar.go b/api/utss/v1/types.pulsar.go index aae1b865..f9694e7c 100644 --- a/api/utss/v1/types.pulsar.go +++ b/api/utss/v1/types.pulsar.go @@ -2882,6 +2882,7 @@ var ( fd_FundMigration_tx_hash protoreflect.FieldDescriptor fd_FundMigration_gas_price protoreflect.FieldDescriptor fd_FundMigration_gas_limit protoreflect.FieldDescriptor + fd_FundMigration_l1_gas_fee protoreflect.FieldDescriptor ) func init() { @@ -2899,6 +2900,7 @@ func init() { fd_FundMigration_tx_hash = md_FundMigration.Fields().ByName("tx_hash") fd_FundMigration_gas_price = md_FundMigration.Fields().ByName("gas_price") fd_FundMigration_gas_limit = md_FundMigration.Fields().ByName("gas_limit") + fd_FundMigration_l1_gas_fee = md_FundMigration.Fields().ByName("l1_gas_fee") } var _ protoreflect.Message = (*fastReflection_FundMigration)(nil) @@ -3038,6 +3040,12 @@ func (x *fastReflection_FundMigration) Range(f func(protoreflect.FieldDescriptor return } } + if x.L1GasFee != "" { + value := protoreflect.ValueOfString(x.L1GasFee) + if !f(fd_FundMigration_l1_gas_fee, value) { + return + } + } } // Has reports whether a field is populated. 
@@ -3077,6 +3085,8 @@ func (x *fastReflection_FundMigration) Has(fd protoreflect.FieldDescriptor) bool return x.GasPrice != "" case "utss.v1.FundMigration.gas_limit": return x.GasLimit != uint64(0) + case "utss.v1.FundMigration.l1_gas_fee": + return x.L1GasFee != "" default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3117,6 +3127,8 @@ func (x *fastReflection_FundMigration) Clear(fd protoreflect.FieldDescriptor) { x.GasPrice = "" case "utss.v1.FundMigration.gas_limit": x.GasLimit = uint64(0) + case "utss.v1.FundMigration.l1_gas_fee": + x.L1GasFee = "" default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3169,6 +3181,9 @@ func (x *fastReflection_FundMigration) Get(descriptor protoreflect.FieldDescript case "utss.v1.FundMigration.gas_limit": value := x.GasLimit return protoreflect.ValueOfUint64(value) + case "utss.v1.FundMigration.l1_gas_fee": + value := x.L1GasFee + return protoreflect.ValueOfString(value) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3213,6 +3228,8 @@ func (x *fastReflection_FundMigration) Set(fd protoreflect.FieldDescriptor, valu x.GasPrice = value.Interface().(string) case "utss.v1.FundMigration.gas_limit": x.GasLimit = value.Uint() + case "utss.v1.FundMigration.l1_gas_fee": + x.L1GasFee = value.Interface().(string) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3257,6 +3274,8 @@ func (x *fastReflection_FundMigration) Mutable(fd protoreflect.FieldDescriptor) panic(fmt.Errorf("field gas_price of message utss.v1.FundMigration is not mutable")) case "utss.v1.FundMigration.gas_limit": panic(fmt.Errorf("field gas_limit of message utss.v1.FundMigration is not mutable")) + case "utss.v1.FundMigration.l1_gas_fee": + 
panic(fmt.Errorf("field l1_gas_fee of message utss.v1.FundMigration is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3294,6 +3313,8 @@ func (x *fastReflection_FundMigration) NewField(fd protoreflect.FieldDescriptor) return protoreflect.ValueOfString("") case "utss.v1.FundMigration.gas_limit": return protoreflect.ValueOfUint64(uint64(0)) + case "utss.v1.FundMigration.l1_gas_fee": + return protoreflect.ValueOfString("") default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: utss.v1.FundMigration")) @@ -3406,6 +3427,10 @@ func (x *fastReflection_FundMigration) ProtoMethods() *protoiface.Methods { if x.GasLimit != 0 { n += 1 + runtime.Sov(uint64(x.GasLimit)) } + l = len(x.L1GasFee) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -3435,6 +3460,13 @@ func (x *fastReflection_FundMigration) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + if len(x.L1GasFee) > 0 { + i -= len(x.L1GasFee) + copy(dAtA[i:], x.L1GasFee) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.L1GasFee))) + i-- + dAtA[i] = 0x6a + } if x.GasLimit != 0 { i = runtime.EncodeVarint(dAtA, i, uint64(x.GasLimit)) i-- @@ -3877,6 +3909,38 @@ func (x *fastReflection_FundMigration) ProtoMethods() *protoiface.Methods { break } } + case 13: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field L1GasFee", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.L1GasFee = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -4481,8 +4545,9 @@ type FundMigration struct { InitiatedBlock int64 `protobuf:"varint,8,opt,name=initiated_block,json=initiatedBlock,proto3" json:"initiated_block,omitempty"` CompletedBlock int64 `protobuf:"varint,9,opt,name=completed_block,json=completedBlock,proto3" json:"completed_block,omitempty"` TxHash string `protobuf:"bytes,10,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` - GasPrice string `protobuf:"bytes,11,opt,name=gas_price,json=gasPrice,proto3" json:"gas_price,omitempty"` // gas price from oracle (wei) - GasLimit uint64 `protobuf:"varint,12,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` // gas limit for native transfer (21000) + GasPrice string `protobuf:"bytes,11,opt,name=gas_price,json=gasPrice,proto3" json:"gas_price,omitempty"` // gas price from oracle (wei) + GasLimit uint64 `protobuf:"varint,12,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` // gas limit sourced from UniversalCore per chain namespace + L1GasFee string `protobuf:"bytes,13,opt,name=l1_gas_fee,json=l1GasFee,proto3" json:"l1_gas_fee,omitempty"` // L1 data-availability fee (wei) from UniversalCore; 0 for non-L2 chains } func (x *FundMigration) Reset() { @@ 
-4589,6 +4654,13 @@ func (x *FundMigration) GetGasLimit() uint64 { return 0 } +func (x *FundMigration) GetL1GasFee() string { + if x != nil { + return x.L1GasFee + } + return "" +} + var File_utss_v1_types_proto protoreflect.FileDescriptor var file_utss_v1_types_proto_rawDesc = []byte{ @@ -4660,7 +4732,7 @@ var file_utss_v1_types_proto_rawDesc = []byte{ 0x69, 0x67, 0x68, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x73, 0x73, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x74, 0x73, 0x73, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x22, 0xa8, 0x03, 0x0a, 0x0d, 0x46, + 0x09, 0x74, 0x73, 0x73, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x22, 0xc6, 0x03, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x6f, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, @@ -4687,48 +4759,50 @@ var file_utss_v1_types_proto_rawDesc = []byte{ 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x6b, 0x0a, 0x13, 0x54, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, - 0x54, 0x53, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, - 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x53, 0x53, - 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x53, 0x55, 0x43, - 0x43, 0x45, 0x53, 
0x53, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x53, 0x53, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x10, 0x02, 0x2a, 0x60, 0x0a, 0x0e, 0x54, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x53, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, - 0x45, 0x53, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x47, 0x45, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x53, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x52, 0x45, 0x46, 0x52, - 0x45, 0x53, 0x48, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x53, 0x53, 0x5f, 0x50, 0x52, 0x4f, - 0x43, 0x45, 0x53, 0x53, 0x5f, 0x51, 0x55, 0x4f, 0x52, 0x55, 0x4d, 0x5f, 0x43, 0x48, 0x41, 0x4e, - 0x47, 0x45, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x54, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, - 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, - 0x10, 0x01, 0x2a, 0x56, 0x0a, 0x0e, 0x54, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x53, - 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x13, 0x46, 0x75, - 0x6e, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x21, 0x0a, 0x1d, 0x46, 0x55, 0x4e, 0x44, 
0x5f, 0x4d, 0x49, 0x47, 0x52, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x46, 0x55, 0x4e, 0x44, 0x5f, 0x4d, 0x49, 0x47, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, - 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x46, 0x55, 0x4e, - 0x44, 0x5f, 0x4d, 0x49, 0x47, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x42, 0x8f, 0x01, 0x0a, 0x0b, - 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x74, 0x73, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2f, - 0x70, 0x75, 0x73, 0x68, 0x2d, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x75, 0x74, 0x73, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x74, 0x73, 0x73, - 0x76, 0x31, 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x55, 0x74, 0x73, 0x73, 0x2e, - 0x56, 0x31, 0xca, 0x02, 0x07, 0x55, 0x74, 0x73, 0x73, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x13, 0x55, - 0x74, 0x73, 0x73, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0xea, 0x02, 0x08, 0x55, 0x74, 0x73, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x0a, 0x6c, 0x31, 0x5f, 0x67, 0x61, 0x73, 0x5f, + 0x66, 0x65, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x31, 0x47, 0x61, 0x73, + 0x46, 0x65, 0x65, 0x2a, 0x6b, 0x0a, 0x13, 0x54, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x53, + 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x43, 
0x45, 0x53, 0x53, 0x5f, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x53, 0x53, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x53, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, + 0x2a, 0x60, 0x0a, 0x0e, 0x54, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x53, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, + 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x47, 0x45, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x53, + 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, + 0x48, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x53, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, + 0x53, 0x53, 0x5f, 0x51, 0x55, 0x4f, 0x52, 0x55, 0x4d, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, + 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x54, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, + 0x2a, 0x56, 0x0a, 0x0e, 0x54, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x53, 0x53, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x53, 0x53, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x45, + 
0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x13, 0x46, 0x75, 0x6e, 0x64, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x21, 0x0a, 0x1d, 0x46, 0x55, 0x4e, 0x44, 0x5f, 0x4d, 0x49, 0x47, 0x52, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, + 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x46, 0x55, 0x4e, 0x44, 0x5f, 0x4d, 0x49, 0x47, 0x52, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x46, 0x55, 0x4e, 0x44, 0x5f, + 0x4d, 0x49, 0x47, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x42, 0x8f, 0x01, 0x0a, 0x0b, 0x63, 0x6f, + 0x6d, 0x2e, 0x75, 0x74, 0x73, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x73, 0x68, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2f, 0x70, 0x75, + 0x73, 0x68, 0x2d, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x75, 0x74, 0x73, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x74, 0x73, 0x73, 0x76, 0x31, + 0xa2, 0x02, 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x55, 0x74, 0x73, 0x73, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x07, 0x55, 0x74, 0x73, 0x73, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x13, 0x55, 0x74, 0x73, + 0x73, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x08, 0x55, 0x74, 0x73, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( From c1f6abd37fe8c13a4c3448ce327d82f798cda88d Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:12:27 +0530 Subject: [PATCH 44/61] feat: added abi changes for new mappings in UVCore --- 
x/uexecutor/types/abi.go | 44 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/x/uexecutor/types/abi.go b/x/uexecutor/types/abi.go index 8cb59cc0..38503208 100644 --- a/x/uexecutor/types/abi.go +++ b/x/uexecutor/types/abi.go @@ -325,6 +325,50 @@ const UNIVERSAL_CORE_ABI = `[ "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], "stateMutability": "view" }, + { + "type": "function", + "name": "l1GasFeeByChainNamespace", + "inputs": [{ "name": "", "type": "string", "internalType": "string" }], + "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], + "stateMutability": "view" + }, + { + "type": "function", + "name": "tssFundMigrationGasLimitByChainNamespace", + "inputs": [{ "name": "", "type": "string", "internalType": "string" }], + "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], + "stateMutability": "view" + }, + { + "type": "function", + "name": "grantRole", + "inputs": [ + { "name": "role", "type": "bytes32", "internalType": "bytes32" }, + { "name": "account", "type": "address", "internalType": "address" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setL1GasFeeByChain", + "inputs": [ + { "name": "chainNamespace", "type": "string", "internalType": "string" }, + { "name": "l1GasFee", "type": "uint256", "internalType": "uint256" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setTssFundMigrationGasLimitByChain", + "inputs": [ + { "name": "chainNamespace", "type": "string", "internalType": "string" }, + { "name": "gasLimit", "type": "uint256", "internalType": "uint256" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "timestampObservedAtByChainNamespace", From cbdb33f74e7d7428772d3d5dc55046187505b88a Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:13:38 +0530 Subject: [PATCH 45/61] feat: added 
changes in MsgInitiateFundMigration for tssFundMigrationGasLimit mapping --- x/uexecutor/keeper/evm.go | 49 ++++++++++++++++++++ x/utss/keeper/msg_initiate_fund_migration.go | 24 ++++++++-- x/utss/types/events.go | 2 + 3 files changed, 70 insertions(+), 5 deletions(-) diff --git a/x/uexecutor/keeper/evm.go b/x/uexecutor/keeper/evm.go index 4ab68c40..a399f365 100644 --- a/x/uexecutor/keeper/evm.go +++ b/x/uexecutor/keeper/evm.go @@ -410,6 +410,55 @@ func (k Keeper) GetGasPriceByChain(ctx sdk.Context, chainNamespace string) (*big return results[0].(*big.Int), nil } +// GetL1GasFeeByChain reads the L1 gas fee (in gas-token units) for a chain from UniversalCore. +// This is the data-availability fee added on top of L2 execution for chains like Optimism/Base. +func (k Keeper) GetL1GasFeeByChain(ctx sdk.Context, chainNamespace string) (*big.Int, error) { + handlerAddr := common.HexToAddress(uregistrytypes.SYSTEM_CONTRACTS["UNIVERSAL_CORE"].Address) + + abi, err := types.ParseUniversalCoreABI() + if err != nil { + return nil, errors.Wrap(err, "failed to parse UniversalCore ABI") + } + + ueModuleAccAddress, _ := k.GetUeModuleAddress(ctx) + + receipt, err := k.evmKeeper.CallEVM(ctx, abi, ueModuleAccAddress, handlerAddr, false, "l1GasFeeByChainNamespace", chainNamespace) + if err != nil { + return nil, errors.Wrap(err, "failed to call l1GasFeeByChainNamespace") + } + + results, err := abi.Methods["l1GasFeeByChainNamespace"].Outputs.Unpack(receipt.Ret) + if err != nil { + return nil, errors.Wrap(err, "failed to unpack l1GasFeeByChainNamespace result") + } + + return results[0].(*big.Int), nil +} + +// GetTssFundMigrationGasLimitByChain reads the TSS fund-migration gas limit for a chain from UniversalCore. 
+func (k Keeper) GetTssFundMigrationGasLimitByChain(ctx sdk.Context, chainNamespace string) (*big.Int, error) { + handlerAddr := common.HexToAddress(uregistrytypes.SYSTEM_CONTRACTS["UNIVERSAL_CORE"].Address) + + abi, err := types.ParseUniversalCoreABI() + if err != nil { + return nil, errors.Wrap(err, "failed to parse UniversalCore ABI") + } + + ueModuleAccAddress, _ := k.GetUeModuleAddress(ctx) + + receipt, err := k.evmKeeper.CallEVM(ctx, abi, ueModuleAccAddress, handlerAddr, false, "tssFundMigrationGasLimitByChainNamespace", chainNamespace) + if err != nil { + return nil, errors.Wrap(err, "failed to call tssFundMigrationGasLimitByChainNamespace") + } + + results, err := abi.Methods["tssFundMigrationGasLimitByChainNamespace"].Outputs.Unpack(receipt.Ret) + if err != nil { + return nil, errors.Wrap(err, "failed to unpack tssFundMigrationGasLimitByChainNamespace result") + } + + return results[0].(*big.Int), nil +} + // GetUniversalCoreQuoterAddress reads the uniswapV3Quoter address stored in UniversalCore. func (k Keeper) GetUniversalCoreQuoterAddress(ctx sdk.Context) (common.Address, error) { handlerAddr := common.HexToAddress(uregistrytypes.SYSTEM_CONTRACTS["UNIVERSAL_CORE"].Address) diff --git a/x/utss/keeper/msg_initiate_fund_migration.go b/x/utss/keeper/msg_initiate_fund_migration.go index 14fb51ed..c24fbf36 100644 --- a/x/utss/keeper/msg_initiate_fund_migration.go +++ b/x/utss/keeper/msg_initiate_fund_migration.go @@ -8,8 +8,6 @@ import ( "github.com/pushchain/push-chain-node/x/utss/types" ) -const nativeTransferGasLimit = 21000 - // InitiateFundMigration validates and creates a fund migration from an old TSS key vault // to the current TSS key vault for a specific chain. func (k Keeper) InitiateFundMigration(ctx context.Context, oldKeyId, chain string) (uint64, error) { @@ -64,12 +62,26 @@ func (k Keeper) InitiateFundMigration(ctx context.Context, oldKeyId, chain strin return 0, err } - // 7. Fetch gas price from EVM oracle + // 7. 
Fetch gas price, fund-migration gas limit, and L1 gas fee from UniversalCore. gasPrice, err := k.uexecutorKeeper.GetGasPriceByChain(sdkCtx, chain) if err != nil { return 0, fmt.Errorf("failed to get gas price for chain %s: %w", chain, err) } + gasLimitBig, err := k.uexecutorKeeper.GetTssFundMigrationGasLimitByChain(sdkCtx, chain) + if err != nil { + return 0, fmt.Errorf("failed to get tss fund migration gas limit for chain %s: %w", chain, err) + } + if gasLimitBig == nil || !gasLimitBig.IsUint64() || gasLimitBig.Uint64() == 0 { + return 0, fmt.Errorf("invalid tss fund migration gas limit for chain %s: %s", chain, gasLimitBig) + } + gasLimit := gasLimitBig.Uint64() + + l1GasFee, err := k.uexecutorKeeper.GetL1GasFeeByChain(sdkCtx, chain) + if err != nil { + return 0, fmt.Errorf("failed to get l1 gas fee for chain %s: %w", chain, err) + } + // 8. Create migration record migrationId, err := k.NextMigrationId.Next(ctx) if err != nil { @@ -86,7 +98,8 @@ func (k Keeper) InitiateFundMigration(ctx context.Context, oldKeyId, chain strin Status: types.FundMigrationStatus_FUND_MIGRATION_STATUS_PENDING, InitiatedBlock: sdkCtx.BlockHeight(), GasPrice: gasPrice.String(), - GasLimit: nativeTransferGasLimit, + GasLimit: gasLimit, + L1GasFee: l1GasFee.String(), } if err := k.FundMigrations.Set(ctx, migrationId, migration); err != nil { @@ -106,7 +119,8 @@ func (k Keeper) InitiateFundMigration(ctx context.Context, oldKeyId, chain strin Chain: chain, BlockHeight: sdkCtx.BlockHeight(), GasPrice: gasPrice.String(), - GasLimit: nativeTransferGasLimit, + GasLimit: gasLimit, + L1GasFee: l1GasFee.String(), }) if err != nil { return 0, fmt.Errorf("failed to create migration event: %w", err) diff --git a/x/utss/types/events.go b/x/utss/types/events.go index f56ae04c..5ae720c6 100644 --- a/x/utss/types/events.go +++ b/x/utss/types/events.go @@ -109,6 +109,7 @@ type FundMigrationInitiatedEventData struct { BlockHeight int64 `json:"block_height"` GasPrice string `json:"gas_price"` GasLimit 
uint64 `json:"gas_limit"` + L1GasFee string `json:"l1_gas_fee"` } // NewFundMigrationInitiatedEvent creates and returns a Cosmos SDK event. @@ -128,6 +129,7 @@ func NewFundMigrationInitiatedEvent(e FundMigrationInitiatedEventData) (sdk.Even sdk.NewAttribute("chain", e.Chain), sdk.NewAttribute("gas_price", e.GasPrice), sdk.NewAttribute("gas_limit", fmt.Sprintf("%d", e.GasLimit)), + sdk.NewAttribute("l1_gas_fee", e.L1GasFee), sdk.NewAttribute("data", string(bz)), ) From 3cbce9cc3f1df07674e9b290a3f18920ac5868cc Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:14:32 +0530 Subject: [PATCH 46/61] feat: added consensus migration changes of utss module for this mapping change --- x/utss/keeper/keeper_test.go | 8 ++ x/utss/keeper/migrations.go | 24 ++++ x/utss/migrations/v4/migrate.go | 15 +++ x/utss/migrations/v4/migrate_test.go | 188 +++++++++++++++++++++++++++ x/utss/module.go | 14 +- x/utss/types/expected_keepers.go | 2 + x/utss/types/types.pb.go | 182 +++++++++++++++++--------- 7 files changed, 366 insertions(+), 67 deletions(-) create mode 100644 x/utss/keeper/migrations.go create mode 100644 x/utss/migrations/v4/migrate.go create mode 100644 x/utss/migrations/v4/migrate_test.go diff --git a/x/utss/keeper/keeper_test.go b/x/utss/keeper/keeper_test.go index da1d661b..f88470d6 100755 --- a/x/utss/keeper/keeper_test.go +++ b/x/utss/keeper/keeper_test.go @@ -54,6 +54,14 @@ func (m mockUExecutorKeeper) GetGasPriceByChain(_ sdk.Context, _ string) (*big.I return big.NewInt(1000000000), nil // 1 gwei } +func (m mockUExecutorKeeper) GetL1GasFeeByChain(_ sdk.Context, _ string) (*big.Int, error) { + return big.NewInt(0), nil +} + +func (m mockUExecutorKeeper) GetTssFundMigrationGasLimitByChain(_ sdk.Context, _ string) (*big.Int, error) { + return big.NewInt(21000), nil +} + var maccPerms = map[string][]string{ authtypes.FeeCollectorName: nil, stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, diff --git a/x/utss/keeper/migrations.go 
b/x/utss/keeper/migrations.go new file mode 100644 index 00000000..d5698888 --- /dev/null +++ b/x/utss/keeper/migrations.go @@ -0,0 +1,24 @@ +package keeper + +import ( + "context" + + "github.com/pushchain/push-chain-node/x/utss/types" +) + +// MigrateFundMigrationsL1GasFee walks every FundMigration record and sets +// L1GasFee to "0" when unset. Records stored before the l1_gas_fee proto +// field existed decode with an empty string; downstream relayer/universalClient +// code parses this value as a decimal wei amount, so we normalize it here. +func (k Keeper) MigrateFundMigrationsL1GasFee(ctx context.Context) error { + return k.FundMigrations.Walk(ctx, nil, func(id uint64, m types.FundMigration) (bool, error) { + if m.L1GasFee != "" { + return false, nil + } + m.L1GasFee = "0" + if err := k.FundMigrations.Set(ctx, id, m); err != nil { + return true, err + } + return false, nil + }) +} diff --git a/x/utss/migrations/v4/migrate.go b/x/utss/migrations/v4/migrate.go new file mode 100644 index 00000000..b3c7aa6a --- /dev/null +++ b/x/utss/migrations/v4/migrate.go @@ -0,0 +1,15 @@ +package v4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/pushchain/push-chain-node/x/utss/keeper" +) + +// MigrateFundMigrationsL1GasFee backfills the new L1GasFee field on existing +// FundMigration records. Records created before v4 decode with L1GasFee == "", +// which is ambiguous for downstream consumers that parse it as a decimal wei +// amount; this migration normalizes those values to "0". 
+func MigrateFundMigrationsL1GasFee(ctx sdk.Context, k *keeper.Keeper) error { + return k.MigrateFundMigrationsL1GasFee(ctx) +} diff --git a/x/utss/migrations/v4/migrate_test.go b/x/utss/migrations/v4/migrate_test.go new file mode 100644 index 00000000..b7855a44 --- /dev/null +++ b/x/utss/migrations/v4/migrate_test.go @@ -0,0 +1,188 @@ +package v4_test + +import ( + "context" + "math/big" + "testing" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + uvalidatortypes "github.com/pushchain/push-chain-node/x/uvalidator/types" + + "github.com/stretchr/testify/require" + + "github.com/pushchain/push-chain-node/x/utss/keeper" + v4 "github.com/pushchain/push-chain-node/x/utss/migrations/v4" + "github.com/pushchain/push-chain-node/x/utss/types" +) + +type stubUValidatorKeeper struct{ types.UValidatorKeeper } + +func (stubUValidatorKeeper) IsTombstonedUniversalValidator(context.Context, string) (bool, error) { + return false, nil +} +func (stubUValidatorKeeper) IsBondedUniversalValidator(context.Context, string) (bool, error) { + return false, nil +} +func (stubUValidatorKeeper) GetEligibleVoters(context.Context) ([]uvalidatortypes.UniversalValidator, error) { + return nil, nil +} +func (stubUValidatorKeeper) GetAllUniversalValidators(context.Context) ([]uvalidatortypes.UniversalValidator, error) { + return nil, nil +} +func (stubUValidatorKeeper) UpdateValidatorStatus(context.Context, sdk.ValAddress, uvalidatortypes.UVStatus) error { + return nil +} + +type stubURegistryKeeper struct{} + +func (stubURegistryKeeper) IsChainOutboundEnabled(context.Context, string) (bool, error) { + 
return false, nil +} + +type stubUExecutorKeeper struct{} + +func (stubUExecutorKeeper) HasPendingOutboundsForChain(context.Context, string) (bool, error) { + return false, nil +} +func (stubUExecutorKeeper) GetGasPriceByChain(sdk.Context, string) (*big.Int, error) { + return big.NewInt(0), nil +} +func (stubUExecutorKeeper) GetL1GasFeeByChain(sdk.Context, string) (*big.Int, error) { + return big.NewInt(0), nil +} +func (stubUExecutorKeeper) GetTssFundMigrationGasLimitByChain(sdk.Context, string) (*big.Int, error) { + return big.NewInt(0), nil +} + +func setupKeeper(t *testing.T) (sdk.Context, keeper.Keeper) { + t.Helper() + + logger := log.NewTestLogger(t) + encCfg := moduletestutil.MakeTestEncodingConfig() + types.RegisterInterfaces(encCfg.InterfaceRegistry) + + keys := storetypes.NewKVStoreKeys(types.ModuleName) + ctx := sdk.NewContext(integration.CreateMultiStore(keys, logger), cmtproto.Header{}, false, logger) + + govAddr := authtypes.NewModuleAddress(govtypes.ModuleName).String() + k := keeper.NewKeeper( + encCfg.Codec, + runtime.NewKVStoreService(keys[types.ModuleName]), + logger, + govAddr, + stubUValidatorKeeper{}, + stubURegistryKeeper{}, + stubUExecutorKeeper{}, + ) + + return ctx, k +} + +func seedMigrations(t *testing.T, ctx sdk.Context, k keeper.Keeper, records []types.FundMigration) { + t.Helper() + for _, m := range records { + require.NoError(t, k.FundMigrations.Set(ctx, m.Id, m)) + } +} + +// TestMigrateFundMigrations_V4_BackfillsEmptyL1GasFee seeds legacy records +// (L1GasFee == "", as produced by pre-v4 proto decoding) and verifies the +// migration normalizes them to "0" while leaving every other field untouched. 
+func TestMigrateFundMigrations_V4_BackfillsEmptyL1GasFee(t *testing.T) { + ctx, k := setupKeeper(t) + + legacy := []types.FundMigration{ + { + Id: 1, + OldKeyId: "keygen-key-1", + OldTssPubkey: "old-pubkey-1", + CurrentKeyId: "keygen-key-2", + CurrentTssPubkey: "new-pubkey-2", + Chain: "eip155:11155111", + Status: types.FundMigrationStatus_FUND_MIGRATION_STATUS_PENDING, + InitiatedBlock: 100, + GasPrice: "1000000000", + GasLimit: 21000, + // L1GasFee deliberately empty — represents pre-v4 stored record. + }, + { + Id: 2, + OldKeyId: "keygen-key-a", + OldTssPubkey: "old-pubkey-a", + CurrentKeyId: "keygen-key-b", + CurrentTssPubkey: "new-pubkey-b", + Chain: "eip155:84532", + Status: types.FundMigrationStatus_FUND_MIGRATION_STATUS_COMPLETED, + InitiatedBlock: 200, + CompletedBlock: 210, + TxHash: "0xabc", + GasPrice: "2000000000", + GasLimit: 21000, + }, + } + seedMigrations(t, ctx, k, legacy) + + require.NoError(t, v4.MigrateFundMigrationsL1GasFee(ctx, &k)) + + for _, old := range legacy { + got, err := k.FundMigrations.Get(ctx, old.Id) + require.NoError(t, err) + require.Equal(t, "0", got.L1GasFee, "L1GasFee should be backfilled to \"0\"") + require.Equal(t, old.OldKeyId, got.OldKeyId) + require.Equal(t, old.Chain, got.Chain) + require.Equal(t, old.Status, got.Status) + require.Equal(t, old.GasPrice, got.GasPrice) + require.Equal(t, old.GasLimit, got.GasLimit) + require.Equal(t, old.TxHash, got.TxHash) + } +} + +// TestMigrateFundMigrations_V4_PreservesNonEmptyL1GasFee verifies that records +// which already carry a non-empty L1GasFee are not overwritten by the migration +// (idempotency + safety for re-runs). 
+func TestMigrateFundMigrations_V4_PreservesNonEmptyL1GasFee(t *testing.T) { + ctx, k := setupKeeper(t) + + seeded := types.FundMigration{ + Id: 7, + OldKeyId: "keygen-key-x", + OldTssPubkey: "old-pubkey-x", + CurrentKeyId: "keygen-key-y", + CurrentTssPubkey: "new-pubkey-y", + Chain: "eip155:10", + Status: types.FundMigrationStatus_FUND_MIGRATION_STATUS_PENDING, + GasPrice: "1500000000", + GasLimit: 50000, + L1GasFee: "12345", + } + require.NoError(t, k.FundMigrations.Set(ctx, seeded.Id, seeded)) + + require.NoError(t, v4.MigrateFundMigrationsL1GasFee(ctx, &k)) + + got, err := k.FundMigrations.Get(ctx, seeded.Id) + require.NoError(t, err) + require.Equal(t, "12345", got.L1GasFee, "existing L1GasFee must not be overwritten") +} + +// TestMigrateFundMigrations_V4_EmptyStore ensures the migration is a no-op +// when there are no FundMigration records. +func TestMigrateFundMigrations_V4_EmptyStore(t *testing.T) { + ctx, k := setupKeeper(t) + + require.NoError(t, v4.MigrateFundMigrationsL1GasFee(ctx, &k)) + + iter, err := k.FundMigrations.Iterate(ctx, nil) + require.NoError(t, err) + defer iter.Close() + require.False(t, iter.Valid()) +} diff --git a/x/utss/module.go b/x/utss/module.go index c6420aeb..74ffbe82 100755 --- a/x/utss/module.go +++ b/x/utss/module.go @@ -20,13 +20,15 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" "github.com/pushchain/push-chain-node/x/utss/keeper" + v4 "github.com/pushchain/push-chain-node/x/utss/migrations/v4" "github.com/pushchain/push-chain-node/x/utss/types" ) const ( // ConsensusVersion defines the current x/utss module consensus version. - // Bumped to 3: added FundMigrations, NextMigrationId, PendingMigrations collections. - ConsensusVersion = 3 + // Bumped to 4: FundMigration proto adds l1_gas_fee (field 13); existing + // records are backfilled with "0" by the v3 → v4 migration. 
+ ConsensusVersion = 4 ) var ( @@ -158,6 +160,14 @@ func (a AppModule) RegisterServices(cfg module.Configurator) { }); err != nil { panic(fmt.Sprintf("failed to register utss v2->v3 migration: %v", err)) } + + // Register migration from v3 → v4 (added FundMigration.l1_gas_fee). + if err := cfg.RegisterMigration(types.ModuleName, 3, func(ctx sdk.Context) error { + ctx.Logger().Info("🔧 Running utss module migration: v3 → v4 (fund-migration l1_gas_fee)") + return v4.MigrateFundMigrationsL1GasFee(ctx, &a.keeper) + }); err != nil { + panic(fmt.Sprintf("failed to register utss v3->v4 migration: %v", err)) + } } // ConsensusVersion is a sequence number for state-breaking change of the diff --git a/x/utss/types/expected_keepers.go b/x/utss/types/expected_keepers.go index 27436867..9ca96f21 100644 --- a/x/utss/types/expected_keepers.go +++ b/x/utss/types/expected_keepers.go @@ -40,4 +40,6 @@ type URegistryKeeper interface { type UExecutorKeeper interface { HasPendingOutboundsForChain(ctx context.Context, chain string) (bool, error) GetGasPriceByChain(ctx sdk.Context, chainNamespace string) (*big.Int, error) + GetL1GasFeeByChain(ctx sdk.Context, chainNamespace string) (*big.Int, error) + GetTssFundMigrationGasLimitByChain(ctx sdk.Context, chainNamespace string) (*big.Int, error) } diff --git a/x/utss/types/types.pb.go b/x/utss/types/types.pb.go index 7d919814..2bdd4379 100644 --- a/x/utss/types/types.pb.go +++ b/x/utss/types/types.pb.go @@ -508,6 +508,7 @@ type FundMigration struct { TxHash string `protobuf:"bytes,10,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` GasPrice string `protobuf:"bytes,11,opt,name=gas_price,json=gasPrice,proto3" json:"gas_price,omitempty"` GasLimit uint64 `protobuf:"varint,12,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + L1GasFee string `protobuf:"bytes,13,opt,name=l1_gas_fee,json=l1GasFee,proto3" json:"l1_gas_fee,omitempty"` } func (m *FundMigration) Reset() { *m = FundMigration{} } @@ -627,6 +628,13 @@ 
func (m *FundMigration) GetGasLimit() uint64 { return 0 } +func (m *FundMigration) GetL1GasFee() string { + if m != nil { + return m.L1GasFee + } + return "" +} + func init() { proto.RegisterEnum("utss.v1.TssKeyProcessStatus", TssKeyProcessStatus_name, TssKeyProcessStatus_value) proto.RegisterEnum("utss.v1.TssProcessType", TssProcessType_name, TssProcessType_value) @@ -643,71 +651,72 @@ func init() { func init() { proto.RegisterFile("utss/v1/types.proto", fileDescriptor_6ecfa9650339f6c3) } var fileDescriptor_6ecfa9650339f6c3 = []byte{ - // 1012 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x56, 0x4f, 0x4f, 0xeb, 0xc6, - 0x17, 0x8d, 0x13, 0x08, 0xe4, 0x12, 0xf2, 0x0b, 0x43, 0x80, 0x3c, 0xfe, 0x04, 0xc8, 0x7b, 0xd2, - 0x0f, 0xa1, 0xbe, 0x58, 0xb4, 0x2c, 0x2a, 0x76, 0x79, 0x60, 0xc0, 0x02, 0x42, 0xea, 0x38, 0xa8, - 0xef, 0x6d, 0x5c, 0xc7, 0x9e, 0x3a, 0xa3, 0x24, 0xb6, 0x95, 0x71, 0x10, 0xe9, 0xa6, 0x52, 0x97, - 0x5d, 0x75, 0xd9, 0x25, 0xcb, 0x2e, 0xfb, 0x31, 0xba, 0x7c, 0xcb, 0x2e, 0x2b, 0x50, 0xd5, 0x7e, - 0x8c, 0x6a, 0x66, 0x9c, 0xc4, 0x4e, 0xb2, 0x81, 0x99, 0x7b, 0xce, 0xbd, 0xbe, 0x73, 0xee, 0x99, - 0x01, 0x58, 0x1f, 0x04, 0x94, 0xca, 0x8f, 0x27, 0x72, 0x30, 0xf4, 0x31, 0xad, 0xf8, 0x7d, 0x2f, - 0xf0, 0xd0, 0x12, 0x0b, 0x56, 0x1e, 0x4f, 0xb6, 0x4b, 0x96, 0x47, 0x7b, 0x1e, 0x95, 0x5b, 0x26, - 0xc5, 0xf2, 0xe3, 0x49, 0x0b, 0x07, 0xe6, 0x89, 0x6c, 0x79, 0xc4, 0x15, 0xc4, 0xed, 0xad, 0x10, - 0xef, 0x51, 0x87, 0xd5, 0xe8, 0x51, 0x27, 0x04, 0x0a, 0x8e, 0xe7, 0x78, 0x7c, 0x29, 0xb3, 0x55, - 0x18, 0x5d, 0x33, 0x7b, 0xc4, 0xf5, 0x64, 0xfe, 0x53, 0x84, 0xca, 0x5f, 0x43, 0xba, 0x6e, 0xf6, - 0xcd, 0x1e, 0x45, 0x05, 0x58, 0x34, 0xed, 0x1e, 0x71, 0x8b, 0xd2, 0x81, 0x74, 0x94, 0xd1, 0xc4, - 0xe6, 0xac, 0xf8, 0xeb, 0xf3, 0x7e, 0xe2, 0xdf, 0xe7, 0x7d, 0xe9, 0xe7, 0x7f, 0x7e, 0x3f, 0x5e, - 0xe1, 0xcd, 0xfa, 0x9c, 0x5f, 0x7e, 0x4e, 0xc2, 0xaa, 0x4e, 0xe9, 0x0d, 0x1e, 0xd6, 0xfb, 0x9e, - 0x85, 0x29, 0x45, 0xa7, 0x90, 0xa6, 
0x81, 0x19, 0x0c, 0x28, 0x2f, 0x91, 0xfb, 0x72, 0xb7, 0x12, - 0x9e, 0xa3, 0x12, 0xe3, 0x35, 0x38, 0x47, 0x0b, 0xb9, 0xa8, 0x0c, 0x59, 0xdf, 0xec, 0x07, 0xc4, - 0x22, 0xbe, 0xe9, 0x06, 0xb4, 0x98, 0x3c, 0x48, 0x1d, 0x65, 0xb4, 0x58, 0x0c, 0x1d, 0x42, 0xb6, - 0xd5, 0xf5, 0xac, 0x8e, 0xd1, 0xc6, 0xc4, 0x69, 0x07, 0xc5, 0xd4, 0x81, 0x74, 0x94, 0xd2, 0x56, - 0x78, 0xec, 0x9a, 0x87, 0xd0, 0x5b, 0x58, 0xc5, 0x4f, 0x3e, 0xe9, 0x0f, 0x47, 0x9c, 0x05, 0xce, - 0xc9, 0x8a, 0x60, 0x48, 0x3a, 0x83, 0xac, 0x2f, 0x9a, 0x30, 0x98, 0xde, 0xc5, 0x45, 0xde, 0xe7, - 0x56, 0xb4, 0xcf, 0xb0, 0x49, 0x7d, 0xe8, 0x63, 0x6d, 0xc5, 0x9f, 0x6c, 0x50, 0x0e, 0x92, 0xc4, - 0x2e, 0xa6, 0x0f, 0xa4, 0xa3, 0x05, 0x2d, 0x49, 0xec, 0xb3, 0xc3, 0xa8, 0x32, 0x05, 0xae, 0x4c, - 0x40, 0xa9, 0xd1, 0xc1, 0x43, 0x23, 0x4c, 0x2b, 0xff, 0x94, 0x84, 0xb4, 0x38, 0x3a, 0xda, 0x03, - 0x60, 0xa8, 0x3f, 0x68, 0x75, 0xf0, 0x30, 0x94, 0x38, 0x13, 0x50, 0x5a, 0xe7, 0x01, 0xb4, 0x01, - 0x69, 0x96, 0x48, 0xec, 0x62, 0x52, 0xa8, 0xdf, 0xc1, 0x43, 0xd5, 0x9e, 0xd1, 0x26, 0x35, 0x47, - 0x9b, 0x53, 0xd8, 0xfc, 0x9e, 0xb8, 0x66, 0x97, 0xfc, 0x80, 0x6d, 0x23, 0xa6, 0x92, 0x50, 0xa0, - 0x30, 0x46, 0x3f, 0x44, 0xe4, 0xaa, 0xc0, 0x7a, 0x07, 0x0f, 0x1d, 0xec, 0xc6, 0x53, 0x16, 0x79, - 0xca, 0x9a, 0x80, 0xa2, 0xfc, 0x3d, 0x80, 0x91, 0x72, 0x63, 0x15, 0x32, 0x61, 0x44, 0xb5, 0xcf, - 0xde, 0x44, 0xc5, 0xc8, 0x46, 0xc5, 0x28, 0xff, 0x9d, 0x84, 0x65, 0x9d, 0x52, 0xe5, 0x11, 0xbb, - 0x41, 0x28, 0xa2, 0x34, 0x12, 0x11, 0x9d, 0x02, 0x60, 0x06, 0x88, 0x71, 0x24, 0xf9, 0x38, 0x36, - 0xa2, 0xe3, 0xe0, 0x69, 0x7c, 0x18, 0x19, 0x3c, 0x5a, 0x22, 0x79, 0x6c, 0xb4, 0xd4, 0xec, 0x00, - 0x79, 0xc6, 0x94, 0xc7, 0xe2, 0xdd, 0x2f, 0x4c, 0x75, 0xcf, 0xec, 0x35, 0x63, 0x8b, 0x4c, 0x7c, - 0xfa, 0xd3, 0x93, 0x48, 0xcf, 0x99, 0xc4, 0x8c, 0x05, 0x97, 0xe6, 0x58, 0x70, 0xda, 0xca, 0xcb, - 0xb3, 0x56, 0x9e, 0x98, 0x21, 0x13, 0x35, 0x43, 0xdc, 0x42, 0x30, 0x65, 0xa1, 0xf2, 0x6f, 0x29, - 0x58, 0xbd, 0x1c, 0xb8, 0xf6, 0x1d, 0x71, 0xfa, 0x66, 0x40, 0x3c, 0x77, 
0x46, 0xec, 0x5d, 0x00, - 0xaf, 0x6b, 0x1b, 0x31, 0xa3, 0x2d, 0x7b, 0x5d, 0xfb, 0x86, 0x97, 0x7f, 0x07, 0x39, 0x86, 0x46, - 0x3e, 0x91, 0xe2, 0x8c, 0xac, 0xd7, 0xb5, 0xf5, 0xb1, 0x51, 0xdf, 0x41, 0xce, 0x1a, 0xf4, 0xfb, - 0x6c, 0x64, 0x61, 0x9d, 0x05, 0xc1, 0x0a, 0xa3, 0xa2, 0xd6, 0x17, 0x80, 0x46, 0xac, 0x48, 0x3d, - 0x21, 0x6b, 0x3e, 0x44, 0x26, 0x35, 0x0b, 0xb0, 0x68, 0xb5, 0x4d, 0xe2, 0x72, 0x5b, 0x65, 0x34, - 0xb1, 0x89, 0xbc, 0x26, 0x4b, 0x53, 0xaf, 0x49, 0xec, 0x94, 0x53, 0x93, 0xfe, 0x3f, 0xfc, 0x8f, - 0xb8, 0x24, 0x20, 0x66, 0x30, 0xba, 0x0d, 0xa1, 0xc2, 0xb9, 0x71, 0x98, 0xdb, 0x9a, 0x11, 0x2d, - 0xaf, 0xe7, 0x77, 0xf1, 0x84, 0x98, 0x11, 0xc4, 0x71, 0x58, 0x10, 0xb7, 0x60, 0x29, 0x78, 0x32, - 0xda, 0x26, 0x6d, 0x87, 0x9a, 0xa7, 0x83, 0xa7, 0x6b, 0x93, 0xb6, 0xd1, 0x0e, 0x64, 0x1c, 0x93, - 0x1a, 0x7e, 0x9f, 0x58, 0xb8, 0xb8, 0x22, 0xd4, 0x74, 0x4c, 0x5a, 0x67, 0xfb, 0x11, 0xd8, 0x25, - 0x3d, 0x12, 0x14, 0xb3, 0x7c, 0x04, 0x0c, 0xbc, 0x65, 0xfb, 0xe3, 0x0e, 0xac, 0xcf, 0x79, 0x11, - 0xd1, 0x0e, 0x6c, 0xe9, 0x8d, 0x86, 0x71, 0xa3, 0x7c, 0x34, 0xea, 0xda, 0xfd, 0xb9, 0xd2, 0x68, - 0x18, 0x75, 0xa5, 0x76, 0xa1, 0xd6, 0xae, 0xf2, 0x89, 0x79, 0x60, 0xa3, 0x79, 0xce, 0x7e, 0xe7, - 0x25, 0xb4, 0x0d, 0x9b, 0xd3, 0xe0, 0x65, 0x55, 0xbd, 0x55, 0x2e, 0xf2, 0xc9, 0xe3, 0xef, 0x20, - 0x17, 0x7f, 0xd6, 0xd0, 0x26, 0x20, 0xc6, 0x1e, 0x31, 0x6f, 0x94, 0x8f, 0x57, 0x4a, 0x2d, 0x9f, - 0x40, 0x5b, 0xb0, 0x1e, 0x8d, 0x6b, 0xca, 0xa5, 0xa6, 0x34, 0xae, 0xf3, 0x12, 0xda, 0x83, 0x37, - 0x51, 0xe0, 0x9b, 0xe6, 0xbd, 0xd6, 0xbc, 0x33, 0xce, 0xaf, 0xab, 0xb5, 0x2b, 0x25, 0x9f, 0x3c, - 0xbe, 0x85, 0x6c, 0xf4, 0xa6, 0xa2, 0x7d, 0xd8, 0x61, 0x74, 0xe5, 0x41, 0xa9, 0xe9, 0xe3, 0x24, - 0xb5, 0xa6, 0xea, 0x6a, 0x55, 0x57, 0x2e, 0x26, 0x67, 0x11, 0x04, 0xd6, 0xf4, 0xa5, 0x5a, 0xab, - 0xde, 0xaa, 0x9f, 0x94, 0x8b, 0xbc, 0x74, 0xfc, 0xc0, 0xfb, 0x8d, 0xdc, 0x62, 0x54, 0x80, 0xfc, - 0x84, 0x5e, 0x3d, 0xd7, 0xd5, 0x07, 0x65, 0xd2, 0xad, 0x88, 0x9e, 0xdf, 0xdf, 0xd5, 0x6f, 0x15, - 0x56, 0x5d, 
0x42, 0x1b, 0xb0, 0x36, 0x01, 0x94, 0x6f, 0xeb, 0xaa, 0xc6, 0x75, 0xf8, 0x11, 0xd6, - 0xe7, 0x18, 0x07, 0x1d, 0xc2, 0xde, 0x65, 0xb3, 0x76, 0x61, 0xdc, 0xa9, 0x57, 0x5a, 0x55, 0x57, - 0xef, 0x6b, 0x46, 0x43, 0xaf, 0xea, 0xcd, 0xa8, 0xf4, 0x6f, 0x61, 0x7f, 0x3e, 0x25, 0xfa, 0xd5, - 0x03, 0xd8, 0x9d, 0x4f, 0x1a, 0x0d, 0xe2, 0xc3, 0xcd, 0x1f, 0x2f, 0x25, 0xe9, 0xf3, 0x4b, 0x49, - 0xfa, 0xeb, 0xa5, 0x24, 0xfd, 0xf2, 0x5a, 0x4a, 0x7c, 0x7e, 0x2d, 0x25, 0xfe, 0x7c, 0x2d, 0x25, - 0x3e, 0x9d, 0x38, 0x24, 0x68, 0x0f, 0x5a, 0x15, 0xcb, 0xeb, 0xc9, 0xfe, 0x80, 0xb6, 0xf9, 0x05, - 0xe0, 0xab, 0xf7, 0x7c, 0xf9, 0xde, 0xf5, 0x6c, 0x2c, 0x3f, 0xc9, 0xe2, 0x5d, 0x65, 0xff, 0x28, - 0xb4, 0xd2, 0xfc, 0xcf, 0xf7, 0x57, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xbc, 0xc0, 0x06, - 0x40, 0x08, 0x00, 0x00, + // 1030 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x56, 0xcf, 0x4f, 0xe3, 0x46, + 0x14, 0x8e, 0x13, 0x08, 0x9b, 0x47, 0x48, 0xc3, 0x10, 0x20, 0xcb, 0x8f, 0x00, 0xd9, 0x95, 0x8a, + 0x50, 0x37, 0x56, 0x5a, 0x0e, 0x15, 0xb7, 0x2c, 0x38, 0x60, 0x01, 0x21, 0x75, 0x1c, 0xd4, 0xdd, + 0x8b, 0xeb, 0xc4, 0xb3, 0xce, 0x28, 0x89, 0x6d, 0x65, 0x1c, 0x44, 0x7a, 0xa9, 0xd4, 0x63, 0x4f, + 0x3d, 0xf6, 0xc8, 0x9f, 0xd0, 0xbf, 0xa2, 0xea, 0x71, 0x8f, 0x3d, 0x56, 0xa0, 0xaa, 0xfd, 0x33, + 0xaa, 0x99, 0x71, 0x12, 0x3b, 0xc9, 0x05, 0x66, 0xde, 0xf7, 0xbd, 0xe7, 0x37, 0xdf, 0xfb, 0x66, + 0x00, 0x36, 0x86, 0x3e, 0xa5, 0xf2, 0x43, 0x59, 0xf6, 0x47, 0x1e, 0xa6, 0x25, 0x6f, 0xe0, 0xfa, + 0x2e, 0x5a, 0x61, 0xc1, 0xd2, 0x43, 0x79, 0xa7, 0xd0, 0x76, 0x69, 0xdf, 0xa5, 0x72, 0xcb, 0xa4, + 0x58, 0x7e, 0x28, 0xb7, 0xb0, 0x6f, 0x96, 0xe5, 0xb6, 0x4b, 0x1c, 0x41, 0xdc, 0xd9, 0x0e, 0xf0, + 0x3e, 0xb5, 0x59, 0x8d, 0x3e, 0xb5, 0x03, 0x20, 0x67, 0xbb, 0xb6, 0xcb, 0x97, 0x32, 0x5b, 0x05, + 0xd1, 0x75, 0xb3, 0x4f, 0x1c, 0x57, 0xe6, 0x3f, 0x45, 0xa8, 0xf8, 0x2d, 0x24, 0xeb, 0xe6, 0xc0, + 0xec, 0x53, 0x94, 0x83, 0x65, 0xd3, 0xea, 0x13, 0x27, 0x2f, 0x1d, 0x4a, 
0xc7, 0x29, 0x4d, 0x6c, + 0xce, 0xf2, 0xbf, 0x3d, 0x1d, 0xc4, 0xfe, 0x7b, 0x3a, 0x90, 0x7e, 0xf9, 0xf7, 0xf7, 0x93, 0x55, + 0xde, 0xac, 0xc7, 0xf9, 0xc5, 0xa7, 0x38, 0xac, 0xe9, 0x94, 0x5e, 0xe3, 0x51, 0x7d, 0xe0, 0xb6, + 0x31, 0xa5, 0xe8, 0x14, 0x92, 0xd4, 0x37, 0xfd, 0x21, 0xe5, 0x25, 0x32, 0x5f, 0xef, 0x95, 0x82, + 0x73, 0x94, 0x22, 0xbc, 0x06, 0xe7, 0x68, 0x01, 0x17, 0x15, 0x21, 0xed, 0x99, 0x03, 0x9f, 0xb4, + 0x89, 0x67, 0x3a, 0x3e, 0xcd, 0xc7, 0x0f, 0x13, 0xc7, 0x29, 0x2d, 0x12, 0x43, 0x47, 0x90, 0x6e, + 0xf5, 0xdc, 0x76, 0xd7, 0xe8, 0x60, 0x62, 0x77, 0xfc, 0x7c, 0xe2, 0x50, 0x3a, 0x4e, 0x68, 0xab, + 0x3c, 0x76, 0xc5, 0x43, 0xe8, 0x0d, 0xac, 0xe1, 0x47, 0x8f, 0x0c, 0x46, 0x63, 0xce, 0x12, 0xe7, + 0xa4, 0x45, 0x30, 0x20, 0x9d, 0x41, 0xda, 0x13, 0x4d, 0x18, 0x4c, 0xef, 0xfc, 0x32, 0xef, 0x73, + 0x3b, 0xdc, 0x67, 0xd0, 0xa4, 0x3e, 0xf2, 0xb0, 0xb6, 0xea, 0x4d, 0x37, 0x28, 0x03, 0x71, 0x62, + 0xe5, 0x93, 0x87, 0xd2, 0xf1, 0x92, 0x16, 0x27, 0xd6, 0xd9, 0x51, 0x58, 0x99, 0x1c, 0x57, 0xc6, + 0xa7, 0xd4, 0xe8, 0xe2, 0x91, 0x11, 0xa4, 0x15, 0x7f, 0x8e, 0x43, 0x52, 0x1c, 0x1d, 0xed, 0x03, + 0x30, 0xd4, 0x1b, 0xb6, 0xba, 0x78, 0x14, 0x48, 0x9c, 0xf2, 0x29, 0xad, 0xf3, 0x00, 0xda, 0x84, + 0x24, 0x4b, 0x24, 0x56, 0x3e, 0x2e, 0xd4, 0xef, 0xe2, 0x91, 0x6a, 0xcd, 0x69, 0x93, 0x58, 0xa0, + 0xcd, 0x29, 0x6c, 0x7d, 0x22, 0x8e, 0xd9, 0x23, 0x3f, 0x62, 0xcb, 0x88, 0xa8, 0x24, 0x14, 0xc8, + 0x4d, 0xd0, 0xf7, 0x21, 0xb9, 0x4a, 0xb0, 0xd1, 0xc5, 0x23, 0x1b, 0x3b, 0xd1, 0x94, 0x65, 0x9e, + 0xb2, 0x2e, 0xa0, 0x30, 0x7f, 0x1f, 0x60, 0xac, 0xdc, 0x44, 0x85, 0x54, 0x10, 0x51, 0xad, 0xb3, + 0xd7, 0x61, 0x31, 0xd2, 0x61, 0x31, 0x8a, 0xff, 0xc4, 0xe1, 0x95, 0x4e, 0xa9, 0xf2, 0x80, 0x1d, + 0x3f, 0x10, 0x51, 0x1a, 0x8b, 0x88, 0x4e, 0x01, 0x30, 0x03, 0xc4, 0x38, 0xe2, 0x7c, 0x1c, 0x9b, + 0xe1, 0x71, 0xf0, 0x34, 0x3e, 0x8c, 0x14, 0x1e, 0x2f, 0x91, 0x3c, 0x31, 0x5a, 0x62, 0x7e, 0x80, + 0x3c, 0x63, 0xc6, 0x63, 0xd1, 0xee, 0x97, 0x66, 0xba, 0x67, 0xf6, 0x9a, 0xb3, 0x45, 0x2a, 0x3a, + 0xfd, 0xd9, 
0x49, 0x24, 0x17, 0x4c, 0x62, 0xce, 0x82, 0x2b, 0x0b, 0x2c, 0x38, 0x6b, 0xe5, 0x57, + 0xf3, 0x56, 0x9e, 0x9a, 0x21, 0x15, 0x36, 0x43, 0xd4, 0x42, 0x30, 0x63, 0xa1, 0xe2, 0x1f, 0x09, + 0x58, 0xab, 0x0e, 0x1d, 0xeb, 0x96, 0xd8, 0x03, 0xd3, 0x27, 0xae, 0x33, 0x27, 0xf6, 0x1e, 0x80, + 0xdb, 0xb3, 0x8c, 0x88, 0xd1, 0x5e, 0xb9, 0x3d, 0xeb, 0x9a, 0x97, 0x7f, 0x0b, 0x19, 0x86, 0x86, + 0x3e, 0x91, 0xe0, 0x8c, 0xb4, 0xdb, 0xb3, 0xf4, 0x89, 0x51, 0xdf, 0x42, 0xa6, 0x3d, 0x1c, 0x0c, + 0xd8, 0xc8, 0x82, 0x3a, 0x4b, 0x82, 0x15, 0x44, 0x45, 0xad, 0xaf, 0x00, 0x8d, 0x59, 0xa1, 0x7a, + 0x42, 0xd6, 0x6c, 0x80, 0x4c, 0x6b, 0xe6, 0x60, 0xb9, 0xdd, 0x31, 0x89, 0xc3, 0x6d, 0x95, 0xd2, + 0xc4, 0x26, 0xf4, 0x9a, 0xac, 0xcc, 0xbc, 0x26, 0x91, 0x53, 0xce, 0x4c, 0xfa, 0x4b, 0xf8, 0x82, + 0x38, 0xc4, 0x27, 0xa6, 0x3f, 0xbe, 0x0d, 0x81, 0xc2, 0x99, 0x49, 0x98, 0xdb, 0x9a, 0x11, 0xdb, + 0x6e, 0xdf, 0xeb, 0xe1, 0x29, 0x31, 0x25, 0x88, 0x93, 0xb0, 0x20, 0x6e, 0xc3, 0x8a, 0xff, 0x68, + 0x74, 0x4c, 0xda, 0x09, 0x34, 0x4f, 0xfa, 0x8f, 0x57, 0x26, 0xed, 0xa0, 0x5d, 0x48, 0xd9, 0x26, + 0x35, 0xbc, 0x01, 0x69, 0xe3, 0xfc, 0xaa, 0x50, 0xd3, 0x36, 0x69, 0x9d, 0xed, 0xc7, 0x60, 0x8f, + 0xf4, 0x89, 0x9f, 0x4f, 0xf3, 0x11, 0x30, 0xf0, 0x86, 0xed, 0xd9, 0x20, 0x7a, 0x65, 0x83, 0xe1, + 0x9f, 0x30, 0xce, 0xaf, 0x89, 0xd4, 0x5e, 0xf9, 0xd2, 0xa4, 0x55, 0x8c, 0x4f, 0xba, 0xb0, 0xb1, + 0xe0, 0xbd, 0x44, 0xbb, 0xb0, 0xad, 0x37, 0x1a, 0xc6, 0xb5, 0xf2, 0xc1, 0xa8, 0x6b, 0x77, 0xe7, + 0x4a, 0xa3, 0x61, 0xd4, 0x95, 0xda, 0x85, 0x5a, 0xbb, 0xcc, 0xc6, 0x16, 0x81, 0x8d, 0xe6, 0x39, + 0xfb, 0x9d, 0x95, 0xd0, 0x0e, 0x6c, 0xcd, 0x82, 0xd5, 0x8a, 0x7a, 0xa3, 0x5c, 0x64, 0xe3, 0x27, + 0x3f, 0x40, 0x26, 0xfa, 0xe8, 0xa1, 0x2d, 0x40, 0x8c, 0x3d, 0x66, 0x5e, 0x2b, 0x1f, 0x2e, 0x95, + 0x5a, 0x36, 0x86, 0xb6, 0x61, 0x23, 0x1c, 0xd7, 0x94, 0xaa, 0xa6, 0x34, 0xae, 0xb2, 0x12, 0xda, + 0x87, 0xd7, 0x61, 0xe0, 0xbb, 0xe6, 0x9d, 0xd6, 0xbc, 0x35, 0xce, 0xaf, 0x2a, 0xb5, 0x4b, 0x25, + 0x1b, 0x3f, 0xb9, 0x81, 0x74, 0xf8, 0x1e, 0xa3, 
0x03, 0xd8, 0x65, 0x74, 0xe5, 0x5e, 0xa9, 0xe9, + 0x93, 0x24, 0xb5, 0xa6, 0xea, 0x6a, 0x45, 0x57, 0x2e, 0xa6, 0x67, 0x11, 0x04, 0xd6, 0x74, 0x55, + 0xad, 0x55, 0x6e, 0xd4, 0x8f, 0xca, 0x45, 0x56, 0x3a, 0xb9, 0xe7, 0xfd, 0x86, 0xee, 0x38, 0xca, + 0x41, 0x76, 0x4a, 0xaf, 0x9c, 0xeb, 0xea, 0xbd, 0x32, 0xed, 0x56, 0x44, 0xcf, 0xef, 0x6e, 0xeb, + 0x37, 0x0a, 0xab, 0x2e, 0xa1, 0x4d, 0x58, 0x9f, 0x02, 0xca, 0xf7, 0x75, 0x55, 0xe3, 0x3a, 0xfc, + 0x04, 0x1b, 0x0b, 0x6c, 0x85, 0x8e, 0x60, 0xbf, 0xda, 0xac, 0x5d, 0x18, 0xb7, 0xea, 0xa5, 0x56, + 0xd1, 0xd5, 0xbb, 0x9a, 0xd1, 0xd0, 0x2b, 0x7a, 0x33, 0x2c, 0xfd, 0x1b, 0x38, 0x58, 0x4c, 0x09, + 0x7f, 0xf5, 0x10, 0xf6, 0x16, 0x93, 0xc6, 0x83, 0x78, 0x7f, 0xfd, 0xe7, 0x73, 0x41, 0xfa, 0xfc, + 0x5c, 0x90, 0xfe, 0x7e, 0x2e, 0x48, 0xbf, 0xbe, 0x14, 0x62, 0x9f, 0x5f, 0x0a, 0xb1, 0xbf, 0x5e, + 0x0a, 0xb1, 0x8f, 0x65, 0x9b, 0xf8, 0x9d, 0x61, 0xab, 0xd4, 0x76, 0xfb, 0xb2, 0x37, 0xa4, 0x1d, + 0x7e, 0x3d, 0xf8, 0xea, 0x1d, 0x5f, 0xbe, 0x73, 0x5c, 0x0b, 0xcb, 0x8f, 0xb2, 0x78, 0x75, 0xd9, + 0xbf, 0x11, 0xad, 0x24, 0xff, 0xe3, 0xfe, 0xcd, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xb8, + 0x11, 0x04, 0x5e, 0x08, 0x00, 0x00, } func (this *Params) Equal(that interface{}) bool { @@ -1073,6 +1082,13 @@ func (m *FundMigration) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.L1GasFee) > 0 { + i -= len(m.L1GasFee) + copy(dAtA[i:], m.L1GasFee) + i = encodeVarintTypes(dAtA, i, uint64(len(m.L1GasFee))) + i-- + dAtA[i] = 0x6a + } if m.GasLimit != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.GasLimit)) i-- @@ -1330,6 +1346,10 @@ func (m *FundMigration) Size() (n int) { if m.GasLimit != 0 { n += 1 + sovTypes(uint64(m.GasLimit)) } + l = len(m.L1GasFee) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -2441,6 +2461,38 @@ func (m *FundMigration) Unmarshal(dAtA []byte) error { break } } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field L1GasFee", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.L1GasFee = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) From 32fb84bd94a36a8c8f563476a9c9cac28567507e Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:14:58 +0530 Subject: [PATCH 47/61] tests: added integration tests for fund migration gas limit changes --- test/integration/utss/fund_migration_test.go | 82 +++++++++++++++++++- test/utils/bytecode.go | 2 +- 2 files changed, 82 insertions(+), 2 deletions(-) diff --git a/test/integration/utss/fund_migration_test.go b/test/integration/utss/fund_migration_test.go index f0019708..36f7877a 100644 --- a/test/integration/utss/fund_migration_test.go +++ b/test/integration/utss/fund_migration_test.go @@ -2,10 +2,15 @@ package integrationtest import ( "fmt" + "math/big" "strconv" + "strings" "testing" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/pushchain/push-chain-node/app" @@ -18,12 +23,84 @@ import ( const testChain = "eip155:11155111" +// universalCoreSetupABI exposes the admin methods needed to configure +// per-chain mappings during test setup. These are intentionally kept out of +// the production ABI (x/uexecutor/types/abi.go) — Go-side keeper code never +// calls them; only tests do. 
+const universalCoreSetupABI = `[ + { + "type": "function", + "name": "grantRole", + "inputs": [ + { "name": "role", "type": "bytes32", "internalType": "bytes32" }, + { "name": "account", "type": "address", "internalType": "address" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setL1GasFeeByChain", + "inputs": [ + { "name": "chainNamespace", "type": "string", "internalType": "string" }, + { "name": "l1GasFee", "type": "uint256", "internalType": "uint256" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setTssFundMigrationGasLimitByChain", + "inputs": [ + { "name": "chainNamespace", "type": "string", "internalType": "string" }, + { "name": "gasLimit", "type": "uint256", "internalType": "uint256" } + ], + "outputs": [], + "stateMutability": "nonpayable" + } +]` + +// seedFundMigrationChainValues grants MANAGER_ROLE to the admin and seeds the +// per-chain tss-fund-migration gas limit and L1 gas fee on UniversalCore. +// InitiateFundMigration rejects a zero gas limit, so without this seeding the +// keeper read returns 0 and the migration fails validation. 
+func seedFundMigrationChainValues( + t *testing.T, + chainApp *app.ChainApp, + ctx sdk.Context, + admin common.Address, + chain string, + gasLimit, l1GasFee *big.Int, +) { + t.Helper() + + handlerAddr := utils.GetDefaultAddresses().HandlerAddr + setupABI, err := abi.JSON(strings.NewReader(universalCoreSetupABI)) + require.NoError(t, err) + + managerRole := crypto.Keccak256Hash([]byte("MANAGER_ROLE")) + var roleArg [32]byte + copy(roleArg[:], managerRole.Bytes()) + + _, err = chainApp.EVMKeeper.CallEVM(ctx, setupABI, admin, handlerAddr, true, "grantRole", roleArg, admin) + require.NoError(t, err, "grant MANAGER_ROLE") + + _, err = chainApp.EVMKeeper.CallEVM(ctx, setupABI, admin, handlerAddr, true, "setTssFundMigrationGasLimitByChain", chain, gasLimit) + require.NoError(t, err, "seed tss fund migration gas limit") + + _, err = chainApp.EVMKeeper.CallEVM(ctx, setupABI, admin, handlerAddr, true, "setL1GasFeeByChain", chain, l1GasFee) + require.NoError(t, err, "seed l1 gas fee") +} + // setupFundMigrationTest initializes app with validators, a finalized keygen key, and a chain config. // Returns app, ctx, validator addresses, and the finalized key ID. 
func setupFundMigrationTest(t *testing.T, numVals int, outboundEnabled bool) (*app.ChainApp, sdk.Context, []string, string) { t.Helper() - app, ctx, _, validators := utils.SetAppWithMultipleValidators(t, numVals) + app, ctx, baseAccounts, validators := utils.SetAppWithMultipleValidators(t, numVals) + + admin := common.BytesToAddress(baseAccounts[0].GetAddress().Bytes()) + seedFundMigrationChainValues(t, app, ctx, admin, testChain, big.NewInt(21000), big.NewInt(150)) // Register universal validators universalVals := make([]string, len(validators)) @@ -129,7 +206,10 @@ func TestInitiateFundMigration(t *testing.T) { require.Equal(t, utsstypes.FundMigrationStatus_FUND_MIGRATION_STATUS_PENDING, migration.Status) require.Equal(t, oldKeyId, migration.OldKeyId) require.Equal(t, testChain, migration.Chain) + // GasLimit and L1GasFee come from UniversalCore's per-chain mappings, + // seeded by seedFundMigrationChainValues. require.Equal(t, uint64(21000), migration.GasLimit) + require.Equal(t, "150", migration.L1GasFee) require.NotEmpty(t, migration.GasPrice) // Verify pending index diff --git a/test/utils/bytecode.go b/test/utils/bytecode.go index 74700fea..92707c09 100644 --- a/test/utils/bytecode.go +++ b/test/utils/bytecode.go @@ -6,7 +6,7 @@ const UEA_SVM_BYTECODE = "6080604052600436101561001a575b3615610018575f80fd5b005b const UEA_PROXY_BYTECODE = 
"608060405260043610610028575f3560e01c806323efa7ec14610032578063aaf10f4214610051575b6100306100a8565b005b34801561003d575f80fd5b5061003061004c366004610368565b6100ba565b34801561005c575f80fd5b507f868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d5460405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b6100b86100b36102cc565b61034a565b565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff165f811580156101045750825b90505f8267ffffffffffffffff1660011480156101205750303b155b90508115801561012e575080155b15610165576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016600117855583156101c65784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff16680100000000000000001785555b5f6101ef7f868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d5490565b905073ffffffffffffffffffffffffffffffffffffffff81161561023f576040517fae962d4e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b867f868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d555083156102c45784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b505050505050565b5f806102f67f868a771a75a4aa6c2be13e9a9617cb8ea240ed84a3a90c8469537393ec3e115d5490565b905073ffffffffffffffffffffffffffffffffffffffff8116610345576040517fae962d4e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b919050565b365f80375f80365f845af43d5f803e808015610364573d5ff35b3d5ffd5b5f60208284031215610378575f80fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461039b575f80fd5b939250505056fea2646970667358221220c4b8f9457567bdcd08b95faef7df86de4e9daead65e2db22018126d9eb77d85864736f6c634300081a0033" -const 
HANDLER_CONTRACT_BYTECODE = "608080604052600436101561001c575b50361561001a575f80fd5b005b5f905f3560e01c908162bc574b1461393a5750806301ffc9a7146138995780630379eae8146138395780630ac6eb771461378d578063172bfc1c146137115780631a4e49d4146136e85780631a873ce4146136b5578063240028e81461366b578063248a9ca3146136195780632f2ff15d146135bc57806336568abe146135525780633f4ba83a146134495780634b1d2eeb146134095780634d20d0f8146133d65780634d49fbf3146132565780634eb7d1a11461321657806357724c41146131155780635b549182146130e25780635c975abb146130a1578063606b05a4146130345780636435967b1461286757806364f10e501461284c57806368c70c9e146128015780636ca752e3146127b65780636d4008a814612771578063780ad8271461206757806378a8812714611fec57806381fbadad14611fce5780638377e23014611f9a57806383b94a5214611ee45780638456cb5914611db957806391d1485414611d425780639be7fdb214611c57578063a217fddf14611c3b578063a5172ddb14611bf0578063a861469f14611ba6578063ad14d38514611b5c578063af90f35114611a7e578063b49f6b8814611a0f578063b5d8349f146119ae578063b6322a9f14611963578063be0580c0146117bd578063c6f1b7e71461176c578063cd20c6e814611727578063d17c872c14611601578063d547741f1461159a578063db9a0daf146114d7578063dbc1b46414611476578063dcc16b5c14611238578063dd19e7551461104c578063e798646614610f6d578063ec87621c14610f32578063eefbaa3514610e44578063f881446714610646578063f8c8765e14610314578063fb46e99d146102f65763fc6b5de80361000f57346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f35760206102e0816102cd3660048701613a2f565b8160405193828580945193849201613b1f565b8101601281520301902054604051908152f35b80fd5b50346102f357806003193601126102f3576020600654604051908152f35b50346102f35760806003193601126102f35761032e613aa7565b610336613aca565b61033e613aed565b6064359173ffffffffffffffffffffffffffffffffffffffff8316809303610642577ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00549360ff8560401c16159467ffffffffffffffff81168015908161063a575b6001149081610630575b159081610627575b506105ff579173ffffffffffffffffffffffffffffffffffffffff80949392838860017ffffff
fffffffffffffffffffffffffffffffffffffffffff0000000000000000859716177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00556105aa575b5061042a6143a1565b6104326143a1565b60017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005561045e6143a1565b61046733613ef5565b50167fffffffffffffffffffffffff0000000000000000000000000000000000000000600a541617600a55167fffffffffffffffffffffffff00000000000000000000000000000000000000006007541617600755167fffffffffffffffffffffffff000000000000000000000000000000000000000060085416176008557fffffffffffffffffffffffff000000000000000000000000000000000000000060095416176009556105165780f35b7fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a0054167ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00557fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2602060405160018152a180f35b7fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000001668010000000000000001177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00555f610421565b6004877ff92ee8a9000000000000000000000000000000000000000000000000000000008152fd5b9050155f6103b2565b303b1591506103aa565b8791506103a0565b8480fd5b5060a06003193601126102f35761065b613aa7565b90610664613bc5565b9160443591606435936084359273ffffffffffffffffffffffffffffffffffffffff8416928385036102f35773ffffffffffffffffffffffffffffffffffffffff601054163303610e1c576106b76141f6565b6106bf614249565b8691839773ffffffffffffffffffffffffffffffffffffffff8216948515610df4578615610df4573415610dcc578815610dcc5762ffffff1615610d8a575b15610d62575b824211610d3a576107bf60208973ffffffffffffffffffffffffffffffffffffffff6007541673ffffffffffffffffffffffffffffffffffffffff600a54169488861090815f14610d335786915b15610d2b57905b604051958694859384937f1698ee820000000000000000000000000000000000000000000000000000000085526004850191604091949373ffffffffffffffffffffffffffffffffffffffff62ffffff9281606087019816865216602085015216910152565b039
15afa908115610bcc579073ffffffffffffffffffffffffffffffffffffffff918491610cfc575b501615610cd457803b15610bc85781600491604051928380927fd0e30db000000000000000000000000000000000000000000000000000000000825234905af18015610c3557908291610cbf575b50600a546008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff918216600482015234602482015292602092849260449284929091165af18015610c3557610ca2575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a541697604051986108cd8a61396a565b89528460208a0152169182604089015230606089015260808801528560a08801523460c08801528060e088015260206109b961010473ffffffffffffffffffffffffffffffffffffffff6008541699846040519b8c9485937fdb3e2198000000000000000000000000000000000000000000000000000000008552600485019073ffffffffffffffffffffffffffffffffffffffff60e0809282815116855282602082015116602086015262ffffff60408201511660408601528260608201511660608601526080810151608086015260a081015160a086015260c081015160c0860152015116910152565b5af1968715610c95578197610c5d575b5080602073ffffffffffffffffffffffffffffffffffffffff600a5416604473ffffffffffffffffffffffffffffffffffffffff6008541660405194859384927f095ea7b300000000000000000000000000000000000000000000000000000000845260048401528160248401525af18015610c3557610c40575b506040517f42966c6800000000000000000000000000000000000000000000000000000000815286600482015260208160248185885af18015610c3557610c08575b5086340394348611610bdb57873403610b03575b505060606040967f01fd625a5ce1109c10761818e2ef64ea92cd4966d78086d37e5a4b50e322687892885191825287602083015288820152a360017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005582519182526020820152f35b73ffffffffffffffffffffffffffffffffffffffff600a5416803b15610bd7578280916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528c60048401525af18015610bcc579183918893610bae575b5081809381925af1610b7a613db8565b5015610b865780610a9a565b807f90b8ec18000000000000000000000000000000000000000000000000000000006
0049252fd5b610bbb91935082906139b4565b610bc8578186915f610b6a565b5080fd5b6040513d85823e3d90fd5b8280fd5b6024827f4e487b710000000000000000000000000000000000000000000000000000000081526011600452fd5b610c299060203d602011610c2e575b610c2181836139b4565b810190613cb9565b610a86565b503d610c17565b6040513d84823e3d90fd5b610c589060203d602011610c2e57610c2181836139b4565b610a44565b9096506020813d602011610c8d575b81610c79602093836139b4565b81010312610c895751955f6109c9565b5f80fd5b3d9150610c6c565b50604051903d90823e3d90fd5b610cba9060203d602011610c2e57610c2181836139b4565b6108a1565b81610cc9916139b4565b6102f357805f610836565b6004827f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b610d1e915060203d602011610d24575b610d1681836139b4565b810190613d7f565b5f6107e8565b503d610d0c565b508590610759565b8091610752565b6004827f1ab7da6b000000000000000000000000000000000000000000000000000000008152fd5b9150600654603c810290808204603c1490151715610bdb57610d849042613dab565b91610704565b8483526004602052604083205462ffffff169850886106fe575b6004837f3733548a000000000000000000000000000000000000000000000000000000008152fd5b6004847f1f2a2005000000000000000000000000000000000000000000000000000000008152fd5b6004847fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b807fbce361b00000000000000000000000000000000000000000000000000000000060049252fd5b50346102f35760606003193601126102f35760043567ffffffffffffffff8111610bc857610f1b610e9a7f5e41bf0052b493123a63e4e0d9095ed4324108e489d58c9a0948b2be366ac8c6923690600401613a2f565b602435604435610ef66040518385519160208181890194610ebc818388613b1f565b8101600b81520301902055826040516020818851610edb818388613b1f565b81016011815203019020556040519182918651928391613b1f565b8101906012825260208142930301902055604051938493608085526080850190613b40565b91602084015260408301524260608301520390a180f35b50346102f357806003193601126102f35760206040517f241ecf16d79d0f8dbfb92cbc07fe17840425976cf0667f022fe9877caa831b088152f35b50346102f35760406003193601126102f357610f87613aa7565b73fffffffffffffffff
fffffffffffffffffffffff610fa4613b10565b91610fad613de7565b169081156110245760207f16ef4de07b0452a43221c91064fb645963a8e2e60bd8a7514da58d56e315c42291838552600f825261101881604087209060ff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0083541691151516179055565b6040519015158152a280f35b6004837fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b50346102f35760406003193601126102f357611066613aa7565b73ffffffffffffffffffffffffffffffffffffffff6024359116906040517fa0c50b690000000000000000000000000000000000000000000000000000000081528381600481865afa90811561122d57849161120b575b5060405191815192602081818501956110d7818389613b1f565b81016014815203019020549080155f146111cd57505b73ffffffffffffffffffffffffffffffffffffffff604051602081855161111581838a613b1f565b8101600c81520301902054169283156111a557602061113f91604051809381928751928391613b1f565b8101600b8152030190205490811561117d5761117993949561116360409284613ca6565b9681526013602052205460405195869586613b83565b0390f35b6004867fe661aed0000000000000000000000000000000000000000000000000000000008152fd5b6004867fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b908082106111db57506110ed565b85906044927fff632bea000000000000000000000000000000000000000000000000000000008352600452602452fd5b61122791503d8086833e61121f81836139b4565b810190613c47565b5f6110bd565b6040513d86823e3d90fd5b50346102f35760606003193601126102f35760043567ffffffffffffffff8111610bc85761126a903690600401613a2f565b611272613aca565b60443562ffffff8116918282036106425761128b613de7565b73ffffffffffffffffffffffffffffffffffffffff811680156111a5579160209161135d9373ffffffffffffffffffffffffffffffffffffffff600754169173ffffffffffffffffffffffffffffffffffffffff600a541691821091825f1461146f5780925b1561146757506040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff90921660448301529092839190829081906064820190565b03915afa801561122d5773fffffffffffffffffffffffffffffffff
fffffff918591611448575b5016908115611420579161140f917f21e3c1439de176cb39006e603b26a8d890fe2267c804597e40d2954871141d7d9360405160208185516113c98183858a01613b1f565b8101600d815203019020827fffffffffffffffffffffffff0000000000000000000000000000000000000000825416179055604051938493606085526060850190613b40565b91602084015260408301520390a180f35b6004847f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b611461915060203d602011610d2457610d1681836139b4565b5f611384565b905090610759565b81926112f1565b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f357602073ffffffffffffffffffffffffffffffffffffffff6114c3826102cd3660048801613a2f565b8101600c8152030190205416604051908152f35b50346102f35760206003193601126102f3576004358180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f20541615611572576020817f424b07caa75ce8e1c3985f334273f957db9ce138de114e48e50d8240d4d7300b92600655604051908152a180f35b6004827f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b50346102f35760406003193601126102f3576115fd6004356115ba613aca565b906115f86115f3825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b613e6f565b6140ee565b5080f35b50346102f35760406003193601126102f35761161b613aa7565b611623613bc5565b908280527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040832073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156116ff5773ffffffffffffffffffffffffffffffffffffffff169081156110245762ffffff16906101f4821415806116f3575b806116e7575b610da4578252600460205260408220907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000082541617905580f35b506127108214156116ad565b50610bb88214156116a7565b6004837f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b50346102f35760206003193601126102f357604060209173ffffffffffffffffffffffffffffffffffffffff61175b613aa7565b16815260138
3522054604051908152f35b50346102f357806003193601126102f357602060405173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168152f35b50346102f35760606003193601126102f3576117d7613aa7565b6117df613aca565b6117e7613aed565b918380527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040842073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f2054161561193b5773ffffffffffffffffffffffffffffffffffffffff168015801561191d575b80156118ff575b610df45773ffffffffffffffffffffffffffffffffffffffff929183917fffffffffffffffffffffffff00000000000000000000000000000000000000006007541617600755167fffffffffffffffffffffffff00000000000000000000000000000000000000006008541617600855167fffffffffffffffffffffffff0000000000000000000000000000000000000000600954161760095580f35b5073ffffffffffffffffffffffffffffffffffffffff831615611862565b5073ffffffffffffffffffffffffffffffffffffffff82161561185b565b6004847f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f357602061199b816102cd3660048701613a2f565b8101601581520301902054604051908152f35b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f357602073ffffffffffffffffffffffffffffffffffffffff6119fb826102cd3660048801613a2f565b8101600d8152030190205416604051908152f35b50346102f3577f57ad858a99d9aee6f1fd395e454bb1659eb8500ccb081c729a103dc2247ba3a4611a3f36613a75565b90611a48613de7565b816040516020818451611a5e8183858901613b1f565b8101601581520301902055611a7860405192839283613c2b565b0390a180f35b50346102f35760206003193601126102f357611a98613aa7565b8180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156115725773ffffffffffffffffffffffffffffffffffffffff168015611b34577fffffffffffffffffffffffff0000000000000000000000000000000000000000601054161760105580f35b6004827fd92e233d0000000000000000000000000
00000000000000000000000000000008152fd5b50346102f35760206003193601126102f35760ff604060209273ffffffffffffffffffffffffffffffffffffffff611b92613aa7565b168152600384522054166040519015158152f35b50346102f35760206003193601126102f35762ffffff604060209273ffffffffffffffffffffffffffffffffffffffff611bde613aa7565b16815260048452205416604051908152f35b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f3576020611c28816102cd3660048701613a2f565b8101600b81520301902054604051908152f35b50346102f357806003193601126102f357602090604051908152f35b50346102f35760406003193601126102f35760043567ffffffffffffffff8111610bc857611c89903690600401613a2f565b73ffffffffffffffffffffffffffffffffffffffff611ca6613aca565b611cae613de7565b168015611024577f0c7d242571a289736ea536c54ebe236d31ba62abfd4f22b8d54d2988dc0dd94991611d36916040516020818451611cf08183858901613b1f565b8101600c815203019020817fffffffffffffffffffffffff0000000000000000000000000000000000000000825416179055604051928392604084526040840190613b40565b9060208301520390a180f35b50346102f35760406003193601126102f35773ffffffffffffffffffffffffffffffffffffffff6040611d73613aca565b9260043581527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020522091165f52602052602060ff60405f2054166040519015158152f35b50346102f357806003193601126102f3578080527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040812073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f20541615611ebc57611e266141f6565b60017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005416177fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586020604051338152a180f35b807f49e27cff0000000000000000000000000000000000000000000000000000000060049252fd5b50346102f35760206003193601126102f357611efe613aa7565b8180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073fffffffffffff
fffffffffffffffffffffffffff33165f5260205260ff60405f205416156115725773ffffffffffffffffffffffffffffffffffffffff168015611b34577fffffffffffffffffffffffff0000000000000000000000000000000000000000600a541617600a5580f35b50346102f357806003193601126102f357602073ffffffffffffffffffffffffffffffffffffffff60105416604051908152f35b50346102f357806003193601126102f3576020600e54604051908152f35b50346102f357611ffb36613bd7565b9083809394527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040832073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156116ff57612064929361205f6141f6565b613cd1565b80f35b50346102f35760c06003193601126102f357612081613aa7565b6024359061208d613aed565b906064359262ffffff8416908185036125485760843560a435936120af6141f6565b6120b7614249565b6120c28684836142c0565b8473ffffffffffffffffffffffffffffffffffffffff821697888a52600360205260ff60408b20541615612749579415612708575b156126b3575b84421161268b576020846121ba928a73ffffffffffffffffffffffffffffffffffffffff600754169173ffffffffffffffffffffffffffffffffffffffff600a541690818d10805f146126845781935b501561146757506040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff90921660448301529092839190829081906064820190565b03915afa80156125ed5773ffffffffffffffffffffffffffffffffffffffff918991612665575b50161561263d578015612615576040517f47e7ef24000000000000000000000000000000000000000000000000000000008152306004820152602481018390526020816044818b8b5af180156125ed576125f8575b506008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602481018390526020816044818b8b5af180156125ed576125d0575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a541693604051946122c58661396a565b8886526020808701918252929091166040808701828152306060890190815260808901998a5260a0890188815260c08a0188815260e08b018f815260085495517f414bf38900000000000000000000000000000
00000000000000000000000000081529b5173ffffffffffffffffffffffffffffffffffffffff90811660048e01529751881660248d0152935162ffffff1660448c01529151861660648b0152995160848a0152985160a4890152975160c48801529651821660e48701529585916101049183918c91165af192831561256957879361259c575b5082106125745773ffffffffffffffffffffffffffffffffffffffff60085416604051907f095ea7b300000000000000000000000000000000000000000000000000000000825260048201528660248201526020816044818a8a5af180156125695761254c575b508573ffffffffffffffffffffffffffffffffffffffff600a5416803b15610bc8578180916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528960048401525af18015610c3557612533575b5080808085885af161247a613db8565b501561250b57927ff5d6ca9b390b5271e0cbb3d43b4d708d5b17804cb81a4c65e027226d87ccf0e2949273ffffffffffffffffffffffffffffffffffffffff9260c09584600a54169060405196875260208701526040860152606085015260808401521660a0820152a160017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005580f35b6004867f90b8ec18000000000000000000000000000000000000000000000000000000008152fd5b8161253d916139b4565b61254857855f61246a565b8580fd5b6125649060203d602011610c2e57610c2181836139b4565b612406565b6040513d89823e3d90fd5b6004867f8199f5f3000000000000000000000000000000000000000000000000000000008152fd5b9092506020813d6020116125c8575b816125b8602093836139b4565b81010312610c895751915f61239f565b3d91506125ab565b6125e89060203d602011610c2e57610c2181836139b4565b612299565b6040513d8a823e3d90fd5b6126109060203d602011610c2e57610c2181836139b4565b612236565b6004877f1f2a2005000000000000000000000000000000000000000000000000000000008152fd5b6004877f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b61267e915060203d602011610d2457610d1681836139b4565b5f6121e1565b829361214d565b6004887f1ab7da6b000000000000000000000000000000000000000000000000000000008152fd5b9350600654603c810290808204603c14901517156126db576126d59042613dab565b936120fd565b6024887f4e487b7100000000000000000000000000000000000000000000000000000000815
26011600452fd5b8789526004602052604089205462ffffff169450846120f7576004897f3733548a000000000000000000000000000000000000000000000000000000008152fd5b60048a7f4e38f95a000000000000000000000000000000000000000000000000000000008152fd5b50346102f35760206003193601126102f357604060209173ffffffffffffffffffffffffffffffffffffffff6127a5613aa7565b168152600583522054604051908152f35b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f35760206127ee816102cd3660048701613a2f565b8101601481520301902054604051908152f35b50346102f35760206003193601126102f3576004359067ffffffffffffffff82116102f3576020612839816102cd3660048701613a2f565b8101601181520301902054604051908152f35b50346102f35761206461285e36613bd7565b9161205f6141f6565b5034610c895760c0600319360112610c8957612881613aa7565b60243561288c613aed565b6064358015918215809203610c895760843562ffffff811690818103610c895760a4356128b76141f6565b6128bf614249565b6128ca84888a6142c0565b5f95156129e95750506040517f47e7ef2400000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8316600482015260248101869052919050602082806044810103818a73ffffffffffffffffffffffffffffffffffffffff8b165af1908115612569577ffa6ff091ec99bdfd127d51e7786764f2ff7e39f866bbb2a2996e1597052641e49460609473ffffffffffffffffffffffffffffffffffffffff9485946129ca575b505b6040519788526020880152604087015216941692a360017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005580f35b6129e29060203d602011610c2e57610c2181836139b4565b505f61298c565b8091929395501561300c5773ffffffffffffffffffffffffffffffffffffffff871691825f52600360205260ff60405f20541615612fe4579215612fa2575b600654603c810290808204603c1490151715612f7557612a489042613dab565b804211612f4d57612b0060208573ffffffffffffffffffffffffffffffffffffffff6007541673ffffffffffffffffffffffffffffffffffffffff600a541680881090815f14612f46578d915b15612f3e576040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff9
0921660448301529092839190829081906064820190565b03915afa8015612e395773ffffffffffffffffffffffffffffffffffffffff915f91612f1f575b501615612ef7576040517f47e7ef24000000000000000000000000000000000000000000000000000000008152306004820152602481018890526020816044815f885af18015612e3957612eda575b506008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602481018890526020816044815f885af18015612e3957612ebd575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a54169460405195612c058761396a565b858752602087015216604085015230606085015260808401528560a08401528060c08401525f60e08401526020612cef61010473ffffffffffffffffffffffffffffffffffffffff60085416955f60405197889485937f414bf389000000000000000000000000000000000000000000000000000000008552600485019073ffffffffffffffffffffffffffffffffffffffff60e0809282815116855282602082015116602086015262ffffff60408201511660408601528260608201511660608601526080810151608086015260a081015160a086015260c081015160c0860152015116910152565b5af1928315612e39575f93612e89575b508210612e615760205f91604473ffffffffffffffffffffffffffffffffffffffff6008541660405194859384927f095ea7b300000000000000000000000000000000000000000000000000000000845260048401528160248401525af18015612e3957612e44575b5073ffffffffffffffffffffffffffffffffffffffff600a5416803b15610c89575f80916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528760048401525af18015612e3957612e24575b508580808084875af1612ddc613db8565b501561250b5773ffffffffffffffffffffffffffffffffffffffff7ffa6ff091ec99bdfd127d51e7786764f2ff7e39f866bbb2a2996e1597052641e49360609382939061298e565b612e319196505f906139b4565b5f945f612dcb565b6040513d5f823e3d90fd5b612e5c9060203d602011610c2e57610c2181836139b4565b612d68565b7f8199f5f3000000000000000000000000000000000000000000000000000000005f5260045ffd5b9092506020813d602011612eb5575b81612ea5602093836139b4565b81010312610c895751915f612cff565b3d9150612e98565b612ed59060203d602011610c2e57610c2181836
139b4565b612bd9565b612ef29060203d602011610c2e57610c2181836139b4565b612b76565b7f76ecffc0000000000000000000000000000000000000000000000000000000005f5260045ffd5b612f38915060203d602011610d2457610d1681836139b4565b5f612b27565b508c90610759565b8091612a95565b7f1ab7da6b000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9150805f52600460205262ffffff60405f2054169182612a28577f3733548a000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f4e38f95a000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f22c50cbf000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610c89577f882f47825d4043cd04a564cad4f524a7fe00a604ae024c23dbc8065b77668b4761306336613a75565b9061306c613de7565b8160405160208184516130828183858901613b1f565b810160148152030190205561309c60405192839283613c2b565b0390a1005b34610c89575f600319360112610c8957602060ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f0330054166040519015158152f35b34610c89575f600319360112610c8957602073ffffffffffffffffffffffffffffffffffffffff60075416604051908152f35b34610c89576040600319360112610c895761312e613aa7565b335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d6020526040902054602435919060ff16156131ee5773ffffffffffffffffffffffffffffffffffffffff1680156131c657611388821161319e575f52600560205260405f20555f80f35b7fc31c0b6e000000000000000000000000000000000000000000000000000000005f5260045ffd5b7fd92e233d000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f49e27cff000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610c89576020600319360112610c89576004355f526002602052602073ffffffffffffffffffffffffffffffffffffffff60405f205416604051908152f35b34610c89576020600319360112610c895760045f73ffffffffffffffffffffffffffffffffffffffff613287613aa7565b16604051928380927fa0c50b690000000000000000000000000000000000000000000000000000000082525afa908115612
e39575f916133bc575b50604051815190602081818501936132db818387613b1f565b81016015815203019020549182156133945773ffffffffffffffffffffffffffffffffffffffff6040516020818451613315818389613b1f565b8101600c81520301902054169182156131c657602061333f91604051809381928651928391613b1f565b8101600b8152030190205490811561336c578161335f8561117994613ca6565b9460405195869586613b83565b7fe661aed0000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f9502a873000000000000000000000000000000000000000000000000000000005f5260045ffd5b6133d091503d805f833e61121f81836139b4565b816132c2565b34610c89575f600319360112610c8957602073ffffffffffffffffffffffffffffffffffffffff60095416604051908152f35b34610c89576020600319360112610c89576004355f526001602052602073ffffffffffffffffffffffffffffffffffffffff60405f205416604051908152f35b34610c89575f600319360112610c8957335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff16156131ee577fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005460ff81161561352a577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00167fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6020604051338152a1005b7f8dfc202b000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610c89576040600319360112610c895761356b613aca565b3373ffffffffffffffffffffffffffffffffffffffff8216036135945761001a906004356140ee565b7f6697b232000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610c89576040600319360112610c895761001a6004356135db613aca565b906136146115f3825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b613fdc565b34610c89576020600319360112610c895760206136636004355f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b604051908152f35b34610c89576020600319360112610c895773ffffffffffffffffffffffffffffffffffffffff613699613aa7565b165f52600f60205
2602060ff60405f2054166040519015158152f35b34610c89575f600319360112610c8957602073ffffffffffffffffffffffffffffffffffffffff600a5416604051908152f35b34610c89576020600319360112610c89576004355f525f602052602060405f2054604051908152f35b34610c89576040600319360112610c895761372a613aa7565b73ffffffffffffffffffffffffffffffffffffffff6024359161374b613de7565b169081156131c65760207f911a025fb070fa2a29c37a3bf4c00d16acf15583cd050f17bdbacbab7e72320391835f52601382528060405f2055604051908152a2005b34610c89576040600319360112610c89576137a6613aa7565b6137ae613b10565b335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff16156131ee5773ffffffffffffffffffffffffffffffffffffffff61001a92165f52600360205260405f209060ff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0083541691151516179055565b34610c89577f6a59d469e3757d6e139cdf95b12740f585d553afac49b90bdbe278502a44271861386836613a75565b9081604051602081845161387f8183858901613b1f565b8101600b8152030190205561309c60405192839283613c2b565b34610c89576020600319360112610c89576004357fffffffff000000000000000000000000000000000000000000000000000000008116809103610c8957807f7965db0b0000000000000000000000000000000000000000000000000000000060209214908115613910575b506040519015158152f35b7f01ffc9a70000000000000000000000000000000000000000000000000000000091501482613905565b34610c89575f600319360112610c895760209073ffffffffffffffffffffffffffffffffffffffff600854168152f35b610100810190811067ffffffffffffffff82111761398757604052565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff82111761398757604052565b67ffffffffffffffff811161398757601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b81601f82011215610c8957803590613a46826139f5565b92613a5460405194856139b4565b82845260208383010111610c8957815f926020809301838601378301015290565b6040600319820112610c89576004359067fffff
fffffffffff8211610c8957613aa091600401613a2f565b9060243590565b6004359073ffffffffffffffffffffffffffffffffffffffff82168203610c8957565b6024359073ffffffffffffffffffffffffffffffffffffffff82168203610c8957565b6044359073ffffffffffffffffffffffffffffffffffffffff82168203610c8957565b602435908115158203610c8957565b5f5b838110613b305750505f910152565b8181015183820152602001613b21565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f602093613b7c81518092818752878088019101613b1f565b0116010190565b919260a09373ffffffffffffffffffffffffffffffffffffffff613bc29796931684526020840152604083015260608201528160808201520190613b40565b90565b6024359062ffffff82168203610c8957565b6003196060910112610c895760043573ffffffffffffffffffffffffffffffffffffffff81168103610c8957906024359060443573ffffffffffffffffffffffffffffffffffffffff81168103610c895790565b929190613c42602091604086526040860190613b40565b930152565b602081830312610c895780519067ffffffffffffffff8211610c89570181601f82011215610c89578051613c7a816139f5565b92613c8860405194856139b4565b81845260208284010111610c8957613bc29160208085019101613b1f565b81810292918115918404141715612f7557565b90816020910312610c8957518015158103610c895790565b90602091613d5293613ce48184846142c0565b5f73ffffffffffffffffffffffffffffffffffffffff6040518097819682957f47e7ef24000000000000000000000000000000000000000000000000000000008452600484016020909392919373ffffffffffffffffffffffffffffffffffffffff60408201951681520152565b0393165af18015612e3957613d645750565b613d7c9060203d602011610c2e57610c2181836139b4565b50565b90816020910312610c89575173ffffffffffffffffffffffffffffffffffffffff81168103610c895790565b91908201809211612f7557565b3d15613de2573d90613dc9826139f5565b91613dd760405193846139b4565b82523d5f602084013e565b606090565b335f9081527f06484cc59dc38e4f67c31122333a17ca81b3ca18cdf02bfc298072fa52b0316a602052604090205460ff1615613e1f57565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f52336004527f241ecf16d79d0f8dbfb92cbc07fe17840425976cf0667f022fe9877caa831b0860245260445ffd5b805
f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f20541615613ec65750565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f523360045260245260445ffd5b73ffffffffffffffffffffffffffffffffffffffff81165f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff16613fd75773ffffffffffffffffffffffffffffffffffffffff165f8181527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d6020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790553391907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d8180a4600190565b505f90565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260ff60405f205416155f146140e857805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f2060017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082541617905573ffffffffffffffffffffffffffffffffffffffff339216907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d5f80a4600190565b50505f90565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260ff60405f2054165f146140e857805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00815416905573ffffffffffffffffffffffffffffffffffffffff339216907ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b5f80a4600190565b60ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300541661422157565b7fd93c0665000000000000000000000000000000000000000000000000000000005f5260045ffd5b60027f9b779b17422d0df92223018b3
2b4d1fa46e071723d6817e2486d003becc55f0054146142985760027f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f0055565b7f3ee5aeb5000000000000000000000000000000000000000000000000000000005f5260045ffd5b90919073ffffffffffffffffffffffffffffffffffffffff16156131c65773ffffffffffffffffffffffffffffffffffffffff1680156131c65773ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168114908115614397575b5061436f571561434757565b7f1f2a2005000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f82d5d76a000000000000000000000000000000000000000000000000000000005f5260045ffd5b905030145f61433b565b60ff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a005460401c16156143d057565b7fd7e6bcf8000000000000000000000000000000000000000000000000000000005f5260045ffdfea2646970667358221220768f174ca9357d5558e1f156d7a974035ebdf7b2ddf7fb9cc7e899aba472f96d64736f6c634300081a0033" +const HANDLER_CONTRACT_BYTECODE = "608080604052600436101561001c575b50361561001a575f80fd5b005b5f905f3560e01c908162bc574b146139a85750806301ffc9a7146139075780630379eae8146138a75780630615f0a21461383f5780630ac6eb7714613791578063172bfc1c146137155780631a4e49d4146136ec5780631a873ce4146136b95780631c90064a14613651578063248a9ca3146135ff5780632f2ff15d146135a257806336568abe146135385780633f4ba83a146134075780634243fbaa146133bd578063447146a2146133735780634b1d2eeb146133335780634d49fbf31461318b5780634eb7d1a11461314b5780635b549182146131185780635c975abb146130d7578063606b05a41461306a5780636435967b1461283857806364f10e501461281d57806368c70c9e146127d25780636ca752e3146127875780637574d9a01461276a578063780ad8271461206057806378a8812714611fe557806381fbadad14611fc75780638377e23014611f9357806383b94a5214611edd5780638456cb5914611db25780638f40e8f514611d9557806391d1485414611d1e5780639be7fdb214611c33578063a217fddf14611c17578063a5172ddb14611bcc578063a861469f14611b82578063ad14d38514611b38578063af90f35114611a5a578063b49f6b88146119eb578063b5d8349f1461198a578063b6322a9f1461193f578063b
6aa5ce314611923578063be0580c01461177d578063c6f1b7e71461172c578063cd20c6e8146116e7578063d17c872c14611588578063d547741f14611521578063db9a0daf1461145e578063dbc1b464146113fd578063dcc16b5c146111bf578063dd19e75514610fd3578063e229cd7614610fb6578063ec87621c14610f7b578063eefbaa3514610e8d578063f6b9ec7c14610e70578063f881446714610672578063f8c8765e14610340578063fb46e99d146103225763fc6b5de80361000f573461031f57602060031936011261031f576004359067ffffffffffffffff821161031f57602061030c816102f93660048701613a9d565b8160405193828580945193849201613b7e565b8101601281520301902054604051908152f35b80fd5b503461031f578060031936011261031f576020600654604051908152f35b503461031f57608060031936011261031f5761035a613b15565b610362613b38565b61036a613b5b565b6064359173ffffffffffffffffffffffffffffffffffffffff831680930361066e577ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00549360ff8560401c16159467ffffffffffffffff811680159081610666575b600114908161065c575b159081610653575b5061062b579173ffffffffffffffffffffffffffffffffffffffff80949392838860017fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000859716177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00556105d6575b50610456614400565b61045e614400565b60017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005561048a614400565b61049333613f54565b50167fffffffffffffffffffffffff0000000000000000000000000000000000000000600a541617600a55167fffffffffffffffffffffffff00000000000000000000000000000000000000006007541617600755167fffffffffffffffffffffffff000000000000000000000000000000000000000060085416176008557fffffffffffffffffffffffff000000000000000000000000000000000000000060095416176009556105425780f35b7fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a0054167ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00557fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d2602060405160018152a180f35b7ffffffffffffffffffffffffffffffffff
fffffffffffff0000000000000000001668010000000000000001177ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00555f61044d565b6004877ff92ee8a9000000000000000000000000000000000000000000000000000000008152fd5b9050155f6103de565b303b1591506103d6565b8791506103cc565b8480fd5b5060a060031936011261031f57610687613b15565b90610690613c24565b9160443591606435936084359273ffffffffffffffffffffffffffffffffffffffff84169283850361031f5773ffffffffffffffffffffffffffffffffffffffff601054163303610e48576106e3614255565b6106eb6142a8565b8691839773ffffffffffffffffffffffffffffffffffffffff8216948515610e20578615610e20573415610df8578815610df85762ffffff1615610db6575b15610d8e575b824211610d66576107eb60208973ffffffffffffffffffffffffffffffffffffffff6007541673ffffffffffffffffffffffffffffffffffffffff600a54169488861090815f14610d5f5786915b15610d5757905b604051958694859384937f1698ee820000000000000000000000000000000000000000000000000000000085526004850191604091949373ffffffffffffffffffffffffffffffffffffffff62ffffff9281606087019816865216602085015216910152565b03915afa908115610bf8579073ffffffffffffffffffffffffffffffffffffffff918491610d28575b501615610d0057803b15610bf45781600491604051928380927fd0e30db000000000000000000000000000000000000000000000000000000000825234905af18015610c6157908291610ceb575b50600a546008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff918216600482015234602482015292602092849260449284929091165af18015610c6157610cce575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a541697604051986108f98a6139d8565b89528460208a0152169182604089015230606089015260808801528560a08801523460c08801528060e088015260206109e561010473ffffffffffffffffffffffffffffffffffffffff6008541699846040519b8c9485937fdb3e2198000000000000000000000000000000000000000000000000000000008552600485019073ffffffffffffffffffffffffffffffffffffffff60e0809282815116855282602082015116602086015262ffffff60408201511660408601528260608201511660608601526080810151608086015260a081015160a
086015260c081015160c0860152015116910152565b5af1968715610cc1578197610c89575b5080602073ffffffffffffffffffffffffffffffffffffffff600a5416604473ffffffffffffffffffffffffffffffffffffffff6008541660405194859384927f095ea7b300000000000000000000000000000000000000000000000000000000845260048401528160248401525af18015610c6157610c6c575b506040517f42966c6800000000000000000000000000000000000000000000000000000000815286600482015260208160248185885af18015610c6157610c34575b5086340394348611610c0757873403610b2f575b505060606040967f01fd625a5ce1109c10761818e2ef64ea92cd4966d78086d37e5a4b50e322687892885191825287602083015288820152a360017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005582519182526020820152f35b73ffffffffffffffffffffffffffffffffffffffff600a5416803b15610c03578280916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528c60048401525af18015610bf8579183918893610bda575b5081809381925af1610ba6613e17565b5015610bb25780610ac6565b807f90b8ec180000000000000000000000000000000000000000000000000000000060049252fd5b610be79193508290613a22565b610bf4578186915f610b96565b5080fd5b6040513d85823e3d90fd5b8280fd5b6024827f4e487b710000000000000000000000000000000000000000000000000000000081526011600452fd5b610c559060203d602011610c5a575b610c4d8183613a22565b810190613d18565b610ab2565b503d610c43565b6040513d84823e3d90fd5b610c849060203d602011610c5a57610c4d8183613a22565b610a70565b9096506020813d602011610cb9575b81610ca560209383613a22565b81010312610cb55751955f6109f5565b5f80fd5b3d9150610c98565b50604051903d90823e3d90fd5b610ce69060203d602011610c5a57610c4d8183613a22565b6108cd565b81610cf591613a22565b61031f57805f610862565b6004827f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b610d4a915060203d602011610d50575b610d428183613a22565b810190613dde565b5f610814565b503d610d38565b508590610785565b809161077e565b6004827f1ab7da6b000000000000000000000000000000000000000000000000000000008152fd5b9150600654603c810290808204603c1490151715610c0757610db09042613e0a565b91610730565b848
3526004602052604083205462ffffff1698508861072a575b6004837f3733548a000000000000000000000000000000000000000000000000000000008152fd5b6004847f1f2a2005000000000000000000000000000000000000000000000000000000008152fd5b6004847fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b807fbce361b00000000000000000000000000000000000000000000000000000000060049252fd5b503461031f578060031936011261031f5760206040516101f48152f35b503461031f57606060031936011261031f5760043567ffffffffffffffff8111610bf457610f64610ee37f5e41bf0052b493123a63e4e0d9095ed4324108e489d58c9a0948b2be366ac8c6923690600401613a9d565b602435604435610f3f6040518385519160208181890194610f05818388613b7e565b8101600b81520301902055826040516020818851610f24818388613b7e565b81016011815203019020556040519182918651928391613b7e565b8101906012825260208142930301902055604051938493608085526080850190613b9f565b91602084015260408301524260608301520390a180f35b503461031f578060031936011261031f5760206040517f241ecf16d79d0f8dbfb92cbc07fe17840425976cf0667f022fe9877caa831b088152f35b503461031f578060031936011261031f5760206040516113888152f35b503461031f57604060031936011261031f57610fed613b15565b73ffffffffffffffffffffffffffffffffffffffff6024359116906040517fa0c50b690000000000000000000000000000000000000000000000000000000081528381600481865afa9081156111b4578491611192575b50604051918151926020818185019561105e818389613b7e565b81016014815203019020549080155f1461115457505b73ffffffffffffffffffffffffffffffffffffffff604051602081855161109c81838a613b7e565b8101600c815203019020541692831561112c5760206110c691604051809381928751928391613b7e565b8101600b81520301902054908115611104576111009394956110ea60409284613d05565b9681526013602052205460405195869586613be2565b0390f35b6004867fe661aed0000000000000000000000000000000000000000000000000000000008152fd5b6004867fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b908082106111625750611074565b85906044927fff632bea000000000000000000000000000000000000000000000000000000008352600452602452fd5b6111ae91503d8086833e6111a68
183613a22565b810190613ca6565b5f611044565b6040513d86823e3d90fd5b503461031f57606060031936011261031f5760043567ffffffffffffffff8111610bf4576111f1903690600401613a9d565b6111f9613b38565b60443562ffffff81169182820361066e57611212613e46565b73ffffffffffffffffffffffffffffffffffffffff8116801561112c57916020916112e49373ffffffffffffffffffffffffffffffffffffffff600754169173ffffffffffffffffffffffffffffffffffffffff600a541691821091825f146113f65780925b156113ee57506040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff90921660448301529092839190829081906064820190565b03915afa80156111b45773ffffffffffffffffffffffffffffffffffffffff9185916113cf575b50169081156113a75791611396917f21e3c1439de176cb39006e603b26a8d890fe2267c804597e40d2954871141d7d9360405160208185516113508183858a01613b7e565b8101600d815203019020827fffffffffffffffffffffffff0000000000000000000000000000000000000000825416179055604051938493606085526060850190613b9f565b91602084015260408301520390a180f35b6004847f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b6113e8915060203d602011610d5057610d428183613a22565b5f61130b565b905090610785565b8192611278565b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f57602073ffffffffffffffffffffffffffffffffffffffff61144a826102f93660048801613a9d565b8101600c8152030190205416604051908152f35b503461031f57602060031936011261031f576004358180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156114f9576020817f424b07caa75ce8e1c3985f334273f957db9ce138de114e48e50d8240d4d7300b92600655604051908152a180f35b6004827f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b503461031f57604060031936011261031f57611584600435611541613b38565b9061157f61157a825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b613ece565b61414d565b5080f35
b503461031f57604060031936011261031f576115a2613b15565b6115aa613c24565b908280527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040832073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156116bf5773ffffffffffffffffffffffffffffffffffffffff169081156116975762ffffff169060648214158061168b575b8061167f575b80611673575b610dd0578252600460205260408220907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000082541617905580f35b50612710821415611639565b50610bb8821415611633565b506101f482141561162d565b6004837fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b6004837f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b503461031f57602060031936011261031f57604060209173ffffffffffffffffffffffffffffffffffffffff61171b613b15565b168152601383522054604051908152f35b503461031f578060031936011261031f57602060405173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168152f35b503461031f57606060031936011261031f57611797613b15565b61179f613b38565b6117a7613b5b565b918380527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040842073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156118fb5773ffffffffffffffffffffffffffffffffffffffff16801580156118dd575b80156118bf575b610e205773ffffffffffffffffffffffffffffffffffffffff929183917fffffffffffffffffffffffff00000000000000000000000000000000000000006007541617600755167fffffffffffffffffffffffff00000000000000000000000000000000000000006008541617600855167fffffffffffffffffffffffff0000000000000000000000000000000000000000600954161760095580f35b5073ffffffffffffffffffffffffffffffffffffffff831615611822565b5073ffffffffffffffffffffffffffffffffffffffff82161561181b565b6004847f49e27cff000000000000000000000000000000000000000000000000000000008152fd5b503461031f578060031936011261031f57602060405160648152f35b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f576020611977816102f936600
48701613a9d565b8101601581520301902054604051908152f35b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f57602073ffffffffffffffffffffffffffffffffffffffff6119d7826102f93660048801613a9d565b8101600d8152030190205416604051908152f35b503461031f577f57ad858a99d9aee6f1fd395e454bb1659eb8500ccb081c729a103dc2247ba3a4611a1b36613ae3565b90611a24613e46565b816040516020818451611a3a8183858901613b7e565b8101601581520301902055611a5460405192839283613c8a565b0390a180f35b503461031f57602060031936011261031f57611a74613b15565b8180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156114f95773ffffffffffffffffffffffffffffffffffffffff168015611b10577fffffffffffffffffffffffff0000000000000000000000000000000000000000601054161760105580f35b6004827fd92e233d000000000000000000000000000000000000000000000000000000008152fd5b503461031f57602060031936011261031f5760ff604060209273ffffffffffffffffffffffffffffffffffffffff611b6e613b15565b168152600384522054166040519015158152f35b503461031f57602060031936011261031f5762ffffff604060209273ffffffffffffffffffffffffffffffffffffffff611bba613b15565b16815260048452205416604051908152f35b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f576020611c04816102f93660048701613a9d565b8101600b81520301902054604051908152f35b503461031f578060031936011261031f57602090604051908152f35b503461031f57604060031936011261031f5760043567ffffffffffffffff8111610bf457611c65903690600401613a9d565b73ffffffffffffffffffffffffffffffffffffffff611c82613b38565b611c8a613e46565b168015611697577f0c7d242571a289736ea536c54ebe236d31ba62abfd4f22b8d54d2988dc0dd94991611d12916040516020818451611ccc8183858901613b7e565b8101600c815203019020817fffffffffffffffffffffffff0000000000000000000000000000000000000000825416179055604051928392604084526040840190613b9f565b9060208301520390a180f35b503461031f57604060031936011261031f5773ffffffffffffffffffffffffffffffffffffffff6040611d4f613b38565b9260043581527f02d
d7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020522091165f52602052602060ff60405f2054166040519015158152f35b503461031f578060031936011261031f576020604051610bb88152f35b503461031f578060031936011261031f578080527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040812073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f20541615611eb557611e1f614255565b60017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff007fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005416177fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586020604051338152a180f35b807f49e27cff0000000000000000000000000000000000000000000000000000000060049252fd5b503461031f57602060031936011261031f57611ef7613b15565b8180527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040822073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156114f95773ffffffffffffffffffffffffffffffffffffffff168015611b10577fffffffffffffffffffffffff0000000000000000000000000000000000000000600a541617600a5580f35b503461031f578060031936011261031f57602073ffffffffffffffffffffffffffffffffffffffff60105416604051908152f35b503461031f578060031936011261031f576020600e54604051908152f35b503461031f57611ff436613c36565b9083809394527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b6268006020526040832073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f205416156116bf5761205d9293612058614255565b613d30565b80f35b503461031f5760c060031936011261031f5761207a613b15565b60243590612086613b5b565b906064359262ffffff8416908185036125415760843560a435936120a8614255565b6120b06142a8565b6120bb86848361431f565b8473ffffffffffffffffffffffffffffffffffffffff821697888a52600360205260ff60408b20541615612742579415612701575b156126ac575b844211612684576020846121b3928a73ffffffffffffffffffffffffffffffffffffffff600754169173ffffffffffffffffffffffffffffffffffffffff600a541690818d10805f146
1267d5781935b50156113ee57506040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff90921660448301529092839190829081906064820190565b03915afa80156125e65773ffffffffffffffffffffffffffffffffffffffff91899161265e575b50161561263657801561260e576040517f47e7ef24000000000000000000000000000000000000000000000000000000008152306004820152602481018390526020816044818b8b5af180156125e6576125f1575b506008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602481018390526020816044818b8b5af180156125e6576125c9575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a541693604051946122be866139d8565b8886526020808701918252929091166040808701828152306060890190815260808901998a5260a0890188815260c08a0188815260e08b018f815260085495517f414bf3890000000000000000000000000000000000000000000000000000000081529b5173ffffffffffffffffffffffffffffffffffffffff90811660048e01529751881660248d0152935162ffffff1660448c01529151861660648b0152995160848a0152985160a4890152975160c48801529651821660e48701529585916101049183918c91165af1928315612562578793612595575b50821061256d5773ffffffffffffffffffffffffffffffffffffffff60085416604051907f095ea7b300000000000000000000000000000000000000000000000000000000825260048201528660248201526020816044818a8a5af1801561256257612545575b508573ffffffffffffffffffffffffffffffffffffffff600a5416803b15610bf4578180916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528960048401525af18015610c615761252c575b5080808085885af1612473613e17565b501561250457927ff5d6ca9b390b5271e0cbb3d43b4d708d5b17804cb81a4c65e027226d87ccf0e2949273ffffffffffffffffffffffffffffffffffffffff9260c09584600a54169060405196875260208701526040860152606085015260808401521660a0820152a160017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005580f35b6004867f90b8ec180000000000000000000000000000000000000000000000000000000
08152fd5b8161253691613a22565b61254157855f612463565b8580fd5b61255d9060203d602011610c5a57610c4d8183613a22565b6123ff565b6040513d89823e3d90fd5b6004867f8199f5f3000000000000000000000000000000000000000000000000000000008152fd5b9092506020813d6020116125c1575b816125b160209383613a22565b81010312610cb55751915f612398565b3d91506125a4565b6125e19060203d602011610c5a57610c4d8183613a22565b612292565b6040513d8a823e3d90fd5b6126099060203d602011610c5a57610c4d8183613a22565b61222f565b6004877f1f2a2005000000000000000000000000000000000000000000000000000000008152fd5b6004877f76ecffc0000000000000000000000000000000000000000000000000000000008152fd5b612677915060203d602011610d5057610d428183613a22565b5f6121da565b8293612146565b6004887f1ab7da6b000000000000000000000000000000000000000000000000000000008152fd5b9350600654603c810290808204603c14901517156126d4576126ce9042613e0a565b936120f6565b6024887f4e487b710000000000000000000000000000000000000000000000000000000081526011600452fd5b8789526004602052604089205462ffffff169450846120f0576004897f3733548a000000000000000000000000000000000000000000000000000000008152fd5b60048a7f4e38f95a000000000000000000000000000000000000000000000000000000008152fd5b503461031f578060031936011261031f5760206040516127108152f35b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f5760206127bf816102f93660048701613a9d565b8101601481520301902054604051908152f35b503461031f57602060031936011261031f576004359067ffffffffffffffff821161031f57602061280a816102f93660048701613a9d565b8101601181520301902054604051908152f35b503461031f5761205d61282f36613c36565b91612058614255565b5034610cb55760c0600319360112610cb557612852613b15565b60243561285d613b5b565b6064358015918215809203610cb55760843562ffffff811690818103610cb55760a43573ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000163303613042576128c5614255565b6128cd6142a8565b6128d884888a61431f565b5f95156129f75750506040517f47e7ef2400000000000000000000000000000000000000000000000000000000815273fffffffffffffff
fffffffffffffffffffffffff8316600482015260248101869052919050602082806044810103818a73ffffffffffffffffffffffffffffffffffffffff8b165af1908115612562577ffa6ff091ec99bdfd127d51e7786764f2ff7e39f866bbb2a2996e1597052641e49460609473ffffffffffffffffffffffffffffffffffffffff9485946129d8575b505b6040519788526020880152604087015216941692a360017f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f005580f35b6129f09060203d602011610c5a57610c4d8183613a22565b505f61299a565b8091929395501561301a5773ffffffffffffffffffffffffffffffffffffffff871691825f52600360205260ff60405f20541615612ff2579215612fb0575b600654603c810290808204603c1490151715612f8357612a569042613e0a565b804211612f5b57612b0e60208573ffffffffffffffffffffffffffffffffffffffff6007541673ffffffffffffffffffffffffffffffffffffffff600a541680881090815f14612f54578d915b15612f4c576040517f1698ee8200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff92831660048201529116602482015262ffffff90921660448301529092839190829081906064820190565b03915afa8015612e475773ffffffffffffffffffffffffffffffffffffffff915f91612f2d575b501615612f05576040517f47e7ef24000000000000000000000000000000000000000000000000000000008152306004820152602481018890526020816044815f885af18015612e4757612ee8575b506008546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602481018890526020816044815f885af18015612e4757612ecb575b5062ffffff73ffffffffffffffffffffffffffffffffffffffff600a54169460405195612c13876139d8565b858752602087015216604085015230606085015260808401528560a08401528060c08401525f60e08401526020612cfd61010473ffffffffffffffffffffffffffffffffffffffff60085416955f60405197889485937f414bf389000000000000000000000000000000000000000000000000000000008552600485019073ffffffffffffffffffffffffffffffffffffffff60e0809282815116855282602082015116602086015262ffffff60408201511660408601528260608201511660608601526080810151608086015260a081015160a086015260c081015160c086015201511
6910152565b5af1928315612e47575f93612e97575b508210612e6f5760205f91604473ffffffffffffffffffffffffffffffffffffffff6008541660405194859384927f095ea7b300000000000000000000000000000000000000000000000000000000845260048401528160248401525af18015612e4757612e52575b5073ffffffffffffffffffffffffffffffffffffffff600a5416803b15610cb5575f80916024604051809481937f2e1a7d4d0000000000000000000000000000000000000000000000000000000083528760048401525af18015612e4757612e32575b508580808084875af1612dea613e17565b50156125045773ffffffffffffffffffffffffffffffffffffffff7ffa6ff091ec99bdfd127d51e7786764f2ff7e39f866bbb2a2996e1597052641e49360609382939061299c565b612e3f9196505f90613a22565b5f945f612dd9565b6040513d5f823e3d90fd5b612e6a9060203d602011610c5a57610c4d8183613a22565b612d76565b7f8199f5f3000000000000000000000000000000000000000000000000000000005f5260045ffd5b9092506020813d602011612ec3575b81612eb360209383613a22565b81010312610cb55751915f612d0d565b3d9150612ea6565b612ee39060203d602011610c5a57610c4d8183613a22565b612be7565b612f009060203d602011610c5a57610c4d8183613a22565b612b84565b7f76ecffc0000000000000000000000000000000000000000000000000000000005f5260045ffd5b612f46915060203d602011610d5057610d428183613a22565b5f612b35565b508c90610785565b8091612aa3565b7f1ab7da6b000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9150805f52600460205262ffffff60405f2054169182612a36577f3733548a000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f4e38f95a000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f22c50cbf000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f53e51723000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610cb5577f882f47825d4043cd04a564cad4f524a7fe00a604ae024c23dbc8065b77668b4761309936613ae3565b906130a2613e46565b8160405160208184516130b88183858901613b7e565b81016014815203019020556130d260405192839283613c8a565b0390a1005b34610cb5575f60031
9360112610cb557602060ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f0330054166040519015158152f35b34610cb5575f600319360112610cb557602073ffffffffffffffffffffffffffffffffffffffff60075416604051908152f35b34610cb5576020600319360112610cb5576004355f526002602052602073ffffffffffffffffffffffffffffffffffffffff60405f205416604051908152f35b34610cb5576020600319360112610cb55760045f73ffffffffffffffffffffffffffffffffffffffff6131bc613b15565b16604051928380927fa0c50b690000000000000000000000000000000000000000000000000000000082525afa908115612e47575f91613319575b5060405181519060208181850193613210818387613b7e565b81016015815203019020549182156132f15773ffffffffffffffffffffffffffffffffffffffff604051602081845161324a818389613b7e565b8101600c81520301902054169182156132c957602061327491604051809381928651928391613b7e565b8101600b815203019020549081156132a157816132948561110094613d05565b9460405195869586613be2565b7fe661aed0000000000000000000000000000000000000000000000000000000005f5260045ffd5b7fd92e233d000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f9502a873000000000000000000000000000000000000000000000000000000005f5260045ffd5b61332d91503d805f833e6111a68183613a22565b816131f7565b34610cb5576020600319360112610cb5576004355f526001602052602073ffffffffffffffffffffffffffffffffffffffff60405f205416604051908152f35b34610cb5576020600319360112610cb55760043567ffffffffffffffff8111610cb5576133aa60206102f981933690600401613a9d565b8101601781520301902054604051908152f35b34610cb5576020600319360112610cb55760043567ffffffffffffffff8111610cb5576133f460206102f981933690600401613a9d565b8101601681520301902054604051908152f35b34610cb5575f600319360112610cb557335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff1615613510577fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f033005460ff8116156134e8577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00167fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300557f5db9ee0a495bf2e6ff9c9
1a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6020604051338152a1005b7f8dfc202b000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f49e27cff000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610cb5576040600319360112610cb557613551613b38565b3373ffffffffffffffffffffffffffffffffffffffff82160361357a5761001a9060043561414d565b7f6697b232000000000000000000000000000000000000000000000000000000005f5260045ffd5b34610cb5576040600319360112610cb55761001a6004356135c1613b38565b906135fa61157a825f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b61403b565b34610cb5576020600319360112610cb55760206136496004355f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b626800602052600160405f20015490565b604051908152f35b34610cb5577f507273e640affcefbad497278a9b264a65c62c430dd92d24dd0d58595529539c61368036613ae3565b90613689613e46565b81604051602081845161369f8183858901613b7e565b81016016815203019020556130d260405192839283613c8a565b34610cb5575f600319360112610cb557602073ffffffffffffffffffffffffffffffffffffffff600a5416604051908152f35b34610cb5576020600319360112610cb5576004355f525f602052602060405f2054604051908152f35b34610cb5576040600319360112610cb55761372e613b15565b73ffffffffffffffffffffffffffffffffffffffff6024359161374f613e46565b169081156132c95760207f911a025fb070fa2a29c37a3bf4c00d16acf15583cd050f17bdbacbab7e72320391835f52601382528060405f2055604051908152a2005b34610cb5576040600319360112610cb5576137aa613b15565b60243590811515809203610cb557335f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff16156135105773ffffffffffffffffffffffffffffffffffffffff165f52600360205260405f209060ff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0083541691161790555f80f35b34610cb5577f2d57170c913282d2886a5ace7e18bed8b1c53a069f2698ae9e048bd501f3af3b61386e36613ae3565b90613877613e46565b81604051602081845161388d8183858901613b7e565b81016017815203019020556130d260405192839283613c8a565b34610cb5577f6a59d
469e3757d6e139cdf95b12740f585d553afac49b90bdbe278502a4427186138d636613ae3565b908160405160208184516138ed8183858901613b7e565b8101600b815203019020556130d260405192839283613c8a565b34610cb5576020600319360112610cb5576004357fffffffff000000000000000000000000000000000000000000000000000000008116809103610cb557807f7965db0b000000000000000000000000000000000000000000000000000000006020921490811561397e575b506040519015158152f35b7f01ffc9a70000000000000000000000000000000000000000000000000000000091501482613973565b34610cb5575f600319360112610cb55760209073ffffffffffffffffffffffffffffffffffffffff600854168152f35b610100810190811067ffffffffffffffff8211176139f557604052565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff8211176139f557604052565b67ffffffffffffffff81116139f557601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b81601f82011215610cb557803590613ab482613a63565b92613ac26040519485613a22565b82845260208383010111610cb557815f926020809301838601378301015290565b6040600319820112610cb5576004359067ffffffffffffffff8211610cb557613b0e91600401613a9d565b9060243590565b6004359073ffffffffffffffffffffffffffffffffffffffff82168203610cb557565b6024359073ffffffffffffffffffffffffffffffffffffffff82168203610cb557565b6044359073ffffffffffffffffffffffffffffffffffffffff82168203610cb557565b5f5b838110613b8f5750505f910152565b8181015183820152602001613b80565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f602093613bdb81518092818752878088019101613b7e565b0116010190565b919260a09373ffffffffffffffffffffffffffffffffffffffff613c219796931684526020840152604083015260608201528160808201520190613b9f565b90565b6024359062ffffff82168203610cb557565b6003196060910112610cb55760043573ffffffffffffffffffffffffffffffffffffffff81168103610cb557906024359060443573ffffffffffffffffffffffffffffffffffffffff81168103610cb55790565b929190613ca1602091604086526
040860190613b9f565b930152565b602081830312610cb55780519067ffffffffffffffff8211610cb5570181601f82011215610cb5578051613cd981613a63565b92613ce76040519485613a22565b81845260208284010111610cb557613c219160208085019101613b7e565b81810292918115918404141715612f8357565b90816020910312610cb557518015158103610cb55790565b90602091613db193613d4381848461431f565b5f73ffffffffffffffffffffffffffffffffffffffff6040518097819682957f47e7ef24000000000000000000000000000000000000000000000000000000008452600484016020909392919373ffffffffffffffffffffffffffffffffffffffff60408201951681520152565b0393165af18015612e4757613dc35750565b613ddb9060203d602011610c5a57610c4d8183613a22565b50565b90816020910312610cb5575173ffffffffffffffffffffffffffffffffffffffff81168103610cb55790565b91908201809211612f8357565b3d15613e41573d90613e2882613a63565b91613e366040519384613a22565b82523d5f602084013e565b606090565b335f9081527f06484cc59dc38e4f67c31122333a17ca81b3ca18cdf02bfc298072fa52b0316a602052604090205460ff1615613e7e57565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f52336004527f241ecf16d79d0f8dbfb92cbc07fe17840425976cf0667f022fe9877caa831b0860245260445ffd5b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff33165f5260205260ff60405f20541615613f255750565b7fe2517d3f000000000000000000000000000000000000000000000000000000005f523360045260245260445ffd5b73ffffffffffffffffffffffffffffffffffffffff81165f9081527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d602052604090205460ff166140365773ffffffffffffffffffffffffffffffffffffffff165f8181527fb7db2dd08fcb62d0c9e08c51941cae53c267786a0b75803fb7960902fc8ef97d6020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790553391907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d8180a4600190565b505f90565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f526
0205260ff60405f205416155f1461414757805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f2060017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082541617905573ffffffffffffffffffffffffffffffffffffffff339216907f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d5f80a4600190565b50505f90565b805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260ff60405f2054165f1461414757805f527f02dd7bc7dec4dceedda775e58dd541e08a116c6c53815c0bd028192f7b62680060205260405f2073ffffffffffffffffffffffffffffffffffffffff83165f5260205260405f207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00815416905573ffffffffffffffffffffffffffffffffffffffff339216907ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b5f80a4600190565b60ff7fcd5ed15c6e187e77e9aee88184c21f4f2182ab5827cb3b7e07fbedcd63f03300541661428057565b7fd93c0665000000000000000000000000000000000000000000000000000000005f5260045ffd5b60027f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f0054146142f75760027f9b779b17422d0df92223018b32b4d1fa46e071723d6817e2486d003becc55f0055565b7f3ee5aeb5000000000000000000000000000000000000000000000000000000005f5260045ffd5b90919073ffffffffffffffffffffffffffffffffffffffff16156132c95773ffffffffffffffffffffffffffffffffffffffff1680156132c95773ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001681149081156143f6575b506143ce57156143a657565b7f1f2a2005000000000000000000000000000000000000000000000000000000005f5260045ffd5b7f82d5d76a000000000000000000000000000000000000000000000000000000005f5260045ffd5b905030145f61439a565b60ff7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a005460401c161561442f57565b7fd7e6bcf8000000000000000000000000000000000000000000000000000000005f5260045ffdfea264697066735822122092328db215d1d
ba4578f8d74c1f15c59cabbce608e05ddc6ddbf2facfc2530ce64736f6c634300081a0033" const PRC20_CREATION_BYTECODE = "608060405234801561000f575f80fd5b50600436106101a5575f3560e01c806374be2150116100e8578063c701262611610093578063eddeb1231161006e578063eddeb12314610457578063f687d12a1461046a578063f97c007a1461047d578063fc5fecd514610486575f80fd5b8063c7012626146103cb578063d9eeebed146103de578063dd62ed3e14610412575f80fd5b8063b84c8246116100c3578063b84c82461461037e578063c47f002714610391578063c6f1b7e7146103a4575f80fd5b806374be21501461033c57806395d89b4114610363578063a9059cbb1461036b575f80fd5b806323b872dd1161015357806347e7ef241161012e57806347e7ef24146102a1578063609c92b8146102b4578063701cd43b146102e857806370a0823114610307575f80fd5b806323b872dd14610266578063313ce5671461027957806342966c681461028e575f80fd5b8063091d278811610183578063091d278814610224578063095ea7b31461023b57806318160ddd1461025e575f80fd5b8063044d9371146101a957806306fdde03146101fa57806307e2bd8d1461020f575b5f80fd5b6101d07f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610202610499565b6040516101f1919061143c565b61022261021d366004611479565b610529565b005b61022d60015481565b6040519081526020016101f1565b61024e610249366004611494565b6105ef565b60405190151581526020016101f1565b60065461022d565b61024e6102743660046114be565b6106ae565b60055460405160ff90911681526020016101f1565b61024e61029c3660046114fc565b61079b565b61024e6102af366004611494565b6107ae565b6102db7f000000000000000000000000000000000000000000000000000000000000000081565b6040516101f19190611513565b5f546101d09073ffffffffffffffffffffffffffffffffffffffff1681565b61022d610315366004611479565b73ffffffffffffffffffffffffffffffffffffffff165f9081526007602052604090205490565b61022d7f000000000000000000000000000000000000000000000000000000000000000081565b610202610879565b61024e610379366004611494565b610888565b61022261038c36600461157f565b61089d565b61022261039f36600461157f565b61091c565b6101d07f000000000000000000
000000000000000000000000000000000000000000000081565b61024e6103d936600461166f565b610997565b6103e6610af9565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526020830191909152016101f1565b61022d6104203660046116e1565b73ffffffffffffffffffffffffffffffffffffffff9182165f90815260086020908152604080832093909416825291909152205490565b6102226104653660046114fc565b610d04565b6102226104783660046114fc565b610da8565b61022d60025481565b6103e66104943660046114fc565b610e4c565b6060600380546104a890611718565b80601f01602080910402602001604051908101604052809291908181526020018280546104d490611718565b801561051f5780601f106104f65761010080835404028352916020019161051f565b820191905f5260205f20905b81548152906001019060200180831161050257829003601f168201915b5050505050905090565b73ffffffffffffffffffffffffffffffffffffffff8116610576576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040519081527f412d5a95dc32cbb6bd9319bccf1bc1febeda71e734893a440f1f6853252fe99f906020015b60405180910390a150565b5f73ffffffffffffffffffffffffffffffffffffffff831661063d576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b335f81815260086020908152604080832073ffffffffffffffffffffffffffffffffffffffff881680855290835292819020869055518581529192917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a35060015b92915050565b5f6106ba848484611055565b73ffffffffffffffffffffffffffffffffffffffff84165f90815260086020908152604080832033845290915290205482811015610724576040517f10bad14700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff85165f81815260086020908152604080832033808552908352928190208786039081905590519081529192917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3506001949350505050
565b5f6107a6338361119c565b506001919050565b5f6107b983836112ed565b6040517fffffffffffffffffffffffffffffffffffffffff0000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000060601b1660208201527f67fc7bdaed5b0ec550d8706b87d60568ab70c6b781263c70101d54cd1564aab390603401604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290526108689186908690611769565b60405180910390a150600192915050565b6060600480546104a890611718565b5f610894338484611055565b50600192915050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461090c576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600461091882826117ef565b5050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461098b576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600361091882826117ef565b5f805f6109a2610af9565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000081166024830152604482018390529294509092505f918416906323b872dd906064016020604051808303815f875af1158015610a42573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610a669190611906565b905080610a9f576040517f0a7cd6d600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610aa9338661119c565b7f9ffbffc04a397460ee1dbe8c9503e098090567d6b7f4b3c02a8617d800b6d9553388888886600254604051610ae496959493929190611925565b60405180910390a15060019695505050505050565b5f80546040517f7471e6970000000000000000000000000000000000000000000000000000000081527f00000000000000000000000000000000000000000000000000000000000000006004820152829173ffffffffffffffffffffffffffffffffffffffff1690637471e69790602401602060405180830381865afa158015610b85573d5f803e
3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ba991906119a5565b915073ffffffffffffffffffffffffffffffffffffffff8216610bf8576040517f3d5729c100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80546040517fd7fd7afb0000000000000000000000000000000000000000000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff9091169063d7fd7afb90602401602060405180830381865afa158015610c84573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ca891906119c0565b9050805f03610ce3576040517fe661aed000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600254600154610cf39083611a04565b610cfd9190611a1b565b9150509091565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610d73576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60028190556040518181527fef13af88e424b5d15f49c77758542c1938b08b8b95b91ed0751f98ba99000d8f906020016105e4565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610e17576040517f6626eaef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60018190556040518181527fff5788270f43bfc1ca41c503606d2594aa3023a1a7547de403a3e2f146a4a80a906020016105e4565b5f80546040517f7471e6970000000000000000000000000000000000000000000000000000000081527f00000000000000000000000000000000000000000000000000000000000000006004820152829173ffffffffffffffffffffffffffffffffffffffff1690637471e69790602401602060405180830381865afa158015610ed8573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610efc91906119a5565b915073ffffffffffffffffffffffffffffffffffffffff8216610f4b576040517f3d5729c100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80546040517fd7fd7afb00000000000000000000000000000000000000
00000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff9091169063d7fd7afb90602401602060405180830381865afa158015610fd7573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610ffb91906119c0565b9050805f03611036576040517fe661aed000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002546110438583611a04565b61104d9190611a1b565b915050915091565b73ffffffffffffffffffffffffffffffffffffffff8316158061108c575073ffffffffffffffffffffffffffffffffffffffff8216155b156110c3576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83165f9081526007602052604090205481811015611122576040517ffe382aa700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8085165f8181526007602052604080822086860390559286168082529083902080548601905591517fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9061118e9086815260200190565b60405180910390a350505050565b73ffffffffffffffffffffffffffffffffffffffff82166111e9576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805f03611222576040517f1f2a200500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82165f9081526007602052604090205481811015611281576040517ffe382aa700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83165f8181526007602090815260408083208686039055600680548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff821661133a576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805f03611373576040517f1f2a20050000000000000000
0000000000000000000000000000000000000000815260040160405180910390fd5b600680548201905573ffffffffffffffffffffffffffffffffffffffff82165f818152600760209081526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b5f81518084525f5b818110156113ff576020818501810151868301820152016113e3565b505f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f61144e60208301846113db565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff81168114611476575f80fd5b50565b5f60208284031215611489575f80fd5b813561144e81611455565b5f80604083850312156114a5575f80fd5b82356114b081611455565b946020939093013593505050565b5f805f606084860312156114d0575f80fd5b83356114db81611455565b925060208401356114eb81611455565b929592945050506040919091013590565b5f6020828403121561150c575f80fd5b5035919050565b602081016003831061154c577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b91905290565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f6020828403121561158f575f80fd5b813567ffffffffffffffff8111156115a5575f80fd5b8201601f810184136115b5575f80fd5b803567ffffffffffffffff8111156115cf576115cf611552565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160116810181811067ffffffffffffffff8211171561163b5761163b611552565b604052818152828201602001861015611652575f80fd5b816020840160208301375f91810160200191909152949350505050565b5f805f60408486031215611681575f80fd5b833567ffffffffffffffff811115611697575f80fd5b8401601f810186136116a7575f80fd5b803567ffffffffffffffff8111156116bd575f80fd5b8660208284010111156116ce575f80fd5b6020918201979096509401359392505050565b5f80604083850312156116f2575f80fd5b82356116fd81611455565b9150602083013561170d81611455565b809150509250929050565b600181811c9082168061172c57607f821691505b602082108103611763577f4e48
7b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b50919050565b606081525f61177b60608301866113db565b73ffffffffffffffffffffffffffffffffffffffff9490941660208301525060400152919050565b601f8211156117ea57805f5260205f20601f840160051c810160208510156117c85750805b601f840160051c820191505b818110156117e7575f81556001016117d4565b50505b505050565b815167ffffffffffffffff81111561180957611809611552565b61181d816118178454611718565b846117a3565b6020601f82116001811461186e575f83156118385750848201515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600385901b1c1916600184901b1784556117e7565b5f848152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08516915b828110156118bb578785015182556020948501946001909201910161189b565b50848210156118f757868401517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b60f8161c191681555b50505050600190811b01905550565b5f60208284031215611916575f80fd5b8151801515811461144e575f80fd5b73ffffffffffffffffffffffffffffffffffffffff8716815260a060208201528460a0820152848660c08301375f60c086830101525f60c07fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8801168301019050846040830152836060830152826080830152979650505050505050565b5f602082840312156119b5575f80fd5b815161144e81611455565b5f602082840312156119d0575f80fd5b5051919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b80820281158282048414176106a8576106a86119d7565b808201808211156106a8576106a86119d756fea26469706673582212206be692aa215f21df823c52c689a11caa03254730bfade7b8b36788d6a72ba61764736f6c634300081a0033" From a8ee6f7945ffc896f1fcb5cbfb5619d429b4f8fd Mon Sep 17 00:00:00 2001 From: Nilesh Gupta Date: Fri, 17 Apr 2026 14:15:27 +0530 Subject: [PATCH 48/61] refactor: added upgrade handler for fund migration fixes --- app/upgrades.go | 2 + .../tss-fund-migration-fixes/upgrade.go | 53 +++++++++++++++++++ .../tss-fund-migration-fixes/upgrade_test.go | 22 ++++++++ 3 files changed, 77 
insertions(+) create mode 100644 app/upgrades/tss-fund-migration-fixes/upgrade.go create mode 100644 app/upgrades/tss-fund-migration-fixes/upgrade_test.go diff --git a/app/upgrades.go b/app/upgrades.go index d9e6fc8e..74b033ad 100755 --- a/app/upgrades.go +++ b/app/upgrades.go @@ -10,6 +10,7 @@ import ( aiauditfixes2 "github.com/pushchain/push-chain-node/app/upgrades/ai-audit-fixes-2" purgeexpiredoutbounds "github.com/pushchain/push-chain-node/app/upgrades/purge-expired-outbounds" removeutxverifier "github.com/pushchain/push-chain-node/app/upgrades/remove-utxverifier" + tssfundmigrationfixes "github.com/pushchain/push-chain-node/app/upgrades/tss-fund-migration-fixes" tssmigration "github.com/pushchain/push-chain-node/app/upgrades/tss-migration" ueamigration "github.com/pushchain/push-chain-node/app/upgrades/uea-migration" ceagasandpayload "github.com/pushchain/push-chain-node/app/upgrades/cea-gas-and-payload" @@ -63,6 +64,7 @@ var Upgrades = []upgrades.Upgrade{ tssmigration.NewUpgrade(), purgeexpiredoutbounds.NewUpgrade(), removeutxverifier.NewUpgrade(), + tssfundmigrationfixes.NewUpgrade(), } // RegisterUpgradeHandlers registers the chain upgrade handlers diff --git a/app/upgrades/tss-fund-migration-fixes/upgrade.go b/app/upgrades/tss-fund-migration-fixes/upgrade.go new file mode 100644 index 00000000..4f7ca5c3 --- /dev/null +++ b/app/upgrades/tss-fund-migration-fixes/upgrade.go @@ -0,0 +1,53 @@ +package tssfundmigrationfixes + +import ( + "context" + + storetypes "cosmossdk.io/store/types" + upgradetypes "cosmossdk.io/x/upgrade/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + + "github.com/pushchain/push-chain-node/app/upgrades" +) + +const UpgradeName = "tss-fund-migration-fixes" + +func NewUpgrade() upgrades.Upgrade { + return upgrades.Upgrade{ + UpgradeName: UpgradeName, + CreateUpgradeHandler: CreateUpgradeHandler, + StoreUpgrades: storetypes.StoreUpgrades{ + Added: []string{}, + Deleted: []string{}, + }, + } 
+} + +// CreateUpgradeHandler runs the utss v3 → v4 migration which backfills +// FundMigration.l1_gas_fee on records stored before the field existed. +// The new gas_limit and l1_gas_fee values used by InitiateFundMigration are +// sourced from UniversalCore's tssFundMigrationGasLimitByChainNamespace and +// l1GasFeeByChainNamespace mappings at call time — no state seeding required. +func CreateUpgradeHandler( + mm upgrades.ModuleManager, + configurator module.Configurator, + ak *upgrades.AppKeepers, +) upgradetypes.UpgradeHandler { + return func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + logger := sdkCtx.Logger().With("upgrade", UpgradeName) + logger.Info("Starting upgrade handler") + logger.Info("Feature: FundMigration.gas_limit and l1_gas_fee now sourced from UniversalCore per-chain mappings") + + versionMap, err := mm.RunMigrations(ctx, configurator, fromVM) + if err != nil { + logger.Error("RunMigrations failed", "error", err) + return nil, err + } + + logger.Info("Upgrade complete", "upgrade", UpgradeName) + return versionMap, nil + } +} diff --git a/app/upgrades/tss-fund-migration-fixes/upgrade_test.go b/app/upgrades/tss-fund-migration-fixes/upgrade_test.go new file mode 100644 index 00000000..954102b4 --- /dev/null +++ b/app/upgrades/tss-fund-migration-fixes/upgrade_test.go @@ -0,0 +1,22 @@ +package tssfundmigrationfixes_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + tssfundmigrationfixes "github.com/pushchain/push-chain-node/app/upgrades/tss-fund-migration-fixes" +) + +// TestNewUpgrade_Identity verifies the upgrade descriptor carries the expected +// name, wires up a non-nil handler factory, and declares no store additions or +// deletions (the migration is in-place on existing kv keys). 
+func TestNewUpgrade_Identity(t *testing.T) { + u := tssfundmigrationfixes.NewUpgrade() + + require.Equal(t, "tss-fund-migration-fixes", u.UpgradeName) + require.NotNil(t, u.CreateUpgradeHandler, "upgrade must expose a handler factory") + require.Empty(t, u.StoreUpgrades.Added, "no new KV stores expected") + require.Empty(t, u.StoreUpgrades.Deleted, "no KV stores deleted") + require.Empty(t, u.StoreUpgrades.Renamed, "no KV stores renamed") +} From b5b9e64c53a8b01e550821d01b632a75debaed08 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:20:57 +0530 Subject: [PATCH 49/61] Bug fixes and README instructions updated --- e2e-tests/README.md | 8 ++++++++ e2e-tests/deploy_addresses.json | 2 +- e2e-tests/setup.sh | 35 +++++++++++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 8688f5f3..c5a10f1c 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -351,6 +351,14 @@ Manual helpers: --- +## Adding a new token to the setup + +To register a new synthetic token in the local bootstrap, edit `../push-chain-core-contracts/scripts/localSetup/setup.s.sol` and add the token there. The `all` pipeline will deploy it and automatically create a WPC ↔ token liquidity pool as part of `create-pool`. + +Note: this only handles pools paired with WPC. If you need a pool between two non-WPC tokens, additional adjustments are required (extra pool-creation logic in the swap setup and matching entries in the token/uregistry configs). 
+ +--- + ## Auto-retry and resilience behavior ### Forge scripts (core, gateway, configureUniversalCore) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 736b4f2f..2964c011 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-04-09T11:40:20Z", + "generatedAt": "2026-04-15T09:32:52Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 32079e78..eb1244db 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -699,6 +699,12 @@ sdk_prepare_test_files_for_localnet() { done < <(find "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" -type f -name '*.spec.ts' | sort) } +step_clone_push_chain_sdk() { + require_cmd git + clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" "$PUSH_CHAIN_SDK_DIR" + log_ok "push-chain-sdk ready at $PUSH_CHAIN_SDK_DIR" +} + step_setup_push_chain_sdk() { require_cmd git yarn npm cast jq perl @@ -706,7 +712,11 @@ step_setup_push_chain_sdk() { local sdk_account_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_ACCOUNT_TS_PATH" local uea_impl_raw uea_impl synced_localnet_uea - clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" "$PUSH_CHAIN_SDK_DIR" + if [[ ! 
-d "$PUSH_CHAIN_SDK_DIR/.git" ]]; then + log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR" + log_err "Run: $0 clone-sdk (or 'setup all' which clones it automatically)" + exit 1 + fi local sdk_env_path="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CORE_ENV_PATH" local sdk_evm_private_key sdk_evm_rpc sdk_solana_rpc sdk_solana_private_key sdk_push_private_key @@ -778,12 +788,30 @@ step_setup_push_chain_sdk() { ' "$sdk_account_file" log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" + local sdk_e2e_root="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__" + if [[ -d "$sdk_e2e_root" ]]; then + log_info "Replacing TESTNET/TESTNET_DONUT with LOCALNET across all SDK __e2e__ test files" + local patched_count=0 + while IFS= read -r -d '' e2e_file; do + perl -0pi -e ' + s/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; + s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; + s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + ' "$e2e_file" + patched_count=$((patched_count + 1)) + done < <(find "$sdk_e2e_root" -type f \( -name '*.ts' -o -name '*.tsx' \) -print0) + log_ok "Applied LOCALNET replacement to $patched_count file(s) under $sdk_e2e_root" + else + log_warn "SDK __e2e__ directory not found at $sdk_e2e_root; skipping TESTNET→LOCALNET replacement" + fi + log_info "Installing push-chain-sdk dependencies" ( cd "$PUSH_CHAIN_SDK_DIR" yarn install npm install npm i --save-dev @types/bs58 + npm i tweetnacl ) log_ok "push-chain-sdk setup complete" @@ -2834,6 +2862,7 @@ cmd_all() { step_update_eth_token_config step_setup_gateway step_add_uregistry_configs + step_clone_push_chain_sdk step_deploy_counter_and_sync_sdk sdk_sync_localnet_constants step_sync_vault_tss_on_anvil @@ -2862,7 +2891,8 @@ Commands: sync-vault-tss Grant TSS_ROLE on each Anvil EVM vault to the current local TSS key (LOCAL only) bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) 
deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE - setup-sdk Clone/setup push-chain-sdk, generate SDK .env from e2e .env, and install dependencies + clone-sdk Clone/update push-chain-sdk repo only (no env/deps setup) + setup-sdk Setup push-chain-sdk (requires clone-sdk first): generate .env, replace TESTNET→LOCALNET in __e2e__ files, install deps sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts @@ -2920,6 +2950,7 @@ main() { sync-vault-tss) step_sync_vault_tss_on_anvil ;; bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; + clone-sdk) step_clone_push_chain_sdk ;; setup-sdk) step_setup_push_chain_sdk ;; sdk-test-all) step_run_sdk_tests_all ;; sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; From fb9d33f27392b8da43c1b0ef092e903f7672c660 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Sun, 19 Apr 2026 15:32:24 +0530 Subject: [PATCH 50/61] quick-testing outbound feature added --- e2e-tests/README.md | 12 ++++++++++ e2e-tests/setup.sh | 54 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index c5a10f1c..e8346d06 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -221,6 +221,17 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh bootstrap-cea-sdk TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-cea-to-eoa ``` +### Quick outbound smoke test + +For the fastest outbound sanity check after a fresh bootstrap, chain `all` with `quick-testing-outbound`: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +TESTING_ENV=LOCAL bash e2e-tests/setup.sh 
quick-testing-outbound +``` + +`quick-testing-outbound` internally runs `setup-sdk`, then `fund-uea-prc20`, and finally executes just the two most important outbound specs — `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` — so you get end-to-end outbound coverage without running the full outbound suite. + --- ## Local devnet (`local-native/devnet`) @@ -305,6 +316,7 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh | `setup-sdk` | Clone/install SDK, generate SDK `.env`, sync LOCALNET constants | | `sdk-test-all` | Run all configured inbound SDK E2E test files | | `sdk-test-outbound-all` | Run all configured outbound SDK E2E test files (LOCAL only) | +| `quick-testing-outbound` | Run `setup-sdk` + `fund-uea-prc20`, then only `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` (fast outbound smoke test) | | `sdk-test-pctx-last-transaction` | Run `pctx-last-transaction.spec.ts` | | `sdk-test-send-to-self` | Run `send-to-self.spec.ts` | | `sdk-test-progress-hook` | Run `progress-hook-per-tx.spec.ts` | diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index eb1244db..efaa32e5 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -950,6 +950,58 @@ step_run_sdk_outbound_tests_all() { log_ok "Completed all configured SDK outbound E2E tests" } +step_run_sdk_quick_testing_outbound() { + local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" + local quick_files=( + "cea-to-eoa.spec.ts" + "cea-to-uea.spec.ts" + ) + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + local file full_path + + step_setup_push_chain_sdk + step_fund_uea_prc20 + + sdk_sync_localnet_constants + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + if [[ ! 
-f "$full_path" ]]; then + log_err "SDK outbound test file not found: $full_path" + exit 1 + fi + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$full_path" + log_ok "Prepared LOCALNET network replacement in $file" + done + + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + log_info "Running SDK outbound test: $file" + local rel_pattern="${full_path##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + done + + log_ok "Completed quick-testing-outbound SDK E2E tests" +} + step_devnet() { require_cmd bash jq @@ -2895,6 +2947,7 @@ Commands: setup-sdk Setup push-chain-sdk (requires clone-sdk first): generate .env, replace TESTNET→LOCALNET in __e2e__ files, install deps sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) + quick-testing-outbound Run setup-sdk + fund-uea-prc20, then execute cea-to-eoa.spec.ts and cea-to-uea.spec.ts only 
sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts sdk-test-progress-hook Run progress-hook-per-tx.spec.ts @@ -2954,6 +3007,7 @@ main() { setup-sdk) step_setup_push_chain_sdk ;; sdk-test-all) step_run_sdk_tests_all ;; sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; + quick-testing-outbound) step_run_sdk_quick_testing_outbound ;; sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; From 425c7ae138cc47342cffc2c30e32a125f1b0e883 Mon Sep 17 00:00:00 2001 From: aman035 Date: Mon, 20 Apr 2026 17:10:36 +0530 Subject: [PATCH 51/61] fix: event pasring with l1gasFee --- universalClient/chains/push/event_parser.go | 1 + universalClient/chains/push/event_parser_test.go | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/universalClient/chains/push/event_parser.go b/universalClient/chains/push/event_parser.go index 87e59110..d82d60bb 100644 --- a/universalClient/chains/push/event_parser.go +++ b/universalClient/chains/push/event_parser.go @@ -81,6 +81,7 @@ func convertFundMigrationEvent(migration *utsstypes.FundMigration) (*store.Event BlockHeight: migration.InitiatedBlock, GasPrice: migration.GasPrice, GasLimit: migration.GasLimit, + L1GasFee: migration.L1GasFee, }) if err != nil { return nil, fmt.Errorf("failed to marshal fund migration event data: %w", err) diff --git a/universalClient/chains/push/event_parser_test.go b/universalClient/chains/push/event_parser_test.go index 35c262df..31168157 100644 --- a/universalClient/chains/push/event_parser_test.go +++ b/universalClient/chains/push/event_parser_test.go @@ -300,6 +300,9 @@ func TestConvertFundMigrationEvent(t *testing.T) { CurrentTssPubkey: "0x03def456", Chain: "eip155:421614", InitiatedBlock: 5000, + GasPrice: "1000000000", + GasLimit: 21100, + L1GasFee: 
"42", } result, err := convertFundMigrationEvent(migration) @@ -322,6 +325,9 @@ func TestConvertFundMigrationEvent(t *testing.T) { assert.Equal(t, "0x03def456", data.CurrentTssPubkey) assert.Equal(t, "eip155:421614", data.Chain) assert.Equal(t, int64(5000), data.BlockHeight) + assert.Equal(t, "1000000000", data.GasPrice) + assert.Equal(t, uint64(21100), data.GasLimit) + assert.Equal(t, "42", data.L1GasFee, "L1 gas fee must be forwarded to downstream consumers") }) t.Run("event ID is hash of type and migration ID", func(t *testing.T) { From bced41f6d1d998b29e23d2516501913f19327976 Mon Sep 17 00:00:00 2001 From: aman035 Date: Mon, 20 Apr 2026 17:14:07 +0530 Subject: [PATCH 52/61] fix: max transfer calculation --- universalClient/chains/common/types.go | 1 + universalClient/chains/evm/tx_builder.go | 55 +++++++++-- universalClient/chains/evm/tx_builder_test.go | 94 +++++++++++++++++++ 3 files changed, 141 insertions(+), 9 deletions(-) diff --git a/universalClient/chains/common/types.go b/universalClient/chains/common/types.go index 24b9591e..727a21d2 100644 --- a/universalClient/chains/common/types.go +++ b/universalClient/chains/common/types.go @@ -30,6 +30,7 @@ type FundMigrationData struct { To string // New TSS address (derived from current pubkey) GasPrice *big.Int // Gas price from the migration event GasLimit uint64 // Gas limit from the migration event + L1GasFee *big.Int // Extra L1 data-availability fee (wei); 0 for non-L2 chains } // UnsignedSigningReq contains the request for signing an outbound transaction diff --git a/universalClient/chains/evm/tx_builder.go b/universalClient/chains/evm/tx_builder.go index 7976f9af..55212db2 100644 --- a/universalClient/chains/evm/tx_builder.go +++ b/universalClient/chains/evm/tx_builder.go @@ -471,9 +471,10 @@ func (tb *TxBuilder) GetGasFeeUsed(ctx context.Context, txHash string) (string, } // GetFundMigrationSigningRequest builds a native token transfer for fund migration, -// transferring the maximum possible balance 
(balance minus gas cost). +// transferring the maximum possible balance (balance minus gas cost minus L1 fee). // Fund migration only triggers when outbound is disabled and no pending outbounds remain, // so the balance at signing time will equal the balance at broadcast time. +// L1GasFee covers OP-stack sequencer data-availability charges; 0 for non-L2 chains. func (tb *TxBuilder) GetFundMigrationSigningRequest(ctx context.Context, data *common.FundMigrationData, nonce uint64) (*common.UnsignedSigningReq, error) { fromAddr := ethcommon.HexToAddress(data.From) toAddr := ethcommon.HexToAddress(data.To) @@ -481,16 +482,18 @@ func (tb *TxBuilder) GetFundMigrationSigningRequest(ctx context.Context, data *c if data.GasPrice == nil || data.GasPrice.Sign() == 0 { return nil, fmt.Errorf("gas price must be provided for fund migration") } + if data.GasLimit == 0 { + return nil, fmt.Errorf("gas limit must be provided for fund migration") + } balance, err := tb.rpcClient.GetBalance(ctx, fromAddr) if err != nil { return nil, fmt.Errorf("failed to get balance of %s: %w", data.From, err) } - gasCost := new(big.Int).Mul(data.GasPrice, new(big.Int).SetUint64(data.GasLimit)) - maxTransfer := new(big.Int).Sub(balance, gasCost) - if maxTransfer.Sign() <= 0 { - return nil, fmt.Errorf("insufficient balance for gas: balance=%s gasCost=%s", balance.String(), gasCost.String()) + maxTransfer, err := computeFundMigrationTransfer(balance, data.GasPrice, data.GasLimit, data.L1GasFee) + if err != nil { + return nil, err } tb.logger.Info(). @@ -499,6 +502,7 @@ func (tb *TxBuilder) GetFundMigrationSigningRequest(ctx context.Context, data *c Str("balance", balance.String()). Str("gas_price", data.GasPrice.String()). Uint64("gas_limit", data.GasLimit). + Str("l1_gas_fee", l1GasFeeString(data.L1GasFee)). Str("transfer_amount", maxTransfer.String()). 
Msg("building fund migration tx") @@ -521,6 +525,9 @@ func (tb *TxBuilder) GetFundMigrationSigningRequest(ctx context.Context, data *c } // BroadcastFundMigrationTx assembles and broadcasts a signed fund migration transaction. +// The sweep amount must be recomputed here using the same formula as signing +// (balance - gasPrice*gasLimit - l1GasFee); otherwise the broadcast tx hash +// diverges from the signed hash. func (tb *TxBuilder) BroadcastFundMigrationTx(ctx context.Context, req *common.UnsignedSigningReq, data *common.FundMigrationData, signature []byte) (string, error) { if len(signature) != 65 { return "", fmt.Errorf("signature must be 65 bytes [r(32)|s(32)|v(1)], got %d", len(signature)) @@ -529,6 +536,9 @@ func (tb *TxBuilder) BroadcastFundMigrationTx(ctx context.Context, req *common.U if data.GasPrice == nil || data.GasPrice.Sign() == 0 { return "", fmt.Errorf("gas price must be provided for fund migration") } + if data.GasLimit == 0 { + return "", fmt.Errorf("gas limit must be provided for fund migration") + } fromAddr := ethcommon.HexToAddress(data.From) toAddr := ethcommon.HexToAddress(data.To) @@ -538,10 +548,9 @@ func (tb *TxBuilder) BroadcastFundMigrationTx(ctx context.Context, req *common.U return "", fmt.Errorf("failed to get balance of %s: %w", data.From, err) } - gasCost := new(big.Int).Mul(data.GasPrice, new(big.Int).SetUint64(data.GasLimit)) - maxTransfer := new(big.Int).Sub(balance, gasCost) - if maxTransfer.Sign() <= 0 { - return "", fmt.Errorf("insufficient balance for gas during broadcast") + maxTransfer, err := computeFundMigrationTransfer(balance, data.GasPrice, data.GasLimit, data.L1GasFee) + if err != nil { + return "", err } tx := types.NewTransaction( @@ -574,3 +583,31 @@ func (tb *TxBuilder) BroadcastFundMigrationTx(ctx context.Context, req *common.U return txHashStr, nil } + +// computeFundMigrationTransfer returns the native amount to sweep from the old +// TSS address to the new one: balance - (gasPrice * gasLimit) - l1GasFee. 
+// The l1GasFee covers OP-stack sequencer data-availability charges (0 for +// non-L2 chains). All validators must compute the same value — any drift +// here breaks the TSS signing hash. +func computeFundMigrationTransfer(balance, gasPrice *big.Int, gasLimit uint64, l1GasFee *big.Int) (*big.Int, error) { + gasCost := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gasLimit)) + totalFee := new(big.Int).Set(gasCost) + if l1GasFee != nil && l1GasFee.Sign() > 0 { + totalFee.Add(totalFee, l1GasFee) + } + maxTransfer := new(big.Int).Sub(balance, totalFee) + if maxTransfer.Sign() <= 0 { + return nil, fmt.Errorf("insufficient balance for gas: balance=%s gasCost=%s l1GasFee=%s", + balance.String(), gasCost.String(), l1GasFeeString(l1GasFee)) + } + return maxTransfer, nil +} + +// l1GasFeeString returns a stable decimal representation of the L1 gas fee +// for logging / error messages, treating nil as "0". +func l1GasFeeString(v *big.Int) string { + if v == nil { + return "0" + } + return v.String() +} diff --git a/universalClient/chains/evm/tx_builder_test.go b/universalClient/chains/evm/tx_builder_test.go index 4c5bd799..6145856d 100644 --- a/universalClient/chains/evm/tx_builder_test.go +++ b/universalClient/chains/evm/tx_builder_test.go @@ -1049,3 +1049,97 @@ func TestNewTxBuilderZeroGatewayAddress(t *testing.T) { assert.Nil(t, tb) assert.Contains(t, err.Error(), "invalid gateway address") } + +// --------------------------------------------------------------------------- +// Fund migration transfer math +// --------------------------------------------------------------------------- + +// TestComputeFundMigrationTransfer covers the sweep-amount formula +// balance - (gasPrice * gasLimit) - l1GasFee for both L1 and L2-style chains. +// All validators must compute the same value — any drift breaks the TSS hash. 
+func TestComputeFundMigrationTransfer(t *testing.T) { + t.Run("no L1 fee (mainnet-style) nil", func(t *testing.T) { + // balance 1 ETH, gasPrice 20 gwei, gasLimit 21000 → gasCost = 420000 gwei + balance := new(big.Int).SetUint64(1_000_000_000_000_000_000) + gasPrice := new(big.Int).SetUint64(20_000_000_000) + got, err := computeFundMigrationTransfer(balance, gasPrice, 21000, nil) + require.NoError(t, err) + want := new(big.Int).Sub(balance, new(big.Int).Mul(gasPrice, big.NewInt(21000))) + assert.Equal(t, want.String(), got.String()) + }) + + t.Run("zero L1 fee (mainnet-style) treated as zero", func(t *testing.T) { + balance := new(big.Int).SetUint64(1_000_000_000_000_000_000) + gasPrice := new(big.Int).SetUint64(20_000_000_000) + got, err := computeFundMigrationTransfer(balance, gasPrice, 21000, big.NewInt(0)) + require.NoError(t, err) + want := new(big.Int).Sub(balance, new(big.Int).Mul(gasPrice, big.NewInt(21000))) + assert.Equal(t, want.String(), got.String()) + }) + + t.Run("non-zero L1 fee (OP-stack) is subtracted on top of L2 gas cost", func(t *testing.T) { + // 1 ETH balance, L2 gasCost=420000 gwei, L1 data-availability fee=150 gwei + balance := new(big.Int).SetUint64(1_000_000_000_000_000_000) + gasPrice := new(big.Int).SetUint64(20_000_000_000) + l1Fee := new(big.Int).SetUint64(150_000_000_000) + got, err := computeFundMigrationTransfer(balance, gasPrice, 21000, l1Fee) + require.NoError(t, err) + gasCost := new(big.Int).Mul(gasPrice, big.NewInt(21000)) + want := new(big.Int).Sub(balance, new(big.Int).Add(gasCost, l1Fee)) + assert.Equal(t, want.String(), got.String()) + }) + + t.Run("balance exactly equals total fee → insufficient", func(t *testing.T) { + gasPrice := new(big.Int).SetUint64(20_000_000_000) + l1Fee := big.NewInt(100) + gasCost := new(big.Int).Mul(gasPrice, big.NewInt(21000)) + balance := new(big.Int).Add(gasCost, l1Fee) + _, err := computeFundMigrationTransfer(balance, gasPrice, 21000, l1Fee) + require.Error(t, err) + assert.Contains(t, 
err.Error(), "insufficient balance") + }) + + t.Run("L1 fee tips balance into insufficient", func(t *testing.T) { + // Without L1 fee, balance covers gas and leaves 100 wei. With L1 fee of 200, it's insufficient. + gasPrice := new(big.Int).SetUint64(20_000_000_000) + gasCost := new(big.Int).Mul(gasPrice, big.NewInt(21000)) + balance := new(big.Int).Add(gasCost, big.NewInt(100)) + _, err := computeFundMigrationTransfer(balance, gasPrice, 21000, big.NewInt(200)) + require.Error(t, err) + assert.Contains(t, err.Error(), "insufficient balance") + }) + + t.Run("deterministic across equivalent l1 fee representations", func(t *testing.T) { + // big.NewInt(0) and nil must produce identical results — the TSS signing + // hash depends on it. + balance := new(big.Int).SetUint64(500_000_000_000_000_000) + gasPrice := new(big.Int).SetUint64(15_000_000_000) + withNil, err := computeFundMigrationTransfer(balance, gasPrice, 21000, nil) + require.NoError(t, err) + withZero, err := computeFundMigrationTransfer(balance, gasPrice, 21000, big.NewInt(0)) + require.NoError(t, err) + assert.Equal(t, withNil.String(), withZero.String()) + }) +} + +func TestL1GasFeeString(t *testing.T) { + assert.Equal(t, "0", l1GasFeeString(nil)) + assert.Equal(t, "0", l1GasFeeString(big.NewInt(0))) + assert.Equal(t, "12345", l1GasFeeString(big.NewInt(12345))) +} + +// TestGetFundMigrationSigningRequest_RejectsZeroGasLimit verifies that a +// missing / zero gasLimit on the event (which would otherwise encode to 0) +// is rejected before any RPC call — deterministic failure, not a broken tx. 
+func TestGetFundMigrationSigningRequest_RejectsZeroGasLimit(t *testing.T) { + tb := newTestTxBuilder(t) + data := &common.FundMigrationData{ + From: "0x1111111111111111111111111111111111111111", + To: "0x2222222222222222222222222222222222222222", + GasPrice: big.NewInt(20_000_000_000), + GasLimit: 0, + } + _, err := tb.GetFundMigrationSigningRequest(context.Background(), data, 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "gas limit must be provided") +} From 5f7491dc8ef42ada2bcf514bdaeda219418009b1 Mon Sep 17 00:00:00 2001 From: aman035 Date: Mon, 20 Apr 2026 17:29:43 +0530 Subject: [PATCH 53/61] fix: coordinator & sessionManager --- universalClient/tss/coordinator/coordinator.go | 5 +++++ universalClient/tss/sessionmanager/sessionmanager.go | 9 +++++++-- .../tss/sessionmanager/sessionmanager_test.go | 7 +++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/universalClient/tss/coordinator/coordinator.go b/universalClient/tss/coordinator/coordinator.go index f48239b3..dc9770ad 100644 --- a/universalClient/tss/coordinator/coordinator.go +++ b/universalClient/tss/coordinator/coordinator.go @@ -723,14 +723,19 @@ func (c *Coordinator) createFundMigrationSignSetup(ctx context.Context, eventDat if assignedNonce == nil { return nil, nil, fmt.Errorf("assigned nonce is required for fund migration transaction") } + gasPrice := new(big.Int) gasPrice.SetString(migrationData.GasPrice, 10) + l1GasFee := new(big.Int) + l1GasFee.SetString(migrationData.L1GasFee, 10) + migrationFundData := &common.FundMigrationData{ From: oldTSSAddr, To: currentTSSAddr, GasPrice: gasPrice, GasLimit: migrationData.GasLimit, + L1GasFee: l1GasFee, } signingReq, err := builder.GetFundMigrationSigningRequest(ctx, migrationFundData, *assignedNonce) if err != nil { diff --git a/universalClient/tss/sessionmanager/sessionmanager.go b/universalClient/tss/sessionmanager/sessionmanager.go index c0c896de..a728fafd 100644 --- a/universalClient/tss/sessionmanager/sessionmanager.go 
+++ b/universalClient/tss/sessionmanager/sessionmanager.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "math/big" - "sync" "time" @@ -891,15 +890,21 @@ func (sm *SessionManager) verifyFundMigrationSigningRequest(ctx context.Context, req.Nonce, finalizedNonce, oldTSSAddr) } - // Rebuild fund migration signing request with coordinator's nonce + // Rebuild fund migration signing request with coordinator's nonce. + // Parsing must match what the coordinator did; otherwise the reconstructed + // hash on OP-stack chains diverges and the verification below rejects it. gasPrice := new(big.Int) gasPrice.SetString(migrationData.GasPrice, 10) + l1GasFee := new(big.Int) + l1GasFee.SetString(migrationData.L1GasFee, 10) + migrationFundData := &common.FundMigrationData{ From: oldTSSAddr, To: currentTSSAddr, GasPrice: gasPrice, GasLimit: migrationData.GasLimit, + L1GasFee: l1GasFee, } signingReq, err := builder.GetFundMigrationSigningRequest(ctx, migrationFundData, req.Nonce) if err != nil { diff --git a/universalClient/tss/sessionmanager/sessionmanager_test.go b/universalClient/tss/sessionmanager/sessionmanager_test.go index cdd5b1b3..242e8e7c 100644 --- a/universalClient/tss/sessionmanager/sessionmanager_test.go +++ b/universalClient/tss/sessionmanager/sessionmanager_test.go @@ -592,12 +592,15 @@ func TestVerifyFundMigrationSigningRequest_Validation(t *testing.T) { // Use the well-known secp256k1 generator point (valid compressed pubkey) genPoint := "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798" + // Include L1GasFee to assert the new proto field survives JSON roundtrip + // through the event data on its way into verifyFundMigrationSigningRequest. 
migrationData := utsstypes.FundMigrationInitiatedEventData{ OldTssPubkey: genPoint, CurrentTssPubkey: genPoint, - Chain: "eip155:1", + Chain: "eip155:10", GasPrice: "1000000000", - GasLimit: 21000, + GasLimit: 21100, + L1GasFee: "150", } eventDataBytes, _ := json.Marshal(migrationData) event := &store.Event{ From 484b3b4e0c562a95236afe303dd88e58f2f9fd3b Mon Sep 17 00:00:00 2001 From: aman035 Date: Mon, 20 Apr 2026 17:30:37 +0530 Subject: [PATCH 54/61] fix: broadcast fund migration --- .../tss/txbroadcaster/broadcaster_test.go | 17 +++++++++++++++-- universalClient/tss/txbroadcaster/evm.go | 4 ++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/universalClient/tss/txbroadcaster/broadcaster_test.go b/universalClient/tss/txbroadcaster/broadcaster_test.go index 6725f154..060333ff 100644 --- a/universalClient/tss/txbroadcaster/broadcaster_test.go +++ b/universalClient/tss/txbroadcaster/broadcaster_test.go @@ -463,7 +463,8 @@ func makeSignedFundMigrationData(t *testing.T, chainID string, nonce uint64) []b CurrentTssPubkey: testNewTSSPubkey, Chain: chainID, GasPrice: "1000000000", - GasLimit: 21000, + GasLimit: 21100, + L1GasFee: "150", }, SigningData: &SigningData{ Signature: sig, @@ -498,7 +499,18 @@ func TestFundMigrationEVM_BroadcastSuccess(t *testing.T) { insertSignedFundMigrationEvent(t, db, "fm-1", "eip155:1", 0) - builder.On("BroadcastFundMigrationTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + // Assert the broadcaster forwards gas-limit and L1 gas fee from the signed + // event payload into FundMigrationData; otherwise sweep math diverges + // from what the signer hashed. + builder.On("BroadcastFundMigrationTx", + mock.Anything, + mock.Anything, + mock.MatchedBy(func(d *common.FundMigrationData) bool { + return d.GasLimit == 21100 && + d.L1GasFee != nil && d.L1GasFee.String() == "150" && + d.GasPrice != nil && d.GasPrice.String() == "1000000000" + }), + mock.Anything). 
Return("0xmigrate123", nil) b := newBroadcaster(evtStore, ch, "") @@ -507,6 +519,7 @@ func TestFundMigrationEVM_BroadcastSuccess(t *testing.T) { ev := getEvent(t, db, "fm-1") require.Equal(t, store.StatusBroadcasted, ev.Status) require.Equal(t, "eip155:1:0xmigrate123", ev.BroadcastedTxHash) + builder.AssertExpectations(t) } func TestFundMigrationEVM_BroadcastFails_NonceConsumed(t *testing.T) { diff --git a/universalClient/tss/txbroadcaster/evm.go b/universalClient/tss/txbroadcaster/evm.go index e1aac19e..9279608f 100644 --- a/universalClient/tss/txbroadcaster/evm.go +++ b/universalClient/tss/txbroadcaster/evm.go @@ -102,11 +102,15 @@ func (b *Broadcaster) broadcastFundMigrationEVM(ctx context.Context, event *stor gasPrice := new(big.Int) gasPrice.SetString(data.GasPrice, 10) + l1GasFee := new(big.Int) + l1GasFee.SetString(data.L1GasFee, 10) + migrationData := &common.FundMigrationData{ From: oldTSSAddr, To: currentTSSAddr, GasPrice: gasPrice, GasLimit: data.GasLimit, + L1GasFee: l1GasFee, } txHash, broadcastErr := builder.BroadcastFundMigrationTx(ctx, signingReq, migrationData, signature) From e9c6867575cb5efc924bbf0adcb2db4eb29adeab Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Fri, 24 Apr 2026 17:13:44 +0530 Subject: [PATCH 55/61] fix: Fund migration vote (#209) * add: fund to transfer is req * fix: sessionManager * add: sessionManager stores the fund migration amount too * add: pass fund migration for broadcasting --- universalClient/chains/common/types.go | 7 ++- universalClient/chains/evm/tx_builder.go | 24 ++++----- .../tss/sessionmanager/sessionmanager.go | 29 ++++++++--- .../tss/sessionmanager/sessionmanager_test.go | 44 ++++++++++++++-- .../tss/txbroadcaster/broadcaster.go | 13 +++-- .../tss/txbroadcaster/broadcaster_test.go | 52 +++++++++++++++++-- 6 files changed, 137 insertions(+), 32 deletions(-) diff --git a/universalClient/chains/common/types.go b/universalClient/chains/common/types.go index 727a21d2..d0526d59 100644 --- 
a/universalClient/chains/common/types.go +++ b/universalClient/chains/common/types.go @@ -33,10 +33,15 @@ type FundMigrationData struct { L1GasFee *big.Int // Extra L1 data-availability fee (wei); 0 for non-L2 chains } -// UnsignedSigningReq contains the request for signing an outbound transaction +// UnsignedSigningReq contains the request for signing an outbound or fund-migration transaction. type UnsignedSigningReq struct { SigningHash []byte // Hash to be signed by TSS Nonce uint64 // evm - TSS Address nonce | svm - PDA nonce + + // TSSFundMigrationAmount is the native value swept for a fund-migration tx, fixed at + // signing time. Nil for outbound. Must be reused verbatim at broadcast — re-querying + // balance there races with a successful sweep from another validator. + TSSFundMigrationAmount *big.Int `json:"TSSFundMigrationAmount,omitempty"` } // TxBuilder builds and broadcasts transactions for outbound transfers diff --git a/universalClient/chains/evm/tx_builder.go b/universalClient/chains/evm/tx_builder.go index 55212db2..d03ec844 100644 --- a/universalClient/chains/evm/tx_builder.go +++ b/universalClient/chains/evm/tx_builder.go @@ -518,9 +518,12 @@ func (tb *TxBuilder) GetFundMigrationSigningRequest(ctx context.Context, data *c signer := types.NewEIP155Signer(big.NewInt(tb.chainIDInt)) txHash := signer.Hash(tx).Bytes() + // TSSFundMigrationAmount rides alongside Nonce in the req — both are signing-time-decided + // values that must reach broadcast unchanged so the signed tx is reproduced exactly. 
return &common.UnsignedSigningReq{ - SigningHash: txHash, - Nonce: nonce, + SigningHash: txHash, + Nonce: nonce, + TSSFundMigrationAmount: new(big.Int).Set(maxTransfer), }, nil } @@ -540,18 +543,13 @@ func (tb *TxBuilder) BroadcastFundMigrationTx(ctx context.Context, req *common.U return "", fmt.Errorf("gas limit must be provided for fund migration") } - fromAddr := ethcommon.HexToAddress(data.From) - toAddr := ethcommon.HexToAddress(data.To) - - balance, err := tb.rpcClient.GetBalance(ctx, fromAddr) - if err != nil { - return "", fmt.Errorf("failed to get balance of %s: %w", data.From, err) - } - - maxTransfer, err := computeFundMigrationTransfer(balance, data.GasPrice, data.GasLimit, data.L1GasFee) - if err != nil { - return "", err + // Use the exact amount fixed at signing time. Re-querying balance here would race + // with a successful broadcast from another validator (balance goes to 0 post-sweep). + if req.TSSFundMigrationAmount == nil || req.TSSFundMigrationAmount.Sign() <= 0 { + return "", fmt.Errorf("req.TSSFundMigrationAmount must be set for fund migration broadcast") } + toAddr := ethcommon.HexToAddress(data.To) + maxTransfer := new(big.Int).Set(req.TSSFundMigrationAmount) tx := types.NewTransaction( req.Nonce, diff --git a/universalClient/tss/sessionmanager/sessionmanager.go b/universalClient/tss/sessionmanager/sessionmanager.go index a728fafd..f1f3373d 100644 --- a/universalClient/tss/sessionmanager/sessionmanager.go +++ b/universalClient/tss/sessionmanager/sessionmanager.go @@ -32,11 +32,11 @@ type SendFunc func(ctx context.Context, peerID string, data []byte) error // sessionState holds all state for a single session. 
type sessionState struct { session dkls.Session - protocolType string // type of protocol (keygen, keyrefresh, quorumchange, sign) - coordinator string // coordinatorPeerID - expiryTime time.Time // when session expires - participants []string // list of participants (from setup message) - stepMu sync.Mutex // mutex to serialize Step() calls (DKLS may not be thread-safe) + protocolType string // type of protocol (keygen, keyrefresh, quorumchange, sign) + coordinator string // coordinatorPeerID + expiryTime time.Time // when session expires + participants []string // list of participants (from setup message) + stepMu sync.Mutex // mutex to serialize Step() calls (DKLS may not be thread-safe) signingReq *common.UnsignedSigningReq // cached from coordinator setup (sign sessions only) } @@ -921,12 +921,23 @@ func (sm *SessionManager) verifyFundMigrationSigningRequest(ctx context.Context, return fmt.Errorf("fund migration signing hash mismatch: our computed hash does not match coordinator's hash") } + // Defense-in-depth: hash match implies amount match, but cross-check explicitly so + // a wire-format bug, coordinator bug, or missing amount surfaces here rather than + // as a nil-deref / insufficient-balance error later in broadcast. + if req.TSSFundMigrationAmount == nil { + return fmt.Errorf("coordinator's signing request is missing TSSFundMigrationAmount") + } + if req.TSSFundMigrationAmount.Cmp(signingReq.TSSFundMigrationAmount) != 0 { + return fmt.Errorf("TSSFundMigrationAmount mismatch: coordinator=%s ours=%s", + req.TSSFundMigrationAmount.String(), signingReq.TSSFundMigrationAmount.String()) + } + sm.logger.Debug(). Str("event_id", event.EventID). Str("signing_hash", hex.EncodeToString(req.SigningHash)). Str("old_tss_addr", oldTSSAddr). Str("current_tss_addr", currentTSSAddr). 
- Msg("fund migration sign metadata verified - hash matches") + Msg("fund migration sign metadata verified - hash and amount match") return nil } @@ -941,7 +952,8 @@ func (sm *SessionManager) getTSSAddress(ctx context.Context) (string, error) { } // handleSigningComplete handles post-sign steps. EVM: set status SIGNED and store payload (txlifecycle/signed runs BroadcastOutboundSigningRequest). Solana: enqueue for sequential per-chain broadcast (PDA nonce order). -// signingReq is the cached signing request from the coordinator setup message. +// signingReq is the cached signing request from the coordinator setup message; for FUND_MIGRATE +// its TSSFundMigrationAmount is populated by verifyFundMigrationSigningRequest and persisted here. func (sm *SessionManager) handleSigningComplete(_ context.Context, eventID string, eventData []byte, signature []byte, signingReq *common.UnsignedSigningReq) error { if signingReq == nil { return fmt.Errorf("signing request is nil - cannot persist signing data") @@ -953,6 +965,9 @@ func (sm *SessionManager) handleSigningComplete(_ context.Context, eventID strin "signing_hash": hex.EncodeToString(signingReq.SigningHash), "nonce": signingReq.Nonce, } + if signingReq.TSSFundMigrationAmount != nil && signingReq.TSSFundMigrationAmount.Sign() > 0 { + signingData["tss_fund_migration_amount"] = signingReq.TSSFundMigrationAmount + } // Unmarshal original event data, add signing_data, re-marshal var raw map[string]any diff --git a/universalClient/tss/sessionmanager/sessionmanager_test.go b/universalClient/tss/sessionmanager/sessionmanager_test.go index 242e8e7c..8c39d407 100644 --- a/universalClient/tss/sessionmanager/sessionmanager_test.go +++ b/universalClient/tss/sessionmanager/sessionmanager_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "math/big" "reflect" "testing" "time" @@ -609,10 +610,10 @@ func TestVerifyFundMigrationSigningRequest_Validation(t *testing.T) { EventData: eventDataBytes, } sm.chains = nil - err := 
sm.verifyFundMigrationSigningRequest(ctx, event, &common.UnsignedSigningReq{ - SigningHash: []byte{0x01, 0x02}, - }) + req := &common.UnsignedSigningReq{SigningHash: []byte{0x01, 0x02}} + err := sm.verifyFundMigrationSigningRequest(ctx, event, req) assert.NoError(t, err) + assert.Nil(t, req.TSSFundMigrationAmount, "amount stays nil when chain/builder is skipped") }) } @@ -960,6 +961,43 @@ func TestHandleSigningComplete(t *testing.T) { assert.Equal(t, "beef", signingData["signature"]) assert.Equal(t, "dead", signingData["signing_hash"]) assert.Equal(t, float64(99), signingData["nonce"]) + _, hasAmount := signingData["tss_fund_migration_amount"] + assert.False(t, hasAmount, "tss_fund_migration_amount is omitted for outbound events") + }) + + t.Run("fund migration signing complete persists tss_fund_migration_amount", func(t *testing.T) { + event := store.Event{ + EventID: "fm-complete-1", + BlockHeight: 250, + Type: store.EventTypeSignFundMigrate, + Status: store.StatusInProgress, + EventData: []byte(`{"migration_id":7,"chain":"eip155:1"}`), + } + require.NoError(t, testDB.Create(&event).Error) + + req := &common.UnsignedSigningReq{ + SigningHash: []byte{0xca, 0xfe}, + Nonce: 3, + TSSFundMigrationAmount: new(big.Int).SetUint64(123456789), + } + err := sm.handleSigningComplete(context.Background(), "fm-complete-1", event.EventData, []byte{0xbe, 0xef}, req) + require.NoError(t, err) + + var updated store.Event + require.NoError(t, testDB.Where("event_id = ?", "fm-complete-1").First(&updated).Error) + assert.Equal(t, store.StatusSigned, updated.Status) + + // Decode the field into *big.Int directly — unmarshalling into map[string]any + // would coerce the JSON number into float64 and lose precision for wei values. 
+ var decoded struct { + SigningData struct { + TSSFundMigrationAmount *big.Int `json:"tss_fund_migration_amount"` + } `json:"signing_data"` + } + require.NoError(t, json.Unmarshal(updated.EventData, &decoded)) + require.NotNil(t, decoded.SigningData.TSSFundMigrationAmount, + "tss_fund_migration_amount must survive the sign→broadcast handoff so broadcast reproduces the signed tx") + assert.Equal(t, "123456789", decoded.SigningData.TSSFundMigrationAmount.String()) }) } diff --git a/universalClient/tss/txbroadcaster/broadcaster.go b/universalClient/tss/txbroadcaster/broadcaster.go index dde29a18..d4f3bd59 100644 --- a/universalClient/tss/txbroadcaster/broadcaster.go +++ b/universalClient/tss/txbroadcaster/broadcaster.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math/big" "time" "github.com/rs/zerolog" @@ -24,9 +25,10 @@ import ( // SigningData holds the signing parameters persisted by sessionManager when marking SIGNED. type SigningData struct { - Signature string `json:"signature"` // hex-encoded 64/65 byte signature - SigningHash string `json:"signing_hash"` // hex-encoded signing hash - Nonce uint64 `json:"nonce"` + Signature string `json:"signature"` // hex-encoded 64/65 byte signature + SigningHash string `json:"signing_hash"` // hex-encoded signing hash + Nonce uint64 `json:"nonce"` + TSSFundMigrationAmount *big.Int `json:"tss_fund_migration_amount,omitempty"` } // SignedOutboundData wraps OutboundCreatedEvent with signing data. 
@@ -208,8 +210,9 @@ func decodeSigningData(sd *SigningData) (*common.UnsignedSigningReq, []byte, err } return &common.UnsignedSigningReq{ - SigningHash: signingHash, - Nonce: sd.Nonce, + SigningHash: signingHash, + Nonce: sd.Nonce, + TSSFundMigrationAmount: sd.TSSFundMigrationAmount, }, signature, nil } diff --git a/universalClient/tss/txbroadcaster/broadcaster_test.go b/universalClient/tss/txbroadcaster/broadcaster_test.go index 060333ff..6404119b 100644 --- a/universalClient/tss/txbroadcaster/broadcaster_test.go +++ b/universalClient/tss/txbroadcaster/broadcaster_test.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math/big" "reflect" "testing" "time" @@ -451,6 +452,11 @@ const testOldTSSPubkey = "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2 const testNewTSSPubkey = "02c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5" func makeSignedFundMigrationData(t *testing.T, chainID string, nonce uint64) []byte { + t.Helper() + return makeSignedFundMigrationDataWithTransfer(t, chainID, nonce, nil) +} + +func makeSignedFundMigrationDataWithTransfer(t *testing.T, chainID string, nonce uint64, transferAmount *big.Int) []byte { t.Helper() sig := hex.EncodeToString(make([]byte, 65)) hash := hex.EncodeToString(make([]byte, 32)) @@ -467,9 +473,10 @@ func makeSignedFundMigrationData(t *testing.T, chainID string, nonce uint64) []b L1GasFee: "150", }, SigningData: &SigningData{ - Signature: sig, - SigningHash: hash, - Nonce: nonce, + Signature: sig, + SigningHash: hash, + Nonce: nonce, + TSSFundMigrationAmount: transferAmount, }, } b, err := json.Marshal(data) @@ -522,6 +529,45 @@ func TestFundMigrationEVM_BroadcastSuccess(t *testing.T) { builder.AssertExpectations(t) } +// TestFundMigrationEVM_TSSFundMigrationAmountThreaded asserts the tss_fund_migration_amount captured +// at signing time is decoded onto the signing req passed to BroadcastFundMigrationTx. 
Without +// this, the second validator's broadcast queries balance=0 (post-sweep) and the assembler +// returns "insufficient balance" — leaving the event stuck in SIGNED forever and blocking +// migration consensus. +func TestFundMigrationEVM_TSSFundMigrationAmountThreaded(t *testing.T) { + evtStore, db := setupTestDB(t) + builder := &mockTxBuilder{} + client := &mockChainClient{builder: builder} + ch := newTestChains(t, "eip155:1", uregistrytypes.VmType_EVM, client) + + event := store.Event{ + EventID: "fm-transfer", + BlockHeight: 100, + ExpiryBlockHeight: 99999, + Type: store.EventTypeSignFundMigrate, + ConfirmationType: "INSTANT", + Status: store.StatusSigned, + EventData: makeSignedFundMigrationDataWithTransfer(t, "eip155:1", 0, new(big.Int).SetUint64(777_000_000_000_000_000)), + } + require.NoError(t, db.Create(&event).Error) + + builder.On("BroadcastFundMigrationTx", + mock.Anything, + mock.MatchedBy(func(req *common.UnsignedSigningReq) bool { + return req.TSSFundMigrationAmount != nil && req.TSSFundMigrationAmount.String() == "777000000000000000" + }), + mock.Anything, + mock.Anything). 
+ Return("0xmigrate777", nil) + + b := newBroadcaster(evtStore, ch, "") + b.processSigned(context.Background()) + + ev := getEvent(t, db, "fm-transfer") + require.Equal(t, store.StatusBroadcasted, ev.Status) + builder.AssertExpectations(t) +} + func TestFundMigrationEVM_BroadcastFails_NonceConsumed(t *testing.T) { evtStore, db := setupTestDB(t) builder := &mockTxBuilder{} From 44cf1fe325fa834cf09b0ea6a57f4fba92a80389 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Sun, 26 Apr 2026 14:05:36 +0530 Subject: [PATCH 56/61] fix e2e setup for local outbound tests --- Makefile | 2 +- e2e-tests/replace_addresses.sh | 46 ++++++++++++ e2e-tests/setup.sh | 85 +++++++++++++++++++---- local-native/devnet | 29 ++++++-- local-native/scripts/setup-uvalidators.sh | 23 +++--- 5 files changed, 157 insertions(+), 28 deletions(-) create mode 100755 e2e-tests/replace_addresses.sh diff --git a/Makefile b/Makefile index 84322ffa..be1291ab 100755 --- a/Makefile +++ b/Makefile @@ -157,7 +157,7 @@ clean: .PHONY: replace-addresses replace-addresses: - bash scripts/replace_addresses.sh + bash e2e-tests/replace_addresses.sh distclean: clean rm -rf vendor/ diff --git a/e2e-tests/replace_addresses.sh b/e2e-tests/replace_addresses.sh new file mode 100755 index 00000000..402f1d0f --- /dev/null +++ b/e2e-tests/replace_addresses.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT_DIR" + +ENV_FILE="e2e-tests/.env" +if [[ ! -f "$ENV_FILE" ]]; then + echo "e2e-tests/.env not found" >&2 + exit 1 +fi + +PRIVATE_KEY="$(grep '^PRIVATE_KEY=' "$ENV_FILE" | cut -d= -f2 | tr -d '"' | tr -d "'")" +if [[ -z "$PRIVATE_KEY" ]]; then + echo "PRIVATE_KEY not found in $ENV_FILE" >&2 + exit 1 +fi + +if ! 
command -v cast >/dev/null 2>&1; then + echo "cast command not found (install foundry/cast)" >&2 + exit 1 +fi + +EVM_ADDRESS="$(cast wallet address "$PRIVATE_KEY")" +PUSH_ADDRESS="push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20" + +echo "Replacing with PUSH_ADDRESS: $PUSH_ADDRESS" +echo "Replacing with EVM_ADDRESS: $EVM_ADDRESS" + +for f in x/utss/types/params.go x/uregistry/types/params.go x/uvalidator/types/params.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/Admin: \"push1[0-9a-z]+\"/Admin: \"$PUSH_ADDRESS\"/g" "$f" + echo "Updated Admin in $f" + fi +done + +for f in x/uexecutor/types/constants.go x/uregistry/types/constants.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"$EVM_ADDRESS\"/g" "$f" + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS = \"$EVM_ADDRESS\"/g" "$f" + echo "Updated PROXY_ADMIN_OWNER_ADDRESS in $f" + fi +done + +echo "Address replacement completed." 
diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index efaa32e5..f8c75b0c 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -808,7 +808,7 @@ step_setup_push_chain_sdk() { log_info "Installing push-chain-sdk dependencies" ( cd "$PUSH_CHAIN_SDK_DIR" - yarn install + yarn install --mode=skip-build npm install npm i --save-dev @types/bs58 npm i tweetnacl @@ -1257,6 +1257,21 @@ step_setup_environment() { bsc_latest_block="0" solana_latest_slot="0" + start_detached_process() { + local log_file="$1" + shift + + if command -v perl >/dev/null 2>&1; then + perl -MPOSIX=setsid -e ' + setsid() or die "setsid failed: $!"; + open STDIN, "<", "/dev/null" or die "stdin redirect failed: $!"; + exec @ARGV or die "exec failed: $!"; + ' "$@" >"$log_file" 2>&1 & + else + nohup "$@" >"$log_file" 2>&1 "$LOG_DIR/anvil_${label}.log" 2>&1 & + start_detached_process "$LOG_DIR/anvil_${label}.log" \ + anvil --host 0.0.0.0 --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 } wait_for_block_number() { @@ -1318,7 +1333,7 @@ step_setup_environment() { fi log_info "Starting surfpool for local Solana testing on port 8899" - nohup surfpool start --port 8899 --network devnet >"$LOG_DIR/surfpool.log" 2>&1 & + start_detached_process "$LOG_DIR/surfpool.log" surfpool start --port 8899 --network devnet } wait_for_solana_slot() { @@ -1479,6 +1494,33 @@ step_stop_running_nodes() { pkill -f "$PUSH_CHAIN_DIR/build/pchaind start" >/dev/null 2>&1 || true pkill -f "$PUSH_CHAIN_DIR/build/puniversald" >/dev/null 2>&1 || true + local port pid wait_count + local ports=( + 26656 26657 26658 26659 26660 26666 26676 26686 + 1317 1318 1319 1320 + 9090 9093 9095 9097 + 8545 8546 8547 8548 8549 8550 8551 8552 + 6060 + 8080 8081 8082 8083 + 39000 39001 39002 39003 + ) + + for port in "${ports[@]}"; do + while IFS= read -r pid; do + [[ -n "$pid" ]] || continue + log_info "Stopping process $pid on local-native port $port" + kill "$pid" >/dev/null 2>&1 || true + wait_count=0 + while kill 
-0 "$pid" >/dev/null 2>&1 && [[ "$wait_count" -lt 5 ]]; do + sleep 1 + wait_count=$((wait_count + 1)) + done + if kill -0 "$pid" >/dev/null 2>&1; then + kill -9 "$pid" >/dev/null 2>&1 || true + fi + done < <(lsof -ti tcp:"$port" 2>/dev/null || true) + done + log_ok "Running nodes stopped" } @@ -2567,7 +2609,16 @@ step_fund_uea_prc20() { return 0 fi - log_info "Funding UEA $uea_addr (signer: $evm_addr) with PRC20 tokens from deployer" + log_info "Funding UEA $uea_addr (signer: $evm_addr) with native UPC and PRC20 tokens from deployer" + + if [[ -n "${PRIVATE_KEY:-}" ]]; then + log_info " Sending 10000 UPC native balance to UEA $uea_addr" + cast send --private-key "$PRIVATE_KEY" "$uea_addr" \ + --value "10000ether" \ + --rpc-url "$PUSH_RPC_URL" 2>&1 | grep -E "^status" || true + else + log_warn "PRIVATE_KEY is empty; skipping native UPC funding for UEA" + fi local token_count token_count="$(jq -r '.tokens | length' "$DEPLOY_ADDRESSES_FILE")" @@ -2591,7 +2642,7 @@ step_fund_uea_prc20() { --rpc-url "$PUSH_RPC_URL" 2>&1 | grep -E "^status" || true done < <(jq -r '.tokens[]? 
| [.symbol, .address, (.decimals // 18)] | @tsv' "$DEPLOY_ADDRESSES_FILE") - log_ok "UEA PRC20 funding complete" + log_ok "UEA native UPC and PRC20 funding complete" } step_create_all_wpc_pools() { @@ -2751,12 +2802,22 @@ step_deploy_counter_and_sync_sdk() { fi log_info "Deploying CounterPayable contract on Push localnet" - local deploy_out counter_addr - deploy_out="$(cast send --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --create "$counter_creation_code" 2>&1)" || { - log_err "Counter deployment failed" + local deploy_out counter_addr deploy_attempt + deploy_attempt=1 + deploy_out="" + + while [[ "$deploy_attempt" -le 5 ]]; do + deploy_out="$(cast send --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --create "$counter_creation_code" 2>&1)" || true + counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')" + if validate_eth_address "$counter_addr"; then + break + fi + + log_warn "Counter deployment attempt $deploy_attempt/5 failed; retrying" echo "$deploy_out" - exit 1 - } + deploy_attempt=$((deploy_attempt + 1)) + sleep 2 + done counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')" if ! validate_eth_address "$counter_addr"; then @@ -3034,4 +3095,4 @@ main() { esac } -main "$@" \ No newline at end of file +main "$@" diff --git a/local-native/devnet b/local-native/devnet index c4e31d71..4c25b5b8 100755 --- a/local-native/devnet +++ b/local-native/devnet @@ -234,6 +234,23 @@ wait_for_rpc() { return 1 } +start_detached() { + local log_file=$1 + shift + + if command -v perl >/dev/null 2>&1; then + perl -MPOSIX=setsid -e ' + setsid() or die "setsid failed: $!"; + open STDIN, "<", "/dev/null" or die "stdin redirect failed: $!"; + exec @ARGV or die "exec failed: $!"; + ' "$@" > "$log_file" 2>&1 & + else + nohup "$@" > "$log_file" 2>&1 < /dev/null & + fi + + echo $! 
+} + start_validator() { local id=$1 local pid_file="$DATA_DIR/validator$id.pid" @@ -249,13 +266,14 @@ start_validator() { if [ "$id" = "1" ]; then print_status "Starting validator 1 (genesis)..." - "$SCRIPT_DIR/scripts/setup-genesis-auto.sh" > "$DATA_DIR/validator$id/validator.log" 2>&1 & + start_detached "$DATA_DIR/validator$id/validator.log" "$SCRIPT_DIR/scripts/setup-genesis-auto.sh" >/tmp/push-chain-validator.pid else print_status "Starting validator $id..." - VALIDATOR_ID=$id "$SCRIPT_DIR/scripts/setup-validator-auto.sh" > "$DATA_DIR/validator$id/validator.log" 2>&1 & + start_detached "$DATA_DIR/validator$id/validator.log" env VALIDATOR_ID=$id "$SCRIPT_DIR/scripts/setup-validator-auto.sh" >/tmp/push-chain-validator.pid fi - echo $! > "$pid_file" + cat /tmp/push-chain-validator.pid > "$pid_file" + rm -f /tmp/push-chain-validator.pid print_success "Validator $id started (PID: $(cat $pid_file))" } @@ -277,9 +295,10 @@ start_universal() { mkdir -p "$DATA_DIR/universal$id" print_status "Starting universal validator $id..." - UNIVERSAL_ID=$id SEPOLIA_EVENT_START_FROM="$sepolia_start_height" "$SCRIPT_DIR/scripts/setup-universal.sh" > "$DATA_DIR/universal$id/universal.log" 2>&1 & + start_detached "$DATA_DIR/universal$id/universal.log" env UNIVERSAL_ID=$id SEPOLIA_EVENT_START_FROM="$sepolia_start_height" "$SCRIPT_DIR/scripts/setup-universal.sh" >/tmp/push-chain-universal.pid - echo $! 
> "$pid_file" + cat /tmp/push-chain-universal.pid > "$pid_file" + rm -f /tmp/push-chain-universal.pid print_success "Universal validator $id started (PID: $(cat $pid_file))" } diff --git a/local-native/scripts/setup-uvalidators.sh b/local-native/scripts/setup-uvalidators.sh index b80b5eee..b00eee48 100755 --- a/local-native/scripts/setup-uvalidators.sh +++ b/local-native/scripts/setup-uvalidators.sh @@ -138,13 +138,13 @@ for i in $(seq 1 $NUM_UV); do done # ═══════════════════════════════════════════════════════════════════════════════ -# CREATE AUTHZ GRANTS (batched - 4 grants per transaction, with confirmation) +# CREATE AUTHZ GRANTS (batched, with confirmation) # ═══════════════════════════════════════════════════════════════════════════════ echo "" echo "🔐 Setting up AuthZ grants (batched)..." echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "📋 Creating grants: validator-N → hotkey-N (4 msg types per tx)" +echo "📋 Creating grants: validator-N → hotkey-N" # Disable exit on error for authz commands (some may already exist) set +e @@ -157,8 +157,11 @@ MSG_TYPES=( "/uexecutor.v1.MsgVoteChainMeta" "/uexecutor.v1.MsgVoteOutbound" "/utss.v1.MsgVoteTssKeyProcess" + "/utss.v1.MsgVoteFundMigration" ) +EXPECTED_GRANTS=$((${NUM_UV:-2} * ${#MSG_TYPES[@]})) + for i in $(seq 1 ${NUM_UV:-2}); do HOTKEY_ADDR=$(jq -r ".[$((i-1))].address" "$HOTKEYS_FILE") VALIDATOR_ADDR=$("$PCHAIND_BIN" keys show "validator-$i" -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null) @@ -175,7 +178,7 @@ for i in $(seq 1 ${NUM_UV:-2}); do BATCH_OK=false - # Attempt batch: all 4 grants in one TX + # Attempt batch: all grants in one TX MESSAGES="[]" for j in "${!MSG_TYPES[@]}"; do MSG_TYPE="${MSG_TYPES[$j]}" @@ -197,7 +200,7 @@ for i in $(seq 1 ${NUM_UV:-2}); do MSG_COUNT=$(echo "$MESSAGES" | jq 'length' 2>/dev/null || echo "0") - if [ "${MSG_COUNT}" = "4" ]; then + if [ "${MSG_COUNT}" = "${#MSG_TYPES[@]}" ]; then COMBINED_TX=$(cat < Date: Mon, 27 Apr 2026 11:27:21 +0530 Subject: [PATCH 
57/61] e2e-tests/configs dir update --- e2e-tests/.gitignore | 3 ++- e2e-tests/README.md | 6 +++--- e2e-tests/setup.sh | 50 +++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 52 insertions(+), 7 deletions(-) diff --git a/e2e-tests/.gitignore b/e2e-tests/.gitignore index 4ea7f858..78701c33 100644 --- a/e2e-tests/.gitignore +++ b/e2e-tests/.gitignore @@ -1,2 +1,3 @@ .env -logs/ \ No newline at end of file +logs/ +config/ diff --git a/e2e-tests/README.md b/e2e-tests/README.md index e8346d06..9afbf0f8 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -139,7 +139,7 @@ When set in `.env`, the `setup-environment` step (also called by `all`) does: 1. Starts local fork nodes: - `anvil` for Ethereum Sepolia, Arbitrum Sepolia, Base Sepolia, BSC Testnet - `surfpool` for Solana -2. Rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to local fork URLs +2. Copies `config/testnet-donut` into `e2e-tests/config/testnet-donut`, then rewrites `public_rpc_url` there to local fork URLs 3. Patches `puniversald` chain RPC config (`local-native/data/universal-N/.puniversal/config/pushuv_config.json`) to use local fork endpoints Default local fork URLs (override in `.env`): @@ -351,7 +351,7 @@ These addresses are used to: - sync swap repo `test-addresses.json` - generate core contracts `.env` -- update `config/testnet-donut/tokens/*.json` +- update `e2e-tests/config/testnet-donut/*/tokens/*.json` - submit token config txs to uregistry Manual helpers: @@ -399,7 +399,7 @@ Note: this only handles pools paired with WPC. 
If you need a pool between two no | `local-native/logs/` | Per-process stdout/stderr | | `/test-addresses.json` | Swap repo address file (synced from deploy_addresses.json) | | `/.env` | Core contracts env (generated by `write-core-env`) | -| `config/testnet-donut/*/tokens/*.json` | Token config files (updated contract addresses) | +| `e2e-tests/config/testnet-donut/*/tokens/*.json` | Runtime token config working copy (updated contract addresses) | --- diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index f8c75b0c..7dc6e8fc 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -48,9 +48,10 @@ fi : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" -: "${TOKENS_CONFIG_DIR:=./config/testnet-donut}" -: "${TOKEN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/tokens/eth.json}" -: "${CHAIN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/chain.json}" +: "${SOURCE_CONFIG_DIR:=./config/testnet-donut}" +: "${TOKENS_CONFIG_DIR:=./e2e-tests/config/testnet-donut}" +: "${TOKEN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/tokens/eth.json}" +: "${CHAIN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/chain.json}" abs_from_root() { local path="$1" @@ -72,6 +73,7 @@ PUSH_CHAIN_SDK_DIR="$(abs_from_root "$PUSH_CHAIN_SDK_DIR")" DEPLOY_ADDRESSES_FILE="$(abs_from_root "$DEPLOY_ADDRESSES_FILE")" TEST_ADDRESSES_PATH="$(abs_from_root "$TEST_ADDRESSES_PATH")" LOG_DIR="$(abs_from_root "$LOG_DIR")" +SOURCE_CONFIG_DIR="$(abs_from_root "$SOURCE_CONFIG_DIR")" TOKENS_CONFIG_DIR="$(abs_from_root "$TOKENS_CONFIG_DIR")" TOKEN_CONFIG_PATH="$(abs_from_root "$TOKEN_CONFIG_PATH")" CHAIN_CONFIG_PATH="$(abs_from_root "$CHAIN_CONFIG_PATH")" @@ -132,6 +134,43 @@ prefer_sibling_repo_dirs() { prefer_sibling_repo_dirs +ensure_e2e_testnet_donut_configs() { + if [[ "$TOKENS_CONFIG_DIR" == "$SOURCE_CONFIG_DIR" ]]; then + log_warn "TOKENS_CONFIG_DIR points at SOURCE_CONFIG_DIR; 
setup may mutate source configs: $SOURCE_CONFIG_DIR" + return 0 + fi + + if [[ ! -d "$SOURCE_CONFIG_DIR" ]]; then + log_err "Source config directory missing: $SOURCE_CONFIG_DIR" + exit 1 + fi + + if [[ -d "$TOKENS_CONFIG_DIR" ]]; then + return 0 + fi + + mkdir -p "$(dirname "$TOKENS_CONFIG_DIR")" + cp -R "$SOURCE_CONFIG_DIR" "$TOKENS_CONFIG_DIR" + log_ok "Created e2e config working copy: $TOKENS_CONFIG_DIR" +} + +reset_e2e_testnet_donut_configs() { + if [[ "$TOKENS_CONFIG_DIR" == "$SOURCE_CONFIG_DIR" ]]; then + log_warn "Skipping e2e config reset because TOKENS_CONFIG_DIR points at SOURCE_CONFIG_DIR: $SOURCE_CONFIG_DIR" + return 0 + fi + + if [[ ! -d "$SOURCE_CONFIG_DIR" ]]; then + log_err "Source config directory missing: $SOURCE_CONFIG_DIR" + exit 1 + fi + + rm -rf "$TOKENS_CONFIG_DIR" + mkdir -p "$(dirname "$TOKENS_CONFIG_DIR")" + cp -R "$SOURCE_CONFIG_DIR" "$TOKENS_CONFIG_DIR" + log_ok "Refreshed e2e config working copy from $SOURCE_CONFIG_DIR" +} + ensure_testing_env_var_in_env_file() { mkdir -p "$(dirname "$ENV_FILE")" @@ -1141,6 +1180,7 @@ step_ensure_tss_key_ready() { step_setup_environment() { require_cmd jq curl + ensure_e2e_testnet_donut_configs local has_docker="false" if command -v docker >/dev/null 2>&1; then @@ -1566,6 +1606,7 @@ step_sync_vault_tss_on_anvil() { return 0 fi require_cmd cast jq python3 + ensure_e2e_testnet_donut_configs # Derive the TSS EVM address from the on-chain TSS public key. # 1. Query compressed secp256k1 pubkey from the utss module. @@ -2078,6 +2119,7 @@ find_matching_token_config_file() { step_update_deployed_token_configs() { require_cmd jq ensure_deploy_file + ensure_e2e_testnet_donut_configs if [[ ! 
-d "$TOKENS_CONFIG_DIR" ]]; then log_err "Tokens config directory missing: $TOKENS_CONFIG_DIR" @@ -2405,6 +2447,7 @@ step_setup_gateway() { step_add_uregistry_configs() { require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq + ensure_e2e_testnet_donut_configs [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } @@ -2955,6 +2998,7 @@ NODE } cmd_all() { + reset_e2e_testnet_donut_configs step_setup_environment (cd "$PUSH_CHAIN_DIR" && make replace-addresses) (cd "$PUSH_CHAIN_DIR" && make build) From 2ab18786c7c7bda1559326cfdb953ebf48455d37 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Mon, 27 Apr 2026 13:47:29 +0530 Subject: [PATCH 58/61] LOCAL_OUTBOUND_BASE_GAS_LIMIT added --- e2e-tests/README.md | 1 + e2e-tests/setup.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 9afbf0f8..21d42bcf 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -128,6 +128,7 @@ Edit `e2e-tests/.env`. 
Key variables: | `PUSH_CHAIN_SDK_E2E_DIR` | `packages/core/__e2e__/evm/inbound` | Test directory inside SDK | | `PREFER_SIBLING_REPO_DIRS` | `true` | Prefer sibling dirs for core/gateway repos over cloning fresh | | `E2E_TARGET_CHAINS` | — | Restrict SDK E2E chains (passed through to SDK `.env`) | +| `LOCAL_OUTBOUND_BASE_GAS_LIMIT` | `500000` | UniversalCore per-chain base gas limit seeded for local outbound tests | | `CORE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for core forge script | | `GATEWAY_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for gateway forge script | | `CORE_CONFIGURE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for `configureUniversalCore` | diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 7dc6e8fc..2f42170e 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -48,6 +48,7 @@ fi : "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" +: "${LOCAL_OUTBOUND_BASE_GAS_LIMIT:=500000}" : "${SOURCE_CONFIG_DIR:=./config/testnet-donut}" : "${TOKENS_CONFIG_DIR:=./e2e-tests/config/testnet-donut}" : "${TOKEN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/tokens/eth.json}" @@ -2436,7 +2437,7 @@ step_setup_gateway() { log_warn "UniversalCore BASE_GAS_LIMIT is 0. 
Applying local defaults for outbound chains" for ns in "eip155:11155111" "eip155:421614" "eip155:84532" "eip155:97" "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"; do - cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" 2000000 \ + cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" "$LOCAL_OUTBOUND_BASE_GAS_LIMIT" \ --rpc-url "$PUSH_RPC_URL" \ --private-key "$PRIVATE_KEY" >/dev/null || true done From dd853eeff62b00f62bbe951e68ebb63d62f2d0c6 Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Mon, 27 Apr 2026 16:29:20 +0530 Subject: [PATCH 59/61] inbound patch added --- e2e-tests/.env.example | 11 +- e2e-tests/deploy_addresses.json | 2 +- e2e-tests/genesis_accounts.json | 32 +++ e2e-tests/setup.sh | 434 ++++++++++++++++++++++++++++---- 4 files changed, 430 insertions(+), 49 deletions(-) create mode 100644 e2e-tests/genesis_accounts.json diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example index 29fcccaa..1d7a676d 100644 --- a/e2e-tests/.env.example +++ b/e2e-tests/.env.example @@ -7,6 +7,15 @@ # Local Push RPC PUSH_RPC_URL=http://localhost:8545 +# Public testnet RPCs used when TESTING_ENV is not LOCAL. +# setup-environment writes these into e2e-tests/config/testnet-donut/*/chain.json, +# and setup-sdk writes them into push-chain-sdk chain.ts defaults. 
+ETHEREUM_SEPOLIA_RPC_URL= +ARBITRUM_SEPOLIA_RPC_URL= +BASE_SEPOLIA_RPC_URL= +BSC_TESTNET_RPC_URL= +SOLANA_DEVNET_RPC_URL=https://api.devnet.solana.com + # Local chain info CHAIN_ID=localchain_9000-1 KEYRING_BACKEND=test @@ -58,7 +67,7 @@ PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__/evm/inbound # push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) # Defaults used by setup-sdk when omitted: # EVM_PRIVATE_KEY <= PRIVATE_KEY -# EVM_RPC <= PUSH_RPC_URL +# EVM_RPC <= PUSH_RPC_URL in LOCAL, ETHEREUM_SEPOLIA_RPC_URL otherwise # PUSH_PRIVATE_KEY<= PRIVATE_KEY EVM_PRIVATE_KEY= EVM_RPC= diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index 2964c011..e85204a6 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,5 +1,5 @@ { - "generatedAt": "2026-04-15T09:32:52Z", + "generatedAt": "2026-04-27T09:57:28Z", "contracts": { "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", diff --git a/e2e-tests/genesis_accounts.json b/e2e-tests/genesis_accounts.json new file mode 100644 index 00000000..280734f3 --- /dev/null +++ b/e2e-tests/genesis_accounts.json @@ -0,0 +1,32 @@ +[ + { + "id": 1, + "name": "genesis-acc-1", + "address": "push1dfx67rvvn6np0tlsg9z987vyjuw9qp9x8dy0g8", + "mnemonic": "brisk have buyer chief chronic million neglect pass brain shaft whisper duty label tooth harsh clinic lift tenant element copper scare race clarify art" + }, + { + "id": 2, + "name": "genesis-acc-2", + "address": "push1ns34ndqpt5xd5uw0uux38al7u4hujurzsre23l", + "mnemonic": "story flight spike script venture clap lamp hero wise ladder cruel strategy core notice layer very tool sister cram uncle brief horn clump hen" + }, + { + "id": 3, + "name": "genesis-acc-3", + "address": "push102m2tyns68qju7aphafkcn94h3m0hr7zvhurgp", + "mnemonic": "clip candy easily model net post sponsor fog apart skin essence tool trophy lesson claw issue report 
goose file jazz maid goose response kit" + }, + { + "id": 4, + "name": "genesis-acc-4", + "address": "push1x4jqjzugmn2c8ngxhvazufdhhce890m3688lky", + "mnemonic": "hope remain file sister muscle panda pear cross rifle autumn bubble dinner electric gown proud mutual stable fish pyramid nominee trip hungry prison manage" + }, + { + "id": 5, + "name": "genesis-acc-5", + "address": "push1tfj4sllzzxps4nj85que9h3ulqnlcddvhf30ds", + "mnemonic": "possible grocery height raccoon escape ticket sweet used cloud idle vital hedgehog ready imitate lock rail gaze dose announce quote fortune transfer camp famous" + } +] diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index 2f42170e..edf5132f 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -54,6 +54,12 @@ fi : "${TOKEN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/tokens/eth.json}" : "${CHAIN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/chain.json}" +: "${ETHEREUM_SEPOLIA_RPC_URL:=${ETH_SEPOLIA_RPC_URL:-${SEPOLIA_RPC_URL:-}}}" +: "${ARBITRUM_SEPOLIA_RPC_URL:=${ARB_SEPOLIA_RPC_URL:-${ARBITRUM_SEPOLIA_RPC:-}}}" +: "${BASE_SEPOLIA_RPC_URL:=${BASE_SEPOLIA_RPC:-}}" +: "${BSC_TESTNET_RPC_URL:=${BNB_TESTNET_RPC:-${BSC_TESTNET_RPC:-}}}" +: "${SOLANA_DEVNET_RPC_URL:=}" + abs_from_root() { local path="$1" if [[ "$path" = /* ]]; then @@ -189,6 +195,64 @@ is_local_testing_env() { [[ "${TESTING_ENV:-}" == "LOCAL" ]] } +chain_rpc_from_env() { + local chain_name="$1" + + case "$chain_name" in + eth_sepolia) printf "%s" "${ETHEREUM_SEPOLIA_RPC_URL:-}" ;; + arb_sepolia) printf "%s" "${ARBITRUM_SEPOLIA_RPC_URL:-}" ;; + base_sepolia) printf "%s" "${BASE_SEPOLIA_RPC_URL:-}" ;; + bsc_testnet) printf "%s" "${BSC_TESTNET_RPC_URL:-}" ;; + solana_devnet) printf "%s" "${SOLANA_DEVNET_RPC_URL:-}" ;; + *) printf "%s" "" ;; + esac +} + +patch_chain_config_public_rpc() { + local file_path="$1" + local rpc_url="$2" + local label="$3" + local tmp + + if [[ ! 
-f "$file_path" ]]; then + log_warn "Chain config file not found for $label: $file_path" + return 0 + fi + + tmp="$(mktemp)" + jq --arg rpc "$rpc_url" '.public_rpc_url = $rpc' "$file_path" >"$tmp" + mv "$tmp" "$file_path" + log_ok "Patched $label chain config public_rpc_url => $rpc_url" +} + +apply_nonlocal_chain_rpc_env_to_configs() { + if is_local_testing_env; then + return 0 + fi + + require_cmd jq + ensure_e2e_testnet_donut_configs + + local chain_name rpc_url + local chain_names=( + eth_sepolia + arb_sepolia + base_sepolia + bsc_testnet + solana_devnet + ) + + for chain_name in "${chain_names[@]}"; do + rpc_url="$(chain_rpc_from_env "$chain_name")" + if [[ -z "$rpc_url" ]]; then + log_warn "No .env RPC configured for $chain_name; keeping existing chain.json public_rpc_url" + continue + fi + + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/$chain_name/chain.json" "$rpc_url" "$chain_name" + done +} + get_genesis_accounts_json() { if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then cat "$GENESIS_ACCOUNTS_JSON" @@ -647,6 +711,238 @@ fs.writeFileSync(filePath, source); NODE } +sdk_rewrite_chain_endpoints_from_env() { + local chain_constants_file="$1" + + CHAIN_CONSTANTS_FILE="$chain_constants_file" \ + ETHEREUM_SEPOLIA_RPC_URL="$ETHEREUM_SEPOLIA_RPC_URL" \ + ARBITRUM_SEPOLIA_RPC_URL="$ARBITRUM_SEPOLIA_RPC_URL" \ + BASE_SEPOLIA_RPC_URL="$BASE_SEPOLIA_RPC_URL" \ + BSC_TESTNET_RPC_URL="$BSC_TESTNET_RPC_URL" \ + SOLANA_DEVNET_RPC_URL="$SOLANA_DEVNET_RPC_URL" \ + node <<'NODE' +const fs = require('fs'); + +const filePath = process.env.CHAIN_CONSTANTS_FILE; +if (!filePath || !fs.existsSync(filePath)) { + console.error('chain.ts file not found for testnet endpoint rewrite'); + process.exit(1); +} + +let source = fs.readFileSync(filePath, 'utf8'); + +const endpointMap = [ + { chain: 'ETHEREUM_SEPOLIA', url: process.env.ETHEREUM_SEPOLIA_RPC_URL }, + { chain: 'ARBITRUM_SEPOLIA', url: process.env.ARBITRUM_SEPOLIA_RPC_URL }, + { chain: 'BASE_SEPOLIA', url: 
process.env.BASE_SEPOLIA_RPC_URL }, + { chain: 'BNB_TESTNET', url: process.env.BSC_TESTNET_RPC_URL }, + { chain: 'SOLANA_DEVNET', url: process.env.SOLANA_DEVNET_RPC_URL }, +].filter((entry) => entry.url); + +function findChainBlockRange(text, chainName) { + const marker = `[CHAIN.${chainName}]`; + const markerIdx = text.indexOf(marker); + if (markerIdx === -1) { + return null; + } + + const openBraceIdx = text.indexOf('{', markerIdx); + if (openBraceIdx === -1) { + return null; + } + + let depth = 0; + for (let i = openBraceIdx; i < text.length; i += 1) { + const ch = text[i]; + if (ch === '{') { + depth += 1; + } else if (ch === '}') { + depth -= 1; + if (depth === 0) { + return { start: openBraceIdx, end: i }; + } + } + } + + return null; +} + +function detectIndent(blockText) { + const match = blockText.match(/\n(\s+)[A-Za-z_\[]/); + return match ? match[1] : ' '; +} + +function findMatchingBracket(text, openIdx) { + let depth = 0; + let quote = ''; + + for (let i = openIdx; i < text.length; i += 1) { + const ch = text[i]; + const prev = i > 0 ? 
text[i - 1] : ''; + + if (quote) { + if (ch === quote && prev !== '\\') { + quote = ''; + } + continue; + } + + if (ch === '\'' || ch === '"' || ch === '`') { + quote = ch; + continue; + } + + if (ch === '[') { + depth += 1; + continue; + } + + if (ch === ']') { + depth -= 1; + if (depth === 0) { + return i; + } + } + } + + return -1; +} + +function upsertDefaultRpc(blockText, rpcUrl, indent) { + const keyRegex = /\bdefaultRPC\s*:/m; + const keyMatch = keyRegex.exec(blockText); + if (keyMatch) { + const arrayStart = blockText.indexOf('[', keyMatch.index); + if (arrayStart !== -1) { + const arrayEnd = findMatchingBracket(blockText, arrayStart); + if (arrayEnd !== -1) { + return `${blockText.slice(0, arrayStart)}['${rpcUrl}']${blockText.slice(arrayEnd + 1)}`; + } + } + + return blockText.replace(/(defaultRPC\s*:\s*)[^\n,]+/, `$1['${rpcUrl}']`); + } + + return blockText.replace(/\{\s*/, `{\n${indent}defaultRPC: ['${rpcUrl}'],\n`); +} + +const edits = []; +for (const entry of endpointMap) { + const range = findChainBlockRange(source, entry.chain); + if (!range) { + console.error(`Could not find chain block for CHAIN.${entry.chain} in ${filePath}`); + process.exit(1); + } + + const originalBlock = source.slice(range.start, range.end + 1); + edits.push({ + start: range.start, + end: range.end, + text: upsertDefaultRpc(originalBlock, entry.url, detectIndent(originalBlock)), + }); +} + +edits.sort((a, b) => b.start - a.start); +for (const edit of edits) { + source = source.slice(0, edit.start) + edit.text + source.slice(edit.end + 1); +} + +fs.writeFileSync(filePath, source); +NODE +} + +sdk_prepare_e2e_network_for_testing_env() { + require_cmd perl + + local sdk_e2e_root="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__" + if [[ ! 
-d "$sdk_e2e_root" ]]; then + log_warn "SDK __e2e__ directory not found at $sdk_e2e_root; skipping network replacement" + return 0 + fi + + local patched_count=0 + while IFS= read -r -d '' e2e_file; do + if is_local_testing_env; then + perl -0pi -e ' + s/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; + s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; + s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + ' "$e2e_file" + else + perl -0pi -e ' + s/\bPUSH_NETWORK\.LOCALNET\b/PUSH_NETWORK.TESTNET_DONUT/g; + s/\bCHAIN\.PUSH_LOCALNET\b/CHAIN.PUSH_TESTNET_DONUT/g; + ' "$e2e_file" + fi + patched_count=$((patched_count + 1)) + done < <(find "$sdk_e2e_root" -type f \( -name '*.ts' -o -name '*.tsx' \) -print0) + + if is_local_testing_env; then + log_ok "Applied LOCALNET replacement to $patched_count SDK __e2e__ file(s)" + else + log_ok "Applied TESTNET_DONUT replacement to $patched_count SDK __e2e__ file(s)" + fi +} + +sdk_prepare_inbound_evm_push_network_for_localnet() { + require_cmd perl + + local file + local files=( + "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/inbound/uea-to-push.spec.ts" + "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/fresh-wallet.ts" + ) + + for file in "${files[@]}"; do + if [[ ! -f "$file" ]]; then + log_err "SDK inbound helper/spec file not found: $file" + exit 1 + fi + + perl -0pi -e ' + s/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; + s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; + s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + ' "$file" + log_ok "Prepared LOCALNET Push network for $(basename "$file")" + done +} + +sdk_sync_localnet_uea_proxy_impl() { + require_cmd cast perl + + local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local uea_impl_raw uea_impl synced_localnet_uea + + if [[ ! 
-f "$chain_constants_file" ]]; then + log_err "SDK chain constants file not found: $chain_constants_file" + exit 1 + fi + + log_info "Fetching UEA_PROXY_IMPLEMENTATION from local Push Chain" + uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + + if ! validate_eth_address "$uea_impl"; then + log_err "Could not resolve valid UEA_PROXY_IMPLEMENTATION address from local Push Chain at $PUSH_RPC_URL" + exit 1 + fi + + ensure_deploy_file + record_contract "UEA_PROXY_IMPLEMENTATION" "$uea_impl" + + UEA_PROXY_IMPL="$uea_impl" perl -0pi -e 's#(export const UEA_PROXY:[\s\S]*?\[PUSH_NETWORK\.LOCALNET\]:\s*)'\''[^'\'']*'\''#$1'\''$ENV{UEA_PROXY_IMPL}'\''#s' "$chain_constants_file" + + synced_localnet_uea="$(grep -E '\[PUSH_NETWORK\.LOCALNET\]:' "$chain_constants_file" | head -1 | sed -E "s/.*'([^']+)'.*/\1/")" + if [[ "$synced_localnet_uea" != "$uea_impl" ]]; then + log_err "Failed to update PUSH_NETWORK.LOCALNET UEA proxy in $chain_constants_file" + exit 1 + fi + + log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" +} + sdk_sync_localnet_constants() { require_cmd jq perl node @@ -746,7 +1042,10 @@ step_clone_push_chain_sdk() { } step_setup_push_chain_sdk() { - require_cmd git yarn npm cast jq perl + require_cmd git yarn npm jq perl node + if is_local_testing_env; then + require_cmd cast + fi local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" local sdk_account_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_ACCOUNT_TS_PATH" @@ -762,8 +1061,16 @@ step_setup_push_chain_sdk() { local sdk_evm_private_key sdk_evm_rpc sdk_solana_rpc sdk_solana_private_key sdk_push_private_key sdk_evm_private_key="${EVM_PRIVATE_KEY:-${PRIVATE_KEY:-}}" - sdk_evm_rpc="${EVM_RPC:-${PUSH_RPC_URL:-}}" - sdk_solana_rpc="${SOLANA_RPC_URL:-https://api.devnet.solana.com}" + if 
is_local_testing_env; then + sdk_evm_rpc="${EVM_RPC:-${PUSH_RPC_URL:-}}" + else + sdk_evm_rpc="${EVM_RPC:-${ETHEREUM_SEPOLIA_RPC_URL:-${PUSH_RPC_URL:-}}}" + fi + if is_local_testing_env; then + sdk_solana_rpc="${SOLANA_RPC_URL:-${SOLANA_DEVNET_RPC_URL:-https://api.devnet.solana.com}}" + else + sdk_solana_rpc="${SOLANA_DEVNET_RPC_URL:-https://api.devnet.solana.com}" + fi sdk_solana_private_key="${SOLANA_PRIVATE_KEY:-${SVM_PRIVATE_KEY:-${SOL_PRIVATE_KEY:-}}}" sdk_push_private_key="${PUSH_PRIVATE_KEY:-${PRIVATE_KEY:-}}" @@ -773,6 +1080,9 @@ step_setup_push_chain_sdk() { echo "# Source: e2e-tests/.env" echo "EVM_PRIVATE_KEY=$sdk_evm_private_key" echo "EVM_RPC=$sdk_evm_rpc" + [[ -n "${ARBITRUM_SEPOLIA_RPC_URL:-}" ]] && echo "ARBITRUM_SEPOLIA_RPC=$ARBITRUM_SEPOLIA_RPC_URL" + [[ -n "${BASE_SEPOLIA_RPC_URL:-}" ]] && echo "BASE_SEPOLIA_RPC=$BASE_SEPOLIA_RPC_URL" + [[ -n "${BSC_TESTNET_RPC_URL:-}" ]] && echo "BNB_TESTNET_RPC=$BSC_TESTNET_RPC_URL" echo "SOLANA_RPC_URL=$sdk_solana_rpc" echo "SOLANA_PRIVATE_KEY=$sdk_solana_private_key" echo "PUSH_PRIVATE_KEY=$sdk_push_private_key" @@ -790,59 +1100,56 @@ step_setup_push_chain_sdk() { exit 1 fi - sdk_sync_localnet_constants + if is_local_testing_env; then + sdk_sync_localnet_constants + else + apply_nonlocal_chain_rpc_env_to_configs + sdk_rewrite_chain_endpoints_from_env "$chain_constants_file" + sdk_prepare_e2e_network_for_testing_env + log_ok "Patched SDK chain.ts RPC endpoints for non-LOCAL testing" + fi - log_info "Fetching UEA_PROXY_IMPLEMENTATION from local chain" - uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" - uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + if ! is_local_testing_env; then + log_info "Skipping LOCALNET contract sync and source rewrites for non-LOCAL setup-sdk" + else - if ! 
validate_eth_address "$uea_impl"; then - log_err "Could not resolve valid UEA_PROXY_IMPLEMENTATION address from cast output: $uea_impl_raw" - exit 1 - fi + log_info "Fetching UEA_PROXY_IMPLEMENTATION from local chain" + uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" - ensure_deploy_file - record_contract "UEA_PROXY_IMPLEMENTATION" "$uea_impl" + if ! validate_eth_address "$uea_impl"; then + log_err "Could not resolve valid UEA_PROXY_IMPLEMENTATION address from cast output: $uea_impl_raw" + exit 1 + fi - UEA_PROXY_IMPL="$uea_impl" perl -0pi -e 's#(\[PUSH_NETWORK\.LOCALNET\]:\s*)'\''[^'\'']*'\''#$1'\''$ENV{UEA_PROXY_IMPL}'\''#g' "$chain_constants_file" + ensure_deploy_file + record_contract "UEA_PROXY_IMPLEMENTATION" "$uea_impl" - synced_localnet_uea="$(grep -E '\[PUSH_NETWORK\.LOCALNET\]:' "$chain_constants_file" | head -1 | sed -E "s/.*'([^']+)'.*/\1/")" - if [[ "$synced_localnet_uea" != "$uea_impl" ]]; then - log_err "Failed to update PUSH_NETWORK.LOCALNET UEA proxy in $chain_constants_file" - exit 1 - fi + UEA_PROXY_IMPL="$uea_impl" perl -0pi -e 's#(export const UEA_PROXY:[\s\S]*?\[PUSH_NETWORK\.LOCALNET\]:\s*)'\''[^'\'']*'\''#$1'\''$ENV{UEA_PROXY_IMPL}'\''#s' "$chain_constants_file" - log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" + synced_localnet_uea="$(grep -E '\[PUSH_NETWORK\.LOCALNET\]:' "$chain_constants_file" | head -1 | sed -E "s/.*'([^']+)'.*/\1/")" + if [[ "$synced_localnet_uea" != "$uea_impl" ]]; then + log_err "Failed to update PUSH_NETWORK.LOCALNET UEA proxy in $chain_constants_file" + exit 1 + fi - if [[ ! 
-f "$sdk_account_file" ]]; then - log_err "SDK account file not found: $sdk_account_file" - exit 1 - fi + log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" - perl -0pi -e ' - s{(function\s+convertExecutorToOriginAccount\b.*?\{)(.*?)(\n\})}{ - my ($head, $body, $tail) = ($1, $2, $3); - $body =~ s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; - "$head$body$tail"; - }gse; - ' "$sdk_account_file" - log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" + if [[ ! -f "$sdk_account_file" ]]; then + log_err "SDK account file not found: $sdk_account_file" + exit 1 + fi - local sdk_e2e_root="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__" - if [[ -d "$sdk_e2e_root" ]]; then - log_info "Replacing TESTNET/TESTNET_DONUT with LOCALNET across all SDK __e2e__ test files" - local patched_count=0 - while IFS= read -r -d '' e2e_file; do - perl -0pi -e ' - s/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; - s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; - s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; - ' "$e2e_file" - patched_count=$((patched_count + 1)) - done < <(find "$sdk_e2e_root" -type f \( -name '*.ts' -o -name '*.tsx' \) -print0) - log_ok "Applied LOCALNET replacement to $patched_count file(s) under $sdk_e2e_root" - else - log_warn "SDK __e2e__ directory not found at $sdk_e2e_root; skipping TESTNET→LOCALNET replacement" + perl -0pi -e ' + s{(function\s+convertExecutorToOriginAccount\b.*?\{)(.*?)(\n\})}{ + my ($head, $body, $tail) = ($1, $2, $3); + $body =~ s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + "$head$body$tail"; + }gse; + ' "$sdk_account_file" + log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" + + sdk_prepare_e2e_network_for_testing_env fi log_info "Installing push-chain-sdk dependencies" @@ -1042,8 +1349,33 @@ step_run_sdk_quick_testing_outbound() { log_ok "Completed 
quick-testing-outbound SDK E2E tests" } +step_run_sdk_quick_testing_inbound_evm() { + local inbound_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/inbound/uea-to-push.spec.ts" + + export E2E_TARGET_CHAINS="Ethereum Sepolia" + + step_setup_push_chain_sdk + sdk_sync_localnet_constants + sdk_sync_localnet_uea_proxy_impl + sdk_prepare_inbound_evm_push_network_for_localnet + + if [[ ! -f "$inbound_file" ]]; then + log_err "SDK inbound test file not found: $inbound_file" + exit 1 + fi + + log_info "Running SDK inbound EVM test on local Push Chain with Ethereum Sepolia origin only: uea-to-push.spec.ts" + ( + cd "$PUSH_CHAIN_SDK_DIR" + E2E_TARGET_CHAINS="Ethereum Sepolia" npx nx test core --runInBand --testPathPattern="__e2e__/evm/inbound/uea-to-push.spec.ts" + ) + + log_ok "Completed quick-testing-inbound-evm SDK E2E test" +} + step_devnet() { require_cmd bash jq + apply_nonlocal_chain_rpc_env_to_configs local sepolia_rpc_override arbitrum_rpc_override base_rpc_override bsc_rpc_override solana_rpc_override @@ -1182,6 +1514,7 @@ step_ensure_tss_key_ready() { step_setup_environment() { require_cmd jq curl ensure_e2e_testnet_donut_configs + apply_nonlocal_chain_rpc_env_to_configs local has_docker="false" if command -v docker >/dev/null 2>&1; then @@ -3054,6 +3387,7 @@ Commands: sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) quick-testing-outbound Run setup-sdk + fund-uea-prc20, then execute cea-to-eoa.spec.ts and cea-to-uea.spec.ts only + quick-testing-inbound-evm Run uea-to-push.spec.ts on local Push Chain for Ethereum Sepolia origin only sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts sdk-test-progress-hook Run progress-hook-per-tx.spec.ts @@ -3083,6 +3417,11 @@ Important env: 
LOCAL_BSC_UV_RPC_URL=http://localhost:9548 SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 LOCAL_SOLANA_UV_RPC_URL=http://localhost:8899 + ETHEREUM_SEPOLIA_RPC_URL=https://... + ARBITRUM_SEPOLIA_RPC_URL=https://... + BASE_SEPOLIA_RPC_URL=https://... + BSC_TESTNET_RPC_URL=https://... + SOLANA_DEVNET_RPC_URL=https://... EOF } @@ -3114,6 +3453,7 @@ main() { sdk-test-all) step_run_sdk_tests_all ;; sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; quick-testing-outbound) step_run_sdk_quick_testing_outbound ;; + quick-testing-inbound-evm) step_run_sdk_quick_testing_inbound_evm ;; sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; From 237de6ad9da3b5e45db0c6b6acd24d4720398bbd Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Mon, 27 Apr 2026 18:59:31 +0530 Subject: [PATCH 60/61] quick-testing-outbound for svm and evm added --- e2e-tests/README.md | 13 ++++++-- e2e-tests/setup.sh | 72 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 21d42bcf..9b18d02d 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -231,7 +231,14 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh all TESTING_ENV=LOCAL bash e2e-tests/setup.sh quick-testing-outbound ``` -`quick-testing-outbound` internally runs `setup-sdk`, then `fund-uea-prc20`, and finally executes just the two most important outbound specs — `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` — so you get end-to-end outbound coverage without running the full outbound suite. 
+`quick-testing-outbound` runs both quick outbound smoke suites: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh quick-testing-outbound-evm +TESTING_ENV=LOCAL bash e2e-tests/setup.sh quick-testing-outbound-svm +``` + +Each quick suite internally runs `setup-sdk`, then `fund-uea-prc20`, and finally executes just the two most important outbound specs — `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` — so you get end-to-end outbound coverage without running the full outbound suite. --- @@ -317,7 +324,9 @@ TESTING_ENV=LOCAL bash e2e-tests/setup.sh | `setup-sdk` | Clone/install SDK, generate SDK `.env`, sync LOCALNET constants | | `sdk-test-all` | Run all configured inbound SDK E2E test files | | `sdk-test-outbound-all` | Run all configured outbound SDK E2E test files (LOCAL only) | -| `quick-testing-outbound` | Run `setup-sdk` + `fund-uea-prc20`, then only `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` (fast outbound smoke test) | +| `quick-testing-outbound` | Run both quick outbound smoke suites: EVM, then SVM | +| `quick-testing-outbound-evm` | Run `setup-sdk` + `fund-uea-prc20`, then EVM outbound `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` | +| `quick-testing-outbound-svm` | Run `setup-sdk` + `fund-uea-prc20`, then SVM outbound `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` | | `sdk-test-pctx-last-transaction` | Run `pctx-last-transaction.spec.ts` | | `sdk-test-send-to-self` | Run `send-to-self.spec.ts` | | `sdk-test-progress-hook` | Run `progress-hook-per-tx.spec.ts` | diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index edf5132f..f0c58f61 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -1297,7 +1297,7 @@ step_run_sdk_outbound_tests_all() { log_ok "Completed all configured SDK outbound E2E tests" } -step_run_sdk_quick_testing_outbound() { +step_run_sdk_quick_testing_outbound_evm() { local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" local quick_files=( "cea-to-eoa.spec.ts" @@ -1346,6 +1346,70 @@ 
step_run_sdk_quick_testing_outbound() { ) done + log_ok "Completed quick-testing-outbound-evm SDK E2E tests" +} + +step_run_sdk_quick_testing_outbound_svm() { + local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/svm/outbound" + local quick_files=( + "cea-to-eoa.spec.ts" + "cea-to-uea.spec.ts" + ) + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + local svm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/svm-client.ts" + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + local file full_path + + step_setup_push_chain_sdk + step_fund_uea_prc20 + + sdk_sync_localnet_constants + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + if [[ ! -f "$full_path" ]]; then + log_err "SDK outbound SVM test file not found: $full_path" + exit 1 + fi + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$full_path" + log_ok "Prepared LOCALNET network replacement in svm/outbound/$file" + done + + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$svm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$svm_client_file" + log_ok "Patched svm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = 
SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + log_info "Running SDK outbound SVM test: $file" + local rel_pattern="${full_path##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + done + + log_ok "Completed quick-testing-outbound-svm SDK E2E tests" +} + +step_run_sdk_quick_testing_outbound() { + step_run_sdk_quick_testing_outbound_evm + step_run_sdk_quick_testing_outbound_svm + log_ok "Completed quick-testing-outbound SDK E2E tests" } @@ -3386,7 +3450,9 @@ Commands: setup-sdk Setup push-chain-sdk (requires clone-sdk first): generate .env, replace TESTNET→LOCALNET in __e2e__ files, install deps sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) - quick-testing-outbound Run setup-sdk + fund-uea-prc20, then execute cea-to-eoa.spec.ts and cea-to-uea.spec.ts only + quick-testing-outbound Run quick-testing-outbound-evm, then quick-testing-outbound-svm + quick-testing-outbound-evm Run setup-sdk + fund-uea-prc20, then execute EVM outbound cea-to-eoa.spec.ts and cea-to-uea.spec.ts only + quick-testing-outbound-svm Run setup-sdk + fund-uea-prc20, then execute SVM outbound cea-to-eoa.spec.ts and cea-to-uea.spec.ts only quick-testing-inbound-evm Run uea-to-push.spec.ts on local Push Chain for Ethereum Sepolia origin only sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts @@ -3453,6 +3519,8 @@ main() { sdk-test-all) step_run_sdk_tests_all ;; sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; quick-testing-outbound) 
step_run_sdk_quick_testing_outbound ;; + quick-testing-outbound-evm) step_run_sdk_quick_testing_outbound_evm ;; + quick-testing-outbound-svm) step_run_sdk_quick_testing_outbound_svm ;; quick-testing-inbound-evm) step_run_sdk_quick_testing_inbound_evm ;; sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; From f3068bd26fa7d5de5e97d250b91c47e5c7ab0faa Mon Sep 17 00:00:00 2001 From: Arya Lanjewar <102943033+AryaLanjewar3005@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:29:16 +0530 Subject: [PATCH 61/61] Some solana related adjustments --- e2e-tests/deploy_addresses.json | 23 +- e2e-tests/genesis_accounts.json | 20 +- e2e-tests/setup.sh | 1481 ++++++++++++++++++++++- local-native/scripts/setup-universal.sh | 44 + 4 files changed, 1500 insertions(+), 68 deletions(-) diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json index e85204a6..7b79ca12 100644 --- a/e2e-tests/deploy_addresses.json +++ b/e2e-tests/deploy_addresses.json @@ -1,13 +1,13 @@ { - "generatedAt": "2026-04-27T09:57:28Z", + "generatedAt": "2026-04-29T06:50:27Z", "contracts": { - "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", - "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", - "SwapRouter": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", - "QuoterV2": "0xE9cb561141553DFa0A576cCd34546BECffb64Af1", - "PositionManager": "0x484aC6ED747090fe8C82c5F10427ccC2F2998930", + "WPC": "0x4dCe46Eb5909aC32B6C0ad086e74008Fdb292CB5", + "Factory": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", + "SwapRouter": "0x6f423619F7dF3023c08Bb94FE79FA8B2BA22B0b4", + "QuoterV2": "0x6F030f96Edb6CC73D8b1752E8D4571283c014056", + "PositionManager": "0xAC1645b69D7F04044B4057F9326461c05455e62d", "UEA_PROXY_IMPLEMENTATION": "0x2C297101b7d3e0911296b9A64d106684a161b4C9", - "COUNTER_ADDRESS_PAYABLE": "0xDaC125f9350cD25786Cfd5c8eb2b6837c5e7Ce6B" + "COUNTER_ADDRESS_PAYABLE": 
"0xa58F4F5FB4977151E99291FCa5b92d95021be0f4" }, "tokens": [ { @@ -52,10 +52,17 @@ "source": "core-contracts", "decimals": 9 }, + { + "name": "USDT.sol", + "symbol": "USDT.sol", + "address": "0xC329d4EbF8814eEFfA2Fd9612655e490b112523F", + "source": "core-contracts", + "decimals": 6 + }, { "name": "USDT.bsc", "symbol": "USDT.bsc", - "address": "0xC329d4EbF8814eEFfA2Fd9612655e490b112523F", + "address": "0xe0b7A8833f77C5728295D489F4B64f9DA236E4C8", "source": "core-contracts", "decimals": 6 } diff --git a/e2e-tests/genesis_accounts.json b/e2e-tests/genesis_accounts.json index 280734f3..bbc6e243 100644 --- a/e2e-tests/genesis_accounts.json +++ b/e2e-tests/genesis_accounts.json @@ -2,31 +2,31 @@ { "id": 1, "name": "genesis-acc-1", - "address": "push1dfx67rvvn6np0tlsg9z987vyjuw9qp9x8dy0g8", - "mnemonic": "brisk have buyer chief chronic million neglect pass brain shaft whisper duty label tooth harsh clinic lift tenant element copper scare race clarify art" + "address": "push1h0lw8wvlwjjm2rezn4tsw2s7m6t66d6pmnwnld", + "mnemonic": "blouse clap marble arrange soccer hawk piano barrel dream interest axis fold napkin pioneer ladder saddle odor bachelor slender punch mind fan debris steel" }, { "id": 2, "name": "genesis-acc-2", - "address": "push1ns34ndqpt5xd5uw0uux38al7u4hujurzsre23l", - "mnemonic": "story flight spike script venture clap lamp hero wise ladder cruel strategy core notice layer very tool sister cram uncle brief horn clump hen" + "address": "push1vgdlzx40k9sff5cwx0wylucyv6trgv7pv5h4dp", + "mnemonic": "marble hammer tube smooth differ stone bring nuclear artwork small goose vanish main update lazy code confirm network cinnamon waste outdoor random false hill" }, { "id": 3, "name": "genesis-acc-3", - "address": "push102m2tyns68qju7aphafkcn94h3m0hr7zvhurgp", - "mnemonic": "clip candy easily model net post sponsor fog apart skin essence tool trophy lesson claw issue report goose file jazz maid goose response kit" + "address": 
"push1ww7a3jmh2rcvx972zc5qlw5uxcpwdmpujm9eaz", + "mnemonic": "flag shallow burst clog age involve mention sunset trial bean limb bid shove lend pluck ancient stone tuition region creek quick foam same someone" }, { "id": 4, "name": "genesis-acc-4", - "address": "push1x4jqjzugmn2c8ngxhvazufdhhce890m3688lky", - "mnemonic": "hope remain file sister muscle panda pear cross rifle autumn bubble dinner electric gown proud mutual stable fish pyramid nominee trip hungry prison manage" + "address": "push1dcgsat3ywftcwgzsy06vnr7mc39rawv785wr2w", + "mnemonic": "hundred nose broom fee bounce market ethics trap undo clinic blouse pretty scatter love wine cup crane bird finger school always add mechanic impulse" }, { "id": 5, "name": "genesis-acc-5", - "address": "push1tfj4sllzzxps4nj85que9h3ulqnlcddvhf30ds", - "mnemonic": "possible grocery height raccoon escape ticket sweet used cloud idle vital hedgehog ready imitate lock rail gaze dose announce quote fortune transfer camp famous" + "address": "push14x6r4u4umcxcsqlnm67w8nzzn7ynsv4l66ytj4", + "mnemonic": "faith enemy fiscal bubble castle assault perfect breeze buddy region box blush slot clay scheme horn invest crazy name twice during much jeans build" } ] diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh index f0c58f61..ccbcb87d 100755 --- a/e2e-tests/setup.sh +++ b/e2e-tests/setup.sh @@ -22,7 +22,7 @@ fi : "${GENESIS_KEY_HOME:=./e2e-tests/.pchain}" : "${GENESIS_ACCOUNTS_JSON:=./e2e-tests/genesis_accounts.json}" : "${FUND_AMOUNT:=1000000000000000000upc}" -: "${POOL_CREATION_TOPUP_AMOUNT:=50000000000000000000upc}" +: "${POOL_CREATION_TOPUP_AMOUNT:=500000000000000000000upc}" : "${GAS_PRICES:=100000000000upc}" : "${LOCAL_DEVNET_DIR:=./local-native}" @@ -35,6 +35,13 @@ fi : "${PUSH_CHAIN_SDK_REPO:=https://github.com/pushchain/push-chain-sdk.git}" : "${PUSH_CHAIN_SDK_BRANCH:=outbound_changes}" : "${PREFER_SIBLING_REPO_DIRS:=true}" +: "${ALLOW_LOCAL_SVM_GO_BUILD_PATCH:=false}" + +SVM_BROADCASTER_BACKUP_FILE="" 
+SVM_TX_BUILDER_BACKUP_FILE="" +SVM_EVENT_PARSER_BACKUP_FILE="" +REPLACE_ADDRESSES_BACKUP_DIR="" +LOCAL_SVM_PAYLOAD_EXECUTOR_PID="" : "${E2E_PARENT_DIR:=../}" : "${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" @@ -49,6 +56,9 @@ fi : "${LOG_DIR:=$SCRIPT_DIR/logs}" : "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" : "${LOCAL_OUTBOUND_BASE_GAS_LIMIT:=500000}" +: "${LOCAL_SVM_OUTBOUND_BASE_GAS_LIMIT:=10000}" +: "${LOCAL_SOLANA_USDT_MINT:=EiXDnrAg9ea2Q6vEPV7E5TpTU1vh41jcuZqKjU5Dc4ZF}" +: "${LOCAL_SOLANA_USDT_INITIAL_SUPPLY:=10000000000000000000000}" : "${SOURCE_CONFIG_DIR:=./config/testnet-donut}" : "${TOKENS_CONFIG_DIR:=./e2e-tests/config/testnet-donut}" : "${TOKEN_CONFIG_PATH:=./e2e-tests/config/testnet-donut/eth_sepolia/tokens/eth.json}" @@ -434,7 +444,10 @@ record_token() { --arg source "$source" \ ' .tokens = ( - ([.tokens[]? | select((.address | ascii_downcase) != ($address | ascii_downcase))]) + ([.tokens[]? | select( + ((.address | ascii_downcase) != ($address | ascii_downcase)) and + ((.symbol | ascii_downcase) != ($symbol | ascii_downcase)) + )]) + [{name:$name, symbol:$symbol, address:$address, source:$source}] ) ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" @@ -943,6 +956,621 @@ sdk_sync_localnet_uea_proxy_impl() { log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" } +step_patch_local_svm_broadcaster_for_build() { + if ! is_local_testing_env; then + return 0 + fi + + if [[ "$(echo "$ALLOW_LOCAL_SVM_GO_BUILD_PATCH" | tr '[:upper:]' '[:lower:]')" != "true" ]]; then + log_info "Skipping local SVM Go source patch before build (ALLOW_LOCAL_SVM_GO_BUILD_PATCH=false)" + return 0 + fi + + require_cmd node + + local svm_broadcaster_file="$PUSH_CHAIN_DIR/universalClient/tss/txbroadcaster/svm.go" + local svm_tx_builder_file="$PUSH_CHAIN_DIR/universalClient/chains/svm/tx_builder.go" + local svm_event_parser_file="$PUSH_CHAIN_DIR/universalClient/chains/svm/event_parser.go" + if [[ ! 
-f "$svm_broadcaster_file" ]]; then + log_warn "SVM broadcaster patch target missing: $svm_broadcaster_file" + return 0 + fi + if [[ ! -f "$svm_tx_builder_file" ]]; then + log_warn "SVM tx builder patch target missing: $svm_tx_builder_file" + return 0 + fi + if [[ ! -f "$svm_event_parser_file" ]]; then + log_warn "SVM event parser patch target missing: $svm_event_parser_file" + return 0 + fi + + SVM_BROADCASTER_BACKUP_FILE="$(mktemp)" + cp "$svm_broadcaster_file" "$SVM_BROADCASTER_BACKUP_FILE" + SVM_TX_BUILDER_BACKUP_FILE="$(mktemp)" + cp "$svm_tx_builder_file" "$SVM_TX_BUILDER_BACKUP_FILE" + SVM_EVENT_PARSER_BACKUP_FILE="$(mktemp)" + cp "$svm_event_parser_file" "$SVM_EVENT_PARSER_BACKUP_FILE" + + SVM_BROADCASTER_FILE="$svm_broadcaster_file" SVM_TX_BUILDER_FILE="$svm_tx_builder_file" SVM_EVENT_PARSER_FILE="$svm_event_parser_file" node <<'NODE' +const fs = require('fs'); + +const file = process.env.SVM_BROADCASTER_FILE; +let src = fs.readFileSync(file, 'utf8'); + +if (!src.includes('"strings"')) { + src = src.replace('import (\n\t"context"\n', 'import (\n\t"context"\n\t"strings"\n'); +} + +if (!src.includes('LOCAL SVM: a validator without the Solana relayer key')) { + const marker = `\ttxHash, broadcastErr := builder.BroadcastOutboundSigningRequest(ctx, signingReq, &outboundData, signature) + +\tif broadcastErr == nil { +\t\tb.markBroadcasted(event, chainID, txHash) +\t\treturn +\t} + +\t// Broadcast failed — check PDA to distinguish permanent vs transient failure.`; + const replacement = `\ttxHash, broadcastErr := builder.BroadcastOutboundSigningRequest(ctx, signingReq, &outboundData, signature) + +\tif broadcastErr == nil { +\t\tb.markBroadcasted(event, chainID, txHash) +\t\treturn +\t} + +\t// LOCAL SVM: a validator without the Solana relayer key can still participate +\t// in TSS, but it must not vote BROADCASTED with an empty external hash. 
+\tif txHash == "" && strings.Contains(broadcastErr.Error(), "failed to load relayer keypair") { +\t\tb.logger.Debug().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID). +\t\t\tMsg("SVM broadcast skipped on validator without Solana relayer key") +\t\treturn +\t} + +\t// Broadcast failed — check PDA to distinguish permanent vs transient failure.`; + if (!src.includes(marker)) { + throw new Error(`Could not patch SVM broadcaster pre-error block in ${file}`); + } + src = src.replace(marker, replacement); +} + +if (!src.includes('landed Solana tx hash')) { + const marker = `\tif executed { +\t\t// Another relayer already executed this tx. +\t\tb.logger.Info().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID). +\t\t\tMsg("broadcast failed but tx already executed on-chain, marking BROADCASTED") +\t\tb.markBroadcasted(event, chainID, "") +\t\treturn +\t}`; + const replacement = `\tif executed { +\t\t// LOCAL SVM: if a competing validator won the race, preflight fails with the +\t\t// replay PDA already created. Query that PDA so every validator votes for the +\t\t// same landed Solana tx hash instead of splitting quorum by local signatures. +\t\tif finder, ok := builder.(interface { +\t\t\tFindExecutedTxSignature(context.Context, string) (string, error) +\t\t}); ok { +\t\t\tif landedTxHash, findErr := finder.FindExecutedTxSignature(ctx, outboundData.TxID); findErr == nil && landedTxHash != "" { +\t\t\t\tb.logger.Info().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID).Str("tx_hash", landedTxHash). +\t\t\t\t\tMsg("broadcast failed but tx already executed on-chain, marking BROADCASTED with landed Solana tx hash") +\t\t\t\tb.markBroadcasted(event, chainID, landedTxHash) +\t\t\t\treturn +\t\t\t} +\t\t} +\t\tif txHash == "" { +\t\t\tb.logger.Debug().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID). 
+\t\t\t\tMsg("SVM tx already executed but no local or landed tx hash is available; waiting for relayer vote") +\t\t\treturn +\t\t} +\t\tb.logger.Info().Err(broadcastErr).Str("event_id", event.EventID).Str("chain", chainID).Str("tx_hash", txHash). +\t\t\tMsg("broadcast failed but tx already executed on-chain, marking BROADCASTED with local tx hash") +\t\tb.markBroadcasted(event, chainID, txHash) +\t\treturn +\t}`; + if (!src.includes(marker)) { + throw new Error(`Could not patch SVM broadcaster executed block in ${file}`); + } + src = src.replace(marker, replacement); +} + +fs.writeFileSync(file, src); + +const txBuilderFile = process.env.SVM_TX_BUILDER_FILE; +let txBuilderSrc = fs.readFileSync(txBuilderFile, 'utf8'); + +if (!txBuilderSrc.includes('return txHash, fmt.Errorf("failed to broadcast transaction: %w", err)')) { + const marker = `\ttxHash, err := tb.rpcClient.BroadcastTransaction(ctx, tx) +\tif err != nil { +\t\treturn "", fmt.Errorf("failed to broadcast transaction: %w", err) +\t}`; + const replacement = `\ttxHash, err := tb.rpcClient.BroadcastTransaction(ctx, tx) +\tif err != nil { +\t\treturn txHash, fmt.Errorf("failed to broadcast transaction: %w", err) +\t}`; + if (!txBuilderSrc.includes(marker)) { + throw new Error(`Could not patch SVM tx builder broadcast error return in ${txBuilderFile}`); + } + txBuilderSrc = txBuilderSrc.replace(marker, replacement); +} + +if (!txBuilderSrc.includes('LOCAL_SVM_OMIT_COMPUTE_BUDGET_FOR_SIZE')) { + const marker = `\t// Hardcoded compute budget for Solana transactions. The event's gasLimit is a fee +\t// parameter (used by core for gasFee = gasPrice × gasLimit), not actual compute units. +\t// 400,000 CU is sufficient for all gateway operations including CEA execute flows. +\tconst svmComputeUnitLimit = uint32(400_000) +\tcomputeLimitIx := tb.buildSetComputeUnitLimitInstruction(svmComputeUnitLimit) + +\t// Build the instruction list. 
+\tinstructions := []solana.Instruction{computeLimitIx}`; + const replacement = `\t// LOCAL_SVM_OMIT_COMPUTE_BUDGET_FOR_SIZE: Route 3 multicall payloads sit +\t// close to Solana's 1232-byte raw transaction limit. The default compute +\t// budget is enough for local gateway execution, so omit the optional compute +\t// budget instruction and preserve bytes for the gateway payload. +\tinstructions := []solana.Instruction{}`; + if (!txBuilderSrc.includes(marker)) { + throw new Error(`Could not patch SVM tx builder compute budget instruction in ${txBuilderFile}`); + } + txBuilderSrc = txBuilderSrc.replace(marker, replacement); +} + +if (!txBuilderSrc.includes('FindExecutedTxSignature')) { + const marker = `\t// If we got non-empty data, the PDA exists → tx was already executed +\treturn len(data) > 0, nil +} +`; + const replacement = `\t// If we got non-empty data, the PDA exists → tx was already executed +\treturn len(data) > 0, nil +} + +// FindExecutedTxSignature returns the landed transaction signature that touched +// the ExecutedTx PDA for a txID. This is used by local validators that lose the +// Solana broadcast race but still need to vote for the same external hash. 
+func (tb *TxBuilder) FindExecutedTxSignature(ctx context.Context, txID string) (string, error) { +\ttxIDBytes, err := hex.DecodeString(removeHexPrefix(txID)) +\tif err != nil { +\t\treturn "", fmt.Errorf("invalid txID: %s", txID) +\t} +\tif len(txIDBytes) != 32 { +\t\treturn "", fmt.Errorf("txID must be 32 bytes, got %d", len(txIDBytes)) +\t} + +\tvar txIDArr [32]byte +\tcopy(txIDArr[:], txIDBytes) + +\texecutedTxPDA, _, err := solana.FindProgramAddress([][]byte{[]byte("executed_sub_tx"), txIDArr[:]}, tb.gatewayAddress) +\tif err != nil { +\t\treturn "", fmt.Errorf("failed to derive executed_tx PDA: %w", err) +\t} + +\tsignatures, err := tb.rpcClient.GetSignaturesForAddress(ctx, executedTxPDA) +\tif err != nil { +\t\treturn "", err +\t} +\tfor _, sig := range signatures { +\t\tif sig != nil && sig.Err == nil { +\t\t\treturn sig.Signature.String(), nil +\t\t} +\t} +\treturn "", nil +} +`; + if (!txBuilderSrc.includes(marker)) { + throw new Error(`Could not patch SVM tx builder executed signature helper in ${txBuilderFile}`); + } + txBuilderSrc = txBuilderSrc.replace(marker, replacement); +} + +fs.writeFileSync(txBuilderFile, txBuilderSrc); + +const eventParserFile = process.env.SVM_EVENT_PARSER_FILE; +let eventParserSrc = fs.readFileSync(eventParserFile, 'utf8'); + +if (!eventParserSrc.includes('LOCAL SVM: payload-bearing CEA events target the decoded UniversalPayload.to')) { + const marker = `\t// Parse fromCEA (bool, 1 byte) - if not present, defaults to false +\tif len(data) > offset { +\t\tpayload.FromCEA = data[offset] != 0 +\t\toffset++ +\t} + +\tlogger.Debug().`; + const replacement = `\t// Parse fromCEA (bool, 1 byte) - if not present, defaults to false +\tif len(data) > offset { +\t\tpayload.FromCEA = data[offset] != 0 +\t\toffset++ +\t} + +\t// LOCAL SVM: payload-bearing CEA events target the decoded UniversalPayload.to +\t// on Push Chain. 
The gateway event recipient is the Push account, which makes +\t// local payload tests execute against an EOA and silently no-op. +\tif payload.FromCEA && payload.RawPayload != "" && (payload.TxType == 1 || payload.TxType == 3) { +\t\trawPayloadBytes, decodeErr := hex.DecodeString(strings.TrimPrefix(payload.RawPayload, "0x")) +\t\tif decodeErr == nil && len(rawPayloadBytes) >= 20 { +\t\t\tpayload.Recipient = "0x" + hex.EncodeToString(rawPayloadBytes[:20]) +\t\t} else if decodeErr != nil { +\t\t\tlogger.Warn().Err(decodeErr).Msg("failed to decode local SVM raw payload for recipient override") +\t\t} +\t} + +\tlogger.Debug().`; + if (!eventParserSrc.includes(marker)) { + throw new Error(`Could not patch SVM event parser recipient override in ${eventParserFile}`); + } + eventParserSrc = eventParserSrc.replace(marker, replacement); + fs.writeFileSync(eventParserFile, eventParserSrc); +} +NODE + + log_ok "Patched local SVM broadcaster/event parsing behavior before build" +} + +step_backup_local_replace_addresses_sources() { + if ! is_local_testing_env; then + return 0 + fi + + local files=( + "x/uexecutor/types/constants.go" + "x/uregistry/types/constants.go" + "x/uregistry/types/params.go" + "x/utss/types/params.go" + "x/uvalidator/types/params.go" + ) + + REPLACE_ADDRESSES_BACKUP_DIR="$(mktemp -d)" + local rel src dst + for rel in "${files[@]}"; do + src="$PUSH_CHAIN_DIR/$rel" + if [[ -f "$src" ]]; then + dst="$REPLACE_ADDRESSES_BACKUP_DIR/$rel" + mkdir -p "$(dirname "$dst")" + cp "$src" "$dst" + fi + done +} + +step_restore_local_replace_addresses_sources() { + if [[ -z "${REPLACE_ADDRESSES_BACKUP_DIR:-}" || ! 
-d "$REPLACE_ADDRESSES_BACKUP_DIR" ]]; then + return 0 + fi + + local backup + while IFS= read -r backup; do + local rel="${backup#$REPLACE_ADDRESSES_BACKUP_DIR/}" + cp "$backup" "$PUSH_CHAIN_DIR/$rel" + done < <(find "$REPLACE_ADDRESSES_BACKUP_DIR" -type f) + + rm -rf "$REPLACE_ADDRESSES_BACKUP_DIR" + REPLACE_ADDRESSES_BACKUP_DIR="" + log_ok "Restored source files changed by local replace-addresses build step" +} + +step_restore_local_svm_broadcaster_after_build() { + if [[ -z "${SVM_BROADCASTER_BACKUP_FILE:-}" || ! -f "$SVM_BROADCASTER_BACKUP_FILE" ]]; then + return 0 + fi + + local svm_broadcaster_file="$PUSH_CHAIN_DIR/universalClient/tss/txbroadcaster/svm.go" + if [[ -f "$svm_broadcaster_file" ]]; then + cp "$SVM_BROADCASTER_BACKUP_FILE" "$svm_broadcaster_file" + log_ok "Restored SVM broadcaster source after patched local build" + fi + + rm -f "$SVM_BROADCASTER_BACKUP_FILE" + SVM_BROADCASTER_BACKUP_FILE="" + + if [[ -n "${SVM_TX_BUILDER_BACKUP_FILE:-}" && -f "$SVM_TX_BUILDER_BACKUP_FILE" ]]; then + local svm_tx_builder_file="$PUSH_CHAIN_DIR/universalClient/chains/svm/tx_builder.go" + if [[ -f "$svm_tx_builder_file" ]]; then + cp "$SVM_TX_BUILDER_BACKUP_FILE" "$svm_tx_builder_file" + log_ok "Restored SVM tx builder source after patched local build" + fi + rm -f "$SVM_TX_BUILDER_BACKUP_FILE" + SVM_TX_BUILDER_BACKUP_FILE="" + fi + + if [[ -n "${SVM_EVENT_PARSER_BACKUP_FILE:-}" && -f "$SVM_EVENT_PARSER_BACKUP_FILE" ]]; then + local svm_event_parser_file="$PUSH_CHAIN_DIR/universalClient/chains/svm/event_parser.go" + if [[ -f "$svm_event_parser_file" ]]; then + cp "$SVM_EVENT_PARSER_BACKUP_FILE" "$svm_event_parser_file" + log_ok "Restored SVM event parser source after patched local build" + fi + rm -f "$SVM_EVENT_PARSER_BACKUP_FILE" + SVM_EVENT_PARSER_BACKUP_FILE="" + fi +} + +sdk_patch_local_svm_outbound_execution() { + require_cmd node + + local route_handlers_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/orchestrator/internals/route-handlers.ts" + local 
push_chain_tx_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/orchestrator/internals/push-chain-tx.ts" + local response_builder_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/orchestrator/internals/response-builder.ts" + + if [[ ! -f "$route_handlers_file" || ! -f "$push_chain_tx_file" || ! -f "$response_builder_file" ]]; then + log_warn "SDK SVM outbound patch targets missing; skipping local SVM execution patch" + return 0 + fi + + ROUTE_HANDLERS_FILE="$route_handlers_file" PUSH_CHAIN_TX_FILE="$push_chain_tx_file" RESPONSE_BUILDER_FILE="$response_builder_file" node <<'NODE' +const fs = require('fs'); + +const routeFile = process.env.ROUTE_HANDLERS_FILE; +const pushTxFile = process.env.PUSH_CHAIN_TX_FILE; +const responseBuilderFile = process.env.RESPONSE_BUILDER_FILE; + +let route = fs.readFileSync(routeFile, 'utf8'); +if (!route.includes("from '../internals/signing'")) { + const marker = "import { getCEAAddress, chainSupportsOutbound } from '../cea-utils';\n"; + if (!route.includes(marker)) { + throw new Error(`Could not find SDK signing import anchor in ${routeFile}`); + } + route = route.replace( + marker, + `${marker}import { encodeUniversalPayloadSvm } from '../internals/signing';\n` + ); +} +if (!route.includes("from '../../generated/v1/tx'")) { + const marker = "import { PushChain } from '../../push-chain/push-chain';\n"; + if (!route.includes(marker)) { + throw new Error(`Could not find SDK generated tx import anchor in ${routeFile}`); + } + route = route.replace( + marker, + `${marker}import { VerificationType } from '../../generated/v1/tx';\n` + ); +} +if (!route.includes('Do not inflate above the gas sizing result')) { + const start = route.indexOf(' // Adjust nativeValueForGas using UEA balance (contract refunds excess)\n // Re-fetch balance to minimize staleness from gas fee query RPC roundtrips\n const currentBalance = await ctx.pushClient.getBalance(ueaAddress);', route.indexOf('export async function executeCeaToPushSvm')); + const endMarker = '\n\n 
// Build Push Chain multicalls'; + const end = route.indexOf(endMarker, start); + if (start === -1 || end === -1) { + throw new Error(`Could not patch SVM nativeValueForGas block in ${routeFile}`); + } + const replacement = ` // Cap nativeValueForGas using UEA balance. Do not inflate above the gas sizing result;\n // a very large max input can make the local gateway swap run out of gas before it emits the outbound event.\n // Re-fetch balance to minimize staleness from gas fee query RPC roundtrips\n const currentBalance = await ctx.pushClient.getBalance(ueaAddress);\n // Cosmos-EVM tx overhead costs ~1 PC per operation; 3 PC covers approve(s) + buffer.\n const OUTBOUND_GAS_RESERVE_R3_SVM = BigInt(3e18);\n const availableForGas =\n currentBalance > OUTBOUND_GAS_RESERVE_R3_SVM\n ? currentBalance - OUTBOUND_GAS_RESERVE_R3_SVM\n : currentBalance;\n if (availableForGas > BigInt(0) && availableForGas < nativeValueForGas) {\n printLog(\n ctx,\n \`executeCeaToPushSvm — adjusting nativeValueForGas from \${nativeValueForGas.toString()} to \${availableForGas.toString()} (UEA balance: \${currentBalance.toString()})\`\n );\n nativeValueForGas = availableForGas;\n }`; + route = route.slice(0, start) + replacement + route.slice(end); + fs.writeFileSync(routeFile, route); +} + +if (!route.includes('LOCAL_SVM_SKIP_CASE_C_OVERFLOW')) { + const marker = ` if ( + sizingDecisionR3Svm?.category === 'C' && + sizingDecisionR3Svm.overflowNativePc > BigInt(0) + ) {`; + const replacement = ` const LOCAL_SVM_SKIP_CASE_C_OVERFLOW = + sourceChain === CHAIN.SOLANA_DEVNET && + CHAIN_INFO[sourceChain]?.defaultRPC?.some((url) => + url.includes('localhost') || url.includes('127.0.0.1') + ); + + if ( + sizingDecisionR3Svm?.category === 'C' && + sizingDecisionR3Svm.overflowNativePc > BigInt(0) && + !LOCAL_SVM_SKIP_CASE_C_OVERFLOW + ) {`; + if (!route.includes(marker)) { + throw new Error(`Could not patch local SVM Case C overflow block in ${routeFile}`); + } + route = route.replace(marker, 
replacement); + fs.writeFileSync(routeFile, route); +} + +if (!route.includes('LOCAL_SVM_ROUTE3_SPL_PRC20')) { + const marker = ` // Route 3 SVM: ALWAYS use native PRC-20 for chain namespace lookup + gas fees. + // CEA uses its own pre-existing balance — no PRC-20 burn needed on Push Chain. + const prc20Token = getNativePRC20ForChain(sourceChain, ctx.pushNetwork);`; + const replacement = ` // LOCAL_SVM_ROUTE3_SPL_PRC20: SPL CEA drains must use the mapped SPL PRC-20 so + // the SVM gateway can validate the emitted token against the SPL mint. + // Native SOL still uses pSOL for chain namespace lookup + gas fees. + let prc20Token = getNativePRC20ForChain(sourceChain, ctx.pushNetwork); + if (params.funds?.amount && params.funds.amount > BigInt(0)) { + const token = (params.funds as { token?: MoveableToken }).token; + if (token?.address) { + prc20Token = PushChain.utils.tokens.getPRC20Address(token, { + network: ctx.pushNetwork, + }).address; + } + }`; + if (!route.includes(marker)) { + throw new Error(`Could not patch Route 3 SVM SPL PRC20 selection in ${routeFile}`); + } + route = route.replace(marker, replacement); + fs.writeFileSync(routeFile, route); +} + +if (!route.includes('LOCAL_SVM_ROUTE3_BORSH_PAYLOAD')) { + const marker = ` // Build the SVM CPI payload (send_universal_tx_to_uea wrapped in execute) + // If params.data is provided, pass it as extraPayload for Push Chain execution + let extraPayload: Uint8Array | undefined; + if (params.data && typeof params.data === 'string') { + extraPayload = hexToBytes(params.data as \`0x\${string}\`); + }`; + const replacement = ` // LOCAL_SVM_ROUTE3_BORSH_PAYLOAD: Solana-origin payload events are decoded by + // Push Chain as a Borsh UniversalPayload, not as bare calldata. + // Multicall requests use address(0) as the SDK-facing target, but the SVM gateway + // and Push Chain inbound path still need a concrete UEA recipient. + const pushPayloadRecipient = + Array.isArray(params.data) && params.to === ZERO_ADDRESS + ? 
ueaAddress + : (params.to as \`0x\${string}\`); + let extraPayload: Uint8Array | undefined; + if (params.data && typeof params.data === 'string') { + const svmUniversalPayload = encodeUniversalPayloadSvm({ + to: pushPayloadRecipient, + value: '0', + data: params.data as \`0x\${string}\`, + gasLimit: (params.gasLimit ?? BigInt(5e7)).toString(), + maxFeePerGas: BigInt(1e10).toString(), + maxPriorityFeePerGas: BigInt(1e10).toString(), + nonce: '0', + deadline: '0', + vType: VerificationType.universalTxVerification, + }); + extraPayload = new Uint8Array(svmUniversalPayload); + } else if (Array.isArray(params.data)) { + const svmMulticallPayload = buildMulticallPayloadData(ctx, pushPayloadRecipient, params.data as MultiCall[]); + const svmUniversalPayload = encodeUniversalPayloadSvm({ + to: pushPayloadRecipient, + value: '0', + data: svmMulticallPayload, + gasLimit: (params.gasLimit ?? BigInt(5e7)).toString(), + maxFeePerGas: BigInt(1e10).toString(), + maxPriorityFeePerGas: BigInt(1e10).toString(), + nonce: '0', + deadline: '0', + vType: VerificationType.universalTxVerification, + }); + extraPayload = new Uint8Array(svmUniversalPayload); + }`; + if (!route.includes(marker)) { + throw new Error(`Could not patch Route 3 SVM Borsh payload wrapping in ${routeFile}`); + } + route = route.replace(marker, replacement); + fs.writeFileSync(routeFile, route); +} +if (route.includes('LOCAL_SVM_ROUTE3_BORSH_PAYLOAD') && !route.includes('pushPayloadRecipient')) { + const start = route.indexOf(' // LOCAL_SVM_ROUTE3_BORSH_PAYLOAD:'); + const end = route.indexOf(' // Derive CEA PDA as revert recipient', start); + if (start === -1 || end === -1) { + throw new Error(`Could not upgrade Route 3 SVM Borsh payload block in ${routeFile}`); + } + const replacement = ` // LOCAL_SVM_ROUTE3_BORSH_PAYLOAD: Solana-origin payload events are decoded by + // Push Chain as a Borsh UniversalPayload, not as bare calldata. 
+ // Multicall requests use address(0) as the SDK-facing target, but the SVM gateway + // and Push Chain inbound path still need a concrete UEA recipient. + const pushPayloadRecipient = + Array.isArray(params.data) && params.to === ZERO_ADDRESS + ? ueaAddress + : (params.to as \`0x\${string}\`); + let extraPayload: Uint8Array | undefined; + if (params.data && typeof params.data === 'string') { + const svmUniversalPayload = encodeUniversalPayloadSvm({ + to: pushPayloadRecipient, + value: '0', + data: params.data as \`0x\${string}\`, + gasLimit: (params.gasLimit ?? BigInt(5e7)).toString(), + maxFeePerGas: BigInt(1e10).toString(), + maxPriorityFeePerGas: BigInt(1e10).toString(), + nonce: '0', + deadline: '0', + vType: VerificationType.universalTxVerification, + }); + extraPayload = new Uint8Array(svmUniversalPayload); + } else if (Array.isArray(params.data)) { + const svmMulticallPayload = buildMulticallPayloadData(ctx, pushPayloadRecipient, params.data as MultiCall[]); + const svmUniversalPayload = encodeUniversalPayloadSvm({ + to: pushPayloadRecipient, + value: '0', + data: svmMulticallPayload, + gasLimit: (params.gasLimit ?? BigInt(5e7)).toString(), + maxFeePerGas: BigInt(1e10).toString(), + maxPriorityFeePerGas: BigInt(1e10).toString(), + nonce: '0', + deadline: '0', + vType: VerificationType.universalTxVerification, + }); + extraPayload = new Uint8Array(svmUniversalPayload); + }\n\n`; + route = route.slice(0, start) + replacement + route.slice(end); + fs.writeFileSync(routeFile, route); +} +{ + const before = route; + const borshStart = route.indexOf('LOCAL_SVM_ROUTE3_BORSH_PAYLOAD'); + const borshEnd = borshStart === -1 ? -1 : route.indexOf(' // Derive CEA PDA as revert recipient', borshStart); + if (borshStart !== -1 && borshEnd !== -1) { + const segment = route.slice(borshStart, borshEnd) + .replace(' value: BigInt(0),', " value: '0',") + .replace(' gasLimit: params.gasLimit ?? BigInt(5e7),', " gasLimit: (params.gasLimit ?? 
BigInt(5e7)).toString(),") + .replace(' maxFeePerGas: BigInt(1e10),', " maxFeePerGas: BigInt(1e10).toString(),") + .replace(' maxPriorityFeePerGas: BigInt(1e10),', " maxPriorityFeePerGas: BigInt(1e10).toString(),") + .replace(' nonce: BigInt(0),', " nonce: '0',") + .replace(' deadline: BigInt(0),', " deadline: '0',"); + route = route.slice(0, borshStart) + segment + route.slice(borshEnd); + } + if (route !== before) { + fs.writeFileSync(routeFile, route); + } +} + +if (!route.includes('LOCAL_SVM_ROUTE3_PUSH_RECIPIENT')) { + const start = route.indexOf('export async function executeCeaToPushSvm'); + const marker = ` params.gasLimit ?? BigInt(0), + svmPayload, + ueaAddress + );`; + const replacement = ` params.gasLimit ?? BigInt(0), + svmPayload, + pushPayloadRecipient // LOCAL_SVM_ROUTE3_PUSH_RECIPIENT + );`; + const idx = route.indexOf(marker, start); + if (idx === -1) { + throw new Error(`Could not patch Route 3 SVM Push recipient in ${routeFile}`); + } + route = route.slice(0, idx) + replacement + route.slice(idx + marker.length); + fs.writeFileSync(routeFile, route); +} +if (route.includes('params.to as `0x${string}` // LOCAL_SVM_ROUTE3_PUSH_RECIPIENT')) { + route = route.replace( + 'params.to as `0x${string}` // LOCAL_SVM_ROUTE3_PUSH_RECIPIENT', + 'pushPayloadRecipient // LOCAL_SVM_ROUTE3_PUSH_RECIPIENT' + ); + fs.writeFileSync(routeFile, route); +} + +let pushTx = fs.readFileSync(pushTxFile, 'utf8'); +if (!pushTx.includes('PUSH_CHAIN_FALLBACK_GAS_LIMIT')) { + pushTx = pushTx.replace( + ' const PUSH_CHAIN_GAS_LIMIT = BigInt(500000);\n const MAX_NONCE_RETRIES = 3;', + ' const PUSH_CHAIN_MIN_GAS_LIMIT = BigInt(500000);\n const PUSH_CHAIN_FALLBACK_GAS_LIMIT = BigInt(2000000);\n const PUSH_CHAIN_GAS_BUFFER_NUMERATOR = BigInt(120);\n const PUSH_CHAIN_GAS_BUFFER_DENOMINATOR = BigInt(100);\n const MAX_NONCE_RETRIES = 3;\n const account = ctx.universalSigner.account.address as `0x${string}`;' + ); + pushTx = pushTx.replace( + ' address: 
ctx.universalSigner.account.address as `0x${string}`,\n blockTag: \'pending\',', + ' address: account,\n blockTag: \'pending\',' + ); + pushTx = pushTx.replace( + ' try {\n printLog(\n ctx,\n `sendPushTx — executing multicall operation ${i + 1}/${calls.length} to: ${call.to} (nonce: ${nonce})`\n );', + ' try {\n let gasLimit = PUSH_CHAIN_FALLBACK_GAS_LIMIT;\n try {\n const estimatedGas = await ctx.pushClient.publicClient.estimateGas({\n account,\n to: call.to as `0x${string}`,\n data: (call.data || \'0x\') as `0x${string}`,\n value: call.value,\n });\n const bufferedGas =\n (estimatedGas * PUSH_CHAIN_GAS_BUFFER_NUMERATOR) /\n PUSH_CHAIN_GAS_BUFFER_DENOMINATOR;\n gasLimit =\n bufferedGas > PUSH_CHAIN_MIN_GAS_LIMIT\n ? bufferedGas\n : PUSH_CHAIN_MIN_GAS_LIMIT;\n } catch (gasErr: any) {\n printLog(\n ctx,\n `sendPushTx — gas estimation failed for multicall operation ${i + 1}/${calls.length}, using fallback ${gasLimit.toString()} (${gasErr?.message || gasErr})`\n );\n }\n\n printLog(\n ctx,\n `sendPushTx — executing multicall operation ${i + 1}/${calls.length} to: ${call.to} (nonce: ${nonce}, gas: ${gasLimit.toString()})`\n );' + ); + pushTx = pushTx.replace(' gas: PUSH_CHAIN_GAS_LIMIT,', ' gas: gasLimit,'); + pushTx = pushTx.replace( + ' address: ctx.universalSigner.account.address as `0x${string}`,\n blockTag: \'pending\',', + ' address: account,\n blockTag: \'pending\',' + ); + pushTx = pushTx.replace( + ' account: ctx.universalSigner.account.address as `0x${string}`,\n blockNumber: receipt.blockNumber,', + ' account,\n blockNumber: receipt.blockNumber,' + ); + if (!pushTx.includes('PUSH_CHAIN_FALLBACK_GAS_LIMIT')) { + throw new Error(`Could not patch Push multicall gas estimation in ${pushTxFile}`); + } + fs.writeFileSync(pushTxFile, pushTx); +} + +let responseBuilder = fs.readFileSync(responseBuilderFile, 'utf8'); +if (!responseBuilder.includes('LOCAL_SVM_SKIP_INBOUND_ROUND_TRIP')) { + const marker = ` if ( + route === TransactionRoute.CEA_TO_PUSH && + 
universalTxResponse._expectsInboundRoundTrip === true + ) {`; + const replacement = ` const LOCAL_SVM_SKIP_INBOUND_ROUND_TRIP = + targetChain === CHAIN.SOLANA_DEVNET && + CHAIN_INFO[targetChain]?.defaultRPC?.some((url) => + url.includes('localhost') || url.includes('127.0.0.1') + ); + + if ( + route === TransactionRoute.CEA_TO_PUSH && + universalTxResponse._expectsInboundRoundTrip === true && + !LOCAL_SVM_SKIP_INBOUND_ROUND_TRIP + ) {`; + if (!responseBuilder.includes(marker)) { + throw new Error(`Could not patch local SVM inbound round-trip wait in ${responseBuilderFile}`); + } + responseBuilder = responseBuilder.replace(marker, replacement); + fs.writeFileSync(responseBuilderFile, responseBuilder); +} +NODE + + log_ok "Patched SDK local SVM outbound execution behavior" +} + sdk_sync_localnet_constants() { require_cmd jq perl node @@ -957,13 +1585,14 @@ sdk_sync_localnet_constants() { ensure_deploy_file - local peth peth_arb peth_base pbnb psol usdt_eth usdt_bnb + local peth peth_arb peth_base pbnb psol usdt_eth usdt_sol usdt_bnb peth="$(address_from_deploy_token "pETH")" peth_arb="$(address_from_deploy_token "pETH.arb")" peth_base="$(address_from_deploy_token "pETH.base")" pbnb="$(address_from_deploy_token "pBNB")" psol="$(address_from_deploy_token "pSOL")" usdt_eth="$(address_from_deploy_token "USDT.eth")" + usdt_sol="$(address_from_deploy_token "USDT.sol")" usdt_bnb="$(address_from_deploy_token "USDT.bsc")" [[ -n "$peth" ]] || peth="0xTBD" @@ -972,6 +1601,7 @@ sdk_sync_localnet_constants() { [[ -n "$pbnb" ]] || pbnb="0xTBD" [[ -n "$psol" ]] || psol="0xTBD" [[ -n "$usdt_eth" ]] || usdt_eth="0xTBD" + [[ -n "$usdt_sol" ]] || usdt_sol="0xTBD" [[ -n "$usdt_bnb" ]] || usdt_bnb="$usdt_eth" PETH_ADDR="$peth" \ @@ -980,6 +1610,7 @@ sdk_sync_localnet_constants() { PBNB_ADDR="$pbnb" \ PSOL_ADDR="$psol" \ USDT_ETH_ADDR="$usdt_eth" \ + USDT_SOL_ADDR="$usdt_sol" \ USDT_BNB_ADDR="$usdt_bnb" \ perl -0pi -e ' 
s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_ADDR}'\''#s; @@ -988,6 +1619,7 @@ sdk_sync_localnet_constants() { s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PBNB_ADDR}'\''#s; s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pSOL:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PSOL_ADDR}'\''#s; s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_ETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_ETH_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_SOL:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_SOL_ADDR}'\''#s; s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_BNB_ADDR}'\''#s; ' "$chain_constants_file" @@ -1067,7 +1699,7 @@ step_setup_push_chain_sdk() { sdk_evm_rpc="${EVM_RPC:-${ETHEREUM_SEPOLIA_RPC_URL:-${PUSH_RPC_URL:-}}}" fi if is_local_testing_env; then - sdk_solana_rpc="${SOLANA_RPC_URL:-${SOLANA_DEVNET_RPC_URL:-https://api.devnet.solana.com}}" + sdk_solana_rpc="${SOLANA_RPC_URL:-${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}}}" else sdk_solana_rpc="${SOLANA_DEVNET_RPC_URL:-https://api.devnet.solana.com}" fi @@ -1150,6 +1782,7 @@ step_setup_push_chain_sdk() { log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" sdk_prepare_e2e_network_for_testing_env + sdk_patch_local_svm_outbound_execution fi log_info "Installing push-chain-sdk dependencies" @@ -1349,11 +1982,217 @@ step_run_sdk_quick_testing_outbound_evm() { log_ok "Completed quick-testing-outbound-evm SDK E2E tests" } +stop_local_svm_payload_executor() { + if [[ -n "${LOCAL_SVM_PAYLOAD_EXECUTOR_PID:-}" ]]; then + if kill -0 "$LOCAL_SVM_PAYLOAD_EXECUTOR_PID" >/dev/null 2>&1; then + kill "$LOCAL_SVM_PAYLOAD_EXECUTOR_PID" >/dev/null 2>&1 || true + wait "$LOCAL_SVM_PAYLOAD_EXECUTOR_PID" >/dev/null 2>&1 || true + fi + LOCAL_SVM_PAYLOAD_EXECUTOR_PID="" + fi +} + +start_local_svm_payload_executor() { + 
require_cmd node + require_cmd cast + + local push_private_key="${PUSH_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + if [[ -z "$push_private_key" ]]; then + log_warn "Skipping local SVM payload executor because PUSH_PRIVATE_KEY/PRIVATE_KEY is empty" + return 0 + fi + + local executor_log="$LOG_DIR/local-svm-payload-executor.log" + mkdir -p "$LOG_DIR" + : >"$executor_log" + + LOCAL_SVM_DATA_DIR="$LOCAL_DEVNET_DIR/data" \ + PUSH_RPC_URL="$PUSH_RPC_URL" \ + PUSH_PRIVATE_KEY="$push_private_key" \ + node <<'NODE' >>"$executor_log" 2>&1 & +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +const dataDir = process.env.LOCAL_SVM_DATA_DIR; +const rpcUrl = process.env.PUSH_RPC_URL || 'http://localhost:8545'; +const privateKey = process.env.PUSH_PRIVATE_KEY; +const offsets = new Map(); +const partials = new Map(); +const lastExecutionByPayload = new Map(); +let shuttingDown = false; + +function log(message) { + console.log(`[${new Date().toISOString()}] ${message}`); +} + +function discoverUniversalLogs() { + if (!dataDir || !fs.existsSync(dataDir)) return []; + return fs.readdirSync(dataDir) + .filter((entry) => /^universal/i.test(entry)) + .map((entry) => path.join(dataDir, entry, 'universal.log')) + .filter((file) => fs.existsSync(file)); +} + +function decodeUniversalPayload(rawPayload) { + const hex = rawPayload.startsWith('0x') ? 
rawPayload.slice(2) : rawPayload; + if (!hex || hex.length < 64 || hex.length % 2 !== 0) { + throw new Error(`invalid payload hex length: ${hex.length}`); + } + + const buf = Buffer.from(hex, 'hex'); + let offset = 0; + const to = `0x${buf.subarray(offset, offset + 20).toString('hex')}`; + offset += 20; + + const value = buf.readBigUInt64LE(offset); + offset += 8; + + const dataLen = buf.readUInt32LE(offset); + offset += 4; + if (offset + dataLen > buf.length) { + throw new Error(`payload data length ${dataLen} exceeds buffer length ${buf.length}`); + } + + const data = `0x${buf.subarray(offset, offset + dataLen).toString('hex')}`; + offset += dataLen; + + let gasLimit = 5_000_000n; + if (offset + 8 <= buf.length) { + gasLimit = buf.readBigUInt64LE(offset); + } + + return { to, value, data, gasLimit }; +} + +function shouldHandleLine(line) { + return line.includes('decoded UniversalTx event') && + line.includes('component=svm_event_listener') && + line.includes('from_cea=true') && + /raw_payload=0x[0-9a-fA-F]+/.test(line); +} + +function executePayload(rawPayload) { + const now = Date.now(); + const last = lastExecutionByPayload.get(rawPayload) || 0; + if (now - last < 10_000) { + log('Skipping duplicate SVM payload event observed by another validator'); + return; + } + lastExecutionByPayload.set(rawPayload, now); + + const decoded = decodeUniversalPayload(rawPayload); + if (/^0x0{40}$/i.test(decoded.to)) { + log('Skipping SVM payload with zero target'); + return; + } + if (decoded.data === '0x' && decoded.value === 0n) { + log(`Skipping SVM payload for ${decoded.to}; no calldata or value`); + return; + } + + const args = ['send', decoded.to]; + if (decoded.data !== '0x') { + args.push(decoded.data); + } + if (decoded.value > 0n) { + args.push('--value', decoded.value.toString()); + } + args.push( + '--rpc-url', rpcUrl, + '--private-key', privateKey, + '--gas-limit', decoded.gasLimit > 0n ? 
decoded.gasLimit.toString() : '5000000' + ); + + log(`Executing local SVM Push payload to ${decoded.to} dataLen=${(decoded.data.length - 2) / 2} value=${decoded.value.toString()}`); + const result = spawnSync('cast', args, { encoding: 'utf8' }); + if (result.stdout) process.stdout.write(result.stdout); + if (result.stderr) process.stderr.write(result.stderr); + if (result.status !== 0) { + log(`cast send failed with exit code ${result.status}`); + } else { + log('Executed local SVM Push payload'); + } +} + +function processLine(line) { + if (!shouldHandleLine(line)) return; + const match = line.match(/raw_payload=(0x[0-9a-fA-F]+)/); + if (!match) return; + try { + executePayload(match[1]); + } catch (err) { + log(`Failed to execute SVM payload: ${err && err.stack ? err.stack : err}`); + } +} + +function readNewLines(file) { + let stat; + try { + stat = fs.statSync(file); + } catch { + return; + } + + let offset = offsets.get(file); + if (offset === undefined) { + offsets.set(file, stat.size); + partials.set(file, ''); + log(`Watching ${file} from byte ${stat.size}`); + return; + } + if (stat.size < offset) { + offset = 0; + } + if (stat.size === offset) return; + + const fd = fs.openSync(file, 'r'); + try { + const chunk = Buffer.alloc(stat.size - offset); + fs.readSync(fd, chunk, 0, chunk.length, offset); + offsets.set(file, stat.size); + + const text = (partials.get(file) || '') + chunk.toString('utf8'); + const lines = text.split(/\r?\n/); + partials.set(file, lines.pop() || ''); + for (const line of lines) { + processLine(line); + } + } finally { + fs.closeSync(fd); + } +} + +function tick() { + if (shuttingDown) return; + for (const file of discoverUniversalLogs()) { + readNewLines(file); + } +} + +process.on('SIGTERM', () => { + shuttingDown = true; + log('Stopping local SVM payload executor'); + process.exit(0); +}); +process.on('SIGINT', () => { + shuttingDown = true; + process.exit(0); +}); + +log(`Starting local SVM payload executor against ${rpcUrl}`); 
+tick(); +setInterval(tick, 1000); +NODE + + LOCAL_SVM_PAYLOAD_EXECUTOR_PID=$! + log_ok "Started local SVM payload executor (pid $LOCAL_SVM_PAYLOAD_EXECUTOR_PID, log: $executor_log)" +} + step_run_sdk_quick_testing_outbound_svm() { local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/svm/outbound" local quick_files=( "cea-to-eoa.spec.ts" - "cea-to-uea.spec.ts" ) local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" local svm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/svm-client.ts" @@ -1365,6 +2204,8 @@ step_run_sdk_quick_testing_outbound_svm() { step_fund_uea_prc20 sdk_sync_localnet_constants + step_sync_svm_gateway_tss + step_fund_svm_ceas for file in "${quick_files[@]}"; do full_path="$outbound_dir/$file" @@ -1397,10 +2238,14 @@ step_run_sdk_quick_testing_outbound_svm() { full_path="$outbound_dir/$file" log_info "Running SDK outbound SVM test: $file" local rel_pattern="${full_path##*/packages/core/}" + start_local_svm_payload_executor + trap stop_local_svm_payload_executor RETURN ( cd "$PUSH_CHAIN_SDK_DIR" npx nx test core --runInBand --testPathPattern="$rel_pattern" ) + stop_local_svm_payload_executor + trap - RETURN done log_ok "Completed quick-testing-outbound-svm SDK E2E tests" @@ -1490,34 +2335,43 @@ step_devnet() { local devnet_sepolia_start="" devnet_arbitrum_start="" devnet_base_start="" devnet_bsc_start="" devnet_solana_start="" - if ! 
is_local_testing_env; then - require_cmd curl jq - local _fetch_block - _fetch_block() { - local label="$1" rpc_url="$2" - local response hex_block decimal_block - response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ - -H 'Content-Type: application/json' \ - --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" - hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" - if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then - decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" - [[ "$decimal_block" =~ ^[0-9]+$ ]] && { printf "%s" "$decimal_block"; return 0; } - fi - log_warn "Could not read block number for $label from $rpc_url; event_start_from will not be set" >&2 - printf "%s" "" - } - _fetch_solana_slot() { - local rpc_url="$1" - local slot response - response="$(curl -sS --max-time 15 -X POST "$rpc_url" -H 'Content-Type: application/json' \ - --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' 2>/dev/null || true)" - slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" - slot="$(echo "$slot" | tr -d '[:space:]')" - [[ "$slot" =~ ^[0-9]+$ ]] && { printf "%s" "$slot"; return 0; } - log_warn "Could not read Solana slot from $rpc_url; event_start_from will not be set" >&2 - printf "%s" "" - } + require_cmd curl jq + local _fetch_block _fetch_solana_slot + _fetch_block() { + local label="$1" rpc_url="$2" + local response hex_block decimal_block + response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" + hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then + decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" + [[ 
"$decimal_block" =~ ^[0-9]+$ ]] && { printf "%s" "$decimal_block"; return 0; } + fi + log_warn "Could not read block number for $label from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + _fetch_solana_slot() { + local rpc_url="$1" + local slot response + response="$(curl -sS --max-time 15 -X POST "$rpc_url" -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' 2>/dev/null || true)" + slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + slot="$(echo "$slot" | tr -d '[:space:]')" + [[ "$slot" =~ ^[0-9]+$ ]] && { printf "%s" "$slot"; return 0; } + log_warn "Could not read Solana slot from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + + if is_local_testing_env; then + log_info "Fetching latest block/slot numbers from local fork RPCs for devnet startup" + devnet_sepolia_start="$(_fetch_block "sepolia" "$sepolia_rpc_override")" + devnet_arbitrum_start="$(_fetch_block "arbitrum" "$arbitrum_rpc_override")" + devnet_base_start="$(_fetch_block "base" "$base_rpc_override")" + devnet_bsc_start="$(_fetch_block "bsc" "$bsc_rpc_override")" + devnet_solana_start="$(_fetch_solana_slot "$solana_rpc_override")" + log_ok "Devnet event_start_from: sepolia=${devnet_sepolia_start:-n/a} arbitrum=${devnet_arbitrum_start:-n/a} base=${devnet_base_start:-n/a} bsc=${devnet_bsc_start:-n/a} solana=${devnet_solana_start:-n/a}" + else log_info "Fetching latest block/slot numbers from public chain RPCs for devnet startup" devnet_sepolia_start="$(_fetch_block "sepolia" "$sepolia_rpc_override")" devnet_arbitrum_start="$(_fetch_block "arbitrum" "$arbitrum_rpc_override")" @@ -1762,7 +2616,7 @@ step_setup_environment() { } start_surfpool() { - local surfpool_pattern="surfpool start --port 8899 --network devnet" + local surfpool_pattern="surfpool start .*--port 8899" if pgrep -f "$surfpool_pattern" >/dev/null 2>&1; then log_info "Stopping existing surfpool 
on port 8899" @@ -1770,8 +2624,28 @@ step_setup_environment() { sleep 1 fi - log_info "Starting surfpool for local Solana testing on port 8899" - start_detached_process "$LOG_DIR/surfpool.log" surfpool start --port 8899 --network devnet + local pid + while IFS= read -r pid; do + [[ -n "$pid" ]] || continue + log_info "Stopping process $pid on port 8899 before starting surfpool" + kill "$pid" >/dev/null 2>&1 || true + done < <(lsof -ti tcp:8899 2>/dev/null || true) + + local _w=0 + while lsof -ti tcp:8899 >/dev/null 2>&1; do + if [[ $_w -ge 8 ]]; then + lsof -ti tcp:8899 2>/dev/null | xargs kill -9 2>/dev/null || true + sleep 1 + break + fi + sleep 1 + _w=$(( _w + 1 )) + done + + log_info "Starting ephemeral surfpool for local Solana testing on port 8899" + mkdir -p "$LOG_DIR/surfpool-internal" + start_detached_process "$LOG_DIR/surfpool.log" \ + surfpool start --port 8899 --network devnet --db :memory: --surfnet-id push-chain-e2e-local --no-tui --no-studio --log-path "$LOG_DIR/surfpool-internal" } wait_for_solana_slot() { @@ -1962,6 +2836,16 @@ step_stop_running_nodes() { log_ok "Running nodes stopped" } +step_reset_local_native_data() { + if ! is_local_testing_env; then + return 0 + fi + + log_info "Resetting local-native devnet data for a fresh LOCAL setup" + rm -rf "$LOCAL_DEVNET_DIR/data" + log_ok "Removed local-native data directory" +} + step_fund_uv_broadcasters_on_anvil() { if ! is_local_testing_env; then log_info "step_fund_uv_broadcasters_on_anvil: skipping (non-LOCAL environment)" @@ -2149,6 +3033,332 @@ print(format(x, '064x') + format(y, '064x')) log_ok "Vault TSS sync complete" } +step_sync_svm_gateway_tss() { + if ! is_local_testing_env; then + log_info "step_sync_svm_gateway_tss: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd jq python3 cast node + ensure_e2e_testnet_donut_configs + + local chain_cfg="$TOKENS_CONFIG_DIR/solana_devnet/chain.json" + if [[ ! 
-f "$chain_cfg" ]]; then + log_warn "step_sync_svm_gateway_tss: no Solana chain config at $chain_cfg, skipping" + return 0 + fi + + local gateway rpc + gateway="$(jq -r '.gateway_address // empty' "$chain_cfg")" + rpc="${SOLANA_RPC_URL:-${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}}}" + + if [[ -z "$gateway" ]]; then + log_warn "step_sync_svm_gateway_tss: no Solana gateway_address in $chain_cfg, skipping" + return 0 + fi + + local tss_pubkey tss_addr + tss_pubkey="$("$PUSH_CHAIN_DIR/build/pchaind" query utss current-key \ + --node tcp://127.0.0.1:26657 --output json 2>/dev/null \ + | jq -r '.key.tss_pubkey // empty' 2>/dev/null || true)" + + if [[ -z "$tss_pubkey" ]]; then + log_warn "step_sync_svm_gateway_tss: TSS key not found on chain yet, skipping" + return 0 + fi + + local uncompressed_hex + uncompressed_hex="$(python3 -c " +prefix = int('${tss_pubkey:0:2}', 16) +x = int('${tss_pubkey:2}', 16) +p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F +y_sq = (pow(x, 3, p) + 7) % p +y = pow(y_sq, (p + 1) // 4, p) +if (y % 2) != (prefix % 2): + y = p - y +print(format(x, '064x') + format(y, '064x')) +" 2>/dev/null || true)" + + if [[ -z "$uncompressed_hex" ]]; then + log_warn "step_sync_svm_gateway_tss: failed to decompress TSS pubkey, skipping" + return 0 + fi + + local keccak_hash + keccak_hash="$(cast keccak "0x$uncompressed_hex" 2>/dev/null || true)" + tss_addr="0x${keccak_hash: -40}" + + if [[ -z "$tss_addr" || ${#tss_addr} -ne 42 ]]; then + log_warn "step_sync_svm_gateway_tss: failed to derive TSS EVM address, skipping" + return 0 + fi + + if [[ ! 
-d "$PUSH_CHAIN_SDK_DIR/node_modules/@solana/web3.js" ]]; then + log_err "step_sync_svm_gateway_tss: @solana/web3.js missing in $PUSH_CHAIN_SDK_DIR; run setup-sdk first" + exit 1 + fi + + log_info "Syncing Solana gateway TSS PDA to $tss_addr on $rpc" + + ( + cd "$PUSH_CHAIN_SDK_DIR" + SVM_RPC_URL="$rpc" \ + SVM_GATEWAY_PROGRAM_ID="$gateway" \ + SVM_TSS_ETH_ADDRESS="$tss_addr" \ + node <<'NODE' +const { Connection, PublicKey } = require('@solana/web3.js'); + +async function main() { + const rpc = process.env.SVM_RPC_URL; + const programId = new PublicKey(process.env.SVM_GATEWAY_PROGRAM_ID); + const tssBytes = Buffer.from(process.env.SVM_TSS_ETH_ADDRESS.replace(/^0x/, ''), 'hex'); + if (tssBytes.length !== 20) { + throw new Error(`Invalid TSS ETH address: ${process.env.SVM_TSS_ETH_ADDRESS}`); + } + + const connection = new Connection(rpc, 'confirmed'); + const [tssPda] = PublicKey.findProgramAddressSync([Buffer.from('tsspda_v2')], programId); + const account = await connection.getAccountInfo(tssPda); + if (!account) { + throw new Error(`Solana gateway TSS PDA ${tssPda.toBase58()} is not initialized`); + } + + const data = Buffer.from(account.data); + const current = `0x${data.subarray(8, 28).toString('hex')}`; + const desired = `0x${tssBytes.toString('hex')}`; + const chainLen = data.readUInt32LE(28); + const chainId = data.subarray(32, 32 + chainLen).toString('utf8'); + + if (current.toLowerCase() === desired.toLowerCase()) { + console.log(`Solana gateway TSS already matches ${desired} (chain_id=${chainId})`); + return; + } + + tssBytes.copy(data, 8); + const snapshot = { + lamports: account.lamports, + owner: account.owner.toBase58(), + executable: account.executable, + rentEpoch: 0, + data: data.toString('hex'), + parsedData: null, + }; + + const response = await fetch(rpc, { + method: 'POST', + headers: { 'content-type': 'application/json' }, + body: JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'surfnet_setAccount', + params: [tssPda.toBase58(), 
snapshot], + }), + }); + const payload = await response.json(); + if (payload.error) { + throw new Error(`surfnet_setAccount failed: ${payload.error.message || JSON.stringify(payload.error)}`); + } + + const updated = await connection.getAccountInfo(tssPda); + const updatedAddress = `0x${Buffer.from(updated.data).subarray(8, 28).toString('hex')}`; + if (updatedAddress.toLowerCase() !== desired.toLowerCase()) { + throw new Error(`Solana gateway TSS verification failed: expected ${desired}, got ${updatedAddress}`); + } + + console.log(`Solana gateway TSS updated ${current} -> ${desired} (chain_id=${chainId})`); +} + +main().catch((err) => { + console.error(err && err.message ? err.message : String(err)); + process.exit(1); +}); +NODE + ) + + log_ok "Solana gateway TSS sync complete" +} + +step_fund_svm_ceas() { + if ! is_local_testing_env; then + log_info "step_fund_svm_ceas: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd jq node cast + ensure_e2e_testnet_donut_configs + + if [[ ! 
-d "$PUSH_CHAIN_SDK_DIR/node_modules/@solana/web3.js" ]]; then + log_err "step_fund_svm_ceas: @solana/web3.js missing in $PUSH_CHAIN_SDK_DIR; run setup-sdk first" + exit 1 + fi + + local chain_cfg="$TOKENS_CONFIG_DIR/solana_devnet/chain.json" + local usdt_cfg="$TOKENS_CONFIG_DIR/solana_devnet/tokens/usdt.json" + local gateway usdt_mint rpc + gateway="$(jq -r '.gateway_address // empty' "$chain_cfg" 2>/dev/null || true)" + usdt_mint="$(jq -r '.address // empty' "$usdt_cfg" 2>/dev/null || true)" + rpc="${SOLANA_RPC_URL:-${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}}}" + + if [[ -z "$gateway" || -z "$usdt_mint" ]]; then + log_warn "step_fund_svm_ceas: missing Solana gateway or USDT mint config, skipping" + return 0 + fi + + local addresses=() + local pk addr + for pk in "${PUSH_PRIVATE_KEY:-}" "${EVM_PRIVATE_KEY:-}"; do + [[ -n "$pk" ]] || continue + addr="$(cast wallet address "$pk" 2>/dev/null || true)" + [[ -n "$addr" ]] && addresses+=("$addr") + done + + if [[ -n "${EVM_PRIVATE_KEY:-}" ]]; then + local evm_signer_addr uea_addr + evm_signer_addr="$(cast wallet address "$EVM_PRIVATE_KEY" 2>/dev/null || true)" + if validate_eth_address "$evm_signer_addr"; then + uea_addr="$(cast call "0x00000000000000000000000000000000000000eA" "computeUEA((string,string,bytes))(address)" \ + "(eip155,11155111,$evm_signer_addr)" \ + --rpc-url "$PUSH_RPC_URL" 2>/dev/null | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + [[ -n "$uea_addr" ]] && addresses+=("$uea_addr") + fi + fi + + if [[ "${#addresses[@]}" -eq 0 ]]; then + log_warn "step_fund_svm_ceas: no PUSH_PRIVATE_KEY/EVM_PRIVATE_KEY addresses available, skipping" + return 0 + fi + + log_info "Funding local Solana CEAs for SVM outbound tests on $rpc" + + ( + cd "$PUSH_CHAIN_SDK_DIR" + SVM_RPC_URL="$rpc" \ + SVM_GATEWAY_PROGRAM_ID="$gateway" \ + SVM_USDT_MINT="$usdt_mint" \ + SVM_EVM_ADDRESSES="$(IFS=,; echo "${addresses[*]}")" \ + SVM_CEA_SOL_LAMPORTS="${SVM_CEA_SOL_LAMPORTS:-1000000000}" \ + 
SVM_CEA_USDT_AMOUNT="${SVM_CEA_USDT_AMOUNT:-1000000000}" \ + node <<'NODE' +const { Connection, PublicKey, SystemProgram } = require('@solana/web3.js'); + +const TOKEN_PROGRAM_ID = new PublicKey('TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA'); +const ASSOCIATED_TOKEN_PROGRAM_ID = new PublicKey('ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL'); +const TOKEN_ACCOUNT_RENT_LAMPORTS = 2039280; + +function u64le(value) { + const out = Buffer.alloc(8); + out.writeBigUInt64LE(BigInt(value), 0); + return out; +} + +function tokenAccountData(mint, owner, amount) { + const data = Buffer.alloc(165); + mint.toBuffer().copy(data, 0); + owner.toBuffer().copy(data, 32); + u64le(amount).copy(data, 64); + data[108] = 1; // AccountState::Initialized + return data; +} + +function associatedTokenAddress(owner, mint) { + return PublicKey.findProgramAddressSync( + [owner.toBuffer(), TOKEN_PROGRAM_ID.toBuffer(), mint.toBuffer()], + ASSOCIATED_TOKEN_PROGRAM_ID + )[0]; +} + +async function surfnetSetAccount(rpc, pubkey, snapshot) { + const response = await fetch(rpc, { + method: 'POST', + headers: { 'content-type': 'application/json' }, + body: JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'surfnet_setAccount', + params: [pubkey.toBase58(), snapshot], + }), + }); + const payload = await response.json(); + if (payload.error) { + throw new Error(`surfnet_setAccount(${pubkey.toBase58()}) failed: ${payload.error.message || JSON.stringify(payload.error)}`); + } +} + +function tokenAmountFromAccount(account) { + if (!account || account.data.length < 72) return BigInt(0); + return account.data.readBigUInt64LE(64); +} + +async function main() { + const rpc = process.env.SVM_RPC_URL; + const connection = new Connection(rpc, 'confirmed'); + const programId = new PublicKey(process.env.SVM_GATEWAY_PROGRAM_ID); + const usdtMint = new PublicKey(process.env.SVM_USDT_MINT); + const desiredSolLamports = BigInt(process.env.SVM_CEA_SOL_LAMPORTS || '1000000000'); + const desiredUsdtAmount = 
BigInt(process.env.SVM_CEA_USDT_AMOUNT || '1000000000'); + const evmAddresses = [...new Set((process.env.SVM_EVM_ADDRESSES || '').split(',').filter(Boolean).map((addr) => addr.toLowerCase()))]; + + const [vault] = PublicKey.findProgramAddressSync([Buffer.from('vault')], programId); + const vaultAta = associatedTokenAddress(vault, usdtMint); + const existingVaultAta = await connection.getAccountInfo(vaultAta); + await surfnetSetAccount(rpc, vaultAta, { + lamports: Math.max(existingVaultAta?.lamports || 0, TOKEN_ACCOUNT_RENT_LAMPORTS), + owner: TOKEN_PROGRAM_ID.toBase58(), + executable: false, + rentEpoch: 0, + data: tokenAccountData(usdtMint, vault, tokenAmountFromAccount(existingVaultAta)).toString('hex'), + parsedData: null, + }); + console.log(`Ensured vault USDT ATA ${vaultAta.toBase58()}`); + + for (const evmAddress of evmAddresses) { + const evmBytes = Buffer.from(evmAddress.replace(/^0x/, ''), 'hex'); + if (evmBytes.length !== 20) { + throw new Error(`Invalid EVM address for CEA derivation: ${evmAddress}`); + } + + const [cea] = PublicKey.findProgramAddressSync( + [Buffer.from('push_identity'), evmBytes], + programId + ); + + const existingCea = await connection.getAccountInfo(cea); + const currentLamports = BigInt(existingCea?.lamports || 0); + const nextLamports = currentLamports > desiredSolLamports ? currentLamports : desiredSolLamports; + await surfnetSetAccount(rpc, cea, { + lamports: Number(nextLamports), + owner: SystemProgram.programId.toBase58(), + executable: false, + rentEpoch: 0, + data: '', + parsedData: null, + }); + + const ceaAta = associatedTokenAddress(cea, usdtMint); + const existingCeaAta = await connection.getAccountInfo(ceaAta); + const currentUsdt = tokenAmountFromAccount(existingCeaAta); + const nextUsdt = currentUsdt > desiredUsdtAmount ? 
currentUsdt : desiredUsdtAmount; + await surfnetSetAccount(rpc, ceaAta, { + lamports: Math.max(existingCeaAta?.lamports || 0, TOKEN_ACCOUNT_RENT_LAMPORTS), + owner: TOKEN_PROGRAM_ID.toBase58(), + executable: false, + rentEpoch: 0, + data: tokenAccountData(usdtMint, cea, nextUsdt).toString('hex'), + parsedData: null, + }); + + console.log(`Funded CEA ${cea.toBase58()} for ${evmAddress}: ${nextLamports} lamports, ${nextUsdt} USDT units`); + } +} + +main().catch((err) => { + console.error(err && err.message ? err.message : String(err)); + process.exit(1); +}); +NODE + ) + + log_ok "SVM CEA funding complete" +} + step_print_genesis() { require_cmd jq local accounts_json @@ -2365,6 +3575,93 @@ step_setup_core_contracts() { log_ok "Core contracts setup complete" } +step_deploy_local_sol_usdt_prc20() { + require_cmd forge cast jq + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + if ! is_local_testing_env; then + log_info "Skipping local Solana USDT PRC20 deployment for non-LOCAL environment" + return 0 + fi + + ensure_deploy_file + + local existing_addr existing_code + existing_addr="$(address_from_deploy_token "USDT.sol")" + if validate_eth_address "$existing_addr"; then + existing_code="$(cast code "$existing_addr" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + if [[ -n "$existing_code" && "$existing_code" != "0x" ]]; then + log_ok "Local Solana USDT PRC20 already deployed: $existing_addr" + return 0 + fi + fi + + local owner_addr="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" + local universal_core="0x00000000000000000000000000000000000000C0" + local impl_out proxy_out impl_addr proxy_addr init_data + + log_info "Deploying local Solana USDT PRC20 for SPL Route 3 tests" + if ! 
impl_out="$( + cd "$CORE_CONTRACTS_DIR" + forge create src/PRC20.sol:PRC20 \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" 2>&1 + )"; then + log_err "Failed to deploy USDT.sol PRC20 implementation" + echo "$impl_out" + exit 1 + fi + impl_addr="$(echo "$impl_out" | awk '/Deployed to:/ {print $3; exit}')" + if ! validate_eth_address "$impl_addr"; then + log_err "Could not parse PRC20 implementation address for USDT.sol" + echo "$impl_out" + exit 1 + fi + + init_data="$(cast calldata 'initialize(string,string,uint8,string,uint8,address,string)' \ + 'USDT.sol' \ + 'USDT.sol' \ + 6 \ + 'solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1' \ + 2 \ + "$universal_core" \ + "$LOCAL_SOLANA_USDT_MINT")" + + if ! proxy_out="$( + cd "$CORE_CONTRACTS_DIR" + forge create lib/openzeppelin-contracts/contracts/proxy/transparent/TransparentUpgradeableProxy.sol:TransparentUpgradeableProxy \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --constructor-args "$impl_addr" "$owner_addr" "$init_data" 2>&1 + )"; then + log_err "Failed to deploy USDT.sol PRC20 proxy" + echo "$proxy_out" + exit 1 + fi + proxy_addr="$(echo "$proxy_out" | awk '/Deployed to:/ {print $3; exit}')" + if ! validate_eth_address "$proxy_addr"; then + log_err "Could not parse USDT.sol proxy address" + echo "$proxy_out" + exit 1 + fi + + local mint_out + if ! 
mint_out="$(cast send "$universal_core" 'mintPRCTokensviaAdmin(address,uint256,address)' \ + "$proxy_addr" "$LOCAL_SOLANA_USDT_INITIAL_SUPPLY" "$owner_addr" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" 2>&1)"; then + log_err "Failed to mint local USDT.sol PRC20 supply to owner" + echo "$mint_out" + exit 1 + fi + + record_token "USDT.sol" "USDT.sol" "$proxy_addr" "e2e-local" + enrich_core_token_decimals + log_ok "Deployed local Solana USDT PRC20: $proxy_addr" +} + find_first_address_with_keywords() { local log_file="$1" shift @@ -2840,6 +4137,16 @@ step_setup_gateway() { done fi + # Surfpool fees are negligible and the SVM broadcaster uses its own compute + # budget. Keep local Solana gas quotes small so repeated Route 3 tests do not + # drain the tiny WPC/pSOL AMM pool before the relay observes the event. + if is_local_testing_env; then + cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' \ + "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "$LOCAL_SVM_OUTBOUND_BASE_GAS_LIMIT" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + fi + log_ok "Gateway setup complete" } @@ -2953,6 +4260,15 @@ step_add_uregistry_configs() { log_info "Adding token config to uregistry: $(basename "$token_file") (from $token_symbol)" run_registry_tx "token" "$token_payload" + if is_local_testing_env && + [[ "$(echo "$token_payload" | jq -r '.chain // ""')" == "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" ]] && + [[ "$(echo "$token_payload" | jq -r '.address // ""')" == "11111111111111111111111111111111" ]]; then + local native_alias_payload + native_alias_payload="$(echo "$token_payload" | jq -c '.address = "0x0"')" + log_info "Adding local Solana native SOL alias to uregistry: 0x0" + run_registry_tx "token" "$native_alias_payload" + fi + submitted_files+="$token_file"$'\n' matched_count=$((matched_count + 1)) done < <(jq -c '.tokens[]?' 
"$DEPLOY_ADDRESSES_FILE") @@ -3161,10 +4477,17 @@ step_create_all_wpc_pools() { continue fi - log_info "Creating ${token_symbol}/WPC pool with liquidity" + local pool_token_amount="1" + local pool_wpc_amount="4" + if [[ "$token_symbol" == "pSOL" ]]; then + pool_token_amount="${LOCAL_PSOL_POOL_TOKEN_AMOUNT:-50}" + pool_wpc_amount="${LOCAL_PSOL_POOL_WPC_AMOUNT:-200}" + fi + + log_info "Creating ${token_symbol}/WPC pool with liquidity (${pool_token_amount}/${pool_wpc_amount})" ( cd "$SWAP_AMM_DIR" - node scripts/pool-manager.js create-pool "$token_addr" "$wpc_addr" 4 500 true 1 4 + node scripts/pool-manager.js create-pool "$token_addr" "$wpc_addr" 4 500 true "$pool_token_amount" "$pool_wpc_amount" ) done < <(jq -r '.tokens[]? | [.symbol, .address] | @tsv' "$DEPLOY_ADDRESSES_FILE") @@ -3231,25 +4554,60 @@ step_configure_universal_core() { } step_deploy_counter_and_sync_sdk() { - require_cmd cast perl + require_cmd cast forge perl [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } local sdk_counter_addr_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/push-chain/helpers/addresses.ts" - local 
counter_creation_code="0x6080604052348015600e575f5ffd5b506102068061001c5f395ff3fe608060405260043610610042575f3560e01c806312065fe01461004d5780639b0e94af14610077578063d09de08a146100a1578063d826f88f146100ab57610049565b3661004957005b5f5ffd5b348015610058575f5ffd5b506100616100c1565b60405161006e9190610157565b60405180910390f35b348015610082575f5ffd5b5061008b6100c8565b6040516100989190610157565b60405180910390f35b6100a96100cd565b005b3480156100b6575f5ffd5b506100bf610137565b005b5f47905090565b5f5481565b60015f5f8282546100de919061019d565b925050819055503373ffffffffffffffffffffffffffffffffffffffff165f547fb6aa5bfdc1ab753194658fada8fa1725a667cdea7df54bd400f8bced617dfd4c3460405161012d9190610157565b60405180910390a3565b5f5f81905550565b5f819050919050565b6101518161013f565b82525050565b5f60208201905061016a5f830184610148565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f6101a78261013f565b91506101b28361013f565b92508282019050808211156101ca576101c9610170565b5b9291505056fea26469706673582212204acec08331d08192e4797fc12653c602c2ca1574d44468713f91a095fdefe6d564736f6c634300081e0033" if [[ ! 
-f "$sdk_counter_addr_file" ]]; then log_err "SDK counter addresses file not found: $sdk_counter_addr_file" exit 1 fi + local local_counter_dir="$PUSH_CHAIN_DIR/e2e-tests/.pchain/local-counter" + local local_counter_file="$local_counter_dir/CounterPayable.sol" + mkdir -p "$local_counter_dir" + cat >"$local_counter_file" <<'SOL' +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +contract Counter { + uint256 public countPC; + event CountIncremented(uint256 indexed countPC, address indexed caller, uint256 value); + + function increment() public payable { + countPC += 1; + emit CountIncremented(countPC, msg.sender, msg.value); + } + + function reset() public { + countPC = 0; + } + + function executeUniversalTx( + string calldata, + bytes calldata, + bytes calldata, + uint256, + address, + bytes32 + ) external { + countPC += 1; + emit CountIncremented(countPC, msg.sender, 0); + } + + receive() external payable {} +} +SOL + log_info "Deploying CounterPayable contract on Push localnet" local deploy_out counter_addr deploy_attempt deploy_attempt=1 deploy_out="" while [[ "$deploy_attempt" -le 5 ]]; do - deploy_out="$(cast send --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --create "$counter_creation_code" 2>&1)" || true - counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')" + deploy_out="$(forge create "$local_counter_file:Counter" --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --broadcast 2>&1)" || true + counter_addr="$(echo "$deploy_out" | awk '/Deployed to:/ {print $3; exit} /contractAddress/ {print $2; exit}')" if validate_eth_address "$counter_addr"; then break fi @@ -3260,7 +4618,7 @@ step_deploy_counter_and_sync_sdk() { sleep 2 done - counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')" + counter_addr="$(echo "$deploy_out" | awk '/Deployed to:/ {print $3; exit} /contractAddress/ {print $2; exit}')" if ! 
validate_eth_address "$counter_addr"; then log_err "Could not parse deployed counter contract address from cast output" echo "$deploy_out" @@ -3398,16 +4756,29 @@ NODE cmd_all() { reset_e2e_testnet_donut_configs step_setup_environment - (cd "$PUSH_CHAIN_DIR" && make replace-addresses) - (cd "$PUSH_CHAIN_DIR" && make build) + step_backup_local_replace_addresses_sources + if ! (cd "$PUSH_CHAIN_DIR" && make replace-addresses); then + step_restore_local_replace_addresses_sources + return 1 + fi + step_patch_local_svm_broadcaster_for_build + if ! (cd "$PUSH_CHAIN_DIR" && make build); then + step_restore_local_svm_broadcaster_after_build + step_restore_local_replace_addresses_sources + return 1 + fi + step_restore_local_svm_broadcaster_after_build + step_restore_local_replace_addresses_sources step_update_env_fund_to_address step_stop_running_nodes + step_reset_local_native_data step_devnet step_ensure_tss_key_ready step_setup_environment step_recover_genesis_key step_fund_account step_setup_core_contracts + step_deploy_local_sol_usdt_prc20 step_setup_swap_amm step_sync_test_addresses step_create_all_wpc_pools @@ -3420,7 +4791,10 @@ cmd_all() { step_clone_push_chain_sdk step_deploy_counter_and_sync_sdk sdk_sync_localnet_constants + sdk_patch_local_svm_outbound_execution step_sync_vault_tss_on_anvil + step_sync_svm_gateway_tss + step_fund_svm_ceas } cmd_show_help() { @@ -3434,6 +4808,7 @@ Commands: recover-genesis-key Recover genesis key into local keyring fund Fund FUND_TO_ADDRESS from genesis key setup-core Clone/build/setup core contracts (auto resume on failure) + deploy-sol-usdt-prc20 Deploy local Solana USDT PRC20 used by SVM SPL tests setup-swap Clone/install/deploy swap AMM contracts sync-addresses Apply deploy_addresses.json into test-addresses.json create-pool Create WPC pools for all deployed core tokens @@ -3444,6 +4819,8 @@ Commands: update-token-config Update eth_sepolia_eth.json contract_address using deployed token setup-gateway Clone/setup gateway repo 
and run forge localSetup (with --resume retry) sync-vault-tss Grant TSS_ROLE on each Anvil EVM vault to the current local TSS key (LOCAL only) + sync-svm-gateway-tss Sync local Surfpool Solana gateway TSS PDA to the current local TSS key (LOCAL only) + fund-svm-cea Fund local Surfpool SVM CEA SOL/USDT balances used by outbound SVM tests (LOCAL only) bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE clone-sdk Clone/update push-chain-sdk repo only (no env/deps setup) @@ -3452,7 +4829,7 @@ Commands: sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) quick-testing-outbound Run quick-testing-outbound-evm, then quick-testing-outbound-svm quick-testing-outbound-evm Run setup-sdk + fund-uea-prc20, then execute EVM outbound cea-to-eoa.spec.ts and cea-to-uea.spec.ts only - quick-testing-outbound-svm Run setup-sdk + fund-uea-prc20, then execute SVM outbound cea-to-eoa.spec.ts and cea-to-uea.spec.ts only + quick-testing-outbound-svm Run setup-sdk + fund-uea-prc20, then execute SVM outbound cea-to-eoa.spec.ts only quick-testing-inbound-evm Run uea-to-push.spec.ts on local Push Chain for Ethereum Sepolia origin only sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts sdk-test-send-to-self Run send-to-self.spec.ts @@ -3480,10 +4857,11 @@ Important env: LOCAL_SEPOLIA_UV_RPC_URL=http://localhost:9545 LOCAL_ARBITRUM_UV_RPC_URL=http://localhost:9546 LOCAL_BASE_UV_RPC_URL=http://localhost:9547 - LOCAL_BSC_UV_RPC_URL=http://localhost:9548 - SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 - LOCAL_SOLANA_UV_RPC_URL=http://localhost:8899 - ETHEREUM_SEPOLIA_RPC_URL=https://... 
+ LOCAL_BSC_UV_RPC_URL=http://localhost:9548 + SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 + LOCAL_SOLANA_UV_RPC_URL=http://localhost:8899 + ALLOW_LOCAL_SVM_GO_BUILD_PATCH=true Opt back into temporary SVM Go source patching before make build + ETHEREUM_SEPOLIA_RPC_URL=https://... ARBITRUM_SEPOLIA_RPC_URL=https://... BASE_SEPOLIA_RPC_URL=https://... BSC_TESTNET_RPC_URL=https://... @@ -3502,6 +4880,7 @@ main() { recover-genesis-key) step_recover_genesis_key ;; fund) step_fund_account ;; setup-core) step_setup_core_contracts ;; + deploy-sol-usdt-prc20) step_deploy_local_sol_usdt_prc20 ;; setup-swap) step_setup_swap_amm ;; sync-addresses) step_sync_test_addresses ;; create-pool) step_create_all_wpc_pools ;; @@ -3512,6 +4891,8 @@ main() { update-token-config) step_update_deployed_token_configs ;; setup-gateway) step_setup_gateway ;; sync-vault-tss) step_sync_vault_tss_on_anvil ;; + sync-svm-gateway-tss) step_sync_svm_gateway_tss ;; + fund-svm-cea) step_fund_svm_ceas ;; bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; clone-sdk) step_clone_push_chain_sdk ;; diff --git a/local-native/scripts/setup-universal.sh b/local-native/scripts/setup-universal.sh index 7c557266..ef70fd98 100755 --- a/local-native/scripts/setup-universal.sh +++ b/local-native/scripts/setup-universal.sh @@ -9,6 +9,12 @@ source "$SCRIPT_DIR/env.sh" UNIVERSAL_ID=${UNIVERSAL_ID:-1} # HOME_DIR will be set after we set HOME env var +# Deterministic local SVM relayer used by the local-native devnet. The relayer +# pays the Solana transaction fee for SVM outbound broadcasts. 
+DEFAULT_SOLANA_RELAYER_PUBKEY="AdWDRaQfvWJqW4TaxTrXP5WogCWJMJBrtBfGjjHUDADM" +DEFAULT_SOLANA_RELAYER_KEYPAIR_JSON='[226,7,176,193,18,2,55,106,191,150,176,87,157,216,118,97,236,128,2,104,181,206,160,147,5,152,0,115,23,8,103,189,143,19,31,194,227,248,222,123,219,13,143,47,154,104,201,235,13,16,11,45,117,154,117,37,130,196,58,154,89,228,136,32]' +DEFAULT_SOLANA_RELAYER_UNIVERSAL_ID="" + # Ports case "$UNIVERSAL_ID" in 1) CORE_GRPC_PORT=9090; QUERY_PORT=8080; CORE_RPC_PORT=26657 ;; @@ -71,6 +77,44 @@ HOME_DIR="$UV_HOME/.puniversal" "$PUNIVERSALD_BIN" init +provision_svm_relayer_keypair() { + local relayer_dir="$HOME_DIR/relayer" + local key_path="$relayer_dir/solana.json" + local keypair_json="${SOLANA_RELAYER_KEYPAIR_JSON:-$DEFAULT_SOLANA_RELAYER_KEYPAIR_JSON}" + + mkdir -p "$relayer_dir" + printf '%s\n' "$keypair_json" > "$key_path" + chmod 600 "$key_path" + echo "✅ Provisioned Solana relayer keypair: $key_path" +} + +fund_default_svm_relayer() { + local rpc_url="${SOLANA_RPC_URL_OVERRIDE:-${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-}}}" + local lamports="${SOLANA_RELAYER_AIRDROP_LAMPORTS:-10000000000}" + local relayer_pubkey="${SOLANA_RELAYER_PUBKEY:-$DEFAULT_SOLANA_RELAYER_PUBKEY}" + local response="" + + [ -n "$rpc_url" ] || return 0 + + response=$(curl -sS --max-time 10 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"requestAirdrop\",\"params\":[\"$relayer_pubkey\",$lamports]}" 2>/dev/null || true) + + if echo "$response" | jq -e '.result // empty' >/dev/null 2>&1; then + echo "✅ Requested Solana relayer airdrop for $relayer_pubkey via $rpc_url" + elif [ -n "$response" ]; then + echo "⚠️ Solana relayer airdrop was not accepted by $rpc_url: $response" + fi +} + +configured_solana_relayer_universal_id="${SOLANA_RELAYER_UNIVERSAL_ID:-$DEFAULT_SOLANA_RELAYER_UNIVERSAL_ID}" +if [ -z "$configured_solana_relayer_universal_id" ] || [ "$configured_solana_relayer_universal_id" = 
"$UNIVERSAL_ID" ]; then + provision_svm_relayer_keypair + fund_default_svm_relayer +else + echo "ℹ️ Skipping Solana relayer keypair on universal validator $UNIVERSAL_ID; relayer owner is universal validator $configured_solana_relayer_universal_id" +fi + # Update config jq --arg grpc "$CORE_GRPC" '.push_chain_grpc_urls = [$grpc] | .keyring_backend = "test"' \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \