From 2339d47a221f6e7dd78af74c0b6e1daad8ce4ab8 Mon Sep 17 00:00:00 2001 From: Rembrandt Kuipers <50174308+RembrandtK@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:26:19 +0000 Subject: [PATCH 1/4] refactor: reorganize repo into containers/ and compose profiles - Move container dirs into containers/ grouped by domain (core/, indexer/, indexing-payments/, query-payments/, oracles/, ui/) - Remove git submodules and overrides system - Add compose/dev/ overlays with descriptive profiles - Add REO eligibility oracle and real IISA scoring service - Simplify Dockerfiles to wrapper-only model - Default config works out of the box without local builds --- .env | 35 +- .gitignore | 5 +- .gitmodules | 7 - README.md | 93 +++-- compose/dev/README.md | 35 ++ compose/dev/dipper.yaml | 17 + compose/dev/eligibility-oracle.yaml | 17 + compose/dev/graph-contracts.yaml | 20 + .../dev/graph-node.yaml | 15 +- compose/dev/iisa.yaml | 13 + compose/dev/indexer-agent.yaml | 19 + compose/dev/indexer-service.yaml | 16 + compose/dev/tap-agent.yaml | 16 + config/README.md | 95 ----- config/local/.gitkeep | 1 - config/shared/lib.sh | 15 - {chain => containers/core/chain}/Dockerfile | 0 {chain => containers/core/chain}/run.sh | 0 .../core/gateway}/Dockerfile | 0 {gateway => containers/core/gateway}/run.sh | 9 +- .../core/graph-contracts}/Dockerfile | 0 containers/core/graph-contracts/run.sh | 350 ++++++++++++++++++ .../core/postgres}/setup.sql | 0 .../core/subgraph-deploy}/Dockerfile | 0 .../core/subgraph-deploy}/run.sh | 0 .../indexer/graph-node}/Dockerfile | 0 .../indexer/graph-node/dev}/Dockerfile | 0 .../indexer/graph-node/dev}/run-override.sh | 0 .../indexer/graph-node}/run.sh | 0 .../indexer/indexer-agent}/Dockerfile | 0 .../indexer-agent/dev}/run-override.sh | 0 .../indexer/indexer-agent}/run.sh | 0 containers/indexer/indexer-service/Dockerfile | 13 + .../indexer/indexer-service}/run.sh | 0 .../indexer/start-indexing}/Dockerfile | 0 .../indexer/start-indexing}/run.sh | 51 ++- 
.../indexing-payments/dipper/Dockerfile | 16 + .../indexing-payments/dipper}/run.sh | 31 +- .../indexing-payments/iisa-mock/Dockerfile | 0 .../indexing-payments/iisa-mock/iisa_mock.py | 0 .../indexing-payments/iisa/Dockerfile.scoring | 11 + containers/indexing-payments/iisa/scoring.py | 175 +++++++++ .../indexing-payments/iisa/seed_scores.json | 26 ++ .../oracles/block-oracle}/Dockerfile | 0 .../oracles/block-oracle}/run.sh | 2 +- .../eligibility-oracle-node/Dockerfile | 40 ++ .../oracles/eligibility-oracle-node/run.sh | 116 ++++++ .../query-payments/tap-agent/Dockerfile | 13 + .../query-payments/tap-agent/run.sh | 0 .../query-payments/tap-aggregator}/Dockerfile | 0 .../query-payments/tap-aggregator}/run.sh | 0 .../tap-escrow-manager}/Dockerfile | 0 .../query-payments/tap-escrow-manager}/run.sh | 0 containers/shared/lib.sh | 36 ++ .../ui/block-explorer}/Dockerfile | 0 dipper/Dockerfile | 53 --- dipper/source | 1 - docker-compose.yaml | 177 +++++++-- graph-contracts/run.sh | 192 ---------- indexer-service/Dockerfile | 53 --- indexer-service/Dockerfile.tap-agent | 51 --- indexer-service/source | 1 - overrides/README.md | 127 ------- .../graph-contracts/graph-contracts-dev.yaml | 11 - overrides/graph-node-dev.sh | 9 - .../indexer-agent-dev/indexer-agent-dev.yaml | 11 - overrides/indexing-payments/README.md | 120 ------ .../indexing-payments/docker-compose.yaml | 56 --- overrides/indexing-payments/start.sh | 28 -- scripts/build-with-overrides.sh | 5 - scripts/clean.sh | 53 --- scripts/dipper-cli.sh | 9 +- scripts/mine-block.sh | 16 +- scripts/reo-config.sh | 128 +++++++ scripts/start-with-overrides.sh | 4 - scripts/test-indexer-agent.sh | 9 +- 76 files changed, 1411 insertions(+), 1011 deletions(-) delete mode 100644 .gitmodules create mode 100644 compose/dev/README.md create mode 100644 compose/dev/dipper.yaml create mode 100644 compose/dev/eligibility-oracle.yaml create mode 100644 compose/dev/graph-contracts.yaml rename overrides/graph-node-dev/graph-node-dev.yaml 
=> compose/dev/graph-node.yaml (53%) create mode 100644 compose/dev/iisa.yaml create mode 100644 compose/dev/indexer-agent.yaml create mode 100644 compose/dev/indexer-service.yaml create mode 100644 compose/dev/tap-agent.yaml delete mode 100644 config/README.md delete mode 100644 config/local/.gitkeep delete mode 100644 config/shared/lib.sh rename {chain => containers/core/chain}/Dockerfile (100%) rename {chain => containers/core/chain}/run.sh (100%) rename {gateway => containers/core/gateway}/Dockerfile (100%) rename {gateway => containers/core/gateway}/run.sh (87%) rename {graph-contracts => containers/core/graph-contracts}/Dockerfile (100%) create mode 100644 containers/core/graph-contracts/run.sh rename {postgres => containers/core/postgres}/setup.sql (100%) rename {subgraph-deploy => containers/core/subgraph-deploy}/Dockerfile (100%) rename {subgraph-deploy => containers/core/subgraph-deploy}/run.sh (100%) rename {graph-node => containers/indexer/graph-node}/Dockerfile (100%) rename {overrides/graph-node-dev => containers/indexer/graph-node/dev}/Dockerfile (100%) rename {overrides/graph-node-dev => containers/indexer/graph-node/dev}/run-override.sh (100%) rename {graph-node => containers/indexer/graph-node}/run.sh (100%) rename {indexer-agent => containers/indexer/indexer-agent}/Dockerfile (100%) rename {overrides/indexer-agent-dev => containers/indexer/indexer-agent/dev}/run-override.sh (100%) rename {indexer-agent => containers/indexer/indexer-agent}/run.sh (100%) create mode 100644 containers/indexer/indexer-service/Dockerfile rename {indexer-service => containers/indexer/indexer-service}/run.sh (100%) rename {start-indexing => containers/indexer/start-indexing}/Dockerfile (100%) rename {start-indexing => containers/indexer/start-indexing}/run.sh (61%) create mode 100644 containers/indexing-payments/dipper/Dockerfile rename {dipper => containers/indexing-payments/dipper}/run.sh (62%) rename {overrides => containers}/indexing-payments/iisa-mock/Dockerfile 
(100%) rename {overrides => containers}/indexing-payments/iisa-mock/iisa_mock.py (100%) create mode 100644 containers/indexing-payments/iisa/Dockerfile.scoring create mode 100644 containers/indexing-payments/iisa/scoring.py create mode 100644 containers/indexing-payments/iisa/seed_scores.json rename {block-oracle => containers/oracles/block-oracle}/Dockerfile (100%) rename {block-oracle => containers/oracles/block-oracle}/run.sh (96%) create mode 100644 containers/oracles/eligibility-oracle-node/Dockerfile create mode 100644 containers/oracles/eligibility-oracle-node/run.sh create mode 100644 containers/query-payments/tap-agent/Dockerfile rename indexer-service/run-tap-agent.sh => containers/query-payments/tap-agent/run.sh (100%) rename {tap-aggregator => containers/query-payments/tap-aggregator}/Dockerfile (100%) rename {tap-aggregator => containers/query-payments/tap-aggregator}/run.sh (100%) rename {tap-escrow-manager => containers/query-payments/tap-escrow-manager}/Dockerfile (100%) rename {tap-escrow-manager => containers/query-payments/tap-escrow-manager}/run.sh (100%) create mode 100644 containers/shared/lib.sh rename {block-explorer => containers/ui/block-explorer}/Dockerfile (100%) delete mode 100644 dipper/Dockerfile delete mode 160000 dipper/source delete mode 100644 graph-contracts/run.sh delete mode 100644 indexer-service/Dockerfile delete mode 100644 indexer-service/Dockerfile.tap-agent delete mode 160000 indexer-service/source delete mode 100644 overrides/README.md delete mode 100644 overrides/graph-contracts/graph-contracts-dev.yaml delete mode 100755 overrides/graph-node-dev.sh delete mode 100644 overrides/indexer-agent-dev/indexer-agent-dev.yaml delete mode 100644 overrides/indexing-payments/README.md delete mode 100644 overrides/indexing-payments/docker-compose.yaml delete mode 100755 overrides/indexing-payments/start.sh delete mode 100755 scripts/build-with-overrides.sh delete mode 100755 scripts/clean.sh create mode 100755 scripts/reo-config.sh 
delete mode 100755 scripts/start-with-overrides.sh diff --git a/.env b/.env index 129e684..d691898 100644 --- a/.env +++ b/.env @@ -13,20 +13,45 @@ # devcontainer environments to set *_HOST env vars (e.g. CHAIN_HOST=chain) # to reach services on the Docker network instead of localhost. +# --- Service profiles --- +# Controls which optional service groups are started. +# Available profiles: +# block-oracle epoch block oracle +# explorer block explorer UI +# rewards-eligibility REO eligibility oracle node +# indexing-payments dipper + iisa (requires GHCR auth — see README) +# Default: profiles that work out of the box. +COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer +# All profiles (indexing-payments requires GHCR auth — see README): +#COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer,indexing-payments + +# --- Dev overrides --- +# Uncomment and extend to build services from local source. +# See compose/dev/README.md for available overrides. +#COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-node.yaml + # indexer components versions GRAPH_NODE_VERSION=v0.37.0 INDEXER_AGENT_VERSION=v0.25.4 INDEXER_SERVICE_RS_VERSION=v1.8.0 INDEXER_TAP_AGENT_VERSION=v1.12.2 +# indexing-payments image versions (requires GHCR auth — see README) +# Set real tags in .env.local when enabling the indexing-payments profile. 
+DIPPER_VERSION=sha-24d10d4 +IISA_VERSION= + # gateway components versions GATEWAY_COMMIT=b37acb4976313316a2bc0a488ca98749da51c61d TAP_AGGREGATOR_VERSION=sha-d38d0b9 TAP_ESCROW_MANAGER_COMMIT=530a5a72da7592b8d442b94d82a5a5f57d4a2b40 +# eligibility oracle (clone-and-build — requires published repo) +ELIGIBILITY_ORACLE_COMMIT=84710857394d3419f83dcbf6687a91f415cc1625 + # network components versions BLOCK_ORACLE_COMMIT=3a3a425ff96130c3842cee7e43d06bbe3d729aed -CONTRACTS_COMMIT=908bc32597fc946d5f76ad64eef41fedded82e70 +CONTRACTS_COMMIT=0003fe3adf7394df5c9ac1cf4ef600c96a61259f NETWORK_SUBGRAPH_COMMIT=5b6c22089a2e55db16586a19cbf6e1d73a93c7b9 TAP_CONTRACTS_COMMIT=e3351e70b3e5d9821bc0aaa90bb2173ca2a77af7 TAP_SUBGRAPH_COMMIT=cf7279f60433bf9a9d897ec2548c13c0607234cc @@ -97,5 +122,13 @@ RECEIVER_SECRET="0x2ee789a68207020b45607f5adb71933de0946baebbaaab74af7cbd69c8a90 SUBGRAPH="BFr2mx7FgkJ36Y6pE5BiXs1KmNUmVDCnL82KUSdcLW1g" SUBGRAPH_2="9p1TRzaccKzWBN4P6YEwEUxYwJn6HwPxf5dKXK2NYxgS" +# REO (Rewards Eligibility Oracle) +# Set to 1 to deploy and configure the REO contract (Phase 4). Unset or 0 to skip. 
+REO_ENABLED=1 +# eligibilityPeriod: how long an indexer stays eligible after renewal (seconds) +REO_ELIGIBILITY_PERIOD=300 +# oracleUpdateTimeout: fail-safe — if no oracle update for this long, all indexers eligible (seconds) +REO_ORACLE_UPDATE_TIMEOUT=86400 + # Gateway GATEWAY_API_KEY="deadbeefdeadbeefdeadbeefdeadbeef" diff --git a/.gitignore b/.gitignore index 4e194b2..9a4e456 100644 --- a/.gitignore +++ b/.gitignore @@ -17,5 +17,8 @@ Thumbs.db *.swo *~ -# Local config files +# Rust build artifacts +tests/target/ + +# Legacy local config directory (now uses config-local Docker volume) config/local/ diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index a35dcd8..0000000 --- a/.gitmodules +++ /dev/null @@ -1,7 +0,0 @@ -[submodule "indexer-service/source"] - path = indexer-service/source - url = git@github.com:graphprotocol/indexer-rs.git -[submodule "dipper/source"] - path = dipper/source - url = git@github.com:edgeandnode/dipper.git - branch = main diff --git a/README.md b/README.md index 225a469..41e2abe 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Epochs are set up to be 554 blocks long, use `scripts/mine-block.sh` to advance ## Usage -Requires Docker & Docker Compose. Install foundry on the host for mining blocks. +Requires Docker & Docker Compose v2.24+. Install foundry on the host for mining blocks. ```bash # Start (or resume) the network — skips already-completed setup steps @@ -25,11 +25,22 @@ restarts where it left off. Use `down -v` only when you want a clean slate. Add `--build` to rebuild after changes to Docker build context, including modifying `run.sh` or `Dockerfile`, or changed source code. +## Local Overrides + +Create `.env.local` (gitignored) to override defaults without touching `.env`: + +```bash +# .env.local — your local settings +COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer,indexing-payments +GRAPH_NODE_VERSION=v0.38.0-rc1 +``` + +Host scripts source `.env.local` automatically after `.env`. 
+ ## Useful commands - `docker compose up -d --build ${service}` — rebuild a single service after code changes - `docker compose logs -f ${service}` -- `scripts/clean.sh` — interactive cleanup (volumes + generated config) - `source .env` ## Components @@ -188,63 +199,67 @@ docker exec -it redpanda rpk topic consume gateway_client_query_results --broker } ``` -## Optional: Indexing Payments +## Service Profiles -To enable payments for indexing work (alternative to TAP allocations): +Optional services are controlled via `COMPOSE_PROFILES` in `.env`. +By default, profiles that work out of the box are enabled: ```bash -# Start with indexing payments -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up - -# Or use helper script -./overrides/indexing-payments/start.sh +COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer ``` -See [overrides/indexing-payments/README.md](overrides/indexing-payments/README.md) for details. +Available profiles: -## Building components from source +| Profile | Services | Prerequisites | +| --------------------- | --------------------------------- | -------------------------- | +| `block-oracle` | block-oracle | none | +| `explorer` | block-explorer UI | none | +| `rewards-eligibility` | eligibility-oracle-node | none (clones from GitHub) | +| `indexing-payments` | dipper, iisa, iisa-scoring | GHCR auth (below) | -### docker compose overrides +To enable all profiles, uncomment the full line in `.env`: -The following components allow building from source by overriding `docker-compose.yml`: +```bash +COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer,indexing-payments +``` -- graph-node -- graph-contracts (contracts) -- indexer-agent +### GHCR authentication (indexing-payments) -Please refer to `overrides/README.md` for instructions. +The `indexing-payments` profile pulls private images from `ghcr.io/edgeandnode`. 
+Create a GitHub **classic** Personal Access Token with `read:packages` scope +(https://github.com/settings/tokens — fine-grained tokens do not support packages) and log in once: -### git submodules source +```bash +echo $GITHUB_TOKEN | docker login ghcr.io -u YOUR_USERNAME --password-stdin +``` -The following components allow building from source by cloning them with submodules: +Then set the image versions in `.env` or `.env.local`: -- indexer-service -- tap-agent +```bash +DIPPER_VERSION= +IISA_VERSION= +``` -Building from source requires the Git submodules to be initialized first: +## Building Components from Source -- `git submodule update --init --recursive` +### Dev overrides (compose/dev/) -And then select the `wrapper-dev` target when building the Docker image in the `docker-compose.yaml` file. +For local development, mount locally-built binaries into running containers. +Set `COMPOSE_FILE` in `.env` to include dev override files: -```diff - indexer-service: - container_name: indexer-service - build: { -- target: "wrapper", # Set to "wrapper-dev" for building from source -+ target: "wrapper-dev", # Set to "wrapper-dev" for building from source - context: indexer-service, - } +```bash +# Mount local indexer-service binary +INDEXER_SERVICE_BINARY=/path/to/indexer-rs/target/release/indexer-service-rs +COMPOSE_FILE=docker-compose.yaml:compose/dev/indexer-service.yaml - tap-agent: - container_name: tap-agent - build: { -- target: "wrapper", # Set to "wrapper-dev" for building from source -+ target: "wrapper-dev", # Set to "wrapper-dev" for building from source - context: tap-agent, - } +# Multiple overrides +COMPOSE_FILE=docker-compose.yaml:compose/dev/indexer-service.yaml:compose/dev/tap-agent.yaml ``` +Each override requires a binary path env var. Source repos own their own build; +local-network just wraps the published image with `run.sh` and utilities. +See [compose/dev/README.md](compose/dev/README.md) for details. 
+ ## Common issues ### `too far behind` diff --git a/compose/dev/README.md b/compose/dev/README.md new file mode 100644 index 0000000..b21b5cc --- /dev/null +++ b/compose/dev/README.md @@ -0,0 +1,35 @@ +# Dev Overrides + +Compose override files for local development. Most mount a locally-built binary +into the running container, avoiding full image rebuilds. + +## Usage + +Set `COMPOSE_FILE` in `.env` (or `.env.local`) to include the override: + +```bash +COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-node.yaml +``` + +Chain multiple overrides: + +```bash +COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-node.yaml:compose/dev/indexer-agent.yaml +``` + +Then `docker compose up -d` applies the overrides automatically. + +## Available Overrides + +| File | Service | Required Env Var | +| ------------------------- | -------------------------------- | ------------------------------------------------------ | +| `graph-node.yaml` | graph-node | `GRAPH_NODE_SOURCE_ROOT` | +| `graph-contracts.yaml` | graph-contracts, subgraph-deploy | `CONTRACTS_SOURCE_ROOT`, `GRAPH_CONTRACTS_SOURCE_ROOT` | +| `indexer-agent.yaml` | indexer-agent | `INDEXER_AGENT_SOURCE_ROOT` | +| `indexer-service.yaml` | indexer-service | `INDEXER_SERVICE_BINARY` | +| `tap-agent.yaml` | tap-agent | `TAP_AGENT_BINARY` | +| `eligibility-oracle.yaml` | eligibility-oracle-node | `REO_BINARY` | +| `dipper.yaml` | dipper | `DIPPER_BINARY` | +| `iisa.yaml` | iisa | `IISA_VERSION=local` | + +See each file's header comments for details. diff --git a/compose/dev/dipper.yaml b/compose/dev/dipper.yaml new file mode 100644 index 0000000..823e44c --- /dev/null +++ b/compose/dev/dipper.yaml @@ -0,0 +1,17 @@ +# Dipper Dev Override +# Mounts a locally-built binary for WIP development (skip image rebuild). 
+# +# Set DIPPER_BINARY to the path of the locally-built binary, e.g.: +# DIPPER_BINARY=/path/to/dipper/target/release/dipper-service +# +# Build the binary locally first: +# cargo build --release --bin dipper-service +# +# Activate via COMPOSE_FILE in .env (requires indexing-payments profile): +# COMPOSE_PROFILES=indexing-payments,block-oracle +# COMPOSE_FILE=docker-compose.yaml:compose/dev/dipper.yaml + +services: + dipper: + volumes: + - ${DIPPER_BINARY:?Set DIPPER_BINARY to locally-built dipper-service binary}:/usr/local/bin/dipper-service:ro diff --git a/compose/dev/eligibility-oracle.yaml b/compose/dev/eligibility-oracle.yaml new file mode 100644 index 0000000..032ef55 --- /dev/null +++ b/compose/dev/eligibility-oracle.yaml @@ -0,0 +1,17 @@ +# Eligibility Oracle Dev Override +# Mounts a locally-built binary for WIP development (skip image rebuild). +# +# Set REO_BINARY to the path of the locally-built binary, e.g.: +# REO_BINARY=/git/local/eligibility-oracle-node/eligibility-oracle-node/target/release/eligibility-oracle +# +# Build the binary locally first: +# cargo build --release -p eligibility-oracle +# +# Activate via COMPOSE_FILE in .env (requires rewards-eligibility profile): +# COMPOSE_PROFILES=rewards-eligibility +# COMPOSE_FILE=docker-compose.yaml:compose/dev/eligibility-oracle.yaml + +services: + eligibility-oracle-node: + volumes: + - ${REO_BINARY:?Set REO_BINARY to locally-built eligibility-oracle binary}:/usr/local/bin/eligibility-oracle:ro diff --git a/compose/dev/graph-contracts.yaml b/compose/dev/graph-contracts.yaml new file mode 100644 index 0000000..6218c66 --- /dev/null +++ b/compose/dev/graph-contracts.yaml @@ -0,0 +1,20 @@ +# Graph Contracts Dev Override +# Mounts local contracts repo for WIP development (skip image rebuild). +# +# Set CONTRACTS_SOURCE_ROOT to the local contracts repo path, e.g.: +# CONTRACTS_SOURCE_ROOT=/git/graphprotocol/contracts/post-audit +# The repo must have pnpm install and pnpm build already run. 
+# +# Set GRAPH_CONTRACTS_SOURCE_ROOT to the local graph-network-subgraph repo, e.g.: +# GRAPH_CONTRACTS_SOURCE_ROOT=/git/graphprotocol/graph-network-subgraph +# +# Activate via COMPOSE_FILE in .env: +# COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-contracts.yaml + +services: + graph-contracts: + volumes: + - ${CONTRACTS_SOURCE_ROOT:?Set CONTRACTS_SOURCE_ROOT to local contracts repo}:/opt/contracts + subgraph-deploy: + volumes: + - ${GRAPH_CONTRACTS_SOURCE_ROOT:?Set GRAPH_CONTRACTS_SOURCE_ROOT to local graph-network-subgraph repo}:/opt/graph-network-subgraph diff --git a/overrides/graph-node-dev/graph-node-dev.yaml b/compose/dev/graph-node.yaml similarity index 53% rename from overrides/graph-node-dev/graph-node-dev.yaml rename to compose/dev/graph-node.yaml index 7e19897..29b2c78 100644 --- a/overrides/graph-node-dev/graph-node-dev.yaml +++ b/compose/dev/graph-node.yaml @@ -1,11 +1,20 @@ +# Graph Node Dev Override +# Builds graph-node from local source with optional gdb debugging. +# +# Set GRAPH_NODE_SOURCE_ROOT to the local repo path, e.g.: +# GRAPH_NODE_SOURCE_ROOT=/path/to/graph-node +# +# Activate via COMPOSE_FILE in .env: +# COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-node.yaml + services: graph-node: entrypoint: bash -cl /opt/run-override.sh - build: - context: "./overrides/graph-node-dev" + build: + context: "./containers/indexer/graph-node/dev" dockerfile: Dockerfile volumes: - - ./overrides/graph-node-dev/run-override.sh:/opt/run-override.sh:ro + - ./containers/indexer/graph-node/dev/run-override.sh:/opt/run-override.sh:ro - /tmp/graph-node-cargo-home:/tmp/graph-node-cargo-home - /tmp/graph-node-docker-build:/tmp/graph-node-docker-build - ${GRAPH_NODE_SOURCE_ROOT}:/opt/graph-node-source-root diff --git a/compose/dev/iisa.yaml b/compose/dev/iisa.yaml new file mode 100644 index 0000000..a6c4e57 --- /dev/null +++ b/compose/dev/iisa.yaml @@ -0,0 +1,13 @@ +# IISA Dev Override +# Uses a locally-built image instead of pulling from GHCR. 
+# +# Build the image in the IISA repo first: +# cd /path/to/subgraph-dips-indexer-selection && docker compose build +# +# Then set IISA_VERSION=local in .env and activate via COMPOSE_FILE: +# COMPOSE_PROFILES=indexing-payments,block-oracle +# COMPOSE_FILE=docker-compose.yaml:compose/dev/iisa.yaml + +services: + iisa: + image: ghcr.io/edgeandnode/subgraph-dips-indexer-selection:local diff --git a/compose/dev/indexer-agent.yaml b/compose/dev/indexer-agent.yaml new file mode 100644 index 0000000..c3135c0 --- /dev/null +++ b/compose/dev/indexer-agent.yaml @@ -0,0 +1,19 @@ +# Indexer Agent Dev Override +# Mounts local indexer-agent source for hot-reload development. +# +# Set INDEXER_AGENT_SOURCE_ROOT to the local repo path, e.g.: +# INDEXER_AGENT_SOURCE_ROOT=$HOME/Development/en/indexer +# +# Activate via COMPOSE_FILE in .env: +# COMPOSE_FILE=docker-compose.yaml:compose/dev/indexer-agent.yaml + +services: + indexer-agent: + entrypoint: bash -cl /opt/run-override.sh + ports: + - "${INDEXER_MANAGEMENT}:7600" + # Nodejs debugger + - 9230:9230 + volumes: + - ./containers/indexer/indexer-agent/dev/run-override.sh:/opt/run-override.sh:ro + - ${INDEXER_AGENT_SOURCE_ROOT}:/opt/indexer-agent-source-root diff --git a/compose/dev/indexer-service.yaml b/compose/dev/indexer-service.yaml new file mode 100644 index 0000000..b6a9bcd --- /dev/null +++ b/compose/dev/indexer-service.yaml @@ -0,0 +1,16 @@ +# Indexer Service Dev Override +# Mounts a locally-built binary for WIP development (skip image rebuild). 
+# +# Set INDEXER_SERVICE_BINARY to the path of the locally-built binary, e.g.: +# INDEXER_SERVICE_BINARY=/path/to/indexer-rs/target/release/indexer-service-rs +# +# Build the binary locally first: +# cargo build --release --bin indexer-service-rs +# +# Activate via COMPOSE_FILE in .env: +# COMPOSE_FILE=docker-compose.yaml:compose/dev/indexer-service.yaml + +services: + indexer-service: + volumes: + - ${INDEXER_SERVICE_BINARY:?Set INDEXER_SERVICE_BINARY to locally-built indexer-service-rs binary}:/usr/local/bin/indexer-service-rs:ro diff --git a/compose/dev/tap-agent.yaml b/compose/dev/tap-agent.yaml new file mode 100644 index 0000000..fd0afd6 --- /dev/null +++ b/compose/dev/tap-agent.yaml @@ -0,0 +1,16 @@ +# TAP Agent Dev Override +# Mounts a locally-built binary for WIP development (skip image rebuild). +# +# Set TAP_AGENT_BINARY to the path of the locally-built binary, e.g.: +# TAP_AGENT_BINARY=/path/to/indexer-rs/target/release/indexer-tap-agent +# +# Build the binary locally first: +# cargo build --release --bin indexer-tap-agent +# +# Activate via COMPOSE_FILE in .env: +# COMPOSE_FILE=docker-compose.yaml:compose/dev/tap-agent.yaml + +services: + tap-agent: + volumes: + - ${TAP_AGENT_BINARY:?Set TAP_AGENT_BINARY to locally-built indexer-tap-agent binary}:/usr/local/bin/indexer-tap-agent:ro diff --git a/config/README.md b/config/README.md deleted file mode 100644 index abb2df1..0000000 --- a/config/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# Config Directory - -Management of configuration files for the local-network Docker Compose stack. - -## Structure - -``` -config/ -├── shared/ # Shared utilities (committed to git) -│ └── lib.sh # Shell functions (require_jq, etc.) 
-├── local/ # Working copies (gitignored, auto-generated) -│ ├── horizon.json -│ ├── subgraph-service.json -│ ├── tap-contracts.json -│ └── block-oracle.json -└── README.md -``` - -## How It Works - -### Mount Layout - -Services receive two directory mounts plus a direct `.env` mount: - -| Host path | Container path | Mode | Contents | -| ---------------- | ------------------ | ----------------------------------- | -------------------------------------------------- | -| `config/shared/` | `/opt/shared/` | `:ro` | Git-tracked utilities (`lib.sh`) | -| `.env` | `/opt/config/.env` | `:ro` | Environment variables (ports, mnemonics, versions) | -| `config/local/` | `/opt/config/` | `:ro` (`:rw` for `graph-contracts`) | Generated contract address files | - -Scripts source the env file and shared library: - -```sh -. /opt/config/.env -. /opt/shared/lib.sh -token=$(require_jq '."1337".L2GraphToken.address' /opt/config/horizon.json) -``` - -Note: `env_file` cannot be used because several binaries (graph-node, indexer-service-rs) -interpret environment variables as configuration flags, causing collisions with `.env` entries. 
- -### Contract Deployment (`graph-contracts` service) - -- Creates empty config files on first run (if they don't exist) -- Deploys Graph protocol smart contracts (Horizon) using Hardhat Ignition -- Deploys TAP contracts (Escrow, TAPVerifier, AllocationIDTracker) using Forge -- Deploys DataEdge contract -- Writes contract addresses to `local/horizon.json`, `local/subgraph-service.json`, `local/tap-contracts.json`, and `local/block-oracle.json` - -## Files - -### `shared/lib.sh` - -- **Committed to git** -- **Contains:** Shared shell functions (`require_jq`) -- **Mounted in:** Services as `/opt/shared/lib.sh:ro` - -### `local/horizon.json` - -- **Generated by:** `graph-contracts` -- **Contains:** Horizon protocol contract addresses -- **Mounted in:** Services as `/opt/config/horizon.json:ro` - -### `local/subgraph-service.json` - -- **Generated by:** `graph-contracts` -- **Contains:** Subgraph service contract addresses -- **Mounted in:** Services as `/opt/config/subgraph-service.json:ro` - -### `local/tap-contracts.json` - -- **Generated by:** `graph-contracts` -- **Contains:** TAP contract addresses (Escrow, TAPVerifier, AllocationIDTracker) -- **Mounted in:** Services as `/opt/config/tap-contracts.json:ro` - -### `local/block-oracle.json` - -- **Generated by:** `graph-contracts` -- **Contains:** DataEdge contract address -- **Mounted in:** Services as `/opt/config/block-oracle.json:ro` - -## Cleaning - -To remove generated config files and reset the environment: - -```bash -./scripts/clean.sh -``` - -This will: - -1. Stop and remove all containers -2. Optionally remove Docker volumes (postgres data, etc.) -3. **Remove all files in `config/local/`** -4. 
Optionally remove Docker images diff --git a/config/local/.gitkeep b/config/local/.gitkeep deleted file mode 100644 index 9d18667..0000000 --- a/config/local/.gitkeep +++ /dev/null @@ -1 +0,0 @@ -# This file ensures the dir is kept by git (with no contents) diff --git a/config/shared/lib.sh b/config/shared/lib.sh deleted file mode 100644 index 800f747..0000000 --- a/config/shared/lib.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -# Shared shell utilities for local-network services - -require_jq() { - _val=$(jq -r "$1 // empty" "$2") - if [ -z "$_val" ]; then - echo "Error: $1 not found in $2" >&2 - exit 1 - fi - printf '%s' "$_val" -} - -contract_addr() { - require_jq ".\"1337\".$1" "/opt/config/$2.json" -} diff --git a/chain/Dockerfile b/containers/core/chain/Dockerfile similarity index 100% rename from chain/Dockerfile rename to containers/core/chain/Dockerfile diff --git a/chain/run.sh b/containers/core/chain/run.sh similarity index 100% rename from chain/run.sh rename to containers/core/chain/run.sh diff --git a/gateway/Dockerfile b/containers/core/gateway/Dockerfile similarity index 100% rename from gateway/Dockerfile rename to containers/core/gateway/Dockerfile diff --git a/gateway/run.sh b/containers/core/gateway/run.sh similarity index 87% rename from gateway/run.sh rename to containers/core/gateway/run.sh index 4230abd..bc4afa3 100755 --- a/gateway/run.sh +++ b/containers/core/gateway/run.sh @@ -10,10 +10,11 @@ tap_verifier=$(contract_addr TAPVerifier tap-contracts) dispute_manager=$(contract_addr DisputeManager.address subgraph-service) legacy_dispute_manager=$(contract_addr LegacyDisputeManager.address subgraph-service) subgraph_service=$(contract_addr SubgraphService.address subgraph-service) -network_subgraph_deployment=$(curl -s "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" \ - -H 'content-type: application/json' \ - -d '{"query": "{ _meta { deployment } }" }' \ - | jq -r '.data._meta.deployment') +echo "Waiting for 
network subgraph..." >&2 +network_subgraph_deployment=$(wait_for_gql \ + "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" \ + "{ _meta { deployment } }" \ + ".data._meta.deployment") cat >config.json <<-EOF { "attestations": { diff --git a/graph-contracts/Dockerfile b/containers/core/graph-contracts/Dockerfile similarity index 100% rename from graph-contracts/Dockerfile rename to containers/core/graph-contracts/Dockerfile diff --git a/containers/core/graph-contracts/run.sh b/containers/core/graph-contracts/run.sh new file mode 100644 index 0000000..79a0e54 --- /dev/null +++ b/containers/core/graph-contracts/run.sh @@ -0,0 +1,350 @@ +#!/bin/bash +set -eu +. /opt/config/.env +. /opt/shared/lib.sh + +# -- Ensure config files exist (empty JSON on first run) -- +for f in horizon.json subgraph-service.json issuance.json tap-contracts.json block-oracle.json; do + [ -f "/opt/config/$f" ] || echo '{}' > "/opt/config/$f" +done + +# -- Symlink Hardhat address books to config directory -- +# Hardhat reads/writes addresses-local-network.json; symlinks let those +# writes land in /opt/config/ without individual Docker file mounts. 
+ln -sf /opt/config/horizon.json /opt/contracts/packages/horizon/addresses-local-network.json +ln -sf /opt/config/subgraph-service.json /opt/contracts/packages/subgraph-service/addresses-local-network.json +ln -sf /opt/config/issuance.json /opt/contracts/packages/issuance/addresses-local-network.json + +# ============================================================ +# Phase 1: Graph protocol contracts +# ============================================================ +echo "==== Phase 1: Graph protocol contracts ====" + +# -- Helper: ensure DisputeManager registered in Controller -- +ensure_dispute_manager_registered() { + controller_address=$(jq -r '.["1337"].Controller.address // empty' /opt/config/horizon.json) + dispute_manager_address=$(jq -r '.["1337"].DisputeManager.address // empty' /opt/config/subgraph-service.json) + + if [ -z "$controller_address" ] || [ -z "$dispute_manager_address" ]; then + echo "Controller or DisputeManager address not found, skipping registration" + return + fi + + dispute_manager_id=$(cast keccak256 "DisputeManager") + current_proxy=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${controller_address}" "getContractProxy(bytes32)(address)" "${dispute_manager_id}" 2>/dev/null || echo "0x") + + current_proxy_lower=$(echo "$current_proxy" | tr '[:upper:]' '[:lower:]') + dispute_manager_lower=$(echo "$dispute_manager_address" | tr '[:upper:]' '[:lower:]') + + if [ "$current_proxy_lower" = "$dispute_manager_lower" ]; then + echo "DisputeManager already registered in Controller: ${dispute_manager_address}" + else + echo "Registering Horizon DisputeManager in Controller..." 
+ echo " Controller: ${controller_address}" + echo " DisputeManager: ${dispute_manager_address}" + echo " Current proxy: ${current_proxy}" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --private-key="${ACCOUNT1_SECRET}" \ + "${controller_address}" "setContractProxy(bytes32,address)" "${dispute_manager_id}" "${dispute_manager_address}" + fi +} + +# -- Idempotency check -- +phase1_skip=false +l2_graph_token=$(jq -r '.["1337"].L2GraphToken.address // empty' /opt/config/horizon.json 2>/dev/null || true) +if [ -n "$l2_graph_token" ]; then + code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$l2_graph_token" 2>/dev/null || echo "0x") + if [ "$code_check" != "0x" ]; then + echo "Graph protocol contracts already deployed (L2GraphToken at $l2_graph_token)" + ensure_dispute_manager_registered + echo "SKIP: Phase 1" + phase1_skip=true + else + echo "Contract addresses in horizon.json are stale (no code at $l2_graph_token), redeploying..." + fi +fi + +if [ "$phase1_skip" = "false" ]; then + echo "Deploying new version of the protocol" + cd /opt/contracts/packages/subgraph-service + npx hardhat deploy:protocol --network localNetwork --subgraph-service-config localNetwork + + # Add legacy contract stubs (gateway needs these) + TEMP_JSON=$(jq '.["1337"] += { + "LegacyServiceRegistry": {"address": "0x0000000000000000000000000000000000000000"}, + "LegacyDisputeManager": {"address": "0x0000000000000000000000000000000000000000"} + }' addresses-local-network.json) + printf '%s\n' "$TEMP_JSON" > addresses-local-network.json + + ensure_dispute_manager_registered +fi + +# -- Set issuance to 100 GRT/block for meaningful reward testing -- +rewards_manager=$(jq -r '.["1337"].RewardsManager.address // empty' /opt/config/horizon.json) +if [ -n "$rewards_manager" ]; then + target_issuance="100000000000000000000" # 100 GRT in wei + current_issuance=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${rewards_manager}" 
"issuancePerBlock()(uint256)" 2>/dev/null | awk '{print $1}') + if [ "$current_issuance" = "$target_issuance" ]; then + echo " issuancePerBlock already set to 100 GRT" + else + echo " Setting issuancePerBlock to 100 GRT (was ${current_issuance})" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 \ + --private-key="${ACCOUNT1_SECRET}" \ + "${rewards_manager}" "setIssuancePerBlock(uint256)" "${target_issuance}" + fi +fi + +echo "==== Phase 1 complete ====" + +# ============================================================ +# Phase 2: TAP contracts +# ============================================================ +echo "==== Phase 2: TAP contracts ====" + +# -- Idempotency check -- +phase2_skip=false +escrow_address=$(jq -r '."1337".Escrow // empty' /opt/config/tap-contracts.json 2>/dev/null || true) +if [ -n "$escrow_address" ]; then + code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$escrow_address" 2>/dev/null || echo "0x") + if [ "$code_check" != "0x" ]; then + echo "TAP contracts already deployed (Escrow at $escrow_address)" + echo "SKIP: Phase 2" + phase2_skip=true + else + echo "TAP contract addresses are stale (no code at Escrow $escrow_address), redeploying..." 
+ fi +fi + +if [ "$phase2_skip" = "false" ]; then + cd /opt/timeline-aggregation-protocol-contracts + + staking=$(contract_addr HorizonStaking.address horizon) + graph_token=$(contract_addr L2GraphToken.address horizon) + + # Note: forge may output alloy log lines to stdout after the JSON; sed extracts only the JSON object + forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ + src/AllocationIDTracker.sol:AllocationIDTracker \ + | tee allocation_tracker.json + allocation_tracker="$(sed -n '/^{/,/^}/p' allocation_tracker.json | jq -r '.deployedTo')" + + forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ + src/TAPVerifier.sol:TAPVerifier --constructor-args 'TAP' '1' \ + | tee verifier.json + verifier="$(sed -n '/^{/,/^}/p' verifier.json | jq -r '.deployedTo')" + + forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ + src/Escrow.sol:Escrow --constructor-args "${graph_token}" "${staking}" "${verifier}" "${allocation_tracker}" 10 15 \ + | tee escrow.json + escrow="$(sed -n '/^{/,/^}/p' escrow.json | jq -r '.deployedTo')" + + cat > /opt/config/tap-contracts.json <<EOF +{ + "1337": { + "AllocationIDTracker": "$allocation_tracker", + "TAPVerifier": "$verifier", + "Escrow": "$escrow" + } +} +EOF +fi + +echo "==== Phase 2 complete ====" + +# ============================================================ +# Phase 3: DataEdge contract +# ============================================================ +echo "==== Phase 3: DataEdge contract ====" + +# -- Idempotency check -- +phase3_skip=false +data_edge=$(jq -r '."1337".DataEdge // empty' /opt/config/block-oracle.json 2>/dev/null || true) +if [ -n "$data_edge" ]; then + code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$data_edge" 2>/dev/null || echo "0x") + if [ "$code_check" != "0x" ]; then + echo "DataEdge contract already deployed at $data_edge" + echo "SKIP: Phase 3" + 
 phase3_skip=true + else + echo "DataEdge address stale (no code at $data_edge), redeploying..." + fi +fi + +if [ "$phase3_skip" = "false" ]; then + cd /opt/contracts-data-edge/packages/data-edge + export MNEMONIC="${MNEMONIC}" + sed -i "s/myth like bonus scare over problem client lizard pioneer submit female collect/${MNEMONIC}/g" hardhat.config.ts + npx hardhat data-edge:deploy --contract EventfulDataEdge --deploy-name EBO --network ganache | tee deploy.txt + data_edge="$(grep 'contract: ' deploy.txt | awk '{print $3}')" + + echo "=== Data edge deployed at: $data_edge ===" + + cat > /opt/config/block-oracle.json <<ADDR_EOF +{ + "1337": { + "DataEdge": "$data_edge" + } +} +ADDR_EOF + + # Register network in DataEdge + output=$(cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ + "${data_edge}" \ + '0xa1dce3320000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000f030103176569703135353a313333370000000000000000000000000000000000' 2>&1) && exit_code=0 || exit_code=$? + # status captured inline above: under set -eu a failing cast send would exit before a bare exit_code=$?
+ if [ $exit_code -ne 0 ]; then + echo "Error during cast send: $output" | tee -a error.log + else + echo "$output" + fi +fi + +echo "==== Phase 3 complete ====" + +# ============================================================ +# Phase 4: Rewards Eligibility Oracle (REO) +# ============================================================ +if [ "${REO_ENABLED:-0}" != "1" ]; then + echo "==== Phase 4: Rewards Eligibility Oracle (SKIPPED — REO_ENABLED not set) ====" +else +echo "==== Phase 4: Rewards Eligibility Oracle ====" + +# Ensure NetworkOperator in issuance address book (required by configure step) +TEMP_JSON=$(jq --arg op "${ACCOUNT0_ADDRESS}" \ + '.["1337"].NetworkOperator = {"address": $op}' /opt/config/issuance.json) +printf '%s\n' "$TEMP_JSON" > /opt/config/issuance.json + +# -- Idempotency check -- +# The hardhat deploy configure step (04_configure.ts) targets REO_DEFAULTS +# (14d eligibility, 7d timeout) using the GOVERNOR account, which lacks +# OPERATOR_ROLE. run.sh below handles all configuration using ACCOUNT0 +# (OPERATOR). So we only run hardhat deploy for initial deployment; on +# re-runs where the REO proxy already exists on-chain, skip straight to +# the idempotent configuration below. +phase4_deploy_skip=false +reo_address=$(jq -r '.["1337"].RewardsEligibilityOracle.address // empty' /opt/config/issuance.json 2>/dev/null || true) +if [ -n "$reo_address" ]; then + code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$reo_address" 2>/dev/null || echo "0x") + if [ "$code_check" != "0x" ]; then + echo "REO already deployed at $reo_address" + echo "SKIP: hardhat deploy (configuration handled below)" + phase4_deploy_skip=true + else + echo "REO address stale (no code at $reo_address), redeploying..." 
+ fi +fi + +if [ "$phase4_deploy_skip" = "false" ]; then + cd /opt/contracts/packages/deployment + + # Clean any stale governance TX batches from partial runs + rm -rf /opt/contracts/packages/deployment/txs/localNetwork + + # Full REO lifecycle via deployment package tags: + # sync → deploy → configure → transfer → integrate → verify + # Deploy scripts are idempotent (skip if already deployed/configured). + # The mnemonic provides both deployer (ACCOUNT0) and governor (ACCOUNT1), + # so all steps including RM integration execute directly. + # + # Some steps (upgrade) exit with code 1 after saving governance TX batches. + # On localNetwork, the governor key is available so we auto-execute and retry. + export GOVERNOR_KEY="${ACCOUNT1_SECRET}" + for attempt in 1 2 3; do + echo " Deploy attempt $attempt..." + if npx hardhat deploy --tags rewards-eligibility --network localNetwork --skip-prompts; then + break + fi + # Check for pending governance TXs and execute them + if ls /opt/contracts/packages/deployment/txs/localNetwork/*.json 2>/dev/null | grep -qv executed; then + echo " Executing pending governance TXs..." + npx hardhat deploy:execute-governance --network localNetwork || true + else + echo " No governance TXs to execute, deployment failed for another reason" + exit 1 + fi + done + + # Read deployed REO address from issuance address book + reo_address=$(jq -r '.["1337"].RewardsEligibilityOracle.address' /opt/config/issuance.json) +fi + +echo " REO deployed at: $reo_address" + +# Grant ORACLE_ROLE to the REO node signing key (ACCOUNT0). +# OPERATOR_ROLE is the admin for ORACLE_ROLE, and ACCOUNT0 has OPERATOR_ROLE. +# Idempotent: only grants if not already granted. 
+oracle_role=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${reo_address}" "ORACLE_ROLE()(bytes32)") +has_role=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${reo_address}" "hasRole(bytes32,address)(bool)" "${oracle_role}" "${ACCOUNT0_ADDRESS}" 2>/dev/null || echo "false") +if [ "$has_role" = "true" ]; then + echo " ORACLE_ROLE already granted to ${ACCOUNT0_ADDRESS}" +else + echo " Granting ORACLE_ROLE to ${ACCOUNT0_ADDRESS} (via OPERATOR_ROLE)" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 \ + --private-key="${ACCOUNT0_SECRET}" \ + "${reo_address}" "grantRole(bytes32,address)" "${oracle_role}" "${ACCOUNT0_ADDRESS}" +fi + +# Enable eligibility validation (deny-by-default). +# The contract defaults to validation disabled (everyone eligible). For local +# testing we want the realistic deny-by-default behaviour. Idempotent. +# Requires OPERATOR_ROLE (ACCOUNT0). +validation_enabled=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${reo_address}" "getEligibilityValidation()(bool)" 2>/dev/null || echo "false") +if [ "$validation_enabled" = "true" ]; then + echo " Eligibility validation already enabled" +else + echo " Enabling eligibility validation (deny-by-default)" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 \ + --private-key="${ACCOUNT0_SECRET}" \ + "${reo_address}" "setEligibilityValidation(bool)" true +fi + +# Set eligibility period (how long an indexer stays eligible after renewal). +# Contract default is 14 days; local network uses a short value for fast iteration. +# Requires OPERATOR_ROLE (ACCOUNT0). 
+current_period=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${reo_address}" "getEligibilityPeriod()(uint256)" 2>/dev/null | awk '{print $1}') +if [ "$current_period" = "${REO_ELIGIBILITY_PERIOD}" ]; then + echo " Eligibility period already set to ${REO_ELIGIBILITY_PERIOD}s" +else + echo " Setting eligibility period to ${REO_ELIGIBILITY_PERIOD}s (was ${current_period}s)" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 \ + --private-key="${ACCOUNT0_SECRET}" \ + "${reo_address}" "setEligibilityPeriod(uint256)" "${REO_ELIGIBILITY_PERIOD}" +fi + +# Set oracle update timeout (fail-safe: all indexers eligible if no oracle update for this long). +# Contract default is 7 days; local network uses a longer value to avoid accidental fail-safe. +# Requires OPERATOR_ROLE (ACCOUNT0). +current_timeout=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${reo_address}" "getOracleUpdateTimeout()(uint256)" 2>/dev/null | awk '{print $1}') +if [ "$current_timeout" = "${REO_ORACLE_UPDATE_TIMEOUT}" ]; then + echo " Oracle update timeout already set to ${REO_ORACLE_UPDATE_TIMEOUT}s" +else + echo " Setting oracle update timeout to ${REO_ORACLE_UPDATE_TIMEOUT}s (was ${current_timeout}s)" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 \ + --private-key="${ACCOUNT0_SECRET}" \ + "${reo_address}" "setOracleUpdateTimeout(uint256)" "${REO_ORACLE_UPDATE_TIMEOUT}" +fi + +# Clean deployment metadata from address books. +# The deployment package writes fields like implementationDeployment and +# proxyDeployment that the indexer-agent doesn't recognise, causing it to +# crash with "Address book entry contains invalid fields". +for ab in horizon.json subgraph-service.json; do + if [ -f "/opt/config/$ab" ]; then + TEMP_JSON=$(jq 'walk(if type == "object" then del(.implementationDeployment, .proxyDeployment) else . 
end)' "/opt/config/$ab") + printf '%s\n' "$TEMP_JSON" > "/opt/config/$ab" + fi +done + +echo "==== Phase 4 complete ====" +fi # REO_ENABLED +echo "==== All contract deployments complete ====" + +# Optional: keep container running for debugging +if [ -n "${KEEP_CONTAINER_RUNNING:-}" ]; then + tail -f /dev/null +fi diff --git a/postgres/setup.sql b/containers/core/postgres/setup.sql similarity index 100% rename from postgres/setup.sql rename to containers/core/postgres/setup.sql diff --git a/subgraph-deploy/Dockerfile b/containers/core/subgraph-deploy/Dockerfile similarity index 100% rename from subgraph-deploy/Dockerfile rename to containers/core/subgraph-deploy/Dockerfile diff --git a/subgraph-deploy/run.sh b/containers/core/subgraph-deploy/run.sh similarity index 100% rename from subgraph-deploy/run.sh rename to containers/core/subgraph-deploy/run.sh diff --git a/graph-node/Dockerfile b/containers/indexer/graph-node/Dockerfile similarity index 100% rename from graph-node/Dockerfile rename to containers/indexer/graph-node/Dockerfile diff --git a/overrides/graph-node-dev/Dockerfile b/containers/indexer/graph-node/dev/Dockerfile similarity index 100% rename from overrides/graph-node-dev/Dockerfile rename to containers/indexer/graph-node/dev/Dockerfile diff --git a/overrides/graph-node-dev/run-override.sh b/containers/indexer/graph-node/dev/run-override.sh similarity index 100% rename from overrides/graph-node-dev/run-override.sh rename to containers/indexer/graph-node/dev/run-override.sh diff --git a/graph-node/run.sh b/containers/indexer/graph-node/run.sh similarity index 100% rename from graph-node/run.sh rename to containers/indexer/graph-node/run.sh diff --git a/indexer-agent/Dockerfile b/containers/indexer/indexer-agent/Dockerfile similarity index 100% rename from indexer-agent/Dockerfile rename to containers/indexer/indexer-agent/Dockerfile diff --git a/overrides/indexer-agent-dev/run-override.sh b/containers/indexer/indexer-agent/dev/run-override.sh similarity 
index 100% rename from overrides/indexer-agent-dev/run-override.sh rename to containers/indexer/indexer-agent/dev/run-override.sh diff --git a/indexer-agent/run.sh b/containers/indexer/indexer-agent/run.sh similarity index 100% rename from indexer-agent/run.sh rename to containers/indexer/indexer-agent/run.sh diff --git a/containers/indexer/indexer-service/Dockerfile b/containers/indexer/indexer-service/Dockerfile new file mode 100644 index 0000000..36722bd --- /dev/null +++ b/containers/indexer/indexer-service/Dockerfile @@ -0,0 +1,13 @@ +## Local-network wrapper for indexer-service-rs +ARG INDEXER_SERVICE_RS_VERSION +FROM ghcr.io/graphprotocol/indexer-service-rs:${INDEXER_SERVICE_RS_VERSION} + +RUN apt-get update \ + && apt-get install -y --no-install-recommends curl jq \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt + +COPY ./run.sh /opt/run.sh + +ENTRYPOINT ["bash", "/opt/run.sh"] diff --git a/indexer-service/run.sh b/containers/indexer/indexer-service/run.sh similarity index 100% rename from indexer-service/run.sh rename to containers/indexer/indexer-service/run.sh diff --git a/start-indexing/Dockerfile b/containers/indexer/start-indexing/Dockerfile similarity index 100% rename from start-indexing/Dockerfile rename to containers/indexer/start-indexing/Dockerfile diff --git a/start-indexing/run.sh b/containers/indexer/start-indexing/run.sh similarity index 61% rename from start-indexing/run.sh rename to containers/indexer/start-indexing/run.sh index 1606a58..48f15f1 100755 --- a/start-indexing/run.sh +++ b/containers/indexer/start-indexing/run.sh @@ -7,12 +7,42 @@ t0=$SECONDS elapsed() { echo "[+$((SECONDS - t0))s] $*"; } # -- Idempotency: skip everything if allocations already active -- +# Still check curation signal — may be missing after volume cleanup. 
if curl -s "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" \ -H 'content-type: application/json' \ -d '{"query": "{ allocations(where:{status:Active}) { indexer { id } } }" }' \ | grep -qi "${RECEIVER_ADDRESS}" then - echo "Active allocations found, skipping" + echo "Active allocations found, ensuring curation signal on all deployments..." + + graph_token=$(contract_addr L2GraphToken.address horizon) + curation=$(contract_addr L2Curation.address horizon) + signal_per_dep="1000000000000000000000" # 1000 GRT per deployment + added=0 + + for subgraph_name in graph-network block-oracle semiotic/tap; do + dep_id="$(curl -s "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/${subgraph_name}" \ + -H 'content-type: application/json' \ + -d '{"query": "{ _meta { deployment } }" }' | jq -r '.data._meta.deployment')" + [ -z "$dep_id" ] || [ "$dep_id" = "null" ] && continue + dep_hex="$(curl -s -X POST "http://ipfs:${IPFS_RPC_PORT}/api/v0/cid/format?arg=${dep_id}&b=base16" | jq -r '.Formatted')" + dep_hex="${dep_hex#f01701220}" + + existing=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ + "${curation}" "getCurationPoolSignal(bytes32)(uint256)" "0x${dep_hex}" 2>/dev/null | awk '{print $1}') + if [ "${existing:-0}" != "0" ]; then + echo " ${subgraph_name}: signal present (${existing})" + continue + fi + + echo " ${subgraph_name}: adding signal..." 
+ cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ + "${graph_token}" "approve(address,uint256)" "${curation}" "${signal_per_dep}" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ + "${curation}" "mint(bytes32,uint256,uint256)" "0x${dep_hex}" "${signal_per_dep}" "0" + added=$((added + 1)) + done + [ $added -gt 0 ] && echo "Added signal to ${added} deployment(s)" || echo "All deployments have signal" exit 0 fi @@ -59,6 +89,7 @@ if [ "${subgraph_count:-0}" -ge 3 ]; then echo "Subgraphs already published to GNS (count: $subgraph_count)" else gns=$(contract_addr L2GNS.address subgraph-service) + all_dep_hexes="" for dep_name in network tap block_oracle; do eval dep_id=\$${dep_name}_deployment dep_hex="$(curl -s -X POST "http://ipfs:${IPFS_RPC_PORT}/api/v0/cid/format?arg=${dep_id}&b=base16" | jq -r '.Formatted')" @@ -69,8 +100,26 @@ else "0x${dep_hex}" \ '0x0000000000000000000000000000000000000000000000000000000000000000' \ '0x0000000000000000000000000000000000000000000000000000000000000000' + all_dep_hexes="${all_dep_hexes} ${dep_hex}" done elapsed "All subgraphs published to GNS" + + # -- Add curation signal so RewardsManager distributes rewards -- + # Without curation signal, accRewardsPerSignal stays 0 and no rewards flow. + # Signal ALL deployments so any allocation can earn rewards. + graph_token=$(contract_addr L2GraphToken.address horizon) + curation=$(contract_addr L2Curation.address horizon) + signal_per_dep="1000000000000000000000" # 1000 GRT per deployment + + for dep_hex in ${all_dep_hexes}; do + elapsed "Adding curation signal to 0x${dep_hex}..." 
+ total_approve="3000000000000000000000" # 3000 GRT total (enough for all) + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ + "${graph_token}" "approve(address,uint256)" "${curation}" "${total_approve}" + cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ + "${curation}" "mint(bytes32,uint256,uint256)" "0x${dep_hex}" "${signal_per_dep}" "0" + done + elapsed "Curation signal added to all deployments" fi # -- Set indexing rules (tells indexer-agent to allocate) -- diff --git a/containers/indexing-payments/dipper/Dockerfile b/containers/indexing-payments/dipper/Dockerfile new file mode 100644 index 0000000..1deb61c --- /dev/null +++ b/containers/indexing-payments/dipper/Dockerfile @@ -0,0 +1,16 @@ +## Local-network wrapper for dipper-service +ARG DIPPER_VERSION +FROM ghcr.io/edgeandnode/dipper-service:${DIPPER_VERSION} + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + jq \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt + +ADD run.sh /opt/run.sh + +ENTRYPOINT ["bash", "-c", "/opt/run.sh"] diff --git a/dipper/run.sh b/containers/indexing-payments/dipper/run.sh similarity index 62% rename from dipper/run.sh rename to containers/indexing-payments/dipper/run.sh index f46b44e..edd9f9d 100755 --- a/dipper/run.sh +++ b/containers/indexing-payments/dipper/run.sh @@ -5,28 +5,31 @@ set -eu . /opt/shared/lib.sh ## Parameters -# Pull the network subgraph deployment ID from the graph-node -network_subgraph_deployment=$(curl -s "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" \ - -H 'content-type: application/json' \ - -d '{"query": "{ _meta { deployment } }" }' \ - | jq -r '.data._meta.deployment') +echo "Waiting for network subgraph..." 
>&2 +network_subgraph_deployment=$(wait_for_gql \ + "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" \ + "{ _meta { deployment } }" \ + ".data._meta.deployment") tap_verifier=$(contract_addr TAPVerifier tap-contracts) +subgraph_service=$(contract_addr SubgraphService.address subgraph-service) ## Config cat >config.json <<-EOF { "dips": { - "service": "0x1234567890abcdef1234567890abcdef12345678", - "max_initial_amount": "1000000000000000000", - "max_ongoing_amount_per_epoch": "500000000000000000", - "max_epochs_per_collection": 10, - "min_epochs_per_collection": 2, - "duration_epochs": 20, + "data_service": "${subgraph_service}", + "recurring_collector": "0x0000000000000000000000000000000000000000", + "max_initial_tokens": "1000000000000000000", + "max_ongoing_tokens_per_second": "1000000000000000", + "max_seconds_per_collection": 86400, + "min_seconds_per_collection": 3600, + "duration_seconds": null, + "deadline_seconds": 300, "pricing_table": { "${CHAIN_ID}": { - "base_price_per_epoch": "101", - "price_per_entity": "1001" + "tokens_per_second": "101", + "tokens_per_entity_per_second": "1001" } } }, @@ -64,7 +67,7 @@ cat >config.json <<-EOF "verifier": "${tap_verifier}" }, "iisa": { - "endpoint": "http://iisa-mock:8080", + "endpoint": "http://iisa:8080", "request_timeout": 30, "connect_timeout": 10, "max_retries": 3 diff --git a/overrides/indexing-payments/iisa-mock/Dockerfile b/containers/indexing-payments/iisa-mock/Dockerfile similarity index 100% rename from overrides/indexing-payments/iisa-mock/Dockerfile rename to containers/indexing-payments/iisa-mock/Dockerfile diff --git a/overrides/indexing-payments/iisa-mock/iisa_mock.py b/containers/indexing-payments/iisa-mock/iisa_mock.py similarity index 100% rename from overrides/indexing-payments/iisa-mock/iisa_mock.py rename to containers/indexing-payments/iisa-mock/iisa_mock.py diff --git a/containers/indexing-payments/iisa/Dockerfile.scoring 
b/containers/indexing-payments/iisa/Dockerfile.scoring new file mode 100644 index 0000000..a1a50c4 --- /dev/null +++ b/containers/indexing-payments/iisa/Dockerfile.scoring @@ -0,0 +1,11 @@ +FROM python:3.12-slim + +WORKDIR /app + +# Install confluent-kafka for Redpanda connectivity +RUN pip install --no-cache-dir confluent-kafka + +COPY seed_scores.json ./ +COPY scoring.py ./ + +CMD ["python", "scoring.py"] diff --git a/containers/indexing-payments/iisa/scoring.py b/containers/indexing-payments/iisa/scoring.py new file mode 100644 index 0000000..a10ae6c --- /dev/null +++ b/containers/indexing-payments/iisa/scoring.py @@ -0,0 +1,175 @@ +""" +IISA scoring service for local network. + +Long-running service that ensures indexer scores are available for the +IISA HTTP service. On startup writes seed scores so IISA can start +immediately, then periodically checks Redpanda for real query data +and refreshes scores when available. + +Modelled after the eligibility-oracle-node polling pattern. +""" + +import json +import logging +import os +import shutil +import signal +import sys +import time +from pathlib import Path + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger("iisa-scoring") + +SCORES_FILE_PATH = os.environ.get("SCORES_FILE_PATH", "/app/scores/indexer_scores.json") +SEED_SCORES_PATH = "/app/seed_scores.json" +REDPANDA_BOOTSTRAP_SERVERS = os.environ.get("REDPANDA_BOOTSTRAP_SERVERS", "") +REDPANDA_TOPIC = os.environ.get("REDPANDA_TOPIC", "gateway_queries") +REFRESH_INTERVAL = int(os.environ.get("IISA_SCORING_INTERVAL", "600")) # 10 minutes + +# Graceful shutdown +shutdown_requested = False + + +def handle_signal(signum, frame): + global shutdown_requested + logger.info(f"Received signal {signum}, shutting down") + shutdown_requested = True + + +signal.signal(signal.SIGTERM, handle_signal) +signal.signal(signal.SIGINT, handle_signal) + + +def count_redpanda_messages() -> int: + 
"""Count messages in the Redpanda gateway_queries topic. Returns 0 on error.""" + if not REDPANDA_BOOTSTRAP_SERVERS: + return 0 + + try: + from confluent_kafka import Consumer, TopicPartition + + consumer = Consumer({ + "bootstrap.servers": REDPANDA_BOOTSTRAP_SERVERS, + "group.id": "iisa-scoring-check", + "auto.offset.reset": "earliest", + "enable.auto.commit": False, + }) + + metadata = consumer.list_topics(topic=REDPANDA_TOPIC, timeout=10) + topic_metadata = metadata.topics.get(REDPANDA_TOPIC) + + if topic_metadata is None or topic_metadata.error is not None: + consumer.close() + return 0 + + partitions = topic_metadata.partitions + if not partitions: + consumer.close() + return 0 + + total = 0 + for partition_id in partitions: + tp = TopicPartition(REDPANDA_TOPIC, partition_id) + low, high = consumer.get_watermark_offsets(tp, timeout=10) + total += high - low + + consumer.close() + return total + + except Exception as e: + logger.warning(f"Failed to check Redpanda: {e}") + return 0 + + +def write_seed_scores() -> bool: + """Copy seed scores file to the scores output path. Returns True on success.""" + scores_path = Path(SCORES_FILE_PATH) + scores_path.parent.mkdir(parents=True, exist_ok=True) + + if not Path(SEED_SCORES_PATH).exists(): + logger.error(f"Seed scores file not found: {SEED_SCORES_PATH}") + return False + + shutil.copy2(SEED_SCORES_PATH, SCORES_FILE_PATH) + + with open(SCORES_FILE_PATH) as f: + data = json.load(f) + + logger.info(f"Wrote seed scores ({len(data)} indexers) to {SCORES_FILE_PATH}") + return True + + +def ensure_scores_exist() -> bool: + """Ensure a scores file exists. 
Returns True if scores are available.""" + if Path(SCORES_FILE_PATH).exists(): + try: + with open(SCORES_FILE_PATH) as f: + data = json.load(f) + if data: + logger.info(f"Scores file exists with {len(data)} indexers") + return True + except (json.JSONDecodeError, OSError): + logger.warning("Existing scores file is invalid, will overwrite") + + return write_seed_scores() + + +def try_compute_scores() -> bool: + """ + Attempt to compute real scores from Redpanda data. + + TODO: Integrate the actual CronJob score computation pipeline here. + For now, logs the message count and returns False (uses seed scores). + """ + msg_count = count_redpanda_messages() + + if msg_count == 0: + logger.info("No messages in Redpanda yet, keeping current scores") + return False + + # TODO: Run actual score computation from Redpanda data when the + # CronJob pipeline is integrated into this container. The pipeline + # needs: protobuf decoding, linear regression, GeoIP resolution. + logger.info( + f"Redpanda has ~{msg_count} messages. " + "CronJob integration pending, keeping current scores." 
+ ) + return False + + +def main() -> int: + logger.info("IISA scoring service starting") + logger.info(f"Refresh interval: {REFRESH_INTERVAL}s") + logger.info(f"Scores file: {SCORES_FILE_PATH}") + logger.info(f"Redpanda: {REDPANDA_BOOTSTRAP_SERVERS or '(not configured)'}") + + # Phase 1: Ensure scores exist so IISA can start + if not ensure_scores_exist(): + logger.error("Failed to initialize scores, exiting") + return 1 + + logger.info("Initial scores ready, entering refresh loop") + + # Phase 2: Periodic refresh loop + while not shutdown_requested: + for _ in range(REFRESH_INTERVAL): + if shutdown_requested: + break + time.sleep(1) + + if shutdown_requested: + break + + logger.info("Running periodic score refresh") + try_compute_scores() + + logger.info("IISA scoring service stopped") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/containers/indexing-payments/iisa/seed_scores.json b/containers/indexing-payments/iisa/seed_scores.json new file mode 100644 index 0000000..8fe8ed2 --- /dev/null +++ b/containers/indexing-payments/iisa/seed_scores.json @@ -0,0 +1,26 @@ +[ + { + "indexer": "0xf4ef6650e48d099a4972ea5b414dab86e1998bd3", + "url": "http://indexer-service:7601", + "lat_lin_reg_coefficient": 0.002, + "lat_coefficient_std_error": 0.001, + "lat_coefficient_upper_bound": 0.004, + "lat_normalized_score": 0.85, + "uptime_score": 0.98, + "observed_duration_seconds": 86400, + "uptime_duration_seconds": 84672, + "success_rate": 0.95, + "stake_to_fees": 500.0, + "stake_to_fees_iqr_deviation": 0.3, + "norm_uptime_score": 0.9, + "norm_success_rate": 0.88, + "norm_stake_to_fees": 0.7, + "org": "local-network", + "dst_lat": 37.7749, + "dst_lon": -122.4194, + "existing_dips_agreements": 0, + "avg_sync_duration": 5.0, + "computed_at": "2026-02-20T00:00:00+00:00", + "query_count": 1000 + } +] diff --git a/block-oracle/Dockerfile b/containers/oracles/block-oracle/Dockerfile similarity index 100% rename from block-oracle/Dockerfile rename to 
containers/oracles/block-oracle/Dockerfile diff --git a/block-oracle/run.sh b/containers/oracles/block-oracle/run.sh similarity index 96% rename from block-oracle/run.sh rename to containers/oracles/block-oracle/run.sh index 06ec354..8b1d8f3 100755 --- a/block-oracle/run.sh +++ b/containers/oracles/block-oracle/run.sh @@ -21,7 +21,7 @@ log_level = "trace" [protocol_chain] name = "eip155:1337" jrpc = "http://chain:8545" -polling_interval_in_seconds = 20 +polling_interval_in_seconds = 1 [indexed_chains] "eip155:1337" = "http://chain:8545" diff --git a/containers/oracles/eligibility-oracle-node/Dockerfile b/containers/oracles/eligibility-oracle-node/Dockerfile new file mode 100644 index 0000000..9f06462 --- /dev/null +++ b/containers/oracles/eligibility-oracle-node/Dockerfile @@ -0,0 +1,40 @@ +FROM debian:bookworm-slim +ARG ELIGIBILITY_ORACLE_COMMIT + +# Build + runtime dependencies +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential clang cmake lld pkg-config git \ + curl jq unzip ca-certificates \ + libssl-dev librdkafka-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install Rust +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal + +# Clone and build eligibility-oracle binary +WORKDIR /opt +ENV CC=clang CXX=clang++ +ENV RUSTFLAGS="-C link-arg=-fuse-ld=lld" +RUN git clone https://github.com/edgeandnode/eligibility-oracle-node && \ + cd eligibility-oracle-node && git checkout ${ELIGIBILITY_ORACLE_COMMIT} && \ + . /root/.cargo/env && cargo build --release -p eligibility-oracle && \ + cp target/release/eligibility-oracle /usr/local/bin/eligibility-oracle && \ + cd .. 
&& rm -rf eligibility-oracle-node + +# Clean up build-only dependencies +RUN apt-get purge -y build-essential clang cmake lld pkg-config git libssl-dev librdkafka-dev && \ + apt-get autoremove -y && rm -rf /var/lib/apt/lists/* + +# Install runtime libraries +RUN apt-get update \ + && apt-get install -y --no-install-recommends libssl3 librdkafka1 \ + && rm -rf /var/lib/apt/lists/* + +# rpk CLI for Redpanda topic management +RUN curl -sLO https://github.com/redpanda-data/redpanda/releases/latest/download/rpk-linux-amd64.zip \ + && unzip rpk-linux-amd64.zip -d /usr/local/bin/ \ + && rm rpk-linux-amd64.zip + +COPY --chmod=755 ./run.sh /opt/run.sh +ENTRYPOINT ["bash", "/opt/run.sh"] diff --git a/containers/oracles/eligibility-oracle-node/run.sh b/containers/oracles/eligibility-oracle-node/run.sh new file mode 100644 index 0000000..4ccb523 --- /dev/null +++ b/containers/oracles/eligibility-oracle-node/run.sh @@ -0,0 +1,116 @@ +#!/bin/bash +set -eu +. /opt/config/.env +. /opt/shared/lib.sh + +# Wait for the REO contract address to be available in issuance.json +reo_address="" +for f in issuance.json; do + reo_address=$(jq -r '.["1337"].RewardsEligibilityOracle.address // empty' "/opt/config/$f" 2>/dev/null || true) + [ -n "$reo_address" ] && break +done + +if [ -z "$reo_address" ]; then + echo "ERROR: RewardsEligibilityOracle address not found in issuance.json" + echo "The REO contract must be deployed before starting the oracle node." + exit 1 +fi + +echo "=== Configuring eligibility-oracle-node ===" +echo " REO contract: ${reo_address}" +echo " Chain ID: ${CHAIN_ID}" +echo " Redpanda: redpanda:${REDPANDA_KAFKA_PORT}" + +# Create compacted output topic (idempotent) +rpk topic create indexer_daily_metrics \ + --brokers="redpanda:${REDPANDA_KAFKA_PORT}" \ + -c cleanup.policy=compact,delete \ + -c retention.ms=7776000000 \ + 2>/dev/null || true + +# Reset consumer group to the start of the topic. 
Stale committed offsets +# survive Redpanda restarts and can cause the oracle to skip new messages +# when the topic has been repopulated after a network restart. +rpk group seek eligibility-oracle --to start \ + --topics gateway_queries \ + --brokers="redpanda:${REDPANDA_KAFKA_PORT}" \ + 2>/dev/null || true + +# Generate config.toml with local network values +cat >config.toml <&2 +cat config.toml >&2 +echo "=============================" >&2 + +# Run in one-shot mode on a schedule, mirroring production (K8s CronJob). +# Each invocation rebuilds state from the compacted Kafka topic, evaluates +# eligibility, submits on-chain, then exits. +INTERVAL=10 +CHAIN_RPC="http://chain:${CHAIN_RPC_PORT}" + +# Forward SIGTERM/SIGINT to the running child so Docker stop is graceful. +child=0 +trap 'kill -TERM "$child" 2>/dev/null; wait "$child"; exit 0' SIGTERM SIGINT + +get_block_number() { + curl -sf -X POST "$CHAIN_RPC" \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + | jq -r '.result // empty' 2>/dev/null || true +} + +echo "=== Running eligibility-oracle-node (one-shot, polling every ${INTERVAL}s) ===" +last_block="" +while true; do + current_block=$(get_block_number) + + if [ -z "$current_block" ]; then + echo "Could not fetch block number, retrying in ${INTERVAL}s" + sleep "$INTERVAL" & + child=$! + wait "$child" + continue + fi + + if [ "$current_block" = "$last_block" ]; then + sleep "$INTERVAL" & + child=$! + wait "$child" + continue + fi + + echo "--- New block: ${last_block:-none} -> ${current_block}, running oracle ---" + eligibility-oracle --config config.toml & + child=$! + wait "$child" && echo "--- Oracle finished (ok) ---" \ + || echo "--- Oracle finished (exit $?) ---" + last_block=$current_block + + sleep "$INTERVAL" & + child=$! 
#!/bin/sh
# Shared shell utilities for local-network services.
# Sourced (not executed) by container run.sh scripts, which run under
# `set -eu`; on fatal errors these helpers therefore `exit 1` so the
# sourcing script aborts.

# require_jq JQ_PATH FILE
# Prints the value at JQ_PATH in FILE to stdout.
# Exits 1 (with a message on stderr) if the value is missing or empty.
require_jq() {
  _val=$(jq -r "$1 // empty" "$2")
  if [ -z "$_val" ]; then
    printf 'Error: %s not found in %s\n' "$1" "$2" >&2
    exit 1
  fi
  printf '%s' "$_val"
}

# contract_addr FIELD BASENAME
# Looks up ."1337".FIELD in /opt/config/BASENAME.json (chain-id 1337
# address books written by the graph-contracts container).
contract_addr() {
  require_jq ".\"1337\".$1" "/opt/config/$2.json"
}

# wait_for_gql URL QUERY JQ_FILTER [TIMEOUT]
# Polls a GraphQL endpoint until JQ_FILTER returns a non-empty value.
# Prints the value on success, exits 1 on timeout (default 120s).
wait_for_gql() {
  _url="$1" _query="$2" _filter="$3" _timeout="${4:-120}" _elapsed=0
  # Build the request body with jq so quotes/backslashes in QUERY are
  # JSON-escaped, instead of splicing the raw string into a hand-built
  # JSON literal (which breaks on any query containing a double quote).
  _payload=$(jq -cn --arg q "$_query" '{query: $q}')
  while [ "$_elapsed" -lt "$_timeout" ]; do
    _val=$(curl -sf "$_url" \
      -H 'content-type: application/json' \
      -d "$_payload" 2>/dev/null \
      | jq -r "$_filter // empty" 2>/dev/null || true)
    if [ -n "$_val" ]; then
      printf '%s' "$_val"
      return 0
    fi
    sleep 2
    _elapsed=$((_elapsed + 2))
  done
  printf 'Error: timed out waiting for %s after %ss\n' "$_url" "$_timeout" >&2
  exit 1
}
and utilities -FROM debian:bookworm-slim AS wrapper-dev - -RUN --mount=type=cache,target=/var/cache/apt \ - apt-get update \ - && apt-get install -y --no-install-recommends \ - ca-certificates \ - curl \ - jq \ - libssl3 \ - && rm -rf /var/lib/apt/lists/* - -WORKDIR /opt - -# Install the dipper-service binary -COPY --from=rust-builder /dipper-service /usr/local/bin/dipper-service - -# Add local-network configuration script -ADD run.sh /opt/run.sh - -ENTRYPOINT ["bash", "-c", "/opt/run.sh"] diff --git a/dipper/source b/dipper/source deleted file mode 160000 index 4c96e21..0000000 --- a/dipper/source +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 4c96e21aa8b25598a343885ba68f004a4db616ec diff --git a/docker-compose.yaml b/docker-compose.yaml index e54843e..4f1c823 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,7 +1,7 @@ services: chain: container_name: chain - build: { context: chain } + build: { context: containers/core/chain } ports: ["${CHAIN_RPC_PORT}:8545"] volumes: - chain-data:/data @@ -13,7 +13,8 @@ services: block-explorer: container_name: block-explorer - build: { context: block-explorer, args: { RPC_URL: http://localhost:8545 } } + profiles: [explorer] + build: { context: containers/ui/block-explorer, args: { RPC_URL: http://localhost:8545 } } depends_on: chain: { condition: service_healthy } ports: ["${BLOCK_EXPLORER_PORT}:3000"] @@ -36,7 +37,7 @@ services: command: postgres -c 'max_connections=1000' -c 'shared_preload_libraries=pg_stat_statements' volumes: - postgres-data:/var/lib/postgresql/data - - ./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql:ro + - ./containers/core/postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql:ro environment: POSTGRES_INITDB_ARGS: "--encoding UTF8 --locale=C" POSTGRES_HOST_AUTH_METHOD: trust @@ -47,7 +48,7 @@ services: graph-node: container_name: graph-node build: - context: "graph-node" + context: containers/indexer/graph-node args: GRAPH_NODE_VERSION: ${GRAPH_NODE_VERSION} depends_on: @@ 
-61,9 +62,9 @@ services: - ${GRAPH_NODE_STATUS_PORT}:8030 - ${GRAPH_NODE_METRICS_PORT}:8040 volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro healthcheck: { interval: 1s, retries: 20, test: curl -f http://127.0.0.1:8030 } restart: on-failure:3 @@ -71,32 +72,33 @@ services: graph-contracts: container_name: graph-contracts build: - context: graph-contracts + context: containers/core/graph-contracts args: CONTRACTS_COMMIT: ${CONTRACTS_COMMIT} TAP_CONTRACTS_COMMIT: ${TAP_CONTRACTS_COMMIT} depends_on: chain: { condition: service_healthy } volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config + - config-local:/opt/config environment: - FORK_RPC_URL=${FORK_RPC_URL:-} block-oracle: container_name: block-oracle + profiles: [block-oracle, indexing-payments] build: - context: block-oracle + context: containers/oracles/block-oracle args: BLOCK_ORACLE_COMMIT: ${BLOCK_ORACLE_COMMIT} depends_on: graph-contracts: { condition: service_completed_successfully } stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro environment: RUST_BACKTRACE: full healthcheck: @@ -110,7 +112,7 @@ services: indexer-agent: container_name: indexer-agent build: - context: indexer-agent + context: containers/indexer/indexer-agent args: INDEXER_AGENT_VERSION: ${INDEXER_AGENT_VERSION} platform: linux/amd64 @@ -119,9 +121,9 @@ services: ports: ["${INDEXER_MANAGEMENT_PORT}:7600"] stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro healthcheck: { interval: 10s, retries: 600, test: curl -f http://127.0.0.1:7600/ 
} restart: on-failure:3 @@ -129,7 +131,7 @@ services: subgraph-deploy: container_name: subgraph-deploy build: - context: subgraph-deploy + context: containers/core/subgraph-deploy args: NETWORK_SUBGRAPH_COMMIT: ${NETWORK_SUBGRAPH_COMMIT} TAP_SUBGRAPH_COMMIT: ${TAP_SUBGRAPH_COMMIT} @@ -138,20 +140,20 @@ services: graph-contracts: { condition: service_completed_successfully } graph-node: { condition: service_healthy } volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro start-indexing: container_name: start-indexing - build: { context: start-indexing } + build: { context: containers/indexer/start-indexing } depends_on: subgraph-deploy: { condition: service_completed_successfully } indexer-agent: { condition: service_healthy } volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro redpanda: container_name: redpanda @@ -185,7 +187,7 @@ services: tap-aggregator: container_name: tap-aggregator build: - context: tap-aggregator + context: containers/query-payments/tap-aggregator args: TAP_AGGREGATOR_VERSION: ${TAP_AGGREGATOR_VERSION} depends_on: @@ -193,9 +195,9 @@ services: ports: ["${TAP_AGGREGATOR_PORT}:7610"] stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro environment: RUST_LOG: info,tap_aggregator=trace RUST_BACKTRACE: 1 @@ -204,7 +206,7 @@ services: tap-escrow-manager: container_name: tap-escrow-manager build: - context: tap-escrow-manager + context: containers/query-payments/tap-escrow-manager args: TAP_ESCROW_MANAGER_COMMIT: ${TAP_ESCROW_MANAGER_COMMIT} depends_on: @@ -212,9 +214,9 @@ services: redpanda: { condition: service_healthy } stop_signal: SIGKILL volumes: - - 
./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro environment: RUST_LOG: info,tap_escrow_manager=trace RUST_BACKTRACE: 1 @@ -223,7 +225,7 @@ services: gateway: container_name: gateway build: - context: gateway + context: containers/core/gateway args: GATEWAY_COMMIT: ${GATEWAY_COMMIT} depends_on: @@ -232,9 +234,9 @@ services: ports: ["${GATEWAY_PORT}:7700"] stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro environment: RUST_LOG: info,graph_gateway=trace RUST_BACKTRACE: 1 @@ -245,8 +247,7 @@ services: indexer-service: container_name: indexer-service build: - target: "wrapper" # Set to "wrapper-dev" for building from source - context: indexer-service + context: containers/indexer/indexer-service args: INDEXER_SERVICE_RS_VERSION: ${INDEXER_SERVICE_RS_VERSION} depends_on: @@ -256,9 +257,9 @@ services: - "${INDEXER_SERVICE_PORT}:7601" stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro environment: RUST_LOG: info,indexer_service_rs=trace RUST_BACKTRACE: 1 @@ -269,9 +270,7 @@ services: tap-agent: container_name: tap-agent build: - target: "wrapper" # Set to "wrapper-dev" for building from source - context: indexer-service - dockerfile: Dockerfile.tap-agent + context: containers/query-payments/tap-agent args: INDEXER_TAP_AGENT_VERSION: ${INDEXER_TAP_AGENT_VERSION} depends_on: @@ -279,14 +278,112 @@ services: subgraph-deploy: { condition: service_completed_successfully } stop_signal: SIGKILL volumes: - - ./config/shared:/opt/shared:ro + - ./containers/shared:/opt/shared:ro - ./.env:/opt/config/.env:ro - - ./config/local:/opt/config:ro + - config-local:/opt/config:ro 
environment: RUST_LOG: info,indexer_tap_agent=trace RUST_BACKTRACE: 1 restart: on-failure:3 + # --- Profiled components (activated via COMPOSE_PROFILES in .env) --- + + eligibility-oracle-node: + container_name: eligibility-oracle-node + profiles: [rewards-eligibility] + build: + context: containers/oracles/eligibility-oracle-node + args: + ELIGIBILITY_ORACLE_COMMIT: ${ELIGIBILITY_ORACLE_COMMIT} + depends_on: + redpanda: { condition: service_healthy } + gateway: { condition: service_healthy } + volumes: + - ./containers/shared:/opt/shared:ro + - ./.env:/opt/config/.env:ro + - config-local:/opt/config:ro + environment: + RUST_LOG: eligibility_oracle=debug + BLOCKCHAIN_PRIVATE_KEY: ${ACCOUNT0_SECRET} + restart: unless-stopped + + iisa-scoring: + container_name: iisa-scoring + profiles: [indexing-payments] + build: + context: containers/indexing-payments/iisa + dockerfile: Dockerfile.scoring + depends_on: + redpanda: { condition: service_healthy } + environment: + REDPANDA_BOOTSTRAP_SERVERS: "redpanda:${REDPANDA_KAFKA_PORT}" + REDPANDA_TOPIC: gateway_queries + SCORES_FILE_PATH: /app/scores/indexer_scores.json + IISA_SCORING_INTERVAL: "600" + volumes: + - iisa-scores:/app/scores + healthcheck: + test: ["CMD", "test", "-f", "/app/scores/indexer_scores.json"] + interval: 5s + retries: 10 + restart: unless-stopped + + iisa: + container_name: iisa + profiles: [indexing-payments] + image: ghcr.io/edgeandnode/subgraph-dips-indexer-selection:${IISA_VERSION} + pull_policy: if_not_present + depends_on: + iisa-scoring: { condition: service_healthy } + ports: ["8080:8080"] + environment: + IISA_HOST: "0.0.0.0" + IISA_PORT: "8080" + IISA_LOG_LEVEL: INFO + SCORES_FILE_PATH: /app/scores/indexer_scores.json + volumes: + - iisa-scores:/app/scores + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 10s + retries: 10 + start_period: 30s + restart: unless-stopped + + dipper: + container_name: dipper + profiles: [indexing-payments] + build: + context: 
containers/indexing-payments/dipper + args: + DIPPER_VERSION: ${DIPPER_VERSION} + depends_on: + block-oracle: { condition: service_healthy } + postgres: { condition: service_healthy } + gateway: { condition: service_healthy } + iisa: { condition: service_healthy } + ports: + - "${DIPPER_ADMIN_RPC_PORT}:${DIPPER_ADMIN_RPC_PORT}" + - "${DIPPER_INDEXER_RPC_PORT}:${DIPPER_INDEXER_RPC_PORT}" + volumes: + - ./containers/shared:/opt/shared:ro + - ./.env:/opt/config/.env:ro + - config-local:/opt/config:ro + environment: + RUST_BACKTRACE: full + RUST_LOG: debug + healthcheck: + interval: 5s + retries: 10 + test: + [ + "CMD-SHELL", + "curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"health\",\"id\":1}' http://localhost:9000/ | grep -q jsonrpc", + ] + restart: unless-stopped + + # --- Readiness check --- + ready: container_name: ready image: busybox:latest @@ -300,3 +397,5 @@ volumes: postgres-data: ipfs-data: redpanda-data: + iisa-scores: + config-local: diff --git a/graph-contracts/run.sh b/graph-contracts/run.sh deleted file mode 100644 index 008455e..0000000 --- a/graph-contracts/run.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash -set -eu -. /opt/config/.env -. /opt/shared/lib.sh - -# -- Ensure config files exist (empty JSON on first run) -- -for f in horizon.json subgraph-service.json issuance.json tap-contracts.json block-oracle.json; do - [ -f "/opt/config/$f" ] || echo '{}' > "/opt/config/$f" -done - -# -- Symlink Hardhat address books to config directory -- -# Hardhat reads/writes addresses-local-network.json; symlinks let those -# writes land in /opt/config/ without individual Docker file mounts. 
-ln -sf /opt/config/horizon.json /opt/contracts/packages/horizon/addresses-local-network.json -ln -sf /opt/config/subgraph-service.json /opt/contracts/packages/subgraph-service/addresses-local-network.json -ln -sf /opt/config/issuance.json /opt/contracts/packages/issuance/addresses-local-network.json - -# ============================================================ -# Phase 1: Graph protocol contracts -# ============================================================ -echo "==== Phase 1/3: Graph protocol contracts ====" - -# -- Helper: ensure DisputeManager registered in Controller -- -ensure_dispute_manager_registered() { - controller_address=$(jq -r '.["1337"].Controller.address // empty' /opt/config/horizon.json) - dispute_manager_address=$(jq -r '.["1337"].DisputeManager.address // empty' /opt/config/subgraph-service.json) - - if [ -z "$controller_address" ] || [ -z "$dispute_manager_address" ]; then - echo "Controller or DisputeManager address not found, skipping registration" - return - fi - - dispute_manager_id=$(cast keccak256 "DisputeManager") - current_proxy=$(cast call --rpc-url="http://chain:${CHAIN_RPC_PORT}" \ - "${controller_address}" "getContractProxy(bytes32)(address)" "${dispute_manager_id}" 2>/dev/null || echo "0x") - - current_proxy_lower=$(echo "$current_proxy" | tr '[:upper:]' '[:lower:]') - dispute_manager_lower=$(echo "$dispute_manager_address" | tr '[:upper:]' '[:lower:]') - - if [ "$current_proxy_lower" = "$dispute_manager_lower" ]; then - echo "DisputeManager already registered in Controller: ${dispute_manager_address}" - else - echo "Registering Horizon DisputeManager in Controller..." 
- echo " Controller: ${controller_address}" - echo " DisputeManager: ${dispute_manager_address}" - echo " Current proxy: ${current_proxy}" - cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --private-key="${ACCOUNT1_SECRET}" \ - "${controller_address}" "setContractProxy(bytes32,address)" "${dispute_manager_id}" "${dispute_manager_address}" - fi -} - -# -- Idempotency check -- -phase1_skip=false -l2_graph_token=$(jq -r '.["1337"].L2GraphToken.address // empty' /opt/config/horizon.json 2>/dev/null || true) -if [ -n "$l2_graph_token" ]; then - code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$l2_graph_token" 2>/dev/null || echo "0x") - if [ "$code_check" != "0x" ]; then - echo "Graph protocol contracts already deployed (L2GraphToken at $l2_graph_token)" - ensure_dispute_manager_registered - echo "SKIP: Phase 1" - phase1_skip=true - else - echo "Contract addresses in horizon.json are stale (no code at $l2_graph_token), redeploying..." - fi -fi - -if [ "$phase1_skip" = "false" ]; then - echo "Deploying new version of the protocol" - cd /opt/contracts/packages/subgraph-service - npx hardhat deploy:protocol --network localNetwork --subgraph-service-config localNetwork - - # Add legacy contract stubs (gateway needs these) - TEMP_JSON=$(jq '.["1337"] += { - "LegacyServiceRegistry": {"address": "0x0000000000000000000000000000000000000000"}, - "LegacyDisputeManager": {"address": "0x0000000000000000000000000000000000000000"} - }' addresses-local-network.json) - printf '%s\n' "$TEMP_JSON" > addresses-local-network.json - - ensure_dispute_manager_registered -fi - -echo "==== Phase 1/3 complete ====" - -# ============================================================ -# Phase 2: TAP contracts -# ============================================================ -echo "==== Phase 2/3: TAP contracts ====" - -# -- Idempotency check -- -phase2_skip=false -escrow_address=$(jq -r '."1337".Escrow // empty' /opt/config/tap-contracts.json 2>/dev/null || 
true) -if [ -n "$escrow_address" ]; then - code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$escrow_address" 2>/dev/null || echo "0x") - if [ "$code_check" != "0x" ]; then - echo "TAP contracts already deployed (Escrow at $escrow_address)" - echo "SKIP: Phase 2" - phase2_skip=true - else - echo "TAP contract addresses are stale (no code at Escrow $escrow_address), redeploying..." - fi -fi - -if [ "$phase2_skip" = "false" ]; then - cd /opt/timeline-aggregation-protocol-contracts - - staking=$(contract_addr HorizonStaking.address horizon) - graph_token=$(contract_addr L2GraphToken.address horizon) - - # Note: forge may output alloy log lines to stdout after the JSON; sed extracts only the JSON object - forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ - src/AllocationIDTracker.sol:AllocationIDTracker \ - | tee allocation_tracker.json - allocation_tracker="$(sed -n '/^{/,/^}/p' allocation_tracker.json | jq -r '.deployedTo')" - - forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ - src/TAPVerifier.sol:TAPVerifier --constructor-args 'TAP' '1' \ - | tee verifier.json - verifier="$(sed -n '/^{/,/^}/p' verifier.json | jq -r '.deployedTo')" - - forge create --broadcast --json --rpc-url="http://chain:${CHAIN_RPC_PORT}" --mnemonic="${MNEMONIC}" \ - src/Escrow.sol:Escrow --constructor-args "${graph_token}" "${staking}" "${verifier}" "${allocation_tracker}" 10 15 \ - | tee escrow.json - escrow="$(sed -n '/^{/,/^}/p' escrow.json | jq -r '.deployedTo')" - - cat < /opt/config/tap-contracts.json -{ - "1337": { - "AllocationIDTracker": "$allocation_tracker", - "TAPVerifier": "$verifier", - "Escrow": "$escrow" - } -} -EOF -fi - -echo "==== Phase 2/3 complete ====" - -# ============================================================ -# Phase 3: DataEdge contract -# ============================================================ -echo "==== Phase 3/3: DataEdge contract 
====" - -# -- Idempotency check -- -phase3_skip=false -data_edge=$(jq -r '."1337".DataEdge // empty' /opt/config/block-oracle.json 2>/dev/null || true) -if [ -n "$data_edge" ]; then - code_check=$(cast code --rpc-url="http://chain:${CHAIN_RPC_PORT}" "$data_edge" 2>/dev/null || echo "0x") - if [ "$code_check" != "0x" ]; then - echo "DataEdge contract already deployed at $data_edge" - echo "SKIP: Phase 3" - phase3_skip=true - else - echo "DataEdge address stale (no code at $data_edge), redeploying..." - fi -fi - -if [ "$phase3_skip" = "false" ]; then - cd /opt/contracts-data-edge/packages/data-edge - export MNEMONIC="${MNEMONIC}" - sed -i "s/myth like bonus scare over problem client lizard pioneer submit female collect/${MNEMONIC}/g" hardhat.config.ts - npx hardhat data-edge:deploy --contract EventfulDataEdge --deploy-name EBO --network ganache | tee deploy.txt - data_edge="$(grep 'contract: ' deploy.txt | awk '{print $3}')" - - echo "=== Data edge deployed at: $data_edge ===" - - cat < /opt/config/block-oracle.json -{ - "1337": { - "DataEdge": "$data_edge" - } -} -ADDR_EOF - - # Register network in DataEdge - output=$(cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" --confirmations=0 --mnemonic="${MNEMONIC}" \ - "${data_edge}" \ - '0xa1dce3320000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000f030103176569703135353a313333370000000000000000000000000000000000' 2>&1) - exit_code=$? 
- if [ $exit_code -ne 0 ]; then - echo "Error during cast send: $output" | tee -a error.log - else - echo "$output" - fi -fi - -echo "==== Phase 3/3 complete ====" -echo "==== All contract deployments complete ====" - -# Optional: keep container running for debugging -if [ -n "${KEEP_CONTAINER_RUNNING:-}" ]; then - tail -f /dev/null -fi diff --git a/indexer-service/Dockerfile b/indexer-service/Dockerfile deleted file mode 100644 index 59fb38c..0000000 --- a/indexer-service/Dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -ARG INDEXER_SERVICE_RS_VERSION - -## Rust builder image -FROM rust:1-slim AS rust-builder - -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - pkg-config \ - protobuf-compiler \ - libssl-dev \ - libsasl2-dev \ - && rm -rf /var/lib/apt/lists/* - -WORKDIR /opt - -COPY source ./ - -# Force SQLx to use the offline mode to statically check the database queries against -# the prepared files in the `.sqlx` directory. -ENV SQLX_OFFLINE=true - -RUN --mount=type=cache,sharing=locked,id=cargo-registry,target=/usr/local/cargo/registry \ - --mount=type=cache,id=indexer-service-rs-build-cache,target=/opt/target \ - cargo build --bin indexer-service-rs \ - && cp target/debug/indexer-service-rs /opt/indexer-service-rs - - -## Wrapper development image -FROM debian:bookworm-slim AS wrapper-dev - -RUN apt-get update \ - && apt-get install -y curl jq \ - && rm -rf /var/lib/apt/lists/* - -# Copy the built binary from the rust builder image -COPY --from=rust-builder /opt/indexer-service-rs /usr/local/bin/indexer-service-rs - -COPY ./run.sh /opt/run.sh - -ENTRYPOINT ["bash", "/opt/run.sh"] - - -## Wrapper image -FROM ghcr.io/graphprotocol/indexer-service-rs:${INDEXER_SERVICE_RS_VERSION} AS wrapper -RUN apt-get update \ - && apt-get install -y curl jq \ - && rm -rf /var/lib/apt/lists/* - -COPY ./run.sh /opt/run.sh - -ENTRYPOINT ["bash", "/opt/run.sh"] \ No newline at end of file diff --git 
a/indexer-service/Dockerfile.tap-agent b/indexer-service/Dockerfile.tap-agent deleted file mode 100644 index cdb5e21..0000000 --- a/indexer-service/Dockerfile.tap-agent +++ /dev/null @@ -1,51 +0,0 @@ -ARG INDEXER_TAP_AGENT_VERSION - -## Rust builder image -FROM rust:1-slim AS rust-builder - -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - pkg-config \ - protobuf-compiler \ - libssl-dev \ - libsasl2-dev \ - && rm -rf /var/lib/apt/lists/* - -WORKDIR /opt - -COPY source ./ - -# Force SQLx to use the offline mode to statically check the database queries against -# the prepared files in the `.sqlx` directory. -ENV SQLX_OFFLINE=true - -RUN --mount=type=cache,sharing=locked,id=cargo-registry,target=/usr/local/cargo/registry \ - --mount=type=cache,id=indexer-tap-agent-build-cache,target=/opt/target \ - cargo build --bin indexer-tap-agent \ - && cp target/debug/indexer-tap-agent /opt/indexer-tap-agent - - -## Wrapper development image -FROM debian:bookworm-slim AS wrapper-dev - -RUN apt-get update \ - && apt-get install -y curl jq \ - && rm -rf /var/lib/apt/lists/* - -# Copy the built binary from the rust builder image -COPY --from=rust-builder /opt/indexer-tap-agent /usr/local/bin/indexer-tap-agent - -COPY ./run-tap-agent.sh /opt/run.sh - -ENTRYPOINT ["bash", "/opt/run.sh"] - -## Wrapper image -FROM ghcr.io/graphprotocol/indexer-tap-agent:${INDEXER_TAP_AGENT_VERSION} AS wrapper -RUN apt-get update \ - && apt-get install -y git jq \ - && rm -rf /var/lib/apt/lists/* - -COPY ./run-tap-agent.sh /opt/run.sh -ENTRYPOINT ["bash", "/opt/run.sh"] diff --git a/indexer-service/source b/indexer-service/source deleted file mode 160000 index ea83cab..0000000 --- a/indexer-service/source +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ea83cab0eb234c15d444aecc03f81a626e1ea0cc diff --git a/overrides/README.md b/overrides/README.md deleted file mode 100644 index b946de7..0000000 --- a/overrides/README.md +++ /dev/null @@ -1,127 +0,0 @@ -# 
Development Environments build on local-network - -## graph-node - -Graph node development works with the local network by mounting the source directory defined at `GRAPH_NODE_SOURCE_ROOT`, and builds using the `rust:latest` official rust docker image. - -Build artifacts are mounted at /tmp/graph-node-docker-build (host and container), and `CARGO_HOME` is set to `/tmp/graph-node-cargo-home` to reduce build times. - -### Debugging - -Local debugging of the service can be enabled, allowing source-level debug with gdb or an IDE. With the env var `WAIT_FOR_DEBUG` is not an empty string, we will execute the `graph-node` binary in a gdb server exposed on :2345. - -### Example vscode launch.json - -```json -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Attach to Remote GDB Server", - "type": "cppdbg", - "request": "launch", - "program": "${workspaceFolder}/target/debug/graph-node", // Path to the binary on the local machine - "miDebuggerServerAddress": "localhost:2345", // Address of the remote GDB server - "miDebuggerPath": "/usr/bin/gdb", // Path to GDB on the local machine - "cwd": "${workspaceFolder}", // Current working directory - "environment": [], - "externalConsole": false, - "MIMode": "gdb", - "setupCommands": [ - { - "description": "Enable pretty-printing for gdb", - "text": "-enable-pretty-printing", - "ignoreFailures": true - } - ], - "logging": { - "engineLogging": true - }, - "sourceFileMap": { - "/app": "${workspaceFolder}" // Maps the /app directory in the container to the local workspace - } - } - ] -} -``` - -If either the build or execution of the graph-node fail then we fall into a trap and pause the container using `tail -f /dev/null`. 
- -## indexer-agent, indexer-service (ts) (Hotload Dev Environment) - -This is a draft/POC of a hotload dev environment for the indexer agent. It's intended to provide a quick and easy way to iterate on the indexer codebase without having to rebuild the docker image and restart the stack. - -## Usage Examples - -To bring the whole stack up using the override, simply specify the override file when running `docker compose up`: - -```bash -# build -INDEXER_AGENT_SOURCE_ROOT= \ -docker-compose down && \ -docker compose build -f docker-compose.yaml -f overrides/indexer-agent-dev/indexer-agent-dev.yaml - -# start -INDEXER_AGENT_SOURCE_ROOT= \ -docker compose up -f docker-compose.yaml -f overrides/indexer-agent-dev/indexer-agent-dev.yaml -d -``` - -To update the container (when making changes to the entrypoint or Dockerfile), you'll need to rebuild the image and restart the container: - -```bash -# in the root of this checkout, with the local-network up and running, replace the indexer-agent with a hotload dev environment -INDEXER_AGENT_SOURCE_ROOT= \ -docker compose \ --f docker-compose.yaml \ --f overrides/indexer-agent-dev/indexer-agent-dev.yaml \ -up -d --no-deps indexer-agent -``` - -This will apply the overrides to the indexer-agent service to the docker-compose stack running and start it. - -## Network Subgraph Development - -A Network Subgraph directory can be mounted to the `subgraph-deploy` container for development purposes. - -To start the local network with a local Network Subgraph: - -```bash -# build -GRAPH_CONTRACTS_SOURCE_ROOT= \ -docker compose \ --f docker-compose.yaml \ --f overrides/graph-contracts/graph-contracts-dev.yaml \ -build - -GRAPH_CONTRACTS_SOURCE_ROOT= \ -docker compose \ --f docker-compose.yaml \ --f overrides/graph-contracts/graph-contracts-dev.yaml \ -up -d graph-contracts -``` - -Note that running in this mode will leave the `graph-contracts` container running so you can ssh into it for debugging/development. 
This might interfere with other components that depend on the container exiting. The network subgraph source is mounted into `subgraph-deploy` which handles subgraph deployment. - -## Indexing Payments - -Override at `indexing-payments/` adds the dipper service for indexing fee payments via GRT transfers. - -**Use case:** Testing indexing payment flows without TAP allocation complexity - -**Key features:** - -- GRT transfers (no allocations needed) -- Receipt ID system for async processing -- 1% automatic protocol burn -- Co-exists with TAP for query fees - -To start with indexing payments: - -```bash -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up -d -``` - -See [indexing-payments/README.md](indexing-payments/README.md) for detailed usage. diff --git a/overrides/graph-contracts/graph-contracts-dev.yaml b/overrides/graph-contracts/graph-contracts-dev.yaml deleted file mode 100644 index e21a827..0000000 --- a/overrides/graph-contracts/graph-contracts-dev.yaml +++ /dev/null @@ -1,11 +0,0 @@ -services: - graph-contracts: - volumes: - - ./horizon.json:/opt/contracts/packages/horizon/addresses-local-network.json - - ./subgraph-service.json:/opt/contracts/packages/subgraph-service/addresses-local-network.json - environment: - - KEEP_CONTAINER_RUNNING=true - subgraph-deploy: - volumes: - # Paths need to be absolute or relative to the location of the docker-compose.yaml file - - ${GRAPH_CONTRACTS_SOURCE_ROOT}:/opt/graph-network-subgraph diff --git a/overrides/graph-node-dev.sh b/overrides/graph-node-dev.sh deleted file mode 100755 index 78ea2e8..0000000 --- a/overrides/graph-node-dev.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -COMPOSE_FILES=( - -f docker-compose.yaml - -f overrides/graph-node-dev/graph-node-dev.yaml -) -COMMAND=$1 -shift -docker compose ${COMPOSE_FILES[@]} $COMMAND "$@" diff --git a/overrides/indexer-agent-dev/indexer-agent-dev.yaml b/overrides/indexer-agent-dev/indexer-agent-dev.yaml deleted file mode 
100644 index 9cb0e7f..0000000 --- a/overrides/indexer-agent-dev/indexer-agent-dev.yaml +++ /dev/null @@ -1,11 +0,0 @@ -services: - indexer-agent: - entrypoint: bash -cl /opt/run-override.sh - ports: - - "${INDEXER_MANAGEMENT}:7600" - # Nodejs debugger - - 9230:9230 - volumes: - - ./overrides/indexer-agent-dev/run-override.sh:/opt/run-override.sh:ro - # Paths need to be absolute or relative to the location of the docker-compose.yaml file - - ${INDEXER_AGENT_SOURCE_ROOT}:/opt/indexer-agent-source-root diff --git a/overrides/indexing-payments/README.md b/overrides/indexing-payments/README.md deleted file mode 100644 index 936323e..0000000 --- a/overrides/indexing-payments/README.md +++ /dev/null @@ -1,120 +0,0 @@ -# Indexing Payments Override - -This override adds the dipper service for Indexing Payments, enabling indexers to receive payments for indexing work via GRT transfers. - -## What Are Indexing Payments? - -Indexing Payments solve capital efficiency problems for indexing fees: - -- **No large allocations needed** ($50-$1000 for $5-$100 monthly fees) -- **GRT transfers** without allocation overhead -- **Asynchronous processing** with receipt IDs -- **1% protocol burn** automatically applied - -See [../../docs/indexing-payments/README.md](../../docs/indexing-payments/README.md) for architecture details. - -## Payment Systems - -| System | Use Case | Method | -| ---------------------------- | ------------- | ---------------------- | -| **TAP** (default) | Query fees | Allocations + receipts | -| **Payments** (this override) | Indexing fees | GRT transfers | - -Both systems can run simultaneously and are independent. - -## Prerequisites - -1. 
**Dipper source repository** cloned: - ```bash - git submodule update --init --recursive dipper/source - ``` - -## Usage - -### Start with Indexing Payments - -```bash -# Build (first time or after changes) -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml build - -# Start all services including dipper -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up -d - -# Or use helper script -./overrides/indexing-payments/start.sh -``` - -### Verify Services - -```bash -# Check all services -docker compose ps - -# Check dipper specifically -docker compose logs dipper - -# Check database -docker compose exec postgres psql -U postgres -l | grep dipper -``` - -### Test Functionality - -See [../../flows/IndexingPaymentsTesting.md](../../flows/IndexingPaymentsTesting.md) for step-by-step testing guide. - -### Stop Indexing Payments - -```bash -# Stop all services -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml down - -# Or just stop dipper -docker compose stop dipper -docker compose rm dipper -``` - -## Configuration - -The dipper service is configured via `dipper/run.sh` which generates a config file at runtime using environment variables from `.env`. 
- -**Key configuration:** - -- **Admin RPC:** `localhost:${DIPPER_ADMIN_RPC_PORT}` (default: 9000) -- **Indexer RPC:** `localhost:${DIPPER_INDEXER_RPC_PORT}` (default: 9001) -- **Database:** `postgres://postgres:postgres@postgres:5432/dipper_1` -- **Network:** Queries network subgraph via gateway -- **Signer:** Uses `ACCOUNT0_SECRET` for transaction signing - -## Troubleshooting - -**Dipper fails to start:** - -- Verify submodule: `ls dipper/source/` -- Check logs: `docker compose logs dipper` - -**Database connection errors:** - -- Ensure postgres is healthy: `docker compose ps postgres` -- Check database exists: `docker compose exec postgres psql -U postgres -l | grep dipper` - -**RPC endpoints not responding:** - -- Check port conflicts: `lsof -i :9000` and `lsof -i :9001` -- Verify ports in `.env` match docker-compose - -**Contracts not found:** - -- Verify contracts deployed: `docker compose logs graph-contracts` - -## Switching Back to Default - -Simply stop using the override: - -```bash -# Stop everything -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml down - -# Start without indexing payments -docker compose up -d -``` - -The dipper database remains but is unused. 
diff --git a/overrides/indexing-payments/docker-compose.yaml b/overrides/indexing-payments/docker-compose.yaml deleted file mode 100644 index 930fe03..0000000 --- a/overrides/indexing-payments/docker-compose.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Indexing Payments Override -# Adds dipper service for indexing fee payments -# Usage: docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up - -services: - # Mock IISA service for local development - # Provides indexer selection endpoints without BigQuery dependency - iisa-mock: - container_name: iisa-mock - build: - context: ./overrides/indexing-payments/iisa-mock - ports: - - "8080:8080" - healthcheck: - test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"] - interval: 5s - retries: 10 - restart: unless-stopped - - # Add dipper service for indexing payment processing - dipper: - container_name: dipper - build: - context: ./dipper - dockerfile: Dockerfile - target: wrapper-dev # or wrapper-prebuilt for releases - depends_on: - block-oracle: { condition: service_healthy } - postgres: { condition: service_healthy } - gateway: { condition: service_healthy } - iisa-mock: { condition: service_healthy } - ports: - - "${DIPPER_ADMIN_RPC_PORT}:${DIPPER_ADMIN_RPC_PORT}" - - "${DIPPER_INDEXER_RPC_PORT}:${DIPPER_INDEXER_RPC_PORT}" - volumes: - - ./config/shared:/opt/shared:ro - - ./config/local:/opt/config:ro - environment: - RUST_BACKTRACE: full - RUST_LOG: debug - healthcheck: - interval: 5s - retries: 10 - # Dipper uses JSON-RPC, so we check if it responds to any POST request - test: - [ - "CMD-SHELL", - "curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"health\",\"id\":1}' http://localhost:9000/ | grep -q jsonrpc", - ] - restart: unless-stopped - - # Optional: Override indexer-agent if indexing-payment-specific config needed - # indexer-agent: - # environment: - # - INDEXING_PAYMENTS_ENABLED=true 
- # - DIPPER_URL=http://dipper:${DIPPER_INDEXER_RPC_PORT} diff --git a/overrides/indexing-payments/start.sh b/overrides/indexing-payments/start.sh deleted file mode 100755 index 9a88ea4..0000000 --- a/overrides/indexing-payments/start.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -set -e - -echo "Starting local-network with Indexing Payments..." - -# Step 1: Initialize submodule -if [ ! -d "dipper/source/.git" ]; then - echo "Initializing dipper submodule..." - git submodule update --init --recursive dipper/source -fi - -# Step 2: Build and start services -echo "Starting services..." -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml \ - up -d - -# Step 3: Wait and show status -echo "Waiting for services to become healthy..." -sleep 5 -docker compose ps - -echo "" -echo "Local network with Indexing Payments is running!" -echo "" -echo "Admin RPC: http://localhost:${DIPPER_ADMIN_RPC_PORT:-9000}" -echo "Indexer RPC: http://localhost:${DIPPER_INDEXER_RPC_PORT:-9001}" -echo "" -echo "See flows/IndexingPaymentsTesting.md for testing instructions" diff --git a/scripts/build-with-overrides.sh b/scripts/build-with-overrides.sh deleted file mode 100755 index 9b9f7f7..0000000 --- a/scripts/build-with-overrides.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -export INDEXER_AGENT_SOURCE_ROOT=$HOME/Development/en/indexer - -docker compose -f docker-compose.yaml \ --f overrides/indexer-agent-dev/indexer-agent-dev.yaml build $@ diff --git a/scripts/clean.sh b/scripts/clean.sh deleted file mode 100755 index 55e83aa..0000000 --- a/scripts/clean.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -echo -e "${YELLOW}Cleaning local-network environment...${NC}" -echo "" - -# Get the script directory and navigate to repo root -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" -cd "$REPO_ROOT" - -# Stop and remove containers -echo -e "${YELLOW}Stopping and removing containers...${NC}" -docker compose down --remove-orphans - -# Remove all persistent state (volumes + config files) together -# These must be removed together to avoid inconsistent state -read -p "Remove all persistent state (volumes + config)? [y/N] " -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]]; then - echo -e "${YELLOW}Removing Docker volumes...${NC}" - docker compose down --volumes - echo -e "${GREEN}Volumes removed${NC}" - - echo -e "${YELLOW}Removing generated config files...${NC}" - if [ -d "config/local" ]; then - rm -rf config/local/* - echo -e "${GREEN}Config files removed${NC}" - fi -else - echo -e "${YELLOW}Skipping state removal (volumes and config preserved)${NC}" -fi - -# Prune Docker images (optional, ask user) -read -p "Remove Docker images built for this project? [y/N] " -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]]; then - echo -e "${YELLOW}Removing Docker images...${NC}" - docker images --filter "reference=*local-network*" -q | xargs -r docker rmi -f - echo -e "${GREEN}Docker images removed${NC}" -else - echo -e "${YELLOW}Skipping image removal${NC}" -fi - -echo "" -echo -e "${GREEN}Cleanup complete!${NC}" -echo -e "${YELLOW}To start fresh, run: docker compose up -d${NC}" diff --git a/scripts/dipper-cli.sh b/scripts/dipper-cli.sh index 4dca0ef..911049a 100755 --- a/scripts/dipper-cli.sh +++ b/scripts/dipper-cli.sh @@ -13,7 +13,12 @@ export INDEXING_SIGNING_KEY="${RECEIVER_SECRET}" export INDEXING_SERVER_URL="http://${DIPPER_HOST:-localhost}:${DIPPER_ADMIN_RPC_PORT}/" # Change to dipper source directory -cd "$SCRIPT_DIR/../dipper/source" +DIPPER_SOURCE="${DIPPER_SOURCE_ROOT:-}" +if [ -z "$DIPPER_SOURCE" ] || [ ! -d "$DIPPER_SOURCE" ]; then + echo "Error: Set DIPPER_SOURCE_ROOT to a local clone of edgeandnode/dipper." 
>&2 + exit 1 +fi +cd "$DIPPER_SOURCE" # Run dipper-cli with all passed arguments -cargo run --bin dipper-cli -- "$@" \ No newline at end of file +cargo run --bin dipper-cli -- "$@" diff --git a/scripts/mine-block.sh b/scripts/mine-block.sh index a68c205..522a08f 100755 --- a/scripts/mine-block.sh +++ b/scripts/mine-block.sh @@ -1,20 +1,18 @@ #!/bin/bash -# This script mines n blocks and advances the time by 12 seconds -# each block to mimic the behavior of ethereum. +# Mine n blocks, advancing time by 12 seconds per block to mimic Ethereum. +# Usage: mine-block.sh [count] - -# Number of times to run the commands, default is 1 count=${1:-1} RPC_URL="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT:-8545}" for ((i=0; i /dev/null - cast rpc --rpc-url="$RPC_URL" evm_mine + cast rpc --rpc-url="$RPC_URL" evm_mine > /dev/null done diff --git a/scripts/reo-config.sh b/scripts/reo-config.sh new file mode 100755 index 0000000..31e24fe --- /dev/null +++ b/scripts/reo-config.sh @@ -0,0 +1,128 @@ +#!/bin/bash +# View or change REO (Rewards Eligibility Oracle) contract configuration. +# +# Usage: +# ./scripts/reo-config.sh # Show current config +# ./scripts/reo-config.sh eligibility-period 300 # Set eligibility period to 5 minutes +# ./scripts/reo-config.sh oracle-timeout 86400 # Set oracle update timeout to 1 day +# +# Common values: +# Eligibility period: 300 (5min), 600 (10min), 3600 (1hr), 86400 (1day) +# Oracle update timeout: 86400 (1day), 604800 (7days) +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Load environment +# shellcheck source=../.env +. 
"$REPO_ROOT/.env" + +RPC_URL="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT}" + +# Read REO contract address from config-local volume +REO_ADDRESS=$(docker exec graph-node cat /opt/config/issuance.json 2>/dev/null \ + | jq -r '.["1337"].RewardsEligibilityOracle.address // empty' 2>/dev/null || true) +if [ -z "$REO_ADDRESS" ]; then + echo "ERROR: RewardsEligibilityOracle address not found." + echo " Is the local network running with the REO contract deployed?" + exit 1 +fi + +# cast call returns e.g. "1209600 [1.209e6]" — strip the annotation +cast_uint() { + echo "$1" | awk '{print $1}' +} + +format_duration() { + local secs=$1 + if [ "$secs" -ge 86400 ]; then + echo "${secs}s ($(( secs / 86400 ))d $(( (secs % 86400) / 3600 ))h)" + elif [ "$secs" -ge 3600 ]; then + echo "${secs}s ($(( secs / 3600 ))h $(( (secs % 3600) / 60 ))m)" + elif [ "$secs" -ge 60 ]; then + echo "${secs}s ($(( secs / 60 ))m $(( secs % 60 ))s)" + else + echo "${secs}s" + fi +} + +show_config() { + echo "=== REO Contract Configuration ===" + echo " Contract: $REO_ADDRESS" + echo "" + + validation=$(cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getEligibilityValidation()(bool)" 2>/dev/null) + echo " Eligibility validation: $validation" + + period=$(cast_uint "$(cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getEligibilityPeriod()(uint256)" 2>/dev/null)") + echo " Eligibility period: $(format_duration "$period")" + + timeout=$(cast_uint "$(cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getOracleUpdateTimeout()(uint256)" 2>/dev/null)") + echo " Oracle update timeout: $(format_duration "$timeout")" + + last_update=$(cast_uint "$(cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getLastOracleUpdateTime()(uint256)" 2>/dev/null)") + if [ "$last_update" = "0" ]; then + echo " Last oracle update: never" + else + now=$(date +%s) + ago=$(( now - last_update )) + echo " Last oracle update: $(format_duration "$ago") ago (timestamp $last_update)" + fi +} + +set_param() { + local param_name=$1 
+ local setter=$2 + local getter=$3 + local new_value=$4 + + current=$(cast_uint "$(cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "${getter}()(uint256)" 2>/dev/null)") + + if [ "$current" = "$new_value" ]; then + echo "$param_name is already $new_value" + return + fi + + echo "Setting $param_name: $(format_duration "$current") -> $(format_duration "$new_value")" + cast send --rpc-url="$RPC_URL" --confirmations=0 \ + --private-key="$ACCOUNT0_SECRET" \ + "$REO_ADDRESS" "${setter}(uint256)" "$new_value" + echo "Done." +} + +case "${1:-}" in + eligibility-period) + if [ -z "${2:-}" ]; then + echo "Usage: $0 eligibility-period " + echo " e.g.: $0 eligibility-period 300 # 5 minutes" + exit 1 + fi + set_param "eligibility period" "setEligibilityPeriod" "getEligibilityPeriod" "$2" + ;; + oracle-timeout) + if [ -z "${2:-}" ]; then + echo "Usage: $0 oracle-timeout " + echo " e.g.: $0 oracle-timeout 86400 # 1 day" + exit 1 + fi + set_param "oracle update timeout" "setOracleUpdateTimeout" "getOracleUpdateTimeout" "$2" + ;; + "") + show_config + ;; + *) + echo "Unknown command: $1" + echo "" + echo "Usage:" + echo " $0 Show current config" + echo " $0 eligibility-period Set eligibility period" + echo " $0 oracle-timeout Set oracle update timeout" + exit 1 + ;; +esac diff --git a/scripts/start-with-overrides.sh b/scripts/start-with-overrides.sh deleted file mode 100755 index 13bdaf2..0000000 --- a/scripts/start-with-overrides.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -export INDEXER_AGENT_SOURCE_ROOT=$HOME/Development/en/indexer -docker compose -f docker-compose.yaml \ --f overrides/indexer-agent-dev/indexer-agent-dev.yaml up -d $@ diff --git a/scripts/test-indexer-agent.sh b/scripts/test-indexer-agent.sh index 8d6ef2c..c945134 100755 --- a/scripts/test-indexer-agent.sh +++ b/scripts/test-indexer-agent.sh @@ -33,9 +33,10 @@ if [ ! -f "docker-compose.yaml" ]; then exit 1 fi -# Check if indexer-agent source is initialized -if [ ! 
-d "indexer-agent/source/packages" ]; then - echo -e "${RED}Error: indexer-agent source not found. Run: git submodule update --init --recursive indexer-agent/source${NC}" +# Check if indexer-agent source is available +INDEXER_AGENT_SOURCE="${INDEXER_AGENT_SOURCE_ROOT:-}" +if [ -z "$INDEXER_AGENT_SOURCE" ] || [ ! -d "${INDEXER_AGENT_SOURCE}/packages" ]; then + echo -e "${RED}Error: Set INDEXER_AGENT_SOURCE_ROOT to a local clone of graphprotocol/indexer.${NC}" exit 1 fi @@ -98,7 +99,7 @@ fi export INDEXER_TEST_API_KEY="${INDEXER_TEST_API_KEY:-}" # Navigate to indexer source -cd indexer-agent/source +cd "${INDEXER_AGENT_SOURCE}" # Install dependencies if needed if [ ! -d "node_modules" ]; then From ec97852c28d289f4fc78c564ed2df904e48985d8 Mon Sep 17 00:00:00 2001 From: Rembrandt Kuipers <50174308+RembrandtK@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:26:24 +0000 Subject: [PATCH 2/4] feat: add shell test and validation scripts - test-baseline-state.sh: Layer 1 state observation - test-baseline-queries.sh: baseline query validation - test-indexer-guide-queries.sh: indexer guide queries - test-reo-eligibility.sh: REO eligibility cycle e2e --- scripts/test-baseline-queries.sh | 191 +++++++++++++++++++ scripts/test-baseline-state.sh | 260 ++++++++++++++++++++++++++ scripts/test-indexer-guide-queries.sh | 181 ++++++++++++++++++ scripts/test-reo-eligibility.sh | 203 ++++++++++++++++++++ 4 files changed, 835 insertions(+) create mode 100755 scripts/test-baseline-queries.sh create mode 100755 scripts/test-baseline-state.sh create mode 100755 scripts/test-indexer-guide-queries.sh create mode 100755 scripts/test-reo-eligibility.sh diff --git a/scripts/test-baseline-queries.sh b/scripts/test-baseline-queries.sh new file mode 100755 index 0000000..5b81dd4 --- /dev/null +++ b/scripts/test-baseline-queries.sh @@ -0,0 +1,191 @@ +#!/bin/bash +# Layer 0: Validate all GraphQL verification queries from BaselineTestPlan.md +# +# Runs each query against the network subgraph with 
real local network values. +# Checks for GraphQL errors — does NOT verify operational outcomes. +# +# Prerequisites: +# - Local network running (graph-node, indexer-agent with allocations) +# - Network subgraph deployed and synced +# +# Usage: ./scripts/test-baseline-queries.sh +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# shellcheck source=../.env +. "$REPO_ROOT/.env" + +SUBGRAPH_URL="http://${GRAPH_NODE_HOST:-localhost}:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" +INDEXER=$(echo "$RECEIVER_ADDRESS" | tr '[:upper:]' '[:lower:]') + +pass=0 +fail=0 +total=0 + +# -- Helper -- +run_query() { + local label="$1" + local query="$2" + total=$((total + 1)) + + result=$(curl -s --max-time 10 "$SUBGRAPH_URL" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"$query\"}" 2>&1) + + if echo "$result" | grep -q '"errors"'; then + echo " FAIL $label" + echo " $(echo "$result" | jq -r '.errors[0].message' 2>/dev/null || echo "$result")" + fail=$((fail + 1)) + return 1 + elif echo "$result" | grep -q '"data"'; then + echo " PASS $label" + pass=$((pass + 1)) + return 0 + else + echo " FAIL $label (no data or errors in response)" + echo " $result" + fail=$((fail + 1)) + return 1 + fi +} + +echo "=== BaselineTestPlan Query Validation ===" +echo " Subgraph: $SUBGRAPH_URL" +echo " Indexer: $INDEXER" +echo "" + +# -- Resolve dynamic values -- +# Get first allocation ID for queries that need it +ALLOC_ID=$(curl -s "$SUBGRAPH_URL" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"{ allocations(first: 1, where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id subgraphDeployment { ipfsHash } } }\"}" \ + | jq -r '.data.allocations[0].id // empty' 2>/dev/null || true) + +DEPLOYMENT=$(curl -s "$SUBGRAPH_URL" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"{ allocations(first: 1, where: { indexer_: { id: \\\"$INDEXER\\\" } }) { subgraphDeployment { ipfsHash } } }\"}" \ + | jq -r 
'.data.allocations[0].subgraphDeployment.ipfsHash // empty' 2>/dev/null || true) + +if [ -z "$ALLOC_ID" ]; then + echo " WARNING: No allocations found for indexer. Some queries will use placeholder values." + ALLOC_ID="0x0000000000000000000000000000000000000000" +fi +if [ -z "$DEPLOYMENT" ]; then + DEPLOYMENT="QmUnknown" +fi + +echo " Allocation: $ALLOC_ID" +echo " Deployment: $DEPLOYMENT" +echo "" + +# ============================================================ +# Cycle 1: Indexer Setup and Registration +# ============================================================ +echo "--- Cycle 1: Indexer Setup and Registration ---" + +run_query "1.1 Indexer setup" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id createdAt stakedTokens queryFeeCut indexingRewardCut } }" || true + +run_query "1.2 Indexer URL/GEO" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id url geoHash } }" || true + +run_query "1.3 SubgraphService provision" \ + "{ provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id indexer { id url geoHash } tokensProvisioned tokensAllocated tokensThawing thawingPeriod maxVerifierCut dataService { id } } }" || true + +echo "" + +# ============================================================ +# Cycle 2: Stake Management +# ============================================================ +echo "--- Cycle 2: Stake Management ---" + +run_query "2.1 Stake view" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id stakedTokens allocatedTokens availableStake } }" || true + +run_query "2.2 Thaw requests" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id stakedTokens availableStake } thawRequests(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokens thawingUntil type } }" || true + +echo "" + +# ============================================================ +# Cycle 3: Provision Management +# ============================================================ +echo "--- Cycle 3: Provision Management ---" + +run_query "3.1 View provision" \ + "{ 
provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokensProvisioned tokensThawing tokensAllocated thawingPeriod maxVerifierCut } }" || true + +run_query "3.2 Provision + indexer stake" \ + "{ provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokensProvisioned tokensAllocated indexer { stakedTokens availableStake } } }" || true + +run_query "3.3 Provision + thawRequests (enum filter)" \ + "{ provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokensProvisioned tokensThawing } thawRequests(where: { indexer_: { id: \\\"$INDEXER\\\" }, type: Provision }) { id tokens thawingUntil } }" || true + +run_query "3.4 Provision + indexer availableStake" \ + "{ provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokensProvisioned tokensThawing } indexers(where: { id: \\\"$INDEXER\\\" }) { availableStake } }" || true + +echo "" + +# ============================================================ +# Cycle 4: Allocation Management +# ============================================================ +echo "--- Cycle 4: Allocation Management ---" + +run_query "4.1 Deployments with rewards" \ + "{ subgraphDeployments(where: { deniedAt: 0, signalledTokens_not: 0, indexingRewardAmount_not: 0 }) { ipfsHash stakedTokens signalledTokens indexingRewardAmount manifest { network } } }" || true + +run_query "4.2 Active allocations" \ + "{ allocations(where: { indexer_: { id: \\\"$INDEXER\\\" }, status: \\\"Active\\\" }) { id allocatedTokens createdAtEpoch subgraphDeployment { ipfsHash } } }" || true + +run_query "4.5 Allocations by deployment" \ + "{ allocations(where: { indexer_: { id: \\\"$INDEXER\\\" }, subgraphDeployment_: { ipfsHash: \\\"$DEPLOYMENT\\\" } }) { id status allocatedTokens createdAtEpoch closedAtEpoch } }" || true + +echo "" + +# ============================================================ +# Cycle 5: Query Serving and Revenue +# ============================================================ +echo "--- Cycle 5: Query Serving and Revenue 
---" + +run_query "5.2 Epoch + active allocations" \ + "{ graphNetworks { currentEpoch } allocations(where: { indexer_: { id: \\\"$INDEXER\\\" }, status: \\\"Active\\\" }) { id allocatedTokens createdAtEpoch } }" || true + +run_query "5.2b Closed allocation rewards" \ + "{ allocations(where: { id: \\\"$ALLOC_ID\\\" }) { id status allocatedTokens indexingRewards closedAtEpoch } }" || true + +run_query "5.3 Query fees collected" \ + "{ allocations(where: { indexer_: { id: \\\"$INDEXER\\\" }, status: \\\"Closed\\\" }) { id queryFeesCollected closedAtEpoch } }" || true + +run_query "5.4 Allocation POI" \ + "{ allocations(where: { id: \\\"$ALLOC_ID\\\" }) { id status indexingRewards poi } }" || true + +echo "" + +# ============================================================ +# Cycle 6: Network Health +# ============================================================ +echo "--- Cycle 6: Network Health ---" + +run_query "6.1 Indexer health" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id url geoHash stakedTokens allocatedTokens availableStake delegatedTokens queryFeesCollected rewardsEarned allocations(where: { status: \\\"Active\\\" }) { id subgraphDeployment { ipfsHash } } } }" || true + +run_query "6.2 Epoch progression" \ + "{ graphNetworks { id currentEpoch totalTokensStaked totalTokensAllocated totalQueryFees totalIndexingRewards } }" || true + +echo "" + +# ============================================================ +# Summary +# ============================================================ +echo "=== Results ===" +echo " $pass passed, $fail failed, $total total" + +if [ "$fail" -eq 0 ]; then + echo " All queries valid." + exit 0 +else + echo " Some queries have schema errors. Fix the test plan or subgraph." 
+ exit 1 +fi diff --git a/scripts/test-baseline-state.sh b/scripts/test-baseline-state.sh new file mode 100755 index 0000000..603e512 --- /dev/null +++ b/scripts/test-baseline-state.sh @@ -0,0 +1,260 @@ +#!/bin/bash +# Layer 1: Verify local network state matches BaselineTestPlan expectations. +# +# Checks that the network initialised correctly after `docker compose up`: +# - Indexer registered with stake, URL, geoHash +# - Provision exists with non-zero tokens +# - Active allocations exist +# - Subgraph deployments synced and healthy +# - Gateway serves queries +# - Epoch is progressing +# - Indexer agent management API responsive +# +# This catches deployment regressions before you run operational tests. +# +# Prerequisites: +# - Local network fully started (all services healthy) +# +# Usage: ./scripts/test-baseline-state.sh +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# shellcheck source=../.env +. "$REPO_ROOT/.env" + +SUBGRAPH_URL="http://${GRAPH_NODE_HOST:-localhost}:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" +AGENT_URL="http://${INDEXER_AGENT_HOST:-localhost}:${INDEXER_MANAGEMENT_PORT}" +GATEWAY_URL="http://${GATEWAY_HOST:-localhost}:${GATEWAY_PORT}" +RPC_URL="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT}" +INDEXER=$(echo "$RECEIVER_ADDRESS" | tr '[:upper:]' '[:lower:]') + +export PATH="$HOME/.foundry/bin:$PATH" + +pass=0 +fail=0 +total=0 + +# -- Helpers -- +check() { + local label="$1" + local condition="$2" + total=$((total + 1)) + + if eval "$condition" > /dev/null 2>&1; then + echo " PASS $label" + pass=$((pass + 1)) + return 0 + else + echo " FAIL $label" + fail=$((fail + 1)) + return 1 + fi +} + +gql() { + local url="$1" + local query="$2" + curl -s --max-time 10 "$url" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"$query\"}" 2>/dev/null +} + +jq_test() { + local json="$1" + local expr="$2" + echo "$json" | jq -e "$expr" > /dev/null 2>&1 +} + +echo "=== Baseline 
State Observation ===" +echo " Subgraph: $SUBGRAPH_URL" +echo " Agent: $AGENT_URL" +echo " Gateway: $GATEWAY_URL" +echo " RPC: $RPC_URL" +echo " Indexer: $INDEXER" +echo "" + +# ============================================================ +# Network Subgraph (Cycle 1: Indexer Setup) +# ============================================================ +echo "--- Indexer Registration (Cycle 1) ---" + +indexer_data=$(gql "$SUBGRAPH_URL" \ + "{ indexers(where: { id: \\\"$INDEXER\\\" }) { id stakedTokens url geoHash queryFeeCut indexingRewardCut } }") + +check "1.1 Indexer entity exists" \ + "jq_test '$indexer_data' '.data.indexers | length > 0'" || true + +check "1.1 Staked tokens non-zero" \ + "jq_test '$indexer_data' '.data.indexers[0].stakedTokens != \"0\"'" || true + +check "1.2 URL is set" \ + "jq_test '$indexer_data' '.data.indexers[0].url != null and .data.indexers[0].url != \"\"'" || true + +check "1.2 GeoHash is set" \ + "jq_test '$indexer_data' '.data.indexers[0].geoHash != null and .data.indexers[0].geoHash != \"\"'" || true + +echo "" + +# ============================================================ +# Provision (Cycle 1.3 + Cycle 3) +# ============================================================ +echo "--- Provision (Cycle 1.3 / Cycle 3) ---" + +provision_data=$(gql "$SUBGRAPH_URL" \ + "{ provisions(where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id tokensProvisioned tokensAllocated tokensThawing dataService { id } } }") + +check "1.3 Provision exists" \ + "jq_test '$provision_data' '.data.provisions | length > 0'" || true + +check "1.3 Provision tokens non-zero" \ + "jq_test '$provision_data' '.data.provisions[0].tokensProvisioned != \"0\"'" || true + +check "3.1 DataService is set" \ + "jq_test '$provision_data' '.data.provisions[0].dataService.id != null'" || true + +echo "" + +# ============================================================ +# Allocations (Cycle 4) +# ============================================================ +echo "--- Allocations (Cycle 
4) ---" + +alloc_data=$(gql "$SUBGRAPH_URL" \ + "{ allocations(where: { indexer_: { id: \\\"$INDEXER\\\" }, status: \\\"Active\\\" }) { id allocatedTokens subgraphDeployment { ipfsHash } createdAtEpoch } }") + +active_count=$(echo "$alloc_data" | jq '.data.allocations | length' 2>/dev/null || echo "0") + +check "4.x Active allocations exist" \ + "[ \"$active_count\" -gt 0 ]" || true + +echo " ($active_count active allocations)" + +echo "" + +# ============================================================ +# Graph Node Deployments (via Agent) +# ============================================================ +echo "--- Graph Node Deployments ---" + +deploy_data=$(gql "$AGENT_URL" \ + "{ indexerDeployments { subgraphDeployment synced health } }") + +deploy_count=$(echo "$deploy_data" | jq '.data.indexerDeployments | length' 2>/dev/null || echo "0") +synced_count=$(echo "$deploy_data" | jq '[.data.indexerDeployments[] | select(.synced == true)] | length' 2>/dev/null || echo "0") +healthy_count=$(echo "$deploy_data" | jq '[.data.indexerDeployments[] | select(.health == "healthy")] | length' 2>/dev/null || echo "0") + +check "Deployments indexed" \ + "[ \"$deploy_count\" -gt 0 ]" || true + +check "All deployments synced" \ + "[ \"$synced_count\" = \"$deploy_count\" ]" || true + +check "All deployments healthy" \ + "[ \"$healthy_count\" = \"$deploy_count\" ]" || true + +echo " ($synced_count/$deploy_count synced, $healthy_count/$deploy_count healthy)" + +echo "" + +# ============================================================ +# Agent Registration +# ============================================================ +echo "--- Indexer Agent ---" + +reg_data=$(gql "$AGENT_URL" \ + "{ indexerRegistration(protocolNetwork: \\\"hardhat\\\") { address url registered } }") + +check "Agent registered" \ + "jq_test '$reg_data' '.data.indexerRegistration[0].registered == true'" || true + +check "Agent URL matches subgraph" \ + "jq_test '$reg_data' '.data.indexerRegistration[0].url != 
null'" || true + +echo "" + +# ============================================================ +# Gateway (Cycle 5) +# ============================================================ +echo "--- Gateway (Cycle 5) ---" + +gw_response=$(curl -s --max-time 10 \ + "$GATEWAY_URL/api/subgraphs/id/$SUBGRAPH" \ + -H 'content-type: application/json' \ + -H "Authorization: Bearer $GATEWAY_API_KEY" \ + -d '{"query": "{ _meta { block { number } } }"}' 2>/dev/null) + +check "Gateway serves queries" \ + "jq_test '$gw_response' '.data._meta.block.number != null'" || true + +block_num=$(echo "$gw_response" | jq '.data._meta.block.number' 2>/dev/null || echo "?") +echo " (block $block_num)" + +echo "" + +# ============================================================ +# Network Health (Cycle 6) +# ============================================================ +echo "--- Network Health (Cycle 6) ---" + +network_data=$(gql "$SUBGRAPH_URL" \ + "{ graphNetworks { currentEpoch totalTokensStaked totalTokensAllocated } }") + +check "6.2 Epoch is non-zero" \ + "jq_test '$network_data' '.data.graphNetworks[0].currentEpoch > 0'" || true + +current_epoch=$(echo "$network_data" | jq '.data.graphNetworks[0].currentEpoch' 2>/dev/null || echo "?") +echo " (epoch $current_epoch)" + +echo "" + +# ============================================================ +# Chain RPC +# ============================================================ +echo "--- Chain ---" + +chain_block=$(cast block-number --rpc-url="$RPC_URL" 2>/dev/null || echo "0") +check "Chain RPC responsive" \ + "[ \"$chain_block\" -gt 0 ]" || true + +echo " (block $chain_block)" + +echo "" + +# ============================================================ +# REO (if deployed) +# ============================================================ +REO_ADDRESS=$(docker exec graph-node cat /opt/config/issuance.json 2>/dev/null \ + | jq -r '.["1337"].RewardsEligibilityOracle.address // empty' 2>/dev/null || true) + +if [ -n "$REO_ADDRESS" ]; then + echo "--- REO 
Contract ---" + + validation=$(cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "getEligibilityValidation()(bool)" 2>/dev/null || echo "error") + check "REO deployed and callable" \ + "[ \"$validation\" = \"true\" ] || [ \"$validation\" = \"false\" ]" || true + echo " (validation=$validation)" + + eligible=$(cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "isEligible(address)(bool)" "$RECEIVER_ADDRESS" 2>/dev/null || echo "error") + check "Indexer eligibility queryable" \ + "[ \"$eligible\" = \"true\" ] || [ \"$eligible\" = \"false\" ]" || true + echo " (eligible=$eligible)" + + echo "" +fi + +# ============================================================ +# Summary +# ============================================================ +echo "=== Results ===" +echo " $pass passed, $fail failed, $total total" + +if [ "$fail" -eq 0 ]; then + echo " Network state matches baseline expectations." + exit 0 +else + echo " Some checks failed — network may not be fully initialised." + echo " Wait for all services to be healthy: docker compose ps" + exit 1 +fi diff --git a/scripts/test-indexer-guide-queries.sh b/scripts/test-indexer-guide-queries.sh new file mode 100755 index 0000000..8fc0c7b --- /dev/null +++ b/scripts/test-indexer-guide-queries.sh @@ -0,0 +1,181 @@ +#!/bin/bash +# Layer 0: Validate all queries and cast commands from IndexerTestGuide.md +# +# Tests: +# - GraphQL verification queries against network subgraph +# - cast call commands against REO and RewardsManager contracts +# +# Prerequisites: +# - Local network running with eligibility-oracle override +# - REO contract deployed (Phase 4) +# - `cast` available (Foundry) +# +# Usage: ./scripts/test-indexer-guide-queries.sh +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# shellcheck source=../.env +. 
"$REPO_ROOT/.env" + +# Ensure cast is on PATH +export PATH="$HOME/.foundry/bin:$PATH" + +SUBGRAPH_URL="http://${GRAPH_NODE_HOST:-localhost}:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" +RPC_URL="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT}" +INDEXER=$(echo "$RECEIVER_ADDRESS" | tr '[:upper:]' '[:lower:]') + +pass=0 +fail=0 +total=0 + +# -- Helpers -- +run_query() { + local label="$1" + local query="$2" + total=$((total + 1)) + + result=$(curl -s --max-time 10 "$SUBGRAPH_URL" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"$query\"}" 2>&1) + + if echo "$result" | grep -q '"errors"'; then + echo " FAIL $label" + echo " $(echo "$result" | jq -r '.errors[0].message' 2>/dev/null || echo "$result")" + fail=$((fail + 1)) + return 1 + elif echo "$result" | grep -q '"data"'; then + echo " PASS $label" + pass=$((pass + 1)) + return 0 + else + echo " FAIL $label (no data or errors in response)" + echo " $result" + fail=$((fail + 1)) + return 1 + fi +} + +run_cast() { + local label="$1" + shift + total=$((total + 1)) + + if result=$("$@" 2>&1); then + echo " PASS $label" + echo " → $result" + pass=$((pass + 1)) + return 0 + else + echo " FAIL $label" + echo " $result" + fail=$((fail + 1)) + return 1 + fi +} + +echo "=== IndexerTestGuide Query & Command Validation ===" +echo " Subgraph: $SUBGRAPH_URL" +echo " RPC: $RPC_URL" +echo " Indexer: $INDEXER" +echo "" + +# -- Resolve REO contract address -- +REO_ADDRESS=$(docker exec graph-node cat /opt/config/issuance.json 2>/dev/null \ + | jq -r '.["1337"].RewardsEligibilityOracle.address // empty' 2>/dev/null || true) + +if [ -z "$REO_ADDRESS" ]; then + echo " WARNING: REO contract not found. Skipping cast tests." + echo " Is the eligibility-oracle override active?" 
+ SKIP_CAST=true +else + echo " REO: $REO_ADDRESS" + SKIP_CAST=false +fi + +# -- Resolve RewardsManager address -- +REWARDS_MANAGER=$(docker exec graph-node cat /opt/config/horizon.json 2>/dev/null \ + | jq -r '.["1337"].RewardsManager.address // empty' 2>/dev/null || true) + +if [ -n "$REWARDS_MANAGER" ]; then + echo " RM: $REWARDS_MANAGER" +fi +echo "" + +# ============================================================ +# GraphQL Queries +# ============================================================ +echo "--- GraphQL Queries ---" + +run_query "1.1 Indexer allocations (singular)" \ + "{ indexer(id: \\\"$INDEXER\\\") { allocations(where: { status: \\\"Active\\\" }) { id subgraphDeployment { ipfsHash } allocatedTokens createdAtEpoch } } graphNetwork(id: \\\"1\\\") { currentEpoch } }" || true + +ALLOC_ID=$(curl -s "$SUBGRAPH_URL" \ + -H 'content-type: application/json' \ + -d "{\"query\": \"{ allocations(first: 1, where: { indexer_: { id: \\\"$INDEXER\\\" } }) { id } }\"}" \ + | jq -r '.data.allocations[0].id // empty' 2>/dev/null || true) +ALLOC_ID="${ALLOC_ID:-0x0000000000000000000000000000000000000000}" + +run_query "2.2 Allocation close verification" \ + "{ allocations(where: { id: \\\"$ALLOC_ID\\\" }) { id status indexingRewards closedAtEpoch } }" || true + +run_query "4.2 Allocation with epochs" \ + "{ allocations(where: { id: \\\"$ALLOC_ID\\\" }) { id status indexingRewards createdAtEpoch closedAtEpoch } }" || true + +echo "" + +# ============================================================ +# Cast Commands (contract calls) +# ============================================================ +echo "--- Contract Calls (cast) ---" + +if [ "$SKIP_CAST" = "true" ]; then + echo " SKIP (REO contract not deployed)" + echo "" +else + run_cast "Prereq: getEligibilityValidation" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "getEligibilityValidation()(bool)" || true + + run_cast "Prereq: getEligibilityPeriod" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" 
"getEligibilityPeriod()(uint256)" || true + + ORACLE_ROLE=$(cast keccak "ORACLE_ROLE" 2>/dev/null || true) + if [ -n "$ORACLE_ROLE" ]; then + run_cast "Prereq: hasRole(ORACLE_ROLE, indexer)" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "hasRole(bytes32,address)(bool)" "$ORACLE_ROLE" "$INDEXER" || true + fi + + run_cast "2.1 isEligible" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "isEligible(address)(bool)" "$INDEXER" || true + + run_cast "2.1 getEligibilityRenewalTime" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "getEligibilityRenewalTime(address)(uint256)" "$INDEXER" || true + + run_cast "3.1 block timestamp" \ + cast block latest --field timestamp --rpc-url="$RPC_URL" || true + + run_cast "Troubleshoot: paused" \ + cast call --rpc-url="$RPC_URL" "$REO_ADDRESS" "paused()(bool)" || true + + if [ -n "$REWARDS_MANAGER" ]; then + run_cast "Troubleshoot: getRewardsEligibilityOracle" \ + cast call --rpc-url="$RPC_URL" "$REWARDS_MANAGER" "getRewardsEligibilityOracle()(address)" || true + fi + + echo "" +fi + +# ============================================================ +# Summary +# ============================================================ +echo "=== Results ===" +echo " $pass passed, $fail failed, $total total" + +if [ "$fail" -eq 0 ]; then + echo " All queries and commands valid." + exit 0 +else + echo " Some queries or commands failed. Check output above." + exit 1 +fi diff --git a/scripts/test-reo-eligibility.sh b/scripts/test-reo-eligibility.sh new file mode 100755 index 0000000..94648a0 --- /dev/null +++ b/scripts/test-reo-eligibility.sh @@ -0,0 +1,203 @@ +#!/bin/bash +# Test the Rewards Eligibility Oracle (REO) end-to-end cycle. 
+# +# Demonstrates: indexer NOT eligible → gateway queries → REO evaluates → indexer IS eligible +# +# Prerequisites: +# - Local network running with eligibility-oracle override +# - REO contract deployed (Phase 4 in graph-contracts) +# - REO node running and connected to Redpanda +# - `cast` available (Foundry) +# +# Usage: ./scripts/test-reo-eligibility.sh [query_count] +# query_count: number of gateway queries to send (default: 10) +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Load environment +# shellcheck source=../.env +. "$REPO_ROOT/.env" + +# Host-side defaults (containers use internal hostnames) +RPC_URL="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT}" +GATEWAY_URL="http://${GATEWAY_HOST:-localhost}:${GATEWAY_PORT}" +QUERY_COUNT="${1:-10}" +INDEXER="${RECEIVER_ADDRESS}" +REO_POLL_TIMEOUT=150 # Max wait: 2.5 cycles (worst case: just missed a cycle) +REO_POLL_INTERVAL=10 # Check every 10s + +# -- Read REO contract address from config-local volume -- +REO_ADDRESS=$(docker exec graph-node cat /opt/config/issuance.json 2>/dev/null \ + | jq -r '.["1337"].RewardsEligibilityOracle.address // empty' 2>/dev/null || true) +if [ -z "$REO_ADDRESS" ]; then + echo "ERROR: RewardsEligibilityOracle address not found." + echo " Is the local network running? Is the REO contract deployed (Phase 4)?" + echo " Check: docker exec graph-node cat /opt/config/issuance.json | jq ." 
+ exit 1 +fi + +echo "=== REO Eligibility Cycle Test ===" +echo " REO contract: $REO_ADDRESS" +echo " Indexer: $INDEXER" +echo " RPC: $RPC_URL" +echo " Gateway: $GATEWAY_URL" +echo " Queries: $QUERY_COUNT" +echo "" + +# -- Helper functions -- +check_eligible() { + cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "isEligible(address)(bool)" "$1" 2>/dev/null +} + +get_validation_enabled() { + cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getEligibilityValidation()(bool)" 2>/dev/null +} + +get_last_oracle_update() { + cast call --rpc-url="$RPC_URL" \ + "$REO_ADDRESS" "getLastOracleUpdateTime()(uint256)" 2>/dev/null +} + +# ============================================================ +# Step 1: Check contract state +# ============================================================ +echo "--- Step 1: Check contract state ---" + +validation=$(get_validation_enabled) +echo " Eligibility validation enabled: $validation" + +if [ "$validation" != "true" ]; then + echo " ERROR: Eligibility validation is not enabled on the REO contract." + echo " This should be enabled during deployment (Phase 4 in graph-contracts)." + echo " Re-run graph-contracts or enable manually:" + echo " cast send --rpc-url=$RPC_URL --private-key=\$ACCOUNT0_SECRET $REO_ADDRESS 'setEligibilityValidation(bool)' true" + exit 1 +fi + +last_update=$(get_last_oracle_update) +echo " Last oracle update time: $last_update" + +# Seed lastOracleUpdateTime if it's 0 (prevents fail-safe from making everyone eligible). +# Call renewIndexerEligibility with an empty array — this sets the timestamp without +# marking any indexer eligible. Requires ORACLE_ROLE (ACCOUNT0). +if [ "$last_update" = "0" ]; then + echo " Seeding lastOracleUpdateTime (empty oracle update)..." 
+ cast send --rpc-url="$RPC_URL" --confirmations=0 \ + --private-key="$ACCOUNT0_SECRET" \ + "$REO_ADDRESS" "renewIndexerEligibility(address[],bytes)" "[]" "0x" > /dev/null + echo " Last oracle update time: $(get_last_oracle_update)" +fi + +echo "" + +# ============================================================ +# Step 2: Verify indexer is NOT eligible +# ============================================================ +echo "--- Step 2: Verify indexer is NOT eligible ---" + +eligible_before=$(check_eligible "$INDEXER") +echo " isEligible($INDEXER) = $eligible_before" + +if [ "$eligible_before" = "true" ]; then + echo "" + echo " WARNING: Indexer is already eligible. This can happen if:" + echo " - The REO node already submitted eligibility in a previous cycle" + echo " - The eligibility period hasn't expired yet" + echo " The test will continue but won't demonstrate the full deny→allow transition." + echo "" +fi + +# ============================================================ +# Step 3: Send queries through the gateway +# ============================================================ +echo "--- Step 3: Send $QUERY_COUNT queries through gateway ---" + +# Mine blocks first to prevent "too far behind" errors +if ! "$SCRIPT_DIR/mine-block.sh" 5 > /dev/null 2>&1; then + echo " ERROR: Failed to mine blocks. Is the chain accessible at $RPC_URL?" + exit 1 +fi + +success=0 +fail=0 +for i in $(seq 1 "$QUERY_COUNT"); do + response=$(curl -s -w "\n%{http_code}" \ + "$GATEWAY_URL/api/subgraphs/id/$SUBGRAPH" \ + -H 'content-type: application/json' \ + -H "Authorization: Bearer $GATEWAY_API_KEY" \ + -d '{"query": "{ _meta { block { number } } }"}') + http_code=$(echo "$response" | tail -1) + if [ "$http_code" = "200" ]; then + success=$((success + 1)) + else + fail=$((fail + 1)) + fi +done + +echo " Sent $QUERY_COUNT queries: $success OK, $fail failed" + +if [ "$success" -eq 0 ]; then + echo " ERROR: All queries failed. Is the gateway healthy?" 
+ echo " Check: docker compose ps gateway" + exit 1 +fi + +echo "" + +# ============================================================ +# Step 4: Poll until indexer is eligible (or timeout) +# ============================================================ +echo "--- Step 4: Wait for REO node to process queries ---" +echo " Polling every ${REO_POLL_INTERVAL}s, timeout ${REO_POLL_TIMEOUT}s" +echo " (REO node cycles every 60s; may need up to 2 cycles if we just missed one)" +echo "" + +elapsed=0 +eligible_after="false" +while [ $elapsed -lt $REO_POLL_TIMEOUT ]; do + sleep $REO_POLL_INTERVAL + elapsed=$((elapsed + REO_POLL_INTERVAL)) + eligible_after=$(check_eligible "$INDEXER") + if [ "$eligible_after" = "true" ]; then + echo " Eligible after ${elapsed}s" + break + fi + printf " %ds / %ds — not yet eligible...\r" "$elapsed" "$REO_POLL_TIMEOUT" +done + +if [ "$eligible_after" != "true" ]; then + echo " Timed out after ${REO_POLL_TIMEOUT}s " +fi + +echo "" + +# ============================================================ +# Summary +# ============================================================ +echo "=== Results ===" +echo " Before queries: isEligible = $eligible_before" +echo " After REO cycle: isEligible = $eligible_after" + +if [ "$eligible_before" = "false" ] && [ "$eligible_after" = "true" ]; then + echo "" + echo " SUCCESS: Full deny → allow cycle verified" + echo " The indexer was initially ineligible, served queries, and was marked eligible by the REO." + exit 0 +elif [ "$eligible_before" = "true" ] && [ "$eligible_after" = "true" ]; then + echo "" + echo " PARTIAL: Indexer was already eligible before the test." + echo " The REO is working but the deny→allow transition was not demonstrated." + echo " To see the full cycle, wait for the eligibility period to expire or redeploy." + exit 0 +elif [ "$eligible_after" = "false" ]; then + echo "" + echo " NEEDS MORE TIME: Indexer is still not eligible." + echo " The REO node may not have completed its cycle yet." 
+ echo " Check REO logs: docker compose logs --tail 50 eligibility-oracle-node" + echo " Then re-check manually: cast call --rpc-url=$RPC_URL $REO_ADDRESS 'isEligible(address)(bool)' $INDEXER" + exit 1 +fi From db7a5ca7a4b7364e0e4506252ec692210a743e3a Mon Sep 17 00:00:00 2001 From: Rembrandt Kuipers <50174308+RembrandtK@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:26:35 +0000 Subject: [PATCH 3/4] feat: add Rust integration test crate Test suites: allocation lifecycle, eligibility, network state, provision management, query fees, REO governance, reward collection, stake management. Includes nextest config for serial execution. --- tests/.config/nextest.toml | 10 + tests/Cargo.lock | 1964 +++++++++++++++++++++++++++ tests/Cargo.toml | 15 + tests/src/cast.rs | 451 ++++++ tests/src/graphql.rs | 189 +++ tests/src/lib.rs | 266 ++++ tests/src/management.rs | 73 + tests/src/polling.rs | 308 +++++ tests/src/staking.rs | 141 ++ tests/tests/allocation_lifecycle.rs | 205 +++ tests/tests/eligibility.rs | 207 +++ tests/tests/network_state.rs | 216 +++ tests/tests/provision_management.rs | 114 ++ tests/tests/query_fees.rs | 96 ++ tests/tests/reo_governance.rs | 688 ++++++++++ tests/tests/reward_collection.rs | 105 ++ tests/tests/stake_management.rs | 81 ++ 17 files changed, 5129 insertions(+) create mode 100644 tests/.config/nextest.toml create mode 100644 tests/Cargo.lock create mode 100644 tests/Cargo.toml create mode 100644 tests/src/cast.rs create mode 100644 tests/src/graphql.rs create mode 100644 tests/src/lib.rs create mode 100644 tests/src/management.rs create mode 100644 tests/src/polling.rs create mode 100644 tests/src/staking.rs create mode 100644 tests/tests/allocation_lifecycle.rs create mode 100644 tests/tests/eligibility.rs create mode 100644 tests/tests/network_state.rs create mode 100644 tests/tests/provision_management.rs create mode 100644 tests/tests/query_fees.rs create mode 100644 tests/tests/reo_governance.rs create mode 100644 
tests/tests/reward_collection.rs create mode 100644 tests/tests/stake_management.rs diff --git a/tests/.config/nextest.toml b/tests/.config/nextest.toml new file mode 100644 index 0000000..25e7c0b --- /dev/null +++ b/tests/.config/nextest.toml @@ -0,0 +1,10 @@ +# All tests share a single blockchain (hardhat chain) and must run serially. +# Nextest runs each test as a separate process, so #[serial] (in-process +# locking) doesn't work. Instead, use a test group with max-threads = 1. + +[test-groups.shared-chain] +max-threads = 1 + +[[profile.default.overrides]] +filter = "all()" +test-group = "shared-chain" diff --git a/tests/Cargo.lock b/tests/Cargo.lock new file mode 100644 index 0000000..6caa32e --- /dev/null +++ b/tests/Cargo.lock @@ -0,0 +1,1964 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "aws-lc-rs" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.37.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] 
+name = "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "cc" +version = "1.2.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + 
+[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name 
= "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "system-configuration", + "tokio", + "tower-service", + "tracing", + "windows-registry", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = 
"icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version 
= "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.87" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f0862381daaec758576dcc22eb7bbf4d7efd67328553f3b45a412a51a3fb21" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.182" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "local-network-tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "pretty_assertions", + "reqwest", + "serde", + "serde_json", + "serial_test", + "tokio", +] + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ 
+ "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" 
+version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "reqwest" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "mime", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "serde", + "serde_json", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustls" +version = "0.23.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +dependencies = [ + "aws-lc-rs", + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + 
+[[package]] +name = "rustls-webpki" +version = "0.103.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + +[[package]] +name = "security-framework" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" +dependencies = [ + "bitflags", + "core-foundation 0.10.1", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serial_test" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f" +dependencies = [ + "futures-executor", + "futures-util", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "system-configuration" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name 
= "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de241cdc66a9d91bd84f097039eb140cdc6eec47e0cdbaf9d932a1dd6c35866" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a42e96ea38f49b191e08a1bab66c7ffdba24b06f9995b39a9dd60222e5b6f1da" +dependencies = [ + "cfg-if", + "futures-util", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.110" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12fdf6649048f2e3de6d7d5ff3ced779cdedee0e0baffd7dff5cdfa3abc8a52" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e63d1795c565ac3462334c1e396fd46dbf481c40f51f5072c310717bc4fb309" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9f9cdac23a5ce71f6bf9f8824898a501e511892791ea2a0c6b8568c68b9cb53" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2c7c5718134e770ee62af3b6b4a84518ec10101aad610c024b64d6ff29bb1ff" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = 
"windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 
0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version 
= "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" 
+version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/tests/Cargo.toml b/tests/Cargo.toml new file mode 100644 index 0000000..0da14e7 --- /dev/null +++ b/tests/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "local-network-tests" +version = "0.1.0" +edition = "2024" + +[dependencies] +anyhow = "1" +reqwest = { version = "0.13", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } + +[dev-dependencies] +pretty_assertions = "1" +serial_test = "3" diff --git a/tests/src/cast.rs b/tests/src/cast.rs new file mode 100644 index 
0000000..8bc843b --- /dev/null +++ b/tests/src/cast.rs @@ -0,0 +1,451 @@ +//! Wrapper around the `cast` CLI (Foundry) for contract calls and transactions. + +use anyhow::{Context, Result}; +use std::process::Command; + +use crate::TestNetwork; + +impl TestNetwork { + /// Read-only contract call via `cast call`. + /// Returns the raw stdout (decoded return value). + pub fn cast_call(&self, to: &str, sig: &str, args: &[&str]) -> Result { + let mut cmd = Command::new("cast"); + cmd.arg("call") + .arg(format!("--rpc-url={}", self.rpc_url)) + .arg(to) + .arg(sig); + for arg in args { + cmd.arg(arg); + } + run_command(&mut cmd) + } + + /// State-changing transaction via `cast send`. + /// Uses `account0_secret` as the signer. Returns stdout. + pub fn cast_send(&self, to: &str, sig: &str, args: &[&str]) -> Result { + let mut cmd = Command::new("cast"); + cmd.arg("send") + .arg(format!("--rpc-url={}", self.rpc_url)) + .arg("--confirmations=0") + .arg(format!("--private-key={}", self.account0_secret)) + .arg(to) + .arg(sig); + for arg in args { + cmd.arg(arg); + } + run_command(&mut cmd) + } + + /// Check if an address is eligible via the REO contract. + pub fn reo_is_eligible(&self, address: &str) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "isEligible(address)(bool)", &[address])?; + Ok(output.trim() == "true") + } + + /// Check if eligibility validation is enabled on the REO contract. + pub fn reo_validation_enabled(&self) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "getEligibilityValidation()(bool)", &[])?; + Ok(output.trim() == "true") + } + + /// Get the last oracle update time from the REO contract. 
+ pub fn reo_last_oracle_update(&self) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "getLastOracleUpdateTime()(uint256)", &[])?; + cast_parse_uint(&output) + .parse() + .context("parsing lastOracleUpdateTime") + } + + /// Seed the REO lastOracleUpdateTime by calling renewIndexerEligibility with + /// an empty array. Requires ORACLE_ROLE (account0). + pub fn reo_seed_oracle_timestamp(&self) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + self.cast_send( + &reo, + "renewIndexerEligibility(address[],bytes)", + &["[]", "0x"], + )?; + Ok(()) + } + + /// Renew eligibility for a specific indexer. Requires ORACLE_ROLE (account0). + pub fn reo_renew_indexer(&self, address: &str) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + let array = format!("[{address}]"); + self.cast_send( + &reo, + "renewIndexerEligibility(address[],bytes)", + &[&array, "0x"], + )?; + Ok(()) + } + + /// Get the eligibility period (seconds) from the REO contract. + pub fn reo_eligibility_period(&self) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "getEligibilityPeriod()(uint256)", &[])?; + cast_parse_uint(&output) + .parse() + .context("parsing eligibilityPeriod") + } + + /// State-changing transaction via `cast send`, signed by an arbitrary private key. 
+ pub fn cast_send_as(&self, key: &str, to: &str, sig: &str, args: &[&str]) -> Result { + let mut cmd = Command::new("cast"); + cmd.arg("send") + .arg(format!("--rpc-url={}", self.rpc_url)) + .arg("--confirmations=0") + .arg(format!("--private-key={key}")) + .arg(to) + .arg(sig); + for arg in args { + cmd.arg(arg); + } + run_command(&mut cmd) + } + + /// Try a `cast send` and return Ok(true) if it succeeds, Ok(false) if it reverts. + pub fn cast_send_may_revert( + &self, + key: &str, + to: &str, + sig: &str, + args: &[&str], + ) -> Result { + match self.cast_send_as(key, to, sig, args) { + Ok(_) => Ok(true), + Err(e) => { + let msg = format!("{e:#}"); + if msg.contains("revert") || msg.contains("execution reverted") { + Ok(false) + } else { + Err(e) + } + } + } + } + + /// State-changing transaction via `cast send`, signed by `receiver_secret` (the indexer). + /// Needed for operations that require `onlyAuthorizedForProvision`. + pub fn cast_send_as_indexer(&self, to: &str, sig: &str, args: &[&str]) -> Result { + let mut cmd = Command::new("cast"); + cmd.arg("send") + .arg(format!("--rpc-url={}", self.rpc_url)) + .arg("--confirmations=0") + .arg(format!("--private-key={}", self.receiver_secret)) + .arg(to) + .arg(sig); + for arg in args { + cmd.arg(arg); + } + run_command(&mut cmd) + } + + /// Collect indexing rewards for an allocation via `SubgraphService.collect()`. + /// + /// `closeAllocation` does NOT collect rewards — it reclaims them. + /// This function calls `collect(indexer, PaymentTypes.IndexingRewards, data)` directly, + /// which calls `takeRewards()` and mints GRT to the indexer's stake. + /// + /// Must be called BEFORE closing the allocation. + /// Requires calling as the indexer (RECEIVER_SECRET) due to `onlyAuthorizedForProvision`. 
+ pub fn collect_indexing_rewards(&self, allocation_id: &str) -> Result { + let ss = &self.contracts.subgraph_service; + // PaymentTypes.IndexingRewards = 2 + // data = abi.encode(address allocationId, bytes32 poi, bytes poiMetadata) + // Use a non-zero POI (keccak of "test") so it takes the CLAIMED path + let poi = "0x9c22ff5f21f0b81b113e63f7db6da94fedef11b2119b4088b89664fb9a3cb658"; + // Build the full call: collect(address indexer, uint8 paymentType, bytes data) + let mut cmd = Command::new("bash"); + cmd.arg("-c").arg(format!( + "cast send --rpc-url={rpc} --confirmations=0 --private-key={key} \ + {ss} 'collect(address,uint8,bytes)' '{indexer}' 2 \ + $(cast abi-encode 'f(address,bytes32,bytes)' '{alloc}' '{poi}' '0x')", + rpc = self.rpc_url, + key = self.receiver_secret, + ss = ss, + indexer = self.indexer_address, + alloc = allocation_id, + poi = poi, + )); + run_command(&mut cmd) + } + + /// Query the indexer's total staked tokens from the HorizonStaking contract. + pub fn staked_tokens(&self) -> Result { + let output = self.cast_call( + &self.contracts.horizon_staking, + "getStake(address)(uint256)", + &[&self.indexer_address], + )?; + cast_parse_uint(&output) + .parse() + .context("parsing staked tokens") + } + + // --- REO Governance Operations (ReoTestPlan Cycles 3-5, 7) --- + + /// Set eligibility validation on/off. Requires OPERATOR_ROLE (account0). + /// ReoTestPlan 4.1 (enable) / 7.2 (disable). + pub fn reo_set_validation(&self, enabled: bool) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + self.cast_send( + &reo, + "setEligibilityValidation(bool)", + &[if enabled { "true" } else { "false" }], + )?; + Ok(()) + } + + /// Set the eligibility period (seconds). Requires OPERATOR_ROLE (account0). + /// ReoTestPlan 4.4. + pub fn reo_set_eligibility_period(&self, seconds: u64) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? 
+ .to_string(); + self.cast_send( + &reo, + "setEligibilityPeriod(uint256)", + &[&seconds.to_string()], + )?; + Ok(()) + } + + /// Get the oracle update timeout (seconds). ReoTestPlan 1.3. + pub fn reo_oracle_timeout(&self) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "getOracleUpdateTimeout()(uint256)", &[])?; + cast_parse_uint(&output) + .parse() + .context("parsing oracleUpdateTimeout") + } + + /// Set the oracle update timeout (seconds). Requires OPERATOR_ROLE (account0). + /// ReoTestPlan 5.1. + pub fn reo_set_oracle_timeout(&self, seconds: u64) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + self.cast_send( + &reo, + "setOracleUpdateTimeout(uint256)", + &[&seconds.to_string()], + )?; + Ok(()) + } + + /// Pause the REO contract. Requires PAUSE_ROLE (account0 on local network). + /// ReoTestPlan 7.1. + pub fn reo_pause(&self) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + self.cast_send(&reo, "pause()", &[])?; + Ok(()) + } + + /// Unpause the REO contract. Requires PAUSE_ROLE. + /// ReoTestPlan 7.1. + pub fn reo_unpause(&self) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + self.cast_send(&reo, "unpause()", &[])?; + Ok(()) + } + + /// Check if the REO contract is paused. ReoTestPlan 1.5 / 7.1. + pub fn reo_is_paused(&self) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call(reo, "paused()(bool)", &[])?; + Ok(output.trim() == "true") + } + + /// Renew eligibility for multiple indexers in a batch. ReoTestPlan 3.3. 
+ pub fn reo_renew_batch(&self, addresses: &[&str]) -> Result<()> { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")? + .to_string(); + let array = format!("[{}]", addresses.join(",")); + self.cast_send( + &reo, + "renewIndexerEligibility(address[],bytes)", + &[&array, "0x"], + )?; + Ok(()) + } + + /// Get the eligibility renewal time for an indexer. ReoTestPlan 3.2. + pub fn reo_renewal_time(&self, address: &str) -> Result { + let reo = self + .contracts + .reo + .as_deref() + .context("REO contract not deployed")?; + let output = self.cast_call( + reo, + "getEligibilityRenewalTime(address)(uint256)", + &[address], + )?; + cast_parse_uint(&output) + .parse() + .context("parsing eligibilityRenewalTime") + } + + /// Check the RewardsManager → REO integration. ReoTestPlan 1.4. + pub fn rewards_manager_reo_address(&self) -> Result { + let output = self.cast_call( + &self.contracts.rewards_manager, + "getRewardsEligibilityOracle()(address)", + &[], + )?; + Ok(output.trim().to_string()) + } + + /// Get the latest block timestamp from the chain. + pub fn get_block_timestamp(&self) -> Result { + let output = run_command( + Command::new("cast") + .arg("block") + .arg("latest") + .arg("--field=timestamp") + .arg(format!("--rpc-url={}", self.rpc_url)), + )?; + cast_parse_uint(&output) + .parse() + .context("parsing block timestamp") + } + + // --- Rewards View Functions (ReoTestPlan Cycle 6) --- + + /// Query pending rewards for an allocation via RewardsManager.getRewards(). + /// ReoTestPlan 6.5: view functions should return 0 for ineligible indexers. 
+ pub fn rewards_pending(&self, allocation_id: &str) -> Result { + let output = self.cast_call( + &self.contracts.rewards_manager, + "getRewards(address,address)(uint256)", + &[&self.contracts.subgraph_service, allocation_id], + )?; + cast_parse_uint(&output) + .parse() + .context("parsing pending rewards") + } + + // --- Utility helpers --- + + /// Get the latest block number (sync, via cast). + pub fn get_block_number_sync(&self) -> Result { + let output = run_command( + Command::new("cast") + .arg("block-number") + .arg(format!("--rpc-url={}", self.rpc_url)), + )?; + cast_parse_uint(&output) + .parse() + .context("parsing block number") + } + + /// Query event logs in a block range for a specific contract address. + /// Returns parsed JSON log objects. + pub fn cast_logs_json( + &self, + address: &str, + from_block: u64, + to_block: u64, + ) -> Result> { + let mut cmd = Command::new("cast"); + cmd.arg("logs") + .arg("--json") + .arg(format!("--from-block={from_block}")) + .arg(format!("--to-block={to_block}")) + .arg(format!("--address={address}")) + .arg(format!("--rpc-url={}", self.rpc_url)); + let output = run_command(&mut cmd)?; + let logs: Vec = + serde_json::from_str(&output).context("parsing cast logs JSON")?; + Ok(logs) + } + + /// Compute keccak256 hash of a string via cast. + pub fn cast_keccak(&self, input: &str) -> Result { + let output = run_command(Command::new("cast").arg("keccak").arg(input))?; + Ok(output.trim().to_string()) + } +} + +/// Run a command, returning trimmed stdout on success or an error with stderr. +fn run_command(cmd: &mut Command) -> Result { + let output = cmd.output().context("spawning command")?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + let program = cmd.get_program().to_string_lossy().to_string(); + anyhow::bail!("{program} failed: {stderr}"); + } + Ok(String::from_utf8(output.stdout)?.trim().to_string()) +} + +/// Extract the numeric value from cast output. 
+/// +/// Cast formats large numbers with a human-readable suffix: +/// `1771675624 [1.771e9]` +/// This returns just the first whitespace-delimited token (`1771675624`). +pub fn cast_parse_uint(raw: &str) -> &str { + raw.split_whitespace().next().unwrap_or(raw) +} diff --git a/tests/src/graphql.rs b/tests/src/graphql.rs new file mode 100644 index 0000000..0269a92 --- /dev/null +++ b/tests/src/graphql.rs @@ -0,0 +1,189 @@ +//! GraphQL query helpers for the network subgraph and indexer management API. + +use anyhow::{Context, Result}; +use serde_json::Value; + +use crate::TestNetwork; + +impl TestNetwork { + /// Execute a GraphQL query against the network subgraph (graph-node). + pub async fn subgraph_query(&self, query: &str) -> Result { + self.graphql_post(&self.subgraph_url, query, None).await + } + + /// Execute a GraphQL query/mutation against the indexer management API. + pub async fn management_query(&self, query: &str) -> Result { + self.graphql_post(&self.management_url, query, None).await + } + + /// Send a query through the gateway for a specific subgraph. + pub async fn gateway_query(&self, query: &str) -> Result { + let url = format!("{}/api/subgraphs/id/{}", self.gateway_url, self.subgraph_id); + let client = reqwest::Client::new(); + let body = serde_json::json!({ "query": query }); + let resp = client + .post(&url) + .header("content-type", "application/json") + .header("Authorization", format!("Bearer {}", self.gateway_api_key)) + .json(&body) + .send() + .await + .context("sending gateway query")?; + Ok(resp) + } + + /// Send N queries through the gateway. Returns (success_count, fail_count). 
+ pub async fn send_gateway_queries(&self, count: usize) -> Result<(usize, usize)> { + let query = r#"{ _meta { block { number } } }"#; + let mut success = 0; + let mut fail = 0; + for _ in 0..count { + match self.gateway_query(query).await { + Ok(resp) if resp.status().is_success() => success += 1, + _ => fail += 1, + } + } + Ok((success, fail)) + } + + /// Query the indexer entity from the network subgraph. + /// Address must be lowercase (subgraph convention). + /// Includes fields needed for BaselineTestPlan 6.1 (indexer health). + pub async fn query_indexer(&self, address: &str) -> Result { + let addr = address.to_lowercase(); + let query = format!( + r#"{{ indexer(id: "{addr}") {{ + id stakedTokens allocatedTokens availableStake url geoHash + delegatedTokens queryFeesCollected rewardsEarned + allocations(where: {{ status: Active }}) {{ + id subgraphDeployment {{ ipfsHash }} + }} + }} }}"# + ); + let resp = self.subgraph_query(&query).await?; + Ok(resp["data"]["indexer"].clone()) + } + + /// Query provisions for an indexer from the network subgraph. + pub async fn query_provisions(&self, indexer: &str) -> Result { + let addr = indexer.to_lowercase(); + let query = format!( + r#"{{ provisions(where: {{ indexer: "{addr}" }}) {{ + id tokensProvisioned tokensAllocated tokensThawing thawingPeriod + dataService {{ id }} + }} }}"# + ); + let resp = self.subgraph_query(&query).await?; + Ok(resp["data"]["provisions"].clone()) + } + + /// Query active allocations for an indexer from the network subgraph. + pub async fn query_active_allocations(&self, indexer: &str) -> Result { + let addr = indexer.to_lowercase(); + let query = format!( + r#"{{ allocations(where: {{ indexer: "{addr}", status: Active }}) {{ + id allocatedTokens createdAtEpoch + subgraphDeployment {{ ipfsHash }} + }} }}"# + ); + let resp = self.subgraph_query(&query).await?; + Ok(resp["data"]["allocations"].clone()) + } + + /// Query a single allocation by ID from the network subgraph. 
+ pub async fn query_allocation(&self, id: &str) -> Result { + let alloc_id = id.to_lowercase(); + let query = format!( + r#"{{ allocation(id: "{alloc_id}") {{ + id status allocatedTokens indexingRewards + createdAtEpoch closedAtEpoch + subgraphDeployment {{ ipfsHash }} + }} }}"# + ); + let resp = self.subgraph_query(&query).await?; + Ok(resp["data"]["allocation"].clone()) + } + + /// Query network-level metrics from the network subgraph. + /// Includes fields needed for BaselineTestPlan 6.2 (network health). + pub async fn query_network(&self) -> Result { + let query = r#"{ graphNetworks(first: 1) { + currentEpoch totalTokensStaked totalTokensAllocated + totalQueryFees totalIndexingRewards + } }"#; + let resp = self.subgraph_query(query).await?; + Ok(resp["data"]["graphNetworks"][0].clone()) + } + + /// Get the latest block number indexed by graph-node (from the network subgraph). + /// This is safer than `get_block_number()` for use with the indexer-agent, + /// which needs graph-node to have the block hash cached. + pub async fn subgraph_block_number(&self) -> Result { + let query = r#"{ _meta { block { number } } }"#; + let resp = self.subgraph_query(query).await?; + resp["data"]["_meta"]["block"]["number"] + .as_u64() + .context("subgraph _meta block number not found") + } + + /// Query the block-oracle subgraph for epoch block number data. + /// Returns true if the block-oracle has processed the given epoch. + pub async fn block_oracle_has_epoch(&self, epoch: u64) -> Result { + let epoch_id = format!("{epoch}-eip155:1337"); + let query = format!(r#"{{ networkEpochBlockNumber(id: "{epoch_id}") {{ epochNumber }} }}"#); + let resp = self + .graphql_post(&self.block_oracle_subgraph_url, &query, None) + .await?; + Ok(!resp["data"]["networkEpochBlockNumber"].is_null()) + } + + /// Query the TAP subgraph for escrow accounts. 
+ pub async fn query_tap_escrow_accounts(&self) -> Result { + let query = r#"{ escrowAccounts(first: 10) { + balance + sender { id } + receiver { id } + } }"#; + // TAP subgraph may be empty — treat GraphQL errors as empty result + let resp = self.graphql_post(&self.tap_subgraph_url, query, None).await; + match resp { + Ok(v) => Ok(v["data"]["escrowAccounts"].clone()), + Err(_) => Ok(Value::Array(vec![])), + } + } + + /// Low-level GraphQL POST. Returns the parsed JSON response. + async fn graphql_post( + &self, + url: &str, + query: &str, + variables: Option<&Value>, + ) -> Result { + let client = reqwest::Client::new(); + let mut body = serde_json::json!({ "query": query }); + if let Some(vars) = variables { + body["variables"] = vars.clone(); + } + let resp = client + .post(url) + .header("content-type", "application/json") + .json(&body) + .send() + .await + .with_context(|| format!("POST {url}"))?; + let status = resp.status(); + let text = resp.text().await.context("reading response body")?; + if !status.is_success() { + anyhow::bail!("GraphQL request to {url} failed ({status}): {text}"); + } + let json: Value = serde_json::from_str(&text) + .with_context(|| format!("parsing JSON from {url}: {text}"))?; + if let Some(errors) = json.get("errors") + && errors.is_array() + && !errors.as_array().unwrap().is_empty() + { + anyhow::bail!("GraphQL errors from {url}: {errors}"); + } + Ok(json) + } +} diff --git a/tests/src/lib.rs b/tests/src/lib.rs new file mode 100644 index 0000000..c64818b --- /dev/null +++ b/tests/src/lib.rs @@ -0,0 +1,266 @@ +//! Integration test helpers for the local network. +//! +//! Provides `TestNetwork` — a typed interface to the local network services +//! (chain RPC, subgraph, gateway, indexer management API, contract calls). 
+ +pub mod cast; +pub mod graphql; +pub mod management; +pub mod polling; +pub mod staking; + +use anyhow::{Context, Result}; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +/// Typed interface to a running local network. +/// +/// Created from environment variables (`.env` + `.env.local`). +/// All URLs default to devcontainer-friendly hostnames (service names on the +/// Docker network) with fallback to localhost for host-side execution. +#[derive(Debug, Clone)] +pub struct TestNetwork { + pub rpc_url: String, + pub subgraph_url: String, + pub block_oracle_subgraph_url: String, + pub tap_subgraph_url: String, + pub gateway_url: String, + pub management_url: String, + pub gateway_api_key: String, + pub subgraph_id: String, + pub indexer_address: String, + pub account0_secret: String, + /// The indexer's private key (RECEIVER_SECRET). Needed for calling + /// `collect()` on the SubgraphService (requires `onlyAuthorizedForProvision`). + pub receiver_secret: String, + pub chain_id: u64, + /// Contract addresses loaded from config-local volume via `docker exec`. + pub contracts: Contracts, +} + +/// Contract addresses loaded from the config-local Docker volume. +#[derive(Debug, Clone, Default)] +pub struct Contracts { + pub epoch_manager: String, + pub rewards_manager: String, + pub horizon_staking: String, + pub subgraph_service: String, + pub payments_escrow: String, + pub grt_token: String, + pub reo: Option, +} + +impl TestNetwork { + /// Build a `TestNetwork` from `.env` (and `.env.local` if present). + /// + /// Expects to be called from the repo root, or with `repo_root` pointing there. 
+ pub fn from_env(repo_root: &Path) -> Result { + let vars = load_env_files(repo_root)?; + + let chain_host = std::env::var("CHAIN_HOST").unwrap_or_else(|_| { + vars.get("CHAIN_HOST") + .cloned() + .unwrap_or("localhost".into()) + }); + let chain_port = vars.get("CHAIN_RPC_PORT").cloned().unwrap_or("8545".into()); + let graph_host = std::env::var("GRAPH_NODE_HOST").unwrap_or_else(|_| { + vars.get("GRAPH_NODE_HOST") + .cloned() + .unwrap_or("localhost".into()) + }); + let graph_port = vars + .get("GRAPH_NODE_GRAPHQL_PORT") + .cloned() + .unwrap_or("8000".into()); + let gateway_host = std::env::var("GATEWAY_HOST").unwrap_or_else(|_| { + vars.get("GATEWAY_HOST") + .cloned() + .unwrap_or("localhost".into()) + }); + let gateway_port = vars.get("GATEWAY_PORT").cloned().unwrap_or("7700".into()); + let mgmt_host = std::env::var("INDEXER_AGENT_HOST").unwrap_or_else(|_| { + vars.get("INDEXER_AGENT_HOST") + .cloned() + .unwrap_or("localhost".into()) + }); + let mgmt_port = vars + .get("INDEXER_MANAGEMENT_PORT") + .cloned() + .unwrap_or("7600".into()); + + let rpc_url = format!("http://{chain_host}:{chain_port}"); + let subgraph_url = format!("http://{graph_host}:{graph_port}/subgraphs/name/graph-network"); + let block_oracle_subgraph_url = + format!("http://{graph_host}:{graph_port}/subgraphs/name/block-oracle"); + let tap_subgraph_url = + format!("http://{graph_host}:{graph_port}/subgraphs/name/semiotic/tap"); + let gateway_url = format!("http://{gateway_host}:{gateway_port}"); + let management_url = format!("http://{mgmt_host}:{mgmt_port}"); + + let gateway_api_key = vars + .get("GATEWAY_API_KEY") + .cloned() + .unwrap_or("deadbeefdeadbeefdeadbeefdeadbeef".into()); + let subgraph_id = vars + .get("SUBGRAPH") + .cloned() + .context("SUBGRAPH not set in .env")?; + let indexer_address = vars + .get("RECEIVER_ADDRESS") + .cloned() + .context("RECEIVER_ADDRESS not set in .env")?; + let account0_secret = vars + .get("ACCOUNT0_SECRET") + .cloned() + .context("ACCOUNT0_SECRET not 
set in .env")?; + let receiver_secret = vars + .get("RECEIVER_SECRET") + .cloned() + .context("RECEIVER_SECRET not set in .env")?; + let chain_id = vars + .get("CHAIN_ID") + .and_then(|v| v.parse().ok()) + .unwrap_or(1337); + + let contracts = load_contracts()?; + + Ok(Self { + rpc_url, + subgraph_url, + block_oracle_subgraph_url, + tap_subgraph_url, + gateway_url, + management_url, + gateway_api_key, + subgraph_id, + indexer_address, + account0_secret, + receiver_secret, + chain_id, + contracts, + }) + } + + /// Convenience: build from the default repo root (two levels up from this crate). + pub fn from_default_env() -> Result { + let manifest = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let repo_root = manifest + .parent() + .context("tests/ crate must be inside the repo root")?; + Self::from_env(repo_root) + } +} + +/// Parse a simple `.env` file (KEY=VALUE, ignoring comments and blank lines). +/// Does NOT handle shell expansion like `${VAR}`. +fn parse_env_file(path: &Path) -> Result> { + let content = + std::fs::read_to_string(path).with_context(|| format!("reading {}", path.display()))?; + let mut map = HashMap::new(); + for line in content.lines() { + let trimmed = line.trim(); + if trimmed.is_empty() || trimmed.starts_with('#') { + continue; + } + if let Some((key, value)) = trimmed.split_once('=') { + let key = key.trim(); + let value = value.trim().trim_matches('"'); + // Skip lines that use shell variable expansion (e.g. ${FOO}) + if !value.contains("${") { + map.insert(key.to_string(), value.to_string()); + } + } + } + Ok(map) +} + +/// Load `.env` and optionally `.env.local`, with `.env.local` values taking precedence. 
+fn load_env_files(repo_root: &Path) -> Result> { + let mut vars = parse_env_file(&repo_root.join(".env"))?; + let local_path = repo_root.join(".env.local"); + if local_path.exists() { + let local_vars = parse_env_file(&local_path)?; + vars.extend(local_vars); + } + Ok(vars) +} + +/// Load contract addresses from the config-local Docker volume via `docker exec`. +fn load_contracts() -> Result { + let horizon_json = docker_cat("graph-node", "/opt/config/horizon.json") + .context("reading horizon.json from graph-node container")?; + let horizon: serde_json::Value = + serde_json::from_str(&horizon_json).context("parsing horizon.json")?; + + let epoch_manager = horizon["1337"]["EpochManager"]["address"] + .as_str() + .context("EpochManager address not found in horizon.json")? + .to_string(); + + let rewards_manager = horizon["1337"]["RewardsManager"]["address"] + .as_str() + .context("RewardsManager address not found in horizon.json")? + .to_string(); + + let horizon_staking = horizon["1337"]["HorizonStaking"]["address"] + .as_str() + .context("HorizonStaking address not found in horizon.json")? + .to_string(); + + let payments_escrow = horizon["1337"]["PaymentsEscrow"]["address"] + .as_str() + .context("PaymentsEscrow address not found in horizon.json")? + .to_string(); + + let grt_token = horizon["1337"]["L2GraphToken"]["address"] + .as_str() + .context("L2GraphToken address not found in horizon.json")? + .to_string(); + + // SubgraphService is in a separate address book + let ss_json = docker_cat("graph-node", "/opt/config/subgraph-service.json") + .context("reading subgraph-service.json from graph-node container")?; + let ss: serde_json::Value = + serde_json::from_str(&ss_json).context("parsing subgraph-service.json")?; + let subgraph_service = ss["1337"]["SubgraphService"]["address"] + .as_str() + .context("SubgraphService address not found in subgraph-service.json")? 
+ .to_string(); + + // REO address is in issuance.json (optional — may not be deployed) + let reo = docker_cat("graph-node", "/opt/config/issuance.json") + .ok() + .and_then(|json| serde_json::from_str::(&json).ok()) + .and_then(|v| { + v["1337"]["RewardsEligibilityOracle"]["address"] + .as_str() + .map(String::from) + }); + + Ok(Contracts { + epoch_manager, + rewards_manager, + horizon_staking, + subgraph_service, + payments_escrow, + grt_token, + reo, + }) +} + +/// Read a file from a running Docker container. +fn docker_cat(container: &str, path: &str) -> Result { + let output = std::process::Command::new("docker") + .args(["exec", container, "cat", path]) + .output() + .context("running docker exec")?; + if !output.status.success() { + anyhow::bail!( + "docker exec {} cat {} failed: {}", + container, + path, + String::from_utf8_lossy(&output.stderr) + ); + } + Ok(String::from_utf8(output.stdout)?) +} diff --git a/tests/src/management.rs b/tests/src/management.rs new file mode 100644 index 0000000..c444387 --- /dev/null +++ b/tests/src/management.rs @@ -0,0 +1,73 @@ +//! Indexer management API helpers (indexer-agent GraphQL mutations). + +use anyhow::{Context, Result}; +use serde_json::Value; + +use crate::TestNetwork; + +/// Protocol network identifier for the local chain. +const PROTOCOL_NETWORK: &str = "eip155:1337"; + +impl TestNetwork { + /// Create an allocation via the indexer management API. + /// `deployment` is the IPFS hash (e.g., "QmXU9FEf..."). + /// `amount` is in GRT (e.g., "0.01"). + /// Returns the mutation result with `allocation` (ID), `deployment`, `allocatedTokens`. 
    pub async fn create_allocation(&self, deployment: &str, amount: &str) -> Result<Value> {
        // NOTE(review): query is built by string interpolation; inputs come from
        // our own test fixtures, not untrusted users, so injection is not a concern here.
        let query = format!(
            r#"mutation {{
                createAllocation(
                    deployment: "{deployment}",
                    amount: "{amount}",
                    protocolNetwork: "{PROTOCOL_NETWORK}"
                ) {{
                    allocation deployment allocatedTokens
                }}
            }}"#
        );
        let resp = self.management_query(&query).await?;
        // A null payload means the mutation failed server-side; surface it as an error.
        resp["data"]["createAllocation"]
            .as_object()
            .context("createAllocation returned null")?;
        Ok(resp["data"]["createAllocation"].clone())
    }

    /// Close an allocation via the indexer management API.
    ///
    /// Provides `blockNumber` explicitly because the indexer-agent's auto-resolution
    /// returns null when `force=true` is used without a block number.
    /// Uses the subgraph's latest indexed block (not the chain tip) to ensure
    /// graph-node has the block hash cached.
    /// Returns the mutation result with `allocation`, `allocatedTokens`, `indexingRewards`.
    pub async fn close_allocation(&self, allocation_id: &str) -> Result<Value> {
        let block_number = self.subgraph_block_number().await?;
        let query = format!(
            r#"mutation {{
                closeAllocation(
                    allocation: "{allocation_id}",
                    blockNumber: {block_number},
                    force: true,
                    protocolNetwork: "{PROTOCOL_NETWORK}"
                ) {{
                    allocation allocatedTokens indexingRewards
                }}
            }}"#
        );
        let resp = self.management_query(&query).await?;
        resp["data"]["closeAllocation"]
            .as_object()
            .context("closeAllocation returned null")?;
        Ok(resp["data"]["closeAllocation"].clone())
    }

    /// Get allocations from the indexer management API.
    /// Returns the raw `indexerAllocations` JSON array (id, deployment, tokens,
    /// created/closed epoch, status); callers filter on `closedAtEpoch`.
    pub async fn get_allocations(&self) -> Result<Value> {
        let query = format!(
            r#"{{ indexerAllocations(protocolNetwork: "{PROTOCOL_NETWORK}") {{
                id subgraphDeployment allocatedTokens createdAtEpoch closedAtEpoch status
            }} }}"#
        );
        let resp = self.management_query(&query).await?;
        Ok(resp["data"]["indexerAllocations"].clone())
    }
}
diff --git a/tests/src/polling.rs b/tests/src/polling.rs
new file mode 100644
index 0000000..121ff24
--- /dev/null
+++ b/tests/src/polling.rs
@@ -0,0 +1,308 @@
//! Polling, retry, block mining, and epoch advancement helpers.

use anyhow::{Context, Result};
use std::future::Future;
use std::time::{Duration, Instant};

use crate::TestNetwork;
use crate::cast::cast_parse_uint;

/// Result of a `poll_until` call.
#[derive(Debug)]
pub enum PollResult<T> {
    /// Condition was met, with the final value.
    Ready(T),
    /// Timed out before the condition was met.
    TimedOut,
}

impl<T> PollResult<T> {
    /// Unwrap the ready value, panicking if the poll timed out.
    pub fn unwrap(self) -> T {
        match self {
            PollResult::Ready(v) => v,
            PollResult::TimedOut => panic!("poll_until timed out"),
        }
    }

    /// True if the condition was met before the timeout.
    pub fn is_ready(&self) -> bool {
        matches!(self, PollResult::Ready(_))
    }
}

impl TestNetwork {
    /// Poll a condition until it returns `Some(T)` or the timeout expires.
    /// Errors from `check` are logged and treated as "not ready yet".
    pub async fn poll_until<T, F, Fut>(
        &self,
        timeout: Duration,
        interval: Duration,
        mut check: F,
    ) -> PollResult<T>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = Result<Option<T>>>,
    {
        let start = Instant::now();
        loop {
            match check().await {
                Ok(Some(value)) => return PollResult::Ready(value),
                Ok(None) => {}
                Err(e) => {
                    eprintln!("poll_until check error (continuing): {e:#}");
                }
            }
            if start.elapsed() >= timeout {
                return PollResult::TimedOut;
            }
            tokio::time::sleep(interval).await;
        }
    }

    /// Mine `count` blocks, advancing chain time by 12s per block (mimics Ethereum).
+ pub async fn mine_blocks(&self, count: u32) -> Result<()> { + let client = reqwest::Client::new(); + for _ in 0..count { + // Advance time by 12 seconds + client + .post(&self.rpc_url) + .json(&serde_json::json!({ + "jsonrpc": "2.0", + "method": "evm_increaseTime", + "params": [12], + "id": 1 + })) + .send() + .await + .context("evm_increaseTime")?; + + // Mine the block + client + .post(&self.rpc_url) + .json(&serde_json::json!({ + "jsonrpc": "2.0", + "method": "evm_mine", + "params": [], + "id": 2 + })) + .send() + .await + .context("evm_mine")?; + } + Ok(()) + } + + /// Advance N epochs by mining blocks one epoch at a time. + /// + /// Advances one epoch per iteration, waiting for the block-oracle to process + /// each transition. This avoids gaps in the block-oracle subgraph which would + /// cause the indexer-agent to fail when closing allocations (it needs block + /// hashes for every epoch boundary). + /// + /// Returns the new epoch number. + pub async fn advance_epochs(&self, n: u32) -> Result { + let em = &self.contracts.epoch_manager; + let raw = self.cast_call(em, "epochLength()(uint256)", &[])?; + let epoch_length: u64 = cast_parse_uint(&raw) + .parse() + .context("parsing epochLength")?; + + let mut new_epoch = 0u64; + for i in 0..n { + let raw = self.cast_call(em, "currentEpoch()(uint256)", &[])?; + let current_epoch: u64 = cast_parse_uint(&raw) + .parse() + .context("parsing currentEpoch")?; + let current_block: u64 = self.get_block_number().await?; + let raw = self.cast_call(em, "currentEpochBlock()(uint256)", &[])?; + let epoch_block: u64 = cast_parse_uint(&raw) + .parse() + .context("parsing currentEpochBlock")?; + + let blocks_in_epoch = current_block.saturating_sub(epoch_block); + let blocks_to_mine = epoch_length - blocks_in_epoch; + + eprintln!( + "advance_epochs: step {}/{n}, epoch={current_epoch}, \ + mining {blocks_to_mine} blocks", + i + 1 + ); + + self.mine_blocks(blocks_to_mine as u32).await?; + + // Emit the EpochRun event so the 
network subgraph updates. + self.cast_send(em, "runEpoch()", &[])?; + + let raw = self.cast_call(em, "currentEpoch()(uint256)", &[])?; + new_epoch = cast_parse_uint(&raw) + .parse() + .context("parsing new currentEpoch")?; + + // Wait for both subgraphs to index this epoch before advancing further. + // The block-oracle needs to process each epoch individually to avoid gaps. + self.wait_for_epoch_sync(new_epoch).await?; + } + + Ok(new_epoch) + } + + /// Wait until both the network subgraph and block-oracle subgraph reflect + /// `target_epoch`. Mines a block each iteration to provide confirmations + /// for the block-oracle's DataEdge transactions on the automine chain. + async fn wait_for_epoch_sync(&self, target_epoch: u64) -> Result<()> { + let timeout = Duration::from_secs(120); + let interval = Duration::from_secs(2); + let start = Instant::now(); + + let mut network_ready = false; + let mut oracle_ready = false; + let mut resumed = false; + + loop { + if !network_ready { + let network = self.query_network().await?; + let subgraph_epoch = network["currentEpoch"] + .as_u64() + .or_else(|| { + network["currentEpoch"] + .as_str() + .and_then(|s| s.parse().ok()) + }) + .unwrap_or(0); + if subgraph_epoch >= target_epoch { + network_ready = true; + } + } + + if !oracle_ready { + match self.block_oracle_has_epoch(target_epoch).await { + Ok(true) => oracle_ready = true, + Ok(false) => {} + Err(e) => { + eprintln!(" block-oracle check error (continuing): {e:#}"); + } + } + } + + if network_ready && oracle_ready { + return Ok(()); + } + + if start.elapsed() >= timeout { + anyhow::bail!( + "Epoch sync to {target_epoch} timed out after {timeout:?} \ + (network_subgraph={network_ready}, block_oracle={oracle_ready})" + ); + } + + // The indexer-agent may pause subgraphs during testing. If the + // network subgraph hasn't caught up after 15s, resume all subgraphs. 
+ if !network_ready && !resumed && start.elapsed() >= Duration::from_secs(15) { + eprintln!(" Subgraph slow to sync — resuming subgraphs..."); + self.resume_subgraphs().await; + resumed = true; + } + + // Mine a block to provide confirmations for block-oracle DataEdge txs + self.mine_blocks(1).await?; + tokio::time::sleep(interval).await; + } + } + + /// Resume all known subgraphs via graph-node admin API. + /// The indexer-agent may pause subgraphs during test runs; this ensures + /// they keep indexing. + async fn resume_subgraphs(&self) { + let admin_url = self + .subgraph_url + .replace(":8000/subgraphs/name/graph-network", ":8020/"); + let client = reqwest::Client::new(); + for name in ["graph-network", "block-oracle", "semiotic/tap"] { + // Get the deployment ID for this subgraph + let meta_url = self.subgraph_url.replace("graph-network", name); + let meta_resp = client + .post(&meta_url) + .header("content-type", "application/json") + .json(&serde_json::json!({"query": "{ _meta { deployment } }"})) + .send() + .await; + let deployment = match meta_resp { + Ok(resp) => { + let json: serde_json::Value = match resp.json().await { + Ok(j) => j, + Err(_) => continue, + }; + match json["data"]["_meta"]["deployment"].as_str() { + Some(d) => d.to_string(), + None => continue, + } + } + Err(_) => continue, + }; + let _ = client + .post(&admin_url) + .header("content-type", "application/json") + .json(&serde_json::json!({ + "jsonrpc": "2.0", + "method": "subgraph_resume", + "params": {"deployment": deployment}, + "id": 1 + })) + .send() + .await; + } + } + + /// Advance chain time by `seconds` and mine one block. + /// Useful for expiring eligibility periods without mining many blocks. 
    pub async fn advance_time(&self, seconds: u64) -> Result<()> {
        let client = reqwest::Client::new();
        client
            .post(&self.rpc_url)
            .json(&serde_json::json!({
                "jsonrpc": "2.0",
                "method": "evm_increaseTime",
                "params": [seconds],
                "id": 1
            }))
            .send()
            .await
            .context("evm_increaseTime")?;
        // Mine one block so the new timestamp becomes observable on-chain.
        client
            .post(&self.rpc_url)
            .json(&serde_json::json!({
                "jsonrpc": "2.0",
                "method": "evm_mine",
                "params": [],
                "id": 2
            }))
            .send()
            .await
            .context("evm_mine")?;
        Ok(())
    }

    /// Get the latest block number from the chain.
    pub async fn get_block_number(&self) -> Result<u64> {
        let client = reqwest::Client::new();
        let resp: serde_json::Value = client
            .post(&self.rpc_url)
            .json(&serde_json::json!({
                "jsonrpc": "2.0",
                "method": "eth_blockNumber",
                "params": [],
                "id": 1
            }))
            .send()
            .await
            .context("eth_blockNumber")?
            .json()
            .await
            .context("parsing eth_blockNumber response")?;

        // Result is a 0x-prefixed hex string per the JSON-RPC spec.
        let hex = resp["result"]
            .as_str()
            .context("eth_blockNumber result not a string")?;
        let num = u64::from_str_radix(hex.trim_start_matches("0x"), 16)
            .context("parsing hex block number")?;
        Ok(num)
    }
}
diff --git a/tests/src/staking.rs b/tests/src/staking.rs
new file mode 100644
index 0000000..e8631a8
--- /dev/null
+++ b/tests/src/staking.rs
@@ -0,0 +1,141 @@
//! Staking and provision management operations.
//!
//! Higher-level functions that emulate what Explorer UI and `graph indexer` CLI
//! do for stake and provision management. Each function maps to a specific
//! BaselineTestPlan operation.
//!
//! Mapping:
//! - `stake_tokens` → Explorer "Add Stake" (BaselineTestPlan 2.1)
//! - `unstake_tokens` → Explorer "Unstake" (BaselineTestPlan 2.2)
//! - `provision_add` → `graph indexer provisions add` (BaselineTestPlan 3.2)
//! - `provision_thaw` → `graph indexer provisions thaw` (BaselineTestPlan 3.3)
//! - `provision_deprovision` → `graph indexer provisions remove` (BaselineTestPlan 3.4)

use anyhow::{Context, Result};

use crate::TestNetwork;
use crate::cast::cast_parse_uint;

impl TestNetwork {
    // --- Stake Management (BaselineTestPlan Cycle 2) ---

    /// Add stake to the indexer via `HorizonStaking.stakeTo()`.
    /// Emulates Explorer "Add Stake" (BaselineTestPlan 2.1).
    ///
    /// Account0 approves and stakes GRT to the indexer. In production,
    /// the indexer does this through Explorer using their own GRT.
    /// `amount_wei` is in wei (e.g., "1000000000000000000000" for 1000 GRT).
    pub fn stake_tokens(&self, amount_wei: &str) -> Result<()> {
        // approve-then-stake: the staking contract pulls tokens via transferFrom.
        self.cast_send(
            &self.contracts.grt_token,
            "approve(address,uint256)",
            &[&self.contracts.horizon_staking, amount_wei],
        )?;
        self.cast_send(
            &self.contracts.horizon_staking,
            "stakeTo(address,uint256)",
            &[&self.indexer_address, amount_wei],
        )?;
        Ok(())
    }

    /// Unstake idle (unprovisioned) tokens via `HorizonStaking.unstake()`.
    /// Emulates Explorer "Unstake" (BaselineTestPlan 2.2).
    ///
    /// Only works on idle stake (not provisioned or allocated).
    /// Called as the indexer (RECEIVER_SECRET).
    pub fn unstake_tokens(&self, amount_wei: &str) -> Result<()> {
        self.cast_send_as_indexer(
            &self.contracts.horizon_staking,
            "unstake(uint256)",
            &[amount_wei],
        )?;
        Ok(())
    }

    /// Get idle (unprovisioned, unallocated) stake for the indexer, in wei.
    pub fn idle_stake(&self) -> Result<u128> {
        let output = self.cast_call(
            &self.contracts.horizon_staking,
            "getIdleStake(address)(uint256)",
            &[&self.indexer_address],
        )?;
        cast_parse_uint(&output)
            .parse()
            .context("parsing idle stake")
    }

    // --- Provision Management (BaselineTestPlan Cycle 3) ---

    /// Add idle stake to the SubgraphService provision.
    /// Emulates `graph indexer provisions add` (BaselineTestPlan 3.2).
    ///
    /// Moves tokens from idle stake into the provision for SubgraphService.
    /// Called as the indexer (RECEIVER_SECRET).
    pub fn provision_add(&self, amount_wei: &str) -> Result<()> {
        self.cast_send_as_indexer(
            &self.contracts.horizon_staking,
            "addToProvision(address,address,uint256)",
            &[
                &self.indexer_address,
                &self.contracts.subgraph_service,
                amount_wei,
            ],
        )?;
        Ok(())
    }

    /// Initiate thawing from the SubgraphService provision.
    /// Emulates `graph indexer provisions thaw` (BaselineTestPlan 3.3).
    ///
    /// Starts the thawing process. Tokens remain locked until the thawing
    /// period expires, then `provision_deprovision()` completes the removal.
    /// Called as the indexer (RECEIVER_SECRET).
    pub fn provision_thaw(&self, amount_wei: &str) -> Result<()> {
        self.cast_send_as_indexer(
            &self.contracts.horizon_staking,
            "thaw(address,address,uint256)",
            &[
                &self.indexer_address,
                &self.contracts.subgraph_service,
                amount_wei,
            ],
        )?;
        Ok(())
    }

    /// Complete removal of thawed stake from provision.
    /// Emulates `graph indexer provisions remove` (BaselineTestPlan 3.4).
    ///
    /// Can only succeed after the thawing period has elapsed.
    /// `n_thaw_requests` is typically 1 (one thaw request to process).
    /// Called as the indexer (RECEIVER_SECRET).
    pub fn provision_deprovision(&self, n_thaw_requests: u64) -> Result<()> {
        self.cast_send_as_indexer(
            &self.contracts.horizon_staking,
            "deprovision(address,address,uint256)",
            &[
                &self.indexer_address,
                &self.contracts.subgraph_service,
                &n_thaw_requests.to_string(),
            ],
        )?;
        Ok(())
    }

    /// Get the thawing period (seconds) for the indexer's SubgraphService provision.
    /// Queries the network subgraph for the provision's thawingPeriod field.
    pub async fn provision_thawing_period(&self) -> Result<u64> {
        let provisions = self.query_provisions(&self.indexer_address).await?;
        let provisions = provisions
            .as_array()
            .context("provisions should be an array")?;
        let first = provisions
            .first()
            .context("no provision found for indexer")?;
        // thawingPeriod may be serialized as a number or a string; accept both.
        first["thawingPeriod"]
            .as_u64()
            .or_else(|| first["thawingPeriod"].as_str().and_then(|s| s.parse().ok()))
            .context("thawingPeriod not found in provision")
    }
}
diff --git a/tests/tests/allocation_lifecycle.rs b/tests/tests/allocation_lifecycle.rs
new file mode 100644
index 0000000..2b0f3d6
--- /dev/null
+++ b/tests/tests/allocation_lifecycle.rs
@@ -0,0 +1,205 @@
//! Allocation Lifecycle Tests (BaselineTestPlan Cycles 4-5, 7)
//!
//! Exercises the allocation management and revenue collection workflow:
//! close existing allocation → verify → create new allocation → advance → close → verify
//!
//! Mapping to BaselineTestPlan:
//! - `close_and_recreate_allocation` → Cycle 4.2 (create) + 5.2 (close + rewards)
//! - `close_allocation_collects_rewards` → Cycle 5.2 (agent-mediated close with reward assertion)
//! - `gateway_query_serving` → Cycle 5.1 (query serving through gateway)
//!
//! The management API mutations (`createAllocation`, `closeAllocation`) emulate
//! what `graph indexer allocations create/close` does. The close path internally
//! triggers a multicall: collect(IndexingRewards) + stopService.

use anyhow::{Context, Result};
use local_network_tests::TestNetwork;
use serial_test::serial;

// Shared constructor: each test builds its own handle from .env.
fn net() -> Result<TestNetwork> {
    TestNetwork::from_default_env()
}

/// BaselineTestPlan 4.2 + 5.2: Create and close allocations.
///
/// Emulates `graph indexer allocations create` and `graph indexer allocations close`.
#[tokio::test]
#[serial]
async fn close_and_recreate_allocation() -> Result<()> {
    let net = net()?;

    // Pick an existing active allocation to close
    let allocs = net.get_allocations().await?;
    let allocs = allocs.as_array().context("expected allocation array")?;
    let active = allocs
        .iter()
        .find(|a| a["closedAtEpoch"].is_null())
        .context("no active allocation found to close")?;
    let alloc_id = active["id"].as_str().context("allocation missing id")?;
    let deployment = active["subgraphDeployment"]
        .as_str()
        .context("allocation missing deployment")?
        .to_string();

    // Advance epochs so allocation is old enough to close
    eprintln!("--- Advancing 2 epochs ---");
    let new_epoch = net.advance_epochs(2).await?;
    eprintln!("  Now at epoch {new_epoch}");

    // Close the existing allocation (emulates: graph indexer allocations close)
    eprintln!("--- Closing allocation {alloc_id} ---");
    let close_result = net.close_allocation(alloc_id).await?;
    let rewards = close_result["indexingRewards"].as_str().unwrap_or("0");
    eprintln!("  indexingRewards: {rewards}");

    assert_eq!(
        close_result["allocation"].as_str().unwrap_or(""),
        alloc_id,
        "Closed allocation ID should match"
    );

    // Create a new allocation for the same deployment (emulates: graph indexer allocations create)
    eprintln!("--- Creating new allocation for {deployment} ---");
    let amount = "0.01"; // GRT (management API takes GRT, not wei)
    let create_result = net.create_allocation(&deployment, amount).await?;
    let new_alloc_id = create_result["allocation"]
        .as_str()
        .context("createAllocation should return allocation ID")?;
    eprintln!("  Created allocation: {new_alloc_id}");

    assert!(
        !new_alloc_id.is_empty(),
        "Allocation ID should be non-empty"
    );
    assert_eq!(
        create_result["deployment"].as_str().unwrap_or(""),
        deployment,
        "Deployment should match"
    );

    // Advance 2 more epochs and close the new allocation
    eprintln!("--- Advancing 2 epochs ---");
    net.advance_epochs(2).await?;

    eprintln!("--- Closing new allocation {new_alloc_id} ---");
    let close_result = net.close_allocation(new_alloc_id).await?;
    let rewards = close_result["indexingRewards"].as_str().unwrap_or("0");
    eprintln!("  indexingRewards: {rewards}");

    assert_eq!(
        close_result["allocation"].as_str().unwrap_or(""),
        new_alloc_id,
        "Closed allocation ID should match"
    );

    // Re-create the allocation to restore network state
    eprintln!("--- Restoring allocation for {deployment} ---");
    net.create_allocation(&deployment, "0.01").await?;

    Ok(())
}

/// BaselineTestPlan 5.2: Close allocation via agent and verify indexingRewards > 0.
///
/// The indexer-agent's close flow does a multicall: collect(IndexingRewards) + stopService.
/// This test verifies that the agent-mediated close produces non-zero rewards.
/// Emulates `graph indexer allocations close` with reward verification.
#[tokio::test]
#[serial]
async fn close_allocation_collects_rewards() -> Result<()> {
    let net = net()?;

    // Find an active allocation
    let allocs = net.get_allocations().await?;
    let allocs = allocs.as_array().context("expected allocation array")?;
    let active = allocs
        .iter()
        .find(|a| a["closedAtEpoch"].is_null())
        .context("no active allocation found")?;
    let alloc_id = active["id"]
        .as_str()
        .context("allocation missing id")?
        .to_string();
    let deployment = active["subgraphDeployment"]
        .as_str()
        .context("allocation missing deployment")?
        .to_string();

    eprintln!("=== Close-collects-rewards test (BaselineTestPlan 5.2) ===");
    eprintln!("  Allocation: {alloc_id}");
    eprintln!("  Deployment: {deployment}");

    // Close and recreate so we have a fresh allocation with known epoch boundaries
    net.advance_epochs(2).await?;
    net.close_allocation(&alloc_id).await?;

    let result = net.create_allocation(&deployment, "0.01").await?;
    let fresh_alloc = result["allocation"]
        .as_str()
        .context("expected allocation ID")?
        .to_string();
    eprintln!("  Fresh allocation: {fresh_alloc}");

    // Advance epochs so rewards accumulate
    net.advance_epochs(2).await?;

    // Ensure indexer is eligible (eligibility may expire during epoch advancement)
    if net.contracts.reo.is_some() {
        net.reo_renew_indexer(&net.indexer_address)?;
        assert!(
            net.reo_is_eligible(&net.indexer_address)?,
            "Indexer must be eligible before close"
        );
    }

    // Close via agent — this triggers collect(IndexingRewards) + stopService multicall
    eprintln!("  Closing allocation via agent...");
    let close_result = net.close_allocation(&fresh_alloc).await?;
    let rewards_str = close_result["indexingRewards"].as_str().unwrap_or("0");
    let rewards: f64 = rewards_str.parse().unwrap_or(0.0);
    eprintln!("  indexingRewards: {rewards_str} ({rewards:.2} GRT)");

    assert!(
        rewards > 0.0,
        "Agent-mediated close should collect non-zero rewards. \
         Got indexingRewards={rewards_str}"
    );

    // Verify closed allocation in subgraph
    let alloc_data = net.query_allocation(&fresh_alloc).await?;
    assert_eq!(
        alloc_data["status"].as_str().unwrap_or(""),
        "Closed",
        "Allocation should be Closed in subgraph"
    );

    // Restore allocation
    net.advance_epochs(2).await?;
    net.create_allocation(&deployment, "0.01").await?;
    eprintln!("  Restored allocation for {deployment}");

    Ok(())
}

/// BaselineTestPlan 5.1: Send test queries through gateway.
///
/// Emulates the `query_test.sh` script from the test plan.
#[tokio::test]
#[serial]
async fn gateway_query_serving() -> Result<()> {
    let net = net()?;

    // Mine blocks to prevent "too far behind" errors
    net.mine_blocks(5).await?;

    eprintln!("--- Sending 10 queries through gateway ---");
    let (success, fail) = net.send_gateway_queries(10).await?;
    eprintln!("  {success} OK, {fail} failed");

    // Allows up to 2 flaky failures on the local network.
    assert!(
        success >= 8,
        "At least 8/10 gateway queries should succeed, got {success}/10"
    );

    Ok(())
}
diff --git a/tests/tests/eligibility.rs b/tests/tests/eligibility.rs
new file mode 100644
index 0000000..65e5b14
--- /dev/null
+++ b/tests/tests/eligibility.rs
@@ -0,0 +1,207 @@
//! REO Eligibility Lifecycle Tests (IndexerTestGuide Sets 2-4)
//!
//! Mapping to IndexerTestGuide:
//! - Set 2: Eligible indexer receives rewards (renew → close → rewards > 0)
//! - Set 3: Ineligible indexer denied rewards (expire → close → rewards = 0)
//! - Set 4: Optimistic recovery (expire → re-renew → close → full rewards)
//!
//! Uses deterministic contract calls via `renewIndexerEligibility` (account0 has
//! ORACLE_ROLE) and `evm_increaseTime` to expire eligibility periods.
//!
//! These tests share mutable chain state (allocations, eligibility, epoch) so they
//! run as a single sequential test to avoid races.
//!
//! No dependency on the REO node's async processing.

use anyhow::{Context, Result};
use local_network_tests::TestNetwork;

// Shared constructor: each test builds its own handle from .env.
fn net() -> Result<TestNetwork> {
    TestNetwork::from_default_env()
}

/// Parse a reward string (may be "0", "0.0", "123.456", etc.) to f64.
fn parse_rewards(s: &str) -> f64 {
    s.parse::<f64>().unwrap_or(0.0)
}

/// Helper: close an existing active allocation and return (deployment, alloc_id).
/// This frees the deployment for a new allocation.
async fn close_existing_allocation(net: &TestNetwork) -> Result<(String, String)> {
    let allocs = net.get_allocations().await?;
    let allocs = allocs.as_array().context("expected allocation array")?;
    let active = allocs
        .iter()
        .find(|a| a["closedAtEpoch"].is_null())
        .context("no active allocation found")?;
    let alloc_id = active["id"]
        .as_str()
        .context("allocation missing id")?
        .to_string();
    let deployment = active["subgraphDeployment"]
        .as_str()
        .context("allocation missing deployment")?
        .to_string();

    // Advance epochs so allocation is old enough to close
    net.advance_epochs(2).await?;
    net.close_allocation(&alloc_id).await?;

    Ok((deployment, alloc_id))
}

/// Helper: create allocation, advance epochs, and return the allocation ID.
async fn create_test_allocation(net: &TestNetwork, deployment: &str) -> Result<String> {
    let amount = "0.01"; // GRT (management API takes GRT, not wei)
    let result = net.create_allocation(deployment, amount).await?;
    let alloc_id = result["allocation"]
        .as_str()
        .context("expected allocation ID")?
        .to_string();

    // Advance epochs so it's old enough to close
    net.advance_epochs(2).await?;

    Ok(alloc_id)
}

/// IndexerTestGuide Sets 2, 3, and 4: Complete eligibility lifecycle.
///
/// Runs sequentially to avoid shared-state races.
/// Each section maps to an IndexerTestGuide set:
/// - Set 2.1: `renewIndexerEligibility` → `isEligible` = true
/// - Set 2.2: close allocation → `indexingRewards` > 0
/// - Set 3.1: advance past eligibility period → `isEligible` = false
/// - Set 3.2: close allocation → `indexingRewards` = 0
/// - Set 4.1: `renewIndexerEligibility` → `isEligible` = true (re-renewal)
/// - Set 4.2: close allocation → rewards > 0 AND > Set 2 rewards (optimistic)
#[tokio::test]
async fn eligibility_lifecycle() -> Result<()> {
    let net = net()?;
    // Soft skip: the REO overlay profile may not be enabled for this run.
    if net.contracts.reo.is_none() {
        eprintln!("REO not deployed, skipping all eligibility tests");
        return Ok(());
    }

    // Free up a deployment by closing an existing allocation
    eprintln!("=== Setup: close existing allocation to free a deployment ===");
    let (deployment, _) = close_existing_allocation(&net).await?;
    eprintln!("  Deployment: {deployment}");

    // ── Set 2: Eligible → close → verify rewards received ──
    eprintln!();
    eprintln!("=== Set 2: Eligible indexer closes allocation ===");

    net.reo_renew_indexer(&net.indexer_address)?;
    assert!(
        net.reo_is_eligible(&net.indexer_address)?,
        "Indexer should be eligible after renewal"
    );

    let alloc_id = create_test_allocation(&net, &deployment).await?;
    eprintln!("  Allocation: {alloc_id}");

    // Re-renew to ensure still eligible (time advanced during epoch mining)
    net.reo_renew_indexer(&net.indexer_address)?;
    assert!(
        net.reo_is_eligible(&net.indexer_address)?,
        "Indexer should still be eligible before close"
    );

    let close = net.close_allocation(&alloc_id).await?;
    let rewards = close["indexingRewards"].as_str().unwrap_or("0");
    let eligible_rewards = parse_rewards(rewards);
    eprintln!("  indexingRewards: {rewards} (eligible)");
    assert!(
        eligible_rewards > 0.0,
        "Set 2: Eligible indexer should receive rewards, got {rewards}"
    );

    // ── Set 3: Ineligible → close → verify rewards denied ──
    eprintln!();
    eprintln!("=== Set 3: Ineligible indexer denied rewards ===");

    net.reo_renew_indexer(&net.indexer_address)?;
    let alloc_id = create_test_allocation(&net, &deployment).await?;
    eprintln!("  Allocation: {alloc_id}");

    // Expire eligibility
    let period = net.reo_eligibility_period()?;
    eprintln!("  Advancing time by {period}s + 60s to expire eligibility");
    net.advance_time(period + 60).await?;

    assert!(
        !net.reo_is_eligible(&net.indexer_address)?,
        "Set 3: Indexer should be ineligible after period expiry"
    );

    // ReoTestPlan 6.3: Record stake before closing while ineligible
    let stake_before_denied = net.staked_tokens()?;

    let close = net.close_allocation(&alloc_id).await?;
    let rewards = close["indexingRewards"].as_str().unwrap_or("0");
    let ineligible_rewards = parse_rewards(rewards);
    eprintln!("  indexingRewards: {rewards} (ineligible)");
    assert!(
        ineligible_rewards == 0.0,
        "Set 3: Ineligible indexer should receive zero rewards, got {rewards}"
    );

    // ReoTestPlan 6.3: Verify stake did not increase (denied rewards not credited)
    let stake_after_denied = net.staked_tokens()?;
    eprintln!(
        "  Staked tokens: {stake_before_denied} → {stake_after_denied} (should not increase)"
    );
    assert!(
        stake_after_denied <= stake_before_denied,
        "Set 3 / ReoTestPlan 6.3: Stake should not increase when rewards are denied. \
         Before: {stake_before_denied}, After: {stake_after_denied}"
    );

    // ── Set 4: Optimistic recovery → re-renew → verify re-eligibility ──
    eprintln!();
    eprintln!("=== Set 4: Re-renewed indexer (optimistic recovery) ===");

    net.reo_renew_indexer(&net.indexer_address)?;
    let alloc_id = create_test_allocation(&net, &deployment).await?;
    eprintln!("  Allocation: {alloc_id}");

    // Let eligibility expire
    eprintln!("  Expiring eligibility ({period}s)...");
    net.advance_time(period + 60).await?;
    assert!(
        !net.reo_is_eligible(&net.indexer_address)?,
        "Should be ineligible"
    );

    // Advance more epochs while ineligible
    net.advance_epochs(2).await?;

    // Re-renew — the key assertion: eligibility can be restored
    net.reo_renew_indexer(&net.indexer_address)?;
    assert!(
        net.reo_is_eligible(&net.indexer_address)?,
        "Should be eligible after re-renewal"
    );

    let close = net.close_allocation(&alloc_id).await?;
    let rewards = close["indexingRewards"].as_str().unwrap_or("0");
    let recovery_rewards = parse_rewards(rewards);
    eprintln!("  indexingRewards: {rewards} (re-eligible)");
    assert!(
        recovery_rewards > 0.0,
        "Set 4: Re-eligible indexer should receive rewards, got {rewards}"
    );
    // NOTE(review): assumes the longer Set 4 accumulation always yields strictly
    // more rewards than Set 2 — depends on issuance rate staying constant; confirm.
    assert!(
        recovery_rewards > eligible_rewards,
        "Set 4: Re-eligible rewards ({recovery_rewards}) should exceed \
         Set 2 rewards ({eligible_rewards}) due to longer accumulation"
    );

    // Restore: re-create the allocation we consumed
    eprintln!();
    eprintln!("=== Cleanup: restoring allocation for {deployment} ===");
    net.create_allocation(&deployment, "0.01").await?;

    Ok(())
}
diff --git a/tests/tests/network_state.rs b/tests/tests/network_state.rs
new file mode 100644
index 0000000..f4c1bfb
--- /dev/null
+++ b/tests/tests/network_state.rs
@@ -0,0 +1,216 @@
//! Network State Observation Tests (BaselineTestPlan Cycles 1, 3.1, 6)
//!
//! Verifies the running local network matches BaselineTestPlan expectations.
//!
All tests are read-only — they observe state without modifying it. +//! +//! Mapping to BaselineTestPlan: +//! - `indexer_registered` → Cycle 1.1 (stake) + 1.2 (url, geoHash) +//! - `provision_exists` → Cycle 1.3 + 3.1 (provision with tokensProvisioned) +//! - `active_allocations` → Cycle 4.1 (active allocations exist) +//! - `gateway_serves_queries` → Cycle 5.1 (gateway reachable) +//! - `indexer_health_metrics` → Cycle 6.1 (all expected fields populated) +//! - `epoch_progressing` → Cycle 6.2 (currentEpoch > 0) +//! - `reo_contract_state` → IndexerTestGuide prerequisites + +use anyhow::Result; +use local_network_tests::TestNetwork; + +fn net() -> Result { + TestNetwork::from_default_env() +} + +/// BaselineTestPlan 1.1 + 1.2: Indexer registered with stake, URL, and geoHash. +/// +/// Verification query matches the BaselineTestPlan 1.1/1.2 queries. +#[tokio::test] +async fn indexer_registered() -> Result<()> { + let net = net()?; + let indexer = net.query_indexer(&net.indexer_address).await?; + + assert!( + !indexer.is_null(), + "Indexer entity should exist in subgraph" + ); + + let staked = indexer["stakedTokens"].as_str().unwrap_or("0"); + assert!( + staked != "0", + "stakedTokens should be non-zero, got {staked}" + ); + + let url = indexer["url"].as_str().unwrap_or(""); + assert!(!url.is_empty(), "url should be set"); + + let geo = indexer["geoHash"].as_str().unwrap_or(""); + assert!(!geo.is_empty(), "geoHash should be set"); + + Ok(()) +} + +/// BaselineTestPlan 1.3 + 3.1: Provision exists with non-zero tokensProvisioned. +/// +/// Verifies the indexer-agent automatically created a SubgraphService provision. +/// Emulates `graph indexer provisions get` (Cycle 3.1). 
+#[tokio::test] +async fn provision_exists() -> Result<()> { + let net = net()?; + let provisions = net.query_provisions(&net.indexer_address).await?; + let provisions = provisions + .as_array() + .expect("provisions should be an array"); + + assert!( + !provisions.is_empty(), + "At least one provision should exist for the indexer" + ); + + let first = &provisions[0]; + let tokens = first["tokensProvisioned"].as_str().unwrap_or("0"); + assert!( + tokens != "0", + "tokensProvisioned should be non-zero, got {tokens}" + ); + + Ok(()) +} + +/// BaselineTestPlan 4.1: Active allocations exist with non-zero allocatedTokens. +#[tokio::test] +async fn active_allocations() -> Result<()> { + let net = net()?; + let allocs = net.query_active_allocations(&net.indexer_address).await?; + let allocs = allocs.as_array().expect("allocations should be an array"); + + assert!( + !allocs.is_empty(), + "At least one active allocation should exist" + ); + + for alloc in allocs { + let tokens = alloc["allocatedTokens"].as_str().unwrap_or("0"); + assert!( + tokens != "0", + "allocatedTokens should be non-zero for allocation {}", + alloc["id"] + ); + } + + Ok(()) +} + +/// BaselineTestPlan 5.1: Gateway serves queries (reachability check). +/// +/// Full query success rate is tested in `allocation_lifecycle::gateway_query_serving`. +/// This test confirms the gateway is reachable and returns valid JSON. +#[tokio::test] +async fn gateway_serves_queries() -> Result<()> { + let net = net()?; + net.mine_blocks(5).await?; + + let resp = net + .gateway_query(r#"{ _meta { block { number } } }"#) + .await?; + assert!( + resp.status().is_success(), + "Gateway should return 200, got {}", + resp.status() + ); + + let body: serde_json::Value = resp.json().await?; + assert!(body.is_object(), "Gateway should return JSON, got {body}"); + + Ok(()) +} + +/// BaselineTestPlan 6.1: Indexer health — all expected fields populated. 
+/// +/// Queries the same fields as BaselineTestPlan 6.1 and verifies the indexer +/// has active allocations visible and accumulated metrics present. +#[tokio::test] +async fn indexer_health_metrics() -> Result<()> { + let net = net()?; + let indexer = net.query_indexer(&net.indexer_address).await?; + + assert!(!indexer.is_null(), "Indexer entity should exist"); + + // All expected fields should be populated (not null) + for field in [ + "stakedTokens", + "allocatedTokens", + "availableStake", + "url", + "geoHash", + ] { + assert!( + !indexer[field].is_null(), + "Indexer field '{field}' should be populated" + ); + } + + // Active allocations should be visible + let allocs = indexer["allocations"] + .as_array() + .expect("allocations should be an array"); + assert!( + !allocs.is_empty(), + "Active allocations should be visible in indexer entity" + ); + + // Log accumulated metrics (may be zero on a fresh network) + let rewards = indexer["rewardsEarned"].as_str().unwrap_or("n/a"); + let fees = indexer["queryFeesCollected"].as_str().unwrap_or("n/a"); + let delegated = indexer["delegatedTokens"].as_str().unwrap_or("n/a"); + eprintln!("=== BaselineTestPlan 6.1: Indexer Health ==="); + eprintln!(" rewardsEarned: {rewards}"); + eprintln!(" queryFeesCollected: {fees}"); + eprintln!(" delegatedTokens: {delegated}"); + eprintln!(" activeAllocations: {}", allocs.len()); + + Ok(()) +} + +/// BaselineTestPlan 6.2: Epoch progressing (currentEpoch > 0). +#[tokio::test] +async fn epoch_progressing() -> Result<()> { + let net = net()?; + let network = net.query_network().await?; + + let epoch = network["currentEpoch"] + .as_u64() + .or_else(|| { + network["currentEpoch"] + .as_str() + .and_then(|s| s.parse().ok()) + }) + .unwrap_or(0); + assert!(epoch > 0, "currentEpoch should be > 0, got {epoch}"); + + Ok(()) +} + +/// IndexerTestGuide prerequisites: REO contract state. +/// +/// Verifies eligibility validation is enabled and the oracle has been updated. 
+/// These are prerequisites for IndexerTestGuide Sets 2-4. +#[tokio::test] +async fn reo_contract_state() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + let validation = net.reo_validation_enabled()?; + assert!(validation, "Eligibility validation should be enabled"); + + let last_update = net.reo_last_oracle_update()?; + assert!( + last_update > 0, + "Last oracle update time should be > 0, got {last_update}" + ); + + let eligible = net.reo_is_eligible(&net.indexer_address)?; + eprintln!(" isEligible({}) = {eligible}", net.indexer_address); + + Ok(()) +} diff --git a/tests/tests/provision_management.rs b/tests/tests/provision_management.rs new file mode 100644 index 0000000..685a0b0 --- /dev/null +++ b/tests/tests/provision_management.rs @@ -0,0 +1,114 @@ +//! Provision Management Tests (BaselineTestPlan Cycle 3) +//! +//! Tests adding, thawing, and removing stake from the SubgraphService provision. +//! In production, these operations use `graph indexer provisions` CLI; +//! here we emulate them with direct HorizonStaking contract calls. +//! +//! Mapping to BaselineTestPlan: +//! - `provision_lifecycle` → Cycle 3.2 (add) + 3.3 (thaw) + 3.4 (deprovision) +//! +//! Note: Cycle 3.1 (view provision) is covered by `network_state::provision_exists`. + +use anyhow::Result; +use local_network_tests::TestNetwork; +use serial_test::serial; + +fn net() -> Result { + TestNetwork::from_default_env() +} + +/// BaselineTestPlan 3.2 + 3.3 + 3.4: Provision add → thaw → deprovision. +/// +/// Runs as a single test since each step depends on the previous: +/// 1. Add idle stake to provision (emulates `graph indexer provisions add`) +/// 2. Thaw from provision (emulates `graph indexer provisions thaw`) +/// 3. Advance past thawing period +/// 4. Deprovision (emulates `graph indexer provisions remove`) +/// 5. 
Verify tokens return to idle stake +#[tokio::test] +#[serial] +async fn provision_lifecycle() -> Result<()> { + let net = net()?; + eprintln!("=== BaselineTestPlan 3.2-3.4: Provision Lifecycle ==="); + + let amount = "1000000000000000000000"; // 1000 GRT + + // Add idle stake to work with + net.stake_tokens(amount)?; + let idle_before = net.idle_stake()?; + eprintln!(" Idle stake: {idle_before}"); + assert!(idle_before > 0, "Need idle stake for provision tests"); + + // -- 3.2: Add to provision -- + // Emulates: graph indexer provisions add 1000 + eprintln!(); + eprintln!("--- 3.2: Add to provision ---"); + net.provision_add(amount)?; + let idle_after_add = net.idle_stake()?; + eprintln!(" Idle stake after provision_add: {idle_after_add}"); + assert!( + idle_after_add < idle_before, + "Idle stake should decrease after adding to provision. \ + Before: {idle_before}, After: {idle_after_add}" + ); + + // Verify via subgraph (mine blocks to trigger indexing) + net.mine_blocks(2).await?; + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + let provisions = net.query_provisions(&net.indexer_address).await?; + let provisioned = provisions + .as_array() + .and_then(|p| p.first()) + .and_then(|p| p["tokensProvisioned"].as_str()) + .unwrap_or("0"); + eprintln!(" tokensProvisioned (subgraph): {provisioned}"); + + // -- 3.3: Thaw from provision -- + // Emulates: graph indexer provisions thaw 1000 + eprintln!(); + eprintln!("--- 3.3: Thaw from provision ---"); + net.provision_thaw(amount)?; + + // Verify thawing state via subgraph + net.mine_blocks(2).await?; + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + let provisions = net.query_provisions(&net.indexer_address).await?; + let thawing = provisions + .as_array() + .and_then(|p| p.first()) + .and_then(|p| p["tokensThawing"].as_str()) + .unwrap_or("0"); + eprintln!(" tokensThawing (subgraph): {thawing}"); + assert!( + thawing != "0", + "tokensThawing should be non-zero after thaw" + ); + + // Get 
thawing period + let thawing_period = net.provision_thawing_period().await?; + eprintln!(" Thawing period: {thawing_period}s"); + + // -- 3.4: Deprovision after thawing period -- + // Emulates: graph indexer provisions remove (after waiting for thaw) + eprintln!(); + eprintln!("--- 3.4: Deprovision ---"); + if thawing_period > 0 { + eprintln!( + " Advancing time by {}s to expire thawing period...", + thawing_period + 60 + ); + net.advance_time(thawing_period + 60).await?; + } + + net.provision_deprovision(1)?; + let idle_final = net.idle_stake()?; + eprintln!(" Idle stake after deprovision: {idle_final}"); + + assert!( + idle_final > idle_after_add, + "Idle stake should increase after deprovision. \ + After add: {idle_after_add}, After deprovision: {idle_final}" + ); + + Ok(()) +} diff --git a/tests/tests/query_fees.rs b/tests/tests/query_fees.rs new file mode 100644 index 0000000..e7c8699 --- /dev/null +++ b/tests/tests/query_fees.rs @@ -0,0 +1,96 @@ +//! Query Fee Tests (BaselineTestPlan Cycle 5.1, 5.3) +//! +//! Tests the TAP (Timeline Aggregation Protocol) query fee pipeline: +//! gateway query → TAP receipt → Kafka → aggregation → escrow +//! +//! Mapping to BaselineTestPlan: +//! - `gateway_queries_generate_tap_receipts` → Cycle 5.1 (send test queries, verify receipts) +//! - `tap_escrow_state_observable` → Cycle 5.3 (verify query fee collection state) +//! +//! The local network runs the full TAP stack: gateway, tap-aggregator, +//! tap-escrow-manager, tap-agent, and redpanda (Kafka). Query fees are +//! generated automatically when queries pass through the gateway with +//! an API key. + +use anyhow::Result; +use local_network_tests::TestNetwork; + +fn net() -> Result { + TestNetwork::from_default_env() +} + +/// BaselineTestPlan 5.1: Verify gateway queries generate TAP receipts. +/// +/// Emulates the `query_test.sh` script from the test plan. 
+/// Sends queries through the gateway and checks that the indexer-service +/// receives and validates TAP V2 receipts. +#[tokio::test] +async fn gateway_queries_generate_tap_receipts() -> Result<()> { + let net = net()?; + + eprintln!("=== TAP Receipt Generation Test ==="); + + // Send a batch of queries through the gateway + let (ok, fail) = net.send_gateway_queries(20).await?; + eprintln!(" Gateway queries: {ok} OK, {fail} failed (out of 20)"); + + // At least some should succeed (attestation signer may be stale for some) + assert!( + ok >= 1, + "At least 1 gateway query should succeed, got {ok} OK / {fail} failed" + ); + + Ok(()) +} + +/// BaselineTestPlan 5.3: Check query fee collection state. +/// +/// Verifies TAP escrow accounts in the TAP subgraph and on-chain via +/// `PaymentsEscrow.getBalance()`. In production, `queryFeesCollected` +/// in the network subgraph would be non-zero after queries flow through. +/// +/// Note: This test observes current state rather than asserting a specific +/// value, since escrow deposits depend on background TAP processing timing. 
+#[tokio::test] +async fn tap_escrow_state_observable() -> Result<()> { + let net = net()?; + + eprintln!("=== TAP Escrow State Test ==="); + + // Check TAP subgraph for escrow accounts + let accounts = net.query_tap_escrow_accounts().await?; + let count = accounts.as_array().map(|a| a.len()).unwrap_or(0); + eprintln!(" TAP escrow accounts: {count}"); + + if count > 0 { + for acc in accounts.as_array().unwrap() { + let sender = acc["sender"]["id"].as_str().unwrap_or("?"); + let receiver = acc["receiver"]["id"].as_str().unwrap_or("?"); + let balance = acc["balance"].as_str().unwrap_or("0"); + eprintln!(" sender={sender} receiver={receiver} balance={balance}"); + } + } else { + eprintln!(" NOTE: No escrow accounts yet — TAP escrow manager may need time to process"); + } + + // Check on-chain escrow balance directly + // getBalance(payer, collector, receiver) — collector is the SubgraphService + let escrow_balance = net.cast_call( + &net.contracts.payments_escrow, + "getBalance(address,address,address)(uint256)", + &[ + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", // payer (gateway/account0) + &net.contracts.subgraph_service, // collector + &net.indexer_address, // receiver (indexer) + ], + ); + match escrow_balance { + Ok(balance) => eprintln!(" On-chain escrow balance: {balance}"), + Err(e) => eprintln!(" On-chain escrow query failed: {e:#}"), + } + + // This test is observational — it passes regardless of state to document + // the TAP system's current behavior. The key assertion is that querying + // doesn't error out (services are reachable). + Ok(()) +} diff --git a/tests/tests/reo_governance.rs b/tests/tests/reo_governance.rs new file mode 100644 index 0000000..16b0664 --- /dev/null +++ b/tests/tests/reo_governance.rs @@ -0,0 +1,688 @@ +//! REO Governance & Coordinator Tests (ReoTestPlan Cycles 1, 3, 4, 5, 7) +//! +//! Tests the coordinator/governance operations on the RewardsEligibilityOracle: +//! 
deployment verification, oracle operations, validation toggle, timeout +//! fail-open, pause/unpause, and access control. +//! +//! These are operations performed by the protocol team (not indexers). +//! On the local network, account0 holds all privileged roles. +//! +//! Mapping to ReoTestPlan: +//! - `deployment_parameters` → Cycle 1.3 (default config) +//! - `rewards_manager_integration` → Cycle 1.4 (RewardsManager → REO) +//! - `contract_not_paused` → Cycle 1.5 +//! - `renew_single_indexer` → Cycle 3.2 +//! - `batch_renewal` → Cycle 3.3 +//! - `zero_address_skipped` → Cycle 3.4 +//! - `unauthorized_renewal_reverts` → Cycle 3.5 +//! - `enable_validation_eligible_stays` → Cycle 4.1 + 4.2 +//! - `eligibility_expires_after_period` → Cycle 4.4 +//! - `timeout_failopen` → Cycle 5.1 +//! - `oracle_renewal_resets_timeout` → Cycle 5.2 +//! - `pause_blocks_writes` → Cycle 7.1 +//! - `disable_validation_emergency` → Cycle 7.2 +//! - `access_control_unauthorized` → Cycle 7.3 + +use anyhow::{Context, Result}; +use local_network_tests::TestNetwork; +use serial_test::serial; + +fn net() -> Result { + TestNetwork::from_default_env() +} + +/// A private key for an account with NO roles on the REO contract. +/// Hardhat account #9 — has ETH but no governance roles. +const UNAUTHORIZED_KEY: &str = "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6"; + +// ── Cycle 1: Deployment Verification ── + +/// ReoTestPlan 1.3: Verify default parameters. 
+#[tokio::test] +#[serial] +async fn deployment_parameters() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 1.3: Deployment Parameters ==="); + + let period = net.reo_eligibility_period()?; + eprintln!(" eligibilityPeriod: {period}s"); + assert!(period > 0, "eligibilityPeriod should be > 0"); + + let timeout = net.reo_oracle_timeout()?; + eprintln!(" oracleUpdateTimeout: {timeout}s"); + assert!(timeout > 0, "oracleUpdateTimeout should be > 0"); + + let validation = net.reo_validation_enabled()?; + eprintln!(" eligibilityValidation: {validation}"); + // On local network, validation is pre-enabled by setup + + Ok(()) +} + +/// ReoTestPlan 1.4: RewardsManager points to the REO contract. +#[tokio::test] +#[serial] +async fn rewards_manager_integration() -> Result<()> { + let net = net()?; + let reo = match &net.contracts.reo { + Some(addr) => addr.clone(), + None => { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + }; + + eprintln!("=== ReoTestPlan 1.4: RewardsManager Integration ==="); + + let configured_reo = net.rewards_manager_reo_address()?; + eprintln!(" RewardsManager.getRewardsEligibilityOracle(): {configured_reo}"); + eprintln!(" Expected REO address: {reo}"); + + assert_eq!( + configured_reo.to_lowercase(), + reo.to_lowercase(), + "RewardsManager should point to the REO contract" + ); + + Ok(()) +} + +/// ReoTestPlan 1.5: Contract is not paused. 
+#[tokio::test] +#[serial] +async fn contract_not_paused() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 1.5: Contract Not Paused ==="); + + let paused = net.reo_is_paused()?; + eprintln!(" paused: {paused}"); + assert!(!paused, "REO should not be paused"); + + Ok(()) +} + +// ── Cycle 3: Oracle Operations ── + +/// ReoTestPlan 3.2: Renew single indexer and verify timestamps + events. +#[tokio::test] +#[serial] +async fn renew_single_indexer() -> Result<()> { + let net = net()?; + let reo = match &net.contracts.reo { + Some(addr) => addr.clone(), + None => { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + }; + + eprintln!("=== ReoTestPlan 3.2: Renew Single Indexer ==="); + + let before_oracle = net.reo_last_oracle_update()?; + let before_renewal = net.reo_renewal_time(&net.indexer_address)?; + + // Record block before renewal for event verification + let block_before = net.get_block_number_sync()?; + + net.reo_renew_indexer(&net.indexer_address)?; + + let block_after = net.get_block_number_sync()?; + let after_oracle = net.reo_last_oracle_update()?; + let after_renewal = net.reo_renewal_time(&net.indexer_address)?; + + eprintln!(" lastOracleUpdateTime: {before_oracle} → {after_oracle}"); + eprintln!(" renewalTime({}):", net.indexer_address); + eprintln!(" before: {before_renewal}"); + eprintln!(" after: {after_renewal}"); + + assert!( + after_oracle >= before_oracle, + "lastOracleUpdateTime should not decrease" + ); + assert!(after_renewal > 0, "renewalTime should be set after renewal"); + assert!( + after_renewal >= before_renewal, + "renewalTime should not decrease" + ); + + assert!( + net.reo_is_eligible(&net.indexer_address)?, + "Indexer should be eligible after renewal" + ); + + // Event verification: check for IndexerEligibilityRenewed event + let logs = net.cast_logs_json(&reo, block_before, block_after)?; + eprintln!( + " 
Events emitted: {} log(s) in blocks {block_before}..{block_after}", + logs.len() + ); + assert!( + !logs.is_empty(), + "renewIndexerEligibility should emit events" + ); + + let renewed_topic = net.cast_keccak("IndexerEligibilityRenewed(address,address)")?; + let has_renewed_event = logs.iter().any(|log| { + log["topics"] + .as_array() + .and_then(|t| t.first()) + .and_then(|t| t.as_str()) + .is_some_and(|t| t == renewed_topic) + }); + eprintln!(" IndexerEligibilityRenewed event: {has_renewed_event}"); + assert!( + has_renewed_event, + "Should emit IndexerEligibilityRenewed event" + ); + + Ok(()) +} + +/// ReoTestPlan 3.3: Batch renewal of multiple addresses. +#[tokio::test] +#[serial] +async fn batch_renewal() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 3.3: Batch Renewal ==="); + + // Use the indexer plus two arbitrary addresses + let addr1 = &net.indexer_address; + let addr2 = "0x70997970C51812dc3A010C7d01b50e0d17dc79C8"; // Hardhat #1 + let addr3 = "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC"; // Hardhat #2 + + net.reo_renew_batch(&[addr1, addr2, addr3])?; + + let t1 = net.reo_renewal_time(addr1)?; + let t2 = net.reo_renewal_time(addr2)?; + let t3 = net.reo_renewal_time(addr3)?; + eprintln!(" renewalTime({addr1}): {t1}"); + eprintln!(" renewalTime({addr2}): {t2}"); + eprintln!(" renewalTime({addr3}): {t3}"); + + assert!(t1 > 0, "addr1 should have renewal timestamp"); + assert!(t2 > 0, "addr2 should have renewal timestamp"); + assert!(t3 > 0, "addr3 should have renewal timestamp"); + + Ok(()) +} + +/// ReoTestPlan 3.4: Zero addresses silently skipped in renewal. 
+#[tokio::test] +#[serial] +async fn zero_address_skipped() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 3.4: Zero Address Skipped ==="); + + let zero = "0x0000000000000000000000000000000000000000"; + // Should succeed (zero address is silently skipped) + net.reo_renew_batch(&[zero, &net.indexer_address])?; + + let zero_time = net.reo_renewal_time(zero)?; + let indexer_time = net.reo_renewal_time(&net.indexer_address)?; + eprintln!(" renewalTime(zero): {zero_time}"); + eprintln!(" renewalTime(indexer): {indexer_time}"); + + assert_eq!(zero_time, 0, "Zero address should not get a renewal time"); + assert!(indexer_time > 0, "Indexer should still get renewed"); + + Ok(()) +} + +/// ReoTestPlan 3.5: Unauthorized account cannot renew. +#[tokio::test] +#[serial] +async fn unauthorized_renewal_reverts() -> Result<()> { + let net = net()?; + let reo = match &net.contracts.reo { + Some(addr) => addr.clone(), + None => { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + }; + + eprintln!("=== ReoTestPlan 3.5: Unauthorized Renewal Reverts ==="); + + let array = format!("[{}]", net.indexer_address); + let succeeded = net.cast_send_may_revert( + UNAUTHORIZED_KEY, + &reo, + "renewIndexerEligibility(address[],bytes)", + &[&array, "0x"], + )?; + + eprintln!(" Unauthorized renewal succeeded: {succeeded}"); + assert!( + !succeeded, + "Renewal from unauthorized account should revert" + ); + + Ok(()) +} + +// ── Cycle 4: Validation Toggle ── + +/// ReoTestPlan 4.1 + 4.2: Enable validation, verify renewed indexer stays eligible. +/// +/// Saves and restores the original validation state. 
+#[tokio::test] +#[serial] +async fn enable_validation_eligible_stays() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 4.1 + 4.2: Enable Validation ==="); + + let original = net.reo_validation_enabled()?; + + // Ensure indexer is renewed + net.reo_renew_indexer(&net.indexer_address)?; + + // Enable validation + net.reo_set_validation(true)?; + assert!( + net.reo_validation_enabled()?, + "Validation should be enabled" + ); + + // Renewed indexer should still be eligible + let eligible = net.reo_is_eligible(&net.indexer_address)?; + eprintln!(" isEligible after enabling validation: {eligible}"); + assert!( + eligible, + "Renewed indexer should remain eligible after enabling validation" + ); + + // Restore original state + net.reo_set_validation(original)?; + + Ok(()) +} + +/// ReoTestPlan 4.4: Eligibility expires after period. +/// +/// Reduces the period to 60s, renews, waits, verifies expiry, then restores. 
+#[tokio::test] +#[serial] +async fn eligibility_expires_after_period() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 4.4: Eligibility Expires After Period ==="); + + let original_period = net.reo_eligibility_period()?; + let original_validation = net.reo_validation_enabled()?; + + // Enable validation and set short period + net.reo_set_validation(true)?; + net.reo_set_eligibility_period(60)?; + eprintln!(" Set eligibilityPeriod to 60s"); + + // Renew indexer + net.reo_renew_indexer(&net.indexer_address)?; + assert!( + net.reo_is_eligible(&net.indexer_address)?, + "Should be eligible immediately after renewal" + ); + + // Advance past the 60s period + net.advance_time(65).await?; + + let eligible = net.reo_is_eligible(&net.indexer_address)?; + eprintln!(" isEligible after 65s: {eligible}"); + assert!(!eligible, "Should be ineligible after period expires"); + + // Restore original state + net.reo_set_eligibility_period(original_period)?; + net.reo_set_validation(original_validation)?; + // Re-renew to restore eligibility + net.reo_renew_indexer(&net.indexer_address)?; + eprintln!(" Restored period={original_period}s, validation={original_validation}"); + + Ok(()) +} + +// ── Cycle 5: Timeout Fail-Open ── + +/// ReoTestPlan 5.1: Oracle timeout makes all indexers eligible (fail-open). +/// +/// Reduces timeout to 60s, lets it expire, verifies an unrenewed address +/// becomes eligible via the fail-open mechanism. 
+#[tokio::test] +#[serial] +async fn timeout_failopen() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 5.1: Timeout Fail-Open ==="); + + let original_timeout = net.reo_oracle_timeout()?; + let original_validation = net.reo_validation_enabled()?; + + // Use an address that has never been renewed + let never_renewed = "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65"; // Hardhat #4 + + // Enable validation so non-renewed addresses are ineligible + net.reo_set_validation(true)?; + + // Renew the main indexer (to set lastOracleUpdateTime) + net.reo_renew_indexer(&net.indexer_address)?; + + // Verify the never-renewed address is NOT eligible + let before = net.reo_is_eligible(never_renewed)?; + eprintln!(" isEligible({never_renewed}) before timeout: {before}"); + assert!(!before, "Never-renewed address should be ineligible"); + + // Reduce timeout to 60s and wait + net.reo_set_oracle_timeout(60)?; + eprintln!(" Set oracleUpdateTimeout to 60s"); + + net.advance_time(65).await?; + + // Now the fail-open should kick in + let after = net.reo_is_eligible(never_renewed)?; + eprintln!(" isEligible({never_renewed}) after timeout: {after}"); + assert!( + after, + "Never-renewed address should be eligible via fail-open after oracle timeout" + ); + + // Restore + net.reo_set_oracle_timeout(original_timeout)?; + net.reo_set_validation(original_validation)?; + net.reo_renew_indexer(&net.indexer_address)?; + eprintln!(" Restored timeout={original_timeout}s, validation={original_validation}"); + + Ok(()) +} + +/// ReoTestPlan 5.2: Oracle renewal resets the timeout clock. 
+#[tokio::test] +#[serial] +async fn oracle_renewal_resets_timeout() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 5.2: Oracle Renewal Resets Timeout ==="); + + let before = net.reo_last_oracle_update()?; + let ts_before = net.get_block_timestamp()?; + eprintln!(" lastOracleUpdateTime before: {before}"); + eprintln!(" block.timestamp before: {ts_before}"); + + // Advance time so we can see a clear difference + net.advance_time(30).await?; + + // Renew — this should update lastOracleUpdateTime + net.reo_renew_indexer(&net.indexer_address)?; + + let after = net.reo_last_oracle_update()?; + let ts_after = net.get_block_timestamp()?; + eprintln!(" lastOracleUpdateTime after: {after}"); + eprintln!(" block.timestamp after: {ts_after}"); + + assert!( + after > before, + "lastOracleUpdateTime should increase after renewal. Before: {before}, After: {after}" + ); + + Ok(()) +} + +// ── Cycle 7: Emergency Operations ── + +/// ReoTestPlan 7.1: Pause blocks writes, view functions still work. +/// +/// Pauses, verifies writes revert, reads still work, then unpauses. 
+#[tokio::test] +#[serial] +async fn pause_blocks_writes() -> Result<()> { + let net = net()?; + let reo = match &net.contracts.reo { + Some(addr) => addr.clone(), + None => { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + }; + + eprintln!("=== ReoTestPlan 7.1: Pause Blocks Writes ==="); + + // Pause + net.reo_pause()?; + assert!(net.reo_is_paused()?, "Should be paused"); + eprintln!(" Paused: true"); + + // View functions should still work + let eligible = net.reo_is_eligible(&net.indexer_address)?; + eprintln!(" isEligible (while paused): {eligible}"); + // No assertion on the value — just that it doesn't revert + + // Write should revert while paused + let array = format!("[{}]", net.indexer_address); + let succeeded = net.cast_send_may_revert( + &net.account0_secret, + &reo, + "renewIndexerEligibility(address[],bytes)", + &[&array, "0x"], + )?; + eprintln!(" renewIndexerEligibility while paused succeeded: {succeeded}"); + assert!( + !succeeded, + "renewIndexerEligibility should revert while paused" + ); + + // Unpause + net.reo_unpause()?; + assert!(!net.reo_is_paused()?, "Should be unpaused"); + eprintln!(" Unpaused: true"); + + // Writes should work again + net.reo_renew_indexer(&net.indexer_address)?; + eprintln!(" Renewal after unpause: OK"); + + Ok(()) +} + +/// ReoTestPlan 7.2: Disable validation makes all indexers eligible. 
+#[tokio::test] +#[serial] +async fn disable_validation_emergency() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 7.2: Disable Validation (Emergency) ==="); + + let original = net.reo_validation_enabled()?; + + // Enable validation first + net.reo_set_validation(true)?; + + // An address that was never renewed should be ineligible + let never_renewed = "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65"; + // Renew the main indexer so lastOracleUpdateTime is fresh (prevent fail-open) + net.reo_renew_indexer(&net.indexer_address)?; + + let before = net.reo_is_eligible(never_renewed)?; + eprintln!(" isEligible({never_renewed}) with validation on: {before}"); + assert!( + !before, + "Never-renewed should be ineligible with validation on" + ); + + // Disable validation — emergency override + net.reo_set_validation(false)?; + + let after = net.reo_is_eligible(never_renewed)?; + eprintln!(" isEligible({never_renewed}) with validation off: {after}"); + assert!( + after, + "All indexers should be eligible when validation is disabled" + ); + + // Restore + net.reo_set_validation(original)?; + net.reo_renew_indexer(&net.indexer_address)?; + + Ok(()) +} + +/// ReoTestPlan 7.3: Unauthorized accounts cannot perform governance operations. 
+#[tokio::test] +#[serial] +async fn access_control_unauthorized() -> Result<()> { + let net = net()?; + let reo = match &net.contracts.reo { + Some(addr) => addr.clone(), + None => { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + }; + + eprintln!("=== ReoTestPlan 7.3: Access Control ==="); + + // Non-operator cannot set eligibility period + let ok = net.cast_send_may_revert( + UNAUTHORIZED_KEY, + &reo, + "setEligibilityPeriod(uint256)", + &["100"], + )?; + eprintln!(" setEligibilityPeriod (unauthorized): succeeded={ok}"); + assert!(!ok, "setEligibilityPeriod should revert for non-operator"); + + // Non-operator cannot enable validation + let ok = net.cast_send_may_revert( + UNAUTHORIZED_KEY, + &reo, + "setEligibilityValidation(bool)", + &["true"], + )?; + eprintln!(" setEligibilityValidation (unauthorized): succeeded={ok}"); + assert!( + !ok, + "setEligibilityValidation should revert for non-operator" + ); + + // Non-pause-role cannot pause + let ok = net.cast_send_may_revert(UNAUTHORIZED_KEY, &reo, "pause()", &[])?; + eprintln!(" pause (unauthorized): succeeded={ok}"); + assert!(!ok, "pause should revert for non-pause-role"); + + // Non-operator cannot set oracle timeout + let ok = net.cast_send_may_revert( + UNAUTHORIZED_KEY, + &reo, + "setOracleUpdateTimeout(uint256)", + &["100"], + )?; + eprintln!(" setOracleUpdateTimeout (unauthorized): succeeded={ok}"); + assert!(!ok, "setOracleUpdateTimeout should revert for non-operator"); + + Ok(()) +} + +// ── Cycle 6: Rewards Integration (View Functions) ── + +/// ReoTestPlan 6.5: View functions reflect zero for ineligible indexer. +/// +/// When an indexer is ineligible, `RewardsManager.getRewards()` should +/// return 0 for their active allocations, preventing the UI from +/// displaying unclaimable rewards. +/// +/// Saves and restores the original validation state. 
+#[tokio::test] +#[serial] +async fn rewards_view_zero_for_ineligible() -> Result<()> { + let net = net()?; + if net.contracts.reo.is_none() { + eprintln!("REO not deployed, skipping"); + return Ok(()); + } + + eprintln!("=== ReoTestPlan 6.5: View Functions Zero for Ineligible ==="); + + let original_period = net.reo_eligibility_period()?; + let original_validation = net.reo_validation_enabled()?; + + // Enable validation and renew so indexer starts eligible + net.reo_set_validation(true)?; + net.reo_renew_indexer(&net.indexer_address)?; + assert!( + net.reo_is_eligible(&net.indexer_address)?, + "Indexer should be eligible after renewal" + ); + + // Get an active allocation + let allocs = net.query_active_allocations(&net.indexer_address).await?; + let allocs = allocs.as_array().context("expected allocation array")?; + let active = allocs.first().context("no active allocation found")?; + let alloc_id = active["id"].as_str().context("allocation missing id")?; + eprintln!(" Active allocation: {alloc_id}"); + + // Check rewards while eligible — may be non-zero + let rewards_eligible = net.rewards_pending(alloc_id)?; + eprintln!(" Pending rewards (eligible): {rewards_eligible}"); + + // Make indexer ineligible: set short period and advance time + net.reo_set_eligibility_period(60)?; + net.advance_time(65).await?; + + assert!( + !net.reo_is_eligible(&net.indexer_address)?, + "Indexer should be ineligible after period expiry" + ); + + // Check rewards while ineligible — should be 0 + let rewards_ineligible = net.rewards_pending(alloc_id)?; + eprintln!(" Pending rewards (ineligible): {rewards_ineligible}"); + + assert_eq!( + rewards_ineligible, 0, + "getRewards() should return 0 for ineligible indexer, got {rewards_ineligible}" + ); + + // Restore original state + net.reo_set_eligibility_period(original_period)?; + net.reo_set_validation(original_validation)?; + net.reo_renew_indexer(&net.indexer_address)?; + eprintln!(" Restored period={original_period}s, 
validation={original_validation}"); + + Ok(()) +} diff --git a/tests/tests/reward_collection.rs b/tests/tests/reward_collection.rs new file mode 100644 index 0000000..87d0a2b --- /dev/null +++ b/tests/tests/reward_collection.rs @@ -0,0 +1,105 @@ +//! Reward Collection Tests — Direct Contract Call +//! +//! Tests `SubgraphService.collect(IndexingRewards)` directly via cast. +//! This bypasses the indexer-agent to verify the raw contract behavior: +//! create allocation → advance epochs → collect() → verify stake increase +//! +//! Not directly mapped to BaselineTestPlan or IndexerTestGuide — those cover +//! the agent-mediated close path (which does collect internally as a multicall). +//! See `allocation_lifecycle::close_allocation_collects_rewards` for that flow. +//! +//! This test provides additional coverage of the underlying contract mechanism. + +use anyhow::{Context, Result}; +use local_network_tests::TestNetwork; +use serial_test::serial; + +fn net() -> Result<TestNetwork> { + TestNetwork::from_default_env() +} + +/// Verify that calling `SubgraphService.collect(IndexingRewards)` mints GRT +/// to the indexer's stake. +/// +/// This is the raw contract operation that the indexer-agent invokes as part +/// of its close multicall (collect + stopService). +#[tokio::test] +#[serial] +async fn collect_indexing_rewards_increases_stake() -> Result<()> { + let net = net()?; + + // Find an active allocation + let allocs = net.get_allocations().await?; + let allocs = allocs.as_array().context("expected allocation array")?; + let active = allocs + .iter() + .find(|a| a["closedAtEpoch"].is_null()) + .context("no active allocation found")?; + let alloc_id = active["id"] + .as_str() + .context("allocation missing id")? + .to_string(); + let deployment = active["subgraphDeployment"] + .as_str() + .context("allocation missing deployment")?
+ .to_string(); + + eprintln!("=== Reward collection test ==="); + eprintln!(" Allocation: {alloc_id}"); + eprintln!(" Deployment: {deployment}"); + + // Close and recreate so we have a fresh allocation with known epoch boundaries + net.advance_epochs(2).await?; + net.close_allocation(&alloc_id).await?; + + let result = net.create_allocation(&deployment, "0.01").await?; + let fresh_alloc = result["allocation"] + .as_str() + .context("expected allocation ID")? + .to_string(); + eprintln!(" Fresh allocation: {fresh_alloc}"); + + // Advance epochs so rewards accumulate (need > 1 epoch for allocation maturity) + net.advance_epochs(2).await?; + + // Ensure indexer is eligible (eligibility may have expired during epoch advancement) + if net.contracts.reo.is_some() { + net.reo_renew_indexer(&net.indexer_address)?; + assert!( + net.reo_is_eligible(&net.indexer_address)?, + "Indexer must be eligible to collect rewards" + ); + } + + // Record stake before collect + let stake_before = net.staked_tokens()?; + eprintln!(" Stake before collect: {stake_before}"); + + // Call collect(IndexingRewards) — this is the key operation + eprintln!(" Calling collect(IndexingRewards)..."); + net.collect_indexing_rewards(&fresh_alloc)?; + + // Record stake after collect + let stake_after = net.staked_tokens()?; + let reward_delta = stake_after.saturating_sub(stake_before); + let reward_grt = reward_delta as f64 / 1e18; + eprintln!(" Stake after collect: {stake_after}"); + eprintln!(" Reward delta: {reward_delta} wei ({reward_grt:.2} GRT)"); + + assert!( + stake_after > stake_before, + "Staked tokens should increase after collect(IndexingRewards). \ + Before: {stake_before}, After: {stake_after}" + ); + + // Restore: close the fresh allocation (if still open) and recreate. + // The collect() call or the indexer-agent may have auto-closed it. 
+ net.advance_epochs(2).await?; + if let Err(e) = net.close_allocation(&fresh_alloc).await { + eprintln!(" Close skipped (already closed): {e:#}"); + } + net.create_allocation(&deployment, "0.01").await?; + eprintln!(" Restored allocation for {deployment}"); + + Ok(()) +} diff --git a/tests/tests/stake_management.rs b/tests/tests/stake_management.rs new file mode 100644 index 0000000..bdebdd3 --- /dev/null +++ b/tests/tests/stake_management.rs @@ -0,0 +1,81 @@ +//! Stake Management Tests (BaselineTestPlan Cycle 2) +//! +//! Tests adding and removing stake from the indexer. +//! In production, these operations are performed via Explorer UI; +//! here we emulate them with direct HorizonStaking contract calls. +//! +//! Mapping to BaselineTestPlan: +//! - `add_stake` → Cycle 2.1 (Add stake via Explorer) +//! - `unstake_idle_tokens` → Cycle 2.2 (Unstake tokens) + +use anyhow::Result; +use local_network_tests::TestNetwork; +use serial_test::serial; + +fn net() -> Result<TestNetwork> { + TestNetwork::from_default_env() +} + +/// BaselineTestPlan 2.1: Add stake to indexer. +/// +/// Emulates Explorer "Add Stake": approve GRT → stakeTo(indexer, amount). +/// Verifies stakedTokens increases after staking. +#[tokio::test] +#[serial] +async fn add_stake() -> Result<()> { + let net = net()?; + eprintln!("=== BaselineTestPlan 2.1: Add Stake ==="); + + let before = net.staked_tokens()?; + eprintln!(" Staked before: {before}"); + + let amount = "1000000000000000000000"; // 1000 GRT + net.stake_tokens(amount)?; + + let after = net.staked_tokens()?; + let delta = after.saturating_sub(before); + eprintln!(" Staked after: {after} (+{delta} wei)"); + + assert!( + after > before, + "stakedTokens should increase after adding stake. Before: {before}, After: {after}" + ); + + Ok(()) +} + +/// BaselineTestPlan 2.2: Unstake idle tokens. +/// +/// Emulates Explorer "Unstake": adds idle stake, then calls unstake(). +/// Verifies stakedTokens decreases after unstaking.
+/// +/// Note: This only unstakes idle (unprovisioned) tokens. Full thawing +/// and withdrawal after the thawing period is tested in provision_management. +#[tokio::test] +#[serial] +async fn unstake_idle_tokens() -> Result<()> { + let net = net()?; + eprintln!("=== BaselineTestPlan 2.2: Unstake Tokens ==="); + + // Add some stake to create idle (unprovisioned) tokens + let amount = "1000000000000000000000"; // 1000 GRT + net.stake_tokens(amount)?; + + let idle = net.idle_stake()?; + eprintln!(" Idle stake after adding: {idle}"); + assert!(idle > 0, "Should have idle stake to unstake"); + + // Unstake the idle portion + let before = net.staked_tokens()?; + net.unstake_tokens(amount)?; + let after = net.staked_tokens()?; + eprintln!(" Staked before unstake: {before}"); + eprintln!(" Staked after unstake: {after}"); + + assert!( + after < before, + "stakedTokens should decrease after unstaking. Before: {before}, After: {after}" + ); + + Ok(()) +} From b5eb2ee4da1791598b73d8d545e5b6ccab858d31 Mon Sep 17 00:00:00 2001 From: Rembrandt Kuipers <50174308+RembrandtK@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:26:45 +0000 Subject: [PATCH 4/4] docs: add testing, REO, and flow documentation - docs/testing/: test framework, REO test plan and status - docs/eligibility-oracle/: goal and status tracking - docs/explorer/: integration goal - docs/flows/: updated testing flow guides - docs/indexing-payments/: reorganize into safe-based/ subdir - tests/README.md: coverage mapping table --- docs/README.md | 56 +++- docs/eligibility-oracle/Goal.md | 115 ++++++++ docs/eligibility-oracle/Status.md | 233 +++++++++++++++++ docs/explorer/Goal.md | 154 +++++++++++ docs/flows/EligibilityOracleTesting.md | 171 ++++++++++++ {flows => docs/flows}/IndexerAgentTesting.md | 30 ++- .../flows}/IndexingPaymentsTesting.md | 43 +-- {flows => docs/flows}/README.md | 8 + .../RecurringCollectorDeployment.md | 71 +++++ .../archive/IntegrationSummary.md | 46 ++-- .../archive/TestingStatus.md | 100 
------- .../archive/UserExperience.md | 30 ++- .../{ => safe-based}/Architecture.md | 0 .../{ => safe-based}/DipperServicePlan.md | 0 .../{ => safe-based}/IndexerAgentPlan.md | 0 .../{ => safe-based}/IndexerServicePlan.md | 0 .../{ => safe-based}/README.md | 18 +- docs/testing/TestFramework.md | 245 ++++++++++++++++++ docs/testing/reo/CurationSignal.md | 115 ++++++++ docs/testing/reo/Goal.md | 97 +++++++ docs/testing/reo/Status.md | 175 +++++++++++++ tests/README.md | 138 ++++++++++ 22 files changed, 1666 insertions(+), 179 deletions(-) create mode 100644 docs/eligibility-oracle/Goal.md create mode 100644 docs/eligibility-oracle/Status.md create mode 100644 docs/explorer/Goal.md create mode 100644 docs/flows/EligibilityOracleTesting.md rename {flows => docs/flows}/IndexerAgentTesting.md (91%) rename {flows => docs/flows}/IndexingPaymentsTesting.md (87%) rename {flows => docs/flows}/README.md (77%) create mode 100644 docs/indexing-payments/RecurringCollectorDeployment.md delete mode 100644 docs/indexing-payments/archive/TestingStatus.md rename docs/indexing-payments/{ => safe-based}/Architecture.md (100%) rename docs/indexing-payments/{ => safe-based}/DipperServicePlan.md (100%) rename docs/indexing-payments/{ => safe-based}/IndexerAgentPlan.md (100%) rename docs/indexing-payments/{ => safe-based}/IndexerServicePlan.md (100%) rename docs/indexing-payments/{ => safe-based}/README.md (83%) create mode 100644 docs/testing/TestFramework.md create mode 100644 docs/testing/reo/CurationSignal.md create mode 100644 docs/testing/reo/Goal.md create mode 100644 docs/testing/reo/Status.md create mode 100644 tests/README.md diff --git a/docs/README.md b/docs/README.md index b5c749e..2f34aba 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,27 +4,67 @@ This directory contains detailed documentation for the local-network project. 
## Indexing Payments -**[Start Here: indexing-payments/README.md](./indexing-payments/README.md)** +**[Start Here: indexing-payments/safe-based/README.md](./indexing-payments/safe-based/README.md)** **Implementation Documentation:** -- [Architecture.md](./indexing-payments/Architecture.md) - Technical architecture -- [DipperServicePlan.md](./indexing-payments/DipperServicePlan.md) - Dipper service implementation -- [IndexerAgentPlan.md](./indexing-payments/IndexerAgentPlan.md) - Agent modifications -- [IndexerServicePlan.md](./indexing-payments/IndexerServicePlan.md) - Service updates + +- [Architecture.md](./indexing-payments/safe-based/Architecture.md) - Technical architecture +- [DipperServicePlan.md](./indexing-payments/safe-based/DipperServicePlan.md) - Dipper service implementation +- [IndexerAgentPlan.md](./indexing-payments/safe-based/IndexerAgentPlan.md) - Agent modifications +- [IndexerServicePlan.md](./indexing-payments/safe-based/IndexerServicePlan.md) - Service updates **Planning Summaries:** [archive/](./indexing-payments/archive/) + - [IntegrationSummary.md](./indexing-payments/archive/IntegrationSummary.md) - Implementation status & quick start - [UserExperience.md](./indexing-payments/archive/UserExperience.md) - What changes with override - [TestingStatus.md](./indexing-payments/archive/TestingStatus.md) - Current testing status +## Eligibility Oracle + +**[Start Here: eligibility-oracle/Goal.md](./eligibility-oracle/Goal.md)** + +- [Goal.md](./eligibility-oracle/Goal.md) - Objective and scope +- [Status.md](./eligibility-oracle/Status.md) - Implementation progress and log + +## Test Plan Automation + +**[Start Here: testing/reo/Goal.md](./testing/reo/Goal.md)** + +- [Goal.md](./testing/reo/Goal.md) - Layered automation approach and workflow sequence +- [Status.md](./testing/reo/Status.md) - Progress, bugs found, and gaps +- [CurationSignal.md](./testing/reo/CurationSignal.md) - Task: add curation signal to local network setup +- 
[TestFramework.md](./testing/TestFramework.md) - Task: test framework evaluation (bash + Rust) + +**Scripts:** + +- `scripts/test-baseline-queries.sh` - Layer 0: Validate BaselineTestPlan GraphQL queries +- `scripts/test-indexer-guide-queries.sh` - Layer 0: Validate IndexerTestGuide queries and cast commands +- `scripts/test-baseline-state.sh` - Layer 1: Verify network state matches baseline expectations + +## Graph Explorer + +**[Start Here: explorer/Goal.md](./explorer/Goal.md)** + +- [Goal.md](./explorer/Goal.md) - Task: integrate Graph Explorer with local network + +## Testing Flows + +Step-by-step testing guides: [flows/](./flows/) + +- [EligibilityOracleTesting.md](./flows/EligibilityOracleTesting.md) - REO eligibility cycle +- [IndexingPaymentsTesting.md](./flows/IndexingPaymentsTesting.md) - Dipper indexing payments +- [IndexerAgentTesting.md](./flows/IndexerAgentTesting.md) - Indexer agent behavior + ## Usage -**To enable Indexing Payments:** +**Service profiles** are enabled by default in `.env`. To customize, edit `COMPOSE_PROFILES`: + ```bash -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up +COMPOSE_PROFILES=rewards-eligibility,indexing-payments,block-oracle,explorer # all (default) +COMPOSE_PROFILES=rewards-eligibility # REO only ``` -See [overrides/indexing-payments/README.md](../overrides/indexing-payments/README.md) for usage guide and [flows/IndexingPaymentsTesting.md](../flows/IndexingPaymentsTesting.md) for testing. +Then `docker compose up -d` applies the active profiles automatically. ## Documentation Guidelines diff --git a/docs/eligibility-oracle/Goal.md b/docs/eligibility-oracle/Goal.md new file mode 100644 index 0000000..3da5ab5 --- /dev/null +++ b/docs/eligibility-oracle/Goal.md @@ -0,0 +1,115 @@ +# Rewards Eligibility Oracle - Goal + +## Objective + +Add the Rewards Eligibility Oracle (REO) to the local network so that indexer reward eligibility can be tested end-to-end. 
This involves two pieces: + +1. **REO contract** - Deploy and configure the `RewardsEligibilityOracle` contract, integrate it with the `RewardsManager` +2. **REO node** - Containerise and run the eligibility-oracle-node service that consumes gateway query data from Redpanda and submits eligible indexers on-chain + +## Background + +The REO determines which **indexers** are eligible for rewards based on their query-serving performance. This is not the Subgraph Availability Oracle (SAO) and has nothing to do with subgraph denial. + +### How It Works + +1. The **gateway** publishes query attempt data to the `gateway_queries` Redpanda topic (already exists in local network) +2. The **REO node** consumes these events, aggregates per-indexer metrics over a rolling window (28 days default), and evaluates eligibility based on: + - Minimum online days (default: 5) + - Minimum subgraphs served (default: 1) + - Maximum latency (default: 5000ms) + - Maximum blocks behind (default: 50000) +3. Eligible indexers are submitted on-chain via `renewIndexerEligibility()` on the REO contract +4. The **RewardsManager** checks `rewardsEligibilityOracle.isEligible(indexer)` when distributing rewards + +### REO Contract Design + +- **Deny by default**: indexers are not eligible until an authorized oracle calls `renewIndexerEligibility()` +- **Time-based**: eligibility expires after `eligibilityPeriod` (default: 14 days) and must be renewed +- **Fail-safe**: if oracles stop updating for `oracleUpdateTimeout` (default: 7 days), all indexers become eligible +- **Global toggle**: `eligibilityValidationEnabled` can disable all eligibility checks (default: disabled) +- **Role-based access**: GOVERNOR, OPERATOR, ORACLE, PAUSE roles + +## What Success Looks Like + +1. **REO contract deployed** as part of `graph-contracts` setup, integrated with `RewardsManager` via `setRewardsEligibilityOracle()` +2. **REO node running** in docker-compose, consuming from local Redpanda `gateway_queries` topic +3. 
**Indexer marked eligible** after serving queries through the gateway +4. **Rewards gated by eligibility** - can verify via `isEligible()` contract calls and reward distribution behaviour + +## Components + +### Existing (already in local network) + +- **graph-contracts** - Deploys Horizon protocol contracts including `RewardsManager`; `issuance.json` already referenced but issuance package not yet deployed +- **gateway** - Publishes query data to `gateway_queries` Redpanda topic +- **redpanda** - Kafka-compatible message broker, already running +- **graph-node** - Indexes subgraphs, serves queries +- **indexer-agent / indexer-service** - Indexer infrastructure + +### To Be Added + +- **REO contract deployment** - Add Phase 4 to `graph-contracts/run.sh` using issuance package deployment scripts from `packages/deployment/deploy/rewards/eligibility/` +- **REO node containerisation** - Create Dockerfile for the Rust service at `/git/local/eligibility-oracle-node/eligibility-oracle-node` +- **REO node docker-compose service** - Config, Redpanda connection, chain RPC, contract address, signing key +- **Redpanda topic setup** - Create compacted `indexer_daily_metrics` topic for REO node state persistence + +## Source Repositories + +| Component | Location | Branch | +| --------------------------------- | ---------------------------------------------------------------------- | ---------- | +| REO contract + deployment scripts | `/git/graphprotocol/contracts` | `post-audit` | +| REO contract source | `packages/issuance/contracts/eligibility/RewardsEligibilityOracle.sol` | | +| REO deployment scripts | `packages/deployment/deploy/rewards/eligibility/` | | +| REO node (Rust service) | `/git/local/eligibility-oracle-node/eligibility-oracle-node` | | + +## Implementation Tasks + +### 1. 
Contract Deployment & Integration + +- Update `CONTRACTS_COMMIT` in `.env` to point to `post-audit` branch +- Add local network (chain 1337) support to `packages/deployment` (see [Status.md](./Status.md#gaps-to-fix)) +- Add REO contract deployment as a new phase in `graph-contracts/run.sh` +- Configure roles: grant ORACLE_ROLE to the REO node's signing key +- Integrate with RewardsManager: call `setRewardsEligibilityOracle(reoAddress)` +- Write deployed address to `issuance.json` + +### 2. REO Node Containerisation + +- Create Dockerfile for the Rust workspace at `/git/local/eligibility-oracle-node/eligibility-oracle-node` +- Create `config.toml` template with local network values: + - `kafka.bootstrap_servers` = `redpanda:9092` + - `kafka.input_topic` = `gateway_queries` + - `blockchain.rpc_urls` = `["http://chain:8545"]` + - `blockchain.contract_address` = from `issuance.json` + - `blockchain.chain_id` = `1337` + - `blockchain.private_key` = signing key with ORACLE_ROLE +- Add to docker-compose (likely as an override like indexing-payments) + +### 3. Redpanda Topic Setup + +- Create compacted `indexer_daily_metrics` topic for REO node persistence +- May need a setup script or init container + +### 4. 
Testing & Validation + +- Send queries through the gateway to generate `gateway_queries` events +- Verify REO node processes events and submits eligibility on-chain +- Check `isEligible(indexerAddress)` returns true +- Verify reward distribution honours eligibility + +## Configuration for Local Network + +For local testing, sensible overrides to the REO node defaults: + +- Shorter `analysis_period_days` (e.g., 1 day instead of 28) +- Lower `min_online_days` (e.g., 1 instead of 5) +- Shorter `scheduling.interval_secs` (e.g., 60 instead of 10800) +- Shorter `staleness_threshold_hours` (e.g., 1 instead of 20) +- Consider starting with `eligibilityValidationEnabled = false` on the contract and enabling once the node is running + +## Related Documentation + +- [Local Network README](../../README.md) +- [REO Contract Spec](file:///git/graphprotocol/contracts/post-audit/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.md) +- [REO Deployment Guide](file:///git/graphprotocol/contracts/post-audit/packages/deployment/docs/deploy/RewardsEligibilityOracleDeployment.md) diff --git a/docs/eligibility-oracle/Status.md b/docs/eligibility-oracle/Status.md new file mode 100644 index 0000000..261bc63 --- /dev/null +++ b/docs/eligibility-oracle/Status.md @@ -0,0 +1,233 @@ +# Rewards Eligibility Oracle - Status + +> Last updated: 2026-02-18 + +## Current Phase: End-to-end verified + +### Summary + +REO is fully integrated into the local network. Both workstreams complete and end-to-end flow verified: + +1. **Contract deployment** - DONE: Phase 4 deploys REO, integrates with RewardsManager, grants ORACLE_ROLE +2. **REO node** - DONE: consumes gateway_queries, evaluates eligibility, submits on-chain +3. 
**End-to-end** - VERIFIED: queries through gateway -> REO node -> `isEligible()` returns true + +## Completed + +- [x] Created goal documentation ([Goal.md](./Goal.md)) +- [x] Surveyed existing local network components (19 services, 3 contract deployment phases) +- [x] Identified `issuance.json` already referenced in `graph-contracts/run.sh` but issuance contracts not yet deployed +- [x] Explored REO contract in `graphprotocol/contracts` post-audit branch +- [x] Explored REO node at `/git/local/eligibility-oracle-node/eligibility-oracle-node` +- [x] Explored deployment package scripts and documentation for local network feasibility +- [x] Identified deployment package gaps and REO node contract signature mismatch +- [x] Created REO node Dockerfile, run.sh, docker-compose override +- [x] Fixed deployment package: added localNetwork support (chain 1337) on post-audit branch +- [x] Added Phase 4 (REO) to `graph-contracts/run.sh` with idempotency, RM integration, ORACLE_ROLE grant +- [x] Fixed REO node ABI mismatch: added `bytes calldata data` param and `uint256` return type +- [x] Updated `CONTRACTS_COMMIT` in `.env` to post-audit branch (`0003fe3a`) +- [x] Fixed address book compatibility: deployment package writes `implementationDeployment` field that indexer-agent rejects; added cleanup step in Phase 4 +- [x] Fixed docker-compose override: added `.env` bind mount for REO node container +- [x] End-to-end verified: gateway queries -> Redpanda -> REO node -> on-chain eligibility + +## Up Next + +- [ ] Clean run test: `docker compose down -v && up` to verify full lifecycle from scratch +- [x] Document testing procedure in `docs/flows/` +- [x] Create automated test script (`scripts/test-reo-eligibility.sh`) + +## Gaps To Fix + +### ~~1. 
Deployment package: extend for local network (chain 1337) deployment~~ FIXED + +**Branch:** `post-audit` in `graphprotocol/contracts` (commit `bcf73964`) + +**What was done:** + +- `rocketh/config.ts`: added `graphLocalNetworkChain` (id: 1337) and `localNetwork` environment +- `hardhat.config.ts`: added chain 1337 descriptor and `localNetwork` network config (`http://chain:8545`, test mnemonic) +- `lib/address-book-utils.ts`: added `isLocalNetworkMode()` detection and `addresses-local-network.json` resolution for all three packages (horizon, subgraph-service, issuance) +- `00_sync.ts`: no changes needed - chain 1337 naturally passes the `31337` guard, address book resolution handles the rest +- Docs: added localNetwork to quick reference, local network section in LocalForkTesting.md, fixed broken README link + +**Governance handling:** Deploy scripts check if deployer has GOVERNOR_ROLE on-chain. In local network (same account), TXs execute directly inline - no governance batch files needed. + +### ~~5. REO node: contract signature mismatch~~ FIXED + +**Where:** `/git/local/eligibility-oracle-node/eligibility-oracle-node/crates/eligibility-oracle/src/blockchain.rs` + +**What was done:** + +- Updated `sol!` macro: `renewIndexerEligibility(address[] calldata indexers, bytes calldata data) external returns (uint256)` +- Added `Bytes` import from `alloy::primitives` +- Updated call site to pass `Bytes::new()` as the `data` parameter +- Verified: `cargo check -p eligibility-oracle` passes clean + +### ~~6. 
Deployment package: `06_integrate.ts` hardcodes `canExecuteDirectly=false`~~ FIXED + +**Branch:** `post-audit` in `graphprotocol/contracts` (commit `5e23cde8`) + +**What was done:** + +- Query `eth_accounts` from the provider to check if the governor key is available +- If governor is in the accounts list (e.g., mnemonic-derived), execute directly with governor as executor +- If not (e.g., Safe multisig in production), generate governance TX file as before +- Removed `requireDeployer` dependency since executor is now the governor + +### 7. Indexer-agent: strict address book validation + +**Status:** Worked around in local network; upstream fix recommended + +The indexer-agent validates address book JSON entries and rejects any with unknown fields. The deployment package (post-audit) now writes `implementationDeployment` and `proxyDeployment` metadata to address books, causing the agent to crash with: + +``` +Address book entry contains invalid fields: implementationDeployment +``` + +**Local workaround:** Phase 4 in `graph-contracts/run.sh` strips these fields from `horizon.json` and `subgraph-service.json` after deployment. + +**Proper fix:** The indexer-agent should use permissive parsing that extracts known fields and ignores unknown ones. This would prevent breakage as the address book format evolves. + +## Notes + +### Contract Deployment Approach + +Phase 4 in `graph-contracts/run.sh` runs the full REO lifecycle via a single deployment package invocation, plus one cast call for ORACLE_ROLE: + +```bash +# Full lifecycle: sync → deploy → configure → transfer → integrate → verify +cd /opt/contracts/packages/deployment +npx hardhat deploy --tags rewards-eligibility --network localNetwork --skip-prompts + +# Grant ORACLE_ROLE (not part of standard deployment - local network specific) +cast send ... 
--private-key="${ACCOUNT1_SECRET}" "${reo_address}" "grantRole(bytes32,address)" "${oracle_role}" "${ACCOUNT0_ADDRESS}" +``` + +### REO Node Architecture + +- Consumes `gateway_queries` topic (already published by gateway in local network) +- Persists aggregated metrics to compacted `indexer_daily_metrics` topic +- Runs in daemon mode (periodic cycles) or single invocation +- Uses Alloy for contract interaction, rdkafka for Redpanda +- Needs `librdkafka` in container (rdkafka crate dependency) + +### Local Network Tuning + +Default REO node config is for production (28-day windows, 3-hour cycles). For local testing need shorter values - see [Goal.md](./Goal.md#configuration-for-local-network). + +### Accounts + +- `ACCOUNT0` (0xf39F...) - deployer, ORACLE_ROLE on REO, signing key for REO node +- `ACCOUNT1` (0x7099...) - governor role in contracts, GOVERNOR_ROLE on REO (after configure) +- `RECEIVER` (0xf4EF...) - indexer + +### Decisions Made + +- Use the deployment package scripts for contract deployment (not local forge/cast workarounds) +- REO node to run in daemon mode with shortened intervals for local testing +- ACCOUNT0 as both NetworkOperator and ORACLE_ROLE holder +- Skip governance transfer step in local network (deployer keeps GOVERNOR_ROLE) + +### Prerequisites for Testing + +- `CONTRACTS_COMMIT` in `.env` must point to a commit on post-audit that includes localNetwork support +- The post-audit branch must be pushed to GitHub (Docker build clones from there) + +--- + +## Log + +### 2026-02-18 - Project started + +- Created [Goal.md](./Goal.md) and this status document +- Explored REO contract in `graphprotocol/contracts` post-audit branch: upgradeable proxy pattern, role-based access, time-based eligibility with fail-safe +- Explored REO node: Rust service, Redpanda consumer, batched on-chain submission, no Dockerfile yet +- Identified key integration point: `RewardsManager.setRewardsEligibilityOracle()` connects the two systems +- Local network already 
has the `gateway_queries` Redpanda topic from gateway service + +### 2026-02-18 - Deployment package analysis + +- Analysed deployment scripts in `packages/deployment/deploy/rewards/eligibility/` +- Identified 5 gaps blocking local network deployment (see Gaps section above) +- Critical finding: REO node has ABI mismatch with contract (`renewIndexerEligibility` missing `data` param) +- Decision: fix deployment package rather than create local workarounds +- Proceeding with REO node containerisation (independent of contract deployment) + +### 2026-02-18 - REO node containerisation + +- Created `eligibility-oracle-node/Dockerfile` (multi-stage: rust-builder + wrapper-dev) +- Created `eligibility-oracle-node/run.sh` (generates config.toml, creates Redpanda topic, starts daemon) +- Created `overrides/eligibility-oracle/docker-compose.yaml` (override pattern like indexing-payments) +- Symlinked source from `/git/local/eligibility-oracle-node/eligibility-oracle-node` +- Config uses relaxed local network thresholds: 1-day window, 1 min online day, 60s cycle interval +- Uses `ACCOUNT0_SECRET` as the oracle signing key (needs ORACLE_ROLE granted once contract is deployed) +- Updated overrides/README.md with eligibility oracle section + +### 2026-02-18 - Checked post-audit branch + +- Switched target from `baseline` to `post-audit` branch (freshly rebased with all changes) +- Confirmed deployment package gap still present on `post-audit`: no chain 1337 / localNetwork support +- Fork-based testing (chain 31337 + FORK_NETWORK) works fine - this is a new capability needed, not a broken feature +- Rephrased gap #1 to clarify the distinction + +### 2026-02-18 - Fixed deployment package for local network + +- Added localNetwork support to `packages/deployment` on `post-audit` branch (commit `bcf73964`) +- Changes: `rocketh/config.ts` (chain 1337 + environment), `hardhat.config.ts` (chain descriptor + network), `address-book-utils.ts` (`isLocalNetworkMode()` + 
`addresses-local-network.json` resolution) +- `00_sync.ts` works as-is: chain 1337 bypasses the `31337` guard, address books resolve via updated path functions +- Governance TX handling confirmed: deploy scripts check `hasRole(GOVERNOR_ROLE, deployer)` on-chain, execute directly when true +- Fixed broken README link (`DeploymentDesignPrinciples.md` -> `deploy/ImplementationPrinciples.md`) +- Added localNetwork to DeploymentSetup.md quick reference table +- Added local network section to LocalForkTesting.md + +### 2026-02-18 - Phase 4 and REO node ABI fix + +- Added Phase 4 (REO) to `graph-contracts/run.sh`: + - Idempotency check via `issuance.json` + on-chain code check + - Pre-populates NetworkOperator (ACCOUNT0) in issuance address book + - Runs `npx hardhat deploy --tags rewards-eligibility-configure --network localNetwork --skip-prompts` + - Integrates REO with RewardsManager via cast (ACCOUNT1 governor key) + - Grants ORACLE_ROLE to ACCOUNT0 via cast +- Fixed REO node ABI mismatch in `blockchain.rs`: + - Updated sol! 
macro to match contract: `renewIndexerEligibility(address[], bytes) returns (uint256)` + - Added `Bytes` import, pass `Bytes::new()` at call site + - Compiles clean: `cargo check -p eligibility-oracle` +- Fixed gap #6: `06_integrate.ts` now checks `eth_accounts` for governor key availability + - Phase 4 updated to use full `rewards-eligibility` tag (single npx invocation) + - Only remaining cast call: ORACLE_ROLE grant (local network specific) +- Note: `CONTRACTS_COMMIT` in `.env` needs updating to post-audit (143 commits behind) + +### 2026-02-18 - End-to-end integration testing + +- Updated `CONTRACTS_COMMIT` in `.env` to `0003fe3a` (post-audit HEAD, already in sync with origin) +- Built all images and started network with REO override +- Phase 4 deployed REO contract at `0x86a2ee8faf9a840f7a2c64ca3d51209f9a02081d` +- RewardsManager integrated, ORACLE_ROLE granted to ACCOUNT0 +- **Bug found**: indexer-agent crashes with `Address book entry contains invalid fields: implementationDeployment` + - Cause: deployment package (post-audit) writes `implementationDeployment` metadata to horizon.json + - indexer-agent strictly validates address book fields and rejects unknown ones + - Fix: added `jq walk(... 
del(.implementationDeployment, .proxyDeployment))` cleanup after Phase 4 in run.sh + - This is arguably an indexer-agent bug (should ignore unknown fields, not reject them) +- **Bug found**: REO node container missing `.env` bind mount + - Override only had `./config/local:/opt/config:ro`, not `./.env:/opt/config/.env:ro` + - Caused `CHAIN_ID: unbound variable` crash + - Fix: added `.env` bind mount to override's volumes +- After fixes, full flow verified: + - Sent 10 queries through gateway (subgraph `BFr2mx7...`) + - REO node consumed 11 messages from `gateway_queries` (10 + 1 from gateway health check) + - Evaluated: 1 eligible indexer (`0xf4ef...`), days_online=1, good_queries=11 + - Submitted `renewIndexerEligibility` on-chain (tx `0x0f48d617...`) + - `isEligible(0xf4ef...)` returns `true` on-chain + +### 2026-02-18 - Demonstration script and testing documentation + +- Created `scripts/test-reo-eligibility.sh`: automated full-cycle test + - Enables eligibility validation on the REO contract (default is disabled) + - Seeds `lastOracleUpdateTime` via empty `renewIndexerEligibility([])` to disable fail-safe + - Verifies indexer is NOT eligible (deny-by-default) + - Sends queries through gateway, waits for REO node cycle (75s) + - Verifies indexer IS eligible after REO submission +- Created `docs/flows/EligibilityOracleTesting.md`: step-by-step manual and automated testing guide + - Includes contract behaviour reference table (three layers: validation toggle, fail-safe, per-indexer) + - Troubleshooting section for common issues +- Note: previous e2e test did not verify initial ineligibility — only checked `isEligible()` after REO had already submitted. 
The new script properly demonstrates the full deny→allow transition diff --git a/docs/explorer/Goal.md b/docs/explorer/Goal.md new file mode 100644 index 0000000..0dbadc1 --- /dev/null +++ b/docs/explorer/Goal.md @@ -0,0 +1,154 @@ +# Task: Graph Explorer Integration + +> Created: 2026-02-20 + +## Problem + +Several BaselineTestPlan cycles (1-2) reference Explorer UI operations for staking, delegation, and curation. Currently these are done by `graph-contracts` during deployment or by `cast send` in scripts. There is no way to: + +1. Visually verify protocol state during development/testing +2. Test the actual UI flows that indexers use in production +3. Reference exact UI components when documenting test equivalents + +## Objective + +Add Graph Explorer as an optional service in the local network, providing both a visual development tool and a reference for what contract calls the UI makes. + +## Source + +The Graph Explorer repository is available locally at `/git/edgeandnode/graph-explorer/graph-explorer`. + +### Architecture + +- **Next.js 14** frontend, no backend database +- All data from network subgraph GraphQL + direct contract calls via Wagmi/Viem +- Docker support: multi-stage build, port 3000 +- All required infrastructure already exists in local-network (graph-node, subgraphs, chain RPC, IPFS) + +### Key Finding: No API Layer + +Explorer has **no REST/GraphQL API for write operations**. All staking, delegation, and curation operations are direct smart contract calls from the browser via Wagmi hooks. 
This means: + +- For test automation, `cast send` is the equivalent of what the UI does +- The value of Explorer in local-network is **visual verification and reference**, not scripting +- Test scripts should reference the Explorer component that makes each call + +## Contract Call Reference + +This maps Explorer UI actions to the contract calls test scripts should make: + +### Curation + +| UI Action | Explorer Component | Contract | Function | +| ------------------ | -------------------------- | ---------- | --------------------------------------- | +| Signal (version) | `SignalForm.tsx:238-257` | L2Curation | `mint(bytes32, uint256, uint256)` | +| Signal (named) | `SignalForm.tsx:238-257` | L2GNS | `mintSignal(uint256, uint256, uint256)` | +| Unsignal (version) | `UnsignalForm.tsx:202-224` | L2Curation | `burn(bytes32, uint256, uint256)` | +| Unsignal (named) | `UnsignalForm.tsx:202-224` | L2GNS | `burnSignal(uint256, uint256, uint256)` | + +### Delegation + +| UI Action | Explorer Component | Contract | Function | +| ---------- | -------------------------------------- | -------------- | ---------------------------------------------- | +| Delegate | `DelegateTransactionContext.tsx:40-62` | HorizonStaking | `delegate(address, address, uint256, uint256)` | +| Undelegate | `UndelegateFormDefinition.tsx:62-87` | HorizonStaking | `undelegate(address, address, uint256)` | + +### Staking + +| UI Action | Explorer Component | Contract | Function | +| --------- | ------------------------- | -------------- | ----------------------------------------------------------------- | +| Stake | `StakeForm.tsx:104-120` | HorizonStaking | `stake(uint256)` or `stakeToProvision(address, address, uint256)` | +| Unstake | `UnstakeForm.tsx:101-146` | HorizonStaking | `unstake(uint256)` or `thaw(address, address, uint256)` | + +### Token Approval (all operations) + +| UI Action | Explorer Component | Contract | Function | +| ------------- | ---------------------------- | ------------ | 
--------------------------- | +| Approve spend | `GraphTokenApprovalFlow.tsx` | L2GraphToken | `approve(address, uint256)` | + +All Explorer component paths are relative to `/git/edgeandnode/graph-explorer/graph-explorer/src/`. + +## Implementation Approach + +### Override Pattern + +Add as a profiled service in `docker-compose.yaml`: + +```yaml +# profiles: [explorer] +``` + +### Docker Compose Override + +```yaml +services: + graph-explorer: + build: + context: /git/edgeandnode/graph-explorer/graph-explorer + dockerfile: Dockerfile + ports: + - "${EXPLORER_PORT:-3001}:3000" + environment: + - ENVIRONMENT=local + - DEFAULT_CHAIN_NAME=hardhat + - GRAPH_NETWORK_ID=1337 + - IS_TESTNET=true + depends_on: + ready: + condition: service_completed_successfully +``` + +### Configuration Challenges + +1. **Chain configuration**: Explorer expects Ethereum/Arbitrum chains. Local network uses hardhat (chainId 1337). May need chain config overrides or patches. + +2. **Contract addresses**: Explorer resolves addresses from `@graphprotocol/address-book`. Local network deploys fresh addresses each time. Need to either: + - Override address resolution at runtime + - Build with local addresses baked in + - Patch the address-book module + +3. **Private npm dependencies**: `@edgeandnode/gds` and `@edgeandnode/graph-auth-kit` require npm authentication. The Dockerfile uses `.npmrc` secret mounting. + +4. **Wallet connection**: MetaMask or similar needs to connect to the local hardhat chain (chainId 1337, RPC at localhost:8545). 
+ +### Complexity Assessment + +| Aspect | Difficulty | Notes | +| ------------------ | ----------- | ------------------------------ | +| Docker build | Low | Dockerfile exists, port 3000 | +| Chain config | Medium | Needs hardhat chain support | +| Address resolution | Medium-High | Fresh addresses per deployment | +| npm auth for build | Low | `.npmrc` pattern exists | +| Wallet integration | Medium | MetaMask + hardhat chain | +| Overall | Medium | Not blocking, but non-trivial | + +## Approach + +### Phase 1: Investigate Feasibility + +- [ ] Attempt local Docker build with npm auth +- [ ] Identify all hardcoded chain/network assumptions +- [ ] Test if address-book can be overridden at runtime +- [ ] Document blockers + +### Phase 2: Minimal Integration + +- [ ] Add graph-explorer service to `docker-compose.yaml` with `profiles: [explorer]` +- [ ] Configure for local hardhat chain +- [ ] Verify read-only operations work (view indexers, allocations, subgraphs) + +### Phase 3: Full Integration + +- [ ] Enable wallet connection to local chain +- [ ] Test write operations (delegate, signal, stake) via UI +- [ ] Document setup in README + +## Value Assessment + +**For testing**: Medium — test automation uses `cast send` regardless; Explorer adds visual verification but isn't required for scripted tests. + +**For development**: High — seeing protocol state visually accelerates debugging and makes the local network more approachable. + +**For documentation**: High — can reference exact UI flows and screenshot expected states. + +**Recommendation**: Worth pursuing but not a strict dependency for test automation. The contract call reference table above (linking UI components to `cast` equivalents) bridges the gap for scripted tests. 
diff --git a/docs/flows/EligibilityOracleTesting.md b/docs/flows/EligibilityOracleTesting.md new file mode 100644 index 0000000..de5172e --- /dev/null +++ b/docs/flows/EligibilityOracleTesting.md @@ -0,0 +1,171 @@ +# Eligibility Oracle Testing Flow + +Test the Rewards Eligibility Oracle (REO) end-to-end: indexer starts ineligible, serves queries through the gateway, and is marked eligible by the REO node. + +## Prerequisites + +1. Local network running with the rewards-eligibility profile enabled (`COMPOSE_PROFILES=rewards-eligibility` in `.env`, enabled by default): + ```bash + docker compose up -d --build + ``` + +2. All core services healthy (gateway, graph-node, redpanda, chain, graph-contracts): + ```bash + docker compose ps + ``` + +3. REO contract deployed (Phase 4 in graph-contracts logs): + ```bash + docker compose logs graph-contracts | grep "Phase 4" + ``` + +4. REO node running and connected: + ```bash + docker compose logs --tail 20 eligibility-oracle-node + ``` + +5. `cast` available on the host (installed with Foundry). + +6. Source environment variables: + ```bash + source .env + ``` + +## Automated Test + +Run the full cycle with a single script: + +```bash +./scripts/test-reo-eligibility.sh # default: 10 queries +./scripts/test-reo-eligibility.sh 50 # send 50 queries +``` + +The script: +1. Checks eligibility validation is enabled (done by deployment, errors if not) +2. Seeds `lastOracleUpdateTime` to disable the fail-safe (if needed) +3. Verifies the indexer is NOT eligible +4. Sends queries through the gateway +5. Polls `isEligible()` every 10s until true or timeout (150s) + +## Manual Step-by-Step + +### 1. Read REO contract address + +```bash +source .env +REO=$(docker exec graph-node cat /opt/config/issuance.json | jq -r '.["1337"].RewardsEligibilityOracle.address') +RPC="http://localhost:${CHAIN_RPC_PORT}" +echo "REO: $REO" +``` + +### 2. Check contract state + +```bash +# Is eligibility validation enabled? 
+cast call --rpc-url="$RPC" "$REO" "getEligibilityValidation()(bool)" + +# When was the last oracle update? +cast call --rpc-url="$RPC" "$REO" "getLastOracleUpdateTime()(uint256)" + +# Is the indexer eligible? +cast call --rpc-url="$RPC" "$REO" "isEligible(address)(bool)" "$RECEIVER_ADDRESS" +``` + +### 3. Verify eligibility validation is enabled + +Deployment (Phase 4) enables validation automatically. Confirm: + +```bash +cast call --rpc-url="$RPC" "$REO" "getEligibilityValidation()(bool)" +# Expected: true +``` + +If not enabled, re-run graph-contracts or enable manually: +```bash +# Requires OPERATOR_ROLE (ACCOUNT0) +cast send --rpc-url="$RPC" --confirmations=0 \ + --private-key="$ACCOUNT0_SECRET" \ + "$REO" "setEligibilityValidation(bool)" true +``` + +### 4. Seed the oracle timestamp + +If `lastOracleUpdateTime` is 0 (never updated), the fail-safe makes everyone eligible regardless. Seed it with an empty update: + +```bash +# Requires ORACLE_ROLE (ACCOUNT0) +cast send --rpc-url="$RPC" --confirmations=0 \ + --private-key="$ACCOUNT0_SECRET" \ + "$REO" "renewIndexerEligibility(address[],bytes)" "[]" "0x" +``` + +### 5. Verify indexer is NOT eligible + +```bash +cast call --rpc-url="$RPC" "$REO" "isEligible(address)(bool)" "$RECEIVER_ADDRESS" +# Expected: false +``` + +### 6. Send queries through the gateway + +```bash +# Mine blocks first to keep the gateway happy +./scripts/mine-block.sh 5 + +# Send 10 queries +./scripts/query_gateway.sh 10 +``` + +### 7. Wait for the REO node cycle + +The REO node cycles every 60 seconds in local network configuration. Watch the logs: + +```bash +docker compose logs -f eligibility-oracle-node +``` + +Look for: +- `Consumed N messages from gateway_queries` +- `Eligible indexers: [0xf4ef...]` +- `renewIndexerEligibility` transaction submitted + +### 8. 
The automated test script handles the first two states in the table (validation disabled, and the never-updated fail-safe) by enabling validation and seeding the oracle timestamp.
The REO node may have already submitted eligibility in a previous cycle. Wait for the `eligibilityPeriod` (14 days on-chain, but you can check the configured value) to expire, or redeploy the contracts with `docker compose down -v && docker compose up -d --build`.
Always check the output or log file to see the actual test results and failure details. @@ -38,6 +39,7 @@ The test script automatically: ## Test Environment The script sets up the following test database: + - Host: `localhost` - Port: `5433` - Database: `indexer_tests` @@ -65,7 +67,7 @@ Since the monorepo contains multiple packages, you can run tests for specific pa ```bash # Run tests only for indexer-agent package -cd indexer-agent/source/packages/indexer-agent +cd $INDEXER_AGENT_SOURCE_ROOT/packages/indexer-agent export POSTGRES_TEST_HOST=localhost export POSTGRES_TEST_PORT=5433 export POSTGRES_TEST_DATABASE=indexer_tests @@ -79,13 +81,15 @@ yarn test ## Important Learnings ### Directory Navigation + - **Always check your current directory** before running commands with `pwd` -- The test script changes directories to `indexer-agent/source` during execution +- The test script changes directories to `$INDEXER_AGENT_SOURCE_ROOT` during execution - Test output files are created in the directory where you run the script -- After debugging, you might be in `indexer-agent/source` instead of the local-network root +- After debugging, you might be in `$INDEXER_AGENT_SOURCE_ROOT` instead of the local-network root - Use absolute paths when in doubt: `/home/pablo/repos/local-network/scripts/test-indexer-agent.sh` ### Understanding Test Output + - The test script exits with non-zero status if any tests fail - this is normal - Always check the actual test output to understand what happened - Tests run for multiple packages sequentially: @@ -95,33 +99,38 @@ yarn test - If indexer-common fails, the other packages won't run at all ### Making Code Changes + - After modifying TypeScript files, you must compile before running tests: ```bash - cd indexer-agent/source + cd $INDEXER_AGENT_SOURCE_ROOT yarn compile ``` - Test error line numbers may not match exactly due to transpilation - Debug console.log statements work and will appear in test output ### Environment Variables + - 
`INDEXER_TEST_JRPC_PROVIDER_URL` - Ethereum RPC endpoint (defaults to public Arbitrum Sepolia) - `INDEXER_TEST_API_KEY` - API key for The Graph's subgraph endpoints (may be required) ## Troubleshooting ### Tests fail with connection errors + - Ensure Docker is running - Check if port 5433 is available: `lsof -i :5433` - Try running with more verbose output: `./scripts/test-indexer-agent.sh test` ### Dependencies not found + - The script should auto-install dependencies, but you can manually run: ```bash - cd indexer-agent/source + cd $INDEXER_AGENT_SOURCE_ROOT yarn install --frozen-lockfile ``` ### PostgreSQL container issues + - The script automatically cleans up containers, but you can manually remove: ```bash docker stop indexer-tests-postgres @@ -129,6 +138,7 @@ yarn test ``` ### Cleaning Test Output + - Remove ANSI escape codes from test output for easier reading: ```bash cat test-output.log | sed 's/\x1b\[[0-9;]*m//g' > test-output-clean.log @@ -137,6 +147,7 @@ yarn test ## CI Integration The tests run in CI using GitHub Actions with: + - PostgreSQL service container - Matrix testing for Node.js 20 and 22 - Environment secrets for integration tests (optional) @@ -144,6 +155,7 @@ The tests run in CI using GitHub Actions with: ### Timeout Considerations When running tests in automation or CI: + - Set appropriate timeouts (15-20 minutes minimum) - First runs take longer due to dependency installation - The test suite runs multiple packages sequentially (indexer-common → indexer-agent → indexer-cli) @@ -171,10 +183,10 @@ export POSTGRES_TEST_PASSWORD=testpass export NODE_OPTIONS="--dns-result-order=ipv4first" # Run tests -cd indexer-agent/source +cd $INDEXER_AGENT_SOURCE_ROOT yarn test # Clean up docker stop indexer-tests-postgres docker rm indexer-tests-postgres -``` \ No newline at end of file +``` diff --git a/flows/IndexingPaymentsTesting.md b/docs/flows/IndexingPaymentsTesting.md similarity index 87% rename from flows/IndexingPaymentsTesting.md rename to 
docs/flows/IndexingPaymentsTesting.md index 242e235..9e0c154 100644 --- a/flows/IndexingPaymentsTesting.md +++ b/docs/flows/IndexingPaymentsTesting.md @@ -7,14 +7,15 @@ This guide walks through testing the Indexing Payments system in the local-netwo ## Prerequisites 1. All services running and healthy: + ```bash docker compose ps ``` -2. Dipper service must be built from source (private repo): +2. Dipper service running (enable `indexing-payments` profile in `.env`): + ```bash - git submodule update --init --recursive --force dipper/source - # Ensure docker-compose.yaml has target: "wrapper-dev" for dipper + # Add indexing-payments to COMPOSE_PROFILES in .env, then: docker compose up -d --build dipper ``` @@ -28,36 +29,32 @@ This guide walks through testing the Indexing Payments system in the local-netwo You have two options for running the dipper CLI: ### Option 1: Use the Wrapper Script (Recommended) + ```bash # From repo root - automatically handles environment variables ./scripts/dipper-cli.sh [command] ``` ### Option 2: Run from Source + ```bash -cd dipper/source +# Set DIPPER_SOURCE_ROOT to a local clone of edgeandnode/dipper +cd $DIPPER_SOURCE_ROOT # All commands will be run from this directory using cargo # Note: You'll need to set environment variables manually (see below) ``` ## Configure Authentication -**Important**: The dipper CLI requires environment variables to be set for EVERY command. You have two options: +**Important**: The dipper CLI requires environment variables to be set for EVERY command. 
-### Option 1: Export Once per Session ```bash # Set up dipper CLI authentication (valid for current shell session) -source ../../.env # Load environment from repo root +source .env # Load environment from repo root export INDEXING_SIGNING_KEY="${RECEIVER_SECRET}" export INDEXING_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" ``` -### Option 2: Include with Each Command -```bash -# Source .env and set variables inline with each command -source ../../.env && export INDEXING_SIGNING_KEY="${RECEIVER_SECRET}" && export INDEXING_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" && cargo run --bin dipper-cli -- [command] -``` - **Note**: The CLI will fail with `missing field 'server_url'` if these environment variables are not set. ## Testing Flow @@ -68,10 +65,10 @@ source ../../.env && export INDEXING_SIGNING_KEY="${RECEIVER_SECRET}" && export # Using wrapper script (from repo root): ./scripts/dipper-cli.sh requests register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" 1337 -# OR using cargo directly (from dipper/source): +# OR using cargo directly (from $DIPPER_SOURCE_ROOT): cargo run --bin dipper-cli -- requests register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" 1337 -# Expected output: +# Expected output: # Creating indexing request for deployment ID: DeploymentId(QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP) # Created indexing request with ID: 01983d54-a2a0-7933-a4f5-bb96d7f4dd52 ``` @@ -82,7 +79,7 @@ cargo run --bin dipper-cli -- requests register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQ # Using wrapper script (from repo root): ./scripts/dipper-cli.sh requests list -# OR using cargo directly (from dipper/source): +# OR using cargo directly (from $DIPPER_SOURCE_ROOT): cargo run --bin dipper-cli -- requests list # Expected output: JSON array showing your indexing request with status "OPEN" @@ -98,6 +95,7 @@ cargo run --bin dipper-cli -- requests list ### 3. 
Check Dipper Logs Monitor dipper service logs for payment processing: + ```bash # Watch for indexing registration and payment activity docker compose logs -f dipper @@ -114,6 +112,7 @@ docker compose logs -f dipper | grep -E "(payment|receipt|indexing|registered)" ### 4. Verify Database State Check PostgreSQL for indexing payment data: + ```bash docker compose exec postgres psql -U postgres -d dipper -c "SELECT * FROM indexing_requests;" ``` @@ -135,6 +134,7 @@ curl -s http://localhost:8000/subgraphs/name/graph-network -X POST \ ``` **Important**: Check indexer-agent logs while waiting: + ```bash docker logs indexer-agent --tail 50 -f | grep -E "(allocation|QmNng|agreement)" ``` @@ -162,29 +162,34 @@ cargo run --bin dipper-cli -- requests cancel 01983d54-a2a0-7933-a4f5-bb96d7f4dd ## Common Issues ### Dipper Not Starting -- Check if submodule is initialized: `ls dipper/source/` -- Verify docker-compose.yaml has `target: "wrapper-dev"` + +- Verify `indexing-payments` profile is in COMPOSE_PROFILES +- Check `DIPPER_VERSION` is set in `.env` - Check logs: `docker compose logs dipper` - Ensure Postgres is healthy and migrations completed ### Authentication Errors + - Verify `INDEXING_SIGNING_KEY` is set correctly - Ensure `RECEIVER_SECRET` is available in .env - Check `INDEXING_SERVER_URL` includes the port - Try: `echo $INDEXING_SIGNING_KEY` to verify it's set ### CLI Connection Issues + - Ensure dipper service is healthy: `docker compose ps dipper` - Check admin RPC is accessible: `curl http://localhost:9000/` - Verify port mapping in docker-compose.yaml ### Environment Variable Issues + - **"missing field 'server_url'"**: Environment variables not set - Remember: Variables must be set for EVERY dipper-cli command - If switching terminals/sessions, re-export the variables - Alternative: Create a shell script that sets variables and runs commands ### No Payment Activity + - Ensure gateway is healthy and can route queries - Verify indexer-service has the indexing 
payment RPC port exposed (7602) - Check that an allocation exists for the subgraph @@ -198,4 +203,4 @@ cargo run --bin dipper-cli -- requests cancel 01983d54-a2a0-7933-a4f5-bb96d7f4dd # Optionally restart services docker compose restart dipper gateway -``` \ No newline at end of file +``` diff --git a/flows/README.md b/docs/flows/README.md similarity index 77% rename from flows/README.md rename to docs/flows/README.md index ea02f04..1f35138 100644 --- a/flows/README.md +++ b/docs/flows/README.md @@ -11,6 +11,14 @@ Test the Indexing Payments system including: - Monitoring payment flows - Verifying receipt aggregation +### [Eligibility Oracle Testing](./EligibilityOracleTesting.md) +Test the Rewards Eligibility Oracle (REO) end-to-end cycle: +- Verifying deny-by-default (indexer not eligible) +- Sending gateway queries to generate eligibility data +- REO node evaluation and on-chain submission +- Verifying indexer becomes eligible +- Automated script: `./scripts/test-reo-eligibility.sh` + ### [Indexer Setup](./indexer-setup.md) *(coming soon)* Complete workflow for setting up a new indexer including: - Indexer registration diff --git a/docs/indexing-payments/RecurringCollectorDeployment.md b/docs/indexing-payments/RecurringCollectorDeployment.md new file mode 100644 index 0000000..58c41a1 --- /dev/null +++ b/docs/indexing-payments/RecurringCollectorDeployment.md @@ -0,0 +1,71 @@ +# RecurringCollector Deployment — Outstanding Work + +Status: **not yet deployed** in local network or production. + +Dipper references `recurring_collector` in its config but currently uses the null address. +The contract source exists in the `rem-baseline-merge` contracts branch but is not wired +into any deployment path. + +## Contracts repo (`graphprotocol/contracts`) + +### 1. Ignition modules (local network / Hardhat) + +The `deploy:protocol` Hardhat task deploys SubgraphService via Ignition modules. 
+The SubgraphService Solidity constructor now expects a 5th parameter (`recurringCollector`), +but the Ignition module still passes only 4 — deployment will fail on the current baseline. + +Commit `f3fdc5114` ("feat: add RecurringCollector, indexingFeesCut, and library linking to +ignition deployment") adds the required Ignition wiring but is **not merged** into the +baseline branch. It needs to be cherry-picked or merged. That commit adds: + +- `packages/horizon/ignition/modules/core/RecurringCollector.ts` +- RecurringCollector import in `core.ts` +- 5th constructor arg in `SubgraphService.ts` Ignition module +- Config patching in `deploy.ts` task + +### 2. Deployment package (production / testnet) + +`packages/deployment/deploy/service/subgraph/01_deploy.ts` constructs SubgraphService with +4 args (Controller, DisputeManager, GraphTallyCollector, Curation). Once the contract +requires 5, this script must also be updated: + +- Add RecurringCollector to the contract registry or fetch it as a dependency +- Deploy RecurringCollector (or reference an existing deployment) before SubgraphService +- Pass `recurringCollectorAddress` as the 5th constructor arg +- Update `02_upgrade.ts` if the upgrade path needs the new implementation + +`Directory.sol` gains an immutable `RECURRING_COLLECTOR` field and a +`recurringCollector()` getter. Since Solidity immutables are embedded in bytecode +(not storage), this does not break storage layout — it's a standard proxy +implementation upgrade via `upgradeAndCall()`. + +## Local network (`rem-local-network`) + +After the contracts branch includes RecurringCollector in Ignition: + +1. **`.env`** — update `CONTRACTS_COMMIT` to the new contracts commit +2. **`containers/core/graph-contracts/run.sh`** — extract RecurringCollector address from + the deployed address book (likely `horizon.json`) +3. 
**`containers/indexing-payments/dipper/run.sh`** — replace null address with: + ```bash + recurring_collector=$(contract_addr RecurringCollector.address horizon) + ``` + +## Dipper + +No code changes needed — Dipper already has full RCA support (EIP-712 signing, agreement +lifecycle, chain listener, on-chain cancellation). It uses hand-written `sol!` macro +bindings, not a contracts submodule, so no dependency to bump. It just needs the real +contract address in its config. + +## Summary of blocking order + +``` +contracts: merge Ignition commit (f3fdc5114) into baseline + ↓ +contracts: update deployment package for 5-arg SubgraphService + ↓ +local-network: bump CONTRACTS_COMMIT, wire RecurringCollector address + ↓ +dipper config picks up real address — RCA functional end-to-end +``` diff --git a/docs/indexing-payments/archive/IntegrationSummary.md b/docs/indexing-payments/archive/IntegrationSummary.md index f13ecf1..08e1c05 100644 --- a/docs/indexing-payments/archive/IntegrationSummary.md +++ b/docs/indexing-payments/archive/IntegrationSummary.md @@ -1,59 +1,51 @@ # Indexing Payments Integration -**Status:** Phases 1-4 Complete ✅ | Blocked on dipper submodule access +ARCHIVED: This document describes the initial integration. Current setup uses published images (no submodules). ## Quick Start To enable Indexing Payments on local-network: ```bash -# Initialize dipper submodule (requires SSH access) -git submodule update --init dipper/source - -# Start with override -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up +# Enable indexing-payments profile in .env: +# COMPOSE_PROFILES=indexing-payments +docker compose up ``` -See [overrides/indexing-payments/README.md](../../overrides/indexing-payments/README.md) for usage. 
- ## What Was Implemented **Infrastructure (Phase 1):** + - Database: `dipper_1` created in postgres - Environment: `DIPPER_ADMIN_RPC_PORT`, `DIPPER_INDEXER_RPC_PORT` -- Documentation: [docs/indexing-payments/](../indexing-payments/), [flows/](../../flows/) +- Documentation: [docs/indexing-payments/](../indexing-payments/), [docs/flows/](../../flows/) - Scripts: merge-contracts, dipper-cli, test helpers **Service Files (Phase 2):** -- [dipper/Dockerfile](../../dipper/Dockerfile) - Multi-stage Rust + Python build -- [dipper/run.sh](../../dipper/run.sh) - Configuration script -- Submodule: git@github.com:edgeandnode/dipper.git (main branch) + +- [containers/indexing-payments/dipper/Dockerfile](../../../containers/indexing-payments/dipper/Dockerfile) - Wrapper image +- [containers/indexing-payments/dipper/run.sh](../../../containers/indexing-payments/dipper/run.sh) - Configuration script **Override (Phase 3):** -- [overrides/indexing-payments/docker-compose.yaml](../../overrides/indexing-payments/docker-compose.yaml) -- [overrides/indexing-payments/README.md](../../overrides/indexing-payments/README.md) -- [overrides/indexing-payments/start.sh](../../overrides/indexing-payments/start.sh) + +- Service definitions in `docker-compose.yaml` with `profiles: [indexing-payments]` **Documentation (Phase 4):** -- Updated [README.md](../../README.md) and [overrides/README.md](../../overrides/README.md) -- All terminology updated (DIPs → Indexing Payments) -## Current Blocker +- Updated [README.md](../../README.md) +- All terminology updated (DIPs → Indexing Payments) -**Dipper submodule not initialized** - requires access to private repo: -- `git@github.com:edgeandnode/dipper.git` -- Path: `dipper/source/` (empty) +## Notes -Without submodule: cannot build dipper service, cannot test payment flows. +Dipper now uses a published image (`ghcr.io/edgeandnode/dipper-service`). No submodule required. 
## Documentation -- [overrides/indexing-payments/README.md](../../../overrides/indexing-payments/README.md) - Usage guide +- Enable via `COMPOSE_PROFILES=indexing-payments` in `.env` - [docs/indexing-payments/](../) - Architecture & implementation plans -- [flows/IndexingPaymentsTesting.md](../../../flows/IndexingPaymentsTesting.md) - Testing guide +- [docs/flows/IndexingPaymentsTesting.md](../../flows/IndexingPaymentsTesting.md) - Testing guide ## Next Steps -1. Initialize submodule: `git submodule update --init dipper/source` -2. Build: `docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml build` -3. Test: Follow [flows/IndexingPaymentsTesting.md](../../flows/IndexingPaymentsTesting.md) +1. Build: `docker compose build dipper iisa-mock` +2. Test: Follow [docs/flows/IndexingPaymentsTesting.md](../../flows/IndexingPaymentsTesting.md) diff --git a/docs/indexing-payments/archive/TestingStatus.md b/docs/indexing-payments/archive/TestingStatus.md deleted file mode 100644 index f0d1aa0..0000000 --- a/docs/indexing-payments/archive/TestingStatus.md +++ /dev/null @@ -1,100 +0,0 @@ -# Indexing Payments Testing Status - -**Last Updated:** 2026-02-03 -**Status:** Build Complete ✅ | Config Blocked ⏸️ - -## Current State - -### ✅ What Works - -**Infrastructure (Phases 1-4):** -- Database setup (dipper_1) ✅ -- Environment variables (.env) ✅ -- Documentation complete ✅ -- Helper scripts ✅ -- Dipper submodule initialized ✅ -- Docker build system fixed ✅ -- Dipper service successfully builds ✅ - -**Build Fixes Applied:** -- Dockerfile updated for current dipper (Rust-only, no Python) ✅ -- Docker Compose context and volume paths fixed ✅ -- Environment variable loading fixed (set -a) ✅ -- TAPVerifier address extraction fixed ✅ -- Config generation works ✅ - -### ⏸️ What's Blocked - -**Primary Blocker: Dipper Service Config Schema Mismatch** -- The run.sh configuration script was extracted from older dips-debug branch -- Current dipper service 
(main branch) expects different config structure -- Error: `missing field 'gateway_operator_allowlist'` despite field being present -- Config structure may have changed significantly in newer dipper versions - -**Symptoms:** -- Dipper service starts but immediately exits with code 101 -- Config deserial ization fails at line 27 -- Multiple attempts to add gateway_operator_allowlist to different sections didn't resolve - -**Root Cause:** -- run.sh config template is outdated -- Need to match current dipper service config schema -- May need to reference dipper repository for current config structure - -### 🔧 Next Steps - -**To Unblock:** -1. Check dipper repository for example configs or config schema -2. Update run.sh config generation to match current dipper version -3. Or: Pin dipper submodule to older commit that matches extracted run.sh config - -**Options:** -- **Option A:** Update config to current schema (recommended for long-term) - - Find example config in dipper repository - - Update run.sh to generate correct structure - -- **Option B:** Use older dipper version (quick fix) - - Find commit that matches the run.sh config structure - - Update submodule to pin to that commit - -## Quick Test (When Unblocked) - -```bash -# 1. Start services -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up -d - -# 2. Verify dipper health -curl http://localhost:9000/health - -# 3. Test CLI -./scripts/dipper-cli.sh requests list - -# 4. Follow full test guide -# See flows/IndexingPaymentsTesting.md -``` - -## Build Progress - -### Commits Made: -1. Phase 1: Database, environment, docs, scripts (commit 1346466) -2. Terminology: Updated 7 files to use "Indexing Payments" -3. Identifiers: Changed dips_* to indexing* in technical references -4. Protobuf: Renamed GatewayDipsService → GatewayIndexingService -5. Phases 2-4: Dockerfile, submodule, overrides, README updates -6. Submodule: Initialized dipper/source -7. 
Build fix: Updated Dockerfile for current Rust-only dipper -8. Config fix: Environment variables, volume paths, TAPVerifier - -### Current Branch State: -- Branch: rem-local-network -- Commits ahead: 11 -- All changes committed ✅ -- Ready for config schema fix - -## Documentation - -- [Testing Guide](../../../flows/IndexingPaymentsTesting.md) - Step-by-step testing instructions -- [Architecture](../Architecture.md) - Technical architecture -- [Integration Summary](./IntegrationSummary.md) - Implementation overview -- [Usage Guide](../../../overrides/indexing-payments/README.md) - Getting started -- [Dipper Service Plan](../DipperServicePlan.md) - Service configuration details diff --git a/docs/indexing-payments/archive/UserExperience.md b/docs/indexing-payments/archive/UserExperience.md index 5bb42b6..a6b9bba 100644 --- a/docs/indexing-payments/archive/UserExperience.md +++ b/docs/indexing-payments/archive/UserExperience.md @@ -6,29 +6,32 @@ - **Default:** TAP for query fees (no change) - **With Override:** TAP + Indexing Payments (dipper service added) -- **Enable:** `docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up` +- **Enable:** Set `COMPOSE_PROFILES=indexing-payments` in `.env`, then `docker compose up` ## Key Differences ### Payment Methods -| Aspect | TAP (Default) | Indexing Payments (Override) | -|--------|---------------|------------------------------| -| **Use Case** | Query fees | Indexing fees | -| **Method** | Allocations + receipts | GRT transfers | -| **Capital** | $50-$1000 allocations | Minimal (just payment amount) | -| **Response** | Synchronous | Asynchronous (receipt ID) | -| **Burn** | No burn | 1% protocol burn | +| Aspect | TAP (Default) | Indexing Payments (Override) | +| ------------ | ---------------------- | ----------------------------- | +| **Use Case** | Query fees | Indexing fees | +| **Method** | Allocations + receipts | GRT transfers | +| **Capital** | $50-$1000 allocations | Minimal 
(just payment amount) | +| **Response** | Synchronous | Asynchronous (receipt ID) | +| **Burn** | No burn | 1% protocol burn | ### What's Added **New Service:** + - `dipper` container running on ports 9000 (admin) and 9001 (indexer) **New Database:** + - `dipper_1` database in postgres (unused in default setup) **New Workflow:** + 1. Admin registers indexing request 2. Indexer receives request via dipper 3. Indexer performs work, submits report @@ -45,20 +48,23 @@ ## Usage Comparison **Default (TAP Only):** + ```bash docker compose up # All services except dipper ``` **With Indexing Payments:** + ```bash -docker compose -f docker-compose.yaml -f overrides/indexing-payments/docker-compose.yaml up +# Set COMPOSE_PROFILES=indexing-payments in .env +docker compose up # All services including dipper ``` ## Documentation For detailed architecture and testing, see: -- [Architecture](../Architecture.md) -- [Testing Guide](../../../flows/IndexingPaymentsTesting.md) -- [Usage Guide](../../../overrides/indexing-payments/README.md) + +- [Architecture](../safe-based/Architecture.md) +- [Testing Guide](../../flows/IndexingPaymentsTesting.md) diff --git a/docs/indexing-payments/Architecture.md b/docs/indexing-payments/safe-based/Architecture.md similarity index 100% rename from docs/indexing-payments/Architecture.md rename to docs/indexing-payments/safe-based/Architecture.md diff --git a/docs/indexing-payments/DipperServicePlan.md b/docs/indexing-payments/safe-based/DipperServicePlan.md similarity index 100% rename from docs/indexing-payments/DipperServicePlan.md rename to docs/indexing-payments/safe-based/DipperServicePlan.md diff --git a/docs/indexing-payments/IndexerAgentPlan.md b/docs/indexing-payments/safe-based/IndexerAgentPlan.md similarity index 100% rename from docs/indexing-payments/IndexerAgentPlan.md rename to docs/indexing-payments/safe-based/IndexerAgentPlan.md diff --git a/docs/indexing-payments/IndexerServicePlan.md 
b/docs/indexing-payments/safe-based/IndexerServicePlan.md similarity index 100% rename from docs/indexing-payments/IndexerServicePlan.md rename to docs/indexing-payments/safe-based/IndexerServicePlan.md diff --git a/docs/indexing-payments/README.md b/docs/indexing-payments/safe-based/README.md similarity index 83% rename from docs/indexing-payments/README.md rename to docs/indexing-payments/safe-based/README.md index 27b9134..76de402 100644 --- a/docs/indexing-payments/README.md +++ b/docs/indexing-payments/safe-based/README.md @@ -2,6 +2,8 @@ This folder contains all documentation for the Indexing Payments Safe-based payment system implementation. +This approach is obsolete. + ## Overview The Indexing Payments Safe payment system replaces TAP (Timeline Aggregation Protocol) for indexing fee payments, using on-chain GRT transfers via Safe Module pattern with asynchronous processing. @@ -9,9 +11,11 @@ The Indexing Payments Safe payment system replaces TAP (Timeline Aggregation Pro ## Documentation Structure ### Architecture + - [`Architecture.md`](./Architecture.md) - System architecture, design principles, and component interactions ### Implementation Plans + - [`IndexerAgentPlan.md`](./IndexerAgentPlan.md) - Changes needed for the Indexer Agent to handle Receipt IDs and polling - [`DipperServicePlan.md`](./DipperServicePlan.md) - Core payment processing implementation in the Dipper service - [`IndexerServicePlan.md`](./IndexerServicePlan.md) - Minimal protocol buffer updates for the Indexer Service @@ -19,19 +23,25 @@ The Indexing Payments Safe payment system replaces TAP (Timeline Aggregation Pro ## Quick Links ### For Indexer Agent Development -Start with the [Indexer Agent Plan](./indexer-agent-plan.md) which covers: + +Start with the [Indexer Agent Plan](./IndexerAgentPlan.md) which covers: + - Receipt ID tracking - Status polling mechanism - Database schema updates ### For Dipper Development -See the [Dipper Plan](./dipper-plan.md) for: + +See the [Dipper 
Plan](./DipperServicePlan.md) for: + - Safe Module client implementation - Worker-based payment processing - Receipt status management ### For Understanding the System -Read the [Architecture Document](./architecture.md) to understand: + +Read the [Architecture Document](./Architecture.md) to understand: + - Payment flow and state machine - Security considerations - API specifications @@ -41,4 +51,4 @@ Read the [Architecture Document](./architecture.md) to understand: - **Receipt ID**: Replaces TAP receipts, enables async processing - **State Machine**: PENDING → SUBMITTED/FAILED - **Safe Module**: Direct execution pattern for GRT transfers -- **1% Protocol Burn**: Automatic burn on all payments \ No newline at end of file +- **1% Protocol Burn**: Automatic burn on all payments diff --git a/docs/testing/TestFramework.md b/docs/testing/TestFramework.md new file mode 100644 index 0000000..5e54d70 --- /dev/null +++ b/docs/testing/TestFramework.md @@ -0,0 +1,245 @@ +# Task: Test Framework for Local Network Automation + +> Created: 2026-02-20 + +## Problem + +Test automation currently uses bash scripts with custom PASS/FAIL helpers. This works well for Layers 0-1 (query validation, state observation) but will not scale to Layers 2-3 (operational lifecycle, timing-dependent flows) which require polling, retries, state tracking, parallel execution, and structured assertions. 
+ +## Current State + +### Scripts + +| Script | Layer | Lines | Pattern | +| ------------------------------- | ----- | ----- | -------------------------- | +| `test-baseline-queries.sh` | 0 | 192 | curl + grep for errors | +| `test-indexer-guide-queries.sh` | 0 | 182 | curl + cast + grep | +| `test-baseline-state.sh` | 1 | 261 | curl + jq assertions | +| `test-reo-eligibility.sh` | 2-3 | ~200 | curl + cast + polling loop | + +### Strengths + +- Clean, readable, well-documented +- Direct access to curl, cast, docker exec, jq +- Zero compilation overhead +- Familiar to operations-oriented teams + +### Pain Points Growing With Scale + +- Manual assertion logic (string comparison via `eval`) +- No parallel execution +- Duplicated helpers across scripts (`gql()`, `check()`, `run_query()`) +- Polling/retry patterns fragile in bash +- No structured test reporting (JSON/TAP/XML) +- No test filtering or selective execution + +## Options Evaluated + +### Option A: Bash + Shared Helpers + +Extract common functions into `scripts/lib/test-helpers.sh`, keep writing bash. + +| Aspect | Rating | +| -------------- | ------------------------------------------------------- | +| Learning curve | None | +| Layer 0-1 fit | Excellent | +| Layer 2-3 fit | Poor — polling, state machines, parallelism are fragile | +| Maintenance | Degrades as test count grows | + +**Verdict**: Right for Layers 0-1. Not sufficient for Layers 2-3. + +### Option B: Python pytest + +Already installed in devcontainer (v9.0.2 + pytest-cov). 
+ +| Aspect | Rating | +| ----------------- | -------------------------------------------------------- | +| Learning curve | Low — 1-2 hours for bash-familiar developers | +| Layer 0-1 fit | Overkill — just curl + jq | +| Layer 2-3 fit | Strong — fixtures, retry, async, parallel (`-n auto`) | +| JSON assertions | Native dict access, no jq dependency | +| Subprocess calls | `subprocess.run(["cast", ...])` — more verbose than bash | +| Failure reporting | Excellent — diffs, tracebacks, captured output | + +**Available plugins**: `pytest-asyncio`, `pytest-xdist` (parallel), `pytest-timeout`, `pytest-retry`. Would need `pip install` in Dockerfile (4 lines). + +### Option C: Rust + cargo-nextest + +Already installed: `cargo-nextest` 0.9.127, `cargo-make` 0.37.24, full async toolchain. The eligibility-oracle project at `/git/local/eligibility-oracle-node/` demonstrates the exact patterns needed. + +| Aspect | Rating | +| ------------------ | --------------------------------------------------------------- | +| Learning curve | Medium — team already writes Rust | +| Layer 0-1 fit | Acceptable — more verbose than bash but type-safe | +| Layer 2-3 fit | Strong — `tokio::test`, `reqwest`, structured error handling | +| JSON assertions | `serde_json` value access + `pretty_assertions` for diffs | +| Subprocess calls | `std::process::Command` — safe (no shell escaping), but verbose | +| Failure reporting | Good — backtraces, `anyhow` context, `pretty_assertions` | +| Compile time | 20-30s initial, 2-5s incremental | +| Parallel execution | Built into nextest — automatic, zero config | +| IDE support | Full rust-analyzer autocomplete, inline docs | + +**Key advantage**: Primary language of the devcontainer and team. No context-switching. The `reqwest` + `serde_json` + `tokio::test` pattern is already proven in the workspace. + +**Key trade-off**: 20-30s initial compile per session vs instant bash execution. 
+ +### Option D: BATS or Node.js + +- **BATS**: Not installed, marginal benefit over bash + helpers, still bash underneath +- **Node.js (jest/vitest)**: Available but no advantage over Python or Rust for CLI orchestration + +Neither recommended. + +## Comparison: Layer 2-3 Test Example + +A test that creates an allocation, advances epochs, and verifies rewards: + +### Bash + +```bash +# Create allocation (fragile string parsing) +result=$(curl -s "$AGENT_URL" -H 'content-type: application/json' \ + -d '{"query": "mutation { createAllocation(...) { id } }"}') +alloc_id=$(echo "$result" | jq -r '.data.createAllocation.id') + +# Advance 3 epochs (manual loop) +for i in 1 2 3; do + ./scripts/advance-epoch.sh +done + +# Poll until closed (manual timeout) +elapsed=0 +while [ $elapsed -lt 120 ]; do + status=$(curl -s "$SUBGRAPH_URL" ... | jq -r '.data.allocations[0].status') + [ "$status" = "Closed" ] && break + sleep 5; elapsed=$((elapsed + 5)) +done +[ "$status" = "Closed" ] || { echo "FAIL: timed out"; exit 1; } +``` + +### Rust + +```rust +#[tokio::test] +async fn test_allocation_lifecycle() -> Result<()> { + let net = TestNetwork::from_env()?; + + let alloc = net.create_allocation(&deployment).await?; + net.advance_epochs(3).await?; + net.close_allocation(&alloc.id).await?; + + let result = net.poll_until(Duration::from_secs(120), || async { + let a = net.query_allocation(&alloc.id).await?; + Ok(a.status == "Closed") + }).await?; + + assert!(result.indexing_rewards > 0, "Expected rewards, got 0"); + Ok(()) +} +``` + +### Python + +```python +def test_allocation_lifecycle(network): + alloc = network.create_allocation(deployment) + network.advance_epochs(3) + network.close_allocation(alloc["id"]) + + result = network.poll_until( + timeout=120, + check=lambda: network.query_allocation(alloc["id"])["status"] == "Closed" + ) + + assert result["indexingRewards"] != "0" +``` + +## Recommendation: Rust for Layers 2-3, Keep Bash for Layers 0-1 + +### Rationale + +1. 
**Layers 0-1 are done and working** in bash. Moving them gains nothing. +2. **Layers 2-3 need orchestration** that bash handles poorly. +3. **Rust is the team's primary language** — the devcontainer, the eligibility-oracle, and the broader Graph ecosystem tooling are Rust-first. +4. **The tooling is already paid for**: cargo-nextest, tokio, reqwest, serde_json are all installed and proven in the workspace. +5. **pytest is the pragmatic alternative** if Rust compile times prove too disruptive during rapid test development. It's installed and ready. + +### Decision Point + +Try Rust first on one test (port `test-reo-eligibility.sh` to a Rust integration test). If the compile-time overhead is acceptable during active development, continue with Rust. If not, fall back to pytest — the test structure and helper patterns are identical, just in a different language. + +## Implementation Plan + +### Phase 1: Shared Bash Helpers (immediate) + +Extract duplicated functions into a shared library: + +``` +scripts/lib/ + test-helpers.sh # gql(), check(), jq_test(), run_query(), run_cast() + test-constants.sh # URL resolution, env loading, PATH setup +``` + +Refactor existing Layer 0-1 scripts to source these. No behavior change. 
+ +### Phase 2: Rust Test Crate (next) + +``` +tests/ + Cargo.toml + src/ + lib.rs # TestNetwork struct, shared helpers + graphql.rs # GraphQL query helpers + cast.rs # cast CLI wrapper + polling.rs # poll_until, retry logic + tests/ + reo_eligibility.rs # Port of test-reo-eligibility.sh + allocation_cycle.rs # Layer 2: create → close → verify +``` + +Minimal `Cargo.toml`: + +```toml +[package] +name = "local-network-tests" +version = "0.1.0" +edition = "2024" + +[dependencies] +reqwest = { version = "0.12", features = ["json"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } +anyhow = "1" + +[dev-dependencies] +pretty_assertions = "1" +``` + +### Phase 3: Migrate Remaining Tests + +Once the pattern is proven: + +- Layer 2 operational lifecycle tests in Rust +- Layer 3 timing-dependent tests in Rust +- Keep bash scripts for quick manual validation (they remain useful documentation) + +### Integration + +```bash +# Run bash tests (Layers 0-1) +./scripts/test-baseline-queries.sh +./scripts/test-baseline-state.sh +./scripts/test-indexer-guide-queries.sh + +# Run Rust tests (Layers 2-3) +cd tests && cargo nextest run + +# Run everything +cargo make test-all # Orchestrates both +``` + +## Dependencies + +- Extract shared bash helpers (no new deps) +- Rust test crate: `reqwest`, `serde_json`, `tokio`, `anyhow` (all already in workspace) +- Optional: `pretty_assertions` for better diff output diff --git a/docs/testing/reo/CurationSignal.md b/docs/testing/reo/CurationSignal.md new file mode 100644 index 0000000..db0dda5 --- /dev/null +++ b/docs/testing/reo/CurationSignal.md @@ -0,0 +1,115 @@ +# Task: Add Curation Signal to Local Network Setup + +> Created: 2026-02-20 +> Status: RESOLVED (2026-02-22) — implemented in `start-indexing/run.sh` and `graph-contracts/run.sh` + +## Problem + +BaselineTestPlan test 4.1 filters for `signalledTokens_not: 0` and returns empty on the local network because no curation signal is added during setup. 
This means any test that depends on curation data (signal amounts, curator entities, deployment filtering by signal) cannot run.
+
+## Objective
+
+Add curation signal to deployed subgraphs as part of the standard `start-indexing` setup flow, so the local network starts with realistic curation state.
+
+## Scope
+
+Small change (~20-30 lines) in `start-indexing/run.sh`. No new services, no new containers.
+
+## Implementation
+
+### Contracts
+
+| Contract | Config file | Key |
+|----------|------------|-----|
+| L2Curation | `subgraph-service.json` | `.["1337"].L2Curation.address` |
+| L2GraphToken | `horizon.json` | `.["1337"].L2GraphToken.address` |
+| L2GNS | `subgraph-service.json` | `.["1337"].L2GNS.address` |
+
+Addresses resolved via `contract_addr` helper in [containers/shared/lib.sh](../../../containers/shared/lib.sh).
+
+### Insertion Point
+
+In [start-indexing/run.sh](../../../containers/indexer/start-indexing/run.sh), after GNS publishing (line 74) and before setting indexing rules (line 76):
+
+```
+line 54-74: Publish subgraphs to GNS
+line ??: << ADD CURATION SIGNAL HERE >>
+line 76-80: Set indexing rules
+```
+
+### Steps
+
+For each deployed subgraph (network, tap, block_oracle):
+
+1. **Convert deployment IPFS hash to bytes32** (already done for GNS publishing — reuse `dep_hex`)
+
+2. **Approve L2Curation to spend GRT**:
+   ```bash
+   cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" \
+     --confirmations=0 --mnemonic="${MNEMONIC}" \
+     "${grt}" 'approve(address,uint256)' "${curation}" "${SIGNAL_AMOUNT}"
+   ```
+
+3.
**Mint signal via L2Curation**:
+   ```bash
+   cast send --rpc-url="http://chain:${CHAIN_RPC_PORT}" \
+     --confirmations=0 --mnemonic="${MNEMONIC}" \
+     "${curation}" 'mint(bytes32,uint256,uint256)(uint256,uint256)' \
+     "0x${dep_hex}" "${SIGNAL_AMOUNT}" "0"
+   ```
+
+### Parameters
+
+- **Curator account**: ACCOUNT0 (`0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266`) — same account that publishes subgraphs
+- **Signal amount**: 1000 GRT per subgraph (1000000000000000000000 wei) — enough to be meaningful, small relative to total supply
+- **Min signal out**: 0 (no slippage protection needed on local network)
+
+### Graph Explorer UI Equivalent
+
+In Graph Explorer, this is the "Signal" action on a subgraph detail page:
+
+| Explorer component | Contract call |
+|---|---|
+| `SignalForm.tsx:238-257` (Graph Explorer repo) `onSignal()` — version signal path | `L2Curation.mint(deploymentId, amount, minSignal)` |
+| `SignalForm.tsx:238-257` (Graph Explorer repo) `onSignal()` — named signal path | `L2GNS.mintSignal(subgraphId, amount, minNameSignal)` |
+
+For local network setup, the direct `L2Curation.mint()` call is simpler since we have deployment IDs directly and don't need NFT subgraph IDs.
+
+### Verification
+
+After signal is added, this query should return results:
+
+```graphql
+{
+  subgraphDeployments(where: { signalledTokens_not: "0" }) {
+    ipfsHash
+    signalledTokens
+    curatorSignals {
+      curator { id }
+      signal
+      signalledTokens
+    }
+  }
+}
+```
+
+### Idempotency
+
+Check `signalledTokens` before minting — if already non-zero, skip. Follows the same pattern used for GNS publishing (check `subgraph_count` before publishing).
+
+## Dependencies
+
+None. All contracts already deployed. ACCOUNT0 already has GRT from protocol initialization.
+ +## Affected Tests + +- BaselineTestPlan 4.1: `subgraphDeployments(where: { signalledTokens_not: 0 })` — currently returns empty, will return data +- Any future Layer 2 tests involving curation operations +- Enables testing of curation-dependent reward calculations + +## Files to Modify + +| File | Change | +|------|--------| +| `start-indexing/run.sh` | Add curation signal block after GNS publishing | +| `scripts/test-baseline-state.sh` | Add check for `signalledTokens` non-zero | diff --git a/docs/testing/reo/Goal.md b/docs/testing/reo/Goal.md new file mode 100644 index 0000000..52a98de --- /dev/null +++ b/docs/testing/reo/Goal.md @@ -0,0 +1,97 @@ +# Test Plan Automation - Goal + +## Objective + +Automate the verification queries and commands from the indexer test plans so they are repeatable, catch schema drift early, and progressively cover more of the operational workflow. + +The test plans live in [graphprotocol/contracts](https://github.com/graphprotocol/contracts) and are designed for human indexers running against Arbitrum Sepolia. This automation adapts them for the local network, where we control the full stack and can cycle through epochs in seconds. + +## Source Test Plans + +| Document | Scope | Tests | +|----------|-------|-------| +| [BaselineTestPlan.md](https://github.com/graphprotocol/contracts/blob/reo-testing/packages/issuance/docs/testing/reo/BaselineTestPlan.md) | Standard indexer operations (stake, provision, allocate, query, rewards) | 7 cycles, 22 tests | +| [IndexerTestGuide.md](https://github.com/graphprotocol/contracts/blob/reo-testing/packages/issuance/docs/testing/reo/IndexerTestGuide.md) | REO eligibility flows (renew, expire, deny, recover) | 5 sets, 8 tests | + +## Automation Layers + +Each layer builds on the previous. The goal is to move progressively from schema validation toward full operational testing. 
+ +``` +Layer 0: Query Validation ← scripts validate queries parse correctly +Layer 1: State Observation ← scripts check network state matches expectations +Layer 2: Operational Lifecycle ← scripts drive state changes and verify outcomes +Layer 3: Timing-Dependent ← scripts manage epoch advancement and eligibility expiry +``` + +### Layer 0: Query Schema Validation + +**What**: Run every GraphQL verification query and `cast` command from the test plans against the live network. Check for schema errors, missing fields, invalid enum values. + +**Why**: Catches the kind of bugs we found manually — `unallocatedStake` vs `availableStake`, `"ProvisionThaw"` vs `Provision`, `indexingRewardAmount` on wrong entity type. These are silent failures that would block an indexer following the docs. + +**Speed**: Seconds. No state changes. Safe to run anytime. + +**Scripts**: +- `scripts/test-baseline-queries.sh` — all 14 BaselineTestPlan queries +- `scripts/test-indexer-guide-queries.sh` — IndexerTestGuide queries + cast commands + +### Layer 1: State Observation + +**What**: After network startup, verify the expected state exists: indexer registered, provision created, allocations active, epoch progressing, subgraph synced. + +**Why**: Confirms the local network initialised correctly before running operational tests. Catches deployment regressions (e.g., contract upgrade breaks address book, indexer-agent fails to register). + +**Speed**: Seconds. Read-only. + +**Builds on**: Layer 0 queries, filtered to check specific values (non-zero stake, Active allocations, populated URL/geoHash). + +### Layer 2: Operational Lifecycle + +**What**: Execute the Cycle 7 end-to-end workflow: create allocation → send queries → close allocation → verify rewards and fees. Uses `cast send` for contract interactions and the indexer management API for allocation operations. + +**Why**: Validates that the full indexer operational cycle works, not just that queries parse. 
This is what an indexer actually does. + +**Speed**: Minutes. Requires epoch advancement between steps. + +**Builds on**: `advance-epoch.sh`, `query_gateway.sh`, `mine-block.sh`. + +### Layer 3: Timing-Dependent Flows + +**What**: Test eligibility expiry, thawing periods, and other time-dependent behaviour by advancing chain time and epochs programmatically. + +**Why**: These are the hardest tests to run manually — an indexer on testnet waits hours for epochs to advance. On local network we can cycle in seconds. + +**Covers**: +- IndexerTestGuide Sets 2-4 (eligible → expire → ineligible → re-renew → full rewards) +- BaselineTestPlan 2.2 (unstake thawing), 3.3-3.4 (provision thawing) + +**Builds on**: `advance-epoch.sh`, `cast send` for eligibility renewal, `REO_ELIGIBILITY_PERIOD` from `.env`. + +## Local Network Advantages + +The local network can do things testnet can't: + +| Capability | Testnet | Local | +|-----------|---------|-------| +| Advance epoch | Wait ~110 min | `./scripts/advance-epoch.sh` (seconds) | +| Control eligibility period | Fixed by coordinator | `REO_ELIGIBILITY_PERIOD` in `.env` | +| Advance chain time | Wait | `evm_increaseTime` RPC | +| Reset state | Can't | `docker compose down -v && up` | +| Full log access | Partial | All containers, all levels | + +## Workflow Sequence + +For each test plan update or protocol upgrade: + +1. Start local network (`docker compose up -d`) +2. Run Layer 0 (`test-baseline-queries.sh`, `test-indexer-guide-queries.sh`) — catch schema issues immediately +3. Run Layer 1 (state observation) — confirm network initialised correctly +4. Run Layer 2 (operational lifecycle) — validate full cycle +5. Run Layer 3 (timing flows) — test eligibility and thawing +6. 
Fix any issues found, update test plans and scripts together + +## Related Documentation + +- [Eligibility Oracle Goal](../../eligibility-oracle/Goal.md) — REO local network integration +- [Eligibility Oracle Status](../../eligibility-oracle/Status.md) — REO implementation log diff --git a/docs/testing/reo/Status.md b/docs/testing/reo/Status.md new file mode 100644 index 0000000..611a6b3 --- /dev/null +++ b/docs/testing/reo/Status.md @@ -0,0 +1,175 @@ +# Test Plan Automation - Status + +> Last updated: 2026-02-22 + +## Current Phase: Layers 0-3 complete + +### Summary + +All test layers are implemented and passing. 12 Rust integration tests cover network state observation, allocation lifecycle, reward collection, eligibility lifecycle, and query fee flow. Infrastructure changes enable the reward pipeline (curation signal + issuance config) and speed up epoch advancement (1s EBO polling). + +## Layer Progress + +| Layer | Status | Implementation | +|-------|--------|----------------| +| 0 - Query Validation | Done | `test-baseline-queries.sh`, `test-indexer-guide-queries.sh` | +| 1 - State Observation | Done | `test-baseline-state.sh` + Rust `network_state.rs` (6 tests) | +| 2 - Operational Lifecycle | Done | Rust `allocation_lifecycle.rs` (2 tests) | +| 3 - Timing-Dependent Flows | Done | Rust `eligibility.rs` (1 test), `reward_collection.rs` (1 test), `query_fees.rs` (2 tests) | + +### Rust Test Suite (12 tests) + +``` +tests/tests/ + network_state.rs 6 tests ~1s read-only state checks + allocation_lifecycle.rs 2 tests ~38s create/close/query lifecycle + eligibility.rs 1 test ~100s eligible/ineligible/re-eligible cycle + reward_collection.rs 1 test ~54s collect(IndexingRewards) → stake increase + query_fees.rs 2 tests ~1s gateway receipts + escrow observability +``` + +Run with: `cd tests && cargo test -- --nocapture` + +## Completed + +- [x] Manual validation of all 14 BaselineTestPlan GraphQL queries +- [x] Manual validation of all IndexerTestGuide GraphQL 
queries +- [x] Manual validation of all IndexerTestGuide `cast` commands +- [x] Fixed 3 bugs in BaselineTestPlan.md (pushed to `reo-testing` branch) +- [x] Fixed 1 bug in IndexerTestGuide.md (pushed to `reo-testing` branch) +- [x] Created Layer 0 bash scripts +- [x] Created Rust test crate with `TestNetwork` helper library +- [x] Network state observation tests (Layer 1 in Rust) +- [x] Allocation lifecycle tests (Layer 2) +- [x] Deterministic eligibility lifecycle tests (Layer 3) +- [x] Reward collection via `collect(IndexingRewards)` (Layer 3) +- [x] Query fee / TAP receipt generation tests (Layer 3) +- [x] Enabled reward pipeline: curation signal + issuance config in deploy scripts +- [x] EBO polling interval reduced from 20s to 1s for faster tests + +## Bugs Found and Fixed + +### BaselineTestPlan.md (3 bugs) + +| Bug | Tests affected | Fix | +|-----|---------------|-----| +| `unallocatedStake` field doesn't exist on Indexer | 2.1, 2.2, 3.2, 3.4, 6.1 | Changed to `availableStake` | +| `type: "ProvisionThaw"` invalid enum value | 3.3 | Changed to `type: Provision` (enum, not string) | +| `indexingRewardAmount` doesn't exist on Indexer | 6.1 | Changed to `rewardsEarned` | + +### IndexerTestGuide.md (1 bug) + +| Bug | Test affected | Fix | +|-----|--------------|-----| +| `subgraphDeployment { id { id } }` invalid nested scalar selection | 1.1 | Changed to `subgraphDeployment { ipfsHash }` | + +### Infrastructure bugs found during test development + +| Bug | Impact | Fix | +|-----|--------|-----| +| No curation signal on any deployment | `accRewardsPerSignal = 0` — all rewards are zero regardless of issuance | Added `L2Curation.mint()` in `start-indexing/run.sh` | +| `issuancePerBlock` not configured | Default issuance too low for meaningful testing | Added `setIssuancePerBlock(100e18)` in `graph-contracts/run.sh` | +| `closeAllocation` returns 0 rewards when indexer ineligible | `RewardsDeniedDueToEligibility` event fires instead of `HorizonRewardsAssigned` | 
Tests renew eligibility before reward-dependent operations | +| `PaymentsEscrow.getBalance()` needs 3 args | Signature is `(payer, collector, receiver)` not `(payer, receiver)` | Fixed in `query_fees.rs` | +| EBO polling at 20s causes slow tests | Epoch sync takes ~2min per test with epoch advancement | Reduced `polling_interval_in_seconds` to 1 | + +## Key Technical Findings + +### Reward Pipeline Requirements + +For indexing rewards to flow, ALL of these must be true: +1. `issuancePerBlock > 0` on RewardsManager (requires Governor = ACCOUNT1_SECRET) +2. Curation signal exists on the deployment (`signalledTokens > 0` via `L2Curation.mint()`) +3. Allocation spans multiple epochs +4. Indexer is eligible (if REO deployed) at the time of collect/close + +### collect() vs closeAllocation() + +`closeAllocation` calls `reclaimRewards()` which sends rewards to a reclaim address (or drops them). To route rewards to the indexer's stake, `SubgraphService.collect(indexer, PaymentTypes.IndexingRewards, data)` must be called BEFORE closing. + +However, `closeAllocation` via the management API does internally handle reward collection — the `indexingRewards` field in the close response is non-zero when the indexer is eligible and curation signal exists. + +The `collect_indexing_rewards` test directly calls `SubgraphService.collect()` as the indexer (RECEIVER_SECRET) and verifies the stake delta. + +### Eligibility Expiry During Mining + +Mining ~100 blocks for epoch advancement adds ~1200s of chain time (12s per block). With a 300s eligibility period, the indexer becomes ineligible mid-test. Tests must call `reo_renew_indexer()` before any reward-dependent operation. + +### TAP Query Fee Pipeline + +The TAP stack works end-to-end for receipt generation (20/20 gateway queries succeed). 
However: +- TAP escrow deposits are not observed (escrow balance = 0) +- TAP subgraph shows 0 escrow accounts +- This is expected — the TAP escrow manager processes asynchronously and may need a longer running time + +## Gaps + +### ~~No signal on local network deployments~~ RESOLVED + +Fixed by adding `L2Curation.mint()` in `start-indexing/run.sh` and `setIssuancePerBlock(100e18)` in `graph-contracts/run.sh`. + +### Explorer UI operations not scriptable — [Task: explorer/Goal.md](../../explorer/Goal.md) + +Cycles 1-2 in BaselineTestPlan use Explorer UI for staking and delegation parameters. On local network these are done by `graph-contracts` during deployment. + +### ~~Test framework for Layers 2-3~~ RESOLVED + +Rust test crate implemented with `TestNetwork` helper library. See [TestFramework.md](../TestFramework.md) for the evaluation that led to this choice. + +### ~~Indexer CLI not available in devcontainer~~ RESOLVED + +Management API at `indexer-agent:7600` covers all operations via GraphQL. + +### Cold start validation pending + +Tests assume an already-running network. Full validation — from a clean restart (`docker compose down -v && docker compose up -d`) through a passing test run — has not been confirmed yet.
+ +--- + +## Log + +### 2026-02-20 — Initial validation + +- Ran all BaselineTestPlan queries against local network subgraph (graph-node:8000) +- Found 3 schema bugs: `unallocatedStake`, `"ProvisionThaw"`, `indexingRewardAmount` on Indexer +- Fixed all 3 in BaselineTestPlan.md, committed to `reo-testing` branch +- Ran all IndexerTestGuide queries and cast commands +- Found 1 bug: invalid nested `{ id { id } }` selection on scalar +- Fixed in IndexerTestGuide.md, committed to `reo-testing` branch +- Created Layer 0 automation scripts +- Created Goal.md and Status.md for tracking + +### 2026-02-20 — Layer 1 and gap resolution + +- Built `test-baseline-state.sh`: 18 checks across indexer registration, provision, allocations, deployments, gateway, epoch, chain, and REO +- Investigated `graph indexer` CLI gap: CLI available via npx, but more importantly the indexer-agent management API (port 7600) exposes full GraphQL schema with all query and mutation operations +- Management API tested: `indexerRegistration`, `allocations`, `indexerDeployments`, `provisions`, `indexingRules` all work via curl +- This resolves the biggest gap for Layer 2: operational tests can use management API mutations (`createAllocation`, `closeAllocation`, `queueActions`, etc.) 
instead of the CLI + +### 2026-02-20 — Gap investigation and task docs + +- Investigated curation signal: L2Curation contract deployed, `mint()` available, ACCOUNT0 has GRT — straightforward to add to `start-indexing/run.sh` +- Investigated Graph Explorer: repo at `/git/edgeandnode/graph-explorer/`, Next.js app with Docker support, no backend API (all contract calls via Wagmi/Viem) +- Documented Explorer contract call reference: mapped UI components (SignalForm, DelegateTransactionContext, StakeForm) to equivalent `cast send` calls +- Evaluated test frameworks: Rust (cargo-nextest) recommended for Layers 2-3 given devcontainer tooling; bash retained for Layers 0-1 +- Created task docs: [CurationSignal.md](./CurationSignal.md), [explorer/Goal.md](../../explorer/Goal.md), [TestFramework.md](../TestFramework.md) + +### 2026-02-21 — Rust test crate and Layers 2-3 + +- Created Rust integration test crate (`tests/`) with `TestNetwork` helper +- Implemented `network_state.rs` (6 tests): indexer registration, provision, allocations, gateway, epoch, REO state +- Implemented `allocation_lifecycle.rs` (2 tests): create/close cycle, gateway query serving +- Implemented `eligibility.rs` (1 test): 3-phase lifecycle (eligible → ineligible → re-eligible) with deterministic contract calls +- All 9 tests passing + +### 2026-02-22 — Reward pipeline and expanded coverage + +- Discovered rewards were zero: `accRewardsPerSignal = 0` due to missing curation signal +- Fixed by adding `L2Curation.mint(1000 GRT)` in `start-indexing/run.sh` +- Set `issuancePerBlock = 100 GRT` in `graph-contracts/run.sh` (requires ACCOUNT1_SECRET as Governor) +- Reduced EBO polling from 20s to 1s — tests 3x faster (allocation_lifecycle 105s→38s, eligibility 277s→91s) +- Added `reward_collection.rs`: `collect(IndexingRewards)` increases stake by ~12,000 GRT +- Added `query_fees.rs`: gateway generates TAP receipts (20/20), escrow state observable +- Found and fixed eligibility expiry during mining (300s period, 
~1200s chain time in 2 epoch advances) +- Fixed `PaymentsEscrow.getBalance()` signature: 3 args (payer, collector, receiver) +- All 12 tests passing diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..d940b2e --- /dev/null +++ b/tests/README.md @@ -0,0 +1,138 @@ +# Integration Tests + +Automated integration tests for the local network. Each test maps to a specific +operation from the [BaselineTestPlan](../../graphprotocol/contracts/reo-testing/packages/issuance/docs/testing/reo/BaselineTestPlan.md), +[IndexerTestGuide](../../graphprotocol/contracts/reo-testing/packages/issuance/docs/testing/reo/IndexerTestGuide.md), +or [ReoTestPlan](../../graphprotocol/contracts/reo-testing/packages/issuance/docs/testing/reo/ReoTestPlan.md). + +## Running + +Requires the local network running (`docker compose up -d` from the repo root). + +```bash +cd tests +cargo nextest run --no-capture +``` + +All tests share a single blockchain and run serially (configured in +[.config/nextest.toml](.config/nextest.toml)). 
+ +## Test Mapping + +### BaselineTestPlan Coverage + +| Cycle | Test | Automated Test | File | +|-------|------|---------------|------| +| 1.1 | Indexer stake visible | `indexer_registered` | `network_state.rs` | +| 1.2 | Indexer URL + geoHash | `indexer_registered` | `network_state.rs` | +| 1.3 | Provision exists | `provision_exists` | `network_state.rs` | +| 2.1 | Add stake (Explorer) | `add_stake` | `stake_management.rs` | +| 2.2 | Unstake tokens | `unstake_idle_tokens` | `stake_management.rs` | +| 3.1 | View provision | `provision_exists` | `network_state.rs` | +| 3.2 | Add to provision | `provision_lifecycle` | `provision_management.rs` | +| 3.3 | Thaw from provision | `provision_lifecycle` | `provision_management.rs` | +| 3.4 | Deprovision | `provision_lifecycle` | `provision_management.rs` | +| 4.1 | Active allocations exist | `active_allocations` | `network_state.rs` | +| 4.2 | Create allocation | `close_and_recreate_allocation` | `allocation_lifecycle.rs` | +| 4.3 | Create via actions queue | Indexer CLI workflow | — | +| 4.4 | Create via deployment rules | Indexer CLI workflow | — | +| 4.5 | Reallocate | Indexer CLI workflow | — | +| 5.1 | Gateway query serving | `gateway_serves_queries` + `gateway_query_serving` + `gateway_queries_generate_tap_receipts` | `network_state.rs`, `allocation_lifecycle.rs`, `query_fees.rs` | +| 5.2 | Close allocation + rewards | `close_and_recreate_allocation` + `close_allocation_collects_rewards` | `allocation_lifecycle.rs` | +| 5.3 | TAP escrow state | `tap_escrow_state_observable` (observational only, no assertions) | `query_fees.rs` | +| 5.4 | Close with explicit POI | Indexer CLI workflow | — | +| 6.1 | Indexer health metrics | `indexer_health_metrics` | `network_state.rs` | +| 6.2 | Epoch progression | `epoch_progressing` | `network_state.rs` | +| 6.3 | Log review | Manual | — | +| 7 | End-to-end (close+create) | `close_and_recreate_allocation` | `allocation_lifecycle.rs` | + +### IndexerTestGuide (REO) Coverage + +| 
Set | Test | Automated Test | File | +|-----|------|---------------|------| +| Prereqs | REO contract state | `reo_contract_state` | `network_state.rs` | +| 1 | Prepare allocations | Covered by `close_and_recreate_allocation` (setup) | `allocation_lifecycle.rs` | +| 2 | Eligible → close → rewards > 0 | `eligibility_lifecycle` (Set 2) | `eligibility.rs` | +| 3 | Ineligible → close → rewards = 0 | `eligibility_lifecycle` (Set 3) | `eligibility.rs` | +| 4 | Re-renew → close → full rewards | `eligibility_lifecycle` (Set 4) | `eligibility.rs` | +| 5 | Validation disabled | `disable_validation_emergency` | `reo_governance.rs` | + +### ReoTestPlan Coverage (Coordinator/Governance) + +| Cycle | Test | Automated Test | File | +|-------|------|---------------|------| +| 1.3 | Default parameters | `deployment_parameters` | `reo_governance.rs` | +| 1.4 | RewardsManager → REO | `rewards_manager_integration` | `reo_governance.rs` | +| 1.5 | Contract not paused | `contract_not_paused` | `reo_governance.rs` | +| 2.1 | All eligible (validation off) | Covered by `disable_validation_emergency` | `reo_governance.rs` | +| 2.2 | No renewal history eligible | Covered by `disable_validation_emergency` | `reo_governance.rs` | +| 2.3 | Rewards flow (validation off) | Covered by baseline tests | `allocation_lifecycle.rs` | +| 3.1 | Grant oracle role | Testnet only (account0 has all roles locally) | — | +| 3.2 | Renew single indexer + events | `renew_single_indexer` | `reo_governance.rs` | +| 3.3 | Batch renewal | `batch_renewal` | `reo_governance.rs` | +| 3.4 | Zero address skipped | `zero_address_skipped` | `reo_governance.rs` | +| 3.5 | Unauthorized renewal reverts | `unauthorized_renewal_reverts` | `reo_governance.rs` | +| 4.1+4.2 | Enable validation, eligible stays | `enable_validation_eligible_stays` | `reo_governance.rs` | +| 4.3 | Non-renewed indexer ineligible | Covered by `eligibility_lifecycle` Set 3 | `eligibility.rs` | +| 4.4 | Period expiry | `eligibility_expires_after_period` | 
`reo_governance.rs` | +| 5.1 | Timeout fail-open | `timeout_failopen` | `reo_governance.rs` | +| 5.2 | Renewal resets timeout | `oracle_renewal_resets_timeout` | `reo_governance.rs` | +| 6.1 | Eligible → rewards | `eligibility_lifecycle` (Set 2) | `eligibility.rs` | +| 6.2 | Ineligible → denied | `eligibility_lifecycle` (Set 3) | `eligibility.rs` | +| 6.3 | Denied rewards → stake unchanged | `eligibility_lifecycle` (Set 3 stake check) | `eligibility.rs` | +| 6.4 | Re-renewal restores rewards | `eligibility_lifecycle` (Set 4) | `eligibility.rs` | +| 6.5 | View functions zero for ineligible | `rewards_view_zero_for_ineligible` | `reo_governance.rs` | +| 6.6 | Optimistic full rewards | `eligibility_lifecycle` (Set 4) | `eligibility.rs` | +| 7.1 | Pause blocks writes | `pause_blocks_writes` | `reo_governance.rs` | +| 7.2 | Disable validation (emergency) | `disable_validation_emergency` | `reo_governance.rs` | +| 7.3 | Access control | `access_control_unauthorized` | `reo_governance.rs` | +| 1.1 | Proxy + implementation | Testnet only | — | +| 1.2 | Role assignments | Testnet only | — | +| 8.1-8.3 | Explorer UI verification | Requires Explorer team | — | + +### Additional Coverage (not in test plans) + +| Test | What it verifies | File | +|------|-----------------|------| +| `collect_indexing_rewards_increases_stake` | Direct `SubgraphService.collect(IndexingRewards)` contract call | `reward_collection.rs` | + +## Test Files + +| File | Purpose | Tests | +|------|---------|-------| +| `network_state.rs` | Read-only state observation (Cycles 1, 3.1, 4.1, 6) | 7 | +| `stake_management.rs` | Stake add/remove (Cycle 2) | 2 | +| `provision_management.rs` | Provision add/thaw/deprovision (Cycle 3) | 1 | +| `allocation_lifecycle.rs` | Allocation create/close + gateway queries (Cycles 4-5, 7) | 3 | +| `query_fees.rs` | TAP receipt generation + escrow state (Cycle 5) | 2 | +| `reward_collection.rs` | Direct reward collection contract call | 1 | +| `eligibility.rs` | REO 
eligibility lifecycle (IndexerTestGuide Sets 2-4, ReoTestPlan 6.1-6.4/6.6) | 1 | +| `reo_governance.rs` | REO governance operations (ReoTestPlan Cycles 1, 3, 4, 5, 6.5, 7) | 15 | +| **Total** | | **32** | + +## Library Modules + +The test helper library (`src/`) provides typed wrappers that emulate what +production tools do. Each function is documented with the tool/UI operation +it corresponds to. + +| Module | Operations | Emulates | +|--------|-----------|----------| +| `graphql.rs` | Subgraph queries, gateway queries | Explorer, `graphql` CLI | +| `management.rs` | `createAllocation`, `closeAllocation`, `getDeployments` | `graph indexer allocations` CLI | +| `staking.rs` | `stake_tokens`, `unstake_tokens`, `provision_add/thaw/deprovision` | Explorer UI, `graph indexer provisions` CLI | +| `cast.rs` | Contract calls (`cast send/call`), REO governance, rewards view, epoch control | Direct contract interaction, `reo:enable/disable/status` Hardhat tasks | +| `polling.rs` | `advance_epochs`, `advance_time`, `mine_blocks` | Chain time manipulation | + +## Not Automated (Requires Testnet) + +These items cannot be tested on the local network and must be verified on Arbitrum Sepolia: + +- **ReoTestPlan 1.1-1.2**: Proxy implementation address and role assignments (deployment-specific) +- **ReoTestPlan 3.1**: Grant oracle role (account0 already has all roles on local network) +- **ReoTestPlan 8.1-8.3**: Explorer UI displays correct rewards/denial state (requires Explorer team) +- **ReoTestPlan Cycle 6 (multi-indexer)**: Multi-indexer reward cycling (requires 3+ indexers) +- **BaselineTestPlan 4.3-4.5**: Actions queue, deployment rules, reallocate (indexer CLI workflows) +- **BaselineTestPlan 5.4**: Close with explicit POI (indexer CLI workflow) +- **BaselineTestPlan 5.3**: TAP escrow state test is observational only (verifies services are reachable but makes no assertions on escrow balances or `queryFeesCollected`) +- **BaselineTestPlan 6.3**: Log review across indexer 
components