-
Notifications
You must be signed in to change notification settings - Fork 4
524 lines (464 loc) · 21.7 KB
/
ci_run.yml
File metadata and controls
524 lines (464 loc) · 21.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
---
# Reusable workflow: a single foc-devnet CI run.
#
# Called by ci_pull_request.yml (default config, no reporting) and
# ci_nightly.yml (stability / frontier matrix, issue reporting enabled).
#
# The only behavioural difference between callers is the `init_flags` input,
# which controls which versions of Curio / filecoin-services are used.
name: CI Run

on:
  workflow_call:
    inputs:
      name:
        description: 'Human-readable run name (e.g. default, stability, frontier)'
        required: true
        type: string
      init_flags:
        description: 'Extra flags forwarded to `foc-devnet init`'
        required: false
        type: string
        default: ''
      enable_reporting:
        description: 'When true, file a GitHub issue with the scenario report'
        required: false
        type: boolean
        default: false
      skip_report_on_pass:
        description: 'Skip filing an issue when all scenarios pass'
        required: false
        type: boolean
        default: true
      issue_label:
        description: 'Label applied to the filed GitHub issue'
        required: false
        type: string
        default: ''
      issue_title:
        description: 'Title of the filed GitHub issue'
        required: false
        type: string
        default: ''
jobs:
  foc-start-test:
    runs-on: ["self-hosted", "linux", "x64", "16xlarge+gpu"]
    timeout-minutes: 100
    permissions:
      contents: read
      issues: write
    steps:
      - uses: actions/checkout@v6
      # Free up disk space on GitHub Actions runner to avoid "no space left" errors
      - name: "EXEC: {Free up disk space}, independent"
        uses: endersonmenezes/free-disk-space@v3
        with:
          remove_android: true
          remove_dotnet: true
          remove_haskell: true
          remove_tool_cache: true
          remove_swap: true
          remove_packages: "azure-cli google-cloud-cli microsoft-edge-stable google-chrome-stable firefox postgresql* temurin-* *llvm* mysql* dotnet-sdk-*"
          remove_packages_one_command: true
          remove_folders: "/usr/share/swift /usr/share/miniconda /usr/share/az* /usr/local/lib/node_modules /usr/local/share/chromium /usr/local/share/powershell /usr/local/julia /usr/local/aws-cli /usr/local/aws-sam-cli /usr/share/gradle"
          rm_cmd: "rmz"
          rmz_version: "3.1.1"
# Setup Rust toolchain and restore cached dependencies
- name: "EXEC: {Setup Rust toolchain}, independent"
uses: actions-rust-lang/setup-rust-toolchain@v1
# CACHE-RUST: Rust dependencies and build artifacts
# These are keyed on the Cargo.lock file to ensure cache validity
- name: "CACHE_RESTORE: {C-rust-cache}"
id: cache-rust
uses: actions/cache/restore@v4
with:
path: |
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-rust-build-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-rust-build-
# Setup Docker for building and running containers
- name: "EXEC: {Setup Docker}, independent"
uses: docker/setup-buildx-action@v3
- name: "EXEC: {Install build dependencies}, independent"
run: |
sudo apt-get update
sudo apt-get install -y tar openssl pkg-config libssl-dev \
build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev \
libreadline-dev libffi-dev libsqlite3-dev libbz2-dev liblzma-dev curl
# Build the foc-devnet binary
- name: "EXEC: {Build foc-devnet binary}, DEP: {C-rust-cache}"
run: cargo build --release
# CACHE-RUST: Save Rust build cache for future runs
- name: "CACHE_SAVE: {C-rust-cache}"
if: steps.cache-rust.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: |
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-rust-build-${{ hashFiles('**/Cargo.lock') }}
# Copy binary and clean up Rust artifacts to save disk space
- name: "EXEC: {Copy binary and clean cache}, DEP: {C-rust-cache}"
run: |
cp ./target/release/foc-devnet ./foc-devnet
rm -rf ~/.cargo/registry/
rm -rf ~/.cargo/git/db/
rm -rf target/
df -h
# Compute cache keys based on version info and source files
# - CODE_HASH: Changes when Lotus/Curio versions change (for build artifacts cache)
# - DOCKER_HASH: Changes when Dockerfiles change (for Docker images cache)
- name: "CHECK: {Compute version hashes}"
id: version-hashes
run: |
# Get version output
VERSION_OUTPUT=$(./foc-devnet version 2>&1)
# Compute CODE_HASH from all default:code: lines (Lotus/Curio versions)
CODE_HASH=$(echo "$VERSION_OUTPUT" | grep 'default:code:' | sha256sum | cut -d' ' -f1)
echo "code-hash=$CODE_HASH" >> $GITHUB_OUTPUT
echo "CODE_HASH: $CODE_HASH"
# Compute DOCKER_HASH from docker/ directory (Dockerfile changes)
DOCKER_HASH=$(find docker -type f -exec sha256sum {} \; | sort | sha256sum | cut -d' ' -f1)
echo "docker-hash=$DOCKER_HASH" >> $GITHUB_OUTPUT
echo "DOCKER_HASH: $DOCKER_HASH"
# CACHE-DOCKER: Try to restore pre-built Docker images (foc-lotus, foc-lotus-miner, foc-builder, foc-curio, foc-yugabyte)
# These images contain YugabyteDB and all build dependencies
- name: "CACHE_RESTORE: {C-docker-images-cache}"
id: cache-docker-images
uses: actions/cache/restore@v4
with:
path: ~/.docker-images-cache
key: ${{ runner.os }}-docker-images-${{ steps.version-hashes.outputs.docker-hash }}
# CACHE-DOCKER: If Docker images are cached, load them from tarballs
- name: "EXEC: {Load Docker images}, DEP: {C-docker-images-cache}"
if: steps.cache-docker-images.outputs.cache-hit == 'true'
run: |
echo "Loading Docker images from cache..."
for image in ~/.docker-images-cache/*.tar; do
if [ -f "$image" ]; then
echo "Loading $(basename $image)..."
docker load -i "$image"
fi
done
echo "Docker images loaded successfully, list:"
docker images
rm -rf ~/.docker-images-cache
df -h
# If Docker images are cached, skip building them AND skip downloading YugabyteDB
# (YugabyteDB is already baked into the foc-yugabyte Docker image)
- name: "EXEC: {Initialize with cached Docker}, DEP: {C-docker-images-cache}"
if: steps.cache-docker-images.outputs.cache-hit == 'true'
run: |
./foc-devnet clean --all
./foc-devnet init --no-docker-build
# If Docker images are not cached, do full init (downloads YugabyteDB and builds all images)
- name: "EXEC: {Initialize without cache}, independent"
if: steps.cache-docker-images.outputs.cache-hit != 'true'
run: |
./foc-devnet clean --all
./foc-devnet init
# CACHE-DOCKER: Save Docker images as tarballs for caching
- name: "EXEC: {Save Docker images for cache}, DEP: {C-docker-images-cache}"
if: steps.cache-docker-images.outputs.cache-hit != 'true'
run: |-
mkdir -p ~/.docker-images-cache
echo "Saving Docker images for cache..."
docker save foc-lotus -o ~/.docker-images-cache/foc-lotus.tar
docker save foc-lotus-miner -o ~/.docker-images-cache/foc-lotus-miner.tar
docker save foc-builder -o ~/.docker-images-cache/foc-builder.tar
docker save foc-curio -o ~/.docker-images-cache/foc-curio.tar
docker save foc-yugabyte -o ~/.docker-images-cache/foc-yugabyte.tar
echo "Docker images saved to cache"
ls -lath ~/.docker-images-cache/
df -h
# CACHE-DOCKER: Save Docker images cache for future runs
- name: "CACHE_SAVE: {C-docker-images-cache}"
if: steps.cache-docker-images.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: ~/.docker-images-cache
key: ${{ runner.os }}-docker-images-${{ steps.version-hashes.outputs.docker-hash }}
# CACHE-BINARIES: Try to restore previously built Lotus/Curio binaries
- name: "CACHE_RESTORE: {C-build-artifacts-cache}"
id: cache-binaries
uses: actions/cache/restore@v4
with:
path: ~/.foc-devnet/bin
key: ${{ runner.os }}-binaries-${{ inputs.name }}-${{ steps.version-hashes.outputs.code-hash }}
- name: "EXEC: {Ensure permissions on binaries}, DEP: {C-build-artifacts-cache}"
if: steps.cache-binaries.outputs.cache-hit == 'true'
run: sudo chown -R $USER:$USER ~/.foc-devnet/bin/
# CACHE-GO: Try to restore foc-builder Go module cache to speed up Lotus/Curio builds
- name: "CACHE_RESTORE: {C-foc-builder-cache}"
id: cache-go
if: steps.cache-binaries.outputs.cache-hit != 'true'
uses: actions/cache/restore@v4
with:
path: ~/.foc-devnet/docker/volumes/cache/foc-builder
key: ${{ runner.os }}-foc-builder-cache-${{ inputs.name }}-${{ hashFiles('docker/**') }}-${{ hashFiles('src/config.rs') }}
restore-keys: |
${{ runner.os }}-foc-builder-cache-${{ inputs.name }}-
- name: "EXEC: {Ensure permissions}, DEP: {C-foc-builder-cache}"
if: steps.cache-binaries.outputs.cache-hit != 'true' &&
steps.cache-go.outputs.cache-hit == 'true'
run: sudo chown -R $USER:$USER ~/.foc-devnet/
- name: "EXEC: {Check disk space}, independent"
run: df -h
# Build Lotus and Curio if not cached
- name: "EXEC: {Build Lotus}, DEP: {C-build-artifacts-cache}"
if: steps.cache-binaries.outputs.cache-hit != 'true'
run: ./foc-devnet build lotus
- name: "EXEC: {Build Curio}, DEP: {C-build-artifacts-cache}"
if: steps.cache-binaries.outputs.cache-hit != 'true'
run: ./foc-devnet build curio
# CACHE-GO: Save Go module cache for future builds
- name: "CACHE_SAVE: {C-foc-builder-cache}"
if: steps.cache-binaries.outputs.cache-hit != 'true' &&
steps.cache-go.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: ~/.foc-devnet/docker/volumes/cache/foc-builder
key: ${{ runner.os }}-foc-builder-cache-${{ inputs.name }}-${{ hashFiles('docker/**') }}-${{ hashFiles('src/config.rs') }}
# CACHE-BINARIES: Save built Lotus/Curio binaries for future runs
- name: "CACHE_SAVE: {C-build-artifacts-cache}"
if: steps.cache-binaries.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: ~/.foc-devnet/bin
key: ${{ runner.os }}-binaries-${{ inputs.name }}-${{ steps.version-hashes.outputs.code-hash }}
# Disk free-up
- name: "EXEC: {Clean up Go modules}, DEP: {C-build-artifacts-cache}"
run: |
sudo rm -rf ~/.foc-devnet/docker/volumes/cache
sudo rm -rf ~/.foc-devnet/code/lotus
sudo rm -rf ~/.foc-devnet/code/curio
df -h
# Download and extract Filecoin proof parameters from S3
- name: "EXEC: {Download proof parameters from S3}, independent"
run: |
mkdir -p ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/
curl -L https://fil-proof-params-2k-cache.s3.us-east-2.amazonaws.com/filecoin-proof-params-2k.tar -o /tmp/filecoin-proof-params-2k.tar
tar -xf /tmp/filecoin-proof-params-2k.tar -C ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/
rm /tmp/filecoin-proof-params-2k.tar
ls -lath ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/
PROOF_PARAMS_HASH=$(find ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters -type f -exec sha256sum {} \; | cut -d' ' -f1 | sort | sha256sum | cut -d' ' -f1)
echo "Downloaded proof parameters with hash: $PROOF_PARAMS_HASH"
# Verify cluster is running correctly
- name: "EXEC: {Check cluster status}, independent"
run: ./foc-devnet status
# Configure /etc/hosts for inter-SP communication via host.docker.internal
- name: "EXEC: {Configure host.docker.internal for SP-to-SP comms}, independent"
run: echo '127.0.0.1 host.docker.internal' | sudo tee -a /etc/hosts
# Start the full Filecoin localnet cluster
- name: "EXEC: {Start cluster}, independent"
id: start_cluster
continue-on-error: true
run: ./foc-devnet start --parallel
# Collect and print Docker container logs for debugging (always runs for diagnostics)
- name: "EXEC: {Collect Docker logs}, independent"
if: always()
run: |
RUN_DIR="$HOME/.foc-devnet/state/latest"
echo "+++++++++++ foc-devnet version"
cat "$RUN_DIR/version.txt" 2>/dev/null || echo "No version file found"
echo "+++++++++++ Disk space"
sudo df -h 2>/dev/null || echo "df command failed"
echo "+++++++++++ Run Directory Contents"
ls -lath "$RUN_DIR" 2>/dev/null || echo "No run directory found"
echo "+++++++++++ Contract Addresses"
cat "$RUN_DIR/contract_addresses.json" 2>/dev/null || echo "No contract addresses file found"
echo "+++++++++++ Step Context"
cat "$RUN_DIR/step_context.json" 2>/dev/null || echo "No step context file found"
echo "+++++++++++ FOC Metadata"
cat "$RUN_DIR/foc_metadata.json" 2>/dev/null || echo "No foc metadata file found"
echo "+++++++++++ Container Logs"
if [ -d "$RUN_DIR/logs" ]; then
for logfile in "$RUN_DIR/logs"/*; do
if [ -f "$logfile" ]; then
echo ""
echo "📰 Logs from $(basename "$logfile") 📰"
cat "$logfile" 2>/dev/null || echo "Failed to read $logfile"
fi
done
else
echo "No container logs directory found"
fi
# Verify cluster is running correctly
- name: "EXEC: {Check cluster status}, independent"
if: always()
run: ./foc-devnet status
- name: "EXEC: {List foc-* containers}, independent"
if: always()
run: |
echo "Containers using foc-* images (running or exited):"
docker ps -a --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}'
# Verify devnet-info.json was exported successfully
- name: "CHECK: {Verify devnet-info.json exists}"
if: steps.start_cluster.outcome == 'success'
run: |
DEVNET_INFO="$HOME/.foc-devnet/state/latest/devnet-info.json"
test -f "$DEVNET_INFO" || exit 1
echo "✓ devnet-info.json created"
jq '.version' "$DEVNET_INFO"
# Setup Node.js for JavaScript examples
- name: "EXEC: {Setup Node.js}, independent"
if: steps.start_cluster.outcome == 'success'
uses: actions/setup-node@v4
with:
node-version: 'lts/*'
# Setup pnpm (required by scenario tests)
- name: "EXEC: {Setup pnpm}, independent"
if: steps.start_cluster.outcome == 'success'
uses: pnpm/action-setup@v4
with:
version: latest
# Validate schema using zod
- name: "CHECK: {Validate devnet-info.json schema}"
if: steps.start_cluster.outcome == 'success'
run: |
DEVNET_INFO="$HOME/.foc-devnet/state/latest/devnet-info.json"
cd examples
npm install
node validate-schema.js "$DEVNET_INFO"
node read-devnet-info.js "$DEVNET_INFO"
node check-balances.js "$DEVNET_INFO"
echo "✓ All examples ran well"
# Resolve the numeric job ID for deep CI links in the scenario report.
# GH exposes `GITHUB_RUN_ID`, a string but not the numberic value needed to build links.
# Exports GITHUB_CI_JOB_ID into $GITHUB_ENV so run.py can skip all name-matching
# heuristics and query the Jobs API directly with a known job ID.
- name: "SETUP: {Get CI job ID (numeric)}"
if: always()
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
GITHUB_CI_JOB_ID=$(curl -sSfL \
-H "Authorization: Bearer $GH_TOKEN" \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID/jobs?per_page=100" \
| jq -r --arg job "$GITHUB_JOB" \
'[.jobs[] | select(.name == $job or (.name | startswith($job + " (")))] | first | .id // empty')
if [[ -n "$GITHUB_CI_JOB_ID" ]]; then
echo "GITHUB_CI_JOB_ID=$GITHUB_CI_JOB_ID" >> "$GITHUB_ENV"
echo "Resolved CI job ID: $GITHUB_CI_JOB_ID"
echo "Job URL: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID/job/$GITHUB_CI_JOB_ID"
else
echo "Warning: could not resolve numeric job ID (GITHUB_JOB=$GITHUB_JOB)"
fi
# Setup scenario test prerequisites (Foundry, Python 3.11 via pyenv, cqlsh)
- name: "EXEC: {Setup scenario prerequisites}, independent"
if: steps.start_cluster.outcome == 'success'
run: ./scripts/setup-scenarios-prerequisites.sh
# Run scenario tests against the live devnet
- name: "TEST: {Run scenario tests}"
id: scenario_tests
if: steps.start_cluster.outcome == 'success'
env:
REPORTING: ${{ inputs.enable_reporting }}
SKIP_REPORT_ON_PASS: ${{ inputs.skip_report_on_pass }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SCENARIO_RUN_TYPE: ${{ inputs.name }}
run: python3 scenarios/run.py
# Ensure scenario report exists even if tests didn't run (for issue reporting)
- name: "EXEC: {Ensure scenario report exists}"
if: always()
run: |
REPORT="$HOME/.foc-devnet/state/latest/scenario_report.md"
if [ ! -f "$REPORT" ]; then
mkdir -p "$(dirname "$REPORT")"
{
echo "# Scenario Test Report (${{ inputs.name }})"
echo ""
echo "**Something failed before a proper scenario report could be generated.**"
echo ""
echo "**Start cluster outcome**: ${{ steps.start_cluster.outcome }}"
echo ""
echo "**CI run**: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
echo ""
echo "## foc-devnet version"
echo '```'
./foc-devnet version 2>&1 || echo "version command failed"
echo '```'
} > "$REPORT"
fi
# Upload scenario report as artifact (name includes run name to avoid collisions in matrix)
- name: "EXEC: {Upload scenario report}"
if: always()
uses: actions/upload-artifact@v4
with:
name: scenario-report-${{ inputs.name }}
path: ~/.foc-devnet/state/latest/scenario_*.md
if-no-files-found: ignore
# Determine whether to file an issue (only when reporting is enabled).
# Uses job.status to catch failures in ANY step — not just start_cluster
# and scenario_tests. If an intermediate step (e.g. prerequisites) fails
# and scenario_tests is skipped, job.status is still 'failure'.
- name: "CHECK: {Determine if issue should be filed}"
id: should_file
if: always() && inputs.enable_reporting
env:
JOB_STATUS: ${{ job.status }}
run: |
if [[ "$JOB_STATUS" == "success" ]]; then
PASSED="true"
else
PASSED="false"
echo "Job status: $JOB_STATUS"
echo " start_cluster.outcome=${{ steps.start_cluster.outcome }}"
echo " scenario_tests.outcome=${{ steps.scenario_tests.outcome }}"
fi
echo "passed=$PASSED" >> $GITHUB_OUTPUT
if [[ "$PASSED" == "true" && "${{ inputs.skip_report_on_pass }}" == "true" ]]; then
echo "file=false" >> $GITHUB_OUTPUT
echo "Skipping issue: tests passed and skip_report_on_pass is true"
else
echo "file=true" >> $GITHUB_OUTPUT
echo "Filing issue (${{ inputs.name }}): passed=$PASSED"
fi
# Read scenario report content from the filesystem directly
- name: "EXEC: {Read scenario report}"
id: report
if: always() && steps.should_file.outputs.file == 'true' && inputs.enable_reporting
run: |
REPORT="$HOME/.foc-devnet/state/latest/scenario_report.md"
if [ -f "$REPORT" ]; then
CONTENT=$(cat "$REPORT")
else
CONTENT="No scenario report available for **${{ inputs.name }}** strategy."
fi
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
echo "content<<$EOF" >> $GITHUB_OUTPUT
echo "$CONTENT" >> $GITHUB_OUTPUT
echo "$EOF" >> $GITHUB_OUTPUT
# Create or update a GitHub issue with the scenario report
- name: "EXEC: {Create or update issue}"
if: always() && steps.should_file.outputs.file == 'true' && inputs.enable_reporting
uses: ipdxco/create-or-update-issue@v1
with:
# We're not using `github.token` here because it won't trigger other workflows like `add-issues-to-project`.
# Instead, we use a PAT to trigger other workflows.
# This PAT has permissions to open/update issues, which is why it was used.
# Alternatively, we could create a more narrowly scoped PAT, but this would be another PAT to setup/manage.
GITHUB_TOKEN: ${{ secrets.FILOZZY_RELEASE_PLEASE_PAT_FILOZONE }}
title: ${{ inputs.issue_title }}
body: |
The **${{ inputs.name }}** scenarios run **${{ steps.should_file.outputs.passed == 'true' && 'passed ✅' || 'failed ❌' }}**.
See [the workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.
${{ steps.report.outputs.content }}
label: ${{ inputs.issue_label }}
# Clean shutdown (always runs to avoid leaving containers behind)
- name: "EXEC: {Stop cluster}, independent"
if: always()
run: ./foc-devnet stop
# Mark job as failed if the start step failed, but only after all steps
- name: "CHECK: {Fail job if start failed}"
if: always() && steps.start_cluster.outcome == 'failure'
run: |
echo "Start cluster failed earlier; marking job as failed." >&2
exit 1