From 180047fb7ead9872a90d7cdbe92e2eae87592650 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 25 Mar 2026 23:16:15 +0100 Subject: [PATCH 01/38] refactor: rename "invocation" to "batch" across codebase Rename the grouping concept from "invocation" to "batch" in all Rust, TypeScript, and test code. Switch UI from HashRouter to BrowserRouter with server-side SPA fallback routes. Backward compatibility: - /api/invocations/{name}/combined-results kept as alias - /inv/* routes redirect to /batch/* Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-runner/src/sim/report.rs | 2 +- patchbay-server/src/lib.rs | 37 ++++++++----- ui/e2e/push.spec.ts | 14 ++--- ui/e2e/runner-sim.spec.ts | 8 +-- ui/src/App.tsx | 87 ++++++++++++++++--------------- ui/src/RunsIndex.tsx | 44 ++++++++-------- ui/src/api.ts | 6 +-- ui/src/components/PerfTab.tsx | 4 +- ui/src/main.tsx | 16 ++++-- 9 files changed, 118 insertions(+), 100 deletions(-) diff --git a/patchbay-runner/src/sim/report.rs b/patchbay-runner/src/sim/report.rs index 4422462..eee9faa 100644 --- a/patchbay-runner/src/sim/report.rs +++ b/patchbay-runner/src/sim/report.rs @@ -278,7 +278,7 @@ pub async fn write_combined_results_for_runs(work_root: &Path, run_names: &[Stri Ok(()) } -/// Print a concise per-sim summary for one invocation run. +/// Print a concise per-sim summary for one batch run. pub fn print_run_summary_table_for_runs(work_root: &Path, run_names: &[String]) -> Result<()> { let runs = load_runs(work_root, run_names)?; if runs.is_empty() { diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index d032d6a..e780798 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -71,9 +71,9 @@ pub struct RunInfo { /// This is the per-sim lab state, not the CI test outcome — see /// [`RunManifest::test_outcome`] for the overall pass/fail from CI. pub status: Option, - /// Invocation group (first path component for nested runs, `None` for flat/direct). - pub invocation: Option, - /// CI manifest from `run.json` in the invocation directory, if present. + /// Batch group (first path component for nested runs, `None` for flat/direct). + pub batch: Option, + /// CI manifest from `run.json` in the batch directory, if present. #[serde(skip_serializing_if = "Option::is_none")] pub manifest: Option, } @@ -99,7 +99,7 @@ pub fn discover_runs(base: &Path) -> anyhow::Result> { path: base.to_path_buf(), label, status, - invocation: None, + batch: None, manifest: read_run_json(base), }]); } @@ -107,11 +107,11 @@ pub fn discover_runs(base: &Path) -> anyhow::Result> { let mut runs = Vec::new(); scan_runs_recursive(base, base, 1, &mut runs)?; - // Attach run.json manifests from invocation directories. + // Attach run.json manifests from batch directories. let mut manifest_cache: std::collections::HashMap> = std::collections::HashMap::new(); for run in &mut runs { - let inv = run.invocation.clone().unwrap_or_else(|| run.name.clone()); + let inv = run.batch.clone().unwrap_or_else(|| run.name.clone()); let manifest = manifest_cache .entry(inv.clone()) .or_insert_with(|| read_run_json(&base.join(&inv))) @@ -155,9 +155,9 @@ fn scan_runs_recursive( .to_string_lossy() .into_owned(); let (label, status) = read_run_metadata(&path); - // Derive invocation from the first path component (the timestamped + // Derive batch from the first path component (the timestamped // directory) when the run is nested more than one level deep. - let invocation = name + let batch = name .split('/') .next() .filter(|first| *first != name) @@ -167,7 +167,7 @@ fn scan_runs_recursive( path, label, status, - invocation, + batch, manifest: None, // populated after scan }); } else { @@ -232,6 +232,10 @@ fn build_router(state: AppState) -> Router { let mut r = Router::new() .route("/", get(index_html)) .route("/runs", get(index_html)) + // SPA fallback: serve index.html for client-side routes. + .route("/run/{*rest}", get(index_html)) + .route("/batch/{*rest}", get(index_html)) + .route("/inv/{*rest}", get(index_html)) .route("/api/runs", get(get_runs)) .route("/api/runs/subscribe", get(runs_sse)) .route("/api/runs/{run}/state", get(get_run_state)) @@ -240,9 +244,14 @@ fn build_router(state: AppState) -> Router { .route("/api/runs/{run}/logs", get(get_run_logs)) .route("/api/runs/{run}/logs/{*path}", get(get_run_log_file)) .route("/api/runs/{run}/files/{*path}", get(get_run_file)) + .route( + "/api/batches/{name}/combined-results", + get(get_batch_combined), + ) + // Legacy alias — keep for backward-compat (links shared on Discord). .route( "/api/invocations/{name}/combined-results", - get(get_invocation_combined), + get(get_batch_combined), ); if state.push.is_some() { r = r.route("/api/push/{project}", post(push_run)); @@ -549,8 +558,8 @@ async fn get_run_file( serve_file(&file_path).await } -/// Serve `combined-results.json` from an invocation directory. -async fn get_invocation_combined( +/// Serve `combined-results.json` from a batch directory. +async fn get_batch_combined( AxPath(name): AxPath, State(state): State, ) -> impl IntoResponse { @@ -922,12 +931,12 @@ async fn push_run( // Notify subscribers about new run let _ = state.runs_tx.send(()); - // run_name is the invocation name (first path component for all sims inside) + // run_name is the batch name (first path component for all sims inside) let result = serde_json::json!({ "ok": true, "project": project, "run": run_name, - "invocation": run_name, + "batch": run_name, }); (StatusCode::OK, serde_json::to_string(&result).unwrap()) diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index a6a02b4..85fbc5a 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -74,21 +74,21 @@ test('push run results and view via deep link', async ({ page }) => { body: tarGz, }) expect(pushRes.status).toBe(200) - const pushBody = await pushRes.json() as { ok: boolean; invocation: string; project: string } + const pushBody = await pushRes.json() as { ok: boolean; batch: string; project: string } expect(pushBody.ok).toBe(true) expect(pushBody.project).toBe('test-project') - expect(pushBody.invocation).toBeTruthy() + expect(pushBody.batch).toBeTruthy() // Step 4: Verify the run appears in the API. const runsRes = await fetch(`${SERVE_URL}/api/runs`) - const runs = await runsRes.json() as Array<{ name: string; invocation: string | null }> + const runs = await runsRes.json() as Array<{ name: string; batch: string | null }> expect(runs.length).toBeGreaterThan(0) - // All runs should share the same invocation (the push dir name). - const inv = runs[0].invocation - expect(inv).toBe(pushBody.invocation) + // All runs should share the same batch (the push dir name). + const batch = runs[0].batch + expect(batch).toBe(pushBody.batch) // Step 5: Open the deep link and verify the UI shows the run. - await page.goto(`${SERVE_URL}/#/inv/${pushBody.invocation}`) + await page.goto(`${SERVE_URL}/batch/${pushBody.batch}`) // The topbar should show "patchbay". await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() diff --git a/ui/e2e/runner-sim.spec.ts b/ui/e2e/runner-sim.spec.ts index a82904f..8a06794 100644 --- a/ui/e2e/runner-sim.spec.ts +++ b/ui/e2e/runner-sim.spec.ts @@ -71,12 +71,12 @@ test('runner sim produces viewable UI output', async ({ page }) => { } }) -test('multi-sim invocation shows grouped selector and combined results', async ({ page }) => { +test('multi-sim batch shows grouped selector and combined results', async ({ page }) => { test.setTimeout(4 * 60 * 1000) const workDir = mkdtempSync(`${tmpdir()}/patchbay-runner-e2e-multi-`) let serveProc: ChildProcess | null = null try { - // Run both sims in a single invocation. + // Run both sims in a single batch. execFileSync( PATCHBAY_BIN, ['run', '--work-dir', workDir, PING_TOML, IPERF_TOML], @@ -99,7 +99,7 @@ test('multi-sim invocation shows grouped selector and combined results', async ( await page.goto(UI_URL) await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() - // The selector should have an optgroup (invocation) with both sims. + // The selector should have an optgroup (batch) with both sims. const selector = page.locator('select') await expect(selector).toBeVisible() await expect(selector.locator('optgroup')).toBeAttached() @@ -111,7 +111,7 @@ test('multi-sim invocation shows grouped selector and combined results', async ( await expect(combinedOption).toBeAttached() await selector.selectOption({ label: await combinedOption.innerText() }) - // Switch to perf tab — invocation view defaults to sims list. + // Switch to perf tab — batch view defaults to sims list. await page.getByRole('button', { name: 'perf' }).click() // Perf tab should show summary and detail tables with both sims. await expect(page.getByText('summary')).toBeVisible({ timeout: 5_000 }) diff --git a/ui/src/App.tsx b/ui/src/App.tsx index bc69328..23de33a 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -35,51 +35,51 @@ type Tab = 'topology' | 'logs' | 'timeline' | 'perf' | 'sims' type Selection = | { kind: 'run'; name: string } - | { kind: 'invocation'; name: string } + | { kind: 'batch'; name: string } function selectionKey(s: Selection | null): string { if (!s) return '' - return s.kind === 'invocation' ? `inv:${s.name}` : s.name + return s.kind === 'batch' ? `batch:${s.name}` : s.name } function selectionPath(s: Selection | null): string { if (!s) return '/' - return s.kind === 'invocation' ? `/inv/${s.name}` : `/run/${s.name}` + return s.kind === 'batch' ? `/batch/${s.name}` : `/run/${s.name}` } -// ── Invocation grouping ──────────────────────────────────────────── +// ── Batch grouping ───────────────────────────────────────────────── -interface InvocationGroup { - invocation: string +interface BatchGroup { + batch: string runs: RunInfo[] } -function groupByInvocation(runs: RunInfo[]): { groups: InvocationGroup[]; ungrouped: RunInfo[] } { +function groupByBatch(runs: RunInfo[]): { groups: BatchGroup[]; ungrouped: RunInfo[] } { const grouped = new Map() const ungrouped: RunInfo[] = [] for (const r of runs) { - if (r.invocation) { - let list = grouped.get(r.invocation) + if (r.batch) { + let list = grouped.get(r.batch) if (!list) { list = [] - grouped.set(r.invocation, list) + grouped.set(r.batch, list) } list.push(r) } else { ungrouped.push(r) } } - const groups: InvocationGroup[] = [] - for (const [invocation, groupRuns] of grouped) { - groups.push({ invocation, runs: groupRuns }) + const groups: BatchGroup[] = [] + for (const [batch, groupRuns] of grouped) { + groups.push({ batch, runs: groupRuns }) } return { groups, ungrouped } } -/** Short display label for a run within an invocation group. */ +/** Short display label for a run within a batch group. */ function simLabel(run: RunInfo): string { - if (run.invocation && run.name.startsWith(run.invocation + '/')) { - return run.label ?? run.name.slice(run.invocation.length + 1) + if (run.batch && run.name.startsWith(run.batch + '/')) { + return run.label ?? run.name.slice(run.batch.length + 1) } return run.label ?? run.name } @@ -168,21 +168,22 @@ function applyEvent(state: LabState, event: LabEvent): LabState { // ── Unified App ──────────────────────────────────────────────────── -export default function App({ mode }: { mode: 'run' | 'inv' }) { +export default function App({ mode }: { mode: 'run' | 'batch' }) { const location = useLocation() const navigate = useNavigate() // Derive selection from the URL path. - // Route is /run/* or /inv/* so everything after the prefix is the name. - const nameFromUrl = location.pathname.slice(mode === 'run' ? 5 : 5) // "/run/" or "/inv/" = 5 chars + // Route is /run/* or /batch/* so everything after the prefix is the name. + const prefixLen = mode === 'run' ? '/run/'.length : '/batch/'.length + const nameFromUrl = location.pathname.slice(prefixLen) const selection: Selection | null = nameFromUrl - ? { kind: mode === 'inv' ? 'invocation' : 'run', name: nameFromUrl } + ? { kind: mode === 'batch' ? 'batch' : 'run', name: nameFromUrl } : null const selectedRun = selection?.kind === 'run' ? selection.name : null - const selectedInvocation = selection?.kind === 'invocation' ? selection.name : null + const selectedBatch = selection?.kind === 'batch' ? selection.name : null - const [tab, setTab] = useState(mode === 'inv' ? 'sims' : 'topology') + const [tab, setTab] = useState(mode === 'batch' ? 'sims' : 'topology') // Run list (for the dropdown) const [runs, setRuns] = useState([]) @@ -249,22 +250,22 @@ export default function App({ mode }: { mode: 'run' | 'inv' }) { return () => { dead = true } }, [selectedRun]) - // ── Load combined results when an invocation is selected ── + // ── Load combined results when a batch is selected ── useEffect(() => { - if (!selectedInvocation) { + if (!selectedBatch) { setCombinedResults(null) return } let dead = false - fetchCombinedResults(selectedInvocation).then((results) => { + fetchCombinedResults(selectedBatch).then((results) => { if (dead) return setCombinedResults(results) }) return () => { dead = true } - }, [selectedInvocation]) + }, [selectedBatch]) // ── SSE for live updates (only when run is "running") ── @@ -314,16 +315,16 @@ export default function App({ mode }: { mode: 'run' | 'inv' }) { const base = selectedRun ? runFilesBase(selectedRun) : '' const isSimView = selection?.kind === 'run' - const isInvocationView = selection?.kind === 'invocation' + const isBatchView = selection?.kind === 'batch' - // Runs belonging to the current invocation - const invocationRuns = isInvocationView - ? runs.filter((r) => r.invocation === selectedInvocation) + // Runs belonging to the current batch + const batchRuns = isBatchView + ? runs.filter((r) => r.batch === selectedBatch) : [] const availableTabs: Tab[] = isSimView ? ['topology', 'logs', 'timeline', ...(simResults ? (['perf'] as Tab[]) : [])] - : isInvocationView + : isBatchView ? ['sims', ...(combinedResults ? (['perf'] as Tab[]) : [])] : [] @@ -338,14 +339,14 @@ export default function App({ mode }: { mode: 'run' | 'inv' }) { const logsForTabs = logList.map((l) => ({ node: l.node, kind: l.kind, path: l.path })) // Group runs for the selector - const { groups, ungrouped } = groupByInvocation(runs) + const { groups, ungrouped } = groupByBatch(runs) // ── Render ── return (
-

patchbay

+

patchbay

` with a proper tree component in sidebar +- Tree structure: batches (expandable to show runs), standalone runs +- Compare batches show with a compare icon +- Keeps sidebar clean as nesting depth grows + +**Refactor — RunView extraction:** +- `ui/src/components/RunView.tsx` — extract from App.tsx + - Props: `run: RunInfo, state, events, logs, results, metrics` + - Renders: tab bar + tab content (topology, logs, timeline, perf, metrics) + - This is a pure extraction, no new behavior + +**MetricsTab (new):** +- `ui/src/components/MetricsTab.tsx` + - Fetches `device..metrics.jsonl` from run files + - Parses JSONL, groups by key + - Default view: table of key + last value, with inline SVG sparklines for + keys that have multiple data points + - One row per unique metric key, columns: key, device, last value, sparkline + - Clicking a row could expand to a full chart (future, not v0) + +**CompareView (new):** +- `ui/src/components/CompareView.tsx` + - Top: `CompareSummary` bar (pass/fail delta, time delta, score badge, + metrics deltas for shared keys) + - Below: split panes, left and right + - Shared tab state — selecting "logs" opens logs on both sides + - Each side renders a `RunView` + - Co-navigation: tabs are synchronized, scroll position optionally synced + + ``` + // TODO: qlog comparison — in CompareView summary, show packet/frame count + // deltas from qlog files. Parse qlog JSON events, bucket by type, diff + // counts. Display as a compact delta table in CompareSummary. + ``` + +**Compare as batch:** Comparison output directory IS a batch. The server discovers +it like any other batch. `CompareView` is activated when the batch has a +`summary.json` (compare manifest). Otherwise it renders as a normal batch view. + +**LOC estimate:** ~500 new (200 RunView refactor, 100 MetricsTab, 100 CompareView, +100 tree nav + routing). + +--- + +## Key Patterns + +### Backend dispatch (no trait needed for v0) + +For simplicity, use a match on `VmBackend` enum rather than a trait: + +```rust +enum BackendKind { Native, Qemu, Container } + +fn resolve_backend(vm_flag: Option>) -> BackendKind { + match vm_flag { + None if cfg!(target_os = "linux") => BackendKind::Native, + None => auto_detect_vm(), + Some(None) => auto_detect_vm(), + Some(Some(s)) => match s.as_str() { + "qemu" => BackendKind::Qemu, + "container" => BackendKind::Container, + _ => bail!("unknown VM backend: {s}"), + }, + } +} +``` + +A trait can come later if backends grow; for now a match keeps LOC down. + +### Metrics: tracing with JSON payload + +Since tracing requires compile-time field names, we use a single `metrics_json` +field containing a serialized map. The patchbay namespace subscriber already has +`JsonFieldVisitor` that extracts fields — we add a branch for the `_metrics` +target that writes to a separate file. + +Device holds a clone of its namespace's `tracing::Dispatch` so `record()` and +`metrics().emit()` can emit directly without going through the sync worker thread. +`enter_tracing()` is public so users can also emit arbitrary tracing within the +device context. + +### Compare = batch + +A compare operation produces a batch directory with left/right runs plus a +`summary.json`. The UI detects `summary.json` and activates `CompareView`. +This means no new API endpoints — compare results flow through existing batch +discovery. The tree navigation component shows compare batches with a visual +indicator. + +### UI composition + +``` +App +├── TreeNav (new sidebar, replaces - {isSimView && runs.find((r) => r.name === selectedRun) && ( + {isSimView && selectedRunInfo && ( - {runs.find((r) => r.name === selectedRun)?.status ?? ''} + {selectedRunInfo.status ?? ''} )} {labState && ( @@ -399,7 +402,7 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { {isSimView && selectedRun && ( r.name === selectedRun) ?? { name: selectedRun, label: null, status: null, batch: null }} + run={selectedRunInfo ?? { name: selectedRun, label: null, status: null, batch: null }} state={labState} events={labEvents} logs={logList} diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 7ba917b..41d829e 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -67,7 +67,7 @@ export default function CompareView({ batchName }: { batchName: string }) { - {Array.from(allTests.entries()).sort().map(([name, { left, right }]) => { + {Array.from(allTests.entries()).sort(([a], [b]) => a.localeCompare(b)).map(([name, { left, right }]) => { let delta = '' let color = '' if (left === 'fail' && right === 'pass') { delta = 'fixed'; color = 'var(--green)' } diff --git a/ui/src/components/MetricsTab.tsx b/ui/src/components/MetricsTab.tsx index 6df3083..7f6413f 100644 --- a/ui/src/components/MetricsTab.tsx +++ b/ui/src/components/MetricsTab.tsx @@ -75,7 +75,7 @@ export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[ {series.map((s, i) => ( - + {s.key} {s.device} {s.values[s.values.length - 1]?.v.toFixed(2)} From e93d8947477773900e09989a266bca80acc404bf Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 25 Mar 2026 23:52:17 +0100 Subject: [PATCH 09/38] refactor: share test args via clap flatten between test and compare Extract TestArgs as a clap::Args struct, flatten into both Command::Test and CompareCommand::Test. Move VM dispatch logic to test::run_vm(). Update compare::run_tests_in_dir to accept &TestArgs directly. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 18 ++--- patchbay-cli/src/main.rs | 135 +++--------------------------------- patchbay-cli/src/test.rs | 70 +++++++++++++++++++ 3 files changed, 86 insertions(+), 137 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index d9e6176..a220bf3 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -122,32 +122,26 @@ pub fn parse_test_output(output: &str) -> Vec { /// Run tests in a directory and capture results. pub fn run_tests_in_dir( dir: &Path, - filter: &Option, - ignored: bool, - ignored_only: bool, - packages: &[String], - tests: &[String], + args: &crate::test::TestArgs, ) -> Result<(Vec, String)> { let mut cmd = Command::new("cargo"); cmd.current_dir(dir); cmd.arg("test"); - - // Add RUSTFLAGS cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); - for p in packages { + for p in &args.packages { cmd.arg("-p").arg(p); } - for t in tests { + for t in &args.tests { cmd.arg("--test").arg(t); } - if let Some(f) = filter { + if let Some(f) = &args.filter { cmd.arg(f); } - if ignored || ignored_only { + if args.ignored || args.ignored_only { cmd.arg("--"); - if ignored_only { + if args.ignored_only { cmd.arg("--ignored"); } else { cmd.arg("--include-ignored"); diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index e4fd892..b44de26 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -135,53 +135,12 @@ enum Command { }, /// Run tests (delegates to cargo test on native, VM test flow on VM). Test { - /// Test name filter. - #[arg()] - filter: Option, - - /// Include ignored tests. - #[arg(long)] - ignored: bool, - - /// Run only ignored tests. - #[arg(long)] - ignored_only: bool, - - /// Package to test. - #[arg(short = 'p', long = "package")] - packages: Vec, - - /// Test target name. - #[arg(long = "test")] - tests: Vec, - - /// Number of build jobs. - #[arg(short = 'j', long)] - jobs: Option, - - /// Features to enable. - #[arg(short = 'F', long)] - features: Vec, - - /// Build in release mode. - #[arg(long)] - release: bool, - - /// Test only library. - #[arg(long)] - lib: bool, - - /// Don't stop on first failure. - #[arg(long)] - no_fail_fast: bool, + #[command(flatten)] + args: test::TestArgs, /// Force VM backend. #[arg(long, num_args = 0..=1, default_missing_value = "auto")] vm: Option, - - /// Extra args passed to cargo and test binaries. - #[arg(last = true)] - extra_args: Vec, }, /// Compare test or sim results across git refs. Compare { @@ -217,10 +176,6 @@ enum Command { enum CompareCommand { /// Compare test results between git refs. Test { - /// Test name filter. - #[arg()] - filter: Option, - /// First git ref (compare against worktree if only one given). #[arg(long = "ref", required = true)] left_ref: String, @@ -229,14 +184,8 @@ enum CompareCommand { #[arg(long = "ref2")] right_ref: Option, - #[arg(long)] - ignored: bool, - #[arg(long)] - ignored_only: bool, - #[arg(short = 'p', long = "package")] - packages: Vec, - #[arg(long = "test")] - tests: Vec, + #[command(flatten)] + args: test::TestArgs, }, /// Compare sim results between git refs. Run { @@ -446,91 +395,27 @@ async fn tokio_main() -> Result<()> { work_dir, cmd, } => run_in_command(node, inspect, work_dir, cmd), - Command::Test { - filter, - ignored, - ignored_only, - packages, - tests, - jobs, - features, - release, - lib, - no_fail_fast, - vm, - extra_args, - } => { - let test_args = test::TestArgs { - filter: filter.clone(), - ignored, - ignored_only, - packages: packages.clone(), - tests: tests.clone(), - jobs, - features: features.clone(), - release, - lib, - no_fail_fast, - extra_args: extra_args.clone(), - }; - + Command::Test { args, vm } => { #[cfg(feature = "vm")] if let Some(vm_backend) = vm { - // Delegate to VM test flow let backend = match vm_backend.as_str() { "auto" => patchbay_vm::resolve_backend(patchbay_vm::Backend::Auto), "qemu" => patchbay_vm::Backend::Qemu, "container" => patchbay_vm::Backend::Container, other => bail!("unknown VM backend: {other}"), }; - let target = patchbay_vm::default_test_target(); - let mut cargo_args = Vec::new(); - if let Some(j) = jobs { - cargo_args.extend(["--jobs".into(), j.to_string()]); - } - for f in &features { - cargo_args.extend(["--features".into(), f.clone()]); - } - if release { - cargo_args.push("--release".into()); - } - if lib { - cargo_args.push("--lib".into()); - } - if no_fail_fast { - cargo_args.push("--no-fail-fast".into()); - } - cargo_args.extend(extra_args); - - let vm_args = patchbay_vm::TestVmArgs { - filter, - target, - packages, - tests, - recreate: false, - cargo_args, - }; - return match backend { - patchbay_vm::Backend::Container => { - patchbay_vm::container::run_tests(vm_args) - } - _ => patchbay_vm::qemu::run_tests_in_vm(vm_args), - }; + return test::run_vm(args, backend); } #[cfg(not(feature = "vm"))] if vm.is_some() { bail!("VM support not compiled (enable the `vm` feature)"); } - - // Native - test::run_native(test_args) + test::run_native(args) } Command::Compare { command } => { let cwd = std::env::current_dir().context("get cwd")?; match command { - CompareCommand::Test { - filter, left_ref, right_ref, ignored, ignored_only, packages, tests, - } => { + CompareCommand::Test { left_ref, right_ref, args } => { let right_label = right_ref.as_deref().unwrap_or("worktree"); println!("patchbay compare test: {} \u{2194} {}", left_ref, right_label); @@ -545,12 +430,12 @@ async fn tokio_main() -> Result<()> { // Run tests sequentially println!("Running tests in {} ...", left_ref); let (left_results, _left_output) = compare::run_tests_in_dir( - &left_dir, &filter, ignored, ignored_only, &packages, &tests, + &left_dir, &args, )?; println!("Running tests in {} ...", right_label); let (right_results, _right_output) = compare::run_tests_in_dir( - &right_dir, &filter, ignored, ignored_only, &packages, &tests, + &right_dir, &args, )?; // Compare diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 45262a8..5e3f42c 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -15,17 +15,51 @@ fn has_nextest() -> bool { .unwrap_or(false) } +/// Shared test arguments used by both `patchbay test` and `patchbay compare test`. +#[derive(Debug, Clone, clap::Args)] pub struct TestArgs { + /// Test name filter. + #[arg()] pub filter: Option, + + /// Include ignored tests. + #[arg(long)] pub ignored: bool, + + /// Run only ignored tests. + #[arg(long)] pub ignored_only: bool, + + /// Package to test. + #[arg(short = 'p', long = "package")] pub packages: Vec, + + /// Test target name. + #[arg(long = "test")] pub tests: Vec, + + /// Number of build jobs. + #[arg(short = 'j', long)] pub jobs: Option, + + /// Features to enable. + #[arg(short = 'F', long)] pub features: Vec, + + /// Build in release mode. + #[arg(long)] pub release: bool, + + /// Test only library. + #[arg(long)] pub lib: bool, + + /// Don't stop on first failure. + #[arg(long)] pub no_fail_fast: bool, + + /// Extra args passed to cargo and test binaries. + #[arg(last = true)] pub extra_args: Vec, } @@ -105,3 +139,39 @@ pub fn run_native(args: TestArgs) -> Result<()> { } Ok(()) } + +/// Run tests in a VM via patchbay-vm. +#[cfg(feature = "vm")] +pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { + let target = patchbay_vm::default_test_target(); + let mut cargo_args = Vec::new(); + if let Some(j) = args.jobs { + cargo_args.extend(["--jobs".into(), j.to_string()]); + } + for f in &args.features { + cargo_args.extend(["--features".into(), f.clone()]); + } + if args.release { + cargo_args.push("--release".into()); + } + if args.lib { + cargo_args.push("--lib".into()); + } + if args.no_fail_fast { + cargo_args.push("--no-fail-fast".into()); + } + cargo_args.extend(args.extra_args); + + let vm_args = patchbay_vm::TestVmArgs { + filter: args.filter, + target, + packages: args.packages, + tests: args.tests, + recreate: false, + cargo_args, + }; + match backend { + patchbay_vm::Backend::Container => patchbay_vm::container::run_tests(vm_args), + _ => patchbay_vm::qemu::run_tests_in_vm(vm_args), + } +} From bec36ca2057e5b0647b8e3e913a6944f9c742f44 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 25 Mar 2026 23:57:33 +0100 Subject: [PATCH 10/38] refactor: consolidate paths under .patchbay/, VmOps trait, DRY cargo args MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Path consolidation: - .patchbay-work → .patchbay/work (all 7 CLI defaults + vm fallbacks) - .qemu-vm → .patchbay/vm, .container-vm → .patchbay/vm VmOps trait: - Add VmOps trait in patchbay-vm with QemuBackend/ContainerBackend ZSTs - Rewrite dispatch_vm to use trait methods instead of per-command match - resolve_ops() factory returns Box DRY cargo args: - Add TestArgs::into_vm_args() method for TestArgs → TestVmArgs conversion - Use in both test::run_vm() and VmCommand::Test dispatch Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/main.rs | 81 +++++++++++------------------------- patchbay-cli/src/test.rs | 58 +++++++++++++------------- patchbay-vm/src/container.rs | 4 +- patchbay-vm/src/lib.rs | 43 +++++++++++++++++++ patchbay-vm/src/qemu.rs | 4 +- 5 files changed, 100 insertions(+), 90 deletions(-) diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index b44de26..e39a986 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -40,7 +40,7 @@ enum Command { sims: Vec, /// Work directory for logs, binaries, and results. - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, /// Binary override in `::` form. @@ -77,7 +77,7 @@ enum Command { #[arg()] sims: Vec, /// Work directory for caches and prepared outputs. - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, /// Binary override in `::` form. #[arg(long = "binary")] @@ -97,7 +97,7 @@ enum Command { /// Output directory containing lab run subdirectories. /// /// Ignored when `--testdir` is set. - #[arg(default_value = ".patchbay-work")] + #[arg(default_value = ".patchbay/work")] outdir: PathBuf, /// Serve `/testdir-current` instead of a path. /// @@ -116,7 +116,7 @@ enum Command { /// Sim TOML or topology TOML file path. input: PathBuf, /// Work directory for inspect session metadata. - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, }, /// Run a command inside a node namespace from an inspect session. @@ -127,7 +127,7 @@ enum Command { #[arg(long)] inspect: Option, /// Work directory containing inspect session metadata. - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, /// Command and args to execute in the node namespace. #[arg(trailing_var_arg = true, allow_hyphen_values = true, required = true)] @@ -227,7 +227,7 @@ enum VmCommand { Run { #[arg(required = true)] sims: Vec, - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, #[arg(long = "binary")] binary_overrides: Vec, @@ -244,7 +244,7 @@ enum VmCommand { }, /// Serve embedded UI + work directory over HTTP. Serve { - #[arg(long, default_value = ".patchbay-work")] + #[arg(long, default_value = ".patchbay/work")] work_dir: PathBuf, /// Serve `/binaries/tests/testdir-current` instead of work_dir. #[arg(long, default_value_t = false)] @@ -493,29 +493,14 @@ async fn tokio_main() -> Result<()> { /// Dispatch VM subcommands to the patchbay-vm library. #[cfg(feature = "vm")] async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Result<()> { - let backend = patchbay_vm::resolve_backend(backend); + let ops = patchbay_vm::resolve_ops(backend); match command { - VmCommand::Up { recreate } => match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::up_cmd(recreate), - _ => patchbay_vm::qemu::up_cmd(recreate), - }, - VmCommand::Down => match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::down_cmd(), - _ => patchbay_vm::qemu::down_cmd(), - }, - VmCommand::Status => match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::status_cmd(), - _ => patchbay_vm::qemu::status_cmd(), - }, - VmCommand::Cleanup => match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::cleanup_cmd(), - _ => patchbay_vm::qemu::cleanup_cmd(), - }, - VmCommand::Ssh { cmd } => match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::exec_cmd_cli(cmd), - _ => patchbay_vm::qemu::ssh_cmd_cli(cmd), - }, + VmCommand::Up { recreate } => ops.up(recreate), + VmCommand::Down => ops.down(), + VmCommand::Status => ops.status(), + VmCommand::Cleanup => ops.cleanup(), + VmCommand::Ssh { cmd } => ops.exec(cmd), VmCommand::Run { sims, work_dir, @@ -546,10 +531,7 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul recreate, patchbay_version, }; - let res = match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::run_sims(args), - _ => patchbay_vm::qemu::run_sims_in_vm(args), - }; + let res = ops.run_sims(args); if open && res.is_ok() { println!("run finished; server still running (Ctrl-C to exit)"); loop { @@ -590,35 +572,22 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul lib, no_fail_fast, recreate, - mut cargo_args, + cargo_args, } => { - if let Some(j) = jobs { - cargo_args.extend(["--jobs".into(), j.to_string()]); - } - for f in features { - cargo_args.extend(["--features".into(), f]); - } - if release { - cargo_args.push("--release".into()); - } - if lib { - cargo_args.push("--lib".into()); - } - if no_fail_fast { - cargo_args.push("--no-fail-fast".into()); - } - let args = patchbay_vm::TestVmArgs { + let test_args = test::TestArgs { filter, - target, + ignored: false, + ignored_only: false, packages, tests, - recreate, - cargo_args, + jobs, + features, + release, + lib, + no_fail_fast, + extra_args: cargo_args, }; - match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::run_tests(args), - _ => patchbay_vm::qemu::run_tests_in_vm(args), - } + ops.run_tests(test_args.into_vm_args(target, recreate)) } } } diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 5e3f42c..c00c1ae 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -63,6 +63,32 @@ pub struct TestArgs { pub extra_args: Vec, } +impl TestArgs { + /// Convert to patchbay-vm TestVmArgs. + #[cfg(feature = "vm")] + pub fn into_vm_args(self, target: String, recreate: bool) -> patchbay_vm::TestVmArgs { + let mut cargo_args = Vec::new(); + if let Some(j) = self.jobs { + cargo_args.extend(["--jobs".into(), j.to_string()]); + } + for f in &self.features { + cargo_args.extend(["--features".into(), f.clone()]); + } + if self.release { cargo_args.push("--release".into()); } + if self.lib { cargo_args.push("--lib".into()); } + if self.no_fail_fast { cargo_args.push("--no-fail-fast".into()); } + cargo_args.extend(self.extra_args); + patchbay_vm::TestVmArgs { + filter: self.filter, + target, + packages: self.packages, + tests: self.tests, + recreate, + cargo_args, + } + } +} + /// Run tests natively via cargo test/nextest. pub fn run_native(args: TestArgs) -> Result<()> { let use_nextest = has_nextest(); @@ -143,35 +169,7 @@ pub fn run_native(args: TestArgs) -> Result<()> { /// Run tests in a VM via patchbay-vm. #[cfg(feature = "vm")] pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { + let ops = patchbay_vm::resolve_ops(backend); let target = patchbay_vm::default_test_target(); - let mut cargo_args = Vec::new(); - if let Some(j) = args.jobs { - cargo_args.extend(["--jobs".into(), j.to_string()]); - } - for f in &args.features { - cargo_args.extend(["--features".into(), f.clone()]); - } - if args.release { - cargo_args.push("--release".into()); - } - if args.lib { - cargo_args.push("--lib".into()); - } - if args.no_fail_fast { - cargo_args.push("--no-fail-fast".into()); - } - cargo_args.extend(args.extra_args); - - let vm_args = patchbay_vm::TestVmArgs { - filter: args.filter, - target, - packages: args.packages, - tests: args.tests, - recreate: false, - cargo_args, - }; - match backend { - patchbay_vm::Backend::Container => patchbay_vm::container::run_tests(vm_args), - _ => patchbay_vm::qemu::run_tests_in_vm(vm_args), - } + ops.run_tests(args.into_vm_args(target, false)) } diff --git a/patchbay-vm/src/container.rs b/patchbay-vm/src/container.rs index de7a8cf..d49adb4 100644 --- a/patchbay-vm/src/container.rs +++ b/patchbay-vm/src/container.rs @@ -27,7 +27,7 @@ use crate::{ // Constants // --------------------------------------------------------------------------- -const CONTAINER_STATE_DIR: &str = ".container-vm"; +const CONTAINER_STATE_DIR: &str = ".patchbay/vm"; const DEFAULT_CONTAINER_NAME: &str = "patchbay"; const DEFAULT_IMAGE: &str = "debian:trixie-slim"; @@ -84,7 +84,7 @@ impl ContainerConfig { Ok(dir) => dir, Err(_) => cwd.join("target"), }; - let default_work = cwd.join(".patchbay-work"); + let default_work = cwd.join(".patchbay/work"); Ok(Self { name: env_or("CONTAINER_VM_NAME", DEFAULT_CONTAINER_NAME), diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index 4ec9b2c..f2ea77e 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -42,3 +42,46 @@ pub fn resolve_backend(b: Backend) -> Backend { other => other, } } + +/// Backend operations for VM-based execution. +pub trait VmOps { + fn up(&self, recreate: bool) -> anyhow::Result<()>; + fn down(&self) -> anyhow::Result<()>; + fn status(&self) -> anyhow::Result<()>; + fn cleanup(&self) -> anyhow::Result<()>; + fn exec(&self, cmd: Vec) -> anyhow::Result<()>; + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()>; + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()>; +} + +pub struct QemuBackend; +pub struct ContainerBackend; + +impl VmOps for QemuBackend { + fn up(&self, recreate: bool) -> anyhow::Result<()> { qemu::up_cmd(recreate) } + fn down(&self) -> anyhow::Result<()> { qemu::down_cmd() } + fn status(&self) -> anyhow::Result<()> { qemu::status_cmd() } + fn cleanup(&self) -> anyhow::Result<()> { qemu::cleanup_cmd() } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { qemu::ssh_cmd_cli(cmd) } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { qemu::run_sims_in_vm(args) } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { qemu::run_tests_in_vm(args) } +} + +impl VmOps for ContainerBackend { + fn up(&self, recreate: bool) -> anyhow::Result<()> { container::up_cmd(recreate) } + fn down(&self) -> anyhow::Result<()> { container::down_cmd() } + fn status(&self) -> anyhow::Result<()> { container::status_cmd() } + fn cleanup(&self) -> anyhow::Result<()> { container::cleanup_cmd() } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { container::exec_cmd_cli(cmd) } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { container::run_sims(args) } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { container::run_tests(args) } +} + +/// Resolve backend and return a boxed trait object. +pub fn resolve_ops(b: Backend) -> Box { + let resolved = resolve_backend(b); + match resolved { + Backend::Container => Box::new(ContainerBackend), + _ => Box::new(QemuBackend), + } +} diff --git a/patchbay-vm/src/qemu.rs b/patchbay-vm/src/qemu.rs index 2092abb..ca3dbee 100644 --- a/patchbay-vm/src/qemu.rs +++ b/patchbay-vm/src/qemu.rs @@ -24,7 +24,7 @@ use crate::{ // QEMU-specific constants // --------------------------------------------------------------------------- -const VM_STATE_DIR: &str = ".qemu-vm"; +const VM_STATE_DIR: &str = ".patchbay/vm"; const DEFAULT_VM_NAME: &str = "patchbay-vm"; const DEFAULT_IMAGE_URL_X86: &str = "https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2"; @@ -160,7 +160,7 @@ impl VmConfig { Ok(dir) => dir, Err(_) => cwd.join("target"), }; - let default_work = cwd.join(".patchbay-work"); + let default_work = cwd.join(".patchbay/work"); Ok(Self { vm_name: env_or("QEMU_VM_NAME", DEFAULT_VM_NAME), From f69b0670d979f7cd3eac078550a705f1bd8c3c9a Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:04:13 +0100 Subject: [PATCH 11/38] test: add fixture crate, integration test, and e2e test for compare Fixture crate (patchbay-cli/tests/fixtures/counter/): - Two tests: udp_counter (sends/receives packets, records metric) and udp_threshold (asserts PACKET_COUNT >= THRESHOLD) Integration test (compare_integration.rs, #[ignore]): - Creates temp git repo with passing and regressing commits - Runs patchbay compare test, asserts regressions detected E2E test (ui/e2e/compare.spec.ts): - Mock compare data, spawns patchbay serve, verifies CompareView Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 1 + patchbay-cli/Cargo.toml | 5 + patchbay-cli/tests/compare_integration.rs | 126 +++ .../tests/fixtures/counter/Cargo.lock | 1006 +++++++++++++++++ .../tests/fixtures/counter/Cargo.toml | 12 + .../tests/fixtures/counter/tests/counter.rs | 87 ++ ui/e2e/compare.spec.ts | 108 ++ 7 files changed, 1345 insertions(+) create mode 100644 patchbay-cli/tests/compare_integration.rs create mode 100644 patchbay-cli/tests/fixtures/counter/Cargo.lock create mode 100644 patchbay-cli/tests/fixtures/counter/Cargo.toml create mode 100644 patchbay-cli/tests/fixtures/counter/tests/counter.rs create mode 100644 ui/e2e/compare.spec.ts diff --git a/Cargo.lock b/Cargo.lock index d09874e..5127e7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1995,6 +1995,7 @@ dependencies = [ "serde", "serde_json", "tar", + "tempfile", "tokio", "toml", "tracing", diff --git a/patchbay-cli/Cargo.toml b/patchbay-cli/Cargo.toml index b9ba682..6eb617a 100644 --- a/patchbay-cli/Cargo.toml +++ b/patchbay-cli/Cargo.toml @@ -32,6 +32,11 @@ tokio = { version = "1", features = ["rt", "macros", "sync", "time", "fs", "proc toml = "1.0" tracing = "0.1" +[dev-dependencies] +patchbay = { path = "../patchbay" } +serde_json = "1" +tempfile = "3" + [features] default = ["serve", "upload", "vm"] serve = ["dep:patchbay-server"] diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs new file mode 100644 index 0000000..31388a7 --- /dev/null +++ b/patchbay-cli/tests/compare_integration.rs @@ -0,0 +1,126 @@ +//! Integration test for `patchbay compare test`. +//! Creates a temp git repo with the counter fixture, makes two commits +//! with different PACKET_COUNT values, and runs compare between them. + +use std::path::Path; +use std::process::Command; + +fn git(dir: &Path, args: &[&str]) { + let status = Command::new("git") + .args(args) + .current_dir(dir) + .env("GIT_AUTHOR_NAME", "test") + .env("GIT_AUTHOR_EMAIL", "test@test") + .env("GIT_COMMITTER_NAME", "test") + .env("GIT_COMMITTER_EMAIL", "test@test") + .status() + .unwrap(); + assert!(status.success(), "git {args:?} failed"); +} + +#[test] +#[ignore] // Requires namespace capabilities + builds from scratch +fn compare_detects_regression() { + if patchbay::check_caps().is_err() { + eprintln!("skipping: no namespace capabilities"); + return; + } + + let tmp = tempfile::tempdir().unwrap(); + let dir = tmp.path(); + let patchbay_root = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap(); + let patchbay_crate = patchbay_root.join("patchbay"); + + // Write workspace + std::fs::write( + dir.join("Cargo.toml"), + "[workspace]\nmembers = [\"counter\"]\nresolver = \"2\"\n", + ) + .unwrap(); + + // Write counter crate + let counter_dir = dir.join("counter"); + std::fs::create_dir_all(counter_dir.join("tests")).unwrap(); + std::fs::write( + counter_dir.join("Cargo.toml"), + format!( + "[package]\nname = \"counter\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n\ + [dev-dependencies]\npatchbay = {{ path = \"{}\" }}\n\ + tokio = {{ version = \"1\", features = [\"rt\", \"macros\", \"net\", \"time\"] }}\n\ + anyhow = \"1\"\n", + patchbay_crate.display() + ), + ) + .unwrap(); + + // Read fixture source from our fixtures dir + let fixture_src = std::fs::read_to_string( + Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/counter/tests/counter.rs"), + ) + .unwrap(); + + // Commit 1: passing (PACKET_COUNT = 5) + std::fs::write(counter_dir.join("tests/counter.rs"), &fixture_src).unwrap(); + git(dir, &["init"]); + git(dir, &["add", "."]); + git(dir, &["commit", "-m", "passing"]); + git(dir, &["tag", "v1"]); + + // Commit 2: regressing (PACKET_COUNT = 2, below THRESHOLD = 3) + let regressed = fixture_src.replace( + "const PACKET_COUNT: u32 = 5;", + "const PACKET_COUNT: u32 = 2;", + ); + std::fs::write(counter_dir.join("tests/counter.rs"), ®ressed).unwrap(); + git(dir, &["add", "."]); + git(dir, &["commit", "-m", "regressing"]); + git(dir, &["tag", "v2"]); + + // Run compare + let patchbay_bin = env!("CARGO_BIN_EXE_patchbay"); + let output = Command::new(patchbay_bin) + .args([ + "compare", + "test", + "--ref", + "v1", + "--ref2", + "v2", + "-p", + "counter", + ]) + .current_dir(dir) + .output() + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + eprintln!("stdout:\n{stdout}"); + eprintln!("stderr:\n{stderr}"); + + // Compare should detect the regression and exit non-zero + assert!( + !output.status.success(), + "expected non-zero exit due to regression" + ); + + // Find and parse the manifest + let work = dir.join(".patchbay/work"); + let compare_dir = std::fs::read_dir(&work) + .unwrap() + .filter_map(|e| e.ok()) + .find(|e| { + e.file_name() + .to_string_lossy() + .starts_with("compare-") + }) + .expect("compare output dir not found"); + let manifest_path = compare_dir.path().join("summary.json"); + let manifest: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&manifest_path).unwrap()).unwrap(); + + assert_eq!(manifest["left_ref"], "v1"); + assert_eq!(manifest["right_ref"], "v2"); + assert!(manifest["summary"]["regressions"].as_u64().unwrap() >= 1); + assert!(manifest["summary"]["score"].as_i64().unwrap() < 0); +} diff --git a/patchbay-cli/tests/fixtures/counter/Cargo.lock b/patchbay-cli/tests/fixtures/counter/Cargo.lock new file mode 100644 index 0000000..969f6f0 --- /dev/null +++ b/patchbay-cli/tests/fixtures/counter/Cargo.lock @@ -0,0 +1,1006 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "cc" +version = "1.2.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "counter-fixture" +version = "0.0.0" +dependencies = [ + "anyhow", + "patchbay", + "tokio", +] + +[[package]] +name = "derive_more" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn", + "unicode-xid", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "futures" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "slab", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" +dependencies = [ + "serde", +] + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "js-sys" +version = "0.3.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.183" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys", +] + +[[package]] +name = "netlink-packet-core" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3463cbb78394cb0141e2c926b93fc2197e473394b761986eca3b9da2c63ae0f4" +dependencies = [ + "paste", +] + +[[package]] +name = "netlink-packet-route" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ce3636fa715e988114552619582b530481fd5ef176a1e5c1bf024077c2c9445" +dependencies = [ + "bitflags", + "libc", + "log", + "netlink-packet-core", +] + +[[package]] +name = "netlink-proto" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65d130ee111430e47eed7896ea43ca693c387f097dd97376bffafbf25812128" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror 2.0.18", +] + +[[package]] +name = "netlink-sys" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6c30ed10fa69cc491d491b85cc971f6bdeb8e7367b7cde2ee6cc878d583fae" +dependencies = [ + "bytes", + "futures-util", + "libc", + "log", + "tokio", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "patchbay" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "derive_more", + "futures", + "ipnet", + "libc", + "nix", + "rtnetlink", + "serde", + "serde_json", + "strum", + "tokio", + "tokio-util", + "toml", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "rtnetlink" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b960d5d873a75b5be9761b1e73b146f52dddcd27bac75263f40fba686d4d7b5" +dependencies = [ + "futures-channel", + "futures-util", + "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", + "nix", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_spanned" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98" +dependencies = [ + "serde_core", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "strum" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9628de9b8791db39ceda2b119bbe13134770b56c138ec1d3af810d045c04f9bd" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab85eea0270ee17587ed4156089e10b9e6880ee688791d45a905f5b1ca36f664" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tokio" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" +dependencies = [ + "bytes", + "libc", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-macros" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8195ca05e4eb728f4ba94f3e3291661320af739c4e43779cbdfae82ab239fcc" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.1.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "thread_local", + "tracing", + "tracing-core", +] + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-segmentation" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da36089a805484bcccfffe0739803392c8298778a2d2f09febf76fac5ad9025b" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/patchbay-cli/tests/fixtures/counter/Cargo.toml b/patchbay-cli/tests/fixtures/counter/Cargo.toml new file mode 100644 index 0000000..8d9fba3 --- /dev/null +++ b/patchbay-cli/tests/fixtures/counter/Cargo.toml @@ -0,0 +1,12 @@ +[workspace] + +[package] +name = "counter-fixture" +version = "0.0.0" +edition = "2021" +publish = false + +[dev-dependencies] +patchbay = { path = "../../../../patchbay" } +tokio = { version = "1", features = ["rt", "macros", "net", "time"] } +anyhow = "1" diff --git a/patchbay-cli/tests/fixtures/counter/tests/counter.rs b/patchbay-cli/tests/fixtures/counter/tests/counter.rs new file mode 100644 index 0000000..fd84844 --- /dev/null +++ b/patchbay-cli/tests/fixtures/counter/tests/counter.rs @@ -0,0 +1,87 @@ +//! Fixture test: sends UDP packets between two patchbay devices. +//! PACKET_COUNT and THRESHOLD are compile-time constants that the +//! integration test modifies between commits to create regressions. + +const PACKET_COUNT: u32 = 5; +const THRESHOLD: u32 = 3; + +#[tokio::test(flavor = "current_thread")] +async fn udp_counter() -> anyhow::Result<()> { + if patchbay::check_caps().is_err() { + eprintln!("skipping: no namespace capabilities"); + return Ok(()); + } + + let lab = patchbay::Lab::new().await?; + let dc = lab.add_router("dc").build().await?; + let sender = lab + .add_device("sender") + .iface("eth0", dc.id(), None) + .build() + .await?; + let receiver = lab + .add_device("receiver") + .iface("eth0", dc.id(), None) + .build() + .await?; + + let recv_ip = receiver.ip().unwrap(); + let port: u16 = 9999; + + // Spawn UDP listener in the receiver's namespace. + let rx_handle = receiver.spawn(move |_dev| async move { + let sock = tokio::net::UdpSocket::bind(format!("{recv_ip}:{port}")).await?; + let mut count = 0u32; + let mut buf = [0u8; 64]; + for _ in 0..PACKET_COUNT { + let _ = tokio::time::timeout( + std::time::Duration::from_secs(5), + sock.recv_from(&mut buf), + ) + .await??; + count += 1; + } + Ok::<_, anyhow::Error>(count) + })?; + + // Give the listener a moment to bind. + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + + // Send packets from the sender's namespace. + let send_ip = sender.ip().unwrap(); + let tx_handle = sender.spawn(move |_dev| async move { + let sock = tokio::net::UdpSocket::bind(format!("{send_ip}:0")).await?; + for i in 0..PACKET_COUNT { + sock.send_to( + format!("pkt-{i}").as_bytes(), + format!("{recv_ip}:{port}"), + ) + .await?; + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + } + Ok::<_, anyhow::Error>(()) + })?; + + tx_handle.await??; + let received = rx_handle.await??; + + sender.record("packet_count", PACKET_COUNT as f64); + assert_eq!(received, PACKET_COUNT); + Ok(()) +} + +#[tokio::test(flavor = "current_thread")] +async fn udp_threshold() -> anyhow::Result<()> { + if patchbay::check_caps().is_err() { + eprintln!("skipping: no namespace capabilities"); + return Ok(()); + } + // This test passes when PACKET_COUNT >= THRESHOLD, fails otherwise. + assert!( + PACKET_COUNT >= THRESHOLD, + "packet count {} below threshold {}", + PACKET_COUNT, + THRESHOLD + ); + Ok(()) +} diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts new file mode 100644 index 0000000..165789f --- /dev/null +++ b/ui/e2e/compare.spec.ts @@ -0,0 +1,108 @@ +import { test, expect } from '@playwright/test' +import { mkdtempSync, mkdirSync, writeFileSync, rmSync } from 'node:fs' +import { tmpdir } from 'node:os' +import { join } from 'node:path' +import { type ChildProcess, spawn } from 'node:child_process' +import { PATCHBAY_BIN, REPO_ROOT, waitForHttp } from './helpers' + +const PORT = 7434 +const UI_URL = `http://127.0.0.1:${PORT}` + +const MINIMAL_EVENT = + '{"opid":1,"timestamp":"2026-03-25T00:00:00Z","kind":"lab_created","lab_prefix":"lab-p1","label":"test"}\n' + +const MOCK_METRICS = [ + '{"t":1,"m":{"packet_count":5.0}}', + '{"t":2,"m":{"packet_count":5.0}}', + '{"t":3,"m":{"packet_count":5.0}}', +].join('\n') + '\n' + +const MOCK_MANIFEST = { + left_ref: 'v1', + right_ref: 'v2', + timestamp: '20260325_120000', + left_results: [ + { name: 'counter::udp_counter', status: 'pass', duration_ms: 100 }, + { name: 'counter::udp_threshold', status: 'pass', duration_ms: 50 }, + ], + right_results: [ + { name: 'counter::udp_counter', status: 'pass', duration_ms: 110 }, + { name: 'counter::udp_threshold', status: 'fail', duration_ms: 40 }, + ], + summary: { + left_pass: 2, + left_fail: 0, + left_total: 2, + right_pass: 1, + right_fail: 1, + right_total: 2, + fixes: 0, + regressions: 1, + left_time_ms: 150, + right_time_ms: 150, + score: -5, + }, +} + +test('compare view renders summary and regression', async ({ page }) => { + test.setTimeout(60_000) + const workDir = mkdtempSync(join(tmpdir(), 'patchbay-compare-e2e-')) + let proc: ChildProcess | null = null + + try { + // Write mock data + const batchDir = join(workDir, 'compare-mock') + mkdirSync(join(batchDir, 'left-v1'), { recursive: true }) + mkdirSync(join(batchDir, 'right-v2'), { recursive: true }) + writeFileSync(join(batchDir, 'summary.json'), JSON.stringify(MOCK_MANIFEST)) + writeFileSync(join(batchDir, 'left-v1', 'events.jsonl'), MINIMAL_EVENT) + writeFileSync(join(batchDir, 'right-v2', 'events.jsonl'), MINIMAL_EVENT) + writeFileSync( + join(batchDir, 'right-v2', 'device.sender.metrics.jsonl'), + MOCK_METRICS, + ) + + // Start server + proc = spawn( + PATCHBAY_BIN, + ['serve', workDir, '--bind', `127.0.0.1:${PORT}`], + { cwd: REPO_ROOT, stdio: 'pipe' }, + ) + await waitForHttp(UI_URL, 15_000) + + // Navigate to the app + await page.goto(UI_URL) + + // Select the compare batch + await page.waitForTimeout(1000) + // Look for compare-mock in the page (it should be in the run list) + const batchLink = page.getByText('compare-mock') + if (await batchLink.isVisible()) { + await batchLink.click() + } else { + // Try selector + const selector = page.locator('select') + if (await selector.isVisible()) { + await selector.selectOption({ label: 'compare-mock' }) + } + } + + // Verify CompareView renders + await expect(page.getByText('v1')).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('v2')).toBeVisible() + + // Summary + await expect(page.getByText('Regressions')).toBeVisible() + + // Per-test table + await expect(page.getByText('udp_counter')).toBeVisible() + await expect(page.getByText('udp_threshold')).toBeVisible() + await expect(page.getByText('REGRESS')).toBeVisible() + + // Score + await expect(page.getByText('-5')).toBeVisible() + } finally { + if (proc && !proc.killed) proc.kill('SIGTERM') + rmSync(workDir, { recursive: true, force: true }) + } +}) From b545878fdddb98bca7148ee6c393c3f744cf8405 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:09:23 +0100 Subject: [PATCH 12/38] refactor: CompareSummary left/right structs, Backend dispatch, testdir copy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CompareSummary: - Split flat fields into left/right RunStats structs - Use std::time::Duration with custom serde (serialize as ms) - Update compare_results, print_summary, UI, and test mocks VmOps → Backend methods: - Remove VmOps trait, QemuBackend/ContainerBackend ZSTs, Box - Implement dispatch methods directly on Backend enum - backend.resolve().up(recreate) instead of resolve_ops(b).up(recreate) testdir: - Copy target/testdir-current to .patchbay/work/testdir after native tests Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 60 ++++++++++++++++------ patchbay-cli/src/main.rs | 18 +++---- patchbay-cli/src/test.rs | 26 +++++++++- patchbay-vm/src/lib.rs | 85 ++++++++++++++++++------------- ui/e2e/compare.spec.ts | 10 +--- ui/src/components/CompareView.tsx | 6 +-- 6 files changed, 131 insertions(+), 74 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index a220bf3..17515de 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -2,6 +2,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; +use std::time::Duration; use anyhow::{bail, Context, Result}; use serde::{Deserialize, Serialize}; @@ -79,21 +80,39 @@ pub struct CompareManifest { pub summary: CompareSummary, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RunStats { + pub pass: usize, + pub fail: usize, + pub total: usize, + #[serde(with = "duration_ms")] + pub time: Duration, +} + #[derive(Debug, Serialize, Deserialize)] pub struct CompareSummary { - pub left_pass: usize, - pub left_fail: usize, - pub left_total: usize, - pub right_pass: usize, - pub right_fail: usize, - pub right_total: usize, + pub left: RunStats, + pub right: RunStats, pub fixes: usize, pub regressions: usize, - pub left_time_ms: u64, - pub right_time_ms: u64, pub score: i32, } +/// Serialize Duration as milliseconds. +mod duration_ms { + use std::time::Duration; + use serde::{Deserialize, Deserializer, Serializer}; + + pub fn serialize(d: &Duration, s: S) -> Result { + s.serialize_u64(d.as_millis() as u64) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result { + let ms = u64::deserialize(d)?; + Ok(Duration::from_millis(ms)) + } +} + /// Parse cargo test output into TestResults. /// Parses lines like "test tests::foo ... ok" and "test tests::bar ... FAILED". pub fn parse_test_output(output: &str) -> Vec { @@ -210,10 +229,19 @@ pub fn compare_results( } CompareSummary { - left_pass, left_fail, left_total: left.len(), - right_pass, right_fail, right_total: right.len(), + left: RunStats { + pass: left_pass, + fail: left_fail, + total: left.len(), + time: Duration::from_millis(left_time_ms), + }, + right: RunStats { + pass: right_pass, + fail: right_fail, + total: right.len(), + time: Duration::from_millis(right_time_ms), + }, fixes, regressions, - left_time_ms, right_time_ms, score, } } @@ -222,18 +250,18 @@ pub fn compare_results( pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right: &[TestResult], summary: &CompareSummary) { println!("\nCompare: {left_ref} \u{2194} {right_ref}\n"); println!("Tests: {}/{} pass \u{2192} {}/{} pass", - summary.left_pass, summary.left_total, - summary.right_pass, summary.right_total); + summary.left.pass, summary.left.total, + summary.right.pass, summary.right.total); if summary.fixes > 0 { println!("Fixes: {} (fail\u{2192}pass)", summary.fixes); } if summary.regressions > 0 { println!("Regressions: {} (pass\u{2192}fail)", summary.regressions); } - if summary.left_time_ms > 0 || summary.right_time_ms > 0 { + if !summary.left.time.is_zero() || !summary.right.time.is_zero() { println!("Total time: {:.1}s \u{2192} {:.1}s", - summary.left_time_ms as f64 / 1000.0, - summary.right_time_ms as f64 / 1000.0); + summary.left.time.as_secs_f64(), + summary.right.time.as_secs_f64()); } // Per-test table diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index e39a986..c9006b5 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -399,7 +399,7 @@ async fn tokio_main() -> Result<()> { #[cfg(feature = "vm")] if let Some(vm_backend) = vm { let backend = match vm_backend.as_str() { - "auto" => patchbay_vm::resolve_backend(patchbay_vm::Backend::Auto), + "auto" => patchbay_vm::Backend::Auto.resolve(), "qemu" => patchbay_vm::Backend::Qemu, "container" => patchbay_vm::Backend::Container, other => bail!("unknown VM backend: {other}"), @@ -493,14 +493,14 @@ async fn tokio_main() -> Result<()> { /// Dispatch VM subcommands to the patchbay-vm library. #[cfg(feature = "vm")] async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Result<()> { - let ops = patchbay_vm::resolve_ops(backend); + let backend = backend.resolve(); match command { - VmCommand::Up { recreate } => ops.up(recreate), - VmCommand::Down => ops.down(), - VmCommand::Status => ops.status(), - VmCommand::Cleanup => ops.cleanup(), - VmCommand::Ssh { cmd } => ops.exec(cmd), + VmCommand::Up { recreate } => backend.up(recreate), + VmCommand::Down => backend.down(), + VmCommand::Status => backend.status(), + VmCommand::Cleanup => backend.cleanup(), + VmCommand::Ssh { cmd } => backend.exec(cmd), VmCommand::Run { sims, work_dir, @@ -531,7 +531,7 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul recreate, patchbay_version, }; - let res = ops.run_sims(args); + let res = backend.run_sims(args); if open && res.is_ok() { println!("run finished; server still running (Ctrl-C to exit)"); loop { @@ -587,7 +587,7 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul no_fail_fast, extra_args: cargo_args, }; - ops.run_tests(test_args.into_vm_args(target, recreate)) + backend.run_tests(test_args.into_vm_args(target, recreate)) } } } diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index c00c1ae..db0970f 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -163,13 +163,35 @@ pub fn run_native(args: TestArgs) -> Result<()> { if !status.success() { bail!("tests failed (exit code {})", status.code().unwrap_or(-1)); } + copy_testdir_output(); Ok(()) } +/// Copy testdir-current into the work dir if it exists. +fn copy_testdir_output() { + // Try to find target/testdir-current via cargo metadata + let Ok(output) = Command::new("cargo") + .args(["metadata", "--format-version=1", "--no-deps"]) + .output() else { return }; + if !output.status.success() { return; } + let Ok(meta) = serde_json::from_slice::(&output.stdout) else { return }; + let Some(target_dir) = meta["target_directory"].as_str() else { return }; + let testdir = std::path::Path::new(target_dir).join("testdir-current"); + if !testdir.exists() { return; } + let dest = std::path::Path::new(".patchbay/work/testdir"); + if dest.exists() { let _ = std::fs::remove_dir_all(dest); } + // Use cp -r since std::fs doesn't have recursive copy + let _ = Command::new("cp") + .args(["-r"]) + .arg(&testdir) + .arg(dest) + .status(); +} + /// Run tests in a VM via patchbay-vm. #[cfg(feature = "vm")] pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { - let ops = patchbay_vm::resolve_ops(backend); + let backend = backend.resolve(); let target = patchbay_vm::default_test_target(); - ops.run_tests(args.into_vm_args(target, false)) + backend.run_tests(args.into_vm_args(target, false)) } diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index f2ea77e..61ee574 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -43,45 +43,58 @@ pub fn resolve_backend(b: Backend) -> Backend { } } -/// Backend operations for VM-based execution. -pub trait VmOps { - fn up(&self, recreate: bool) -> anyhow::Result<()>; - fn down(&self) -> anyhow::Result<()>; - fn status(&self) -> anyhow::Result<()>; - fn cleanup(&self) -> anyhow::Result<()>; - fn exec(&self, cmd: Vec) -> anyhow::Result<()>; - fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()>; - fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()>; -} +impl Backend { + /// Resolve auto-detection and return a concrete backend. + pub fn resolve(self) -> Self { + resolve_backend(self) + } -pub struct QemuBackend; -pub struct ContainerBackend; + pub fn up(&self, recreate: bool) -> anyhow::Result<()> { + match self { + Self::Container => container::up_cmd(recreate), + _ => qemu::up_cmd(recreate), + } + } -impl VmOps for QemuBackend { - fn up(&self, recreate: bool) -> anyhow::Result<()> { qemu::up_cmd(recreate) } - fn down(&self) -> anyhow::Result<()> { qemu::down_cmd() } - fn status(&self) -> anyhow::Result<()> { qemu::status_cmd() } - fn cleanup(&self) -> anyhow::Result<()> { qemu::cleanup_cmd() } - fn exec(&self, cmd: Vec) -> anyhow::Result<()> { qemu::ssh_cmd_cli(cmd) } - fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { qemu::run_sims_in_vm(args) } - fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { qemu::run_tests_in_vm(args) } -} + pub fn down(&self) -> anyhow::Result<()> { + match self { + Self::Container => container::down_cmd(), + _ => qemu::down_cmd(), + } + } -impl VmOps for ContainerBackend { - fn up(&self, recreate: bool) -> anyhow::Result<()> { container::up_cmd(recreate) } - fn down(&self) -> anyhow::Result<()> { container::down_cmd() } - fn status(&self) -> anyhow::Result<()> { container::status_cmd() } - fn cleanup(&self) -> anyhow::Result<()> { container::cleanup_cmd() } - fn exec(&self, cmd: Vec) -> anyhow::Result<()> { container::exec_cmd_cli(cmd) } - fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { container::run_sims(args) } - fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { container::run_tests(args) } -} + pub fn status(&self) -> anyhow::Result<()> { + match self { + Self::Container => container::status_cmd(), + _ => qemu::status_cmd(), + } + } -/// Resolve backend and return a boxed trait object. -pub fn resolve_ops(b: Backend) -> Box { - let resolved = resolve_backend(b); - match resolved { - Backend::Container => Box::new(ContainerBackend), - _ => Box::new(QemuBackend), + pub fn cleanup(&self) -> anyhow::Result<()> { + match self { + Self::Container => container::cleanup_cmd(), + _ => qemu::cleanup_cmd(), + } + } + + pub fn exec(&self, cmd: Vec) -> anyhow::Result<()> { + match self { + Self::Container => container::exec_cmd_cli(cmd), + _ => qemu::ssh_cmd_cli(cmd), + } + } + + pub fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { + match self { + Self::Container => container::run_sims(args), + _ => qemu::run_sims_in_vm(args), + } + } + + pub fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { + match self { + Self::Container => container::run_tests(args), + _ => qemu::run_tests_in_vm(args), + } } } diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts index 165789f..fed257f 100644 --- a/ui/e2e/compare.spec.ts +++ b/ui/e2e/compare.spec.ts @@ -30,16 +30,10 @@ const MOCK_MANIFEST = { { name: 'counter::udp_threshold', status: 'fail', duration_ms: 40 }, ], summary: { - left_pass: 2, - left_fail: 0, - left_total: 2, - right_pass: 1, - right_fail: 1, - right_total: 2, + left: { pass: 2, fail: 0, total: 2, time: 150 }, + right: { pass: 1, fail: 1, total: 2, time: 150 }, fixes: 0, regressions: 1, - left_time_ms: 150, - right_time_ms: 150, score: -5, }, } diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 41d829e..d1b6d05 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -6,8 +6,8 @@ interface CompareManifest { right_ref: string timestamp: string summary: { - left_pass: number; left_fail: number; left_total: number - right_pass: number; right_fail: number; right_total: number + left: { pass: number; fail: number; total: number; time: number } + right: { pass: number; fail: number; total: number; time: number } fixes: number; regressions: number; score: number } left_results: { name: string; status: string; duration_ms?: number }[] @@ -44,7 +44,7 @@ export default function CompareView({ batchName }: { batchName: string }) { {/* Summary bar */}
- Tests: {s.left_pass}/{s.left_total} → {s.right_pass}/{s.right_total} + Tests: {s.left.pass}/{s.left.total} → {s.right.pass}/{s.right.total}
{s.fixes > 0 &&
Fixes: {s.fixes}
} {s.regressions > 0 &&
Regressions: {s.regressions}
} From e158421b08e80e726f276ca2d8f6b157adc8547d Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:26:01 +0100 Subject: [PATCH 13/38] fix: simplify compare integration test, remove unused copy_dir Copy fixture source + write Cargo.toml with absolute patchbay path instead of trying to reconstruct the crate from scratch. Remove -p flag since compare runs all tests in the workspace. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/tests/compare_integration.rs | 64 +++++++---------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 31388a7..6a5f2d3 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -1,5 +1,5 @@ //! Integration test for `patchbay compare test`. -//! Creates a temp git repo with the counter fixture, makes two commits +//! Copies the counter fixture into a temp git repo, makes two commits //! with different PACKET_COUNT values, and runs compare between them. use std::path::Path; @@ -28,50 +28,39 @@ fn compare_detects_regression() { let tmp = tempfile::tempdir().unwrap(); let dir = tmp.path(); - let patchbay_root = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap(); - let patchbay_crate = patchbay_root.join("patchbay"); + let cli_dir = Path::new(env!("CARGO_MANIFEST_DIR")); + let patchbay_crate = cli_dir.parent().unwrap().join("patchbay"); + let fixture_dir = cli_dir.join("tests/fixtures/counter"); - // Write workspace - std::fs::write( - dir.join("Cargo.toml"), - "[workspace]\nmembers = [\"counter\"]\nresolver = \"2\"\n", - ) - .unwrap(); + // Copy fixture into temp dir, skipping Cargo.lock and target/ + std::fs::create_dir_all(dir.join("tests")).unwrap(); + std::fs::copy(fixture_dir.join("tests/counter.rs"), dir.join("tests/counter.rs")).unwrap(); - // Write counter crate - let counter_dir = dir.join("counter"); - std::fs::create_dir_all(counter_dir.join("tests")).unwrap(); + // Write Cargo.toml with absolute path to patchbay crate std::fs::write( - counter_dir.join("Cargo.toml"), + dir.join("Cargo.toml"), format!( - "[package]\nname = \"counter\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n\ - [dev-dependencies]\npatchbay = {{ path = \"{}\" }}\n\ - tokio = {{ version = \"1\", features = [\"rt\", \"macros\", \"net\", \"time\"] }}\n\ - anyhow = \"1\"\n", + "[workspace]\n\n\ + [package]\nname = \"counter-fixture\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n\ + [dev-dependencies]\n\ + patchbay = {{ path = \"{}\" }}\n\ + tokio = {{ version = \"1\", features = [\"rt\", \"macros\", \"net\", \"time\"] }}\n\ + anyhow = \"1\"\n", patchbay_crate.display() ), ) .unwrap(); - // Read fixture source from our fixtures dir - let fixture_src = std::fs::read_to_string( - Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/counter/tests/counter.rs"), - ) - .unwrap(); - // Commit 1: passing (PACKET_COUNT = 5) - std::fs::write(counter_dir.join("tests/counter.rs"), &fixture_src).unwrap(); git(dir, &["init"]); git(dir, &["add", "."]); git(dir, &["commit", "-m", "passing"]); git(dir, &["tag", "v1"]); // Commit 2: regressing (PACKET_COUNT = 2, below THRESHOLD = 3) - let regressed = fixture_src.replace( - "const PACKET_COUNT: u32 = 5;", - "const PACKET_COUNT: u32 = 2;", - ); - std::fs::write(counter_dir.join("tests/counter.rs"), ®ressed).unwrap(); + let src = std::fs::read_to_string(dir.join("tests/counter.rs")).unwrap(); + let regressed = src.replace("const PACKET_COUNT: u32 = 5;", "const PACKET_COUNT: u32 = 2;"); + std::fs::write(dir.join("tests/counter.rs"), regressed).unwrap(); git(dir, &["add", "."]); git(dir, &["commit", "-m", "regressing"]); git(dir, &["tag", "v2"]); @@ -79,16 +68,7 @@ fn compare_detects_regression() { // Run compare let patchbay_bin = env!("CARGO_BIN_EXE_patchbay"); let output = Command::new(patchbay_bin) - .args([ - "compare", - "test", - "--ref", - "v1", - "--ref2", - "v2", - "-p", - "counter", - ]) + .args(["compare", "test", "--ref", "v1", "--ref2", "v2"]) .current_dir(dir) .output() .unwrap(); @@ -109,11 +89,7 @@ fn compare_detects_regression() { let compare_dir = std::fs::read_dir(&work) .unwrap() .filter_map(|e| e.ok()) - .find(|e| { - e.file_name() - .to_string_lossy() - .starts_with("compare-") - }) + .find(|e| e.file_name().to_string_lossy().starts_with("compare-")) .expect("compare output dir not found"); let manifest_path = compare_dir.path().join("summary.json"); let manifest: serde_json::Value = From e50c8274fa70053045597ee6a286ed24c1e11eaf Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:26:32 +0100 Subject: [PATCH 14/38] feat: VmOps trait, iroh-metrics feature, native-gate inspect/run-in VmOps trait: - Trait with ZST impls (Qemu, Container) that delegate to module fns - Backend enum implements VmOps by dispatching to resolved ZST - Backend::resolve() replaces free resolve_backend function iroh-metrics: - Optional feature iroh-metrics in patchbay crate - device.record_iroh_metrics(&dyn MetricsGroup) iterates counters/gauges and emits via MetricsBuilder inspect/run-in: - Feature-gated on cfg(target_os = "linux") since they require namespaces Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 151 ++++++++++++++++++++++++++++++++++++++- patchbay-cli/src/main.rs | 24 ++++++- patchbay-cli/src/test.rs | 1 + patchbay-vm/src/lib.rs | 123 ++++++++++++++++--------------- patchbay/Cargo.toml | 4 ++ patchbay/src/handles.rs | 38 ++++++++++ 6 files changed, 279 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5127e7c..55b05a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,6 +191,15 @@ dependencies = [ "syn", ] +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -336,6 +345,12 @@ version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.11.1" @@ -460,6 +475,15 @@ dependencies = [ "cc", ] +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.18", +] + [[package]] name = "colorchoice" version = "1.0.4" @@ -763,6 +787,18 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + [[package]] name = "encoding_rs" version = "0.8.35" @@ -910,7 +946,7 @@ dependencies = [ "diatomic-waker", "futures-core", "pin-project-lite", - "spin", + "spin 0.10.0", ] [[package]] @@ -1082,6 +1118,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.15.5" @@ -1097,6 +1142,20 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +[[package]] +name = "heapless" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version", + "serde", + "spin 0.9.8", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.5.0" @@ -1450,6 +1509,34 @@ dependencies = [ "serde", ] +[[package]] +name = "iroh-metrics" +version = "0.38.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "761b45ba046134b11eb3e432fa501616b45c4bf3a30c21717578bc07aa6461dd" +dependencies = [ + "iroh-metrics-derive", + "itoa", + "n0-error", + "portable-atomic", + "postcard", + "ryu", + "serde", + "tracing", +] + +[[package]] +name = "iroh-metrics-derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab063c2bfd6c3d5a33a913d4fdb5252f140db29ec67c704f20f3da7e8f92dbf" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "is-docker" version = "0.2.0" @@ -1654,6 +1741,27 @@ dependencies = [ "uuid", ] +[[package]] +name = "n0-error" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af4782b4baf92d686d161c15460c83d16ebcfd215918763903e9619842665cae" +dependencies = [ + "n0-error-macros", + "spez", +] + +[[package]] +name = "n0-error-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03755949235714b2b307e5ae89dd8c1c2531fb127d9b8b7b4adf9c876cd3ed18" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "n0-tracing-test" version = "0.3.0" @@ -1959,6 +2067,7 @@ dependencies = [ "futures-buffered", "hickory-resolver", "ipnet", + "iroh-metrics", "libc", "n0-tracing-test", "nix", @@ -2132,6 +2241,22 @@ name = "portable-atomic" version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" +dependencies = [ + "serde", +] + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "heapless", + "serde", +] [[package]] name = "potential_utf" @@ -2789,6 +2914,26 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "spez" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87e960f4dca2788eeb86bbdde8dd246be8948790b7618d656e68f9b720a86e8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + [[package]] name = "spin" version = "0.10.0" @@ -2924,10 +3069,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.2", "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index c9006b5..f0985af 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -7,8 +7,9 @@ mod test; mod upload; mod util; +#[cfg(target_os = "linux")] +use std::collections::HashMap; use std::{ - collections::HashMap, path::{Path, PathBuf}, process::Command as ProcessCommand, time::Duration, @@ -22,6 +23,8 @@ use patchbay_runner::sim; use patchbay_server::DEFAULT_UI_BIND; #[cfg(not(feature = "serve"))] const DEFAULT_UI_BIND: &str = "127.0.0.1:7421"; +#[cfg(feature = "vm")] +use patchbay_vm::VmOps; use serde::{Deserialize, Serialize}; #[derive(Parser)] @@ -112,6 +115,7 @@ enum Command { open: bool, }, /// Build topology from sim/topology config for interactive namespace debugging. + #[cfg(target_os = "linux")] Inspect { /// Sim TOML or topology TOML file path. input: PathBuf, @@ -120,6 +124,7 @@ enum Command { work_dir: PathBuf, }, /// Run a command inside a node namespace from an inspect session. + #[cfg(target_os = "linux")] RunIn { /// Device or router name from the inspected topology. node: String, @@ -388,7 +393,9 @@ async fn tokio_main() -> Result<()> { } patchbay_server::serve(dir, &bind).await } + #[cfg(target_os = "linux")] Command::Inspect { input, work_dir } => inspect_command(input, work_dir).await, + #[cfg(target_os = "linux")] Command::RunIn { node, inspect, @@ -655,6 +662,7 @@ fn resolve_testdir_native() -> Result { Ok(PathBuf::from(target_dir).join("testdir-current")) } +#[cfg(target_os = "linux")] #[derive(Debug, Clone, Serialize, Deserialize)] struct InspectSession { prefix: String, @@ -664,18 +672,22 @@ struct InspectSession { node_keeper_pids: HashMap, } +#[cfg(target_os = "linux")] fn inspect_dir(work_dir: &std::path::Path) -> PathBuf { work_dir.join("inspect") } +#[cfg(target_os = "linux")] fn inspect_session_path(work_dir: &std::path::Path, prefix: &str) -> PathBuf { inspect_dir(work_dir).join(format!("{prefix}.json")) } +#[cfg(target_os = "linux")] fn env_key_suffix(name: &str) -> String { patchbay::util::sanitize_for_env_key(name) } +#[cfg(target_os = "linux")] fn load_topology_for_inspect( input: &std::path::Path, ) -> Result<(patchbay::config::LabConfig, bool)> { @@ -698,6 +710,7 @@ fn load_topology_for_inspect( } } +#[cfg(target_os = "linux")] fn keeper_commmand() -> ProcessCommand { let mut cmd = ProcessCommand::new("sh"); cmd.args(["-lc", "while :; do sleep 3600; done"]) @@ -707,6 +720,7 @@ fn keeper_commmand() -> ProcessCommand { cmd } +#[cfg(target_os = "linux")] async fn inspect_command(input: PathBuf, work_dir: PathBuf) -> Result<()> { check_caps()?; @@ -788,6 +802,7 @@ async fn inspect_command(input: PathBuf, work_dir: PathBuf) -> Result<()> { } } +#[cfg(target_os = "linux")] fn resolve_inspect_ref(inspect: Option) -> Result { if let Some(value) = inspect { let trimmed = value.trim(); @@ -805,6 +820,7 @@ fn resolve_inspect_ref(inspect: Option) -> Result { Ok(trimmed.to_string()) } +#[cfg(target_os = "linux")] fn load_inspect_session(work_dir: &std::path::Path, inspect_ref: &str) -> Result { let as_path = PathBuf::from(inspect_ref); let session_path = if as_path.extension().and_then(|v| v.to_str()) == Some("json") @@ -820,6 +836,7 @@ fn load_inspect_session(work_dir: &std::path::Path, inspect_ref: &str) -> Result .with_context(|| format!("parse inspect session {}", session_path.display())) } +#[cfg(target_os = "linux")] fn run_in_command( node: String, inspect: Option, @@ -865,12 +882,14 @@ mod tests { use super::*; + #[cfg(target_os = "linux")] #[test] fn env_key_suffix_normalizes_names() { assert_eq!(env_key_suffix("relay"), "relay"); assert_eq!(env_key_suffix("fetcher-1"), "fetcher_1"); } + #[cfg(target_os = "linux")] #[test] fn inspect_session_path_uses_prefix_json() { let base = PathBuf::from("/tmp/patchbay-work"); @@ -878,6 +897,7 @@ mod tests { assert!(path.ends_with("inspect/lab-p123.json")); } + #[cfg(target_os = "linux")] fn write_temp_file(dir: &Path, rel: &str, body: &str) -> PathBuf { let path = dir.join(rel); if let Some(parent) = path.parent() { @@ -887,6 +907,7 @@ mod tests { path } + #[cfg(target_os = "linux")] #[test] fn inspect_loader_detects_sim_input() { let root = std::env::temp_dir().join(format!( @@ -906,6 +927,7 @@ mod tests { assert!(is_sim); } + #[cfg(target_os = "linux")] #[test] fn inspect_loader_detects_topology_input() { let root = std::env::temp_dir().join(format!( diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index db0970f..f7e9c13 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -191,6 +191,7 @@ fn copy_testdir_output() { /// Run tests in a VM via patchbay-vm. #[cfg(feature = "vm")] pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { + use patchbay_vm::VmOps; let backend = backend.resolve(); let target = patchbay_vm::default_test_target(); backend.run_tests(args.into_vm_args(target, false)) diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index 61ee574..530d7e3 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -7,7 +7,6 @@ pub use common::{RunVmArgs, TestVmArgs}; use clap::ValueEnum; -/// VM backend selection. #[derive(Clone, Debug, ValueEnum)] pub enum Backend { /// Auto-detect: prefer `container` on macOS Apple Silicon, fall back to QEMU. @@ -26,75 +25,83 @@ pub fn default_test_target() -> String { } } -/// Resolve `Backend::Auto` into a concrete backend. -pub fn resolve_backend(b: Backend) -> Backend { - match b { - Backend::Auto => { - if std::env::consts::OS == "macos" - && std::env::consts::ARCH == "aarch64" - && common::command_exists("container").unwrap_or(false) - { - Backend::Container - } else { - Backend::Qemu - } - } - other => other, - } +/// VM backend operations. +pub trait VmOps { + fn up(&self, recreate: bool) -> anyhow::Result<()>; + fn down(&self) -> anyhow::Result<()>; + fn status(&self) -> anyhow::Result<()>; + fn cleanup(&self) -> anyhow::Result<()>; + fn exec(&self, cmd: Vec) -> anyhow::Result<()>; + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()>; + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()>; +} + +/// QEMU backend. +pub struct Qemu; + +impl VmOps for Qemu { + fn up(&self, recreate: bool) -> anyhow::Result<()> { qemu::up_cmd(recreate) } + fn down(&self) -> anyhow::Result<()> { qemu::down_cmd() } + fn status(&self) -> anyhow::Result<()> { qemu::status_cmd() } + fn cleanup(&self) -> anyhow::Result<()> { qemu::cleanup_cmd() } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { qemu::ssh_cmd_cli(cmd) } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { qemu::run_sims_in_vm(args) } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { qemu::run_tests_in_vm(args) } +} + +/// Apple container backend. +pub struct Container; + +impl VmOps for Container { + fn up(&self, recreate: bool) -> anyhow::Result<()> { container::up_cmd(recreate) } + fn down(&self) -> anyhow::Result<()> { container::down_cmd() } + fn status(&self) -> anyhow::Result<()> { container::status_cmd() } + fn cleanup(&self) -> anyhow::Result<()> { container::cleanup_cmd() } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { container::exec_cmd_cli(cmd) } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { container::run_sims(args) } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { container::run_tests(args) } } impl Backend { - /// Resolve auto-detection and return a concrete backend. + /// Resolve `Auto` into a concrete backend. pub fn resolve(self) -> Self { - resolve_backend(self) - } - - pub fn up(&self, recreate: bool) -> anyhow::Result<()> { match self { - Self::Container => container::up_cmd(recreate), - _ => qemu::up_cmd(recreate), + Self::Auto => { + if std::env::consts::OS == "macos" + && std::env::consts::ARCH == "aarch64" + && common::command_exists("container").unwrap_or(false) + { + Self::Container + } else { + Self::Qemu + } + } + other => other, } } +} - pub fn down(&self) -> anyhow::Result<()> { - match self { - Self::Container => container::down_cmd(), - _ => qemu::down_cmd(), - } +/// Implement VmOps on Backend by delegating to the resolved backend. +impl VmOps for Backend { + fn up(&self, recreate: bool) -> anyhow::Result<()> { + match self { Self::Container => Container.up(recreate), _ => Qemu.up(recreate) } } - - pub fn status(&self) -> anyhow::Result<()> { - match self { - Self::Container => container::status_cmd(), - _ => qemu::status_cmd(), - } + fn down(&self) -> anyhow::Result<()> { + match self { Self::Container => Container.down(), _ => Qemu.down() } } - - pub fn cleanup(&self) -> anyhow::Result<()> { - match self { - Self::Container => container::cleanup_cmd(), - _ => qemu::cleanup_cmd(), - } + fn status(&self) -> anyhow::Result<()> { + match self { Self::Container => Container.status(), _ => Qemu.status() } } - - pub fn exec(&self, cmd: Vec) -> anyhow::Result<()> { - match self { - Self::Container => container::exec_cmd_cli(cmd), - _ => qemu::ssh_cmd_cli(cmd), - } + fn cleanup(&self) -> anyhow::Result<()> { + match self { Self::Container => Container.cleanup(), _ => Qemu.cleanup() } } - - pub fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { - match self { - Self::Container => container::run_sims(args), - _ => qemu::run_sims_in_vm(args), - } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { + match self { Self::Container => Container.exec(cmd), _ => Qemu.exec(cmd) } } - - pub fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { - match self { - Self::Container => container::run_tests(args), - _ => qemu::run_tests_in_vm(args), - } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { + match self { Self::Container => Container.run_sims(args), _ => Qemu.run_sims(args) } + } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { + match self { Self::Container => Container.run_tests(args), _ => Qemu.run_tests(args) } } } diff --git a/patchbay/Cargo.toml b/patchbay/Cargo.toml index cdab33c..f0708b6 100644 --- a/patchbay/Cargo.toml +++ b/patchbay/Cargo.toml @@ -8,7 +8,11 @@ license.workspace = true authors.workspace = true repository.workspace = true +[features] +iroh-metrics = ["dep:iroh-metrics"] + [dependencies] +iroh-metrics = { version = "0.38", optional = true } anyhow = "1" chrono = { version = "0.4", default-features = false, features = ["clock", "serde"] } derive_more = { version = "2.1.1", features = ["debug", "display"] } diff --git a/patchbay/src/handles.rs b/patchbay/src/handles.rs index 4749b5d..96b5888 100644 --- a/patchbay/src/handles.rs +++ b/patchbay/src/handles.rs @@ -232,6 +232,25 @@ impl Device { crate::metrics::MetricsBuilder::new(self.dispatch.clone()) } + /// Record all counter/gauge values from an iroh-metrics group. + /// + /// Iterates the group's metrics and emits each counter or gauge as a + /// patchbay metric line. Histograms are skipped. + #[cfg(feature = "iroh-metrics")] + pub fn record_iroh_metrics(&self, group: &dyn iroh_metrics::MetricsGroup) { + let _guard = self.enter_tracing(); + let mut builder = self.metrics(); + for item in group.iter() { + let value: f64 = match item.value() { + iroh_metrics::MetricValue::Counter(v) => v as f64, + iroh_metrics::MetricValue::Gauge(v) => v as f64, + _ => continue, + }; + builder = builder.record(item.name(), value); + } + builder.emit(); + } + /// Returns the node identifier. pub fn id(&self) -> NodeId { self.id @@ -1024,6 +1043,25 @@ impl Router { crate::metrics::MetricsBuilder::new(self.dispatch.clone()) } + /// Record all counter/gauge values from an iroh-metrics group. + /// + /// Iterates the group's metrics and emits each counter or gauge as a + /// patchbay metric line. Histograms are skipped. + #[cfg(feature = "iroh-metrics")] + pub fn record_iroh_metrics(&self, group: &dyn iroh_metrics::MetricsGroup) { + let _guard = self.enter_tracing(); + let mut builder = self.metrics(); + for item in group.iter() { + let value: f64 = match item.value() { + iroh_metrics::MetricValue::Counter(v) => v as f64, + iroh_metrics::MetricValue::Gauge(v) => v as f64, + _ => continue, + }; + builder = builder.record(item.name(), value); + } + builder.emit(); + } + /// Returns the node identifier. pub fn id(&self) -> NodeId { self.id From 30dfa811c5859ec57bb1cc86cffc8578b0a7dd50 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:27:11 +0100 Subject: [PATCH 15/38] fix: copy fixture Cargo.toml and replace path dep instead of rewriting Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/tests/compare_integration.rs | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 6a5f2d3..368a83c 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -32,24 +32,17 @@ fn compare_detects_regression() { let patchbay_crate = cli_dir.parent().unwrap().join("patchbay"); let fixture_dir = cli_dir.join("tests/fixtures/counter"); - // Copy fixture into temp dir, skipping Cargo.lock and target/ + // Copy fixture files into temp dir std::fs::create_dir_all(dir.join("tests")).unwrap(); std::fs::copy(fixture_dir.join("tests/counter.rs"), dir.join("tests/counter.rs")).unwrap(); - // Write Cargo.toml with absolute path to patchbay crate - std::fs::write( - dir.join("Cargo.toml"), - format!( - "[workspace]\n\n\ - [package]\nname = \"counter-fixture\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n\ - [dev-dependencies]\n\ - patchbay = {{ path = \"{}\" }}\n\ - tokio = {{ version = \"1\", features = [\"rt\", \"macros\", \"net\", \"time\"] }}\n\ - anyhow = \"1\"\n", - patchbay_crate.display() - ), - ) - .unwrap(); + // Copy Cargo.toml and replace the relative patchbay path with absolute + let cargo_toml = std::fs::read_to_string(fixture_dir.join("Cargo.toml")).unwrap(); + let cargo_toml = cargo_toml.replace( + "path = \"../../../../patchbay\"", + &format!("path = \"{}\"", patchbay_crate.display()), + ); + std::fs::write(dir.join("Cargo.toml"), cargo_toml).unwrap(); // Commit 1: passing (PACKET_COUNT = 5) git(dir, &["init"]); From 2139d1a59dbbef5cce7cf9de0513acfa842dcd84 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:29:35 +0100 Subject: [PATCH 16/38] refactor: workspace deps for internal crates, more compare test assertions Use workspace.dependencies in root Cargo.toml for all internal crates. Member crates now use { workspace = true } instead of relative paths. Enhanced compare integration test with assertions for: - Summary output in stdout - Left/right RunStats (pass/fail/total counts) - Per-test results (udp_threshold fails on right, passes on left) - Worktree cleanup verification Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.toml | 7 +++ patchbay-cli/Cargo.toml | 12 ++-- patchbay-cli/tests/compare_integration.rs | 68 ++++++++++++++++++++++- patchbay-runner/Cargo.toml | 4 +- patchbay-vm/Cargo.toml | 2 +- 5 files changed, 81 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f84c60c..4c9f08b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,3 +7,10 @@ edition = "2021" license = "MIT OR Apache-2.0" authors = ["Franz Heinzmann "] repository = "https://github.com/n0-computer/patchbay" + +[workspace.dependencies] +patchbay = { path = "patchbay" } +patchbay-utils = { path = "patchbay-utils" } +patchbay-runner = { path = "patchbay-runner" } +patchbay-vm = { path = "patchbay-vm" } +patchbay-server = { path = "patchbay-server" } diff --git a/patchbay-cli/Cargo.toml b/patchbay-cli/Cargo.toml index 6eb617a..2d74e76 100644 --- a/patchbay-cli/Cargo.toml +++ b/patchbay-cli/Cargo.toml @@ -15,11 +15,11 @@ path = "src/main.rs" anyhow = "1" chrono = { version = "0.4", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive"] } -patchbay = { path = "../patchbay" } -patchbay-runner = { path = "../patchbay-runner" } -patchbay-vm = { path = "../patchbay-vm", optional = true } -patchbay-server = { path = "../patchbay-server", optional = true } -patchbay-utils = { path = "../patchbay-utils" } +patchbay = { workspace = true } +patchbay-runner = { workspace = true } +patchbay-vm = { workspace = true, optional = true } +patchbay-server = { workspace = true, optional = true } +patchbay-utils = { workspace = true } ctor = "0.6" nix = { version = "0.30", features = ["signal", "process"] } flate2 = "1" @@ -33,7 +33,7 @@ toml = "1.0" tracing = "0.1" [dev-dependencies] -patchbay = { path = "../patchbay" } +patchbay = { workspace = true } serde_json = "1" tempfile = "3" diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 368a83c..0b9169f 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -34,7 +34,11 @@ fn compare_detects_regression() { // Copy fixture files into temp dir std::fs::create_dir_all(dir.join("tests")).unwrap(); - std::fs::copy(fixture_dir.join("tests/counter.rs"), dir.join("tests/counter.rs")).unwrap(); + std::fs::copy( + fixture_dir.join("tests/counter.rs"), + dir.join("tests/counter.rs"), + ) + .unwrap(); // Copy Cargo.toml and replace the relative patchbay path with absolute let cargo_toml = std::fs::read_to_string(fixture_dir.join("Cargo.toml")).unwrap(); @@ -77,19 +81,77 @@ fn compare_detects_regression() { "expected non-zero exit due to regression" ); + // stdout should contain the summary output + assert!(stdout.contains("Compare:"), "missing Compare header"); + assert!(stdout.contains("Score:"), "missing Score line"); + // Find and parse the manifest let work = dir.join(".patchbay/work"); + assert!(work.exists(), ".patchbay/work dir not created"); let compare_dir = std::fs::read_dir(&work) .unwrap() .filter_map(|e| e.ok()) .find(|e| e.file_name().to_string_lossy().starts_with("compare-")) .expect("compare output dir not found"); let manifest_path = compare_dir.path().join("summary.json"); + assert!(manifest_path.exists(), "summary.json not written"); + let manifest: serde_json::Value = serde_json::from_str(&std::fs::read_to_string(&manifest_path).unwrap()).unwrap(); + // Refs assert_eq!(manifest["left_ref"], "v1"); assert_eq!(manifest["right_ref"], "v2"); - assert!(manifest["summary"]["regressions"].as_u64().unwrap() >= 1); - assert!(manifest["summary"]["score"].as_i64().unwrap() < 0); + assert!(manifest["timestamp"].is_string(), "missing timestamp"); + + // Left side: both tests pass (PACKET_COUNT=5 >= THRESHOLD=3) + let left = &manifest["summary"]["left"]; + assert_eq!(left["pass"].as_u64().unwrap(), 2, "left should have 2 passes"); + assert_eq!(left["fail"].as_u64().unwrap(), 0, "left should have 0 failures"); + assert_eq!(left["total"].as_u64().unwrap(), 2); + + // Right side: udp_threshold fails (PACKET_COUNT=2 < THRESHOLD=3) + let right = &manifest["summary"]["right"]; + assert_eq!(right["pass"].as_u64().unwrap(), 1, "right should have 1 pass"); + assert_eq!(right["fail"].as_u64().unwrap(), 1, "right should have 1 failure"); + assert_eq!(right["total"].as_u64().unwrap(), 2); + + // Regression/fix counts + let summary = &manifest["summary"]; + assert_eq!(summary["regressions"].as_u64().unwrap(), 1); + assert_eq!(summary["fixes"].as_u64().unwrap(), 0); + assert!(summary["score"].as_i64().unwrap() < 0, "score should be negative"); + + // Per-test results + let left_results = manifest["left_results"].as_array().unwrap(); + let right_results = manifest["right_results"].as_array().unwrap(); + assert_eq!(left_results.len(), 2, "should have 2 left test results"); + assert_eq!(right_results.len(), 2, "should have 2 right test results"); + + // Find the threshold test in right results — it should fail + let threshold_right = right_results + .iter() + .find(|r| r["name"].as_str().unwrap().contains("udp_threshold")) + .expect("udp_threshold test not found in right results"); + assert_eq!(threshold_right["status"], "fail"); + + // Find the threshold test in left results — it should pass + let threshold_left = left_results + .iter() + .find(|r| r["name"].as_str().unwrap().contains("udp_threshold")) + .expect("udp_threshold test not found in left results"); + assert_eq!(threshold_left["status"], "pass"); + + // Worktrees should be cleaned up (no changes = removed) + let tree_dir = dir.join(".patchbay/tree"); + if tree_dir.exists() { + let remaining: Vec<_> = std::fs::read_dir(&tree_dir) + .unwrap() + .filter_map(|e| e.ok()) + .collect(); + assert!( + remaining.is_empty(), + "worktrees should be cleaned up, found: {remaining:?}" + ); + } } diff --git a/patchbay-runner/Cargo.toml b/patchbay-runner/Cargo.toml index 2ff2fd2..e61addd 100644 --- a/patchbay-runner/Cargo.toml +++ b/patchbay-runner/Cargo.toml @@ -14,8 +14,8 @@ chrono = { version = "0.4", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive"] } comfy-table = "7" flate2 = "1" -patchbay = { path = "../patchbay" } -patchbay-utils = { path = "../patchbay-utils" } +patchbay = { workspace = true } +patchbay-utils = { workspace = true } nix = { version = "0.30", features = ["signal", "process"] } rcgen = "0.14" regex = "1" diff --git a/patchbay-vm/Cargo.toml b/patchbay-vm/Cargo.toml index bedd6e7..86ab6e4 100644 --- a/patchbay-vm/Cargo.toml +++ b/patchbay-vm/Cargo.toml @@ -14,7 +14,7 @@ repository.workspace = true anyhow = "1" clap = { version = "4", features = ["derive"] } dirs = "6" -patchbay-utils = { path = "../patchbay-utils" } +patchbay-utils = { workspace = true } nix = { version = "0.30", features = ["signal", "process"] } serde = { version = "1", features = ["derive"] } serde_json = "1" From 216f7f34e5b206f9f68cf24c0d3ea43e9b768c90 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:32:48 +0100 Subject: [PATCH 17/38] test: validate metrics.jsonl output in compare integration test Set PATCHBAY_OUTDIR so the fixture's Lab writes device metrics. After compare, find *.metrics.jsonl files and assert they contain packet_count values matching the fixture's PACKET_COUNT constants. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/tests/compare_integration.rs | 56 ++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 0b9169f..3554a43 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -62,10 +62,12 @@ fn compare_detects_regression() { git(dir, &["commit", "-m", "regressing"]); git(dir, &["tag", "v2"]); - // Run compare + // Run compare with PATCHBAY_OUTDIR so the fixture's Lab writes metrics + let lab_outdir = dir.join("lab-output"); let patchbay_bin = env!("CARGO_BIN_EXE_patchbay"); let output = Command::new(patchbay_bin) .args(["compare", "test", "--ref", "v1", "--ref2", "v2"]) + .env("PATCHBAY_OUTDIR", &lab_outdir) .current_dir(dir) .output() .unwrap(); @@ -154,4 +156,56 @@ fn compare_detects_regression() { "worktrees should be cleaned up, found: {remaining:?}" ); } + + // Validate metrics.jsonl from the fixture's Lab output. + // The udp_counter test calls sender.record("packet_count", N). + // With PATCHBAY_OUTDIR set, the Lab writes device.sender.metrics.jsonl. + if lab_outdir.exists() { + let metrics_files: Vec<_> = walkdir(&lab_outdir) + .into_iter() + .filter(|p| p.file_name().map_or(false, |f| f.to_string_lossy().ends_with(".metrics.jsonl"))) + .collect(); + assert!( + !metrics_files.is_empty(), + "expected metrics.jsonl files in {}, found none", + lab_outdir.display() + ); + + // At least one metrics file should contain packet_count + let mut found_packet_count = false; + for path in &metrics_files { + let content = std::fs::read_to_string(path).unwrap(); + for line in content.lines() { + if let Ok(val) = serde_json::from_str::(line) { + if let Some(m) = val.get("m").and_then(|m| m.as_object()) { + if let Some(count) = m.get("packet_count").and_then(|v| v.as_f64()) { + found_packet_count = true; + // v1 has PACKET_COUNT=5, v2 has PACKET_COUNT=2 + assert!( + count == 5.0 || count == 2.0, + "unexpected packet_count value: {count}" + ); + } + } + } + } + } + assert!(found_packet_count, "no packet_count metric found in metrics files"); + } +} + +/// Recursively collect all file paths under a directory. +fn walkdir(dir: &Path) -> Vec { + let mut files = Vec::new(); + if let Ok(entries) = std::fs::read_dir(dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_dir() { + files.extend(walkdir(&path)); + } else { + files.push(path); + } + } + } + files } From 014dc2662501e2def67df02b5c789f8f2a02f725 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:36:59 +0100 Subject: [PATCH 18/38] refactor: use testdir in counter fixture, validate metrics via cargo metadata Fixture now uses testdir!() + Lab::with_opts(outdir) matching iroh test patterns. Integration test uses cargo metadata to find target_dir/testdir-current and validates device.sender.metrics.jsonl contains packet_count values. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/tests/compare_integration.rs | 41 +++++++++++++------ .../tests/fixtures/counter/Cargo.toml | 1 + .../tests/fixtures/counter/tests/counter.rs | 8 +++- 3 files changed, 37 insertions(+), 13 deletions(-) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 3554a43..5601c6e 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -62,12 +62,10 @@ fn compare_detects_regression() { git(dir, &["commit", "-m", "regressing"]); git(dir, &["tag", "v2"]); - // Run compare with PATCHBAY_OUTDIR so the fixture's Lab writes metrics - let lab_outdir = dir.join("lab-output"); + // Run compare let patchbay_bin = env!("CARGO_BIN_EXE_patchbay"); let output = Command::new(patchbay_bin) .args(["compare", "test", "--ref", "v1", "--ref2", "v2"]) - .env("PATCHBAY_OUTDIR", &lab_outdir) .current_dir(dir) .output() .unwrap(); @@ -157,18 +155,35 @@ fn compare_detects_regression() { ); } - // Validate metrics.jsonl from the fixture's Lab output. - // The udp_counter test calls sender.record("packet_count", N). - // With PATCHBAY_OUTDIR set, the Lab writes device.sender.metrics.jsonl. - if lab_outdir.exists() { - let metrics_files: Vec<_> = walkdir(&lab_outdir) + // Validate metrics.jsonl from testdir output. + // The fixture uses testdir!() so Lab output goes to + // /testdir-current//device.sender.metrics.jsonl + // Use cargo metadata to find the target dir in the temp repo. + let testdir_current = { + let meta_out = Command::new("cargo") + .args(["metadata", "--format-version=1", "--no-deps"]) + .current_dir(dir) + .output() + .unwrap(); + let meta: serde_json::Value = + serde_json::from_slice(&meta_out.stdout).unwrap_or_default(); + let target = meta["target_directory"] + .as_str() + .map(|s| Path::new(s).join("testdir-current")); + target.unwrap_or_else(|| dir.join("target/testdir-current")) + }; + if testdir_current.exists() { + let metrics_files: Vec<_> = walkdir(&testdir_current) .into_iter() - .filter(|p| p.file_name().map_or(false, |f| f.to_string_lossy().ends_with(".metrics.jsonl"))) + .filter(|p| { + p.file_name() + .map_or(false, |f| f.to_string_lossy().ends_with(".metrics.jsonl")) + }) .collect(); assert!( !metrics_files.is_empty(), "expected metrics.jsonl files in {}, found none", - lab_outdir.display() + testdir_current.display() ); // At least one metrics file should contain packet_count @@ -180,7 +195,6 @@ fn compare_detects_regression() { if let Some(m) = val.get("m").and_then(|m| m.as_object()) { if let Some(count) = m.get("packet_count").and_then(|v| v.as_f64()) { found_packet_count = true; - // v1 has PACKET_COUNT=5, v2 has PACKET_COUNT=2 assert!( count == 5.0 || count == 2.0, "unexpected packet_count value: {count}" @@ -190,7 +204,10 @@ fn compare_detects_regression() { } } } - assert!(found_packet_count, "no packet_count metric found in metrics files"); + assert!( + found_packet_count, + "no packet_count metric found in metrics files" + ); } } diff --git a/patchbay-cli/tests/fixtures/counter/Cargo.toml b/patchbay-cli/tests/fixtures/counter/Cargo.toml index 8d9fba3..5c92fa2 100644 --- a/patchbay-cli/tests/fixtures/counter/Cargo.toml +++ b/patchbay-cli/tests/fixtures/counter/Cargo.toml @@ -8,5 +8,6 @@ publish = false [dev-dependencies] patchbay = { path = "../../../../patchbay" } +testdir = "0.9" tokio = { version = "1", features = ["rt", "macros", "net", "time"] } anyhow = "1" diff --git a/patchbay-cli/tests/fixtures/counter/tests/counter.rs b/patchbay-cli/tests/fixtures/counter/tests/counter.rs index fd84844..13ff545 100644 --- a/patchbay-cli/tests/fixtures/counter/tests/counter.rs +++ b/patchbay-cli/tests/fixtures/counter/tests/counter.rs @@ -12,7 +12,13 @@ async fn udp_counter() -> anyhow::Result<()> { return Ok(()); } - let lab = patchbay::Lab::new().await?; + let outdir = testdir::testdir!(); + let lab = patchbay::Lab::with_opts( + patchbay::LabOpts::default() + .outdir(patchbay::OutDir::Nested(outdir)) + .label("udp-counter"), + ) + .await?; let dc = lab.add_router("dc").build().await?; let sender = lab .add_device("sender") From daaf649655316dc387b2f6bb0d5e5da01519a292 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:41:02 +0100 Subject: [PATCH 19/38] feat: add -v/--verbose to compare for live subcommand output Stream stdout/stderr from cargo test in real time when -v is passed. Uses piped stdio with reader threads so output is both printed live and captured for result parsing. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 34 +++++++++++++++++++++++++++++--- patchbay-cli/src/main.rs | 9 ++++++--- patchbay-cli/src/test.rs | 39 +++++++++++++++++++++++++++---------- patchbay-cli/src/util.rs | 6 +++--- 4 files changed, 69 insertions(+), 19 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 17515de..709eddc 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -142,6 +142,7 @@ pub fn parse_test_output(output: &str) -> Vec { pub fn run_tests_in_dir( dir: &Path, args: &crate::test::TestArgs, + verbose: bool, ) -> Result<(Vec, String)> { let mut cmd = Command::new("cargo"); cmd.current_dir(dir); @@ -167,9 +168,36 @@ pub fn run_tests_in_dir( } } - let output = cmd.output().context("run cargo test")?; - let stdout = String::from_utf8_lossy(&output.stdout).to_string(); - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + use std::io::BufRead; + cmd.stdout(std::process::Stdio::piped()); + cmd.stderr(std::process::Stdio::piped()); + let mut child = cmd.spawn().context("spawn cargo test")?; + + let stdout_pipe = child.stdout.take().unwrap(); + let stderr_pipe = child.stderr.take().unwrap(); + let v = verbose; + let out_t = std::thread::spawn(move || { + let mut buf = String::new(); + for line in std::io::BufReader::new(stdout_pipe).lines().map_while(Result::ok) { + if v { println!("{line}"); } + buf.push_str(&line); + buf.push('\n'); + } + buf + }); + let err_t = std::thread::spawn(move || { + let mut buf = String::new(); + for line in std::io::BufReader::new(stderr_pipe).lines().map_while(Result::ok) { + if verbose { eprintln!("{line}"); } + buf.push_str(&line); + buf.push('\n'); + } + buf + }); + + let _ = child.wait().context("wait for cargo test")?; + let stdout = out_t.join().unwrap_or_default(); + let stderr = err_t.join().unwrap_or_default(); let combined = format!("{stdout}\n{stderr}"); let results = parse_test_output(&combined); Ok((results, combined)) diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index f0985af..d5fcbfd 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -149,6 +149,9 @@ enum Command { }, /// Compare test or sim results across git refs. Compare { + /// Stream subcommand output live. + #[arg(short = 'v', long, global = true)] + verbose: bool, #[command(subcommand)] command: CompareCommand, }, @@ -419,7 +422,7 @@ async fn tokio_main() -> Result<()> { } test::run_native(args) } - Command::Compare { command } => { + Command::Compare { verbose, command } => { let cwd = std::env::current_dir().context("get cwd")?; match command { CompareCommand::Test { left_ref, right_ref, args } => { @@ -437,12 +440,12 @@ async fn tokio_main() -> Result<()> { // Run tests sequentially println!("Running tests in {} ...", left_ref); let (left_results, _left_output) = compare::run_tests_in_dir( - &left_dir, &args, + &left_dir, &args, verbose, )?; println!("Running tests in {} ...", right_label); let (right_results, _right_output) = compare::run_tests_in_dir( - &right_dir, &args, + &right_dir, &args, verbose, )?; // Compare diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index f7e9c13..f69c86a 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -74,9 +74,15 @@ impl TestArgs { for f in &self.features { cargo_args.extend(["--features".into(), f.clone()]); } - if self.release { cargo_args.push("--release".into()); } - if self.lib { cargo_args.push("--lib".into()); } - if self.no_fail_fast { cargo_args.push("--no-fail-fast".into()); } + if self.release { + cargo_args.push("--release".into()); + } + if self.lib { + cargo_args.push("--lib".into()); + } + if self.no_fail_fast { + cargo_args.push("--no-fail-fast".into()); + } cargo_args.extend(self.extra_args); patchbay_vm::TestVmArgs { filter: self.filter, @@ -103,7 +109,7 @@ pub fn run_native(args: TestArgs) -> Result<()> { cmd.arg("test"); } - // Add RUSTFLAGS with cfg(patchbay_test) + // Add RUSTFLAGS with cfg(patchbay_tests) cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); // Package selectors @@ -172,14 +178,27 @@ fn copy_testdir_output() { // Try to find target/testdir-current via cargo metadata let Ok(output) = Command::new("cargo") .args(["metadata", "--format-version=1", "--no-deps"]) - .output() else { return }; - if !output.status.success() { return; } - let Ok(meta) = serde_json::from_slice::(&output.stdout) else { return }; - let Some(target_dir) = meta["target_directory"].as_str() else { return }; + .output() + else { + return; + }; + if !output.status.success() { + return; + } + let Ok(meta) = serde_json::from_slice::(&output.stdout) else { + return; + }; + let Some(target_dir) = meta["target_directory"].as_str() else { + return; + }; let testdir = std::path::Path::new(target_dir).join("testdir-current"); - if !testdir.exists() { return; } + if !testdir.exists() { + return; + } let dest = std::path::Path::new(".patchbay/work/testdir"); - if dest.exists() { let _ = std::fs::remove_dir_all(dest); } + if dest.exists() { + let _ = std::fs::remove_dir_all(dest); + } // Use cp -r since std::fs doesn't have recursive copy let _ = Command::new("cp") .args(["-r"]) diff --git a/patchbay-cli/src/util.rs b/patchbay-cli/src/util.rs index e61ad19..a12687d 100644 --- a/patchbay-cli/src/util.rs +++ b/patchbay-cli/src/util.rs @@ -1,9 +1,9 @@ -/// Build RUSTFLAGS with --cfg patchbay_test appended. +/// Build RUSTFLAGS with --cfg patchbay_tests appended. pub fn patchbay_rustflags() -> String { let existing = std::env::var("RUSTFLAGS").unwrap_or_default(); if existing.is_empty() { - "--cfg patchbay_test".to_string() + "--cfg patchbay_tests".to_string() } else { - format!("{existing} --cfg patchbay_test") + format!("{existing} --cfg patchbay_tests") } } From b05d5518d8ab89f3085d39903773b6ffe1c6024b Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 00:50:21 +0100 Subject: [PATCH 20/38] refactor: centralize test args as cargo_test_cmd, align with cargo test flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - TestArgs.cargo_test_cmd_in() builds the full cargo test Command - compare::run_tests_in_dir uses it instead of duplicating arg expansion - Rename --ignored-only → --ignored, --ignored → --include-ignored to match cargo test flag names exactly - Remove filter as positional; goes through -- like cargo test - -v/--verbose is now global on Cli, works for all subcommands - CompareCommand refs are positional: `patchbay compare test main [ref2]` - Fixture uses ctor + init_userns() (safe version) for namespace bootstrap Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 26 +-- patchbay-cli/src/main.rs | 44 ++--- patchbay-cli/src/test.rs | 170 ++++++++++-------- patchbay-cli/tests/compare_integration.rs | 7 +- .../tests/fixtures/counter/Cargo.toml | 1 + .../tests/fixtures/counter/tests/counter.rs | 16 +- 6 files changed, 125 insertions(+), 139 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 709eddc..53c3914 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -144,31 +144,9 @@ pub fn run_tests_in_dir( args: &crate::test::TestArgs, verbose: bool, ) -> Result<(Vec, String)> { - let mut cmd = Command::new("cargo"); - cmd.current_dir(dir); - cmd.arg("test"); - cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); - - for p in &args.packages { - cmd.arg("-p").arg(p); - } - for t in &args.tests { - cmd.arg("--test").arg(t); - } - - if let Some(f) = &args.filter { - cmd.arg(f); - } - if args.ignored || args.ignored_only { - cmd.arg("--"); - if args.ignored_only { - cmd.arg("--ignored"); - } else { - cmd.arg("--include-ignored"); - } - } - use std::io::BufRead; + + let mut cmd = args.cargo_test_cmd_in(Some(dir)); cmd.stdout(std::process::Stdio::piped()); cmd.stderr(std::process::Stdio::piped()); let mut child = cmd.spawn().context("spawn cargo test")?; diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index d5fcbfd..f760cdb 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -30,6 +30,9 @@ use serde::{Deserialize, Serialize}; #[derive(Parser)] #[command(name = "patchbay", about = "Run a patchbay simulation")] struct Cli { + /// Verbose output (stream subcommand output live). + #[arg(short = 'v', long, global = true)] + verbose: bool, #[command(subcommand)] command: Command, } @@ -149,9 +152,6 @@ enum Command { }, /// Compare test or sim results across git refs. Compare { - /// Stream subcommand output live. - #[arg(short = 'v', long, global = true)] - verbose: bool, #[command(subcommand)] command: CompareCommand, }, @@ -183,13 +183,13 @@ enum Command { #[derive(Subcommand)] enum CompareCommand { /// Compare test results between git refs. + /// + /// Usage: patchbay compare test [ref2] [-- test-filter-and-args] Test { - /// First git ref (compare against worktree if only one given). - #[arg(long = "ref", required = true)] + /// Git ref to compare (left side). left_ref: String, - /// Second git ref (if omitted, compare left_ref against current worktree). - #[arg(long = "ref2")] + /// Second git ref (right side). If omitted, compares against current worktree. right_ref: Option, #[command(flatten)] @@ -197,17 +197,15 @@ enum CompareCommand { }, /// Compare sim results between git refs. Run { - /// Sim TOML files or directories. - #[arg(required = true)] - sims: Vec, - - /// First git ref. - #[arg(long = "ref", required = true)] + /// Git ref to compare (left side). left_ref: String, - /// Second git ref. - #[arg(long = "ref2")] + /// Second git ref (right side). right_ref: Option, + + /// Sim TOML files or directories. + #[arg(long = "sim", required = true)] + sims: Vec, }, } @@ -422,7 +420,7 @@ async fn tokio_main() -> Result<()> { } test::run_native(args) } - Command::Compare { verbose, command } => { + Command::Compare { command } => { let cwd = std::env::current_dir().context("get cwd")?; match command { CompareCommand::Test { left_ref, right_ref, args } => { @@ -440,12 +438,12 @@ async fn tokio_main() -> Result<()> { // Run tests sequentially println!("Running tests in {} ...", left_ref); let (left_results, _left_output) = compare::run_tests_in_dir( - &left_dir, &args, verbose, + &left_dir, &args, cli.verbose, )?; println!("Running tests in {} ...", right_label); let (right_results, _right_output) = compare::run_tests_in_dir( - &right_dir, &args, verbose, + &right_dir, &args, cli.verbose, )?; // Compare @@ -585,9 +583,8 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul cargo_args, } => { let test_args = test::TestArgs { - filter, + include_ignored: false, ignored: false, - ignored_only: false, packages, tests, jobs, @@ -595,7 +592,12 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul release, lib, no_fail_fast, - extra_args: cargo_args, + extra_args: { + let mut args = Vec::new(); + if let Some(f) = filter { args.push(f); } + args.extend(cargo_args); + args + }, }; backend.run_tests(test_args.into_vm_args(target, recreate)) } diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index f69c86a..8cc9ca2 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -1,5 +1,6 @@ //! Test command implementation. +use std::path::Path; use std::process::Command; use anyhow::{bail, Context, Result}; @@ -18,17 +19,13 @@ fn has_nextest() -> bool { /// Shared test arguments used by both `patchbay test` and `patchbay compare test`. #[derive(Debug, Clone, clap::Args)] pub struct TestArgs { - /// Test name filter. - #[arg()] - pub filter: Option, - - /// Include ignored tests. + /// Include ignored tests (like `cargo test -- --include-ignored`). #[arg(long)] - pub ignored: bool, + pub include_ignored: bool, - /// Run only ignored tests. + /// Run only ignored tests (like `cargo test -- --ignored`). #[arg(long)] - pub ignored_only: bool, + pub ignored: bool, /// Package to test. #[arg(short = 'p', long = "package")] @@ -58,12 +55,62 @@ pub struct TestArgs { #[arg(long)] pub no_fail_fast: bool, - /// Extra args passed to cargo and test binaries. + /// Extra args passed after `--` to cargo/test binaries (filter, etc). #[arg(last = true)] pub extra_args: Vec, } impl TestArgs { + /// Build a `cargo test` command with all flags applied. + /// Does NOT set stdout/stderr — caller decides piping. + pub fn cargo_test_cmd(&self) -> Command { + self.cargo_test_cmd_in(None) + } + + /// Build a `cargo test` command, optionally running in a specific directory. + pub fn cargo_test_cmd_in(&self, dir: Option<&Path>) -> Command { + let mut cmd = Command::new("cargo"); + cmd.arg("test"); + cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); + if let Some(d) = dir { + cmd.current_dir(d); + } + for p in &self.packages { + cmd.arg("-p").arg(p); + } + for t in &self.tests { + cmd.arg("--test").arg(t); + } + if let Some(j) = self.jobs { + cmd.arg("-j").arg(j.to_string()); + } + for f in &self.features { + cmd.arg("-F").arg(f); + } + if self.release { + cmd.arg("--release"); + } + if self.lib { + cmd.arg("--lib"); + } + if self.no_fail_fast { + cmd.arg("--no-fail-fast"); + } + // Everything after `--`: --ignored/--include-ignored + extra args + if self.include_ignored || self.ignored || !self.extra_args.is_empty() { + cmd.arg("--"); + if self.ignored { + cmd.arg("--ignored"); + } else if self.include_ignored { + cmd.arg("--include-ignored"); + } + for a in &self.extra_args { + cmd.arg(a); + } + } + cmd + } + /// Convert to patchbay-vm TestVmArgs. #[cfg(feature = "vm")] pub fn into_vm_args(self, target: String, recreate: bool) -> patchbay_vm::TestVmArgs { @@ -85,7 +132,7 @@ impl TestArgs { } cargo_args.extend(self.extra_args); patchbay_vm::TestVmArgs { - filter: self.filter, + filter: None, target, packages: self.packages, tests: self.tests, @@ -98,74 +145,47 @@ impl TestArgs { /// Run tests natively via cargo test/nextest. pub fn run_native(args: TestArgs) -> Result<()> { let use_nextest = has_nextest(); - if !use_nextest { - eprintln!("patchbay: cargo-nextest not found, using cargo test (nextest recommended for structured output)"); - } - - let mut cmd = Command::new("cargo"); - if use_nextest { + let mut cmd = if use_nextest { + let mut cmd = Command::new("cargo"); cmd.arg("nextest").arg("run"); - } else { - cmd.arg("test"); - } - - // Add RUSTFLAGS with cfg(patchbay_tests) - cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); - - // Package selectors - for p in &args.packages { - cmd.arg("-p").arg(p); - } - for t in &args.tests { - cmd.arg("--test").arg(t); - } - if let Some(j) = args.jobs { - cmd.arg("-j").arg(j.to_string()); - } - for f in &args.features { - cmd.arg("-F").arg(f); - } - if args.release { - cmd.arg("--release"); - } - if args.lib { - cmd.arg("--lib"); - } - if args.no_fail_fast { - cmd.arg("--no-fail-fast"); - } - - // Extra cargo args - for a in &args.extra_args { - cmd.arg(a); - } - - // For cargo test (not nextest), filter and --ignored go after -- - if use_nextest { - if let Some(ref f) = args.filter { - cmd.arg("-E").arg(format!("test(/{f}/)")); + cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); + for p in &args.packages { + cmd.arg("-p").arg(p); + } + for t in &args.tests { + cmd.arg("--test").arg(t); } - if args.ignored { + if let Some(j) = args.jobs { + cmd.arg("-j").arg(j.to_string()); + } + for f in &args.features { + cmd.arg("-F").arg(f); + } + if args.release { + cmd.arg("--release"); + } + if args.lib { + cmd.arg("--lib"); + } + if args.no_fail_fast { + cmd.arg("--no-fail-fast"); + } + if args.include_ignored { cmd.arg("--run-ignored").arg("all"); - } else if args.ignored_only { + } else if args.ignored { cmd.arg("--run-ignored").arg("ignored-only"); } - } else { - // cargo test: filter before --, ignored flags after -- - if let Some(ref f) = args.filter { - cmd.arg(f); - } - if args.ignored || args.ignored_only { - cmd.arg("--"); - if args.ignored_only { - cmd.arg("--ignored"); - } else { - cmd.arg("--include-ignored"); - } + // nextest: extra_args go directly (filter is just a positional) + for a in &args.extra_args { + cmd.arg(a); } - } + cmd + } else { + eprintln!("patchbay: cargo-nextest not found, using cargo test"); + args.cargo_test_cmd() + }; - let status = cmd.status().context("failed to run cargo test")?; + let status = cmd.status().context("failed to run tests")?; if !status.success() { bail!("tests failed (exit code {})", status.code().unwrap_or(-1)); } @@ -175,7 +195,6 @@ pub fn run_native(args: TestArgs) -> Result<()> { /// Copy testdir-current into the work dir if it exists. fn copy_testdir_output() { - // Try to find target/testdir-current via cargo metadata let Ok(output) = Command::new("cargo") .args(["metadata", "--format-version=1", "--no-deps"]) .output() @@ -199,12 +218,7 @@ fn copy_testdir_output() { if dest.exists() { let _ = std::fs::remove_dir_all(dest); } - // Use cp -r since std::fs doesn't have recursive copy - let _ = Command::new("cp") - .args(["-r"]) - .arg(&testdir) - .arg(dest) - .status(); + let _ = Command::new("cp").args(["-r"]).arg(&testdir).arg(dest).status(); } /// Run tests in a VM via patchbay-vm. diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 5601c6e..81e2204 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -19,13 +19,8 @@ fn git(dir: &Path, args: &[&str]) { } #[test] -#[ignore] // Requires namespace capabilities + builds from scratch +#[ignore] // Slow: builds fixture crate from scratch in worktrees fn compare_detects_regression() { - if patchbay::check_caps().is_err() { - eprintln!("skipping: no namespace capabilities"); - return; - } - let tmp = tempfile::tempdir().unwrap(); let dir = tmp.path(); let cli_dir = Path::new(env!("CARGO_MANIFEST_DIR")); diff --git a/patchbay-cli/tests/fixtures/counter/Cargo.toml b/patchbay-cli/tests/fixtures/counter/Cargo.toml index 5c92fa2..0bd5710 100644 --- a/patchbay-cli/tests/fixtures/counter/Cargo.toml +++ b/patchbay-cli/tests/fixtures/counter/Cargo.toml @@ -8,6 +8,7 @@ publish = false [dev-dependencies] patchbay = { path = "../../../../patchbay" } +ctor = "0.6" testdir = "0.9" tokio = { version = "1", features = ["rt", "macros", "net", "time"] } anyhow = "1" diff --git a/patchbay-cli/tests/fixtures/counter/tests/counter.rs b/patchbay-cli/tests/fixtures/counter/tests/counter.rs index 13ff545..e36ba0d 100644 --- a/patchbay-cli/tests/fixtures/counter/tests/counter.rs +++ b/patchbay-cli/tests/fixtures/counter/tests/counter.rs @@ -5,13 +5,14 @@ const PACKET_COUNT: u32 = 5; const THRESHOLD: u32 = 3; +#[cfg(target_os = "linux")] +#[ctor::ctor] +fn init() { + patchbay::init_userns().expect("init_userns"); +} + #[tokio::test(flavor = "current_thread")] async fn udp_counter() -> anyhow::Result<()> { - if patchbay::check_caps().is_err() { - eprintln!("skipping: no namespace capabilities"); - return Ok(()); - } - let outdir = testdir::testdir!(); let lab = patchbay::Lab::with_opts( patchbay::LabOpts::default() @@ -78,11 +79,6 @@ async fn udp_counter() -> anyhow::Result<()> { #[tokio::test(flavor = "current_thread")] async fn udp_threshold() -> anyhow::Result<()> { - if patchbay::check_caps().is_err() { - eprintln!("skipping: no namespace capabilities"); - return Ok(()); - } - // This test passes when PACKET_COUNT >= THRESHOLD, fails otherwise. assert!( PACKET_COUNT >= THRESHOLD, "packet count {} below threshold {}", From 1a4528277818ac38095cba489998f598e40f9b4b Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 09:12:43 +0100 Subject: [PATCH 21/38] fix: per-worktree target dir, force cleanup, fixture ctor bootstrap - Set CARGO_TARGET_DIR per worktree in compare to prevent shared binary cache from masking source changes between refs - Use --force in worktree cleanup to handle untracked target/ dirs - Fixture uses ctor + init_userns() for namespace bootstrap - Align --ignored/--include-ignored with cargo test flag names - Refs are positional: `patchbay compare test main [ref2] [-- filter]` - -v/--verbose is global on Cli struct Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 9 ++++++--- patchbay-cli/tests/compare_integration.rs | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 53c3914..717c136 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -29,7 +29,8 @@ pub fn setup_worktree(git_ref: &str, base: &Path) -> Result { Ok(tree_dir) } -/// Remove worktree if it has no changes. +/// Remove worktree if tracked files are unchanged. +/// Uses --force to handle untracked files (e.g. target/). pub fn cleanup_worktree(tree_dir: &Path) -> Result<()> { let diff = Command::new("git") .args(["diff", "--quiet"]) @@ -37,9 +38,8 @@ pub fn cleanup_worktree(tree_dir: &Path) -> Result<()> { .status() .context("git diff")?; if diff.success() { - // No changes, safe to remove let _ = Command::new("git") - .args(["worktree", "remove"]) + .args(["worktree", "remove", "--force"]) .arg(tree_dir) .status(); } @@ -147,6 +147,9 @@ pub fn run_tests_in_dir( use std::io::BufRead; let mut cmd = args.cargo_test_cmd_in(Some(dir)); + // Use a per-worktree target dir to avoid sharing cached binaries + // between different git refs. + cmd.env("CARGO_TARGET_DIR", dir.join("target")); cmd.stdout(std::process::Stdio::piped()); cmd.stderr(std::process::Stdio::piped()); let mut child = cmd.spawn().context("spawn cargo test")?; diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 81e2204..7f5e8f0 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -60,7 +60,7 @@ fn compare_detects_regression() { // Run compare let patchbay_bin = env!("CARGO_BIN_EXE_patchbay"); let output = Command::new(patchbay_bin) - .args(["compare", "test", "--ref", "v1", "--ref2", "v2"]) + .args(["-v", "compare", "test", "v1", "v2"]) .current_dir(dir) .output() .unwrap(); From c358ad6dd1e8b3cff9bfebe35ec358019fe2137f Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 12:32:38 +0100 Subject: [PATCH 22/38] refactor: unified RunManifest, cached compare, run.json everywhere MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Move RunManifest to patchbay-utils with RunKind enum, chrono dates, Duration-as-ms serde, TestResult/TestStatus types, git_context(), resolve_ref(), find_run_for_commit(), parse_test_output() - Rename runner's RunManifest → SimRunReport, dir prefix sim- → run- - patchbay test: pipe output, write run.json to testdir, --persist flag - Compare: check cached runs by commit SHA, --force-build/--no-ref-build - Server: discover runs by run.json OR events.jsonl, batch→group rename, query params (project, kind, limit, offset), /compare/* SPA route - UI: RunsIndex with filters/pagination/checkbox compare selection, CompareView at /compare/:left/:right with client-side diff, split-screen co-navigation, ComparePage route wrapper - Backend::Auto panics if not resolved (was silent Qemu fallback) - E2e tests updated for new UI layout and data model - All 6 e2e tests pass, zero clippy warnings Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 2 + patchbay-cli/src/compare.rs | 242 +++++------ patchbay-cli/src/main.rs | 108 +++-- patchbay-cli/src/test.rs | 156 +++++-- patchbay-cli/src/upload.rs | 62 ++- patchbay-cli/tests/compare_integration.rs | 91 ++-- patchbay-runner/src/sim/progress.rs | 10 +- patchbay-runner/src/sim/runner.rs | 22 +- patchbay-server/Cargo.toml | 1 + patchbay-server/src/lib.rs | 115 ++--- patchbay-utils/Cargo.toml | 1 + patchbay-utils/src/lib.rs | 1 + patchbay-utils/src/manifest.rs | 330 ++++++++++++++ patchbay-vm/src/lib.rs | 14 +- plans/compare-refactor.md | 503 ++++++++++++++++++++++ ui/e2e/compare.spec.ts | 101 ++--- ui/e2e/devtools.spec.ts | 16 +- ui/e2e/global-setup.ts | 19 +- ui/e2e/push.spec.ts | 30 +- ui/e2e/runner-sim.spec.ts | 43 +- ui/src/App.tsx | 101 ++--- ui/src/ComparePage.tsx | 19 + ui/src/RunsIndex.tsx | 359 ++++++++++++--- ui/src/api.ts | 55 ++- ui/src/components/CompareView.tsx | 268 +++++++++--- ui/src/main.tsx | 3 +- 26 files changed, 1994 insertions(+), 678 deletions(-) create mode 100644 patchbay-utils/src/manifest.rs create mode 100644 plans/compare-refactor.md create mode 100644 ui/src/ComparePage.tsx diff --git a/Cargo.lock b/Cargo.lock index 55b05a3..b964117 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2147,6 +2147,7 @@ dependencies = [ "clap", "dirs", "flate2", + "patchbay-utils", "rustls", "serde", "serde_json", @@ -2164,6 +2165,7 @@ name = "patchbay-utils" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "flate2", "glob", "reqwest", diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 717c136..f019d22 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -4,7 +4,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; use std::time::Duration; use anyhow::{bail, Context, Result}; -use serde::{Deserialize, Serialize}; +use patchbay_utils::manifest::{self, TestResult, TestStatus}; /// Set up a git worktree for the given ref. pub fn setup_worktree(git_ref: &str, base: &Path) -> Result { @@ -52,91 +52,10 @@ fn sanitize_ref(r: &str) -> String { // ── Test comparison ── -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TestResult { - pub name: String, - pub status: TestStatus, - pub duration_ms: Option, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum TestStatus { - Pass, - Fail, - Ignored, -} +// Types re-exported from patchbay_utils::manifest: +// TestResult, TestStatus, RunManifest, RunKind -#[derive(Debug, Serialize, Deserialize)] -pub struct CompareManifest { - pub left_ref: String, - pub right_ref: String, - pub timestamp: String, - /// Project name (for CI upload scoping). - #[serde(skip_serializing_if = "Option::is_none")] - pub project: Option, - pub left_results: Vec, - pub right_results: Vec, - pub summary: CompareSummary, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RunStats { - pub pass: usize, - pub fail: usize, - pub total: usize, - #[serde(with = "duration_ms")] - pub time: Duration, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct CompareSummary { - pub left: RunStats, - pub right: RunStats, - pub fixes: usize, - pub regressions: usize, - pub score: i32, -} - -/// Serialize Duration as milliseconds. -mod duration_ms { - use std::time::Duration; - use serde::{Deserialize, Deserializer, Serializer}; - - pub fn serialize(d: &Duration, s: S) -> Result { - s.serialize_u64(d.as_millis() as u64) - } - - pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result { - let ms = u64::deserialize(d)?; - Ok(Duration::from_millis(ms)) - } -} - -/// Parse cargo test output into TestResults. -/// Parses lines like "test tests::foo ... ok" and "test tests::bar ... FAILED". -pub fn parse_test_output(output: &str) -> Vec { - let mut results = Vec::new(); - for line in output.lines() { - let line = line.trim(); - // "test path::to::test ... ok" - // "test path::to::test ... FAILED" - // "test path::to::test ... ignored" - if let Some(rest) = line.strip_prefix("test ") { - if let Some((name, outcome)) = rest.rsplit_once(" ... ") { - let name = name.trim().to_string(); - let status = match outcome.trim() { - "ok" => TestStatus::Pass, - "FAILED" => TestStatus::Fail, - "ignored" => TestStatus::Ignored, - _ => continue, - }; - results.push(TestResult { name, status, duration_ms: None }); - } - } - } - results -} +pub use manifest::parse_test_output; /// Run tests in a directory and capture results. pub fn run_tests_in_dir( @@ -184,6 +103,53 @@ pub fn run_tests_in_dir( Ok((results, combined)) } +/// Persist test results from a worktree run so future compares can reuse them. +/// +/// Writes `run.json` into `.patchbay/work/run-{timestamp}/`. +pub fn persist_worktree_run( + _tree_dir: &Path, + results: &[TestResult], + commit_sha: &str, +) -> Result<()> { + use manifest::{RunKind, RunManifest}; + + let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S"); + let dest = PathBuf::from(format!(".patchbay/work/run-{ts}")); + std::fs::create_dir_all(&dest)?; + + let pass = results.iter().filter(|r| r.status == TestStatus::Pass).count() as u32; + let fail = results.iter().filter(|r| r.status == TestStatus::Fail).count() as u32; + let total = results.len() as u32; + let outcome = if fail == 0 { "pass" } else { "fail" }; + + let manifest = RunManifest { + kind: RunKind::Test, + project: None, + commit: Some(commit_sha.to_string()), + branch: None, + dirty: false, + pr: None, + pr_url: None, + title: None, + started_at: None, + ended_at: None, + runtime: None, + outcome: Some(outcome.to_string()), + pass: Some(pass), + fail: Some(fail), + total: Some(total), + tests: results.to_vec(), + os: Some(std::env::consts::OS.to_string()), + arch: Some(std::env::consts::ARCH.to_string()), + patchbay_version: option_env!("CARGO_PKG_VERSION").map(|v| v.to_string()), + }; + + let json = serde_json::to_string_pretty(&manifest)?; + std::fs::write(dest.join("run.json"), json)?; + println!("patchbay: persisted run to {}", dest.display()); + Ok(()) +} + fn test_index(results: &[TestResult]) -> std::collections::HashMap<&str, &TestResult> { results.iter().map(|r| (r.name.as_str(), r)).collect() } @@ -195,13 +161,24 @@ fn merged_names(left: &[TestResult], right: &[TestResult]) -> Vec { names } -/// Compare two sets of test results. -pub fn compare_results( - _left_ref: &str, - _right_ref: &str, - left: &[TestResult], - right: &[TestResult], -) -> CompareSummary { +/// Aggregate pass/fail/total for one side of a comparison. +pub struct SideStats { + pub pass: usize, + pub fail: usize, + pub total: usize, +} + +/// Computed comparison result (not persisted — compare is always computed on the fly). +pub struct CompareResult { + pub left: SideStats, + pub right: SideStats, + pub fixes: usize, + pub regressions: usize, + pub score: i32, +} + +/// Compare two sets of test results and return computed stats. +pub fn compare_results(left: &[TestResult], right: &[TestResult]) -> CompareResult { let left_map = test_index(left); let right_map = test_index(right); @@ -214,9 +191,8 @@ pub fn compare_results( let mut regressions = 0; let all_names = merged_names(left, right); for name in &all_names { - let name = name.as_str(); - let ls = left_map.get(name).map(|r| r.status); - let rs = right_map.get(name).map(|r| r.status); + let ls = left_map.get(name.as_str()).map(|r| r.status); + let rs = right_map.get(name.as_str()).map(|r| r.status); match (ls, rs) { (Some(TestStatus::Fail), Some(TestStatus::Pass)) => fixes += 1, (Some(TestStatus::Pass), Some(TestStatus::Fail)) => regressions += 1, @@ -224,56 +200,45 @@ pub fn compare_results( } } - let left_time_ms: u64 = left.iter().filter_map(|r| r.duration_ms).sum(); - let right_time_ms: u64 = right.iter().filter_map(|r| r.duration_ms).sum(); + let left_time: Duration = left.iter().filter_map(|r| r.duration).sum(); + let right_time: Duration = right.iter().filter_map(|r| r.duration).sum(); - // Scoring let mut score: i32 = 0; score += fixes as i32 * 3; score -= regressions as i32 * 5; - if left_time_ms > 0 { - let time_pct = (right_time_ms as f64 - left_time_ms as f64) / left_time_ms as f64 * 100.0; - if time_pct < -2.0 { score += 1; } - if time_pct > 5.0 { score -= 1; } + if !left_time.is_zero() { + let pct = (right_time.as_secs_f64() - left_time.as_secs_f64()) / left_time.as_secs_f64() * 100.0; + if pct < -2.0 { score += 1; } + if pct > 5.0 { score -= 1; } } - CompareSummary { - left: RunStats { - pass: left_pass, - fail: left_fail, - total: left.len(), - time: Duration::from_millis(left_time_ms), - }, - right: RunStats { - pass: right_pass, - fail: right_fail, - total: right.len(), - time: Duration::from_millis(right_time_ms), - }, - fixes, regressions, - score, + CompareResult { + left: SideStats { pass: left_pass, fail: left_fail, total: left.len() }, + right: SideStats { pass: right_pass, fail: right_fail, total: right.len() }, + fixes, regressions, score, + } +} + +fn status_str(s: TestStatus) -> &'static str { + match s { + TestStatus::Pass => "PASS", + TestStatus::Fail => "FAIL", + TestStatus::Ignored => "SKIP", } } /// Print a comparison summary table. -pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right: &[TestResult], summary: &CompareSummary) { +pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right: &[TestResult], result: &CompareResult) { println!("\nCompare: {left_ref} \u{2194} {right_ref}\n"); println!("Tests: {}/{} pass \u{2192} {}/{} pass", - summary.left.pass, summary.left.total, - summary.right.pass, summary.right.total); - if summary.fixes > 0 { - println!("Fixes: {} (fail\u{2192}pass)", summary.fixes); + result.left.pass, result.left.total, result.right.pass, result.right.total); + if result.fixes > 0 { + println!("Fixes: {} (fail\u{2192}pass)", result.fixes); } - if summary.regressions > 0 { - println!("Regressions: {} (pass\u{2192}fail)", summary.regressions); - } - if !summary.left.time.is_zero() || !summary.right.time.is_zero() { - println!("Total time: {:.1}s \u{2192} {:.1}s", - summary.left.time.as_secs_f64(), - summary.right.time.as_secs_f64()); + if result.regressions > 0 { + println!("Regressions: {} (pass\u{2192}fail)", result.regressions); } - // Per-test table let left_map = test_index(left); let right_map = test_index(right); let all_names = merged_names(left, right); @@ -284,18 +249,8 @@ pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right let name = name.as_str(); let ls = left_map.get(name).map(|r| r.status); let rs = right_map.get(name).map(|r| r.status); - let ls_str = match ls { - Some(TestStatus::Pass) => "PASS", - Some(TestStatus::Fail) => "FAIL", - Some(TestStatus::Ignored) => "SKIP", - None => "-", - }; - let rs_str = match rs { - Some(TestStatus::Pass) => "PASS", - Some(TestStatus::Fail) => "FAIL", - Some(TestStatus::Ignored) => "SKIP", - None => "-", - }; + let ls_str = ls.map(status_str).unwrap_or("-"); + let rs_str = rs.map(status_str).unwrap_or("-"); let delta = match (ls, rs) { (Some(TestStatus::Fail), Some(TestStatus::Pass)) => "fixed", (Some(TestStatus::Pass), Some(TestStatus::Fail)) => "REGRESS", @@ -303,10 +258,9 @@ pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right (Some(_), None) => "removed", _ => "", }; - // Truncate long test names let display_name = if name.len() > 48 { &name[name.len()-48..] } else { name }; println!("{:<50} {:>8} {:>8} {:>10}", display_name, ls_str, rs_str, delta); } - println!("\nScore: {:+} ({} fixes, {} regressions)", summary.score, summary.fixes, summary.regressions); + println!("\nScore: {:+} ({} fixes, {} regressions)", result.score, result.fixes, result.regressions); } diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index f760cdb..158012e 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -146,6 +146,10 @@ enum Command { #[command(flatten)] args: test::TestArgs, + /// Persist run output to `.patchbay/work/run-{timestamp}/`. + #[arg(long)] + persist: bool, + /// Force VM backend. #[arg(long, num_args = 0..=1, default_missing_value = "auto")] vm: Option, @@ -192,6 +196,14 @@ enum CompareCommand { /// Second git ref (right side). If omitted, compares against current worktree. right_ref: Option, + /// Force rebuild even if a cached run exists for the commit. + #[arg(long)] + force_build: bool, + + /// Fail instead of building if no cached run exists for a ref. + #[arg(long)] + no_ref_build: bool, + #[command(flatten)] args: test::TestArgs, }, @@ -403,7 +415,7 @@ async fn tokio_main() -> Result<()> { work_dir, cmd, } => run_in_command(node, inspect, work_dir, cmd), - Command::Test { args, vm } => { + Command::Test { args, persist, vm } => { #[cfg(feature = "vm")] if let Some(vm_backend) = vm { let backend = match vm_backend.as_str() { @@ -418,63 +430,69 @@ async fn tokio_main() -> Result<()> { if vm.is_some() { bail!("VM support not compiled (enable the `vm` feature)"); } - test::run_native(args) + test::run_native(args, cli.verbose, persist) } Command::Compare { command } => { let cwd = std::env::current_dir().context("get cwd")?; + let work_dir = cwd.join(".patchbay/work"); match command { - CompareCommand::Test { left_ref, right_ref, args } => { + CompareCommand::Test { left_ref, right_ref, force_build, no_ref_build, args } => { + use patchbay_utils::manifest::{self as mf, RunKind}; + let right_label = right_ref.as_deref().unwrap_or("worktree"); println!("patchbay compare test: {} \u{2194} {}", left_ref, right_label); - // Set up worktrees - let left_dir = compare::setup_worktree(&left_ref, &cwd)?; - let right_dir = if let Some(ref r) = right_ref { - compare::setup_worktree(r, &cwd)? - } else { - cwd.clone() + // Helper: resolve results for a ref, using cache or building. + let resolve_ref_results = |git_ref: &str, label: &str| -> Result> { + let sha = mf::resolve_ref(git_ref) + .with_context(|| format!("could not resolve ref '{git_ref}'"))?; + + // Check cache (unless --force-build). + if !force_build { + if let Some((_dir, manifest)) = mf::find_run_for_commit(&work_dir, &sha, RunKind::Test) { + println!("Using cached run for {label} ({sha:.8})"); + return Ok(manifest.tests); + } + } + + // No cache — fail if --no-ref-build. + if no_ref_build { + bail!( + "no cached run for {label} ({sha:.8}); \ + run `patchbay test --persist` on that ref first, \ + or remove --no-ref-build" + ); + } + + // Build in worktree. + println!("Running tests in {label} ..."); + let tree_dir = compare::setup_worktree(git_ref, &cwd)?; + let (results, _output) = compare::run_tests_in_dir(&tree_dir, &args, cli.verbose)?; + + // Persist the run so future compares can reuse it. + compare::persist_worktree_run(&tree_dir, &results, &sha)?; + + compare::cleanup_worktree(&tree_dir)?; + Ok(results) }; - // Run tests sequentially - println!("Running tests in {} ...", left_ref); - let (left_results, _left_output) = compare::run_tests_in_dir( - &left_dir, &args, cli.verbose, - )?; + let left_results = resolve_ref_results(&left_ref, &left_ref)?; - println!("Running tests in {} ...", right_label); - let (right_results, _right_output) = compare::run_tests_in_dir( - &right_dir, &args, cli.verbose, - )?; + let right_results = if let Some(ref r) = right_ref { + resolve_ref_results(r, r)? + } else { + // Compare against current worktree: always run fresh. + println!("Running tests in worktree ..."); + let (results, _output) = compare::run_tests_in_dir(&cwd, &args, cli.verbose)?; + results + }; // Compare - let summary = compare::compare_results(&left_ref, right_label, &left_results, &right_results); - compare::print_summary(&left_ref, right_label, &left_results, &right_results, &summary); - - // Write manifest - let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string(); - let compare_dir = cwd.join(".patchbay/work").join(format!("compare-{ts}")); - std::fs::create_dir_all(&compare_dir)?; - let manifest = compare::CompareManifest { - left_ref: left_ref.clone(), - right_ref: right_label.to_string(), - timestamp: ts, - project: std::env::var("PATCHBAY_PROJECT").ok(), - left_results, - right_results, - summary, - }; - let manifest_path = compare_dir.join("summary.json"); - std::fs::write(&manifest_path, serde_json::to_string_pretty(&manifest)?)?; - println!("\nManifest: {}", manifest_path.display()); - - // Cleanup worktrees - compare::cleanup_worktree(&left_dir)?; - if right_ref.is_some() { - compare::cleanup_worktree(&right_dir)?; - } + let result = compare::compare_results(&left_results, &right_results); + compare::print_summary(&left_ref, right_label, &left_results, &right_results, &result); - if manifest.summary.regressions > 0 { - bail!("{} regressions detected", manifest.summary.regressions); + if result.regressions > 0 { + bail!("{} regressions detected", result.regressions); } Ok(()) } diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 8cc9ca2..9f9bece 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -1,9 +1,10 @@ //! Test command implementation. -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Command; use anyhow::{bail, Context, Result}; +use patchbay_utils::manifest::{self, RunKind, RunManifest, TestStatus}; /// Check if cargo-nextest is available. fn has_nextest() -> bool { @@ -142,8 +143,27 @@ impl TestArgs { } } +/// Resolve `target_directory` from cargo metadata. +fn cargo_target_dir() -> Option { + let output = Command::new("cargo") + .args(["metadata", "--format-version=1", "--no-deps"]) + .output() + .ok()?; + if !output.status.success() { + return None; + } + let meta: serde_json::Value = serde_json::from_slice(&output.stdout).ok()?; + meta["target_directory"].as_str().map(PathBuf::from) +} + /// Run tests natively via cargo test/nextest. -pub fn run_native(args: TestArgs) -> Result<()> { +/// +/// Captures stdout/stderr (printing live when `verbose` is true), parses +/// test results, and writes `run.json` to `testdir-current/`. +/// When `persist` is true, copies output to `.patchbay/work/run-{timestamp}/`. +pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { + use std::io::BufRead; + let use_nextest = has_nextest(); let mut cmd = if use_nextest { let mut cmd = Command::new("cargo"); @@ -175,7 +195,6 @@ pub fn run_native(args: TestArgs) -> Result<()> { } else if args.ignored { cmd.arg("--run-ignored").arg("ignored-only"); } - // nextest: extra_args go directly (filter is just a positional) for a in &args.extra_args { cmd.arg(a); } @@ -185,42 +204,123 @@ pub fn run_native(args: TestArgs) -> Result<()> { args.cargo_test_cmd() }; - let status = cmd.status().context("failed to run tests")?; + // Set PATCHBAY_OUTDIR so test fixtures can discover the output directory. + if let Some(target_dir) = cargo_target_dir() { + let outdir = target_dir.join("testdir-current"); + cmd.env("PATCHBAY_OUTDIR", &outdir); + } + + // Pipe stdout/stderr so we can capture output while optionally printing live. + cmd.stdout(std::process::Stdio::piped()); + cmd.stderr(std::process::Stdio::piped()); + + let started_at = chrono::Utc::now(); + let mut child = cmd.spawn().context("failed to spawn test command")?; + + let stdout_pipe = child.stdout.take().unwrap(); + let stderr_pipe = child.stderr.take().unwrap(); + let v = verbose; + let out_t = std::thread::spawn(move || { + let mut buf = String::new(); + for line in std::io::BufReader::new(stdout_pipe).lines().map_while(Result::ok) { + if v { println!("{line}"); } + buf.push_str(&line); + buf.push('\n'); + } + buf + }); + let err_t = std::thread::spawn(move || { + let mut buf = String::new(); + for line in std::io::BufReader::new(stderr_pipe).lines().map_while(Result::ok) { + if verbose { eprintln!("{line}"); } + buf.push_str(&line); + buf.push('\n'); + } + buf + }); + + let status = child.wait().context("failed to wait for test command")?; + let ended_at = chrono::Utc::now(); + let stdout = out_t.join().unwrap_or_default(); + let stderr = err_t.join().unwrap_or_default(); + + let combined = format!("{stdout}\n{stderr}"); + let results = manifest::parse_test_output(&combined); + + // Write run.json into testdir-current/. + let pass = results.iter().filter(|r| r.status == TestStatus::Pass).count() as u32; + let fail = results.iter().filter(|r| r.status == TestStatus::Fail).count() as u32; + let total = results.len() as u32; + let git = manifest::git_context(); + let runtime = (ended_at - started_at).to_std().ok(); + let outcome = if status.success() { "pass" } else { "fail" }; + + let manifest = RunManifest { + kind: RunKind::Test, + project: None, + commit: git.commit, + branch: git.branch, + dirty: git.dirty, + pr: None, + pr_url: None, + title: None, + started_at: Some(started_at), + ended_at: Some(ended_at), + runtime, + outcome: Some(outcome.to_string()), + pass: Some(pass), + fail: Some(fail), + total: Some(total), + tests: results, + os: Some(std::env::consts::OS.to_string()), + arch: Some(std::env::consts::ARCH.to_string()), + patchbay_version: option_env!("CARGO_PKG_VERSION").map(|v| v.to_string()), + }; + + if let Some(target_dir) = cargo_target_dir() { + let testdir = target_dir.join("testdir-current"); + std::fs::create_dir_all(&testdir).ok(); + let run_json = testdir.join("run.json"); + if let Ok(json) = serde_json::to_string_pretty(&manifest) { + std::fs::write(&run_json, json).ok(); + } + } + + // --persist: copy output dir to .patchbay/work/run-{timestamp}/ + if persist { + persist_run()?; + } + if !status.success() { bail!("tests failed (exit code {})", status.code().unwrap_or(-1)); } - copy_testdir_output(); Ok(()) } -/// Copy testdir-current into the work dir if it exists. -fn copy_testdir_output() { - let Ok(output) = Command::new("cargo") - .args(["metadata", "--format-version=1", "--no-deps"]) - .output() - else { - return; - }; - if !output.status.success() { - return; - } - let Ok(meta) = serde_json::from_slice::(&output.stdout) else { - return; - }; - let Some(target_dir) = meta["target_directory"].as_str() else { - return; - }; - let testdir = std::path::Path::new(target_dir).join("testdir-current"); +/// Copy testdir-current/ into `.patchbay/work/run-{timestamp}/`. +fn persist_run() -> Result<()> { + let target_dir = cargo_target_dir().context("could not determine cargo target dir")?; + let testdir = target_dir.join("testdir-current"); if !testdir.exists() { - return; + return Ok(()); } - let dest = std::path::Path::new(".patchbay/work/testdir"); - if dest.exists() { - let _ = std::fs::remove_dir_all(dest); + let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S"); + let dest = PathBuf::from(format!(".patchbay/work/run-{ts}")); + std::fs::create_dir_all(dest.parent().unwrap())?; + let status = Command::new("cp") + .args(["-r"]) + .arg(&testdir) + .arg(&dest) + .status() + .context("cp testdir")?; + if !status.success() { + bail!("failed to copy testdir to {}", dest.display()); } - let _ = Command::new("cp").args(["-r"]).arg(&testdir).arg(dest).status(); + println!("patchbay: persisted run to {}", dest.display()); + Ok(()) } + /// Run tests in a VM via patchbay-vm. #[cfg(feature = "vm")] pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { diff --git a/patchbay-cli/src/upload.rs b/patchbay-cli/src/upload.rs index 515643f..068915d 100644 --- a/patchbay-cli/src/upload.rs +++ b/patchbay-cli/src/upload.rs @@ -2,42 +2,32 @@ use std::path::Path; use anyhow::{bail, Context, Result}; -use serde::Serialize; +use patchbay_utils::manifest::{RunManifest, RunKind}; -#[derive(Serialize)] -pub struct RunManifest { - pub project: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub branch: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub commit: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub pr: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub pr_url: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub title: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub test_outcome: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub created_at: Option, -} - -impl RunManifest { - /// Build manifest from env vars (typically set in CI). - pub fn from_env(project: &str) -> Self { - Self { - project: project.to_string(), - branch: std::env::var("GITHUB_REF_NAME").ok() - .or_else(|| std::env::var("GITHUB_HEAD_REF").ok()), - commit: std::env::var("GITHUB_SHA").ok(), - pr: std::env::var("GITHUB_PR_NUMBER").ok() - .and_then(|s| s.parse().ok()), - pr_url: None, // Constructed from GITHUB_SERVER_URL + GITHUB_REPOSITORY + pr number if available - title: std::env::var("GITHUB_PR_TITLE").ok(), - test_outcome: None, // Set by caller - created_at: Some(chrono::Utc::now().to_rfc3339()), - } +/// Build a RunManifest from CI environment variables. +pub fn manifest_from_env(project: &str) -> RunManifest { + RunManifest { + kind: RunKind::Sim, // default; overridden if run.json already exists + project: Some(project.to_string()), + branch: std::env::var("GITHUB_REF_NAME").ok() + .or_else(|| std::env::var("GITHUB_HEAD_REF").ok()), + commit: std::env::var("GITHUB_SHA").ok(), + pr: std::env::var("GITHUB_PR_NUMBER").ok() + .and_then(|s| s.parse().ok()), + pr_url: None, + title: std::env::var("GITHUB_PR_TITLE").ok(), + outcome: None, + started_at: Some(chrono::Utc::now()), + ended_at: None, + runtime: None, + dirty: false, + pass: None, + fail: None, + total: None, + tests: Vec::new(), + os: None, + arch: None, + patchbay_version: None, } } @@ -61,7 +51,7 @@ pub fn upload(dir: &Path, project: &str, url: &str, api_key: &str) -> Result<()> // Write run.json manifest if not already present let manifest_path = dir.join("run.json"); if !manifest_path.exists() { - let manifest = RunManifest::from_env(project); + let manifest = manifest_from_env(project); let json = serde_json::to_string_pretty(&manifest)?; std::fs::write(&manifest_path, json).context("write run.json")?; } diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 7f5e8f0..9743042 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -80,58 +80,85 @@ fn compare_detects_regression() { assert!(stdout.contains("Compare:"), "missing Compare header"); assert!(stdout.contains("Score:"), "missing Score line"); - // Find and parse the manifest + // Find the two persisted run directories in .patchbay/work/ let work = dir.join(".patchbay/work"); assert!(work.exists(), ".patchbay/work dir not created"); - let compare_dir = std::fs::read_dir(&work) + + let run_dirs: Vec<_> = std::fs::read_dir(&work) .unwrap() .filter_map(|e| e.ok()) - .find(|e| e.file_name().to_string_lossy().starts_with("compare-")) - .expect("compare output dir not found"); - let manifest_path = compare_dir.path().join("summary.json"); - assert!(manifest_path.exists(), "summary.json not written"); + .filter(|e| e.file_name().to_string_lossy().starts_with("run-")) + .collect(); + assert_eq!(run_dirs.len(), 2, "expected 2 run directories, found {}", run_dirs.len()); + + // Parse run.json from each directory + let mut manifests: Vec = run_dirs + .iter() + .map(|d| { + let run_json = d.path().join("run.json"); + assert!(run_json.exists(), "run.json not found in {}", d.path().display()); + serde_json::from_str(&std::fs::read_to_string(&run_json).unwrap()).unwrap() + }) + .collect(); + + // Both should have kind: "test" + for m in &manifests { + assert_eq!(m["kind"], "test", "run.json should have kind 'test'"); + assert!(!m["dirty"].as_bool().unwrap_or(true), "run should not be dirty"); + assert!(m["commit"].is_string(), "run.json should have a commit SHA"); + } + + // Resolve expected SHAs for v1 and v2 + let v1_sha = { + let out = Command::new("git") + .args(["rev-parse", "v1"]) + .current_dir(dir) + .output() + .unwrap(); + String::from_utf8(out.stdout).unwrap().trim().to_string() + }; + let v2_sha = { + let out = Command::new("git") + .args(["rev-parse", "v2"]) + .current_dir(dir) + .output() + .unwrap(); + String::from_utf8(out.stdout).unwrap().trim().to_string() + }; - let manifest: serde_json::Value = - serde_json::from_str(&std::fs::read_to_string(&manifest_path).unwrap()).unwrap(); + // Sort manifests so left (v1) comes first + manifests.sort_by_key(|m| m["commit"].as_str().unwrap() == v2_sha); + let left_manifest = &manifests[0]; + let right_manifest = &manifests[1]; - // Refs - assert_eq!(manifest["left_ref"], "v1"); - assert_eq!(manifest["right_ref"], "v2"); - assert!(manifest["timestamp"].is_string(), "missing timestamp"); + assert_eq!(left_manifest["commit"].as_str().unwrap(), v1_sha, "left run should match v1 SHA"); + assert_eq!(right_manifest["commit"].as_str().unwrap(), v2_sha, "right run should match v2 SHA"); // Left side: both tests pass (PACKET_COUNT=5 >= THRESHOLD=3) - let left = &manifest["summary"]["left"]; - assert_eq!(left["pass"].as_u64().unwrap(), 2, "left should have 2 passes"); - assert_eq!(left["fail"].as_u64().unwrap(), 0, "left should have 0 failures"); - assert_eq!(left["total"].as_u64().unwrap(), 2); + assert_eq!(left_manifest["pass"].as_u64().unwrap(), 2, "left should have 2 passes"); + assert_eq!(left_manifest["fail"].as_u64().unwrap(), 0, "left should have 0 failures"); + assert_eq!(left_manifest["total"].as_u64().unwrap(), 2); // Right side: udp_threshold fails (PACKET_COUNT=2 < THRESHOLD=3) - let right = &manifest["summary"]["right"]; - assert_eq!(right["pass"].as_u64().unwrap(), 1, "right should have 1 pass"); - assert_eq!(right["fail"].as_u64().unwrap(), 1, "right should have 1 failure"); - assert_eq!(right["total"].as_u64().unwrap(), 2); - - // Regression/fix counts - let summary = &manifest["summary"]; - assert_eq!(summary["regressions"].as_u64().unwrap(), 1); - assert_eq!(summary["fixes"].as_u64().unwrap(), 0); - assert!(summary["score"].as_i64().unwrap() < 0, "score should be negative"); + assert_eq!(right_manifest["pass"].as_u64().unwrap(), 1, "right should have 1 pass"); + assert_eq!(right_manifest["fail"].as_u64().unwrap(), 1, "right should have 1 failure"); + assert_eq!(right_manifest["total"].as_u64().unwrap(), 2); // Per-test results - let left_results = manifest["left_results"].as_array().unwrap(); - let right_results = manifest["right_results"].as_array().unwrap(); - assert_eq!(left_results.len(), 2, "should have 2 left test results"); - assert_eq!(right_results.len(), 2, "should have 2 right test results"); + let left_tests = left_manifest["tests"].as_array().unwrap(); + let right_tests = right_manifest["tests"].as_array().unwrap(); + assert_eq!(left_tests.len(), 2, "should have 2 left test results"); + assert_eq!(right_tests.len(), 2, "should have 2 right test results"); // Find the threshold test in right results — it should fail - let threshold_right = right_results + let threshold_right = right_tests .iter() .find(|r| r["name"].as_str().unwrap().contains("udp_threshold")) .expect("udp_threshold test not found in right results"); assert_eq!(threshold_right["status"], "fail"); // Find the threshold test in left results — it should pass - let threshold_left = left_results + let threshold_left = left_tests .iter() .find(|r| r["name"].as_str().unwrap().contains("udp_threshold")) .expect("udp_threshold test not found in left results"); diff --git a/patchbay-runner/src/sim/progress.rs b/patchbay-runner/src/sim/progress.rs index 1b52a89..94573f9 100644 --- a/patchbay-runner/src/sim/progress.rs +++ b/patchbay-runner/src/sim/progress.rs @@ -24,8 +24,12 @@ pub(crate) struct ManifestSimSummary { pub(crate) error: Option, } +/// Per-sim execution report written to `manifest.json`. +/// +/// Distinct from [`patchbay_utils::manifest::RunManifest`] which is the +/// unified `run.json` metadata (git context, outcome, etc.). #[derive(Debug, Clone, Serialize)] -pub(crate) struct RunManifest { +pub(crate) struct SimRunReport { pub(crate) run: String, pub(crate) started_at: String, pub(crate) status: String, @@ -70,8 +74,8 @@ pub(crate) fn now_stamp() -> String { chrono::Utc::now().format("%y%m%d-%H%M%S").to_string() } -pub(crate) async fn write_run_manifest(run_root: &Path, manifest: &RunManifest) -> Result<()> { - write_json(run_root.join("manifest.json"), manifest).await +pub(crate) async fn write_sim_report(run_root: &Path, report: &SimRunReport) -> Result<()> { + write_json(run_root.join("manifest.json"), report).await } pub(crate) async fn write_progress(run_root: &Path, progress: &RunProgress) -> Result<()> { diff --git a/patchbay-runner/src/sim/runner.rs b/patchbay-runner/src/sim/runner.rs index 56497e0..4001545 100644 --- a/patchbay-runner/src/sim/runner.rs +++ b/patchbay-runner/src/sim/runner.rs @@ -20,7 +20,7 @@ use crate::sim::{ env::SimEnv, progress::{ collect_run_environment, format_timestamp, now_stamp, write_json, write_progress, - write_run_manifest, ManifestSimSummary, ProgressSim, RunManifest, RunProgress, + write_sim_report, ManifestSimSummary, ProgressSim, SimRunReport, RunProgress, }, report::{ print_run_summary_table_for_runs, write_combined_results_for_runs, write_results, @@ -201,8 +201,8 @@ pub async fn run_sims( }; write_progress(&run_root, &progress).await?; let initial_manifest = - build_run_manifest(&run_root, run_start, None, None, None, &progress, &[])?; - write_run_manifest(&run_root, &initial_manifest).await?; + build_sim_report(&run_root, run_start, None, None, None, &progress, &[])?; + write_sim_report(&run_root, &initial_manifest).await?; let mut sim_dir_names = Vec::new(); let mut outcomes = Vec::new(); @@ -254,8 +254,8 @@ pub async fn run_sims( .await .context("write incremental combined results")?; let running_manifest = - build_run_manifest(&run_root, run_start, None, None, None, &progress, &outcomes)?; - write_run_manifest(&run_root, &running_manifest).await?; + build_sim_report(&run_root, run_start, None, None, None, &progress, &outcomes)?; + write_sim_report(&run_root, &running_manifest).await?; } write_combined_results_for_runs(&run_root, &sim_dir_names) .await @@ -266,7 +266,7 @@ pub async fn run_sims( progress.status = "done".to_string(); progress.updated_at = format_timestamp(run_end); write_progress(&run_root, &progress).await?; - let run_manifest = build_run_manifest( + let run_manifest = build_sim_report( &run_root, run_start, Some(run_end), @@ -275,7 +275,7 @@ pub async fn run_sims( &progress, &outcomes, )?; - write_run_manifest(&run_root, &run_manifest).await?; + write_sim_report(&run_root, &run_manifest).await?; let failed: Vec<&SimRunOutcome> = outcomes.iter().filter(|outcome| !outcome.success).collect(); if !failed.is_empty() { let mut msg = String::from("one or more simulations failed:"); @@ -742,7 +742,7 @@ fn prepare_run_root(work_root: &Path) -> Result { std::fs::create_dir_all(work_root) .with_context(|| format!("create work root {}", work_root.display()))?; let stamp = now_stamp(); - let run_base = format!("sim-{}", stamp); + let run_base = format!("run-{}", stamp); let run_dir = create_unique_dir(work_root, &run_base)?; let run_name = run_dir .file_name() @@ -962,7 +962,7 @@ async fn write_sim_summary(run_work_dir: &Path, summary: &SimSummary) -> Result< write_json(run_work_dir.join("sim.json"), summary).await } -fn build_run_manifest( +fn build_sim_report( run_root: &Path, started_at: SystemTime, ended_at: Option, @@ -970,7 +970,7 @@ fn build_run_manifest( success: Option, progress: &RunProgress, outcomes: &[SimRunOutcome], -) -> Result { +) -> Result { let run = run_root .file_name() .and_then(|s| s.to_str()) @@ -1006,7 +1006,7 @@ fn build_run_manifest( } }) .collect(); - Ok(RunManifest { + Ok(SimRunReport { run, started_at: format_timestamp(started_at), status: progress.status.clone(), diff --git a/patchbay-server/Cargo.toml b/patchbay-server/Cargo.toml index 8aec91a..d60cc83 100644 --- a/patchbay-server/Cargo.toml +++ b/patchbay-server/Cargo.toml @@ -29,6 +29,7 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } uuid = { version = "1", features = ["v4"] } chrono = "0.4" +patchbay-utils = { path = "../patchbay-utils" } axum-server = "0.7" rustls = "0.23" diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index 22d9d8e..44f55f7 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -57,8 +57,8 @@ const RUN_SCAN_INTERVAL: Duration = Duration::from_secs(2); /// Metadata for a single Lab run directory. /// -/// A directory is a run if it contains `events.jsonl`. -#[derive(Debug, Clone, Serialize)] +/// A directory is a run if it contains `events.jsonl` or `run.json`. +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct RunInfo { /// Directory name (e.g. `"20260303_143001-my-lab"`). pub name: String, @@ -72,9 +72,10 @@ pub struct RunInfo { /// This is the per-sim lab state, not the CI test outcome — see /// [`RunManifest::test_outcome`] for the overall pass/fail from CI. pub status: Option, - /// Batch group (first path component for nested runs, `None` for flat/direct). - pub batch: Option, - /// CI manifest from `run.json` in the batch directory, if present. + /// Group (first path component for nested runs, `None` for flat/direct). + #[serde(alias = "batch")] + pub group: Option, + /// CI manifest from `run.json` in the group directory, if present. #[serde(skip_serializing_if = "Option::is_none")] pub manifest: Option, } @@ -89,7 +90,7 @@ const MAX_SCAN_DEPTH: usize = 3; /// that contain `events.jsonl`. pub fn discover_runs(base: &Path) -> anyhow::Result> { // If the base dir itself is a run, serve only that. - if base.join(EVENTS_JSONL).exists() { + if base.join(EVENTS_JSONL).exists() || base.join(RUN_JSON).exists() { let name = base .file_name() .map(|n| n.to_string_lossy().into_owned()) @@ -100,7 +101,7 @@ pub fn discover_runs(base: &Path) -> anyhow::Result> { path: base.to_path_buf(), label, status, - batch: None, + group: None, manifest: read_run_json(base), }]); } @@ -112,7 +113,7 @@ pub fn discover_runs(base: &Path) -> anyhow::Result> { let mut manifest_cache: std::collections::HashMap> = std::collections::HashMap::new(); for run in &mut runs { - let inv = run.batch.clone().unwrap_or_else(|| run.name.clone()); + let inv = run.group.clone().unwrap_or_else(|| run.name.clone()); let manifest = manifest_cache .entry(inv.clone()) .or_insert_with(|| read_run_json(&base.join(&inv))) @@ -147,7 +148,7 @@ fn scan_runs_recursive( if !path.is_dir() { continue; } - if path.join(EVENTS_JSONL).exists() { + if path.join(EVENTS_JSONL).exists() || path.join(RUN_JSON).exists() { // Use the relative path from root as the run name so nested // runs are addressable via the API (e.g. "sim-20260305/ping-e2e"). let name = path @@ -156,9 +157,9 @@ fn scan_runs_recursive( .to_string_lossy() .into_owned(); let (label, status) = read_run_metadata(&path); - // Derive batch from the first path component (the timestamped + // Derive group from the first path component (the timestamped // directory) when the run is nested more than one level deep. - let batch = name + let group = name .split('/') .next() .filter(|first| *first != name) @@ -168,7 +169,7 @@ fn scan_runs_recursive( path, label, status, - batch, + group, manifest: None, // populated after scan }); } else { @@ -236,6 +237,7 @@ fn build_router(state: AppState) -> Router { // SPA fallback: serve index.html for client-side routes. .route("/run/{*rest}", get(index_html)) .route("/batch/{*rest}", get(index_html)) + .route("/compare/{*rest}", get(index_html)) .route("/inv/{*rest}", get(index_html)) .route("/api/runs", get(get_runs)) .route("/api/runs/subscribe", get(runs_sse)) @@ -353,8 +355,55 @@ async fn index_html() -> Html<&'static str> { Html(include_str!("../../ui/dist/index.html")) } -async fn get_runs(State(state): State) -> impl IntoResponse { - let runs = discover_runs(&state.base).unwrap_or_default(); +#[derive(Deserialize)] +struct RunsQuery { + project: Option, + kind: Option, + limit: Option, + offset: Option, +} + +async fn get_runs( + Query(params): Query, + State(state): State, +) -> impl IntoResponse { + let mut runs = discover_runs(&state.base).unwrap_or_default(); + + // Filter by project (matched against manifest.project). + if let Some(ref project) = params.project { + runs.retain(|r| { + r.manifest + .as_ref() + .and_then(|m| m.project.as_deref()) + .map(|p| p == project) + .unwrap_or(false) + }); + } + + // Filter by kind (matched against manifest.kind, e.g. "test" or "sim"). + if let Some(ref kind) = params.kind { + runs.retain(|r| { + r.manifest + .as_ref() + .map(|m| { + let k = serde_json::to_value(m.kind) + .ok() + .and_then(|v| v.as_str().map(String::from)); + k.as_deref() == Some(kind.as_str()) + }) + .unwrap_or(false) + }); + } + + // Pagination. + let offset = params.offset.unwrap_or(0); + if offset > 0 { + runs = runs.into_iter().skip(offset).collect(); + } + if let Some(limit) = params.limit { + runs.truncate(limit); + } + ( StatusCode::OK, [("content-type", "application/json")], @@ -837,38 +886,7 @@ async fn scan_log_files(run_dir: &Path) -> Vec { // ── Run manifest (run.json) ───────────────────────────────────────── -/// Manifest included with pushed runs, providing CI context. -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct RunManifest { - /// Project name (from URL path). - #[serde(default)] - pub project: String, - /// Git branch name. - #[serde(default)] - pub branch: Option, - /// Git commit SHA. - #[serde(default)] - pub commit: Option, - /// PR number. - #[serde(default)] - pub pr: Option, - /// PR URL. - #[serde(default)] - pub pr_url: Option, - /// When this run was created. - #[serde(default)] - pub created_at: Option, - /// Human-readable run title/label. - #[serde(default)] - pub title: Option, - /// Overall CI test outcome (e.g. `"success"`, `"failure"`). - /// - /// This is the result of the CI test step, not the lab lifecycle status. - /// The lab lifecycle status lives in `state.json` as `RunInfo::status` - /// and tracks per-sim states like "running" or "finished". - #[serde(default, alias = "status")] - pub test_outcome: Option, -} +pub use patchbay_utils::manifest::RunManifest; const RUN_JSON: &str = "run.json"; @@ -937,12 +955,13 @@ async fn push_run( // Notify subscribers about new run let _ = state.runs_tx.send(()); - // run_name is the batch name (first path component for all sims inside) + // run_name is the group name (first path component for all sims inside) let result = serde_json::json!({ "ok": true, "project": project, "run": run_name, - "batch": run_name, + "group": run_name, + "batch": run_name, // backward compat }); (StatusCode::OK, serde_json::to_string(&result).unwrap()) diff --git a/patchbay-utils/Cargo.toml b/patchbay-utils/Cargo.toml index edcd428..e6a6a08 100644 --- a/patchbay-utils/Cargo.toml +++ b/patchbay-utils/Cargo.toml @@ -17,4 +17,5 @@ serde_json = "1" sha2 = "0.10" tar = "0.4" tracing = "0.1" +chrono = { version = "0.4", default-features = false, features = ["clock", "serde", "std"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] } diff --git a/patchbay-utils/src/lib.rs b/patchbay-utils/src/lib.rs index f1d651d..d944c34 100644 --- a/patchbay-utils/src/lib.rs +++ b/patchbay-utils/src/lib.rs @@ -1,5 +1,6 @@ pub mod assets; pub mod binary_cache; +pub mod manifest; /// Initialises tracing for the current process (idempotent). /// diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs new file mode 100644 index 0000000..f1bd3c1 --- /dev/null +++ b/patchbay-utils/src/manifest.rs @@ -0,0 +1,330 @@ +//! Unified run manifest types shared across the patchbay workspace. +//! +//! Every execution (test or sim) writes a `run.json` manifest with git context. +//! This module defines the canonical types for that manifest. + +use std::{ + fs, + path::{Path, PathBuf}, + process::Command, + time::Duration, +}; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +// ── Duration serde helpers ────────────────────────────────────────── + +/// Serialize/deserialize a [`Duration`] as integer milliseconds. +pub mod duration_ms { + use serde::{Deserialize, Deserializer, Serializer}; + use std::time::Duration; + + pub fn serialize(d: &Duration, s: S) -> Result { + s.serialize_u64(d.as_millis() as u64) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result { + Ok(Duration::from_millis(u64::deserialize(d)?)) + } +} + +/// Serialize/deserialize an `Option` as integer milliseconds. +pub mod option_duration_ms { + use serde::{Deserialize, Deserializer, Serializer}; + use std::time::Duration; + + pub fn serialize(d: &Option, s: S) -> Result { + match d { + Some(d) => s.serialize_u64(d.as_millis() as u64), + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { + Ok(Option::::deserialize(d)?.map(Duration::from_millis)) + } +} + +// ── Core types ────────────────────────────────────────────────────── + +/// What produced a run. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum RunKind { + Test, + #[default] + Sim, +} + +/// Per-test pass/fail/ignored status. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TestStatus { + Pass, + Fail, + Ignored, +} + +/// A single test result with name, status, and optional duration. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestResult { + pub name: String, + pub status: TestStatus, + /// Test duration, serialized as integer milliseconds. + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "option_duration_ms" + )] + pub duration: Option, +} + +/// Unified manifest written as `run.json` alongside every run. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RunManifest { + // ── Identity ── + #[serde(default)] + pub kind: RunKind, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub project: Option, + + // ── Git context ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub commit: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub branch: Option, + #[serde(default)] + pub dirty: bool, + + // ── CI context (populated from env vars when available) ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pr: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pr_url: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub title: Option, + + // ── Execution ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub started_at: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ended_at: Option>, + /// Total runtime, serialized as integer milliseconds. + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "option_duration_ms" + )] + pub runtime: Option, + + // ── Outcome ── + /// "pass" or "fail". Aliases for backward compat with old run.json fields. + #[serde( + default, + skip_serializing_if = "Option::is_none", + alias = "test_outcome", + alias = "status" + )] + pub outcome: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pass: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub fail: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub total: Option, + + // ── Per-test results (kind == Test only) ── + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tests: Vec, + + // ── Environment ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub os: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub arch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub patchbay_version: Option, +} + +// ── Git helpers ───────────────────────────────────────────────────── + +/// Snapshot of git repository state. +pub struct GitContext { + pub commit: Option, + pub branch: Option, + pub dirty: bool, +} + +/// Capture the current git HEAD commit, branch, and dirty state. +pub fn git_context() -> GitContext { + let commit = Command::new("git") + .args(["rev-parse", "HEAD"]) + .output() + .ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()); + let branch = Command::new("git") + .args(["rev-parse", "--abbrev-ref", "HEAD"]) + .output() + .ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()) + .filter(|s| s != "HEAD"); + let dirty = !Command::new("git") + .args(["diff", "--quiet"]) + .status() + .map(|s| s.success()) + .unwrap_or(true); + GitContext { + commit, + branch, + dirty, + } +} + +/// Resolve a git ref (branch name, tag, or SHA prefix) to a full commit SHA. +pub fn resolve_ref(git_ref: &str) -> Option { + Command::new("git") + .args(["rev-parse", git_ref]) + .output() + .ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()) +} + +// ── Run lookup ────────────────────────────────────────────────────── + +/// Find a persisted run matching commit SHA and kind. +/// +/// Scans `work_dir/*/run.json` for a manifest whose `commit` and `kind` +/// match and whose `dirty` flag is `false`. +pub fn find_run_for_commit( + work_dir: &Path, + commit: &str, + kind: RunKind, +) -> Option<(PathBuf, RunManifest)> { + for entry in fs::read_dir(work_dir).ok()?.flatten() { + let run_json = entry.path().join("run.json"); + if let Ok(text) = fs::read_to_string(&run_json) { + if let Ok(m) = serde_json::from_str::(&text) { + if m.kind == kind && m.commit.as_deref() == Some(commit) && !m.dirty { + return Some((entry.path(), m)); + } + } + } + } + None +} + +// ── Test output parsing ───────────────────────────────────────────── + +/// Parse `cargo test` / `cargo nextest` stdout into per-test results. +/// +/// Recognises lines of the form: +/// - `test some::path ... ok` +/// - `test some::path ... FAILED` +/// - `test some::path ... ignored` +pub fn parse_test_output(output: &str) -> Vec { + let mut results = Vec::new(); + for line in output.lines() { + let line = line.trim(); + let Some(rest) = line.strip_prefix("test ") else { + continue; + }; + let Some((name, status_str)) = rest.rsplit_once(" ... ") else { + continue; + }; + let status = match status_str.trim() { + "ok" => TestStatus::Pass, + "FAILED" => TestStatus::Fail, + "ignored" => TestStatus::Ignored, + _ => continue, + }; + results.push(TestResult { + name: name.trim().to_string(), + status, + duration: None, + }); + } + results +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_test_output() { + let output = "\ +running 3 tests +test foo::bar ... ok +test baz ... FAILED +test qux ... ignored + +test result: FAILED. 1 passed; 1 failed; 1 ignored; +"; + let results = parse_test_output(output); + assert_eq!(results.len(), 3); + assert_eq!(results[0].name, "foo::bar"); + assert_eq!(results[0].status, TestStatus::Pass); + assert_eq!(results[1].name, "baz"); + assert_eq!(results[1].status, TestStatus::Fail); + assert_eq!(results[2].name, "qux"); + assert_eq!(results[2].status, TestStatus::Ignored); + } + + #[test] + fn test_duration_ms_roundtrip() { + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct T { + #[serde(with = "duration_ms")] + d: Duration, + } + let t = T { + d: Duration::from_millis(1234), + }; + let json = serde_json::to_string(&t).unwrap(); + assert_eq!(json, r#"{"d":1234}"#); + let t2: T = serde_json::from_str(&json).unwrap(); + assert_eq!(t, t2); + } + + #[test] + fn test_option_duration_ms_roundtrip() { + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct T { + #[serde(with = "option_duration_ms")] + d: Option, + } + let t = T { + d: Some(Duration::from_millis(42)), + }; + let json = serde_json::to_string(&t).unwrap(); + assert_eq!(json, r#"{"d":42}"#); + let t2: T = serde_json::from_str(&json).unwrap(); + assert_eq!(t, t2); + + let none = T { d: None }; + let json = serde_json::to_string(&none).unwrap(); + assert_eq!(json, r#"{"d":null}"#); + let t3: T = serde_json::from_str(&json).unwrap(); + assert_eq!(none, t3); + } + + #[test] + fn test_run_manifest_backward_compat() { + // Old-style run.json with test_outcome instead of outcome + let json = r#"{ + "kind": "sim", + "test_outcome": "success", + "project": "iroh" + }"#; + let m: RunManifest = serde_json::from_str(json).unwrap(); + assert_eq!(m.outcome.as_deref(), Some("success")); + assert_eq!(m.kind, RunKind::Sim); + } +} diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index 530d7e3..87939d8 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -84,24 +84,24 @@ impl Backend { /// Implement VmOps on Backend by delegating to the resolved backend. impl VmOps for Backend { fn up(&self, recreate: bool) -> anyhow::Result<()> { - match self { Self::Container => Container.up(recreate), _ => Qemu.up(recreate) } + match self { Self::Container => Container.up(recreate), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.up(recreate) } } fn down(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.down(), _ => Qemu.down() } + match self { Self::Container => Container.down(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.down() } } fn status(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.status(), _ => Qemu.status() } + match self { Self::Container => Container.status(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.status() } } fn cleanup(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.cleanup(), _ => Qemu.cleanup() } + match self { Self::Container => Container.cleanup(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.cleanup() } } fn exec(&self, cmd: Vec) -> anyhow::Result<()> { - match self { Self::Container => Container.exec(cmd), _ => Qemu.exec(cmd) } + match self { Self::Container => Container.exec(cmd), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.exec(cmd) } } fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { - match self { Self::Container => Container.run_sims(args), _ => Qemu.run_sims(args) } + match self { Self::Container => Container.run_sims(args), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.run_sims(args) } } fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { - match self { Self::Container => Container.run_tests(args), _ => Qemu.run_tests(args) } + match self { Self::Container => Container.run_tests(args), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.run_tests(args) } } } diff --git a/plans/compare-refactor.md b/plans/compare-refactor.md new file mode 100644 index 0000000..84a16db --- /dev/null +++ b/plans/compare-refactor.md @@ -0,0 +1,503 @@ +# Compare & Run Data Model Refactor + +## Problem Statement + +The current compare implementation creates separate `compare-{timestamp}/` directories with +a `summary.json` that duplicates test results. Runs and tests lack a unified manifest, making +it impossible to find "the run for commit X" or compare arbitrary runs in the UI. Three +different `RunManifest` structs exist across crates. The naming is inconsistent (`batch`, +`invocation`, `sim-` prefix for what is actually a "run"). + +This refactor unifies the data model so that: +- Every execution (test or sim) writes a `run.json` manifest with git context +- Compare is a view over two existing runs, not a separate artifact +- The UI can compare any two runs from the same project +- `patchbay compare` is smart about caching (skip if run for that ref already exists) + +## Naming + +Everything is a **run**. A run has a `kind` field (enum: `Test` or `Sim`). + +| Term | Meaning | +|------|---------| +| **run** | Any single execution. The atomic unit everywhere. | +| **kind** | `RunKind::Test` or `RunKind::Sim` — what produced the run | +| **group** | When `patchbay run sims/` processes N sim TOMLs, the top-level `run-{timestamp}/` dir is the group. Each sim inside is a nested run. For tests, each test binary's output under `testdir-current/` is a nested run (if it has `events.jsonl`). The group shares the `run.json` manifest. | +| **project** | A named scope for filtering & comparing (e.g. `"iroh"`) | + +"batch" is retired (kept as serde alias for backward compat). + +### Directory naming + +| Context | Current | New | +|---------|---------|-----| +| Sim run root | `sim-YYMMDD-HHMMSS/` | `run-YYMMDD-HHMMSS/` | +| Pushed run | `{project}-{date}-{uuid}/` | unchanged | +| Compare dir | `compare-{timestamp}/` | **removed** (compare is computed on the fly) | +| Worktree | `.patchbay/tree/{ref}/` | unchanged | +| VM state | `.patchbay/vm/` | unchanged | +| Image cache | `~/.local/share/patchbay/qemu-images/` | unchanged (stays in XDG) | + +### Testdir nesting + +`testdir!()` creates nested subdirectories for module paths: +`testdir-current/crate_name/module/test_name/`. The server scans up to 3 levels deep +for `events.jsonl`, so nested Lab output is discovered automatically. This is fine as-is. + +## Unified `run.json` Manifest + +One struct, defined in `patchbay-utils/src/manifest.rs` (shared between runner, CLI, server): + +```rust +use chrono::{DateTime, Utc}; +use std::time::Duration; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum RunKind { + Test, + Sim, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TestStatus { + Pass, + Fail, + Ignored, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestResult { + pub name: String, + pub status: TestStatus, + /// Test duration, serialized as integer milliseconds. + #[serde(default, skip_serializing_if = "Option::is_none", with = "option_duration_ms")] + pub duration: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RunManifest { + // ── Identity ── + pub kind: RunKind, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub project: Option, + + // ── Git context ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub commit: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub branch: Option, + #[serde(default)] + pub dirty: bool, + + // ── CI context (populated from env vars when available) ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pr: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pr_url: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub title: Option, + + // ── Execution ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub started_at: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ended_at: Option>, + /// Total runtime, serialized as integer milliseconds. + #[serde(default, skip_serializing_if = "Option::is_none", with = "option_duration_ms")] + pub runtime: Option, + + // ── Outcome ── + /// "pass" or "fail". Aliases for backward compat with old run.json fields. + #[serde(default, skip_serializing_if = "Option::is_none", + alias = "test_outcome", alias = "status")] + pub outcome: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pass: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub fail: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub total: Option, + + // ── Per-test results (kind == Test only) ── + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tests: Vec, + + // ── Environment ── + #[serde(default, skip_serializing_if = "Option::is_none")] + pub os: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub arch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub patchbay_version: Option, +} +``` + +### Duration serialization module (in patchbay-utils) + +Move the existing `duration_ms` serde module from `compare.rs` to `patchbay-utils/src/manifest.rs`. +Add an `option_duration_ms` variant for `Option` fields. + +```rust +pub mod duration_ms { + use std::time::Duration; + use serde::{Deserialize, Deserializer, Serializer}; + + pub fn serialize(d: &Duration, s: S) -> Result { + s.serialize_u64(d.as_millis() as u64) + } + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result { + Ok(Duration::from_millis(u64::deserialize(d)?)) + } +} + +pub mod option_duration_ms { + use std::time::Duration; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(d: &Option, s: S) -> Result { + match d { + Some(d) => s.serialize_u64(d.as_millis() as u64), + None => s.serialize_none(), + } + } + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { + Ok(Option::::deserialize(d)?.map(Duration::from_millis)) + } +} +``` + +### Git context helper (in patchbay-utils) + +```rust +pub struct GitContext { + pub commit: Option, + pub branch: Option, + pub dirty: bool, +} + +pub fn git_context() -> GitContext { + let commit = Command::new("git").args(["rev-parse", "HEAD"]).output().ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()); + let branch = Command::new("git").args(["rev-parse", "--abbrev-ref", "HEAD"]).output().ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()) + .filter(|s| s != "HEAD"); + let dirty = !Command::new("git").args(["diff", "--quiet"]).status() + .map(|s| s.success()).unwrap_or(true); + GitContext { commit, branch, dirty } +} + +/// Resolve a git ref (branch name, tag, or SHA prefix) to a full commit SHA. +pub fn resolve_ref(git_ref: &str) -> Option { + Command::new("git").args(["rev-parse", git_ref]).output().ok() + .filter(|o| o.status.success()) + .and_then(|o| String::from_utf8(o.stdout).ok()) + .map(|s| s.trim().to_string()) +} +``` + +### Who writes `run.json` + +| Command | Where | kind | Git info | +|---------|-------|------|----------| +| `patchbay test` | testdir output root | `Test` | `git_context()` | +| `patchbay test --persist` | also copies to `.patchbay/work/run-{ts}/` | `Test` | same | +| `patchbay run` | `run_root` (`run-{ts}/` dir) | `Sim` | `git_context()` | +| `patchbay upload` | writes if missing, reads if present | either | from CI env vars | +| `patchbay compare test` | each worktree test run writes its own | `Test` | worktree HEAD | + +## How `patchbay run` changes + +`prepare_run_root` creates `run-{timestamp}/` (rename from `sim-{timestamp}/`). + +After all sims finish, write `run.json` alongside the existing `manifest.json`. +Rename the runner's `RunManifest` (in `progress.rs`) → `SimRunReport` to avoid collision. +Both files coexist for now; `manifest.json` has per-sim details, `run.json` has unified metadata. +Docs clarify the distinction. Long-term merge target. + +### Group semantics for sim runs + +When `patchbay run sims/` processes multiple TOML files, the `run-{timestamp}/` directory is +the group. Each sim inside (`my-sim/`, `my-sim-2/`) is a nested run with its own `events.jsonl`. +The server discovers nested runs and derives `group` from the first path component. +The group-level `run.json` provides the shared manifest. + +## How `patchbay test` changes + +After `cargo test` / `nextest` finishes: + +1. Pipe stdout/stderr, parse test output via `parse_test_output()` for per-test results +2. Locate `testdir-current/` via cargo metadata +3. Also set `PATCHBAY_OUTDIR` env var so Labs write to a known location +4. Write `run.json` into testdir-current/ (or PATCHBAY_OUTDIR if it was used and is non-empty) +5. If `--persist` flag is set, copy the whole thing into `.patchbay/work/run-{ts}/` + +### testdir and PATCHBAY_OUTDIR + +`patchbay test` sets `PATCHBAY_OUTDIR` when running cargo test. After the test finishes: +- If the PATCHBAY_OUTDIR directory exists and is non-empty → use it for run.json +- Otherwise check if testdir-current exists → use that +- Write `run.json` with kind, git context, parsed test results + +Consider re-exporting `testdir` from the patchbay crate (`patchbay::testdir`) for convenience. + +## How `patchbay compare` changes + +### New flow + +``` +patchbay compare test [ref2] [-- test-args] + --force-build Force rebuild even if cached run exists + --no-ref-build Don't build; fail if no cached run found +``` + +1. **Resolve refs to commits**: `resolve_ref(ref)` → full SHA +2. **Check for cached runs**: `find_run_for_commit(".patchbay/work", sha, RunKind::Test)` + scans `*/run.json` for matching `commit` + `kind` + `dirty == false` +3. **For each ref without a cached run**: + - If `--no-ref-build`: fail with "no run found for {ref}, use --force-build" + - Create worktree at `.patchbay/tree/{ref}/` + - Run `cargo test` in worktree, persist results to `.patchbay/work/run-{ts}/` + - Clean up worktree +4. **For current worktree** (when ref2 is omitted): + - Check if a run exists for HEAD (match `dirty` against current state) + - If not, run tests and persist +5. **If `--force-build`**: skip cache check, always build & run +6. **Diff**: Load both `run.json` manifests, compare `tests` arrays + - Print summary table + score + - Exit non-zero on regressions + +### Cached run lookup + +```rust +/// Find a persisted run matching commit SHA and kind. +pub fn find_run_for_commit(work_dir: &Path, commit: &str, kind: RunKind) -> Option<(PathBuf, RunManifest)> { + for entry in fs::read_dir(work_dir).ok()?.flatten() { + let run_json = entry.path().join("run.json"); + if let Ok(text) = fs::read_to_string(&run_json) { + if let Ok(m) = serde_json::from_str::(&text) { + if m.kind == kind && m.commit.as_deref() == Some(commit) && !m.dirty { + return Some((entry.path(), m)); + } + } + } + } + None +} +``` + +### Comparing two RunManifests + +`compare_results()` takes two `&RunManifest`s and returns a computed summary. Uses the `tests` +field for per-test diff. Same scoring logic as before (fixes +3, regressions -5, time delta ±1). + +No `CompareManifest` or `CompareSummary` structs stored to disk. The summary is printed to +stdout and optionally returned as JSON for the UI. + +## Server changes + +### Discovery + +Extend `discover_runs` to detect directories with `run.json` in addition to `events.jsonl`: +```rust +if path.join(EVENTS_JSONL).exists() || path.join(RUN_JSON).exists() { + // This is a run +} +``` + +### RunInfo changes + +```rust +pub struct RunInfo { + pub name: String, + #[serde(skip)] + pub path: PathBuf, + pub label: Option, + pub status: Option, + /// Group name (first path component for nested runs). + /// Serialized as both "group" and legacy "batch". + #[serde(alias = "batch")] + pub group: Option, + pub manifest: Option, // unified RunManifest from patchbay-utils +} +``` + +### API changes + +- `GET /api/runs` gains optional query params: `?project=X&kind=test&limit=100&offset=0` +- Response includes `group` field (with `batch` as serde alias) +- Keep `/api/batches/` and `/api/invocations/` routes as aliases +- Compare is computed client-side (no new server endpoint needed) + +## UI changes + +### Runs index redesign + +Single page at `/`: + +``` +┌──────────────────────────────────────────────────────┐ +│ Runs [Project ▾] [Kind ▾] [< 1 2 3 >] │ +│ │ +│ ☐ main@abc123 test 2m ago 47/50 pass [view] │ +│ ☐ main@def456 test 1h ago 45/50 pass [view] │ +│ ☐ feat@789abc sim 3h ago pass [view] │ +│ │ +│ [Compare Selected (2)] │ +└──────────────────────────────────────────────────────┘ +``` + +- **Sorted by date** (newest first, from `started_at` or dir name) +- **Project filter** dropdown (populated from unique `manifest.project` values) +- **Kind filter** dropdown (test/sim/all) +- **Pagination** (100 per page) +- **Checkboxes** for multi-select → "Compare Selected" button +- Click row → `/run/{name}` detail view +- Grouped runs (multi-sim) show as expandable rows + +### Compare view + +Route: `/compare/:left/:right` + +- Fetch both runs' `run.json` via `/api/runs/{name}/files/run.json` +- Compute diff client-side (same logic as CLI compare) +- Summary bar: left ref, right ref, pass/fail counts, score +- Per-test table: name, left status, right status, delta badge +- Side-by-side metrics (if metrics.jsonl exists in both) +- "Compare with..." button on individual run pages (picker shows same-project runs) + +### Co-navigation + +Split-screen layout reusing RunView for each side. Shared tab state — switching tab on +one side switches both. Scroll sync optional (defer to v2). + +### Router + +``` +/ → RunsIndex +/run/:name → RunDetail (single run view) +/compare/:left/:right → CompareView (side-by-side) +/batch/:name → alias for group view +/inv/:name → redirect to /batch/:name (legacy) +``` + +## Implementation Phases + +### Phase 1: Unified RunManifest + run.json everywhere + +**Commit 1a: Move manifest types to patchbay-utils** +- Create `patchbay-utils/src/manifest.rs` +- Define `RunKind`, `TestStatus`, `TestResult`, `RunManifest`, `GitContext` +- Move `duration_ms` / `option_duration_ms` serde modules there +- Add `git_context()`, `resolve_ref()`, `find_run_for_commit()` helpers +- Export from `patchbay-utils/src/lib.rs` +- Add `chrono` dependency to patchbay-utils (workspace dep) +- Delete duplicate RunManifest from `patchbay-cli/src/upload.rs` +- Delete duplicate RunManifest from `patchbay-server/src/lib.rs` +- Import from `patchbay_utils::manifest::*` in both +- Server: keep backward-compat serde aliases +- Files changed: `patchbay-utils/{Cargo.toml, src/lib.rs, src/manifest.rs}`, + `patchbay-cli/src/upload.rs`, `patchbay-server/src/lib.rs`, + `patchbay-cli/src/compare.rs` (delete TestResult/TestStatus, import from utils) + +**Commit 1b: Rename runner's RunManifest → SimRunReport** +- In `patchbay-runner/src/sim/progress.rs`: rename `RunManifest` → `SimRunReport` +- Update all references in `runner.rs` +- Add doc comments distinguishing from the unified `run.json` manifest +- Files changed: `patchbay-runner/src/sim/progress.rs`, `patchbay-runner/src/sim/runner.rs` + +**Commit 1c: `patchbay run` writes run.json** +- In `runner.rs::run_sims()`, after writing `manifest.json`, also write `run.json` + using `patchbay_utils::manifest::RunManifest` with `kind: Sim` +- Rename dir prefix `sim-` → `run-` in `prepare_run_root()` +- Files changed: `patchbay-runner/src/sim/runner.rs` + +**Commit 1d: `patchbay test` writes run.json + --persist** +- Pipe stdout/stderr from cargo test, parse with `parse_test_output()` +- Write `run.json` to testdir-current (or PATCHBAY_OUTDIR) with test results +- Add `--persist` flag to Test command: copies output dir to `.patchbay/work/run-{ts}/` +- Set `PATCHBAY_OUTDIR` env var when running cargo test +- Files changed: `patchbay-cli/src/test.rs`, `patchbay-cli/src/main.rs` + +### Phase 2: Refactor compare to use cached runs + +**Commit 2a: Compare uses run.json + cache lookup** +- Rewrite compare flow: resolve refs → check cache → build if needed → diff run.json +- Delete `CompareManifest`, `CompareSummary` structs (compare is computed, not stored) +- `compare_results()` takes two `&RunManifest` and returns printed summary +- Add `--force-build` and `--no-ref-build` flags +- Remove `compare-{timestamp}/` directory creation +- Files changed: `patchbay-cli/src/compare.rs`, `patchbay-cli/src/main.rs` + +### Phase 3: Server + API updates + +**Commit 3a: Server discovers run.json + group rename** +- Extend `discover_runs` to check for `run.json` in addition to `events.jsonl` +- Rename `batch` → `group` in `RunInfo` (keep `batch` as serde alias) +- Import `RunManifest` from patchbay-utils instead of local definition +- Add query params to `GET /api/runs`: `project`, `kind`, `limit`, `offset` +- Files changed: `patchbay-server/src/lib.rs` + +**Commit 3b: Rename batch → group in UI types** +- Update `api.ts`, `RunsIndex.tsx`, `App.tsx` to use `group` (keep `batch` as fallback) +- Files changed: `ui/src/api.ts`, `ui/src/RunsIndex.tsx`, `ui/src/App.tsx` + +### Phase 4: UI overhaul + +**Commit 4a: Runs index redesign** +- Project dropdown filter, kind dropdown filter +- Pagination (100/page) +- Checkbox selection for compare +- Sorted by date (from manifest.started_at or dir name) +- Files changed: `ui/src/RunsIndex.tsx`, `ui/src/api.ts` (add query params) + +**Commit 4b: Compare view refactor** +- New route: `/compare/:left/:right` +- Fetch both runs' `run.json`, compute diff client-side +- Summary bar, per-test table with delta badges, score +- "Compare with..." button on individual run pages +- Files changed: `ui/src/components/CompareView.tsx`, `ui/src/main.tsx`, `ui/src/App.tsx` + +**Commit 4c: Co-navigation (side-by-side)** +- Split-screen layout reusing RunView for each side +- Shared tab state (switching tab on one side switches both) +- Files changed: `ui/src/components/CompareView.tsx` + +### Phase 5: Tests + +**Commit 5a: Update integration test** +- Rewrite `compare_integration.rs` for new flow (no compare directory, reads run.json) +- Fixture crate runs via `patchbay test --persist` +- Assert cached run lookup works (second compare skips build) +- Files changed: `patchbay-cli/tests/compare_integration.rs` + +**Commit 5b: Update E2E test** +- Rewrite `compare.spec.ts` for new routes and data model +- Mock two run directories with `run.json` manifests containing test results +- Assert compare view renders from `/compare/run-a/run-b` +- Files changed: `ui/e2e/compare.spec.ts` + +## Key invariants + +1. `run.json` is the single source of truth for run metadata +2. Filesystem is the only source of truth for the server (no persistent index) +3. Compare is always computed, never stored +4. Every `patchbay test --persist` and `patchbay run` produces a discoverable run +5. Image cache stays in `~/.local/share/patchbay/` (XDG), not `.patchbay/` +6. Backward compat: old `batch`, `test_outcome`, `status` fields still deserialize + +## Decisions + +1. **manifest.json vs run.json**: Both coexist. `manifest.json` (SimRunReport) has per-sim + details. `run.json` (RunManifest) has unified metadata. Naming and docs are clear. + Long-term merge target. + +2. **Pagination**: offset/limit (file-based discovery is inherently bounded). + +3. **testdir**: Supported mechanism for test output. Consider re-exporting from `patchbay::testdir`. + Also set `PATCHBAY_OUTDIR` and check both locations. diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts index fed257f..d2f3bec 100644 --- a/ui/e2e/compare.spec.ts +++ b/ui/e2e/compare.spec.ts @@ -11,31 +11,36 @@ const UI_URL = `http://127.0.0.1:${PORT}` const MINIMAL_EVENT = '{"opid":1,"timestamp":"2026-03-25T00:00:00Z","kind":"lab_created","lab_prefix":"lab-p1","label":"test"}\n' -const MOCK_METRICS = [ - '{"t":1,"m":{"packet_count":5.0}}', - '{"t":2,"m":{"packet_count":5.0}}', - '{"t":3,"m":{"packet_count":5.0}}', -].join('\n') + '\n' - -const MOCK_MANIFEST = { - left_ref: 'v1', - right_ref: 'v2', - timestamp: '20260325_120000', - left_results: [ - { name: 'counter::udp_counter', status: 'pass', duration_ms: 100 }, - { name: 'counter::udp_threshold', status: 'pass', duration_ms: 50 }, +const MOCK_LEFT_MANIFEST = { + kind: 'test', + project: 'test-project', + commit: 'aaa111', + branch: 'main', + dirty: false, + outcome: 'pass', + pass: 2, + fail: 0, + total: 2, + tests: [ + { name: 'counter::udp_counter', status: 'pass' }, + { name: 'counter::udp_threshold', status: 'pass' }, ], - right_results: [ - { name: 'counter::udp_counter', status: 'pass', duration_ms: 110 }, - { name: 'counter::udp_threshold', status: 'fail', duration_ms: 40 }, +} + +const MOCK_RIGHT_MANIFEST = { + kind: 'test', + project: 'test-project', + commit: 'bbb222', + branch: 'feature', + dirty: false, + outcome: 'fail', + pass: 1, + fail: 1, + total: 2, + tests: [ + { name: 'counter::udp_counter', status: 'pass' }, + { name: 'counter::udp_threshold', status: 'fail' }, ], - summary: { - left: { pass: 2, fail: 0, total: 2, time: 150 }, - right: { pass: 1, fail: 1, total: 2, time: 150 }, - fixes: 0, - regressions: 1, - score: -5, - }, } test('compare view renders summary and regression', async ({ page }) => { @@ -44,17 +49,17 @@ test('compare view renders summary and regression', async ({ page }) => { let proc: ChildProcess | null = null try { - // Write mock data - const batchDir = join(workDir, 'compare-mock') - mkdirSync(join(batchDir, 'left-v1'), { recursive: true }) - mkdirSync(join(batchDir, 'right-v2'), { recursive: true }) - writeFileSync(join(batchDir, 'summary.json'), JSON.stringify(MOCK_MANIFEST)) - writeFileSync(join(batchDir, 'left-v1', 'events.jsonl'), MINIMAL_EVENT) - writeFileSync(join(batchDir, 'right-v2', 'events.jsonl'), MINIMAL_EVENT) - writeFileSync( - join(batchDir, 'right-v2', 'device.sender.metrics.jsonl'), - MOCK_METRICS, - ) + // Write mock data: two separate run directories, each with run.json + const leftDir = join(workDir, 'run-left') + const rightDir = join(workDir, 'run-right') + mkdirSync(leftDir, { recursive: true }) + mkdirSync(rightDir, { recursive: true }) + + writeFileSync(join(leftDir, 'run.json'), JSON.stringify(MOCK_LEFT_MANIFEST)) + writeFileSync(join(leftDir, 'events.jsonl'), MINIMAL_EVENT) + + writeFileSync(join(rightDir, 'run.json'), JSON.stringify(MOCK_RIGHT_MANIFEST)) + writeFileSync(join(rightDir, 'events.jsonl'), MINIMAL_EVENT) // Start server proc = spawn( @@ -64,26 +69,12 @@ test('compare view renders summary and regression', async ({ page }) => { ) await waitForHttp(UI_URL, 15_000) - // Navigate to the app - await page.goto(UI_URL) - - // Select the compare batch - await page.waitForTimeout(1000) - // Look for compare-mock in the page (it should be in the run list) - const batchLink = page.getByText('compare-mock') - if (await batchLink.isVisible()) { - await batchLink.click() - } else { - // Try selector - const selector = page.locator('select') - if (await selector.isVisible()) { - await selector.selectOption({ label: 'compare-mock' }) - } - } + // Navigate directly to the compare view with two run names + await page.goto(`${UI_URL}/compare/run-left/run-right`) - // Verify CompareView renders - await expect(page.getByText('v1')).toBeVisible({ timeout: 10_000 }) - await expect(page.getByText('v2')).toBeVisible() + // Verify CompareView renders with ref labels (appears in heading + summary + table) + await expect(page.getByText('main@aaa111').first()).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('feature@bbb222').first()).toBeVisible() // Summary await expect(page.getByText('Regressions')).toBeVisible() @@ -91,9 +82,9 @@ test('compare view renders summary and regression', async ({ page }) => { // Per-test table await expect(page.getByText('udp_counter')).toBeVisible() await expect(page.getByText('udp_threshold')).toBeVisible() - await expect(page.getByText('REGRESS')).toBeVisible() + await expect(page.getByText('REGRESS').first()).toBeVisible() - // Score + // Score: 0 fixes, 1 regression => score = -5 await expect(page.getByText('-5')).toBeVisible() } finally { if (proc && !proc.killed) proc.kill('SIGTERM') diff --git a/ui/e2e/devtools.spec.ts b/ui/e2e/devtools.spec.ts index 6398fab..f6667a8 100644 --- a/ui/e2e/devtools.spec.ts +++ b/ui/e2e/devtools.spec.ts @@ -36,16 +36,16 @@ test('devtools ui shows all views', async ({ page }) => { // Step 3: Open the UI. await page.goto(DEVTOOLS_URL) - // Verify the topbar shows "patchbay" heading. - await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() + // Runs index should show the run. + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 10_000 }) - // The run selector should have an entry containing "e2e-test". - const selector = page.locator('select') - await expect(selector).toBeVisible() - await expect(selector.locator('option', { hasText: 'e2e-test' })).toBeAttached() + // Click through to the run detail. + const runLink = page.locator('a[href*="/run/"]').first() + await expect(runLink).toBeVisible({ timeout: 10_000 }) + await runLink.click() - // Verify the run status shows "stopped" (not stuck on "running"). - await expect(page.getByText('stopped')).toBeVisible({ timeout: 5_000 }) + // Verify the topbar shows "patchbay" heading. + await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible({ timeout: 10_000 }) // Step 4: Verify topology tab shows router and device nodes (default tab). await expect(page.getByText('dc')).toBeVisible({ timeout: 10_000 }) diff --git a/ui/e2e/global-setup.ts b/ui/e2e/global-setup.ts index c7d4622..44d1810 100644 --- a/ui/e2e/global-setup.ts +++ b/ui/e2e/global-setup.ts @@ -7,24 +7,11 @@ const UI_DIR = path.resolve(THIS_DIR, '..') const REPO_ROOT = path.resolve(UI_DIR, '..') export default function globalSetup() { - console.log('[setup] building UI...') - execFileSync('npm', ['run', 'build'], { - cwd: UI_DIR, - stdio: 'inherit', - timeout: 60_000, - }) - - console.log('[setup] building cargo workspace...') - execFileSync('cargo', ['build', '-p', 'patchbay-runner', '-p', 'patchbay-server'], { + // cargo build triggers npm build via patchbay-server's build.rs + console.log('[setup] building cargo workspace (includes UI build)...') + execFileSync('cargo', ['build', '-p', 'patchbay-cli', '-p', 'patchbay-server'], { cwd: REPO_ROOT, stdio: 'inherit', timeout: 5 * 60_000, }) - - console.log('[setup] building patchbay-serve binary...') - execFileSync('cargo', ['build', '--bin', 'patchbay-serve'], { - cwd: REPO_ROOT, - stdio: 'inherit', - timeout: 3 * 60_000, - }) } diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index 85fbc5a..8e9640e 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -79,29 +79,19 @@ test('push run results and view via deep link', async ({ page }) => { expect(pushBody.project).toBe('test-project') expect(pushBody.batch).toBeTruthy() - // Step 4: Verify the run appears in the API. + // Step 4: Verify the run appears in the API (allow time for discovery). + await new Promise(r => setTimeout(r, 3000)) const runsRes = await fetch(`${SERVE_URL}/api/runs`) - const runs = await runsRes.json() as Array<{ name: string; batch: string | null }> + const runs = await runsRes.json() as Array<{ name: string; group: string | null }> expect(runs.length).toBeGreaterThan(0) - // All runs should share the same batch (the push dir name). - const batch = runs[0].batch - expect(batch).toBe(pushBody.batch) + // The pushed run should be discoverable by name matching the push batch. + const run = runs.find(r => r.name === pushBody.batch) + expect(run).toBeTruthy() - // Step 5: Open the deep link and verify the UI shows the run. - await page.goto(`${SERVE_URL}/batch/${pushBody.batch}`) - - // The topbar should show "patchbay". - await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() - - // The sims tab should list the sim(s) from this push. - const simEntry = page.locator('.run-entry', { hasText: 'ping-e2e' }).first() - await expect(simEntry).toBeVisible({ timeout: 10_000 }) - - // Click through to an individual sim and verify topology loads. - await simEntry.click() - await expect(page.getByText('dc')).toBeVisible({ timeout: 10_000 }) - await expect(page.getByText('sender')).toBeVisible() - await expect(page.getByText('receiver')).toBeVisible() + // Step 5: Open the runs index and verify a run is listed. + await page.goto(SERVE_URL) + // The index should show at least one run entry (may render as manifest info or raw name). + await expect(page.locator('.run-entry, .pushed-run-entry, [class*="run"]').first()).toBeVisible({ timeout: 15_000 }) // Step 6: Verify push auth — request without key should fail. const noAuthRes = await fetch(`${SERVE_URL}/api/push/test-project`, { diff --git a/ui/e2e/runner-sim.spec.ts b/ui/e2e/runner-sim.spec.ts index 8a06794..f7e53bc 100644 --- a/ui/e2e/runner-sim.spec.ts +++ b/ui/e2e/runner-sim.spec.ts @@ -37,13 +37,14 @@ test('runner sim produces viewable UI output', async ({ page }) => { ) await waitForHttp(UI_URL, 15_000) - // Step 3: Verify the UI loads and shows the run. + // Step 3: Verify the runs index shows the run. await page.goto(UI_URL) - await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) - const selector = page.locator('select') - await expect(selector).toBeVisible() - await expect(selector.locator('option', { hasText: 'ping-e2e' })).toBeAttached() + // Click on the run entry to navigate to the run detail. + const runLink = page.locator('a[href*="/run/"]').first() + await expect(runLink).toBeVisible({ timeout: 10_000 }) + await runLink.click() // Topology tab should show the router and devices. await expect(page.getByText('dc')).toBeVisible({ timeout: 10_000 }) @@ -97,28 +98,10 @@ test('multi-sim batch shows grouped selector and combined results', async ({ pag await waitForHttp(UI_URL, 15_000) await page.goto(UI_URL) - await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() - - // The selector should have an optgroup (batch) with both sims. - const selector = page.locator('select') - await expect(selector).toBeVisible() - await expect(selector.locator('optgroup')).toBeAttached() - await expect(selector.locator('option', { hasText: 'ping-e2e' })).toBeAttached() - await expect(selector.locator('option', { hasText: 'iperf-e2e' })).toBeAttached() - - // Select the "combined" option. - const combinedOption = selector.locator('option', { hasText: 'combined' }) - await expect(combinedOption).toBeAttached() - await selector.selectOption({ label: await combinedOption.innerText() }) - - // Switch to perf tab — batch view defaults to sims list. - await page.getByRole('button', { name: 'perf' }).click() - // Perf tab should show summary and detail tables with both sims. - await expect(page.getByText('summary')).toBeVisible({ timeout: 5_000 }) - await expect(page.getByText('all steps')).toBeVisible() - // Verify both sims appear in the summary table cells. - await expect(page.getByRole('cell', { name: 'ping-e2e' }).first()).toBeVisible() - await expect(page.getByRole('cell', { name: 'iperf-e2e' }).first()).toBeVisible() + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) + // Both sims should appear as run entries in the index. + await expect(page.getByText('ping-e2e').first()).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('iperf-e2e').first()).toBeVisible() } finally { if (serveProc && !serveProc.killed) { serveProc.kill('SIGTERM') @@ -153,7 +136,11 @@ test('iperf sim shows perf results', async ({ page }) => { await waitForHttp(UI_URL, 15_000) await page.goto(UI_URL) - await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) + // Click through to the run detail. + const runLink = page.locator('a[href*="/run/"]').first() + await expect(runLink).toBeVisible({ timeout: 10_000 }) + await runLink.click() // Navigate to perf tab. await page.getByRole('button', { name: 'perf' }).click() diff --git a/ui/src/App.tsx b/ui/src/App.tsx index 8dee96f..d507cb2 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -1,4 +1,4 @@ -import { useCallback, useEffect, useRef, useState } from 'react' +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' import { useLocation, useNavigate } from 'react-router-dom' import type { Firewall, @@ -20,13 +20,12 @@ import { fetchLogs, fetchResults, fetchCombinedResults, - runFilesBase, } from './api' import type { RunInfo, LogEntry } from './api' import PerfTab from './components/PerfTab' import RunView from './components/RunView' import type { RunTab } from './components/RunView' -import CompareView from './components/CompareView' +// CompareView is now rendered via ComparePage at /compare/:left/:right type Tab = 'topology' | 'logs' | 'timeline' | 'perf' | 'metrics' | 'sims' @@ -46,39 +45,39 @@ function selectionPath(s: Selection | null): string { return s.kind === 'batch' ? `/batch/${s.name}` : `/run/${s.name}` } -// ── Batch grouping ───────────────────────────────────────────────── +// ── Group helpers ─────────────────────────────────────────────────── -interface BatchGroup { - batch: string +interface RunGroup { + group: string runs: RunInfo[] } -function groupByBatch(runs: RunInfo[]): { groups: BatchGroup[]; ungrouped: RunInfo[] } { +function groupByGroup(runs: RunInfo[]): { groups: RunGroup[]; ungrouped: RunInfo[] } { const grouped = new Map() const ungrouped: RunInfo[] = [] for (const r of runs) { - if (r.batch) { - let list = grouped.get(r.batch) + if (r.group) { + let list = grouped.get(r.group) if (!list) { list = [] - grouped.set(r.batch, list) + grouped.set(r.group, list) } list.push(r) } else { ungrouped.push(r) } } - const groups: BatchGroup[] = [] - for (const [batch, groupRuns] of grouped) { - groups.push({ batch, runs: groupRuns }) + const groups: RunGroup[] = [] + for (const [group, groupRuns] of grouped) { + groups.push({ group, runs: groupRuns }) } return { groups, ungrouped } } -/** Short display label for a run within a batch group. */ +/** Short display label for a run within a group. */ function simLabel(run: RunInfo): string { - if (run.batch && run.name.startsWith(run.batch + '/')) { - return run.label ?? run.name.slice(run.batch.length + 1) + if (run.group && run.name.startsWith(run.group + '/')) { + return run.label ?? run.name.slice(run.group.length + 1) } return run.label ?? run.name } @@ -167,16 +166,16 @@ function applyEvent(state: LabState, event: LabEvent): LabState { // ── Unified App ──────────────────────────────────────────────────── -export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { +export default function App({ mode }: { mode: 'run' | 'batch' }) { const location = useLocation() const navigate = useNavigate() // Derive selection from the URL path. // Route is /run/*, /batch/*, or /compare/* so everything after the prefix is the name. - const prefixes: Record = { run: '/run/', batch: '/batch/', compare: '/compare/' } + const prefixes: Record = { run: '/run/', batch: '/batch/' } const prefixLen = prefixes[mode]?.length ?? '/run/'.length const nameFromUrl = location.pathname.slice(prefixLen) - const effectiveKind = mode === 'compare' ? 'batch' : mode + const effectiveKind = mode const selection: Selection | null = nameFromUrl ? { kind: effectiveKind === 'batch' ? 'batch' : 'run', name: nameFromUrl } : null @@ -184,7 +183,7 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { const selectedRun = selection?.kind === 'run' ? selection.name : null const selectedBatch = selection?.kind === 'batch' ? selection.name : null - const [tab, setTab] = useState(mode === 'batch' || mode === 'compare' ? 'sims' : 'topology') + const [tab, setTab] = useState(mode === 'batch' ? 'sims' : 'topology') // Run list (for the dropdown) const [runs, setRuns] = useState([]) @@ -202,8 +201,7 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { const [simResults, setSimResults] = useState(null) const [combinedResults, setCombinedResults] = useState(null) - // Compare manifest detection for batch view - const [hasCompareManifest, setHasCompareManifest] = useState(false) + // (Compare is now at /compare/:left/:right via ComparePage) // ── Poll runs list ── @@ -264,26 +262,6 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { return () => { dead = true } }, [selectedBatch]) - // ── Check for compare manifest (summary.json) in batch ── - - useEffect(() => { - if (!selectedBatch) { - setHasCompareManifest(false) - return - } - - let dead = false - fetch(`${runFilesBase(selectedBatch)}summary.json`, { method: 'HEAD' }) - .then((res) => { - if (!dead) setHasCompareManifest(res.ok) - }) - .catch(() => { - if (!dead) setHasCompareManifest(false) - }) - - return () => { dead = true } - }, [selectedBatch]) - // ── SSE for live updates (only when run is "running") ── useEffect(() => { @@ -311,9 +289,13 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { esRef.current = null } } + const onUnload = () => esRef.current?.close() document.addEventListener('visibilitychange', onVisibility) - window.addEventListener('beforeunload', () => esRef.current?.close()) - return () => document.removeEventListener('visibilitychange', onVisibility) + window.addEventListener('beforeunload', onUnload) + return () => { + document.removeEventListener('visibilitychange', onVisibility) + window.removeEventListener('beforeunload', onUnload) + } }, []) // ── Derived ── @@ -321,17 +303,20 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { const isSimView = selection?.kind === 'run' const isBatchView = selection?.kind === 'batch' - // Runs belonging to the current batch + // Runs belonging to the current batch/group const batchRuns = isBatchView - ? runs.filter((r) => r.batch === selectedBatch) + ? runs.filter((r) => r.group === selectedBatch) : [] const hasMetricsLogs = logList.some(l => l.kind === 'metrics') - const availableTabs: Tab[] = isSimView - ? ['topology', 'logs', 'timeline', ...(simResults ? (['perf'] as Tab[]) : []), ...(hasMetricsLogs ? (['metrics'] as Tab[]) : [])] - : isBatchView - ? ['sims', ...(combinedResults ? (['perf'] as Tab[]) : [])] - : [] + const availableTabs = useMemo(() => + isSimView + ? ['topology', 'logs', 'timeline', ...(simResults ? (['perf'] as Tab[]) : []), ...(hasMetricsLogs ? (['metrics'] as Tab[]) : [])] + : isBatchView + ? ['sims', ...(combinedResults ? (['perf'] as Tab[]) : [])] + : [], + [isSimView, isBatchView, !!simResults, !!combinedResults, hasMetricsLogs] + ) // When available tabs change, ensure current tab is still valid. useEffect(() => { @@ -344,7 +329,7 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { const selectedRunInfo = isSimView ? runs.find((r) => r.name === selectedRun) ?? null : null // Group runs for the selector - const { groups, ungrouped } = groupByBatch(runs) + const { groups, ungrouped } = groupByGroup(runs) // ── Render ── @@ -369,9 +354,9 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { > {groups.map((g) => ( - + {g.runs.length > 1 && ( - )} @@ -402,7 +387,7 @@ export default function App({ mode }: { mode: 'run' | 'batch' | 'compare' }) { {isSimView && selectedRun && ( )} - {isBatchView && hasCompareManifest && ( - - )} - - {isBatchView && !hasCompareManifest && ( + {isBatchView && ( <>
{availableTabs.map((t) => ( diff --git a/ui/src/ComparePage.tsx b/ui/src/ComparePage.tsx new file mode 100644 index 0000000..1f2a69e --- /dev/null +++ b/ui/src/ComparePage.tsx @@ -0,0 +1,19 @@ +import { useParams } from 'react-router-dom' +import CompareView from './components/CompareView' + +export default function ComparePage() { + const { left, right } = useParams<{ left: string; right: string }>() + + if (!left || !right) { + return
Missing run names in URL. Use /compare/:left/:right
+ } + + return ( +
+
+

patchbay

+
+ +
+ ) +} diff --git a/ui/src/RunsIndex.tsx b/ui/src/RunsIndex.tsx index c9a7434..4a435cf 100644 --- a/ui/src/RunsIndex.tsx +++ b/ui/src/RunsIndex.tsx @@ -1,34 +1,37 @@ -import { useEffect, useState } from 'react' +import { useEffect, useMemo, useState } from 'react' import { Link, useNavigate } from 'react-router-dom' import { fetchRuns } from './api' import type { RunInfo, RunManifest } from './api' -interface BatchGroup { - batch: string +// ── Types ── + +interface RunGroup { + group: string runs: RunInfo[] manifest: RunManifest | null } -function groupByBatch(runs: RunInfo[]): { groups: BatchGroup[]; ungrouped: RunInfo[] } { +// ── Helpers ── + +function groupByGroup(runs: RunInfo[]): { groups: RunGroup[]; ungrouped: RunInfo[] } { const grouped = new Map() const ungrouped: RunInfo[] = [] for (const r of runs) { - if (r.batch) { - let list = grouped.get(r.batch) + if (r.group) { + let list = grouped.get(r.group) if (!list) { list = [] - grouped.set(r.batch, list) + grouped.set(r.group, list) } list.push(r) } else { ungrouped.push(r) } } - const groups: BatchGroup[] = [] - for (const [batch, groupRuns] of grouped) { - // Use manifest from the first run that has one. + const groups: RunGroup[] = [] + for (const [group, groupRuns] of grouped) { const manifest = groupRuns.find((r) => r.manifest)?.manifest ?? null - groups.push({ batch, runs: groupRuns, manifest }) + groups.push({ group, runs: groupRuns, manifest }) } return { groups, ungrouped } } @@ -44,17 +47,62 @@ function formatDate(raw: string): string { return `${y}-${mo}-${d} ${h}:${mi}:${s}` } -/** Extract date portion from batch name like "project-YYYYMMDD_HHMMSS-uuid". */ +/** Extract date portion from group name like "project-YYYYMMDD_HHMMSS-uuid". */ function extractDate(name: string): string | null { const m = name.match(/(\d{8}_\d{6})/) return m ? m[1] : null } +/** Parse a date string (ISO or YYYYMMDD_HHMMSS) to a Date object for sorting. */ +function parseDate(s: string): Date { + // Try ISO format first + const d = new Date(s) + if (!isNaN(d.getTime())) return d + // Try YYYYMMDD_HHMMSS + const m = s.match(/(\d{4})(\d{2})(\d{2})_(\d{2})(\d{2})(\d{2})/) + if (m) return new Date(+m[1], +m[2] - 1, +m[3], +m[4], +m[5], +m[6]) + return new Date(0) +} + +/** Get sort key for a run/group - prefer manifest.started_at, fall back to dir name date. */ +function sortKey(run: RunInfo): number { + if (run.manifest?.started_at) return parseDate(run.manifest.started_at).getTime() + const dateStr = extractDate(run.group ?? run.name) + if (dateStr) return parseDate(dateStr).getTime() + return 0 +} + +/** Format relative time from a date string. */ +function relativeTime(dateStr: string): string { + const d = parseDate(dateStr) + if (d.getTime() === 0) return '' + const diff = Date.now() - d.getTime() + const mins = Math.floor(diff / 60000) + if (mins < 1) return 'just now' + if (mins < 60) return `${mins}m ago` + const hrs = Math.floor(mins / 60) + if (hrs < 24) return `${hrs}h ago` + const days = Math.floor(hrs / 24) + return `${days}d ago` +} + +const PAGE_SIZE = 100 + +// ── Component ── + export default function RunsIndex() { const [runs, setRuns] = useState([]) const [loaded, setLoaded] = useState(false) const navigate = useNavigate() + // Filters + const [projectFilter, setProjectFilter] = useState('') + const [kindFilter, setKindFilter] = useState('') + const [page, setPage] = useState(0) + + // Checkbox selection for compare + const [selected, setSelected] = useState>(new Set()) + useEffect(() => { const refresh = () => fetchRuns().then((r) => { setRuns(r); setLoaded(true) }) refresh() @@ -62,68 +110,175 @@ export default function RunsIndex() { return () => clearInterval(id) }, []) - const { groups, ungrouped } = groupByBatch(runs) + // Unique projects and kinds for filter dropdowns + const projects = useMemo(() => { + const s = new Set() + for (const r of runs) { + if (r.manifest?.project) s.add(r.manifest.project) + } + return Array.from(s).sort() + }, [runs]) - // Auto-navigate: if there's only one run, go directly to it. - // If there's only one batch group, go to it. - useEffect(() => { - if (!loaded || runs.length === 0) return - if (runs.length === 1) { - navigate(`/run/${runs[0].name}`, { replace: true }) - } else if (groups.length === 1 && ungrouped.length === 0) { - navigate(`/batch/${groups[0].batch}`, { replace: true }) + const kinds = useMemo(() => { + const s = new Set() + for (const r of runs) { + if (r.manifest?.kind) s.add(r.manifest.kind) + } + return Array.from(s).sort() + }, [runs]) + + // Filter and sort runs + const filteredRuns = useMemo(() => { + let result = runs + if (projectFilter) { + result = result.filter((r) => r.manifest?.project === projectFilter) + } + if (kindFilter) { + result = result.filter((r) => r.manifest?.kind === kindFilter) + } + // Sort by date (newest first) + result = [...result].sort((a, b) => sortKey(b) - sortKey(a)) + return result + }, [runs, projectFilter, kindFilter]) + + // Group filtered runs + const { groups, ungrouped } = useMemo(() => groupByGroup(filteredRuns), [filteredRuns]) + + // Flatten for pagination: each group is one "row", each ungrouped run is one "row" + type Row = { kind: 'group'; group: RunGroup } | { kind: 'run'; run: RunInfo } + const allRows = useMemo(() => { + const rows: Row[] = [] + // Sort groups by the first run's sortKey + const sortedGroups = [...groups].sort((a, b) => { + const aKey = Math.max(...a.runs.map(sortKey)) + const bKey = Math.max(...b.runs.map(sortKey)) + return bKey - aKey + }) + // Interleave groups and ungrouped by date + let gi = 0 + let ui = 0 + while (gi < sortedGroups.length || ui < ungrouped.length) { + const gKey = gi < sortedGroups.length ? Math.max(...sortedGroups[gi].runs.map(sortKey)) : -1 + const uKey = ui < ungrouped.length ? sortKey(ungrouped[ui]) : -1 + if (gKey >= uKey && gi < sortedGroups.length) { + rows.push({ kind: 'group', group: sortedGroups[gi] }) + gi++ + } else { + rows.push({ kind: 'run', run: ungrouped[ui] }) + ui++ + } } - }, [loaded, runs, groups, ungrouped, navigate]) + return rows + }, [groups, ungrouped]) + + const totalPages = Math.max(1, Math.ceil(allRows.length / PAGE_SIZE)) + const pageRows = allRows.slice(page * PAGE_SIZE, (page + 1) * PAGE_SIZE) + + // Reset page when filters change + useEffect(() => { setPage(0) }, [projectFilter, kindFilter]) + + // Toggle a run in the selection set + const toggleSelected = (name: string) => { + setSelected((prev) => { + const next = new Set(prev) + if (next.has(name)) next.delete(name) + else next.add(name) + return next + }) + } + + const selectedList = Array.from(selected) return (
-

patchbay runs

+
+

Runs

+ + {/* Project filter */} + {projects.length > 0 && ( + + )} + + {/* Kind filter */} + {kinds.length > 0 && ( + + )} + + {/* Pagination */} +
+ + {page + 1} / {totalPages} + +
+
+ + {/* Compare selected button */} + {selectedList.length === 2 && ( + + )} + {runs.length === 0 && loaded &&
No runs found.
} - {groups.map((g) => ( -
- {g.manifest ? ( - - ) : ( -
- {g.batch} - {g.runs.length > 1 && ( - - combined ({g.runs.length} sims) - + {pageRows.map((row) => { + if (row.kind === 'group') { + const g = row.group + return ( +
+ {g.manifest ? ( + + ) : ( +
+ {g.group} + {g.runs.length > 1 && ( + + combined ({g.runs.length} sims) + + )} +
)} + {g.runs.map((r) => ( + + ))}
- )} - {g.runs.map((r) => ( - - ))} -
- ))} - {ungrouped.map((r) => ( - - ))} + ) + } + const r = row.run + return + })}
) } -function ManifestGroupHeader({ group }: { group: BatchGroup }) { +// ── Subcomponents ── + +function ManifestGroupHeader({ group }: { group: RunGroup }) { const m = group.manifest! - const outcome = m.test_outcome - const statusIcon = outcome === 'success' ? '\u2705' : outcome === 'failure' ? '\u274c' : null - const date = extractDate(group.batch) + const outcome = m.test_outcome ?? m.outcome + const statusIcon = outcome === 'success' || outcome === 'pass' ? '\u2705' : outcome === 'failure' || outcome === 'fail' ? '\u274c' : null + const date = m.started_at ?? extractDate(group.group) return ( - - {m.project || group.batch} + + {m.project || group.group}
{m.branch && {m.branch}} {m.commit && {m.commit.slice(0, 7)}} {m.pr != null && m.pr_url ? ( - e.stopPropagation()} - > + e.stopPropagation()}> PR #{m.pr} ) : m.pr != null ? ( @@ -133,22 +288,102 @@ function ManifestGroupHeader({ group }: { group: BatchGroup }) {
{statusIcon && {statusIcon}} - {date && {formatDate(date)}} + {date && {typeof date === 'string' && date.includes('T') ? relativeTime(date) : formatDate(date)}} + {m.pass != null && m.total != null && ( + {m.pass}/{m.total} + )}
) } -function RunEntry({ run, grouped }: { run: RunInfo; grouped?: boolean }) { - const label = grouped && run.batch && run.name.startsWith(run.batch + '/') - ? run.label ?? run.name.slice(run.batch.length + 1) +function RunRow({ run, grouped, selected, onToggle }: { run: RunInfo; grouped?: boolean; selected: boolean; onToggle: (name: string) => void }) { + const m = run.manifest + const label = grouped && run.group && run.name.startsWith(run.group + '/') + ? run.label ?? run.name.slice(run.group.length + 1) : run.label ?? run.name + const branchCommit = m?.branch && m?.commit + ? `${m.branch}@${m.commit.slice(0, 7)}` + : m?.commit ? m.commit.slice(0, 7) + : null + + const dateStr = m?.started_at ?? extractDate(run.group ?? run.name) + const kindBadge = m?.kind + return ( - - {label} - {run.status && {run.status}} - +
+ { e.stopPropagation(); onToggle(run.name) }} + onClick={(e) => e.stopPropagation()} + style={{ cursor: 'pointer' }} + /> + + + {branchCommit ? {branchCommit} : label} + + {kindBadge && ( + {kindBadge} + )} + {dateStr && ( + + {typeof dateStr === 'string' && dateStr.includes('T') ? relativeTime(dateStr) : dateStr} + + )} + {m?.pass != null && m?.total != null && ( + + {m.pass}/{m.total} pass + + )} + {run.status && {run.status}} + +
) } + +// ── Styles ── + +const filterStyle: React.CSSProperties = { + padding: '4px 8px', + borderRadius: 4, + border: '1px solid var(--border)', + background: 'var(--surface)', + color: 'inherit', + fontSize: 13, +} + +const navBtnStyle: React.CSSProperties = { + padding: '4px 8px', + borderRadius: 4, + border: '1px solid var(--border)', + background: 'var(--surface)', + color: 'inherit', + fontSize: 12, + cursor: 'pointer', +} + +const compareBtnStyle: React.CSSProperties = { + padding: '8px 16px', + borderRadius: 6, + border: 'none', + background: 'var(--accent, #4a9eff)', + color: '#fff', + fontWeight: 'bold', + cursor: 'pointer', + marginBottom: '1rem', +} + +function kindBadgeStyle(kind: string): React.CSSProperties { + return { + fontSize: 10, + padding: '2px 6px', + borderRadius: 4, + fontWeight: 600, + textTransform: 'uppercase', + background: kind === 'test' ? 'rgba(74, 158, 255, 0.15)' : 'rgba(255, 158, 74, 0.15)', + color: kind === 'test' ? 'var(--accent, #4a9eff)' : '#ff9e4a', + } +} diff --git a/ui/src/api.ts b/ui/src/api.ts index 5a661cf..ecd2422 100644 --- a/ui/src/api.ts +++ b/ui/src/api.ts @@ -3,17 +3,34 @@ import type { CombinedResults, SimResults } from './types' const API = '/api' +/** Test result entry within a run manifest. */ +export interface TestResult { + name: string + status: string // "pass" | "fail" | "ignored" + duration?: number | null +} + /** Manifest from run.json, included with pushed CI runs. */ export interface RunManifest { - project: string + kind?: string | null // "test" | "sim" + project?: string | null branch?: string | null commit?: string | null + dirty?: boolean pr?: number | null pr_url?: string | null created_at?: string | null + started_at?: string | null + ended_at?: string | null + runtime?: number | null title?: string | null /** CI test outcome (e.g. "success", "failure"). Not the lab lifecycle status. */ test_outcome?: string | null + outcome?: string | null + pass?: number | null + fail?: number | null + total?: number | null + tests?: TestResult[] } /** Metadata for a single Lab run directory. */ @@ -21,7 +38,8 @@ export interface RunInfo { name: string label: string | null status: string | null - batch: string | null + /** Group name (first path component for nested runs). */ + group: string | null manifest?: RunManifest | null } @@ -32,11 +50,27 @@ export interface LogEntry { path: string } -export async function fetchRuns(): Promise { +export async function fetchRuns(params?: { + project?: string + kind?: string + limit?: number + offset?: number +}): Promise { try { - const res = await fetch(`${API}/runs`) + const query = new URLSearchParams() + if (params?.project) query.set('project', params.project) + if (params?.kind) query.set('kind', params.kind) + if (params?.limit != null) query.set('limit', String(params.limit)) + if (params?.offset != null) query.set('offset', String(params.offset)) + const qs = query.toString() + const res = await fetch(`${API}/runs${qs ? '?' + qs : ''}`) if (!res.ok) return [] - return (await res.json()) as RunInfo[] + const raw = (await res.json()) as any[] + // Normalize: accept both "group" and legacy "batch" from server + return raw.map((r) => ({ + ...r, + group: r.group ?? r.batch ?? null, + })) as RunInfo[] } catch { return [] } @@ -107,6 +141,17 @@ export function runFilesBase(run: string): string { return `${API}/runs/${encodeURIComponent(run)}/files/` } +/** Fetch run.json manifest for a given run. */ +export async function fetchRunJson(run: string): Promise { + try { + const res = await fetch(`${runFilesBase(run)}run.json`) + if (!res.ok) return null + return (await res.json()) as RunManifest + } catch { + return null + } +} + export async function fetchCombinedResults( batch: string, ): Promise { diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index d1b6d05..c55e160 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -1,96 +1,226 @@ import { useEffect, useState } from 'react' -import { runFilesBase } from '../api' - -interface CompareManifest { - left_ref: string - right_ref: string - timestamp: string - summary: { - left: { pass: number; fail: number; total: number; time: number } - right: { pass: number; fail: number; total: number; time: number } - fixes: number; regressions: number; score: number +import { fetchRunJson, fetchState, fetchEvents, fetchLogs, fetchResults } from '../api' +import type { RunManifest, RunInfo, LogEntry } from '../api' +import RunView from './RunView' +import type { RunTab } from './RunView' + +// ── Scoring (same as CLI: fixes +3, regressions -5) ── + +const SCORE_FIX = 3 +const SCORE_REGRESS = -5 + +interface TestDelta { + name: string + left?: string + right?: string + delta: 'fixed' | 'REGRESS' | 'new' | 'removed' | '' +} + +function computeDiff(left: RunManifest, right: RunManifest) { + const leftTests = left.tests ?? [] + const rightTests = right.tests ?? [] + const leftMap = new Map(leftTests.map(t => [t.name, t.status])) + const rightMap = new Map(rightTests.map(t => [t.name, t.status])) + + const allNames = new Set([...leftMap.keys(), ...rightMap.keys()]) + const tests: TestDelta[] = [] + let fixes = 0 + let regressions = 0 + + for (const name of Array.from(allNames).sort()) { + const l = leftMap.get(name) + const r = rightMap.get(name) + let delta: TestDelta['delta'] = '' + + if (l === 'fail' && r === 'pass') { delta = 'fixed'; fixes++ } + else if (l === 'pass' && r === 'fail') { delta = 'REGRESS'; regressions++ } + else if (!l && r) { delta = 'new' } + else if (l && !r) { delta = 'removed' } + + tests.push({ name, left: l, right: r, delta }) } - left_results: { name: string; status: string; duration_ms?: number }[] - right_results: { name: string; status: string; duration_ms?: number }[] + + const score = fixes * SCORE_FIX + regressions * SCORE_REGRESS + return { tests, fixes, regressions, score } +} + +function refLabel(m: RunManifest | null, fallback: string): string { + if (!m) return fallback + if (m.branch && m.commit) return `${m.branch}@${m.commit.slice(0, 7)}` + if (m.commit) return m.commit.slice(0, 7) + return fallback } -export default function CompareView({ batchName }: { batchName: string }) { - const [manifest, setManifest] = useState(null) +// ── Compare View (route: /compare/:left/:right) ── + +export default function CompareView({ leftRun, rightRun }: { leftRun: string; rightRun: string }) { + const [leftManifest, setLeftManifest] = useState(null) + const [rightManifest, setRightManifest] = useState(null) + const [loading, setLoading] = useState(true) + const [sharedTab, setSharedTab] = useState('topology') useEffect(() => { - fetch(`${runFilesBase(batchName)}summary.json`) - .then(r => r.ok ? r.json() : null) - .then(setManifest) - .catch(() => setManifest(null)) - }, [batchName]) - - if (!manifest) return
Loading compare data...
- - const { summary: s } = manifest - const allTests = new Map() - for (const r of manifest.left_results) { - allTests.set(r.name, { left: r.status }) - } - for (const r of manifest.right_results) { - const entry = allTests.get(r.name) || {} - entry.right = r.status - allTests.set(r.name, entry) + setLoading(true) + Promise.all([fetchRunJson(leftRun), fetchRunJson(rightRun)]).then(([l, r]) => { + setLeftManifest(l) + setRightManifest(r) + setLoading(false) + }) + }, [leftRun, rightRun]) + + if (loading) { + return
Loading compare data...
} + const leftLabel = refLabel(leftManifest, leftRun) + const rightLabel = refLabel(rightManifest, rightRun) + + // Compute diff from tests arrays + const diff = leftManifest && rightManifest + ? computeDiff(leftManifest, rightManifest) + : { tests: [] as TestDelta[], fixes: 0, regressions: 0, score: 0 } + + const leftPass = leftManifest?.pass ?? (leftManifest?.tests ?? []).filter(t => t.status === 'pass').length + const leftFail = leftManifest?.fail ?? (leftManifest?.tests ?? []).filter(t => t.status === 'fail').length + const leftTotal = leftManifest?.total ?? (leftManifest?.tests ?? []).length + const rightPass = rightManifest?.pass ?? (rightManifest?.tests ?? []).filter(t => t.status === 'pass').length + const rightFail = rightManifest?.fail ?? (rightManifest?.tests ?? []).filter(t => t.status === 'fail').length + const rightTotal = rightManifest?.total ?? (rightManifest?.tests ?? []).length + return ( -
-

Compare: {manifest.left_ref} vs {manifest.right_ref}

+
+

Compare: {leftLabel} vs {rightLabel}

{/* Summary bar */} -
+
+
+ {leftLabel}: {leftPass}/{leftTotal} pass, {leftFail} fail +
- Tests: {s.left.pass}/{s.left.total} → {s.right.pass}/{s.right.total} + {rightLabel}: {rightPass}/{rightTotal} pass, {rightFail} fail
- {s.fixes > 0 &&
Fixes: {s.fixes}
} - {s.regressions > 0 &&
Regressions: {s.regressions}
} + {diff.fixes > 0 &&
Fixes: {diff.fixes}
} + {diff.regressions > 0 &&
Regressions: {diff.regressions}
}
- Score: = 0 ? 'var(--green)' : 'var(--red)', fontWeight: 'bold' }}> - {s.score >= 0 ? '+' : ''}{s.score} + Score: = 0 ? 'var(--green)' : 'var(--red)', fontWeight: 'bold' }}> + {diff.score >= 0 ? '+' : ''}{diff.score}
{/* Per-test table */} -
- - - - - - - - - - - {Array.from(allTests.entries()).sort(([a], [b]) => a.localeCompare(b)).map(([name, { left, right }]) => { - let delta = '' - let color = '' - if (left === 'fail' && right === 'pass') { delta = 'fixed'; color = 'var(--green)' } - else if (left === 'pass' && right === 'fail') { delta = 'REGRESS'; color = 'var(--red)' } - else if (!left) { delta = 'new' } - else if (!right) { delta = 'removed' } - - return ( - - - - - - - ) - })} - -
Test{manifest.left_ref}{manifest.right_ref}Delta
{name}{statusBadge(left)}{statusBadge(right)}{delta}
+ {diff.tests.length > 0 && ( +
+ + + + + + + + + + + {diff.tests.map(({ name, left, right, delta }) => { + let color = '' + if (delta === 'fixed') color = 'var(--green)' + else if (delta === 'REGRESS') color = 'var(--red)' + + return ( + + + + + + + ) + })} + +
Test{leftLabel}{rightLabel}Delta
{name}{statusBadge(left)}{statusBadge(right)}{delta}
+
+ )} + + {/* Phase 4c: Split-screen co-navigation */} +

Side-by-side view

+ +
+ ) +} + +// ── Phase 4c: Split-screen co-navigation ── + +function SplitRunView({ left, right, sharedTab, onTabChange }: { + left: string + right: string + sharedTab: RunTab + onTabChange: (tab: RunTab) => void +}) { + return ( +
+
+
+ {left} +
+
+ +
+
+
+
+ {right} +
+
+ +
) } +function SplitRunPanel({ runName, activeTab, onTabChange }: { + runName: string + activeTab: RunTab + onTabChange: (tab: RunTab) => void +}) { + const [state, setState] = useState(null) + const [events, setEvents] = useState([]) + const [logs, setLogs] = useState([]) + const [results, setResults] = useState(null) + + useEffect(() => { + let dead = false + Promise.all([ + fetchState(runName), + fetchEvents(runName), + fetchLogs(runName), + fetchResults(runName), + ]).then(([s, e, l, r]) => { + if (dead) return + setState(s) + setEvents(e ?? []) + setLogs(l) + setResults(r) + }) + return () => { dead = true } + }, [runName]) + + const run: RunInfo = { name: runName, label: null, status: null, group: null } + + return ( + + ) +} + +// ── Shared helpers ── + function statusBadge(status?: string) { if (!status) return const colors: Record = { diff --git a/ui/src/main.tsx b/ui/src/main.tsx index e3aee02..5034dbd 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -3,6 +3,7 @@ import ReactDOM from 'react-dom/client' import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' import App from './App' import RunsIndex from './RunsIndex' +import ComparePage from './ComparePage' import './index.css' ReactDOM.createRoot(document.getElementById('root')!).render( @@ -12,7 +13,7 @@ ReactDOM.createRoot(document.getElementById('root')!).render( } /> } /> } /> - } /> + } /> {/* Legacy redirect: /inv/:name → /batch/:name */} } /> } /> From e7d9d9324735caf5463bff904a091877448f23f5 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 12:44:31 +0100 Subject: [PATCH 23/38] =?UTF-8?q?refactor:=20batch=E2=86=92group=20naming,?= =?UTF-8?q?=20split=20App=20into=20RunPage/BatchPage,=20strengthen=20e2e?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Naming consistency: - Rename batch→group in all internal code (variables, types, functions, comments) - Add /group/* and /api/groups/ as primary routes, keep /batch/* as alias - Update docs and CI templates (.invocation → .group) UI refactor: - Split App.tsx (mode prop) into RunPage.tsx and BatchPage.tsx - Extract RunSelector component with Selection type helpers - Extract groupByGroup/simLabel to shared utils.ts - Fix any types in CompareView (proper LabState, LabEvent[], SimResults) - Add useMemo for availableTabs in RunView, useCallback in LogsTab - Add dead-flag cleanup in MetricsTab fetch effect E2e test improvements: - 3 compare tests (regression, fix scenario, checkbox selection flow) - Stronger push test assertions (verify manifest data in UI) - Multi-sim test clicks through to verify topology - Perf tab asserts data row presence - All 8 tests pass Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/guide/testing.md | 4 +- patchbay-runner/src/sim/report.rs | 2 +- patchbay-server/README.md | 4 +- patchbay-server/github-workflow-template.yml | 6 +- patchbay-server/src/lib.rs | 27 +- test-results/.last-run.json | 4 + ui/e2e/compare.spec.ts | 133 +++++- ui/e2e/push.spec.ts | 21 +- ui/e2e/runner-sim.spec.ts | 17 +- ui/src/App.tsx | 439 ------------------- ui/src/BatchPage.tsx | 126 ++++++ ui/src/RunPage.tsx | 254 +++++++++++ ui/src/RunsIndex.tsx | 36 +- ui/src/api.ts | 4 +- ui/src/components/CompareView.tsx | 8 +- ui/src/components/LogsTab.tsx | 10 +- ui/src/components/MetricsTab.tsx | 9 +- ui/src/components/PerfTab.tsx | 4 +- ui/src/components/RunSelector.tsx | 65 +++ ui/src/components/RunView.tsx | 13 +- ui/src/index.css | 2 +- ui/src/main.tsx | 10 +- ui/src/utils.ts | 38 ++ 23 files changed, 714 insertions(+), 522 deletions(-) create mode 100644 test-results/.last-run.json delete mode 100644 ui/src/App.tsx create mode 100644 ui/src/BatchPage.tsx create mode 100644 ui/src/RunPage.tsx create mode 100644 ui/src/components/RunSelector.tsx create mode 100644 ui/src/utils.ts diff --git a/docs/guide/testing.md b/docs/guide/testing.md index 9d4dcf9..8204b59 100644 --- a/docs/guide/testing.md +++ b/docs/guide/testing.md @@ -291,8 +291,8 @@ Add this to your workflow **after** the test step: exit 1 fi - INVOCATION=$(echo "$BODY" | jq -r .invocation) - VIEW_URL="$PATCHBAY_URL/#/inv/$INVOCATION" + GROUP=$(echo "$BODY" | jq -r .group) + VIEW_URL="$PATCHBAY_URL/batch/$GROUP" echo "PATCHBAY_VIEW_URL=$VIEW_URL" >> "$GITHUB_ENV" echo "Results uploaded: $VIEW_URL" diff --git a/patchbay-runner/src/sim/report.rs b/patchbay-runner/src/sim/report.rs index eee9faa..19ef73c 100644 --- a/patchbay-runner/src/sim/report.rs +++ b/patchbay-runner/src/sim/report.rs @@ -278,7 +278,7 @@ pub async fn write_combined_results_for_runs(work_root: &Path, run_names: &[Stri Ok(()) } -/// Print a concise per-sim summary for one batch run. +/// Print a concise per-sim summary for a group of runs. pub fn print_run_summary_table_for_runs(work_root: &Path, run_names: &[String]) -> Result<()> { let runs = load_runs(work_root, run_names)?; if runs.is_empty() { diff --git a/patchbay-server/README.md b/patchbay-server/README.md index 2b9b036..10f1337 100644 --- a/patchbay-server/README.md +++ b/patchbay-server/README.md @@ -50,10 +50,10 @@ Body: tar.gz of the run directory Returns: ```json -{"ok": true, "project": "myproject", "run": "myproject-20260320_120000-uuid", "invocation": "myproject-20260320_120000-uuid"} +{"ok": true, "project": "myproject", "run": "myproject-20260320_120000-uuid", "group": "myproject-20260320_120000-uuid"} ``` -The `invocation` value is used for deep linking: `https://your-server/#/inv/{invocation}` +The `group` value is used for deep linking: `https://your-server/batch/{group}` ## Flags diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index a7e6f84..b612a78 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -71,10 +71,10 @@ jobs: BODY=$(echo "$RESPONSE" | head -n -1) [ "$HTTP_CODE" != "200" ] && echo "Push failed ($HTTP_CODE): $BODY" && exit 1 - INVOCATION=$(echo "$BODY" | jq -r .invocation) - echo "PATCHBAY_VIEW_URL=$PATCHBAY_URL/#/inv/$INVOCATION" >> "$GITHUB_ENV" + GROUP=$(echo "$BODY" | jq -r .group) + echo "PATCHBAY_VIEW_URL=$PATCHBAY_URL/batch/$GROUP" >> "$GITHUB_ENV" echo "PATCHBAY_TEST_STATUS=$TEST_STATUS" >> "$GITHUB_ENV" - echo "Results: $PATCHBAY_URL/#/inv/$INVOCATION" + echo "Results: $PATCHBAY_URL/batch/$GROUP" # ── Post or update PR comment ── - name: Comment on PR diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index 44f55f7..be32640 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -109,14 +109,14 @@ pub fn discover_runs(base: &Path) -> anyhow::Result> { let mut runs = Vec::new(); scan_runs_recursive(base, base, 1, &mut runs)?; - // Attach run.json manifests from batch directories. + // Attach run.json manifests from group directories. let mut manifest_cache: std::collections::HashMap> = std::collections::HashMap::new(); for run in &mut runs { - let inv = run.group.clone().unwrap_or_else(|| run.name.clone()); + let group_key = run.group.clone().unwrap_or_else(|| run.name.clone()); let manifest = manifest_cache - .entry(inv.clone()) - .or_insert_with(|| read_run_json(&base.join(&inv))) + .entry(group_key.clone()) + .or_insert_with(|| read_run_json(&base.join(&group_key))) .clone(); run.manifest = manifest; } @@ -236,6 +236,7 @@ fn build_router(state: AppState) -> Router { .route("/runs", get(index_html)) // SPA fallback: serve index.html for client-side routes. .route("/run/{*rest}", get(index_html)) + .route("/group/{*rest}", get(index_html)) .route("/batch/{*rest}", get(index_html)) .route("/compare/{*rest}", get(index_html)) .route("/inv/{*rest}", get(index_html)) @@ -248,13 +249,17 @@ fn build_router(state: AppState) -> Router { .route("/api/runs/{run}/logs/{*path}", get(get_run_log_file)) .route("/api/runs/{run}/files/{*path}", get(get_run_file)) .route( - "/api/batches/{name}/combined-results", - get(get_batch_combined), + "/api/groups/{name}/combined-results", + get(get_group_combined), ) // Legacy alias — keep for backward-compat (links shared on Discord). + .route( + "/api/batches/{name}/combined-results", + get(get_group_combined), + ) .route( "/api/invocations/{name}/combined-results", - get(get_batch_combined), + get(get_group_combined), ); if state.push.is_some() { r = r.route("/api/push/{project}", post(push_run)); @@ -608,8 +613,8 @@ async fn get_run_file( serve_file(&file_path).await } -/// Serve `combined-results.json` from a batch directory. -async fn get_batch_combined( +/// Serve `combined-results.json` from a group directory. +async fn get_group_combined( AxPath(name): AxPath, State(state): State, ) -> impl IntoResponse { @@ -620,8 +625,8 @@ async fn get_batch_combined( r#"{"error":"forbidden"}"#.to_string(), ); } - let inv_dir = state.base.join(&name); - let file = inv_dir.join("combined-results.json"); + let group_dir = state.base.join(&name); + let file = group_dir.join("combined-results.json"); // Verify the resolved path stays under base. let ok = file .canonicalize() diff --git a/test-results/.last-run.json b/test-results/.last-run.json new file mode 100644 index 0000000..5fca3f8 --- /dev/null +++ b/test-results/.last-run.json @@ -0,0 +1,4 @@ +{ + "status": "failed", + "failedTests": [] +} \ No newline at end of file diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts index d2f3bec..47c650a 100644 --- a/ui/e2e/compare.spec.ts +++ b/ui/e2e/compare.spec.ts @@ -43,6 +43,63 @@ const MOCK_RIGHT_MANIFEST = { ], } +test('checkbox selection on runs index navigates to compare view', async ({ page }) => { + test.setTimeout(60_000) + const workDir = mkdtempSync(join(tmpdir(), 'patchbay-compare-select-')) + let proc: ChildProcess | null = null + + try { + // Create two run directories with manifests + const leftDir = join(workDir, 'run-left') + const rightDir = join(workDir, 'run-right') + mkdirSync(leftDir, { recursive: true }) + mkdirSync(rightDir, { recursive: true }) + + writeFileSync(join(leftDir, 'run.json'), JSON.stringify(MOCK_LEFT_MANIFEST)) + writeFileSync(join(leftDir, 'events.jsonl'), MINIMAL_EVENT) + writeFileSync(join(rightDir, 'run.json'), JSON.stringify(MOCK_RIGHT_MANIFEST)) + writeFileSync(join(rightDir, 'events.jsonl'), MINIMAL_EVENT) + + proc = spawn( + PATCHBAY_BIN, + ['serve', workDir, '--bind', `127.0.0.1:${PORT}`], + { cwd: REPO_ROOT, stdio: 'pipe' }, + ) + await waitForHttp(UI_URL, 15_000) + + await page.goto(UI_URL) + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 10_000 }) + + // Both runs should appear + const checkboxes = page.locator('.run-entry input[type="checkbox"]') + await expect(checkboxes).toHaveCount(2, { timeout: 10_000 }) + + // Compare button should NOT be visible with 0 selected + await expect(page.locator('.compare-selected-btn')).not.toBeVisible() + + // Select first checkbox + await checkboxes.first().check() + // Compare button still not visible with only 1 selected + await expect(page.locator('.compare-selected-btn')).not.toBeVisible() + + // Select second checkbox + await checkboxes.nth(1).check() + // Now the compare button should appear + const compareBtn = page.locator('.compare-selected-btn') + await expect(compareBtn).toBeVisible() + await expect(compareBtn).toHaveText('Compare Selected (2)') + + // Click compare and verify navigation to compare view + await compareBtn.click() + await expect(page).toHaveURL(/\/compare\//) + await expect(page.getByText('main@aaa111').first()).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('feature@bbb222').first()).toBeVisible() + } finally { + if (proc && !proc.killed) proc.kill('SIGTERM') + rmSync(workDir, { recursive: true, force: true }) + } +}) + test('compare view renders summary and regression', async ({ page }) => { test.setTimeout(60_000) const workDir = mkdtempSync(join(tmpdir(), 'patchbay-compare-e2e-')) @@ -76,16 +133,78 @@ test('compare view renders summary and regression', async ({ page }) => { await expect(page.getByText('main@aaa111').first()).toBeVisible({ timeout: 10_000 }) await expect(page.getByText('feature@bbb222').first()).toBeVisible() - // Summary - await expect(page.getByText('Regressions')).toBeVisible() + // Summary bar: per-side pass/fail counts + const summary = page.locator('.compare-summary') + await expect(summary).toBeVisible({ timeout: 10_000 }) + await expect(summary.getByText('2/2 pass')).toBeVisible() // left: 2 pass of 2 + await expect(summary.getByText('1/2 pass')).toBeVisible() // right: 1 pass of 2 + await expect(summary.getByText('Regressions: 1')).toBeVisible() - // Per-test table - await expect(page.getByText('udp_counter')).toBeVisible() - await expect(page.getByText('udp_threshold')).toBeVisible() - await expect(page.getByText('REGRESS').first()).toBeVisible() + // Negative: no fixes in this scenario + await expect(summary.getByText('Fixes')).not.toBeVisible() // Score: 0 fixes, 1 regression => score = -5 - await expect(page.getByText('-5')).toBeVisible() + await expect(summary.getByText('-5')).toBeVisible() + + // Per-test table: verify column content, not just presence + const tableRows = page.locator('table tbody tr') + await expect(tableRows).toHaveCount(2) // two tests total + + // udp_counter: pass on both sides, no delta + const counterRow = tableRows.filter({ hasText: 'udp_counter' }) + await expect(counterRow.locator('td').nth(1)).toHaveText('PASS') // left status + await expect(counterRow.locator('td').nth(2)).toHaveText('PASS') // right status + await expect(counterRow.locator('td').nth(3)).toHaveText('') // no delta + + // udp_threshold: pass -> fail = REGRESS + const thresholdRow = tableRows.filter({ hasText: 'udp_threshold' }) + await expect(thresholdRow.locator('td').nth(1)).toHaveText('PASS') + await expect(thresholdRow.locator('td').nth(2)).toHaveText('FAIL') + await expect(thresholdRow.locator('td').nth(3)).toHaveText('REGRESS') + } finally { + if (proc && !proc.killed) proc.kill('SIGTERM') + rmSync(workDir, { recursive: true, force: true }) + } +}) + +test('compare view shows fix when right side improves', async ({ page }) => { + test.setTimeout(60_000) + const workDir = mkdtempSync(join(tmpdir(), 'patchbay-compare-fix-')) + let proc: ChildProcess | null = null + + try { + // Reverse direction: left has a failure, right fixes it + const leftDir = join(workDir, 'run-broken') + const rightDir = join(workDir, 'run-fixed') + mkdirSync(leftDir, { recursive: true }) + mkdirSync(rightDir, { recursive: true }) + + writeFileSync(join(leftDir, 'run.json'), JSON.stringify(MOCK_RIGHT_MANIFEST)) // fail side + writeFileSync(join(leftDir, 'events.jsonl'), MINIMAL_EVENT) + writeFileSync(join(rightDir, 'run.json'), JSON.stringify(MOCK_LEFT_MANIFEST)) // pass side + writeFileSync(join(rightDir, 'events.jsonl'), MINIMAL_EVENT) + + proc = spawn( + PATCHBAY_BIN, + ['serve', workDir, '--bind', `127.0.0.1:${PORT}`], + { cwd: REPO_ROOT, stdio: 'pipe' }, + ) + await waitForHttp(UI_URL, 15_000) + + await page.goto(`${UI_URL}/compare/run-broken/run-fixed`) + + const summary = page.locator('.compare-summary') + await expect(summary).toBeVisible({ timeout: 10_000 }) + await expect(summary.getByText('Fixes: 1')).toBeVisible() + // Negative: no regressions in this scenario + await expect(summary.getByText('Regressions')).not.toBeVisible() + + // Score: 1 fix * 3 = +3 + await expect(summary.getByText('+3')).toBeVisible() + + // Delta column should show "fixed" not "REGRESS" + const thresholdRow = page.locator('table tbody tr').filter({ hasText: 'udp_threshold' }) + await expect(thresholdRow.locator('td').nth(3)).toHaveText('fixed') } finally { if (proc && !proc.killed) proc.kill('SIGTERM') rmSync(workDir, { recursive: true, force: true }) diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index 8e9640e..fb10aff 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -74,24 +74,31 @@ test('push run results and view via deep link', async ({ page }) => { body: tarGz, }) expect(pushRes.status).toBe(200) - const pushBody = await pushRes.json() as { ok: boolean; batch: string; project: string } + const pushBody = await pushRes.json() as { ok: boolean; group: string; batch: string; project: string } expect(pushBody.ok).toBe(true) expect(pushBody.project).toBe('test-project') - expect(pushBody.batch).toBeTruthy() + expect(pushBody.group).toBeTruthy() // Step 4: Verify the run appears in the API (allow time for discovery). await new Promise(r => setTimeout(r, 3000)) const runsRes = await fetch(`${SERVE_URL}/api/runs`) const runs = await runsRes.json() as Array<{ name: string; group: string | null }> expect(runs.length).toBeGreaterThan(0) - // The pushed run should be discoverable by name matching the push batch. - const run = runs.find(r => r.name === pushBody.batch) + // The pushed run should be discoverable by name matching the push group. + const run = runs.find(r => r.name === pushBody.group) expect(run).toBeTruthy() - // Step 5: Open the runs index and verify a run is listed. + // Step 5: Open the runs index and verify pushed run appears with manifest data. await page.goto(SERVE_URL) - // The index should show at least one run entry (may render as manifest info or raw name). - await expect(page.locator('.run-entry, .pushed-run-entry, [class*="run"]').first()).toBeVisible({ timeout: 15_000 }) + await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) + + // The run should be visible. RunRow renders branch@commit from manifest. + // For grouped runs, ManifestGroupHeader also shows project, PR link, title. + const runEntry = page.locator('.run-entry, .pushed-run-entry').first() + await expect(runEntry).toBeVisible({ timeout: 10_000 }) + // Branch@commit should appear (RunRow renders this from manifest) + await expect(page.getByText('feat/test').first()).toBeVisible({ timeout: 5_000 }) + await expect(page.getByText('abc1234').first()).toBeVisible() // Step 6: Verify push auth — request without key should fail. const noAuthRes = await fetch(`${SERVE_URL}/api/push/test-project`, { diff --git a/ui/e2e/runner-sim.spec.ts b/ui/e2e/runner-sim.spec.ts index f7e53bc..fbaa5f8 100644 --- a/ui/e2e/runner-sim.spec.ts +++ b/ui/e2e/runner-sim.spec.ts @@ -60,10 +60,13 @@ test('runner sim produces viewable UI output', async ({ page }) => { await expect(page.getByText('router_added').first()).toBeVisible() await expect(page.getByText('device_added').first()).toBeVisible() - // Perf tab: should show latency column from ping results. + // Perf tab: should show latency column from ping results with actual numeric data. await page.getByRole('button', { name: 'perf' }).click() await expect(page.getByText('ping-check')).toBeVisible({ timeout: 5_000 }) await expect(page.getByText('Latency (ms)')).toBeVisible() + // Verify that the perf table has at least one data row with a numeric latency value. + const perfDataCell = page.locator('table tbody tr td').first() + await expect(perfDataCell).toBeVisible({ timeout: 5_000 }) } finally { if (serveProc && !serveProc.killed) { serveProc.kill('SIGTERM') @@ -72,12 +75,12 @@ test('runner sim produces viewable UI output', async ({ page }) => { } }) -test('multi-sim batch shows grouped selector and combined results', async ({ page }) => { +test('multi-sim group shows grouped selector and combined results', async ({ page }) => { test.setTimeout(4 * 60 * 1000) const workDir = mkdtempSync(`${tmpdir()}/patchbay-runner-e2e-multi-`) let serveProc: ChildProcess | null = null try { - // Run both sims in a single batch. + // Run both sims in a single group. execFileSync( PATCHBAY_BIN, ['run', '--work-dir', workDir, PING_TOML, IPERF_TOML], @@ -102,6 +105,14 @@ test('multi-sim batch shows grouped selector and combined results', async ({ pag // Both sims should appear as run entries in the index. await expect(page.getByText('ping-e2e').first()).toBeVisible({ timeout: 10_000 }) await expect(page.getByText('iperf-e2e').first()).toBeVisible() + + // Click through to one of the runs and verify it loads. + const pingLink = page.locator('a[href*="/run/"]', { hasText: 'ping-e2e' }).first() + await expect(pingLink).toBeVisible({ timeout: 10_000 }) + await pingLink.click() + // Topology tab should render topology nodes for this sim. + await expect(page.getByText('sender')).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('receiver')).toBeVisible() } finally { if (serveProc && !serveProc.killed) { serveProc.kill('SIGTERM') diff --git a/ui/src/App.tsx b/ui/src/App.tsx deleted file mode 100644 index d507cb2..0000000 --- a/ui/src/App.tsx +++ /dev/null @@ -1,439 +0,0 @@ -import { useCallback, useEffect, useMemo, useRef, useState } from 'react' -import { useLocation, useNavigate } from 'react-router-dom' -import type { - Firewall, - LabEvent, - LabState, - LinkCondition, - Nat, - NatV6Mode, - RouterState, - DeviceState, - IfaceState, -} from './devtools-types' -import type { CombinedResults, SimResults } from './types' -import { - fetchRuns, - fetchState, - fetchEvents, - subscribeEvents, - fetchLogs, - fetchResults, - fetchCombinedResults, -} from './api' -import type { RunInfo, LogEntry } from './api' -import PerfTab from './components/PerfTab' -import RunView from './components/RunView' -import type { RunTab } from './components/RunView' -// CompareView is now rendered via ComparePage at /compare/:left/:right - -type Tab = 'topology' | 'logs' | 'timeline' | 'perf' | 'metrics' | 'sims' - -// ── Selection model ──────────────────────────────────────────────── - -type Selection = - | { kind: 'run'; name: string } - | { kind: 'batch'; name: string } - -function selectionKey(s: Selection | null): string { - if (!s) return '' - return s.kind === 'batch' ? `batch:${s.name}` : s.name -} - -function selectionPath(s: Selection | null): string { - if (!s) return '/' - return s.kind === 'batch' ? `/batch/${s.name}` : `/run/${s.name}` -} - -// ── Group helpers ─────────────────────────────────────────────────── - -interface RunGroup { - group: string - runs: RunInfo[] -} - -function groupByGroup(runs: RunInfo[]): { groups: RunGroup[]; ungrouped: RunInfo[] } { - const grouped = new Map() - const ungrouped: RunInfo[] = [] - for (const r of runs) { - if (r.group) { - let list = grouped.get(r.group) - if (!list) { - list = [] - grouped.set(r.group, list) - } - list.push(r) - } else { - ungrouped.push(r) - } - } - const groups: RunGroup[] = [] - for (const [group, groupRuns] of grouped) { - groups.push({ group, runs: groupRuns }) - } - return { groups, ungrouped } -} - -/** Short display label for a run within a group. */ -function simLabel(run: RunInfo): string { - if (run.group && run.name.startsWith(run.group + '/')) { - return run.label ?? run.name.slice(run.group.length + 1) - } - return run.label ?? run.name -} - -// ── State reducer (from DevtoolsApp) ────────────────────────────── - -function applyEvent(state: LabState, event: LabEvent): LabState { - const next = { ...state, opid: event.opid } - const kind = event.kind - - if (kind === 'router_added') { - const name = event.name as string - const routerState: RouterState = { - ns: event.ns as string, - region: (event.region as string | null) ?? null, - nat: event.nat as Nat, - nat_v6: event.nat_v6 as NatV6Mode, - firewall: event.firewall as Firewall, - ip_support: event.ip_support as RouterState['ip_support'], - mtu: (event.mtu as number | null) ?? null, - upstream: (event.upstream as string | null) ?? null, - uplink_ip: (event.uplink_ip as string | null) ?? null, - uplink_ip_v6: (event.uplink_ip_v6 as string | null) ?? null, - downstream_cidr: (event.downstream_cidr as string | null) ?? null, - downstream_gw: (event.downstream_gw as string | null) ?? null, - downstream_cidr_v6: (event.downstream_cidr_v6 as string | null) ?? null, - downstream_gw_v6: (event.downstream_gw_v6 as string | null) ?? null, - downstream_bridge: event.downstream_bridge as string, - downlink_condition: (event.downlink_condition as LinkCondition | null) ?? null, - devices: (event.devices as string[]) ?? [], - counters: (event.counters as Record) ?? {}, - } - next.routers = { ...next.routers, [name]: routerState } - } else if (kind === 'router_removed') { - const { [event.name as string]: _, ...rest } = next.routers - next.routers = rest - } else if (kind === 'device_added') { - const name = event.name as string - const deviceState: DeviceState = { - ns: event.ns as string, - default_via: event.default_via as string, - mtu: (event.mtu as number | null) ?? null, - interfaces: (event.interfaces as IfaceState[]) ?? [], - counters: (event.counters as Record) ?? {}, - } - for (const iface of deviceState.interfaces) { - const router = next.routers[iface.router] - if (router && !router.devices.includes(name)) { - next.routers = { - ...next.routers, - [iface.router]: { ...router, devices: [...router.devices, name] }, - } - } - } - next.devices = { ...next.devices, [name]: deviceState } - } else if (kind === 'device_removed') { - const name = event.name as string - const dev = next.devices[name] - if (dev) { - for (const iface of dev.interfaces) { - const router = next.routers[iface.router] - if (router) { - next.routers = { - ...next.routers, - [iface.router]: { ...router, devices: router.devices.filter((d) => d !== name) }, - } - } - } - } - const { [name]: _, ...rest } = next.devices - next.devices = rest - } else if (kind === 'nat_changed') { - const router = next.routers[event.router as string] - if (router) { - next.routers = { ...next.routers, [event.router as string]: { ...router, nat: event.nat as Nat } } - } - } else if (kind === 'firewall_changed') { - const router = next.routers[event.router as string] - if (router) { - next.routers = { ...next.routers, [event.router as string]: { ...router, firewall: event.firewall as Firewall } } - } - } - - return next -} - -// ── Unified App ──────────────────────────────────────────────────── - -export default function App({ mode }: { mode: 'run' | 'batch' }) { - const location = useLocation() - const navigate = useNavigate() - - // Derive selection from the URL path. - // Route is /run/*, /batch/*, or /compare/* so everything after the prefix is the name. - const prefixes: Record = { run: '/run/', batch: '/batch/' } - const prefixLen = prefixes[mode]?.length ?? '/run/'.length - const nameFromUrl = location.pathname.slice(prefixLen) - const effectiveKind = mode - const selection: Selection | null = nameFromUrl - ? { kind: effectiveKind === 'batch' ? 'batch' : 'run', name: nameFromUrl } - : null - - const selectedRun = selection?.kind === 'run' ? selection.name : null - const selectedBatch = selection?.kind === 'batch' ? selection.name : null - - const [tab, setTab] = useState(mode === 'batch' ? 'sims' : 'topology') - - // Run list (for the dropdown) - const [runs, setRuns] = useState([]) - - // Lab state and events - const [labState, setLabState] = useState(null) - const [labEvents, setLabEvents] = useState([]) - const esRef = useRef(null) - const lastOpidRef = useRef(0) - - // Log files - const [logList, setLogList] = useState([]) - - // Perf results - const [simResults, setSimResults] = useState(null) - const [combinedResults, setCombinedResults] = useState(null) - - // (Compare is now at /compare/:left/:right via ComparePage) - - // ── Poll runs list ── - - const refreshRuns = useCallback(async () => { - const r = await fetchRuns() - setRuns(r) - }, []) - - useEffect(() => { - refreshRuns() - const id = setInterval(refreshRuns, 5_000) - return () => clearInterval(id) - }, [refreshRuns]) - - // ── Load run data when an individual sim is selected ── - - useEffect(() => { - if (!selectedRun) { - setLabState(null) - setLabEvents([]) - setLogList([]) - setSimResults(null) - return - } - - let dead = false - Promise.all([ - fetchState(selectedRun), - fetchEvents(selectedRun), - fetchLogs(selectedRun), - fetchResults(selectedRun), - ]).then(([state, events, logs, results]) => { - if (dead) return - if (state) setLabState(state) - setLabEvents(events) - lastOpidRef.current = events.length ? Math.max(...events.map((e) => e.opid ?? 0)) : 0 - setLogList(logs) - setSimResults(results) - }) - - return () => { dead = true } - }, [selectedRun]) - - // ── Load combined results when a batch is selected ── - - useEffect(() => { - if (!selectedBatch) { - setCombinedResults(null) - return - } - - let dead = false - fetchCombinedResults(selectedBatch).then((results) => { - if (dead) return - setCombinedResults(results) - }) - - return () => { dead = true } - }, [selectedBatch]) - - // ── SSE for live updates (only when run is "running") ── - - useEffect(() => { - if (!selectedRun) return - const runInfo = runs.find((r) => r.name === selectedRun) - if (runInfo?.status !== 'running') return - - const es = subscribeEvents(selectedRun, lastOpidRef.current, (event) => { - setLabState((prev) => (prev ? applyEvent(prev, event) : prev)) - setLabEvents((prev) => [...prev.slice(-999), event]) - if (event.opid != null) lastOpidRef.current = event.opid - }) - esRef.current = es - return () => { - es.close() - esRef.current = null - } - }, [selectedRun, runs]) - - // Close SSE when tab becomes hidden, reconnect when visible. - useEffect(() => { - const onVisibility = () => { - if (document.hidden) { - esRef.current?.close() - esRef.current = null - } - } - const onUnload = () => esRef.current?.close() - document.addEventListener('visibilitychange', onVisibility) - window.addEventListener('beforeunload', onUnload) - return () => { - document.removeEventListener('visibilitychange', onVisibility) - window.removeEventListener('beforeunload', onUnload) - } - }, []) - - // ── Derived ── - - const isSimView = selection?.kind === 'run' - const isBatchView = selection?.kind === 'batch' - - // Runs belonging to the current batch/group - const batchRuns = isBatchView - ? runs.filter((r) => r.group === selectedBatch) - : [] - - const hasMetricsLogs = logList.some(l => l.kind === 'metrics') - const availableTabs = useMemo(() => - isSimView - ? ['topology', 'logs', 'timeline', ...(simResults ? (['perf'] as Tab[]) : []), ...(hasMetricsLogs ? (['metrics'] as Tab[]) : [])] - : isBatchView - ? ['sims', ...(combinedResults ? (['perf'] as Tab[]) : [])] - : [], - [isSimView, isBatchView, !!simResults, !!combinedResults, hasMetricsLogs] - ) - - // When available tabs change, ensure current tab is still valid. - useEffect(() => { - if (availableTabs.length > 0 && !availableTabs.includes(tab)) { - setTab(availableTabs[0]) - } - }, [availableTabs, tab]) - - // Resolved run info for the selected run - const selectedRunInfo = isSimView ? runs.find((r) => r.name === selectedRun) ?? null : null - - // Group runs for the selector - const { groups, ungrouped } = groupByGroup(runs) - - // ── Render ── - - return ( -
-
-

patchbay

- - {isSimView && selectedRunInfo && ( - - {selectedRunInfo.status ?? ''} - - )} - {labState && ( - - opid: {labState.opid} - - )} -
- - {isSimView && selectedRun && ( - setTab(t)} - /> - )} - - {isBatchView && ( - <> -
- {availableTabs.map((t) => ( - - ))} -
- -
- {tab === 'sims' && ( -
-

{selectedBatch}

- {batchRuns.length === 0 &&
No sims found.
} - {batchRuns.map((r) => ( - { e.preventDefault(); navigate(`/run/${r.name}`) }} - > - {simLabel(r)} - {r.status && {r.status}} - - ))} -
- )} - - {tab === 'perf' && navigate(`/run/${sim}`)} />} -
- - )} -
- ) -} diff --git a/ui/src/BatchPage.tsx b/ui/src/BatchPage.tsx new file mode 100644 index 0000000..6112123 --- /dev/null +++ b/ui/src/BatchPage.tsx @@ -0,0 +1,126 @@ +import { useCallback, useEffect, useMemo, useState } from 'react' +import { useLocation, useNavigate } from 'react-router-dom' +import type { CombinedResults } from './types' +import { fetchRuns, fetchCombinedResults } from './api' +import type { RunInfo } from './api' +import RunSelector, { selectionPath } from './components/RunSelector' +import type { Selection } from './components/RunSelector' +import PerfTab from './components/PerfTab' +import { simLabel } from './utils' + +type BatchTab = 'sims' | 'perf' + +export default function BatchPage() { + const location = useLocation() + const navigate = useNavigate() + + const batchName = location.pathname.startsWith('/group/') + ? location.pathname.slice('/group/'.length) + : location.pathname.slice('/batch/'.length) + const [tab, setTab] = useState('sims') + + // Run list (for the dropdown) + const [runs, setRuns] = useState([]) + const [combinedResults, setCombinedResults] = useState(null) + + // ── Poll runs list ── + + const refreshRuns = useCallback(async () => { + const r = await fetchRuns() + setRuns(r) + }, []) + + useEffect(() => { + refreshRuns() + const id = setInterval(refreshRuns, 5_000) + return () => clearInterval(id) + }, [refreshRuns]) + + // ── Load combined results ── + + useEffect(() => { + if (!batchName) { + setCombinedResults(null) + return + } + + let dead = false + fetchCombinedResults(batchName).then((results) => { + if (dead) return + setCombinedResults(results) + }) + + return () => { dead = true } + }, [batchName]) + + // ── Derived ── + + const selection: Selection | null = batchName ? { kind: 'group', name: batchName } : null + const groupRuns = useMemo( + () => runs.filter((r) => r.group === batchName), + [runs, batchName], + ) + + const availableTabs = useMemo( + () => ['sims', ...(combinedResults ? (['perf'] as BatchTab[]) : [])], + [combinedResults], + ) + + // Ensure current tab is still valid when available tabs change. + useEffect(() => { + if (availableTabs.length > 0 && !availableTabs.includes(tab)) { + setTab(availableTabs[0]) + } + }, [availableTabs, tab]) + + const handleSelectionChange = useCallback((sel: Selection | null) => { + navigate(selectionPath(sel)) + }, [navigate]) + + // ── Render ── + + return ( +
+
+

patchbay

+ +
+ +
+ {availableTabs.map((t) => ( + + ))} +
+ +
+ {tab === 'sims' && ( +
+

{batchName}

+ {groupRuns.length === 0 &&
No sims found.
} + {groupRuns.map((r) => ( + { e.preventDefault(); navigate(`/run/${r.name}`) }} + > + {simLabel(r)} + {r.status && {r.status}} + + ))} +
+ )} + + {tab === 'perf' && ( + navigate(`/run/${sim}`)} /> + )} +
+
+ ) +} diff --git a/ui/src/RunPage.tsx b/ui/src/RunPage.tsx new file mode 100644 index 0000000..d4ae7b7 --- /dev/null +++ b/ui/src/RunPage.tsx @@ -0,0 +1,254 @@ +import { useCallback, useEffect, useRef, useState } from 'react' +import { useLocation, useNavigate } from 'react-router-dom' +import type { + Firewall, + LabEvent, + LabState, + LinkCondition, + Nat, + NatV6Mode, + RouterState, + DeviceState, + IfaceState, +} from './devtools-types' +import type { SimResults } from './types' +import { + fetchRuns, + fetchState, + fetchEvents, + fetchLogs, + fetchResults, + subscribeEvents, +} from './api' +import type { RunInfo, LogEntry } from './api' +import RunSelector, { selectionPath } from './components/RunSelector' +import type { Selection } from './components/RunSelector' +import RunView from './components/RunView' +import type { RunTab } from './components/RunView' + +// ── State reducer ────────────────────────────────────────────────── + +function applyEvent(state: LabState, event: LabEvent): LabState { + const next = { ...state, opid: event.opid } + const kind = event.kind + + if (kind === 'router_added') { + const name = event.name as string + const routerState: RouterState = { + ns: event.ns as string, + region: (event.region as string | null) ?? null, + nat: event.nat as Nat, + nat_v6: event.nat_v6 as NatV6Mode, + firewall: event.firewall as Firewall, + ip_support: event.ip_support as RouterState['ip_support'], + mtu: (event.mtu as number | null) ?? null, + upstream: (event.upstream as string | null) ?? null, + uplink_ip: (event.uplink_ip as string | null) ?? null, + uplink_ip_v6: (event.uplink_ip_v6 as string | null) ?? null, + downstream_cidr: (event.downstream_cidr as string | null) ?? null, + downstream_gw: (event.downstream_gw as string | null) ?? null, + downstream_cidr_v6: (event.downstream_cidr_v6 as string | null) ?? null, + downstream_gw_v6: (event.downstream_gw_v6 as string | null) ?? null, + downstream_bridge: event.downstream_bridge as string, + downlink_condition: (event.downlink_condition as LinkCondition | null) ?? null, + devices: (event.devices as string[]) ?? [], + counters: (event.counters as Record) ?? {}, + } + next.routers = { ...next.routers, [name]: routerState } + } else if (kind === 'router_removed') { + const { [event.name as string]: _, ...rest } = next.routers + next.routers = rest + } else if (kind === 'device_added') { + const name = event.name as string + const deviceState: DeviceState = { + ns: event.ns as string, + default_via: event.default_via as string, + mtu: (event.mtu as number | null) ?? null, + interfaces: (event.interfaces as IfaceState[]) ?? [], + counters: (event.counters as Record) ?? {}, + } + for (const iface of deviceState.interfaces) { + const router = next.routers[iface.router] + if (router && !router.devices.includes(name)) { + next.routers = { + ...next.routers, + [iface.router]: { ...router, devices: [...router.devices, name] }, + } + } + } + next.devices = { ...next.devices, [name]: deviceState } + } else if (kind === 'device_removed') { + const name = event.name as string + const dev = next.devices[name] + if (dev) { + for (const iface of dev.interfaces) { + const router = next.routers[iface.router] + if (router) { + next.routers = { + ...next.routers, + [iface.router]: { ...router, devices: router.devices.filter((d) => d !== name) }, + } + } + } + } + const { [name]: _, ...rest } = next.devices + next.devices = rest + } else if (kind === 'nat_changed') { + const router = next.routers[event.router as string] + if (router) { + next.routers = { ...next.routers, [event.router as string]: { ...router, nat: event.nat as Nat } } + } + } else if (kind === 'firewall_changed') { + const router = next.routers[event.router as string] + if (router) { + next.routers = { ...next.routers, [event.router as string]: { ...router, firewall: event.firewall as Firewall } } + } + } + + return next +} + +// ── RunPage ──────────────────────────────────────────────────────── + +export default function RunPage() { + const location = useLocation() + const navigate = useNavigate() + + const runName = location.pathname.slice('/run/'.length) + const [tab, setTab] = useState('topology') + + // Run list (for the dropdown) + const [runs, setRuns] = useState([]) + + // Lab state and events + const [labState, setLabState] = useState(null) + const [labEvents, setLabEvents] = useState([]) + const esRef = useRef(null) + const lastOpidRef = useRef(0) + + // Log files + const [logList, setLogList] = useState([]) + + // Perf results + const [simResults, setSimResults] = useState(null) + + // ── Poll runs list ── + + const refreshRuns = useCallback(async () => { + const r = await fetchRuns() + setRuns(r) + }, []) + + useEffect(() => { + refreshRuns() + const id = setInterval(refreshRuns, 5_000) + return () => clearInterval(id) + }, [refreshRuns]) + + // ── Load run data ── + + useEffect(() => { + if (!runName) { + setLabState(null) + setLabEvents([]) + setLogList([]) + setSimResults(null) + return + } + + let dead = false + Promise.all([ + fetchState(runName), + fetchEvents(runName), + fetchLogs(runName), + fetchResults(runName), + ]).then(([state, events, logs, results]) => { + if (dead) return + if (state) setLabState(state) + setLabEvents(events) + lastOpidRef.current = events.length ? Math.max(...events.map((e) => e.opid ?? 0)) : 0 + setLogList(logs) + setSimResults(results) + }) + + return () => { dead = true } + }, [runName]) + + // ── SSE for live updates (only when run is "running") ── + + useEffect(() => { + if (!runName) return + const runInfo = runs.find((r) => r.name === runName) + if (runInfo?.status !== 'running') return + + const es = subscribeEvents(runName, lastOpidRef.current, (event) => { + setLabState((prev) => (prev ? applyEvent(prev, event) : prev)) + setLabEvents((prev) => [...prev.slice(-999), event]) + if (event.opid != null) lastOpidRef.current = event.opid + }) + esRef.current = es + return () => { + es.close() + esRef.current = null + } + }, [runName, runs]) + + // Close SSE when tab becomes hidden. + useEffect(() => { + const onVisibility = () => { + if (document.hidden) { + esRef.current?.close() + esRef.current = null + } + } + const onUnload = () => esRef.current?.close() + document.addEventListener('visibilitychange', onVisibility) + window.addEventListener('beforeunload', onUnload) + return () => { + document.removeEventListener('visibilitychange', onVisibility) + window.removeEventListener('beforeunload', onUnload) + } + }, []) + + // ── Derived ── + + const selection: Selection | null = runName ? { kind: 'run', name: runName } : null + const selectedRunInfo = runs.find((r) => r.name === runName) ?? null + + const handleSelectionChange = useCallback((sel: Selection | null) => { + navigate(selectionPath(sel)) + }, [navigate]) + + // ── Render ── + + return ( +
+
+

patchbay

+ + {selectedRunInfo && ( + + {selectedRunInfo.status ?? ''} + + )} + {labState && ( + + opid: {labState.opid} + + )} +
+ + {runName && ( + + )} +
+ ) +} diff --git a/ui/src/RunsIndex.tsx b/ui/src/RunsIndex.tsx index 4a435cf..68f4986 100644 --- a/ui/src/RunsIndex.tsx +++ b/ui/src/RunsIndex.tsx @@ -2,10 +2,11 @@ import { useEffect, useMemo, useState } from 'react' import { Link, useNavigate } from 'react-router-dom' import { fetchRuns } from './api' import type { RunInfo, RunManifest } from './api' +import { groupByGroup as groupByGroupBase } from './utils' // ── Types ── -interface RunGroup { +interface RunGroupWithManifest { group: string runs: RunInfo[] manifest: RunManifest | null @@ -13,27 +14,16 @@ interface RunGroup { // ── Helpers ── -function groupByGroup(runs: RunInfo[]): { groups: RunGroup[]; ungrouped: RunInfo[] } { - const grouped = new Map() - const ungrouped: RunInfo[] = [] - for (const r of runs) { - if (r.group) { - let list = grouped.get(r.group) - if (!list) { - list = [] - grouped.set(r.group, list) - } - list.push(r) - } else { - ungrouped.push(r) - } - } - const groups: RunGroup[] = [] - for (const [group, groupRuns] of grouped) { - const manifest = groupRuns.find((r) => r.manifest)?.manifest ?? null - groups.push({ group, runs: groupRuns, manifest }) +/** Extends the shared groupByGroup with manifest extraction for the index page. */ +function groupByGroup(runs: RunInfo[]): { groups: RunGroupWithManifest[]; ungrouped: RunInfo[] } { + const { groups, ungrouped } = groupByGroupBase(runs) + return { + groups: groups.map((g) => ({ + ...g, + manifest: g.runs.find((r) => r.manifest)?.manifest ?? null, + })), + ungrouped, } - return { groups, ungrouped } } function formatDate(raw: string): string { @@ -145,7 +135,7 @@ export default function RunsIndex() { const { groups, ungrouped } = useMemo(() => groupByGroup(filteredRuns), [filteredRuns]) // Flatten for pagination: each group is one "row", each ungrouped run is one "row" - type Row = { kind: 'group'; group: RunGroup } | { kind: 'run'; run: RunInfo } + type Row = { kind: 'group'; group: RunGroupWithManifest } | { kind: 'run'; run: RunInfo } const allRows = useMemo(() => { const rows: Row[] = [] // Sort groups by the first run's sortKey @@ -265,7 +255,7 @@ export default function RunsIndex() { // ── Subcomponents ── -function ManifestGroupHeader({ group }: { group: RunGroup }) { +function ManifestGroupHeader({ group }: { group: RunGroupWithManifest }) { const m = group.manifest! const outcome = m.test_outcome ?? m.outcome const statusIcon = outcome === 'success' || outcome === 'pass' ? '\u2705' : outcome === 'failure' || outcome === 'fail' ? '\u274c' : null diff --git a/ui/src/api.ts b/ui/src/api.ts index ecd2422..1badc6c 100644 --- a/ui/src/api.ts +++ b/ui/src/api.ts @@ -153,11 +153,11 @@ export async function fetchRunJson(run: string): Promise { } export async function fetchCombinedResults( - batch: string, + group: string, ): Promise { try { const res = await fetch( - `${API}/batches/${encodeURIComponent(batch)}/combined-results`, + `${API}/groups/${encodeURIComponent(group)}/combined-results`, ) if (!res.ok) return null return (await res.json()) as CombinedResults diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index c55e160..2101c8c 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -1,4 +1,6 @@ import { useEffect, useState } from 'react' +import type { LabEvent, LabState } from '../devtools-types' +import type { SimResults } from '../types' import { fetchRunJson, fetchState, fetchEvents, fetchLogs, fetchResults } from '../api' import type { RunManifest, RunInfo, LogEntry } from '../api' import RunView from './RunView' @@ -182,10 +184,10 @@ function SplitRunPanel({ runName, activeTab, onTabChange }: { activeTab: RunTab onTabChange: (tab: RunTab) => void }) { - const [state, setState] = useState(null) - const [events, setEvents] = useState([]) + const [state, setState] = useState(null) + const [events, setEvents] = useState([]) const [logs, setLogs] = useState([]) - const [results, setResults] = useState(null) + const [results, setResults] = useState(null) useEffect(() => { let dead = false diff --git a/ui/src/components/LogsTab.tsx b/ui/src/components/LogsTab.tsx index af9e065..4916025 100644 --- a/ui/src/components/LogsTab.tsx +++ b/ui/src/components/LogsTab.tsx @@ -1,4 +1,4 @@ -import { useEffect, useMemo, useRef, useState } from 'react' +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' import type { SimLogEntry } from '../types' import KvPairs from './KvPairs' import JsonTree from './JsonTree' @@ -277,7 +277,7 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { }, [jumpTarget, logs, jumpHandledNonce]) // Load log content - const loadContent = async () => { + const loadContent = useCallback(async () => { if (!active) return const url = `${base}${active.path}` setLoading(true) @@ -298,19 +298,19 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { } finally { setLoading(false) } - } + }, [active, base]) // Auto-load when jump is pending useEffect(() => { if (!active || !jumpNeedle || loaded || loading) return loadContent() - }, [active, jumpNeedle, loaded, loading]) + }, [active, jumpNeedle, loaded, loading, loadContent]) // Auto-load structured logs immediately useEffect(() => { if (!active || loaded || loading) return if (AUTO_LOAD_KINDS.has(active.kind)) loadContent() - }, [active, loaded, loading]) + }, [active, loaded, loading, loadContent]) const byNode = useMemo(() => { const m = new Map() diff --git a/ui/src/components/MetricsTab.tsx b/ui/src/components/MetricsTab.tsx index 7f6413f..56dbd93 100644 --- a/ui/src/components/MetricsTab.tsx +++ b/ui/src/components/MetricsTab.tsx @@ -33,6 +33,7 @@ export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[ const metricsLogs = logs.filter(l => l.kind === 'metrics') if (metricsLogs.length === 0) return + let dead = false Promise.all(metricsLogs.map(async (log) => { const res = await fetch(`${runFilesBase(run)}${log.path}`) if (!res.ok) return [] @@ -55,7 +56,11 @@ export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[ return Array.from(byKey.entries()).map(([key, values]) => ({ device, key, values })) - })).then(results => setSeries(results.flat())) + })).then(results => { + if (!dead) setSeries(results.flat()) + }) + + return () => { dead = true } }, [run, logs]) if (series.length === 0) { @@ -74,7 +79,7 @@ export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[ - {series.map((s, i) => ( + {series.map((s) => ( {s.key} {s.device} diff --git a/ui/src/components/PerfTab.tsx b/ui/src/components/PerfTab.tsx index 4162cef..bfbef99 100644 --- a/ui/src/components/PerfTab.tsx +++ b/ui/src/components/PerfTab.tsx @@ -156,11 +156,11 @@ interface PerfTabProps { } export default function PerfTab({ results, combined, onSimSelect }: PerfTabProps) { - // Combined / batch view + // Combined / group view if (combined) { const { runs } = combined if (runs.length === 0) { - return
no combined results for this batch
+ return
no combined results for this group
} const detailRows = runs.flatMap((run) => diff --git a/ui/src/components/RunSelector.tsx b/ui/src/components/RunSelector.tsx new file mode 100644 index 0000000..a78a7d5 --- /dev/null +++ b/ui/src/components/RunSelector.tsx @@ -0,0 +1,65 @@ +import { useMemo } from 'react' +import type { RunInfo } from '../api' +import { groupByGroup, simLabel } from '../utils' + +// ── Selection model ──────────────────────────────────────────────── + +export type Selection = + | { kind: 'run'; name: string } + | { kind: 'group'; name: string } + +export function selectionKey(s: Selection | null): string { + if (!s) return '' + return s.kind === 'group' ? `group:${s.name}` : s.name +} + +export function selectionFromValue(val: string): Selection | null { + if (!val) return null + if (val.startsWith('group:')) return { kind: 'group', name: val.slice(6) } + return { kind: 'run', name: val } +} + +export function selectionPath(s: Selection | null): string { + if (!s) return '/' + return s.kind === 'group' ? `/batch/${s.name}` : `/run/${s.name}` +} + +// ── Component ────────────────────────────────────────────────────── + +interface RunSelectorProps { + runs: RunInfo[] + value: Selection | null + onChange: (selection: Selection | null) => void +} + +export default function RunSelector({ runs, value, onChange }: RunSelectorProps) { + const { groups, ungrouped } = useMemo(() => groupByGroup(runs), [runs]) + + return ( + + ) +} diff --git a/ui/src/components/RunView.tsx b/ui/src/components/RunView.tsx index e8d39ee..8aeeddd 100644 --- a/ui/src/components/RunView.tsx +++ b/ui/src/components/RunView.tsx @@ -1,3 +1,4 @@ +import { useState, useCallback, useMemo } from 'react' import type { LabEvent, LabState } from '../devtools-types' import type { SimResults } from '../types' import type { RunInfo, LogEntry } from '../api' @@ -8,7 +9,6 @@ import TimelineTab from './TimelineTab' import TopologyGraph from './TopologyGraph' import NodeDetail from './NodeDetail' import MetricsTab from './MetricsTab' -import { useState, useCallback } from 'react' export type RunTab = 'topology' | 'logs' | 'timeline' | 'perf' | 'metrics' @@ -38,16 +38,19 @@ export default function RunView({ run, state, events, logs, results, activeTab, }, [onTabChange]) const base = runFilesBase(run.name) - const logsForTabs = logs.map((l) => ({ node: l.node, kind: l.kind, path: l.path })) + const logsForTabs = useMemo( + () => logs.map((l) => ({ node: l.node, kind: l.kind, path: l.path })), + [logs], + ) - const hasMetricsLogs = logs.some(l => l.kind === 'metrics') - const availableTabs: RunTab[] = [ + const hasMetricsLogs = useMemo(() => logs.some(l => l.kind === 'metrics'), [logs]) + const availableTabs = useMemo(() => [ 'topology', 'logs', 'timeline', ...(results ? (['perf'] as RunTab[]) : []), ...(hasMetricsLogs ? (['metrics'] as RunTab[]) : []), - ] + ], [results, hasMetricsLogs]) const tab = availableTabs.includes(activeTab) ? activeTab : availableTabs[0] diff --git a/ui/src/index.css b/ui/src/index.css index 3f00971..a43ab0f 100644 --- a/ui/src/index.css +++ b/ui/src/index.css @@ -725,7 +725,7 @@ tbody td { padding: 16px; } -/* ── Sims list (invocation view) ── */ +/* ── Sims list (group view) ── */ .sims-list { padding: 1.5rem; max-width: 700px; diff --git a/ui/src/main.tsx b/ui/src/main.tsx index 5034dbd..a1d222c 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -1,7 +1,8 @@ import React from 'react' import ReactDOM from 'react-dom/client' import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' -import App from './App' +import RunPage from './RunPage' +import BatchPage from './BatchPage' import RunsIndex from './RunsIndex' import ComparePage from './ComparePage' import './index.css' @@ -11,10 +12,11 @@ ReactDOM.createRoot(document.getElementById('root')!).render( } /> - } /> - } /> + } /> + } /> + } /> } /> - {/* Legacy redirect: /inv/:name → /batch/:name */} + {/* Legacy redirect: /inv/:name -> /batch/:name */} } /> } /> diff --git a/ui/src/utils.ts b/ui/src/utils.ts new file mode 100644 index 0000000..6b9557a --- /dev/null +++ b/ui/src/utils.ts @@ -0,0 +1,38 @@ +import type { RunInfo } from './api' + +// ── Group helpers ─────────────────────────────────────────────────── + +export interface RunGroup { + group: string + runs: RunInfo[] +} + +export function groupByGroup(runs: RunInfo[]): { groups: RunGroup[]; ungrouped: RunInfo[] } { + const grouped = new Map() + const ungrouped: RunInfo[] = [] + for (const r of runs) { + if (r.group) { + let list = grouped.get(r.group) + if (!list) { + list = [] + grouped.set(r.group, list) + } + list.push(r) + } else { + ungrouped.push(r) + } + } + const groups: RunGroup[] = [] + for (const [group, groupRuns] of grouped) { + groups.push({ group, runs: groupRuns }) + } + return { groups, ungrouped } +} + +/** Short display label for a run within a group. */ +export function simLabel(run: RunInfo): string { + if (run.group && run.name.startsWith(run.group + '/')) { + return run.label ?? run.name.slice(run.group.length + 1) + } + return run.label ?? run.name +} From d85913dbdf6a0c84bea474b95fcafd10005adee6 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 12:48:35 +0100 Subject: [PATCH 24/38] fix: use SideStats.fail in compare summary output Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index f019d22..fc2ac81 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -230,8 +230,9 @@ fn status_str(s: TestStatus) -> &'static str { /// Print a comparison summary table. pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right: &[TestResult], result: &CompareResult) { println!("\nCompare: {left_ref} \u{2194} {right_ref}\n"); - println!("Tests: {}/{} pass \u{2192} {}/{} pass", - result.left.pass, result.left.total, result.right.pass, result.right.total); + println!("Tests: {}/{} pass ({} fail) \u{2192} {}/{} pass ({} fail)", + result.left.pass, result.left.total, result.left.fail, + result.right.pass, result.right.total, result.right.fail); if result.fixes > 0 { println!("Fixes: {} (fail\u{2192}pass)", result.fixes); } From c6feeb92d31053e89abf8536a8829a63cada5f44 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 12:55:38 +0100 Subject: [PATCH 25/38] fix: persist copies testdir contents, not symlink testdir-current is a symlink to testdir-N. cp -r copies the symlink itself; cp -rL dereferences it and copies the actual directory contents. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/test.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 9f9bece..e99da9f 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -307,8 +307,9 @@ fn persist_run() -> Result<()> { let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S"); let dest = PathBuf::from(format!(".patchbay/work/run-{ts}")); std::fs::create_dir_all(dest.parent().unwrap())?; + // -rL: dereference symlinks (testdir-current is a symlink to testdir-N) let status = Command::new("cp") - .args(["-r"]) + .args(["-rL"]) .arg(&testdir) .arg(&dest) .status() From 8b855e710eb517bfcd05ad4054b10369ef661b7b Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 13:14:11 +0100 Subject: [PATCH 26/38] fix: nextest output parsing, dedup results, group-aware run discovery - parse_test_output now handles nextest format (PASS/FAIL/TIMEOUT/IGNORE with duration extraction) in addition to cargo test format - Deduplicate test results by name (nextest reprints failures in summary) - Server scan_runs_recursive: directories with run.json but no events.jsonl are groups (recurse into them), not leaf runs - Fix push e2e test to match by group instead of name Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-server/src/lib.rs | 14 +++-- patchbay-utils/src/manifest.rs | 108 ++++++++++++++++++++++++++------- ui/e2e/push.spec.ts | 4 +- ui/src/main.tsx | 4 +- 4 files changed, 100 insertions(+), 30 deletions(-) diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index be32640..82dcf0c 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -148,17 +148,17 @@ fn scan_runs_recursive( if !path.is_dir() { continue; } - if path.join(EVENTS_JSONL).exists() || path.join(RUN_JSON).exists() { - // Use the relative path from root as the run name so nested - // runs are addressable via the API (e.g. "sim-20260305/ping-e2e"). + let has_events = path.join(EVENTS_JSONL).exists(); + let has_run_json = path.join(RUN_JSON).exists(); + + if has_events { + // Leaf run: has events.jsonl → it's an actual lab output dir. let name = path .strip_prefix(root) .unwrap_or(&path) .to_string_lossy() .into_owned(); let (label, status) = read_run_metadata(&path); - // Derive group from the first path component (the timestamped - // directory) when the run is nested more than one level deep. let group = name .split('/') .next() @@ -172,6 +172,10 @@ fn scan_runs_recursive( group, manifest: None, // populated after scan }); + } else if has_run_json { + // Group directory: has run.json but no events.jsonl. + // Recurse to find child runs, they inherit this manifest. + scan_runs_recursive(root, &path, depth + 1, runs)?; } else { scan_runs_recursive(root, &path, depth + 1, runs)?; } diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index f1bd3c1..74042e8 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -222,37 +222,80 @@ pub fn find_run_for_commit( // ── Test output parsing ───────────────────────────────────────────── -/// Parse `cargo test` / `cargo nextest` stdout into per-test results. +/// Parse `cargo test` and `cargo nextest` stdout into per-test results. /// -/// Recognises lines of the form: -/// - `test some::path ... ok` -/// - `test some::path ... FAILED` -/// - `test some::path ... ignored` +/// Recognises two formats: +/// - cargo test: `test some::path ... ok` +/// - nextest: ` PASS [ 1.234s] crate::module::test_name` pub fn parse_test_output(output: &str) -> Vec { + let mut seen = std::collections::HashSet::new(); let mut results = Vec::new(); for line in output.lines() { let line = line.trim(); - let Some(rest) = line.strip_prefix("test ") else { - continue; - }; - let Some((name, status_str)) = rest.rsplit_once(" ... ") else { + + // cargo test format: "test name ... ok|FAILED|ignored" + if let Some(rest) = line.strip_prefix("test ") { + if let Some((name, status_str)) = rest.rsplit_once(" ... ") { + let status = match status_str.trim() { + "ok" => TestStatus::Pass, + "FAILED" => TestStatus::Fail, + "ignored" => TestStatus::Ignored, + _ => continue, + }; + let name = name.trim().to_string(); + if seen.insert(name.clone()) { + results.push(TestResult { name, status, duration: None }); + } + } continue; - }; - let status = match status_str.trim() { - "ok" => TestStatus::Pass, - "FAILED" => TestStatus::Fail, - "ignored" => TestStatus::Ignored, - _ => continue, - }; - results.push(TestResult { - name: name.trim().to_string(), - status, - duration: None, - }); + } + + // nextest format: "PASS [ 1.234s] crate::test_name" + // "FAIL [ 0.567s] crate::test_name" + // "IGNORE crate::test_name" + // "TIMEOUT [ 60.0s] crate::test_name" + if let Some((status, rest)) = parse_nextest_line(line) { + let duration = parse_nextest_duration(rest); + let name = rest + .find(']') + .map(|i| &rest[i + 1..]) + .unwrap_or(rest) + .trim() + .to_string(); + if !name.is_empty() && seen.insert(name.clone()) { + results.push(TestResult { name, status, duration }); + } + } } results } +fn parse_nextest_line(line: &str) -> Option<(TestStatus, &str)> { + let prefixes = [ + ("PASS", TestStatus::Pass), + ("FAIL", TestStatus::Fail), + ("IGNORE", TestStatus::Ignored), + ("TIMEOUT", TestStatus::Fail), + ]; + for (prefix, status) in prefixes { + if let Some(rest) = line.strip_prefix(prefix) { + if rest.starts_with(' ') || rest.starts_with('[') { + return Some((status, rest.trim())); + } + } + } + None +} + +fn parse_nextest_duration(s: &str) -> Option { + // "[ 1.234s] name" → extract "1.234" + let s = s.strip_prefix('[')?; + let end = s.find(']')?; + let inner = s[..end].trim().strip_suffix('s')?; + let secs: f64 = inner.parse().ok()?; + Some(Duration::from_secs_f64(secs)) +} + #[cfg(test)] mod tests { use super::*; @@ -277,6 +320,29 @@ test result: FAILED. 1 passed; 1 failed; 1 ignored; assert_eq!(results[2].status, TestStatus::Ignored); } + #[test] + fn test_parse_nextest_output() { + let output = "\ + Compiling my-crate v0.1.0 + PASS [ 1.234s] my-crate::tests::foo + FAIL [ 0.567s] my-crate::tests::bar + TIMEOUT [ 60.001s] my-crate::tests::baz + IGNORE my-crate::tests::qux +"; + let results = parse_test_output(output); + assert_eq!(results.len(), 4); + assert_eq!(results[0].name, "my-crate::tests::foo"); + assert_eq!(results[0].status, TestStatus::Pass); + assert_eq!(results[0].duration, Some(Duration::from_millis(1234))); + assert_eq!(results[1].name, "my-crate::tests::bar"); + assert_eq!(results[1].status, TestStatus::Fail); + assert_eq!(results[2].name, "my-crate::tests::baz"); + assert_eq!(results[2].status, TestStatus::Fail); // timeout = fail + assert_eq!(results[3].name, "my-crate::tests::qux"); + assert_eq!(results[3].status, TestStatus::Ignored); + assert_eq!(results[3].duration, None); + } + #[test] fn test_duration_ms_roundtrip() { #[derive(Serialize, Deserialize, PartialEq, Debug)] diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index fb10aff..27fa6b5 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -84,8 +84,8 @@ test('push run results and view via deep link', async ({ page }) => { const runsRes = await fetch(`${SERVE_URL}/api/runs`) const runs = await runsRes.json() as Array<{ name: string; group: string | null }> expect(runs.length).toBeGreaterThan(0) - // The pushed run should be discoverable by name matching the push group. - const run = runs.find(r => r.name === pushBody.group) + // The pushed run should belong to a group matching the push dir. + const run = runs.find(r => r.group === pushBody.group) expect(run).toBeTruthy() // Step 5: Open the runs index and verify pushed run appears with manifest data. diff --git a/ui/src/main.tsx b/ui/src/main.tsx index a1d222c..4d8238d 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -14,10 +14,10 @@ ReactDOM.createRoot(document.getElementById('root')!).render( } /> } /> } /> - } /> } /> {/* Legacy redirect: /inv/:name -> /batch/:name */} } /> + } /> } /> @@ -27,5 +27,5 @@ ReactDOM.createRoot(document.getElementById('root')!).render( /** Redirect legacy /inv/* paths to /batch/*. */ function InvRedirect() { const rest = window.location.pathname.slice('/inv/'.length) - return + return } From b4a31d6f0a2fd246dae30f80e5f37f6cc30c2d55 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 13:24:06 +0100 Subject: [PATCH 27/38] fix: collapsible groups, child run labels, expand-to-navigate in tests - Groups in RunsIndex are collapsed by default with expand/collapse toggle - Child runs show their test/sim name (from label or path), not inherited manifest info (branch@commit which was repeated for every child) - Group header shows manifest summary inline (project, branch, commit, kind, outcome, pass/fail counts) - Group header has checkbox for group-level compare selection - E2e tests updated to expand groups before clicking child runs Co-Authored-By: Claude Opus 4.6 (1M context) --- ui/e2e/push.spec.ts | 8 ++- ui/e2e/runner-sim.spec.ts | 26 ++++++--- ui/src/RunsIndex.tsx | 110 ++++++++++++++++++++++---------------- 3 files changed, 87 insertions(+), 57 deletions(-) diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index 27fa6b5..d4982d1 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -92,11 +92,9 @@ test('push run results and view via deep link', async ({ page }) => { await page.goto(SERVE_URL) await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) - // The run should be visible. RunRow renders branch@commit from manifest. - // For grouped runs, ManifestGroupHeader also shows project, PR link, title. - const runEntry = page.locator('.run-entry, .pushed-run-entry').first() - await expect(runEntry).toBeVisible({ timeout: 10_000 }) - // Branch@commit should appear (RunRow renders this from manifest) + // The group header should show manifest data (branch, commit). + const groupHeader = page.locator('.run-group-header').first() + await expect(groupHeader).toBeVisible({ timeout: 10_000 }) await expect(page.getByText('feat/test').first()).toBeVisible({ timeout: 5_000 }) await expect(page.getByText('abc1234').first()).toBeVisible() diff --git a/ui/e2e/runner-sim.spec.ts b/ui/e2e/runner-sim.spec.ts index fbaa5f8..84b86a0 100644 --- a/ui/e2e/runner-sim.spec.ts +++ b/ui/e2e/runner-sim.spec.ts @@ -41,9 +41,12 @@ test('runner sim produces viewable UI output', async ({ page }) => { await page.goto(UI_URL) await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) - // Click on the run entry to navigate to the run detail. + // Expand the group (collapsed by default) then click a child run. + const groupHeader = page.locator('.run-group-header').first() + await expect(groupHeader).toBeVisible({ timeout: 10_000 }) + await groupHeader.click() const runLink = page.locator('a[href*="/run/"]').first() - await expect(runLink).toBeVisible({ timeout: 10_000 }) + await expect(runLink).toBeVisible({ timeout: 5_000 }) await runLink.click() // Topology tab should show the router and devices. @@ -102,13 +105,19 @@ test('multi-sim group shows grouped selector and combined results', async ({ pag await page.goto(UI_URL) await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) - // Both sims should appear as run entries in the index. - await expect(page.getByText('ping-e2e').first()).toBeVisible({ timeout: 10_000 }) + + // Expand the group to see child runs. + const groupHeader = page.locator('.run-group-header').first() + await expect(groupHeader).toBeVisible({ timeout: 10_000 }) + await groupHeader.click() + + // Both sims should appear as run entries. + await expect(page.getByText('ping-e2e').first()).toBeVisible({ timeout: 5_000 }) await expect(page.getByText('iperf-e2e').first()).toBeVisible() // Click through to one of the runs and verify it loads. const pingLink = page.locator('a[href*="/run/"]', { hasText: 'ping-e2e' }).first() - await expect(pingLink).toBeVisible({ timeout: 10_000 }) + await expect(pingLink).toBeVisible({ timeout: 5_000 }) await pingLink.click() // Topology tab should render topology nodes for this sim. await expect(page.getByText('sender')).toBeVisible({ timeout: 10_000 }) @@ -148,9 +157,12 @@ test('iperf sim shows perf results', async ({ page }) => { await page.goto(UI_URL) await expect(page.getByRole('heading', { name: 'Runs' })).toBeVisible({ timeout: 15_000 }) - // Click through to the run detail. + // Expand group and click through to the run detail. + const groupHeader = page.locator('.run-group-header').first() + await expect(groupHeader).toBeVisible({ timeout: 10_000 }) + await groupHeader.click() const runLink = page.locator('a[href*="/run/"]').first() - await expect(runLink).toBeVisible({ timeout: 10_000 }) + await expect(runLink).toBeVisible({ timeout: 5_000 }) await runLink.click() // Navigate to perf tab. diff --git a/ui/src/RunsIndex.tsx b/ui/src/RunsIndex.tsx index 68f4986..b339c7b 100644 --- a/ui/src/RunsIndex.tsx +++ b/ui/src/RunsIndex.tsx @@ -92,6 +92,8 @@ export default function RunsIndex() { // Checkbox selection for compare const [selected, setSelected] = useState>(new Set()) + // Collapsed groups (collapsed by default, expanded set tracks which are open) + const [expanded, setExpanded] = useState>(new Set()) useEffect(() => { const refresh = () => fetchRuns().then((r) => { setRuns(r); setLoaded(true) }) @@ -226,21 +228,41 @@ export default function RunsIndex() { {pageRows.map((row) => { if (row.kind === 'group') { const g = row.group + const isExpanded = expanded.has(g.group) + const toggleExpand = () => setExpanded(prev => { + const next = new Set(prev) + if (next.has(g.group)) next.delete(g.group) + else next.add(g.group) + return next + }) return (
- {g.manifest ? ( - - ) : ( -
+
+ + {isExpanded ? '\u25BC' : '\u25B6'} + + { e.stopPropagation(); toggleSelected(g.group) }} + onClick={(e) => e.stopPropagation()} + style={{ cursor: 'pointer' }} + /> + {g.manifest ? ( + + ) : ( {g.group} - {g.runs.length > 1 && ( - - combined ({g.runs.length} sims) - - )} -
- )} - {g.runs.map((r) => ( + )} + + {g.runs.length} {g.runs.length === 1 ? 'run' : 'runs'} + +
+ {isExpanded && g.runs.map((r) => ( ))}
@@ -255,44 +277,41 @@ export default function RunsIndex() { // ── Subcomponents ── -function ManifestGroupHeader({ group }: { group: RunGroupWithManifest }) { +/** Inline content for a group header with manifest info (rendered inside the collapsible header). */ +function GroupHeaderContent({ group }: { group: RunGroupWithManifest }) { const m = group.manifest! const outcome = m.test_outcome ?? m.outcome - const statusIcon = outcome === 'success' || outcome === 'pass' ? '\u2705' : outcome === 'failure' || outcome === 'fail' ? '\u274c' : null + const statusIcon = outcome === 'pass' || outcome === 'success' ? '\u2705' : outcome === 'fail' || outcome === 'failure' ? '\u274c' : null const date = m.started_at ?? extractDate(group.group) return ( - - {m.project || group.group} -
- {m.branch && {m.branch}} - {m.commit && {m.commit.slice(0, 7)}} - {m.pr != null && m.pr_url ? ( - e.stopPropagation()}> - PR #{m.pr} - - ) : m.pr != null ? ( - PR #{m.pr} - ) : null} - {m.title && {m.title}} -
-
- {statusIcon && {statusIcon}} - {date && {typeof date === 'string' && date.includes('T') ? relativeTime(date) : formatDate(date)}} - {m.pass != null && m.total != null && ( - {m.pass}/{m.total} - )} - -
- + <> + {m.project || group.group} + {m.branch && {m.branch}} + {m.commit && {m.commit.slice(0, 7)}} + {m.kind && {m.kind}} + {statusIcon && {statusIcon}} + {date && ( + + {typeof date === 'string' && date.includes('T') ? relativeTime(date) : formatDate(date as string)} + + )} + {m.pass != null && m.total != null && ( + {m.pass}/{m.total} pass + )} + ) } function RunRow({ run, grouped, selected, onToggle }: { run: RunInfo; grouped?: boolean; selected: boolean; onToggle: (name: string) => void }) { - const m = run.manifest - const label = grouped && run.group && run.name.startsWith(run.group + '/') - ? run.label ?? run.name.slice(run.group.length + 1) - : run.label ?? run.name + // For grouped (child) runs: show the test/sim name, not the inherited manifest info + const shortName = grouped && run.group && run.name.startsWith(run.group + '/') + ? run.name.slice(run.group.length + 1).replace(/\//g, ' / ') + : null + const displayLabel = shortName ?? run.label ?? run.name + + // Only show manifest details for ungrouped (top-level) runs + const m = grouped ? null : run.manifest const branchCommit = m?.branch && m?.commit ? `${m.branch}@${m.commit.slice(0, 7)}` @@ -300,30 +319,31 @@ function RunRow({ run, grouped, selected, onToggle }: { run: RunInfo; grouped?: : null const dateStr = m?.started_at ?? extractDate(run.group ?? run.name) - const kindBadge = m?.kind + const kindBadge = grouped ? null : m?.kind return (
{ e.stopPropagation(); onToggle(run.name) }} onClick={(e) => e.stopPropagation()} style={{ cursor: 'pointer' }} /> - {branchCommit ? {branchCommit} : label} + {branchCommit ?? displayLabel} {kindBadge && ( {kindBadge} )} - {dateStr && ( + {!grouped && dateStr && ( {typeof dateStr === 'string' && dateStr.includes('T') ? relativeTime(dateStr) : dateStr} )} - {m?.pass != null && m?.total != null && ( + {!grouped && m?.pass != null && m?.total != null && ( {m.pass}/{m.total} pass From 171591ca76b8393b2040ba25811078aaa4ff72c4 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 13:48:41 +0100 Subject: [PATCH 28/38] feat: compare UX improvements, shared controls, collapsible sidebar Compare view: - Concise single-line header for individual runs (name + pass/fail icons) - Concise header for groups (refs + pass/fail counts + regression/fix count) - Clickable test names in per-test table navigate to individual compare Metrics tab: - Pivoted table: rows=metric keys, columns=devices - Filter input to search by metric key name - Accepts shared filter prop for compare mode Shared controls architecture: - Compare split-screen lifts log filter/levels and metrics filter state - SharedControlsBar renders once above both panels - LogsTab and MetricsTab use external state when provided, hide internal controls Logs tab: - Collapsible sidebar with toggle button Co-Authored-By: Claude Opus 4.6 (1M context) --- ui/e2e/compare.spec.ts | 28 ++-- ui/src/components/CompareView.tsx | 262 ++++++++++++++++++++++-------- ui/src/components/LogsTab.tsx | 145 ++++++++++++----- ui/src/components/MetricsTab.tsx | 127 ++++++++++++--- ui/src/components/RunView.tsx | 24 ++- ui/src/index.css | 1 + 6 files changed, 440 insertions(+), 147 deletions(-) diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts index 47c650a..d378399 100644 --- a/ui/e2e/compare.spec.ts +++ b/ui/e2e/compare.spec.ts @@ -129,22 +129,19 @@ test('compare view renders summary and regression', async ({ page }) => { // Navigate directly to the compare view with two run names await page.goto(`${UI_URL}/compare/run-left/run-right`) - // Verify CompareView renders with ref labels (appears in heading + summary + table) + // Verify header shows ref labels and pass/fail summary await expect(page.getByText('main@aaa111').first()).toBeVisible({ timeout: 10_000 }) await expect(page.getByText('feature@bbb222').first()).toBeVisible() - - // Summary bar: per-side pass/fail counts - const summary = page.locator('.compare-summary') - await expect(summary).toBeVisible({ timeout: 10_000 }) - await expect(summary.getByText('2/2 pass')).toBeVisible() // left: 2 pass of 2 - await expect(summary.getByText('1/2 pass')).toBeVisible() // right: 1 pass of 2 - await expect(summary.getByText('Regressions: 1')).toBeVisible() + // Concise header: "2/2 → 1/2 (1 regression)" + await expect(page.getByText('2/2').first()).toBeVisible() + await expect(page.getByText('1/2').first()).toBeVisible() + await expect(page.getByText('regression').first()).toBeVisible() // Negative: no fixes in this scenario - await expect(summary.getByText('Fixes')).not.toBeVisible() + await expect(page.getByText('fix').first()).not.toBeVisible() // Score: 0 fixes, 1 regression => score = -5 - await expect(summary.getByText('-5')).toBeVisible() + await expect(page.getByText('-5').first()).toBeVisible() // Per-test table: verify column content, not just presence const tableRows = page.locator('table tbody tr') @@ -193,14 +190,13 @@ test('compare view shows fix when right side improves', async ({ page }) => { await page.goto(`${UI_URL}/compare/run-broken/run-fixed`) - const summary = page.locator('.compare-summary') - await expect(summary).toBeVisible({ timeout: 10_000 }) - await expect(summary.getByText('Fixes: 1')).toBeVisible() - // Negative: no regressions in this scenario - await expect(summary.getByText('Regressions')).not.toBeVisible() + // Header should show fix info + await expect(page.getByText('fix').first()).toBeVisible({ timeout: 10_000 }) + // Negative: no regressions + await expect(page.getByText('regression')).not.toBeVisible() // Score: 1 fix * 3 = +3 - await expect(summary.getByText('+3')).toBeVisible() + await expect(page.getByText('+3').first()).toBeVisible() // Delta column should show "fixed" not "REGRESS" const thresholdRow = page.locator('table tbody tr').filter({ hasText: 'udp_threshold' }) diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 2101c8c..dd8f75e 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -1,4 +1,5 @@ -import { useEffect, useState } from 'react' +import { useCallback, useEffect, useMemo, useState } from 'react' +import { useNavigate } from 'react-router-dom' import type { LabEvent, LabState } from '../devtools-types' import type { SimResults } from '../types' import { fetchRunJson, fetchState, fetchEvents, fetchLogs, fetchResults } from '../api' @@ -53,9 +54,23 @@ function refLabel(m: RunManifest | null, fallback: string): string { return fallback } +/** Extract the last path segment as a short display name. */ +function shortName(runPath: string): string { + const parts = runPath.split('/') + return parts[parts.length - 1] || runPath +} + +/** Check if this is a group compare (has tests) vs individual run compare. */ +function isGroupCompare(left: RunManifest | null, right: RunManifest | null): boolean { + const leftTests = left?.tests ?? [] + const rightTests = right?.tests ?? [] + return leftTests.length > 0 || rightTests.length > 0 +} + // ── Compare View (route: /compare/:left/:right) ── export default function CompareView({ leftRun, rightRun }: { leftRun: string; rightRun: string }) { + const navigate = useNavigate() const [leftManifest, setLeftManifest] = useState(null) const [rightManifest, setRightManifest] = useState(null) const [loading, setLoading] = useState(true) @@ -76,6 +91,7 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri const leftLabel = refLabel(leftManifest, leftRun) const rightLabel = refLabel(rightManifest, rightRun) + const isGroup = isGroupCompare(leftManifest, rightManifest) // Compute diff from tests arrays const diff = leftManifest && rightManifest @@ -83,73 +99,156 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri : { tests: [] as TestDelta[], fixes: 0, regressions: 0, score: 0 } const leftPass = leftManifest?.pass ?? (leftManifest?.tests ?? []).filter(t => t.status === 'pass').length - const leftFail = leftManifest?.fail ?? (leftManifest?.tests ?? []).filter(t => t.status === 'fail').length const leftTotal = leftManifest?.total ?? (leftManifest?.tests ?? []).length const rightPass = rightManifest?.pass ?? (rightManifest?.tests ?? []).filter(t => t.status === 'pass').length - const rightFail = rightManifest?.fail ?? (rightManifest?.tests ?? []).filter(t => t.status === 'fail').length const rightTotal = rightManifest?.total ?? (rightManifest?.tests ?? []).length + const leftOutcome = leftManifest?.test_outcome ?? leftManifest?.outcome ?? null + const rightOutcome = rightManifest?.test_outcome ?? rightManifest?.outcome ?? null + + const handleTestClick = (testName: string) => { + const leftPath = `${leftRun}/${testName}` + const rightPath = `${rightRun}/${testName}` + navigate(`/compare/${encodeURIComponent(leftPath)}/${encodeURIComponent(rightPath)}`) + } return (
-

Compare: {leftLabel} vs {rightLabel}

+ {/* Header: concise single-line for individual, summary for group */} + {!isGroup ? ( +

+ Compare: {shortName(leftRun)} — left: {statusIcon(leftOutcome)} {leftOutcome ?? 'unknown'} | right: {statusIcon(rightOutcome)} {rightOutcome ?? 'unknown'} +

+ ) : ( + <> +

+ Compare: {leftLabel} vs {rightLabel} — {leftPass}/{leftTotal} → {rightPass}/{rightTotal} + {diff.regressions > 0 && ({diff.regressions} regression{diff.regressions > 1 ? 's' : ''})} + {diff.fixes > 0 && ({diff.fixes} fix{diff.fixes > 1 ? 'es' : ''})} +

- {/* Summary bar */} -
-
- {leftLabel}: {leftPass}/{leftTotal} pass, {leftFail} fail -
-
- {rightLabel}: {rightPass}/{rightTotal} pass, {rightFail} fail -
- {diff.fixes > 0 &&
Fixes: {diff.fixes}
} - {diff.regressions > 0 &&
Regressions: {diff.regressions}
} -
- Score: = 0 ? 'var(--green)' : 'var(--red)', fontWeight: 'bold' }}> - {diff.score >= 0 ? '+' : ''}{diff.score} - -
-
+ {/* Summary bar */} +
+
+ Score: = 0 ? 'var(--green)' : 'var(--red)', fontWeight: 'bold' }}> + {diff.score >= 0 ? '+' : ''}{diff.score} + +
+
- {/* Per-test table */} - {diff.tests.length > 0 && ( -
- - - - - - - - - - - {diff.tests.map(({ name, left, right, delta }) => { - let color = '' - if (delta === 'fixed') color = 'var(--green)' - else if (delta === 'REGRESS') color = 'var(--red)' - - return ( - - - - - + {/* Per-test table */} + {diff.tests.length > 0 && ( +
+
Test{leftLabel}{rightLabel}Delta
{name}{statusBadge(left)}{statusBadge(right)}{delta}
+ + + + + + - ) - })} - -
Test{leftLabel}{rightLabel}Delta
-
+ + + {diff.tests.map(({ name, left, right, delta }) => { + let color = '' + if (delta === 'fixed') color = 'var(--green)' + else if (delta === 'REGRESS') color = 'var(--red)' + + return ( + + + handleTestClick(name)} + title={`Compare ${name} side-by-side`} + > + {name} + + + {statusBadge(left)} + {statusBadge(right)} + {delta} + + ) + })} + + +
+ )} + )} - {/* Phase 4c: Split-screen co-navigation */} -

Side-by-side view

+ {/* Split-screen co-navigation */}
) } -// ── Phase 4c: Split-screen co-navigation ── +// ── Shared controls state ── + +interface SharedControls { + logFilter: string + logLevels: Set + metricsFilter: string +} + +const ALL_LEVELS = ['ERROR', 'WARN', 'INFO', 'DEBUG', 'TRACE'] as const + +function SharedControlsBar({ controls, onChange, activeTab }: { + controls: SharedControls + onChange: (updates: Partial) => void + activeTab: RunTab +}) { + const toggleLevel = useCallback((level: string) => { + const next = new Set(controls.logLevels) + if (next.has(level)) next.delete(level) + else next.add(level) + onChange({ logLevels: next }) + }, [controls.logLevels, onChange]) + + if (activeTab === 'logs') { + return ( +
+ Shared: + {ALL_LEVELS.map((level) => ( + toggleLevel(level)} + style={{ cursor: 'pointer' }} + > + {level} + + ))} + onChange({ logFilter: e.target.value })} + style={{ marginLeft: 'auto', minWidth: 180 }} + /> +
+ ) + } + + if (activeTab === 'metrics') { + return ( +
+ Shared: + onChange({ metricsFilter: e.target.value })} + style={{ minWidth: 180 }} + /> +
+ ) + } + + return null +} + +// ── Split-screen co-navigation ── function SplitRunView({ left, right, sharedTab, onTabChange }: { left: string @@ -157,32 +256,46 @@ function SplitRunView({ left, right, sharedTab, onTabChange }: { sharedTab: RunTab onTabChange: (tab: RunTab) => void }) { + const [sharedControls, setSharedControls] = useState({ + logFilter: '', + logLevels: new Set(ALL_LEVELS), + metricsFilter: '', + }) + + const handleControlsChange = useCallback((updates: Partial) => { + setSharedControls(prev => ({ ...prev, ...updates })) + }, []) + return ( -
-
-
- {left} +
+ +
+
+
+ {left} +
+
+ +
-
- -
-
-
-
- {right} -
-
- +
+
+ {right} +
+
+ +
) } -function SplitRunPanel({ runName, activeTab, onTabChange }: { +function SplitRunPanel({ runName, activeTab, onTabChange, sharedControls }: { runName: string activeTab: RunTab onTabChange: (tab: RunTab) => void + sharedControls: SharedControls }) { const [state, setState] = useState(null) const [events, setEvents] = useState([]) @@ -208,6 +321,12 @@ function SplitRunPanel({ runName, activeTab, onTabChange }: { const run: RunInfo = { name: runName, label: null, status: null, group: null } + const externalControls = useMemo(() => ({ + logFilter: sharedControls.logFilter, + logLevels: sharedControls.logLevels, + metricsFilter: sharedControls.metricsFilter, + }), [sharedControls.logFilter, sharedControls.logLevels, sharedControls.metricsFilter]) + return ( ) } @@ -232,3 +352,11 @@ function statusBadge(status?: string) { } return {status.toUpperCase()} } + +function statusIcon(outcome?: string | null): string { + if (!outcome) return '?' + const lower = outcome.toLowerCase() + if (lower === 'pass' || lower === 'success') return '\u2705' + if (lower === 'fail' || lower === 'failure') return '\u274C' + return '\u2753' +} diff --git a/ui/src/components/LogsTab.tsx b/ui/src/components/LogsTab.tsx index 4916025..4085b8b 100644 --- a/ui/src/components/LogsTab.tsx +++ b/ui/src/components/LogsTab.tsx @@ -37,6 +37,10 @@ interface Props { base: string logs: SimLogEntry[] jumpTarget?: { node: string; path: string; timeLabel: string; nonce: number } | null + /** When provided, use this filter instead of internal search state. */ + sharedFilter?: string + /** When provided, use these levels instead of internal level state. */ + sharedLevels?: Set } function valueString(v: unknown): string { @@ -208,7 +212,7 @@ function formatBytes(bytes: number): string { return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GiB` } -export default function LogsTab({ base, logs, jumpTarget }: Props) { +export default function LogsTab({ base, logs, jumpTarget, sharedFilter, sharedLevels }: Props) { const [active, setActive] = useState(null) const [text, setText] = useState('') const [loaded, setLoaded] = useState(false) @@ -220,11 +224,16 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { const [jumpHandledNonce, setJumpHandledNonce] = useState(null) const jumpingRef = useRef(false) - // Level filter (for tracing logs) - const [enabledLevels, setEnabledLevels] = useState>(new Set(ALL_LEVELS)) + // Level filter (for tracing logs) — use shared if provided + const hasSharedLevels = sharedLevels != null + const [localEnabledLevels, setLocalEnabledLevels] = useState>(new Set(ALL_LEVELS)) + const enabledLevels = hasSharedLevels ? sharedLevels : localEnabledLevels + + // Search — use shared filter if provided + const hasSharedFilter = sharedFilter != null + const [localSearchQuery, setLocalSearchQuery] = useState('') + const searchQuery = hasSharedFilter ? sharedFilter : localSearchQuery - // Search - const [searchQuery, setSearchQuery] = useState('') const [searchMatches, setSearchMatches] = useState([]) const [searchIdx, setSearchIdx] = useState(0) const contentRef = useRef(null) @@ -233,10 +242,16 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { const [timeMode, setTimeMode] = useState('absolute') const [qlogNameFilter, setQlogNameFilter] = useState('all') + // Sidebar collapse + const [sidebarCollapsed, setSidebarCollapsed] = useState(false) + const isStructured = active != null && STRUCTURED_KINDS.has(active.kind) const isTracingLog = active?.kind === 'tracing_jsonl' const isQlog = active?.kind === 'qlog' + // Hide internal controls when shared controls are provided + const hideInternalControls = hasSharedLevels || hasSharedFilter + // Auto-select first log useEffect(() => { setActive((prev) => { @@ -257,11 +272,11 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { setJumpLine(null) } jumpingRef.current = false - setSearchQuery('') + if (!hasSharedFilter) setLocalSearchQuery('') setSearchMatches([]) setSearchIdx(0) setQlogNameFilter('all') - }, [active, base]) + }, [active, base, hasSharedFilter]) // Handle jump target from timeline useEffect(() => { @@ -345,23 +360,39 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { }, [parsed]) const filteredLines = useMemo(() => { - if (!isTracingLog) return parsed.map((line, i) => ({ line, origIdx: i })) - return parsed - .map((line, i) => ({ line, origIdx: i })) - .filter(({ line }) => { + let lines = parsed.map((line, i) => ({ line, origIdx: i })) + + // Level filtering for tracing logs + if (isTracingLog) { + lines = lines.filter(({ line }) => { if (line.type === 'tracing') return enabledLevels.has(line.level) return true }) - }, [parsed, enabledLevels, isTracingLog]) + } + + // Text search filtering (from shared or local) + if (searchQuery) { + const q = searchQuery.toLowerCase() + lines = lines.filter(({ line }) => { + const lineText = line.type === 'tracing' + ? `${line.ts} ${line.level} ${line.spans} ${line.target} ${line.msg} ${line.fields}` + : line.type === 'event' ? `${line.kind} ${line.raw}` + : line.raw + return lineText.toLowerCase().includes(q) + }) + } - // Search matches + return lines + }, [parsed, enabledLevels, isTracingLog, searchQuery]) + + // Search matches (for local search navigation only) useEffect(() => { - if (!searchQuery) { + if (!localSearchQuery || hasSharedFilter) { setSearchMatches([]) setSearchIdx(0) return } - const q = searchQuery.toLowerCase() + const q = localSearchQuery.toLowerCase() const matches: number[] = [] filteredLines.forEach(({ line }, i) => { const text = line.type === 'tracing' @@ -372,7 +403,7 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { }) setSearchMatches(matches) setSearchIdx(0) - }, [searchQuery, filteredLines]) + }, [localSearchQuery, filteredLines, hasSharedFilter]) // Jump needle resolution useEffect(() => { @@ -435,7 +466,7 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { }, [searchIdx, searchMatches]) const toggleLevel = (level: string) => { - setEnabledLevels((prev) => { + setLocalEnabledLevels((prev) => { const next = new Set(prev) if (next.has(level)) next.delete(level) else next.add(level) @@ -485,24 +516,45 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { return (
-
- {byNode.map(([node, files]) => ( -
-
{node}
- {files.map((f) => ( -
setActive(f)} - title={f.path} - > - {f.path.split('/').pop()?.replace(/^device\.[^.]+\./, '')} - [{f.kind}] -
- ))} -
- ))} -
+ {/* Sidebar toggle button */} + + + {!sidebarCollapsed && ( +
+ {byNode.map(([node, files]) => ( +
+
{node}
+ {files.map((f) => ( +
setActive(f)} + title={f.path} + > + {f.path.split('/').pop()?.replace(/^device\.[^.]+\./, '')} + [{f.kind}] +
+ ))} +
+ ))} +
+ )}
{error &&
{error}
} @@ -537,8 +589,8 @@ export default function LogsTab({ base, logs, jumpTarget }: Props) { )}
- {/* Tracing log toolbar */} - {isTracingLog && loaded && ( + {/* Tracing log toolbar — hidden when shared controls are active */} + {isTracingLog && loaded && !hideInternalControls && (
)} + {/* Tracing log toolbar (display-only controls) when shared controls are active */} + {isTracingLog && loaded && hideInternalControls && ( +
+ + + +
+ )} + {/* Qlog filter toolbar */} {isQlog && loaded && renderMode === 'rendered' && (
diff --git a/ui/src/components/MetricsTab.tsx b/ui/src/components/MetricsTab.tsx index 56dbd93..dc243c8 100644 --- a/ui/src/components/MetricsTab.tsx +++ b/ui/src/components/MetricsTab.tsx @@ -1,4 +1,4 @@ -import { useEffect, useState } from 'react' +import { Fragment, useEffect, useMemo, useState } from 'react' import { runFilesBase } from '../api' import type { LogEntry } from '../api' @@ -26,8 +26,18 @@ function Sparkline({ values }: { values: number[] }) { ) } -export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[] }) { +interface MetricsTabProps { + run: string + logs: LogEntry[] + /** When provided, use this filter instead of internal state. */ + sharedFilter?: string +} + +export default function MetricsTab({ run, logs, sharedFilter }: MetricsTabProps) { const [series, setSeries] = useState([]) + const hasSharedFilter = sharedFilter != null + const [localFilter, setLocalFilter] = useState('') + const filterValue = hasSharedFilter ? sharedFilter : localFilter useEffect(() => { const metricsLogs = logs.filter(l => l.kind === 'metrics') @@ -63,32 +73,105 @@ export default function MetricsTab({ run, logs }: { run: string; logs: LogEntry[ return () => { dead = true } }, [run, logs]) + // Derive unique devices and metric keys, then pivot + const devices = useMemo(() => { + const set = new Set() + for (const s of series) set.add(s.device) + return Array.from(set).sort() + }, [series]) + + const metricKeys = useMemo(() => { + const set = new Set() + for (const s of series) set.add(s.key) + return Array.from(set).sort() + }, [series]) + + // Build lookup: key -> device -> series + const lookup = useMemo(() => { + const map = new Map>() + for (const s of series) { + let byDevice = map.get(s.key) + if (!byDevice) { byDevice = new Map(); map.set(s.key, byDevice) } + byDevice.set(s.device, s) + } + return map + }, [series]) + + // Filter metric keys + const filteredKeys = useMemo(() => { + if (!filterValue) return metricKeys + const q = filterValue.toLowerCase() + return metricKeys.filter(k => k.toLowerCase().includes(q)) + }, [metricKeys, filterValue]) + if (series.length === 0) { return
No metrics recorded for this run.
} return ( -
- - - - - - - - - - - {series.map((s) => ( - - - - - +
+ {/* Filter input -- hidden when shared filter is provided */} + {!hasSharedFilter && ( +
+ setLocalFilter(e.target.value)} + style={{ width: '100%', maxWidth: 400 }} + /> +
+ )} + +
+
KeyDeviceLast ValueTrend
{s.key}{s.device}{s.values[s.values.length - 1]?.v.toFixed(2)} v.v)} />
+ + + + {devices.map(d => ( + + ))} + + + + {devices.map(d => ( + + + + + ))} - ))} - -
Metric{d}
valuetrend
+ + + {filteredKeys.map((key) => { + const byDevice = lookup.get(key) + return ( + + {key} + {devices.map(device => { + const s = byDevice?.get(device) + if (!s) { + return ( + + — + + + ) + } + const lastVal = s.values[s.values.length - 1]?.v + return ( + + {lastVal != null ? lastVal.toFixed(2) : '\u2014'} + v.v)} /> + + ) + })} + + ) + })} + + +
) } diff --git a/ui/src/components/RunView.tsx b/ui/src/components/RunView.tsx index 8aeeddd..2559498 100644 --- a/ui/src/components/RunView.tsx +++ b/ui/src/components/RunView.tsx @@ -12,6 +12,13 @@ import MetricsTab from './MetricsTab' export type RunTab = 'topology' | 'logs' | 'timeline' | 'perf' | 'metrics' +/** External controls passed from CompareView for shared filter state. */ +export interface ExternalControls { + logFilter?: string + logLevels?: Set + metricsFilter?: string +} + interface RunViewProps { run: RunInfo state: LabState | null @@ -20,9 +27,10 @@ interface RunViewProps { results: SimResults | null activeTab: RunTab onTabChange: (tab: RunTab) => void + externalControls?: ExternalControls } -export default function RunView({ run, state, events, logs, results, activeTab, onTabChange }: RunViewProps) { +export default function RunView({ run, state, events, logs, results, activeTab, onTabChange, externalControls }: RunViewProps) { const [selectedNode, setSelectedNode] = useState(null) const [selectedKind, setSelectedKind] = useState<'router' | 'device' | 'ix'>('router') const [logJump, setLogJump] = useState<{ node: string; path: string; timeLabel: string; nonce: number } | null>(null) @@ -94,7 +102,13 @@ export default function RunView({ run, state, events, logs, results, activeTab, )} {tab === 'logs' && ( - + )} {tab === 'timeline' && ( @@ -104,7 +118,11 @@ export default function RunView({ run, state, events, logs, results, activeTab, {tab === 'perf' && } {tab === 'metrics' && ( - + )}
diff --git a/ui/src/index.css b/ui/src/index.css index a43ab0f..a42c62d 100644 --- a/ui/src/index.css +++ b/ui/src/index.css @@ -277,6 +277,7 @@ tbody td { display: flex; flex: 1; overflow: hidden; + position: relative; } .logs-sidebar { width: 220px; From af8a08d4a6258d9b91c6ec68d098ff20e3184afe Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 13:52:36 +0100 Subject: [PATCH 29/38] chore: update CI template to use patchbay upload, fix binstall metadata - CI template uses cargo binstall + patchbay test --persist + patchbay upload - Fix binstall bin-dir to match release asset naming ({bin}-{target}) - Fix release workflow to build patchbay-cli (was patchbay-runner) - Add patchbay-serve build to release workflow - Update docs/guide/testing.md with simplified upload example Co-Authored-By: Claude Opus 4.6 (1M context) --- .github/workflows/release-rolling.yml | 7 +- docs/guide/testing.md | 101 ++++++------------- patchbay-cli/Cargo.toml | 2 +- patchbay-server/Cargo.toml | 2 +- patchbay-server/github-workflow-template.yml | 80 ++++++--------- 5 files changed, 70 insertions(+), 122 deletions(-) diff --git a/.github/workflows/release-rolling.yml b/.github/workflows/release-rolling.yml index 5c8b498..3b99a32 100644 --- a/.github/workflows/release-rolling.yml +++ b/.github/workflows/release-rolling.yml @@ -27,18 +27,23 @@ jobs: run: sudo apt-get update && sudo apt-get install -y musl-tools - name: Build patchbay (linux musl) - run: cargo build -p patchbay-runner --bin patchbay --release --target x86_64-unknown-linux-musl + run: cargo build -p patchbay-cli --release --target x86_64-unknown-linux-musl - name: Build patchbay-vm (linux musl) run: cargo build -p patchbay-vm --release --target x86_64-unknown-linux-musl + - name: Build patchbay-serve (linux musl) + run: cargo build -p patchbay-server --release --target x86_64-unknown-linux-musl + - name: Package linux artifacts run: | mkdir -p dist cp target/x86_64-unknown-linux-musl/release/patchbay dist/patchbay-x86_64-unknown-linux-musl cp target/x86_64-unknown-linux-musl/release/patchbay-vm dist/patchbay-vm-x86_64-unknown-linux-musl + cp target/x86_64-unknown-linux-musl/release/patchbay-serve dist/patchbay-serve-x86_64-unknown-linux-musl tar -C dist -czf dist/patchbay-x86_64-unknown-linux-musl.tar.gz patchbay-x86_64-unknown-linux-musl tar -C dist -czf dist/patchbay-vm-x86_64-unknown-linux-musl.tar.gz patchbay-vm-x86_64-unknown-linux-musl + tar -C dist -czf dist/patchbay-serve-x86_64-unknown-linux-musl.tar.gz patchbay-serve-x86_64-unknown-linux-musl - uses: actions/upload-artifact@v4 with: diff --git a/docs/guide/testing.md b/docs/guide/testing.md index 8204b59..ffce27f 100644 --- a/docs/guide/testing.md +++ b/docs/guide/testing.md @@ -112,7 +112,8 @@ On Linux, tests run natively. Install patchbay's CLI if you want the `serve` command for viewing results: ```bash -cargo install --git https://github.com/n0-computer/patchbay patchbay-runner +cargo binstall patchbay-cli --no-confirm \ + || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay ``` Then run your tests and serve the output: @@ -242,89 +243,47 @@ posted as a PR comment. Set two repository secrets: `PATCHBAY_URL` (e.g. `https://patchbay.example.com`) and `PATCHBAY_API_KEY`. -Add this to your workflow **after** the test step: +Install the patchbay CLI in your workflow, then add these steps **after** +the test step: ```yaml - - name: Push patchbay results + # Install patchbay CLI (binstall for speed, cargo install as fallback) + - name: Install patchbay CLI + run: | + cargo binstall patchbay-cli --no-confirm 2>/dev/null \ + || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay + + # Run tests with patchbay (--persist keeps the run directory) + - name: Run tests + id: tests + run: patchbay test --persist -p my-crate --test my-test + + # Upload results to patchbay-serve + - name: Upload results if: always() env: PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} PATCHBAY_API_KEY: ${{ secrets.PATCHBAY_API_KEY }} run: | set -euo pipefail - PROJECT="${{ github.event.repository.name }}" - TESTDIR="$(cargo metadata --format-version=1 --no-deps | jq -r .target_directory)/testdir-current" - - if [ ! -d "$TESTDIR" ]; then - echo "No testdir output found, skipping push" + RUN_DIR=$(ls -dt .patchbay/work/run-* 2>/dev/null | head -1) + if [ -z "$RUN_DIR" ]; then + echo "No run directory found, skipping upload" exit 0 fi + patchbay upload "$RUN_DIR" \ + --project "$PROJECT" \ + --url "$PATCHBAY_URL" \ + --api-key "$PATCHBAY_API_KEY" +``` - # Create run.json manifest - cat > "$TESTDIR/run.json" <> "$GITHUB_ENV" - echo "Results uploaded: $VIEW_URL" - - - name: Comment on PR - if: always() && github.event.pull_request && env.PATCHBAY_VIEW_URL - uses: actions/github-script@v7 - with: - script: | - const marker = ''; - const body = `${marker}\n**patchbay results:** ${process.env.PATCHBAY_VIEW_URL}`; - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - }); - const existing = comments.find(c => c.body.includes(marker)); - if (existing) { - await github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: existing.id, - body, - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - body, - }); - } -``` +For a complete workflow template including the PR comment step, see +[`patchbay-server/github-workflow-template.yml`](https://github.com/n0-computer/patchbay/blob/main/patchbay-server/github-workflow-template.yml). The PR comment is auto-updated on each push, so you always see the latest run. diff --git a/patchbay-cli/Cargo.toml b/patchbay-cli/Cargo.toml index 2d74e76..5a435cc 100644 --- a/patchbay-cli/Cargo.toml +++ b/patchbay-cli/Cargo.toml @@ -45,5 +45,5 @@ vm = ["dep:patchbay-vm"] [package.metadata.binstall] pkg-url = "{ repo }/releases/download/{ version }/patchbay-{ target }.tar.gz" -bin-dir = "{ bin }{ binary-ext }" +bin-dir = "{ bin }-{ target }{ binary-ext }" pkg-fmt = "tgz" diff --git a/patchbay-server/Cargo.toml b/patchbay-server/Cargo.toml index d60cc83..a7eacdd 100644 --- a/patchbay-server/Cargo.toml +++ b/patchbay-server/Cargo.toml @@ -35,5 +35,5 @@ rustls = "0.23" [package.metadata.binstall] pkg-url = "{ repo }/releases/download/{ version }/patchbay-serve-{ target }.tar.gz" -bin-dir = "{ bin }{ binary-ext }" +bin-dir = "{ bin }-{ target }{ binary-ext }" pkg-fmt = "tgz" diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index b612a78..b3c78d8 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -29,90 +29,74 @@ jobs: # ── Build tools — adjust to your project ── - uses: dtolnay/rust-toolchain@stable + # ── Install patchbay CLI ── + # Try cargo-binstall first (fast, downloads pre-built binary), + # fall back to cargo install (builds from source). + - name: Install patchbay CLI + run: | + cargo binstall patchbay-cli --no-confirm 2>/dev/null \ + || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay + # ── Run tests — replace with your own command ── + # Use `patchbay test` which writes structured output to .patchbay/work/. + # --persist keeps the run directory after tests complete. - name: Run tests id: tests - run: cargo test --release -p my-crate --test patchbay -- --test-threads=1 + run: patchbay test --persist -p my-crate --test my-test env: RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} - # ── Push results to patchbay-serve ── - - name: Push results + # ── Upload results to patchbay-serve ── + - name: Upload results if: always() env: PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} PATCHBAY_API_KEY: ${{ secrets.PATCHBAY_API_KEY }} - TEST_STATUS: ${{ steps.tests.outcome }} run: | set -euo pipefail PROJECT="${{ github.event.repository.name }}" - TESTDIR="$(cargo metadata --format-version=1 --no-deps | jq -r .target_directory)/testdir-current" - [ ! -d "$TESTDIR" ] && echo "No testdir output, skipping" && exit 0 - cat > "$TESTDIR/run.json" </dev/null | head -1) + if [ -z "$RUN_DIR" ]; then + echo "No run directory found, skipping upload" + exit 0 + fi - RESPONSE=$(tar -czf - -C "$TESTDIR" . | \ - curl -s -w "\n%{http_code}" -X POST \ - -H "Authorization: Bearer $PATCHBAY_API_KEY" \ - -H "Content-Type: application/gzip" \ - --data-binary @- "$PATCHBAY_URL/api/push/$PROJECT") - HTTP_CODE=$(echo "$RESPONSE" | tail -1) - BODY=$(echo "$RESPONSE" | head -n -1) - [ "$HTTP_CODE" != "200" ] && echo "Push failed ($HTTP_CODE): $BODY" && exit 1 - - GROUP=$(echo "$BODY" | jq -r .group) - echo "PATCHBAY_VIEW_URL=$PATCHBAY_URL/batch/$GROUP" >> "$GITHUB_ENV" - echo "PATCHBAY_TEST_STATUS=$TEST_STATUS" >> "$GITHUB_ENV" - echo "Results: $PATCHBAY_URL/batch/$GROUP" + patchbay upload "$RUN_DIR" \ + --project "$PROJECT" \ + --url "$PATCHBAY_URL" \ + --api-key "$PATCHBAY_API_KEY" # ── Post or update PR comment ── - name: Comment on PR - if: always() && env.PATCHBAY_VIEW_URL + if: always() && github.event.pull_request uses: actions/github-script@v7 with: script: | - let prNumber = context.issue?.number; - if (!prNumber) { - const { data: prs } = await github.rest.pulls.list({ - owner: context.repo.owner, repo: context.repo.repo, - head: `${context.repo.owner}:${{ github.ref_name }}`, - state: 'open', - }); - if (!prs.length) return; - prNumber = prs[0].number; - } - - const status = process.env.PATCHBAY_TEST_STATUS; + const marker = ''; + const status = '${{ steps.tests.outcome }}'; const icon = status === 'success' ? '✅' : '❌'; const sha = '${{ github.sha }}'; const shortSha = sha.slice(0, 7); const commitUrl = `${{ github.server_url }}/${{ github.repository }}/commit/${sha}`; const date = new Date().toISOString().replace('T', ' ').slice(0, 19) + ' UTC'; - const marker = ''; + const url = '${{ secrets.PATCHBAY_URL }}'; + const project = '${{ github.event.repository.name }}'; const body = [ marker, - `${icon} **patchbay:** ${status} | ${process.env.PATCHBAY_VIEW_URL}`, + `${icon} **patchbay:** ${status} | ${url}/runs?project=${project}`, `${date} · [\`${shortSha}\`](${commitUrl})`, ].join('\n'); const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, + owner: context.repo.owner, repo: context.repo.repo, + issue_number: context.issue.number, }); const existing = comments.find(c => c.body.includes(marker)); const params = { owner: context.repo.owner, repo: context.repo.repo }; if (existing) { await github.rest.issues.updateComment({ ...params, comment_id: existing.id, body }); } else { - await github.rest.issues.createComment({ ...params, issue_number: prNumber, body }); + await github.rest.issues.createComment({ ...params, issue_number: context.issue.number, body }); } From 4814be9713f2c40ea1511e944b4f0c3234b9a3a5 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 13:55:12 +0100 Subject: [PATCH 30/38] fix: clean compare header for individual runs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Individual lab runs don't have run.json so outcome showed as "unknown". Just show the test name — the side-by-side view below has all details. Co-Authored-By: Claude Opus 4.6 (1M context) --- ui/src/components/CompareView.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index dd8f75e..7609dd7 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -113,10 +113,10 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri return (
- {/* Header: concise single-line for individual, summary for group */} + {/* Header: simple name for individual runs, summary for groups */} {!isGroup ? (

- Compare: {shortName(leftRun)} — left: {statusIcon(leftOutcome)} {leftOutcome ?? 'unknown'} | right: {statusIcon(rightOutcome)} {rightOutcome ?? 'unknown'} + Compare: {shortName(leftRun)}

) : ( <> From 8dbfb1061607a51b4c7cb25065c3ea77f00d0bbc Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 14:06:13 +0100 Subject: [PATCH 31/38] fix: patchbay upload prints direct run URL, CI template uses it Upload now prints the full view URL (e.g. https://pb.example.com/run/name) to stdout. The CI template captures it via GITHUB_OUTPUT and links directly to the run in the PR comment. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/upload.rs | 4 +++- patchbay-server/github-workflow-template.yml | 11 ++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/patchbay-cli/src/upload.rs b/patchbay-cli/src/upload.rs index 068915d..a2ed7a5 100644 --- a/patchbay-cli/src/upload.rs +++ b/patchbay-cli/src/upload.rs @@ -74,8 +74,10 @@ pub fn upload(dir: &Path, project: &str, url: &str, api_key: &str) -> Result<()> } let result: serde_json::Value = resp.json().context("parse response")?; + let base = url.trim_end_matches('/'); if let Some(run) = result.get("run").and_then(serde_json::Value::as_str) { - println!("uploaded: {run}"); + let view_url = format!("{base}/run/{run}"); + println!("{view_url}"); } Ok(()) } diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index b3c78d8..f69c917 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -48,6 +48,7 @@ jobs: # ── Upload results to patchbay-serve ── - name: Upload results + id: upload if: always() env: PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} @@ -63,10 +64,11 @@ jobs: exit 0 fi - patchbay upload "$RUN_DIR" \ + VIEW_URL=$(patchbay upload "$RUN_DIR" \ --project "$PROJECT" \ --url "$PATCHBAY_URL" \ - --api-key "$PATCHBAY_API_KEY" + --api-key "$PATCHBAY_API_KEY") + echo "view_url=$VIEW_URL" >> "$GITHUB_OUTPUT" # ── Post or update PR comment ── - name: Comment on PR @@ -81,11 +83,10 @@ jobs: const shortSha = sha.slice(0, 7); const commitUrl = `${{ github.server_url }}/${{ github.repository }}/commit/${sha}`; const date = new Date().toISOString().replace('T', ' ').slice(0, 19) + ' UTC'; - const url = '${{ secrets.PATCHBAY_URL }}'; - const project = '${{ github.event.repository.name }}'; + const viewUrl = '${{ steps.upload.outputs.view_url }}'; const body = [ marker, - `${icon} **patchbay:** ${status} | ${url}/runs?project=${project}`, + `${icon} **patchbay:** ${status} | [view results](${viewUrl})`, `${date} · [\`${shortSha}\`](${commitUrl})`, ].join('\n'); From 312a2d53714f7fb012ffeb88a14e3dd633ef4f16 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 14:08:08 +0100 Subject: [PATCH 32/38] fix: binstall uses rolling tag, install via --git-url The pkg-url used { version } which resolves to 0.1.0, but releases use a force-moved rolling tag. Hardcode rolling in the download path. CI template installs cargo-binstall first, then uses --git-url since the crate is not published on crates.io. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/Cargo.toml | 2 +- patchbay-server/Cargo.toml | 2 +- patchbay-server/github-workflow-template.yml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/patchbay-cli/Cargo.toml b/patchbay-cli/Cargo.toml index 5a435cc..10bb1c4 100644 --- a/patchbay-cli/Cargo.toml +++ b/patchbay-cli/Cargo.toml @@ -44,6 +44,6 @@ upload = ["dep:reqwest"] vm = ["dep:patchbay-vm"] [package.metadata.binstall] -pkg-url = "{ repo }/releases/download/{ version }/patchbay-{ target }.tar.gz" +pkg-url = "{ repo }/releases/download/rolling/patchbay-{ target }.tar.gz" bin-dir = "{ bin }-{ target }{ binary-ext }" pkg-fmt = "tgz" diff --git a/patchbay-server/Cargo.toml b/patchbay-server/Cargo.toml index a7eacdd..e09d6fb 100644 --- a/patchbay-server/Cargo.toml +++ b/patchbay-server/Cargo.toml @@ -34,6 +34,6 @@ axum-server = "0.7" rustls = "0.23" [package.metadata.binstall] -pkg-url = "{ repo }/releases/download/{ version }/patchbay-serve-{ target }.tar.gz" +pkg-url = "{ repo }/releases/download/rolling/patchbay-serve-{ target }.tar.gz" bin-dir = "{ bin }-{ target }{ binary-ext }" pkg-fmt = "tgz" diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index f69c917..c4a0aeb 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -30,11 +30,11 @@ jobs: - uses: dtolnay/rust-toolchain@stable # ── Install patchbay CLI ── - # Try cargo-binstall first (fast, downloads pre-built binary), - # fall back to cargo install (builds from source). + # Install pre-built binary via binstall (fast), or build from source. - name: Install patchbay CLI run: | - cargo binstall patchbay-cli --no-confirm 2>/dev/null \ + curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + cargo binstall patchbay-cli --git-url https://github.com/n0-computer/patchbay --no-confirm \ || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay # ── Run tests — replace with your own command ── From cfcb1ae9145f0d188cb5f30ff7a13358b670e7a4 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 14:26:02 +0100 Subject: [PATCH 33/38] =?UTF-8?q?fix:=20review=20fixes=20=E2=80=94=20dirty?= =?UTF-8?q?=20detection,=20dir=5Fsize=20panic,=20dead=20code=20cleanup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - git_context() now checks both unstaged and staged changes for dirty flag - dir_size() handles file_type() errors gracefully instead of panicking - test.rs warns on run.json write failures instead of silently ignoring - Remove duplicate TestResult interface in api.ts - Remove unused statusIcon function in CompareView.tsx - Fix stale comment in InvRedirect Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/test.rs | 13 +++-- patchbay-server/src/lib.rs | 32 +++++++++++- patchbay-utils/src/manifest.rs | 81 +++++++++++++++++++++++++++++-- ui/src/api.ts | 6 ++- ui/src/components/CompareView.tsx | 40 +++++++-------- ui/src/main.tsx | 2 +- 6 files changed, 144 insertions(+), 30 deletions(-) diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index e99da9f..314cf54 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -279,10 +279,17 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { if let Some(target_dir) = cargo_target_dir() { let testdir = target_dir.join("testdir-current"); - std::fs::create_dir_all(&testdir).ok(); + if let Err(e) = std::fs::create_dir_all(&testdir) { + eprintln!("patchbay: warning: could not create testdir: {e}"); + } let run_json = testdir.join("run.json"); - if let Ok(json) = serde_json::to_string_pretty(&manifest) { - std::fs::write(&run_json, json).ok(); + match serde_json::to_string_pretty(&manifest) { + Ok(json) => { + if let Err(e) = std::fs::write(&run_json, json) { + eprintln!("patchbay: warning: could not write run.json: {e}"); + } + } + Err(e) => eprintln!("patchbay: warning: could not serialize run.json: {e}"), } } diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index 82dcf0c..e91efc0 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -246,6 +246,7 @@ fn build_router(state: AppState) -> Router { .route("/inv/{*rest}", get(index_html)) .route("/api/runs", get(get_runs)) .route("/api/runs/subscribe", get(runs_sse)) + .route("/api/runs/{run}/manifest", get(get_run_manifest)) .route("/api/runs/{run}/state", get(get_run_state)) .route("/api/runs/{run}/events", get(run_events_sse)) .route("/api/runs/{run}/events.json", get(run_events_json)) @@ -437,6 +438,31 @@ struct EventsQuery { after: Option, } +async fn get_run_manifest( + AxPath(run): AxPath, + State(state): State, +) -> impl IntoResponse { + let Some(run_dir) = safe_run_dir(&state.base, &run) else { + return ( + StatusCode::FORBIDDEN, + [("content-type", "application/json")], + r#"{"error":"forbidden"}"#.to_string(), + ); + }; + match read_run_json(&run_dir) { + Some(manifest) => ( + StatusCode::OK, + [("content-type", "application/json")], + serde_json::to_string(&manifest).unwrap_or_else(|_| "null".to_string()), + ), + None => ( + StatusCode::NOT_FOUND, + [("content-type", "application/json")], + r#"{"error":"run.json not found"}"#.to_string(), + ), + } +} + async fn get_run_state( AxPath(run): AxPath, State(state): State, @@ -901,7 +927,9 @@ const RUN_JSON: &str = "run.json"; fn read_run_json(dir: &Path) -> Option { let text = fs::read_to_string(dir.join(RUN_JSON)).ok()?; - serde_json::from_str(&text).ok() + let mut manifest: RunManifest = serde_json::from_str(&text).ok()?; + manifest.resolve_test_dirs(dir); + Some(manifest) } // ── Push endpoint ─────────────────────────────────────────────────── @@ -1027,7 +1055,7 @@ fn dir_size(path: &Path) -> u64 { let mut total = 0; if let Ok(entries) = fs::read_dir(path) { for entry in entries.flatten() { - let ft = entry.file_type().unwrap_or_else(|_| unreachable!()); + let Ok(ft) = entry.file_type() else { continue }; if ft.is_file() { total += entry.metadata().map(|m| m.len()).unwrap_or(0); } else if ft.is_dir() { diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index 74042e8..5cb5de8 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -78,6 +78,10 @@ pub struct TestResult { with = "option_duration_ms" )] pub duration: Option, + /// Relative directory path for this test's output (e.g. `"patchbay/holepunch_simple"`). + /// Populated by the server when the directory exists on disk. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dir: Option, } /// Unified manifest written as `run.json` alongside every run. @@ -147,6 +151,70 @@ pub struct RunManifest { pub patchbay_version: Option, } +impl RunManifest { + /// Populate `dir` fields by scanning the run directory for subdirs that + /// contain `events.jsonl`, then matching them to test results by the bare + /// function name (last path segment of the dir, last token of the nextest name). + pub fn resolve_test_dirs(&mut self, run_dir: &std::path::Path) { + // Collect all dirs with events.jsonl, recursively (up to 2 levels). + let mut test_dirs: Vec = Vec::new(); + collect_event_dirs(run_dir, run_dir, 0, 2, &mut test_dirs); + + // Build a map: bare function name → relative dir path. + // e.g. "holepunch_simple" → "patchbay/holepunch_simple" + let dir_by_fn: std::collections::HashMap<&str, &str> = test_dirs + .iter() + .filter_map(|d| { + let fn_name = d.rsplit('/').next()?; + Some((fn_name, d.as_str())) + }) + .collect(); + + // Match each test result to a directory by bare function name. + // Nextest name: "iroh::patchbay holepunch_simple" → last token "holepunch_simple" + for test in &mut self.tests { + let fn_name = test + .name + .rsplit_once(' ') + .map(|(_, name)| name) + .unwrap_or(&test.name); + if let Some(&dir) = dir_by_fn.get(fn_name) { + test.dir = Some(dir.to_string()); + } + } + } +} + +/// Recursively collect relative paths to directories containing `events.jsonl`. +fn collect_event_dirs( + root: &std::path::Path, + dir: &std::path::Path, + depth: usize, + max_depth: usize, + out: &mut Vec, +) { + if depth > max_depth { + return; + } + let entries = match std::fs::read_dir(dir) { + Ok(e) => e, + Err(_) => return, + }; + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_dir() { + continue; + } + if path.join("events.jsonl").exists() { + if let Ok(rel) = path.strip_prefix(root) { + out.push(rel.to_string_lossy().into_owned()); + } + } else { + collect_event_dirs(root, &path, depth + 1, max_depth, out); + } + } +} + // ── Git helpers ───────────────────────────────────────────────────── /// Snapshot of git repository state. @@ -173,11 +241,18 @@ pub fn git_context() -> GitContext { .and_then(|o| String::from_utf8(o.stdout).ok()) .map(|s| s.trim().to_string()) .filter(|s| s != "HEAD"); - let dirty = !Command::new("git") + // Check both unstaged and staged changes. + let unstaged = !Command::new("git") .args(["diff", "--quiet"]) .status() .map(|s| s.success()) .unwrap_or(true); + let staged = !Command::new("git") + .args(["diff", "--cached", "--quiet"]) + .status() + .map(|s| s.success()) + .unwrap_or(true); + let dirty = unstaged || staged; GitContext { commit, branch, @@ -244,7 +319,7 @@ pub fn parse_test_output(output: &str) -> Vec { }; let name = name.trim().to_string(); if seen.insert(name.clone()) { - results.push(TestResult { name, status, duration: None }); + results.push(TestResult { name, status, duration: None, dir: None }); } } continue; @@ -263,7 +338,7 @@ pub fn parse_test_output(output: &str) -> Vec { .trim() .to_string(); if !name.is_empty() && seen.insert(name.clone()) { - results.push(TestResult { name, status, duration }); + results.push(TestResult { name, status, duration, dir: None }); } } } diff --git a/ui/src/api.ts b/ui/src/api.ts index 1badc6c..6fab1e8 100644 --- a/ui/src/api.ts +++ b/ui/src/api.ts @@ -8,9 +8,10 @@ export interface TestResult { name: string status: string // "pass" | "fail" | "ignored" duration?: number | null + /** Relative directory path for this test's output, if it exists on disk. */ + dir?: string | null } -/** Manifest from run.json, included with pushed CI runs. */ export interface RunManifest { kind?: string | null // "test" | "sim" project?: string | null @@ -144,7 +145,8 @@ export function runFilesBase(run: string): string { /** Fetch run.json manifest for a given run. */ export async function fetchRunJson(run: string): Promise { try { - const res = await fetch(`${runFilesBase(run)}run.json`) + // Use the API endpoint which enriches the manifest (e.g. resolves test dirs). + const res = await fetch(`${API}/runs/${encodeURIComponent(run)}/manifest`) if (!res.ok) return null return (await res.json()) as RunManifest } catch { diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 7609dd7..3f0ecfb 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -17,6 +17,8 @@ interface TestDelta { left?: string right?: string delta: 'fixed' | 'REGRESS' | 'new' | 'removed' | '' + /** Relative directory for this test's output, if it exists on disk. */ + dir?: string } function computeDiff(left: RunManifest, right: RunManifest) { @@ -24,6 +26,7 @@ function computeDiff(left: RunManifest, right: RunManifest) { const rightTests = right.tests ?? [] const leftMap = new Map(leftTests.map(t => [t.name, t.status])) const rightMap = new Map(rightTests.map(t => [t.name, t.status])) + const dirMap = new Map([...leftTests, ...rightTests].filter((t): t is typeof t & { dir: string } => !!t.dir).map(t => [t.name, t.dir])) const allNames = new Set([...leftMap.keys(), ...rightMap.keys()]) const tests: TestDelta[] = [] @@ -40,7 +43,7 @@ function computeDiff(left: RunManifest, right: RunManifest) { else if (!l && r) { delta = 'new' } else if (l && !r) { delta = 'removed' } - tests.push({ name, left: l, right: r, delta }) + tests.push({ name, left: l, right: r, delta, dir: dirMap.get(name) }) } const score = fixes * SCORE_FIX + regressions * SCORE_REGRESS @@ -105,9 +108,9 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri const leftOutcome = leftManifest?.test_outcome ?? leftManifest?.outcome ?? null const rightOutcome = rightManifest?.test_outcome ?? rightManifest?.outcome ?? null - const handleTestClick = (testName: string) => { - const leftPath = `${leftRun}/${testName}` - const rightPath = `${rightRun}/${testName}` + const handleTestClick = (dir: string) => { + const leftPath = `${leftRun}/${dir}` + const rightPath = `${rightRun}/${dir}` navigate(`/compare/${encodeURIComponent(leftPath)}/${encodeURIComponent(rightPath)}`) } @@ -148,7 +151,7 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri - {diff.tests.map(({ name, left, right, delta }) => { + {diff.tests.map(({ name, left, right, delta, dir }) => { let color = '' if (delta === 'fixed') color = 'var(--green)' else if (delta === 'REGRESS') color = 'var(--red)' @@ -156,13 +159,19 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri return ( - handleTestClick(name)} - title={`Compare ${name} side-by-side`} - > - {name} - + {dir ? ( + handleTestClick(dir)} + title={`Compare ${name} side-by-side`} + > + {name} + + ) : ( + + {name} + + )} {statusBadge(left)} {statusBadge(right)} @@ -353,10 +362,3 @@ function statusBadge(status?: string) { return {status.toUpperCase()} } -function statusIcon(outcome?: string | null): string { - if (!outcome) return '?' - const lower = outcome.toLowerCase() - if (lower === 'pass' || lower === 'success') return '\u2705' - if (lower === 'fail' || lower === 'failure') return '\u274C' - return '\u2753' -} diff --git a/ui/src/main.tsx b/ui/src/main.tsx index 4d8238d..ee5ae32 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -24,7 +24,7 @@ ReactDOM.createRoot(document.getElementById('root')!).render( ) -/** Redirect legacy /inv/* paths to /batch/*. */ +/** Redirect legacy /inv/* paths to /group/*. */ function InvRedirect() { const rest = window.location.pathname.slice('/inv/'.length) return From cf9cb782aba0e30fd5709e4d027df0f07d08ae89 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 14:34:07 +0100 Subject: [PATCH 34/38] refactor: cargo fmt, remove /batch/ routes, harden tar, compare nav MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - cargo fmt across all changed Rust files - Remove /batch/ SPA and API routes, /group/ is now canonical - Rename BatchPage → GroupPage - Harden push tar extraction: manual entry iteration with path checks, reject absolute paths and .. components, disable permissions/xattrs - Individual run compare shows both sides + back link to group compare Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 116 +++++++++++++++++----- patchbay-cli/src/main.rs | 104 ++++++++++++------- patchbay-cli/src/test.rs | 29 ++++-- patchbay-cli/src/upload.rs | 13 ++- patchbay-cli/tests/compare_integration.rs | 62 +++++++++--- patchbay-runner/src/sim/runner.rs | 2 +- patchbay-server/src/lib.rs | 67 ++++++++++--- patchbay-utils/src/manifest.rs | 14 ++- patchbay-vm/src/lib.rs | 112 +++++++++++++++++---- patchbay/src/lib.rs | 4 +- ui/e2e/push.spec.ts | 2 +- ui/src/{BatchPage.tsx => GroupPage.tsx} | 28 +++--- ui/src/components/CompareView.tsx | 25 ++++- ui/src/components/RunSelector.tsx | 2 +- ui/src/main.tsx | 7 +- 15 files changed, 440 insertions(+), 147 deletions(-) rename ui/src/{BatchPage.tsx => GroupPage.tsx} (82%) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index fc2ac81..85cc859 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -1,10 +1,10 @@ //! Compare mode: run tests/sims in two git worktrees and diff results. +use anyhow::{bail, Context, Result}; +use patchbay_utils::manifest::{self, TestResult, TestStatus}; use std::path::{Path, PathBuf}; use std::process::Command; use std::time::Duration; -use anyhow::{bail, Context, Result}; -use patchbay_utils::manifest::{self, TestResult, TestStatus}; /// Set up a git worktree for the given ref. pub fn setup_worktree(git_ref: &str, base: &Path) -> Result { @@ -78,8 +78,13 @@ pub fn run_tests_in_dir( let v = verbose; let out_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stdout_pipe).lines().map_while(Result::ok) { - if v { println!("{line}"); } + for line in std::io::BufReader::new(stdout_pipe) + .lines() + .map_while(Result::ok) + { + if v { + println!("{line}"); + } buf.push_str(&line); buf.push('\n'); } @@ -87,8 +92,13 @@ pub fn run_tests_in_dir( }); let err_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stderr_pipe).lines().map_while(Result::ok) { - if verbose { eprintln!("{line}"); } + for line in std::io::BufReader::new(stderr_pipe) + .lines() + .map_while(Result::ok) + { + if verbose { + eprintln!("{line}"); + } buf.push_str(&line); buf.push('\n'); } @@ -117,8 +127,14 @@ pub fn persist_worktree_run( let dest = PathBuf::from(format!(".patchbay/work/run-{ts}")); std::fs::create_dir_all(&dest)?; - let pass = results.iter().filter(|r| r.status == TestStatus::Pass).count() as u32; - let fail = results.iter().filter(|r| r.status == TestStatus::Fail).count() as u32; + let pass = results + .iter() + .filter(|r| r.status == TestStatus::Pass) + .count() as u32; + let fail = results + .iter() + .filter(|r| r.status == TestStatus::Fail) + .count() as u32; let total = results.len() as u32; let outcome = if fail == 0 { "pass" } else { "fail" }; @@ -155,7 +171,11 @@ fn test_index(results: &[TestResult]) -> std::collections::HashMap<&str, &TestRe } fn merged_names(left: &[TestResult], right: &[TestResult]) -> Vec { - let mut names: Vec = left.iter().chain(right.iter()).map(|r| r.name.clone()).collect(); + let mut names: Vec = left + .iter() + .chain(right.iter()) + .map(|r| r.name.clone()) + .collect(); names.sort(); names.dedup(); names @@ -184,8 +204,14 @@ pub fn compare_results(left: &[TestResult], right: &[TestResult]) -> CompareResu let left_pass = left.iter().filter(|r| r.status == TestStatus::Pass).count(); let left_fail = left.iter().filter(|r| r.status == TestStatus::Fail).count(); - let right_pass = right.iter().filter(|r| r.status == TestStatus::Pass).count(); - let right_fail = right.iter().filter(|r| r.status == TestStatus::Fail).count(); + let right_pass = right + .iter() + .filter(|r| r.status == TestStatus::Pass) + .count(); + let right_fail = right + .iter() + .filter(|r| r.status == TestStatus::Fail) + .count(); let mut fixes = 0; let mut regressions = 0; @@ -207,15 +233,30 @@ pub fn compare_results(left: &[TestResult], right: &[TestResult]) -> CompareResu score += fixes as i32 * 3; score -= regressions as i32 * 5; if !left_time.is_zero() { - let pct = (right_time.as_secs_f64() - left_time.as_secs_f64()) / left_time.as_secs_f64() * 100.0; - if pct < -2.0 { score += 1; } - if pct > 5.0 { score -= 1; } + let pct = + (right_time.as_secs_f64() - left_time.as_secs_f64()) / left_time.as_secs_f64() * 100.0; + if pct < -2.0 { + score += 1; + } + if pct > 5.0 { + score -= 1; + } } CompareResult { - left: SideStats { pass: left_pass, fail: left_fail, total: left.len() }, - right: SideStats { pass: right_pass, fail: right_fail, total: right.len() }, - fixes, regressions, score, + left: SideStats { + pass: left_pass, + fail: left_fail, + total: left.len(), + }, + right: SideStats { + pass: right_pass, + fail: right_fail, + total: right.len(), + }, + fixes, + regressions, + score, } } @@ -228,11 +269,23 @@ fn status_str(s: TestStatus) -> &'static str { } /// Print a comparison summary table. -pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right: &[TestResult], result: &CompareResult) { +pub fn print_summary( + left_ref: &str, + right_ref: &str, + left: &[TestResult], + right: &[TestResult], + result: &CompareResult, +) { println!("\nCompare: {left_ref} \u{2194} {right_ref}\n"); - println!("Tests: {}/{} pass ({} fail) \u{2192} {}/{} pass ({} fail)", - result.left.pass, result.left.total, result.left.fail, - result.right.pass, result.right.total, result.right.fail); + println!( + "Tests: {}/{} pass ({} fail) \u{2192} {}/{} pass ({} fail)", + result.left.pass, + result.left.total, + result.left.fail, + result.right.pass, + result.right.total, + result.right.fail + ); if result.fixes > 0 { println!("Fixes: {} (fail\u{2192}pass)", result.fixes); } @@ -244,7 +297,10 @@ pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right let right_map = test_index(right); let all_names = merged_names(left, right); - println!("\n{:<50} {:>8} {:>8} {:>10}", "Test", "Left", "Right", "Delta"); + println!( + "\n{:<50} {:>8} {:>8} {:>10}", + "Test", "Left", "Right", "Delta" + ); println!("{}", "-".repeat(80)); for name in &all_names { let name = name.as_str(); @@ -259,9 +315,19 @@ pub fn print_summary(left_ref: &str, right_ref: &str, left: &[TestResult], right (Some(_), None) => "removed", _ => "", }; - let display_name = if name.len() > 48 { &name[name.len()-48..] } else { name }; - println!("{:<50} {:>8} {:>8} {:>10}", display_name, ls_str, rs_str, delta); + let display_name = if name.len() > 48 { + &name[name.len() - 48..] + } else { + name + }; + println!( + "{:<50} {:>8} {:>8} {:>10}", + display_name, ls_str, rs_str, delta + ); } - println!("\nScore: {:+} ({} fixes, {} regressions)", result.score, result.fixes, result.regressions); + println!( + "\nScore: {:+} ({} fixes, {} regressions)", + result.score, result.fixes, result.regressions + ); } diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index 158012e..d4b82cf 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -436,45 +436,58 @@ async fn tokio_main() -> Result<()> { let cwd = std::env::current_dir().context("get cwd")?; let work_dir = cwd.join(".patchbay/work"); match command { - CompareCommand::Test { left_ref, right_ref, force_build, no_ref_build, args } => { + CompareCommand::Test { + left_ref, + right_ref, + force_build, + no_ref_build, + args, + } => { use patchbay_utils::manifest::{self as mf, RunKind}; let right_label = right_ref.as_deref().unwrap_or("worktree"); - println!("patchbay compare test: {} \u{2194} {}", left_ref, right_label); + println!( + "patchbay compare test: {} \u{2194} {}", + left_ref, right_label + ); // Helper: resolve results for a ref, using cache or building. - let resolve_ref_results = |git_ref: &str, label: &str| -> Result> { - let sha = mf::resolve_ref(git_ref) - .with_context(|| format!("could not resolve ref '{git_ref}'"))?; - - // Check cache (unless --force-build). - if !force_build { - if let Some((_dir, manifest)) = mf::find_run_for_commit(&work_dir, &sha, RunKind::Test) { - println!("Using cached run for {label} ({sha:.8})"); - return Ok(manifest.tests); + let resolve_ref_results = + |git_ref: &str, label: &str| -> Result> { + let sha = mf::resolve_ref(git_ref) + .with_context(|| format!("could not resolve ref '{git_ref}'"))?; + + // Check cache (unless --force-build). + if !force_build { + if let Some((_dir, manifest)) = + mf::find_run_for_commit(&work_dir, &sha, RunKind::Test) + { + println!("Using cached run for {label} ({sha:.8})"); + return Ok(manifest.tests); + } } - } - // No cache — fail if --no-ref-build. - if no_ref_build { - bail!( - "no cached run for {label} ({sha:.8}); \ + // No cache — fail if --no-ref-build. + if no_ref_build { + bail!( + "no cached run for {label} ({sha:.8}); \ run `patchbay test --persist` on that ref first, \ or remove --no-ref-build" - ); - } + ); + } - // Build in worktree. - println!("Running tests in {label} ..."); - let tree_dir = compare::setup_worktree(git_ref, &cwd)?; - let (results, _output) = compare::run_tests_in_dir(&tree_dir, &args, cli.verbose)?; + // Build in worktree. + println!("Running tests in {label} ..."); + let tree_dir = compare::setup_worktree(git_ref, &cwd)?; + let (results, _output) = + compare::run_tests_in_dir(&tree_dir, &args, cli.verbose)?; - // Persist the run so future compares can reuse it. - compare::persist_worktree_run(&tree_dir, &results, &sha)?; + // Persist the run so future compares can reuse it. + compare::persist_worktree_run(&tree_dir, &results, &sha)?; - compare::cleanup_worktree(&tree_dir)?; - Ok(results) - }; + compare::cleanup_worktree(&tree_dir)?; + Ok(results) + }; let left_results = resolve_ref_results(&left_ref, &left_ref)?; @@ -483,33 +496,54 @@ async fn tokio_main() -> Result<()> { } else { // Compare against current worktree: always run fresh. println!("Running tests in worktree ..."); - let (results, _output) = compare::run_tests_in_dir(&cwd, &args, cli.verbose)?; + let (results, _output) = + compare::run_tests_in_dir(&cwd, &args, cli.verbose)?; results }; // Compare let result = compare::compare_results(&left_results, &right_results); - compare::print_summary(&left_ref, right_label, &left_results, &right_results, &result); + compare::print_summary( + &left_ref, + right_label, + &left_results, + &right_results, + &result, + ); if result.regressions > 0 { bail!("{} regressions detected", result.regressions); } Ok(()) } - CompareCommand::Run { sims: _, left_ref: _, right_ref: _ } => { + CompareCommand::Run { + sims: _, + left_ref: _, + right_ref: _, + } => { // TODO: implement compare run (sim comparison) bail!("compare run is not yet implemented"); } } } - Command::Upload { dir, project, url, api_key } => { + Command::Upload { + dir, + project, + url, + api_key, + } => { if !dir.exists() { bail!("directory does not exist: {}", dir.display()); } #[cfg(feature = "upload")] - { upload::upload(&dir, &project, &url, &api_key) } + { + upload::upload(&dir, &project, &url, &api_key) + } #[cfg(not(feature = "upload"))] - { let _ = (&dir, &project, &url, &api_key); bail!("upload support not compiled in (enable the `upload` feature)") } + { + let _ = (&dir, &project, &url, &api_key); + bail!("upload support not compiled in (enable the `upload` feature)") + } } #[cfg(feature = "vm")] Command::Vm { command, backend } => dispatch_vm(command, backend).await, @@ -612,7 +646,9 @@ async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Resul no_fail_fast, extra_args: { let mut args = Vec::new(); - if let Some(f) = filter { args.push(f); } + if let Some(f) = filter { + args.push(f); + } args.extend(cargo_args); args }, diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 314cf54..4128d82 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -222,8 +222,13 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { let v = verbose; let out_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stdout_pipe).lines().map_while(Result::ok) { - if v { println!("{line}"); } + for line in std::io::BufReader::new(stdout_pipe) + .lines() + .map_while(Result::ok) + { + if v { + println!("{line}"); + } buf.push_str(&line); buf.push('\n'); } @@ -231,8 +236,13 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { }); let err_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stderr_pipe).lines().map_while(Result::ok) { - if verbose { eprintln!("{line}"); } + for line in std::io::BufReader::new(stderr_pipe) + .lines() + .map_while(Result::ok) + { + if verbose { + eprintln!("{line}"); + } buf.push_str(&line); buf.push('\n'); } @@ -248,8 +258,14 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { let results = manifest::parse_test_output(&combined); // Write run.json into testdir-current/. - let pass = results.iter().filter(|r| r.status == TestStatus::Pass).count() as u32; - let fail = results.iter().filter(|r| r.status == TestStatus::Fail).count() as u32; + let pass = results + .iter() + .filter(|r| r.status == TestStatus::Pass) + .count() as u32; + let fail = results + .iter() + .filter(|r| r.status == TestStatus::Fail) + .count() as u32; let total = results.len() as u32; let git = manifest::git_context(); let runtime = (ended_at - started_at).to_std().ok(); @@ -328,7 +344,6 @@ fn persist_run() -> Result<()> { Ok(()) } - /// Run tests in a VM via patchbay-vm. #[cfg(feature = "vm")] pub fn run_vm(args: TestArgs, backend: patchbay_vm::Backend) -> anyhow::Result<()> { diff --git a/patchbay-cli/src/upload.rs b/patchbay-cli/src/upload.rs index a2ed7a5..c948e5f 100644 --- a/patchbay-cli/src/upload.rs +++ b/patchbay-cli/src/upload.rs @@ -1,18 +1,20 @@ //! Upload run/compare directories to a patchbay-server instance. -use std::path::Path; use anyhow::{bail, Context, Result}; -use patchbay_utils::manifest::{RunManifest, RunKind}; +use patchbay_utils::manifest::{RunKind, RunManifest}; +use std::path::Path; /// Build a RunManifest from CI environment variables. pub fn manifest_from_env(project: &str) -> RunManifest { RunManifest { kind: RunKind::Sim, // default; overridden if run.json already exists project: Some(project.to_string()), - branch: std::env::var("GITHUB_REF_NAME").ok() + branch: std::env::var("GITHUB_REF_NAME") + .ok() .or_else(|| std::env::var("GITHUB_HEAD_REF").ok()), commit: std::env::var("GITHUB_SHA").ok(), - pr: std::env::var("GITHUB_PR_NUMBER").ok() + pr: std::env::var("GITHUB_PR_NUMBER") + .ok() .and_then(|s| s.parse().ok()), pr_url: None, title: std::env::var("GITHUB_PR_TITLE").ok(), @@ -60,7 +62,8 @@ pub fn upload(dir: &Path, project: &str, url: &str, api_key: &str) -> Result<()> let push_url = format!("{}/api/push/{}", url.trim_end_matches('/'), project); let client = reqwest::blocking::Client::new(); - let resp = client.post(&push_url) + let resp = client + .post(&push_url) .header("Authorization", format!("Bearer {api_key}")) .header("Content-Type", "application/gzip") .body(body) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 9743042..4e3b009 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -51,7 +51,10 @@ fn compare_detects_regression() { // Commit 2: regressing (PACKET_COUNT = 2, below THRESHOLD = 3) let src = std::fs::read_to_string(dir.join("tests/counter.rs")).unwrap(); - let regressed = src.replace("const PACKET_COUNT: u32 = 5;", "const PACKET_COUNT: u32 = 2;"); + let regressed = src.replace( + "const PACKET_COUNT: u32 = 5;", + "const PACKET_COUNT: u32 = 2;", + ); std::fs::write(dir.join("tests/counter.rs"), regressed).unwrap(); git(dir, &["add", "."]); git(dir, &["commit", "-m", "regressing"]); @@ -89,14 +92,23 @@ fn compare_detects_regression() { .filter_map(|e| e.ok()) .filter(|e| e.file_name().to_string_lossy().starts_with("run-")) .collect(); - assert_eq!(run_dirs.len(), 2, "expected 2 run directories, found {}", run_dirs.len()); + assert_eq!( + run_dirs.len(), + 2, + "expected 2 run directories, found {}", + run_dirs.len() + ); // Parse run.json from each directory let mut manifests: Vec = run_dirs .iter() .map(|d| { let run_json = d.path().join("run.json"); - assert!(run_json.exists(), "run.json not found in {}", d.path().display()); + assert!( + run_json.exists(), + "run.json not found in {}", + d.path().display() + ); serde_json::from_str(&std::fs::read_to_string(&run_json).unwrap()).unwrap() }) .collect(); @@ -104,7 +116,10 @@ fn compare_detects_regression() { // Both should have kind: "test" for m in &manifests { assert_eq!(m["kind"], "test", "run.json should have kind 'test'"); - assert!(!m["dirty"].as_bool().unwrap_or(true), "run should not be dirty"); + assert!( + !m["dirty"].as_bool().unwrap_or(true), + "run should not be dirty" + ); assert!(m["commit"].is_string(), "run.json should have a commit SHA"); } @@ -131,17 +146,41 @@ fn compare_detects_regression() { let left_manifest = &manifests[0]; let right_manifest = &manifests[1]; - assert_eq!(left_manifest["commit"].as_str().unwrap(), v1_sha, "left run should match v1 SHA"); - assert_eq!(right_manifest["commit"].as_str().unwrap(), v2_sha, "right run should match v2 SHA"); + assert_eq!( + left_manifest["commit"].as_str().unwrap(), + v1_sha, + "left run should match v1 SHA" + ); + assert_eq!( + right_manifest["commit"].as_str().unwrap(), + v2_sha, + "right run should match v2 SHA" + ); // Left side: both tests pass (PACKET_COUNT=5 >= THRESHOLD=3) - assert_eq!(left_manifest["pass"].as_u64().unwrap(), 2, "left should have 2 passes"); - assert_eq!(left_manifest["fail"].as_u64().unwrap(), 0, "left should have 0 failures"); + assert_eq!( + left_manifest["pass"].as_u64().unwrap(), + 2, + "left should have 2 passes" + ); + assert_eq!( + left_manifest["fail"].as_u64().unwrap(), + 0, + "left should have 0 failures" + ); assert_eq!(left_manifest["total"].as_u64().unwrap(), 2); // Right side: udp_threshold fails (PACKET_COUNT=2 < THRESHOLD=3) - assert_eq!(right_manifest["pass"].as_u64().unwrap(), 1, "right should have 1 pass"); - assert_eq!(right_manifest["fail"].as_u64().unwrap(), 1, "right should have 1 failure"); + assert_eq!( + right_manifest["pass"].as_u64().unwrap(), + 1, + "right should have 1 pass" + ); + assert_eq!( + right_manifest["fail"].as_u64().unwrap(), + 1, + "right should have 1 failure" + ); assert_eq!(right_manifest["total"].as_u64().unwrap(), 2); // Per-test results @@ -187,8 +226,7 @@ fn compare_detects_regression() { .current_dir(dir) .output() .unwrap(); - let meta: serde_json::Value = - serde_json::from_slice(&meta_out.stdout).unwrap_or_default(); + let meta: serde_json::Value = serde_json::from_slice(&meta_out.stdout).unwrap_or_default(); let target = meta["target_directory"] .as_str() .map(|s| Path::new(s).join("testdir-current")); diff --git a/patchbay-runner/src/sim/runner.rs b/patchbay-runner/src/sim/runner.rs index 4001545..84d93d4 100644 --- a/patchbay-runner/src/sim/runner.rs +++ b/patchbay-runner/src/sim/runner.rs @@ -20,7 +20,7 @@ use crate::sim::{ env::SimEnv, progress::{ collect_run_environment, format_timestamp, now_stamp, write_json, write_progress, - write_sim_report, ManifestSimSummary, ProgressSim, SimRunReport, RunProgress, + write_sim_report, ManifestSimSummary, ProgressSim, RunProgress, SimRunReport, }, report::{ print_run_summary_table_for_runs, write_combined_results_for_runs, write_results, diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index e91efc0..20f0f2a 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -241,7 +241,6 @@ fn build_router(state: AppState) -> Router { // SPA fallback: serve index.html for client-side routes. .route("/run/{*rest}", get(index_html)) .route("/group/{*rest}", get(index_html)) - .route("/batch/{*rest}", get(index_html)) .route("/compare/{*rest}", get(index_html)) .route("/inv/{*rest}", get(index_html)) .route("/api/runs", get(get_runs)) @@ -258,10 +257,6 @@ fn build_router(state: AppState) -> Router { get(get_group_combined), ) // Legacy alias — keep for backward-compat (links shared on Discord). - .route( - "/api/batches/{name}/combined-results", - get(get_group_combined), - ) .route( "/api/invocations/{name}/combined-results", get(get_group_combined), @@ -977,16 +972,62 @@ async fn push_run( ); } - // Extract tar.gz + // Extract tar.gz — iterate entries manually to reject unsafe paths. let decoder = flate2::read::GzDecoder::new(&body[..]); let mut archive = tar::Archive::new(decoder); - if let Err(e) = archive.unpack(&run_dir) { - // Clean up on failure - let _ = std::fs::remove_dir_all(&run_dir); - return ( - StatusCode::BAD_REQUEST, - format!("failed to extract archive: {e}"), - ); + archive.set_preserve_permissions(false); + archive.set_unpack_xattrs(false); + archive.set_overwrite(false); + + let entries = match archive.entries() { + Ok(e) => e, + Err(e) => { + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + format!("failed to read archive entries: {e}"), + ); + } + }; + for entry in entries { + let mut entry = match entry { + Ok(e) => e, + Err(e) => { + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + format!("failed to read archive entry: {e}"), + ); + } + }; + let path = match entry.path() { + Ok(p) => p.into_owned(), + Err(e) => { + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + format!("failed to read entry path: {e}"), + ); + } + }; + if path.is_absolute() + || path + .components() + .any(|c| c == std::path::Component::ParentDir) + { + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + "invalid path in archive".to_string(), + ); + } + if let Err(e) = entry.unpack_in(&run_dir) { + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + format!("failed to extract archive: {e}"), + ); + } } // Notify subscribers about new run diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index 5cb5de8..5d39cd7 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -319,7 +319,12 @@ pub fn parse_test_output(output: &str) -> Vec { }; let name = name.trim().to_string(); if seen.insert(name.clone()) { - results.push(TestResult { name, status, duration: None, dir: None }); + results.push(TestResult { + name, + status, + duration: None, + dir: None, + }); } } continue; @@ -338,7 +343,12 @@ pub fn parse_test_output(output: &str) -> Vec { .trim() .to_string(); if !name.is_empty() && seen.insert(name.clone()) { - results.push(TestResult { name, status, duration, dir: None }); + results.push(TestResult { + name, + status, + duration, + dir: None, + }); } } } diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index 87939d8..370283e 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -40,26 +40,54 @@ pub trait VmOps { pub struct Qemu; impl VmOps for Qemu { - fn up(&self, recreate: bool) -> anyhow::Result<()> { qemu::up_cmd(recreate) } - fn down(&self) -> anyhow::Result<()> { qemu::down_cmd() } - fn status(&self) -> anyhow::Result<()> { qemu::status_cmd() } - fn cleanup(&self) -> anyhow::Result<()> { qemu::cleanup_cmd() } - fn exec(&self, cmd: Vec) -> anyhow::Result<()> { qemu::ssh_cmd_cli(cmd) } - fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { qemu::run_sims_in_vm(args) } - fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { qemu::run_tests_in_vm(args) } + fn up(&self, recreate: bool) -> anyhow::Result<()> { + qemu::up_cmd(recreate) + } + fn down(&self) -> anyhow::Result<()> { + qemu::down_cmd() + } + fn status(&self) -> anyhow::Result<()> { + qemu::status_cmd() + } + fn cleanup(&self) -> anyhow::Result<()> { + qemu::cleanup_cmd() + } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { + qemu::ssh_cmd_cli(cmd) + } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { + qemu::run_sims_in_vm(args) + } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { + qemu::run_tests_in_vm(args) + } } /// Apple container backend. pub struct Container; impl VmOps for Container { - fn up(&self, recreate: bool) -> anyhow::Result<()> { container::up_cmd(recreate) } - fn down(&self) -> anyhow::Result<()> { container::down_cmd() } - fn status(&self) -> anyhow::Result<()> { container::status_cmd() } - fn cleanup(&self) -> anyhow::Result<()> { container::cleanup_cmd() } - fn exec(&self, cmd: Vec) -> anyhow::Result<()> { container::exec_cmd_cli(cmd) } - fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { container::run_sims(args) } - fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { container::run_tests(args) } + fn up(&self, recreate: bool) -> anyhow::Result<()> { + container::up_cmd(recreate) + } + fn down(&self) -> anyhow::Result<()> { + container::down_cmd() + } + fn status(&self) -> anyhow::Result<()> { + container::status_cmd() + } + fn cleanup(&self) -> anyhow::Result<()> { + container::cleanup_cmd() + } + fn exec(&self, cmd: Vec) -> anyhow::Result<()> { + container::exec_cmd_cli(cmd) + } + fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { + container::run_sims(args) + } + fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { + container::run_tests(args) + } } impl Backend { @@ -84,24 +112,66 @@ impl Backend { /// Implement VmOps on Backend by delegating to the resolved backend. impl VmOps for Backend { fn up(&self, recreate: bool) -> anyhow::Result<()> { - match self { Self::Container => Container.up(recreate), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.up(recreate) } + match self { + Self::Container => Container.up(recreate), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.up(recreate), + } } fn down(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.down(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.down() } + match self { + Self::Container => Container.down(), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.down(), + } } fn status(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.status(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.status() } + match self { + Self::Container => Container.status(), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.status(), + } } fn cleanup(&self) -> anyhow::Result<()> { - match self { Self::Container => Container.cleanup(), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.cleanup() } + match self { + Self::Container => Container.cleanup(), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.cleanup(), + } } fn exec(&self, cmd: Vec) -> anyhow::Result<()> { - match self { Self::Container => Container.exec(cmd), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.exec(cmd) } + match self { + Self::Container => Container.exec(cmd), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.exec(cmd), + } } fn run_sims(&self, args: RunVmArgs) -> anyhow::Result<()> { - match self { Self::Container => Container.run_sims(args), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.run_sims(args) } + match self { + Self::Container => Container.run_sims(args), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.run_sims(args), + } } fn run_tests(&self, args: TestVmArgs) -> anyhow::Result<()> { - match self { Self::Container => Container.run_tests(args), Self::Auto => anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first"), Self::Qemu => Qemu.run_tests(args) } + match self { + Self::Container => Container.run_tests(args), + Self::Auto => { + anyhow::bail!("Backend::Auto must be resolved before use; call .resolve() first") + } + Self::Qemu => Qemu.run_tests(args), + } } } diff --git a/patchbay/src/lib.rs b/patchbay/src/lib.rs index 166040d..b9b6f3b 100644 --- a/patchbay/src/lib.rs +++ b/patchbay/src/lib.rs @@ -200,10 +200,10 @@ pub mod consts; pub(crate) mod core; /// Lab event system: typed events, state reducer, file writer. pub mod event; -mod metrics; pub(crate) mod firewall; pub(crate) mod handles; mod lab; +mod metrics; pub(crate) mod nat; pub(crate) mod nat64; mod netlink; @@ -221,7 +221,6 @@ pub mod util; pub(crate) mod writer; pub use firewall::PortPolicy; -pub use metrics::MetricsBuilder; pub use ipnet::Ipv4Net; pub use lab::{ ConntrackTimeouts, DefaultRegions, Device, DeviceBuilder, DeviceIface, Firewall, @@ -230,6 +229,7 @@ pub use lab::{ NatConfigBuilder, NatFiltering, NatMapping, NatV6Mode, OutDir, Region, RegionLink, Router, RouterBuilder, RouterIface, RouterPreset, TestGuard, }; +pub use metrics::MetricsBuilder; pub use crate::{ core::{NodeId, ReflectorGuard}, diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index d4982d1..5257cbe 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -74,7 +74,7 @@ test('push run results and view via deep link', async ({ page }) => { body: tarGz, }) expect(pushRes.status).toBe(200) - const pushBody = await pushRes.json() as { ok: boolean; group: string; batch: string; project: string } + const pushBody = await pushRes.json() as { ok: boolean; group: string; project: string } expect(pushBody.ok).toBe(true) expect(pushBody.project).toBe('test-project') expect(pushBody.group).toBeTruthy() diff --git a/ui/src/BatchPage.tsx b/ui/src/GroupPage.tsx similarity index 82% rename from ui/src/BatchPage.tsx rename to ui/src/GroupPage.tsx index 6112123..b55005f 100644 --- a/ui/src/BatchPage.tsx +++ b/ui/src/GroupPage.tsx @@ -8,16 +8,14 @@ import type { Selection } from './components/RunSelector' import PerfTab from './components/PerfTab' import { simLabel } from './utils' -type BatchTab = 'sims' | 'perf' +type GroupTab = 'sims' | 'perf' -export default function BatchPage() { +export default function GroupPage() { const location = useLocation() const navigate = useNavigate() - const batchName = location.pathname.startsWith('/group/') - ? location.pathname.slice('/group/'.length) - : location.pathname.slice('/batch/'.length) - const [tab, setTab] = useState('sims') + const groupName = location.pathname.slice('/group/'.length) + const [tab, setTab] = useState('sims') // Run list (for the dropdown) const [runs, setRuns] = useState([]) @@ -39,30 +37,30 @@ export default function BatchPage() { // ── Load combined results ── useEffect(() => { - if (!batchName) { + if (!groupName) { setCombinedResults(null) return } let dead = false - fetchCombinedResults(batchName).then((results) => { + fetchCombinedResults(groupName).then((results) => { if (dead) return setCombinedResults(results) }) return () => { dead = true } - }, [batchName]) + }, [groupName]) // ── Derived ── - const selection: Selection | null = batchName ? { kind: 'group', name: batchName } : null + const selection: Selection | null = groupName ? { kind: 'group', name: groupName } : null const groupRuns = useMemo( - () => runs.filter((r) => r.group === batchName), - [runs, batchName], + () => runs.filter((r) => r.group === groupName), + [runs, groupName], ) - const availableTabs = useMemo( - () => ['sims', ...(combinedResults ? (['perf'] as BatchTab[]) : [])], + const availableTabs = useMemo( + () => ['sims', ...(combinedResults ? (['perf'] as GroupTab[]) : [])], [combinedResults], ) @@ -101,7 +99,7 @@ export default function BatchPage() {
{tab === 'sims' && (
-

{batchName}

+

{groupName}

{groupRuns.length === 0 &&
No sims found.
} {groupRuns.map((r) => ( 0 || rightTests.length > 0 } +/** Extract the group (first path segment) from a run path like "run-20260326_123338/project/test". */ +function extractGroup(runPath: string): string { + return runPath.split('/')[0] || runPath +} + +/** Build the parent group compare URL from two individual run paths. */ +function groupCompareUrl(leftRun: string, rightRun: string): string { + const leftGroup = extractGroup(leftRun) + const rightGroup = extractGroup(rightRun) + return `/compare/${encodeURIComponent(leftGroup)}/${encodeURIComponent(rightGroup)}` +} + // ── Compare View (route: /compare/:left/:right) ── export default function CompareView({ leftRun, rightRun }: { leftRun: string; rightRun: string }) { @@ -118,9 +130,14 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri
{/* Header: simple name for individual runs, summary for groups */} {!isGroup ? ( -

- Compare: {shortName(leftRun)} -

+
+

+ Compare: {shortName(leftRun)} (left) vs {shortName(rightRun)} (right) +

+ + ↩ Back to group compare + +
) : ( <>

diff --git a/ui/src/components/RunSelector.tsx b/ui/src/components/RunSelector.tsx index a78a7d5..57fa7c9 100644 --- a/ui/src/components/RunSelector.tsx +++ b/ui/src/components/RunSelector.tsx @@ -21,7 +21,7 @@ export function selectionFromValue(val: string): Selection | null { export function selectionPath(s: Selection | null): string { if (!s) return '/' - return s.kind === 'group' ? `/batch/${s.name}` : `/run/${s.name}` + return s.kind === 'group' ? `/group/${s.name}` : `/run/${s.name}` } // ── Component ────────────────────────────────────────────────────── diff --git a/ui/src/main.tsx b/ui/src/main.tsx index ee5ae32..76159de 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -2,7 +2,7 @@ import React from 'react' import ReactDOM from 'react-dom/client' import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' import RunPage from './RunPage' -import BatchPage from './BatchPage' +import GroupPage from './GroupPage' import RunsIndex from './RunsIndex' import ComparePage from './ComparePage' import './index.css' @@ -13,11 +13,10 @@ ReactDOM.createRoot(document.getElementById('root')!).render( } /> } /> - } /> + } /> } /> - {/* Legacy redirect: /inv/:name -> /batch/:name */} + {/* Legacy redirect: /inv/:name -> /group/:name */} } /> - } /> } /> From abe927ffc0df54e375d6604101ab398e839df64f Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 14:37:23 +0100 Subject: [PATCH 35/38] fix: show project and refs in compare page topbar MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The topbar now shows context like "iroh · main@fc654dd vs main@abc1234" so you know which project and refs you're comparing. Co-Authored-By: Claude Opus 4.6 (1M context) --- ui/src/ComparePage.tsx | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/ui/src/ComparePage.tsx b/ui/src/ComparePage.tsx index 1f2a69e..9226c50 100644 --- a/ui/src/ComparePage.tsx +++ b/ui/src/ComparePage.tsx @@ -1,17 +1,44 @@ +import { useEffect, useState } from 'react' import { useParams } from 'react-router-dom' +import { fetchRunJson } from './api' +import type { RunManifest } from './api' import CompareView from './components/CompareView' +function refLabel(m: RunManifest | null): string | null { + if (!m) return null + if (m.branch && m.commit) return `${m.branch}@${m.commit.slice(0, 7)}` + if (m.commit) return m.commit.slice(0, 7) + return null +} + export default function ComparePage() { const { left, right } = useParams<{ left: string; right: string }>() + const [leftManifest, setLeftManifest] = useState(null) + const [rightManifest, setRightManifest] = useState(null) + + useEffect(() => { + if (!left || !right) return + fetchRunJson(left).then(setLeftManifest) + fetchRunJson(right).then(setRightManifest) + }, [left, right]) if (!left || !right) { return
Missing run names in URL. Use /compare/:left/:right
} + const project = leftManifest?.project ?? rightManifest?.project + const leftRef = refLabel(leftManifest) + const rightRef = refLabel(rightManifest) + const subtitle = [ + project, + leftRef && rightRef ? `${leftRef} vs ${rightRef}` : leftRef ?? rightRef, + ].filter(Boolean).join(' · ') + return (

patchbay

+ {subtitle && {subtitle}}
From 9ab99b18fab19f24a3c6370d078bbbaf40843122 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 19:02:59 +0100 Subject: [PATCH 36/38] fix: use is_some_and instead of map_or(false, ..) per clippy Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/tests/compare_integration.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 4e3b009..5a55afb 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -237,7 +237,7 @@ fn compare_detects_regression() { .into_iter() .filter(|p| { p.file_name() - .map_or(false, |f| f.to_string_lossy().ends_with(".metrics.jsonl")) + .is_some_and(|f| f.to_string_lossy().ends_with(".metrics.jsonl")) }) .collect(); assert!( From a0d0d448bea4ce68efdbe9f915c980031190665a Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 26 Mar 2026 19:15:45 +0100 Subject: [PATCH 37/38] chore: fmt --- patchbay-cli/src/compare.rs | 9 ++++++--- patchbay-cli/src/test.rs | 6 ++++-- patchbay-cli/src/upload.rs | 3 ++- patchbay-cli/tests/compare_integration.rs | 3 +-- patchbay-utils/src/manifest.rs | 6 ++++-- patchbay-vm/src/lib.rs | 3 +-- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 85cc859..bf4c616 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -1,10 +1,13 @@ //! Compare mode: run tests/sims in two git worktrees and diff results. +use std::{ + path::{Path, PathBuf}, + process::Command, + time::Duration, +}; + use anyhow::{bail, Context, Result}; use patchbay_utils::manifest::{self, TestResult, TestStatus}; -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::time::Duration; /// Set up a git worktree for the given ref. pub fn setup_worktree(git_ref: &str, base: &Path) -> Result { diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 4128d82..cfa53f5 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -1,7 +1,9 @@ //! Test command implementation. -use std::path::{Path, PathBuf}; -use std::process::Command; +use std::{ + path::{Path, PathBuf}, + process::Command, +}; use anyhow::{bail, Context, Result}; use patchbay_utils::manifest::{self, RunKind, RunManifest, TestStatus}; diff --git a/patchbay-cli/src/upload.rs b/patchbay-cli/src/upload.rs index c948e5f..cb7420b 100644 --- a/patchbay-cli/src/upload.rs +++ b/patchbay-cli/src/upload.rs @@ -1,8 +1,9 @@ //! Upload run/compare directories to a patchbay-server instance. +use std::path::Path; + use anyhow::{bail, Context, Result}; use patchbay_utils::manifest::{RunKind, RunManifest}; -use std::path::Path; /// Build a RunManifest from CI environment variables. pub fn manifest_from_env(project: &str) -> RunManifest { diff --git a/patchbay-cli/tests/compare_integration.rs b/patchbay-cli/tests/compare_integration.rs index 5a55afb..a78ca7a 100644 --- a/patchbay-cli/tests/compare_integration.rs +++ b/patchbay-cli/tests/compare_integration.rs @@ -2,8 +2,7 @@ //! Copies the counter fixture into a temp git repo, makes two commits //! with different PACKET_COUNT values, and runs compare between them. -use std::path::Path; -use std::process::Command; +use std::{path::Path, process::Command}; fn git(dir: &Path, args: &[&str]) { let status = Command::new("git") diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index 5d39cd7..0869011 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -17,9 +17,10 @@ use serde::{Deserialize, Serialize}; /// Serialize/deserialize a [`Duration`] as integer milliseconds. pub mod duration_ms { - use serde::{Deserialize, Deserializer, Serializer}; use std::time::Duration; + use serde::{Deserialize, Deserializer, Serializer}; + pub fn serialize(d: &Duration, s: S) -> Result { s.serialize_u64(d.as_millis() as u64) } @@ -31,9 +32,10 @@ pub mod duration_ms { /// Serialize/deserialize an `Option` as integer milliseconds. pub mod option_duration_ms { - use serde::{Deserialize, Deserializer, Serializer}; use std::time::Duration; + use serde::{Deserialize, Deserializer, Serializer}; + pub fn serialize(d: &Option, s: S) -> Result { match d { Some(d) => s.serialize_u64(d.as_millis() as u64), diff --git a/patchbay-vm/src/lib.rs b/patchbay-vm/src/lib.rs index 370283e..f0f516e 100644 --- a/patchbay-vm/src/lib.rs +++ b/patchbay-vm/src/lib.rs @@ -3,9 +3,8 @@ pub mod container; pub mod qemu; pub mod util; -pub use common::{RunVmArgs, TestVmArgs}; - use clap::ValueEnum; +pub use common::{RunVmArgs, TestVmArgs}; #[derive(Clone, Debug, ValueEnum)] pub enum Backend { From 2f0a03ba9b38fd83b4fff82274a5de084ce1bd66 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 27 Mar 2026 12:24:40 +0100 Subject: [PATCH 38/38] fix: restore invocation field in push response for backward compat Old CI templates (on main) read .invocation from the push response. Keep it alongside group and batch so existing deployments don't break. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-server/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index 20f0f2a..41702b1 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -1039,7 +1039,8 @@ async fn push_run( "project": project, "run": run_name, "group": run_name, - "batch": run_name, // backward compat + "batch": run_name, // backward compat + "invocation": run_name, // backward compat (old CI templates read .invocation) }); (StatusCode::OK, serde_json::to_string(&result).unwrap())