diff --git a/.gitignore b/.gitignore index 03aaa0c..9a2c640 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,10 @@ tmp/ distilled.zip test-gsdd/ .worktrees + +# Worktree coordination registry (local-only, never committed) +# .tmp files are per-PID (registry.json.<pid>.tmp) to avoid concurrent-write truncation +.planning/.local/registry.json +.planning/.local/registry.json.*.tmp +.planning/.local/registry.json.broken-* +.planning/.local/registry.json.tmp diff --git a/README.md b/README.md index bda1f5e..bc450a6 100644 --- a/README.md +++ b/README.md @@ -90,11 +90,25 @@ Use Workspine when a feature takes more than one session, or when you need to sw ## CLI ```bash +npx -y gsdd-cli init # guided install wizard npx -y gsdd-cli health # workspace integrity check npx -y gsdd-cli update # regenerate stale runtime surfaces npx -y gsdd-cli models profile quality # maximize review rigor npx -y gsdd-cli models profile budget # minimize cost npx -y gsdd-cli control-map # repo and planning state at a glance +npx -y gsdd-cli closeout-report # read-only phase closeout replay +npx -y gsdd-cli phase-status 5 done # mark a phase status in ROADMAP.md +npx -y gsdd-cli find-phase 5 # show phase info as JSON +npx -y gsdd-cli verify 5 # run artifact checks for a phase +npx -y gsdd-cli scaffold phase 5 name # create a new phase plan file +npx -y gsdd-cli file-op copy ... # deterministic workspace file ops +npx -y gsdd-cli session-fingerprint write # rebaseline planning-state drift +npx -y gsdd-cli ui-proof validate path # validate UI proof metadata +npx -y gsdd-cli registry-list # list worktree coordination leases +npx -y gsdd-cli registry-show 5 # show lease for a specific phase +npx -y gsdd-cli registry-clear 5 # remove a lease record +npx -y gsdd-cli registry-crash 5 ...
# mark a lease crashed (P66 placeholder) +npx -y gsdd-cli help # show all commands ``` Full reference: [User Guide](docs/USER-GUIDE.md) · [Runtime Support](docs/RUNTIME-SUPPORT.md) · [Verification Discipline](docs/VERIFICATION-DISCIPLINE.md) diff --git a/bin/gsdd.mjs b/bin/gsdd.mjs index c3fb09a..4c48185 100644 --- a/bin/gsdd.mjs +++ b/bin/gsdd.mjs @@ -21,6 +21,7 @@ import { cmdSessionFingerprint } from './lib/session-fingerprint.mjs'; import { cmdUiProof } from './lib/ui-proof.mjs'; import { cmdControlMap } from './lib/control-map.mjs'; import { createCmdCloseoutReport } from './lib/closeout-report.mjs'; +import { cmdRegistryClear, cmdRegistryCrash, cmdRegistryList, cmdRegistryShow } from './lib/registry-commands.mjs'; import { resolveWorkspaceContext } from './lib/workspace-root.mjs'; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -112,6 +113,10 @@ const COMMANDS = { 'closeout-report': cmdCloseoutReport, 'find-phase': cmdFindPhase, 'phase-status': cmdPhaseStatus, + 'registry-clear': cmdRegistryClear, + 'registry-crash': cmdRegistryCrash, + 'registry-list': cmdRegistryList, + 'registry-show': cmdRegistryShow, verify: cmdVerify, scaffold: cmdScaffold, help: cmdHelp, diff --git a/bin/lib/closeout-report.mjs b/bin/lib/closeout-report.mjs index e774403..b5d4c54 100644 --- a/bin/lib/closeout-report.mjs +++ b/bin/lib/closeout-report.mjs @@ -50,6 +50,36 @@ async function buildHealthReportSafe(ctx, args) { } } +async function buildRegistrySectionSafe(workspaceRoot, closingPhaseId) { + try { + const { listLeases, registryExists } = await import('./registry.mjs'); + if (!registryExists(workspaceRoot)) return null; + const leases = listLeases(workspaceRoot); + const active = leases.filter((l) => l.lease_state === 'open'); + const closingId = closingPhaseId != null ? String(closingPhaseId) : null; + // An open lease only blocks closeout if it belongs to a phase OTHER than + // the one being closed. 
The own-phase active lease is expected during + // normal closeout (the phase is being verified). Parallel phases (P70+) + // will have multiple concurrent opens; we surface only the foreign ones + // as [BLOCK]. + const blocking = closingId + ? active.filter((l) => String(l.phase_id) !== closingId) + : active; + const ownPhase = closingId + ? active.filter((l) => String(l.phase_id) === closingId) + : []; + return { + active_leases: active, + blocking_leases: blocking, + own_phase_leases: ownPhase, + stale_leases: leases.filter((l) => l.lease_state === 'crashed'), + closed_leases: leases.filter((l) => l.lease_state === 'closed'), + }; + } catch { + return null; + } +} + function summarizeControlMap(map) { return { status: map.risks.some((risk) => risk.severity === 'block') @@ -237,6 +267,7 @@ export async function buildCloseoutReport(ctx = {}, args = []) { planningDir: context.planningDir, }); const health = await buildHealthReportSafe(ctx, ['--workspace-root', context.workspaceRoot]); + const registrySection = await buildRegistrySectionSafe(context.workspaceRoot, selectedPhase); const preflight = evaluateLifecyclePreflight({ planningDir: context.planningDir, surface: 'verify', @@ -276,6 +307,7 @@ export async function buildCloseoutReport(ctx = {}, args = []) { preflight: summarizePreflight(preflight), phase_verification: summarizePhaseVerification(phaseReport), ui_proof: phaseReport.ok ? phaseReport.result.ui_proof : null, + ...(registrySection !== null ? 
{ registry: registrySection } : {}), + }, + }; +} @@ -298,6 +330,26 @@ function printHuman(report) { if (warning.fix) console.log(` Fix: ${warning.fix}`); } } + if (report.registry) { + const { + blocking_leases = [], + own_phase_leases = [], + stale_leases = [], + closed_leases = [], + } = report.registry; + const hasAny = + blocking_leases.length > 0 || + own_phase_leases.length > 0 || + stale_leases.length > 0 || + closed_leases.length > 0; + if (hasAny) { + console.log('\nRegistry:'); + for (const l of blocking_leases) console.log(` [BLOCK] ${l.phase_id} ${l.branch_name} open ${l.granted_at}`); + for (const l of own_phase_leases) console.log(` [INFO] ${l.phase_id} ${l.branch_name} open ${l.granted_at} (closing phase)`); + for (const l of stale_leases) console.log(` [WARN] ${l.phase_id} ${l.branch_name} crashed ${l.granted_at}`); + for (const l of closed_leases) console.log(` [INFO] ${l.phase_id} ${l.branch_name} closed ${l.granted_at}`); + } + } console.log(`\nNext safe action: ${report.next_safe_action.command}`); console.log(`Reason: ${report.next_safe_action.reason}`); } diff --git a/bin/lib/init-runtime.mjs b/bin/lib/init-runtime.mjs index fffba4f..c0ef2ad 100644 --- a/bin/lib/init-runtime.mjs +++ b/bin/lib/init-runtime.mjs @@ -198,6 +198,13 @@ Commands: Maintain optional local intent annotations under .planning/.local/ closeout-report [--json] [--phase <phase>] Replay read-only closeout status from control-map, health, preflight, verify, and UI-proof signals + registry-list [--json] List all worktree coordination leases (phase, branch, state, granted_at) + registry-show <phase> [--json] + Show the lease record for a specific phase + registry-clear <phase> [--force] + Remove a lease record (--force required if lease is open) + registry-crash <phase> --reason <text> + Mark a lease as crashed (P66; placeholder in P65) help Show this summary Platforms (for --tools): diff --git a/bin/lib/phase.mjs b/bin/lib/phase.mjs index ac3cc36..bf64e0b 100644 --- a/bin/lib/phase.mjs +++ b/bin/lib/phase.mjs @@ -3,7
+3,7 @@ // IMPORTANT: No module-scope process.cwd() — ESM caching means sub-modules // evaluate once, so CWD must be computed inside function bodies. -import { existsSync, mkdirSync, readFileSync, writeFileSync, readdirSync } from 'fs'; +import { existsSync, mkdirSync, readFileSync, writeFileSync, readdirSync, statSync } from 'fs'; import { dirname, join, relative } from 'path'; import { output } from './cli-utils.mjs'; import { writeFingerprint } from './session-fingerprint.mjs'; @@ -560,18 +560,101 @@ export function updateRoadmapPhaseStatus(roadmap, phaseNumber, status) { return updatedLines.join('\n'); } +// AGENTS.md §1.17 — phase-closure artifact gate. A phase cannot transition to +// `done` unless NN-PLAN-CHECK.md and NN-VERIFICATION.md exist in the phase +// folder, and .internal-research/lessons-learned.md has been touched within +// the staleness window (default 7 days). `--force` overrides the gate but +// requires `--reason ` which is auto-appended as an LL-* entry. +const PHASE_CLOSURE_LESSONS_STALENESS_DAYS = 7; + +function findPhaseFolder(planningDir, phaseNumber) { + const phasesDir = join(planningDir, 'phases'); + if (!existsSync(phasesDir)) return null; + const padded = padPhase(phaseNumber); + for (const entry of readdirSync(phasesDir, { withFileTypes: true })) { + if (!entry.isDirectory()) continue; + if (entry.name.startsWith(`${padded}-`)) { + return { dir: join(phasesDir, entry.name), padded }; + } + } + return null; +} + +function checkPhaseClosureGate(workspaceRoot, planningDir, phaseNumber) { + const folder = findPhaseFolder(planningDir, phaseNumber); + if (!folder) { + // No phase folder exists for this phase number. §1.17 enforces artifacts + // for *real* phase closures; a roadmap-only mutation (no plan/summary + // structure under .planning/phases/) is out of scope. Skip the gate. 
+ return { ok: true, missing: [], gate_skipped: 'no phase folder' }; + } + // Also skip the gate if .internal-research/ does not exist — consumer + // projects do not have this directory; §1.17 is internal-GSDD governance. + const internalResearchDir = join(workspaceRoot, '.internal-research'); + if (!existsSync(internalResearchDir)) { + return { ok: true, missing: [], gate_skipped: 'no .internal-research/ directory (consumer project)' }; + } + const missing = []; + const planCheck = join(folder.dir, `${folder.padded}-PLAN-CHECK.md`); + const verification = join(folder.dir, `${folder.padded}-VERIFICATION.md`); + if (!existsSync(planCheck)) missing.push(planCheck); + if (!existsSync(verification)) missing.push(verification); + const lessons = join(internalResearchDir, 'lessons-learned.md'); + if (!existsSync(lessons)) { + missing.push(`${lessons} (file not found; §6 doc-sync evidence required)`); + } else { + const ageDays = (Date.now() - statSync(lessons).mtimeMs) / 86_400_000; + if (ageDays > PHASE_CLOSURE_LESSONS_STALENESS_DAYS) { + missing.push( + `${lessons} (last touched ${ageDays.toFixed(1)} days ago; must be within ${PHASE_CLOSURE_LESSONS_STALENESS_DAYS} days per §1.17)`, + ); + } + } + return { ok: missing.length === 0, missing }; +} + +function appendForceOverrideLessonsEntry(workspaceRoot, phaseNumber, reason) { + const lessons = join(workspaceRoot, '.internal-research', 'lessons-learned.md'); + if (!existsSync(lessons)) return; + const sanitizedReason = String(reason || '').trim(); + const escapedPhase = String(phaseNumber).toUpperCase().replace(/[^A-Z0-9-]/g, '-'); + const entry = [ + '', + '---', + '', + `## LL-PHASE-STATUS-FORCE-OVERRIDE-${escapedPhase}-${new Date().toISOString().slice(0, 10)}`, + '', + `\`gsdd phase-status ${phaseNumber} done --force\` was invoked; the §1.17 phase-closure artifact gate was bypassed.`, + `**Why:** ${sanitizedReason}`, + `**Rule:** Force-overrides are appended here automatically so the gap is auditable. 
Future maintainers should treat the named phase as having an artifact gap that needs follow-up. The gate exists to prevent silent drift; \`--force\` is the explicit, auditable escape hatch, not a routine option.`, + '', + ].join('\n'); + const current = readFileSync(lessons, 'utf-8'); + const trimmed = current.endsWith('\n') ? current : `${current}\n`; + writeFileSync(lessons, trimmed + entry); +} + export function cmdPhaseStatus(...args) { - const { args: normalizedArgs, planningDir, invalid, error } = resolveWorkspaceContext(args); + const { args: normalizedArgs, workspaceRoot, planningDir, invalid, error } = resolveWorkspaceContext(args); if (invalid) { console.error(error); process.exitCode = 1; return; } + const force = normalizedArgs.includes('--force'); + const reasonIdx = normalizedArgs.indexOf('--reason'); + const reason = reasonIdx !== -1 ? normalizedArgs[reasonIdx + 1] : null; + const positional = normalizedArgs.filter((arg, idx) => { + if (arg === '--force') return false; + if (arg === '--reason') return false; + if (idx > 0 && normalizedArgs[idx - 1] === '--reason') return false; + return true; + }); + const [phaseNumber, status] = positional; const roadmapPath = join(planningDir, 'ROADMAP.md'); - const [phaseNumber, status] = normalizedArgs; if (!phaseNumber || !status) { - console.error('Usage: gsdd phase-status '); + console.error('Usage: gsdd phase-status [--force --reason ]'); process.exitCode = 1; return; } @@ -582,6 +665,33 @@ export function cmdPhaseStatus(...args) { return; } + // §1.17 phase-closure artifact gate — only fires when transitioning to `done`. 
+ if (status === 'done') { + const gate = checkPhaseClosureGate(workspaceRoot, planningDir, phaseNumber); + if (!gate.ok && !force) { + console.error(`Refused: phase ${phaseNumber} cannot be marked done — §1.17 artifacts missing:`); + for (const item of gate.missing) console.error(` - ${item}`); + console.error(''); + console.error('Resolve by creating the missing artifacts, or pass `--force --reason ` to override.'); + console.error('Force-overrides are auto-recorded as LL-* entries in .internal-research/lessons-learned.md.'); + process.exitCode = 1; + return; + } + if (force && (!reason || !String(reason).trim())) { + console.error('Refused: --force requires --reason describing why the gate is being bypassed.'); + console.error('The reason will be appended as an LL-* entry to .internal-research/lessons-learned.md.'); + process.exitCode = 1; + return; + } + if (force && !gate.ok) { + try { + appendForceOverrideLessonsEntry(workspaceRoot, phaseNumber, reason); + } catch (err) { + console.error(`Warning: --force succeeded but failed to append LL entry (${err.message}).`); + } + } + } + try { const roadmap = readFileSync(roadmapPath, 'utf-8'); const updated = updateRoadmapPhaseStatus(roadmap, phaseNumber, status); @@ -590,7 +700,7 @@ export function cmdPhaseStatus(...args) { writeFileSync(roadmapPath, updated); try { writeFingerprint(planningDir); } catch { /* best-effort */ } } - output({ phase: phaseNumber, status, roadmap: '.planning/ROADMAP.md', changed }); + output({ phase: phaseNumber, status, roadmap: '.planning/ROADMAP.md', changed, gate_overridden: status === 'done' && force }); } catch (error) { console.error(error.message); process.exitCode = 1; diff --git a/bin/lib/registry-commands.mjs b/bin/lib/registry-commands.mjs new file mode 100644 index 0000000..013f9fe --- /dev/null +++ b/bin/lib/registry-commands.mjs @@ -0,0 +1,101 @@ +// registry-commands.mjs - CLI command handlers for the worktree coordination registry. 
+// Imported by bin/gsdd.mjs to keep the main entrypoint below the facade line limit. + +import { listLeases, getLease, clearLease } from './registry.mjs'; +import { resolveWorkspaceContext } from './workspace-root.mjs'; + +function resolveRegistryRoot(rawArgs) { + const context = resolveWorkspaceContext(rawArgs); + if (context.invalid) { + console.error(context.error || 'Invalid workspace root'); + process.exitCode = 1; + return { ok: false }; + } + return { ok: true, workspaceRoot: context.workspaceRoot, args: context.args }; +} + +function stripInternalPrefix(message) { + // Errors from registry.mjs are prefixed with the function name + // (e.g. "clearLease: no lease found..."). Users do not need to see the + // internal function name in the CLI output. + return String(message || '').replace(/^[a-zA-Z]+:\s*/, ''); +} + +export async function cmdRegistryClear(...rawArgs) { + const ctx = resolveRegistryRoot(rawArgs); + if (!ctx.ok) return; + const phase = ctx.args.find((a) => !a.startsWith('-')); + const force = ctx.args.includes('--force'); + if (!phase) { + console.error('Usage: gsdd registry-clear <phase> [--force]'); + process.exitCode = 1; + return; + } + try { + clearLease(ctx.workspaceRoot, phase, { force }); + console.log(`Lease for phase ${phase} cleared.`); + } catch (err) { + const msg = err.message || String(err); + if (msg.includes('open lease') && !force) { + console.error(`Error: phase ${phase} has an open lease. Use --force to clear it.`); + } else { + console.error(`Error: ${stripInternalPrefix(msg)}`); + } + process.exitCode = 1; + } +} + +export async function cmdRegistryCrash(...rawArgs) { + // Placeholder until P66 wires the debugger-role crashed-lease recovery + // ceremony. The token is claimed here so the hyphenated CLI grammar is + // locked in before P66 plans its CLI surface.
+ console.error( + 'gsdd registry-crash: not yet implemented; available in P66 (debugger crashed-lease recovery).', + ); + process.exitCode = 1; +} + +export async function cmdRegistryList(...rawArgs) { + const ctx = resolveRegistryRoot(rawArgs); + if (!ctx.ok) return; + const jsonMode = ctx.args.includes('--json'); + const leases = listLeases(ctx.workspaceRoot); + if (jsonMode) { + console.log(JSON.stringify(leases, null, 2)); + return; + } + if (leases.length === 0) { + console.log('No leases found.'); + return; + } + console.log('phase branch state granted_at'); + for (const l of leases) { + console.log(`${l.phase_id} ${l.branch_name} ${l.lease_state} ${l.granted_at}`); + } +} + +export async function cmdRegistryShow(...rawArgs) { + const ctx = resolveRegistryRoot(rawArgs); + if (!ctx.ok) return; + const jsonMode = ctx.args.includes('--json'); + const phase = ctx.args.find((a) => !a.startsWith('-')); + if (!phase) { + console.error('Usage: gsdd registry-show [--json]'); + process.exitCode = 1; + return; + } + const lease = getLease(ctx.workspaceRoot, phase); + if (!lease) { + console.error(`No lease found for phase ${phase}.`); + process.exitCode = 1; + return; + } + if (jsonMode) { + console.log(JSON.stringify(lease, null, 2)); + return; + } + for (const [key, value] of Object.entries(lease)) { + const display = Array.isArray(value) ? JSON.stringify(value) : String(value ?? ''); + console.log(`${key}: ${display}`); + } +} diff --git a/bin/lib/registry.mjs b/bin/lib/registry.mjs new file mode 100644 index 0000000..f270b7e --- /dev/null +++ b/bin/lib/registry.mjs @@ -0,0 +1,304 @@ +// registry.mjs - Worktree Coordination Registry (Track C: JSON + atomic rename) +// +// Stores per-phase lease state in .planning/.local/registry.json. +// Uses only node:fs, node:path built-ins. Zero external deps. +// +// Write pattern: writeFileSync(.json..tmp) then renameSync(.json..tmp -> .json) +// for atomicity. No separate lock file. 
Per-PID tmp filenames eliminate the +// .tmp truncation race between concurrent CLI invocations; the final +// renameSync is last-writer-wins (lost-update semantics in the absence of +// locking — diagnosed via the read-after-write fingerprint warning below). + +import { existsSync, mkdirSync, readFileSync, renameSync, writeFileSync } from 'node:fs'; +import { dirname, join } from 'node:path'; + +// --------------------------------------------------------------------------- +// Path helpers (exported so closeout-report, tests, and future callers do not +// duplicate the path string). +// --------------------------------------------------------------------------- + +export function registryPath(workspaceRoot) { + return join(workspaceRoot, '.planning', '.local', 'registry.json'); +} + +export function registryTmpPath(workspaceRoot) { + return join(workspaceRoot, '.planning', '.local', `registry.json.${process.pid}.tmp`); +} + +export function registryExists(workspaceRoot) { + return existsSync(registryPath(workspaceRoot)); +} + +function emptyRegistry() { + return { schema_version: 1, leases: [] }; +} + +// --------------------------------------------------------------------------- +// Internal helpers +// --------------------------------------------------------------------------- + +function quarantineCorruptFile(p, reason) { + try { + const broken = `${p}.broken-${Date.now()}`; + renameSync(p, broken); + process.stderr.write( + `[gsdd registry] WARN: registry corrupt (${reason}); quarantined to ${broken}; starting fresh.\n`, + ); + } catch { + process.stderr.write( + `[gsdd registry] WARN: registry corrupt (${reason}); quarantine rename failed; starting fresh.\n`, + ); + } +} + +function readRegistry(workspaceRoot) { + const p = registryPath(workspaceRoot); + if (!existsSync(p)) return emptyRegistry(); + let raw; + try { + raw = JSON.parse(readFileSync(p, 'utf8')); + } catch (err) { + quarantineCorruptFile(p, `parse error: ${err.message}`); + return 
emptyRegistry(); + } + if (!raw || typeof raw !== 'object' || !Array.isArray(raw.leases)) { + quarantineCorruptFile(p, 'shape invalid (leases is not an array)'); + return emptyRegistry(); + } + return raw; +} + +// safeRename — wraps renameSync with bounded retry for Windows EPERM/EBUSY, +// which fires when another process holds an open handle to the destination +// (e.g. a concurrent closeout-report read). Linux/macOS get a single attempt. +function safeRename(src, dst) { + const isWindows = process.platform === 'win32'; + const maxAttempts = isWindows ? 3 : 1; + for (let attempt = 0; attempt < maxAttempts; attempt += 1) { + try { + renameSync(src, dst); + return; + } catch (err) { + const retriable = err && (err.code === 'EPERM' || err.code === 'EBUSY'); + if (!retriable || attempt === maxAttempts - 1) throw err; + const deadline = Date.now() + 50; + while (Date.now() < deadline) { + // brief synchronous backoff; CLI context — acceptable + } + } + } +} + +function writeRegistry(workspaceRoot, data) { + const target = registryPath(workspaceRoot); + mkdirSync(dirname(target), { recursive: true }); + const tmp = registryTmpPath(workspaceRoot); + writeFileSync(tmp, JSON.stringify(data, null, 2), 'utf8'); + safeRename(tmp, target); + + // Read-after-write fingerprint warning: if a concurrent writer overwrote + // our just-published registry, the lease count will not match what we + // intended to publish. This is diagnostic only — last-writer-wins semantics + // remain. The warning gives operators a visible signal that concurrent + // writers raced and one of them lost. + try { + const reread = JSON.parse(readFileSync(target, 'utf8')); + if ( + reread && + Array.isArray(reread.leases) && + reread.leases.length !== data.leases.length + ) { + process.stderr.write( + `[gsdd registry] WARN: write-collision suspected — re-read shows ${reread.leases.length} leases; we wrote ${data.leases.length}. 
Another process may have published a conflicting state concurrently.\n`, + ); + } + } catch { + // best-effort — silent on re-read errors (the write itself succeeded) + } +} + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/** + * openRegistry — ensure the registry directory and file exist on disk. + * Returns a minimal handle for forward-compat (callers are not required to use it). + * @param {string} workspaceRoot + * @returns {{ path: string }} + */ +export function openRegistry(workspaceRoot) { + const p = registryPath(workspaceRoot); + mkdirSync(dirname(p), { recursive: true }); + if (!existsSync(p)) writeRegistry(workspaceRoot, emptyRegistry()); + return { path: p }; +} + +/** + * grantLease — append a new lease with lease_state "open". + * Throws if phase_id already has an open lease. + * + * Schema note: the `write_set` field is populated by P65 callers but the + * advisory-layer logic that consumes it (overlap detection, plan-checker + * integration) is owned by P69 (PARALLEL-03). P65 ships only the schema seam. + * + * Schema note: the `provenance_hash` field is reserved for SHA-256 of the + * phase plan file at grant time; it stays null until a future phase wires + * plan-file integrity checking. Keeping the field costs one JSON key per + * lease and prevents a schema bump when integrity is added. 
+ * + * @param {string} workspaceRoot + * @param {{ phase_id: string, worktree_path?: string, agent_id?: string|null, branch_name?: string, write_set?: string[], provenance_hash?: string|null }} fields + * @returns {object} the newly created lease + */ +export function grantLease(workspaceRoot, fields) { + const { + phase_id, + worktree_path = '', + agent_id = null, + branch_name = '', + write_set = [], + provenance_hash = null, + } = fields || {}; + + if (!phase_id) throw new Error('grantLease: phase_id is required'); + + const data = readRegistry(workspaceRoot); + + const existing = data.leases.find( + (l) => l.phase_id === phase_id && l.lease_state === 'open', + ); + if (existing) { + throw new Error( + `grantLease: phase ${phase_id} already has an open lease (granted_at ${existing.granted_at})`, + ); + } + + const lease = { + phase_id, + worktree_path, + agent_id, + branch_name, + lease_state: 'open', + granted_at: new Date().toISOString(), + closed_at: null, + crashed_at: null, + crash_reason: null, + write_set, + provenance_hash, + }; + + data.leases.push(lease); + writeRegistry(workspaceRoot, data); + return lease; +} + +/** + * closeLease — transition lease to "closed" and record closed_at. + * Throws if no lease found for phase_id, or if the most recent lease for + * phase_id is not in state "open" (audit-trail integrity: do not silently + * re-stamp already-closed or crashed leases). 
+ * @param {string} workspaceRoot + * @param {string} phase_id + * @returns {object} the updated lease + */ +export function closeLease(workspaceRoot, phase_id) { + const data = readRegistry(workspaceRoot); + + const idx = data.leases.findLastIndex((l) => l.phase_id === phase_id); + if (idx === -1) { + throw new Error(`closeLease: no lease found for phase ${phase_id}`); + } + + const lease = data.leases[idx]; + if (lease.lease_state !== 'open') { + throw new Error( + `closeLease: phase ${phase_id} has lease_state "${lease.lease_state}", expected "open"`, + ); + } + + data.leases[idx] = { + ...lease, + lease_state: 'closed', + closed_at: new Date().toISOString(), + }; + + writeRegistry(workspaceRoot, data); + return data.leases[idx]; +} + +/** + * crashLease — transition lease to "crashed" and record crash_reason and crashed_at. + * Stubbed at P65; wired by `gsdd registry-crash --reason ` in P66. + * Throws if no lease found for phase_id. + * @param {string} workspaceRoot + * @param {string} phase_id + * @param {string} reason + * @returns {object} the updated lease + */ +export function crashLease(workspaceRoot, phase_id, reason) { + const data = readRegistry(workspaceRoot); + + const idx = data.leases.findLastIndex((l) => l.phase_id === phase_id); + if (idx === -1) { + throw new Error(`crashLease: no lease found for phase ${phase_id}`); + } + + data.leases[idx] = { + ...data.leases[idx], + lease_state: 'crashed', + crashed_at: new Date().toISOString(), + crash_reason: reason || null, + }; + + writeRegistry(workspaceRoot, data); + return data.leases[idx]; +} + +/** + * listLeases — return the leases array, or [] if no registry file exists. + * @param {string} workspaceRoot + * @returns {object[]} + */ +export function listLeases(workspaceRoot) { + return readRegistry(workspaceRoot).leases; +} + +/** + * getLease — return the single lease object for phase_id, or null if not found. + * Returns the last matching lease if multiple exist (e.g. re-granted after close). 
+ * @param {string} workspaceRoot + * @param {string} phase_id + * @returns {object|null} + */ +export function getLease(workspaceRoot, phase_id) { + const matches = readRegistry(workspaceRoot).leases.filter((l) => l.phase_id === phase_id); + return matches.length > 0 ? matches[matches.length - 1] : null; +} + +/** + * clearLease — remove the most recent lease entry for phase_id. + * Throws if lease_state is "open" and force is false. + * Throws if no lease found for phase_id. + * @param {string} workspaceRoot + * @param {string} phase_id + * @param {{ force?: boolean }} options + */ +export function clearLease(workspaceRoot, phase_id, { force = false } = {}) { + const data = readRegistry(workspaceRoot); + + const idx = data.leases.findLastIndex((l) => l.phase_id === phase_id); + if (idx === -1) { + throw new Error(`clearLease: no lease found for phase ${phase_id}`); + } + + const lease = data.leases[idx]; + if (lease.lease_state === 'open' && !force) { + throw new Error( + `clearLease: phase ${phase_id} has an open lease. Use --force to clear it.`, + ); + } + + data.leases.splice(idx, 1); + writeRegistry(workspaceRoot, data); +} diff --git a/distilled/DESIGN.md b/distilled/DESIGN.md index b3fd95c..141b7c1 100644 --- a/distilled/DESIGN.md +++ b/distilled/DESIGN.md @@ -74,6 +74,7 @@ 61. [Deliberate Subagent Contract](#d61---deliberate-subagent-contract) 62. [Repo-Native UI Proof Contract](#d62---repo-native-ui-proof-contract) 63. [Computed-First Control Map](#d63---computed-first-control-map) +64. 
[JSON+Atomic-Rename for the Coordination Registry](#d64---jsonatomic-rename-for-the-coordination-registry) --- @@ -2921,6 +2922,45 @@ Posture compatibility is part of that closeout contract: `repo_closeout` and `ru --- +## D64 - JSON+Atomic-Rename for the Coordination Registry + +**Decision (2026-05-13):** The worktree coordination registry uses JSON+atomic-rename (write to a per-PID `.planning/.local/registry.json..tmp` then `fs.renameSync` over target) rather than SQLite WAL. Per-PID tmp filenames eliminate the truncation race between concurrent CLI invocations; the final renameSync is last-writer-wins with a read-after-write fingerprint warning that surfaces lost updates on stderr. + +**Context:** +- An earlier decision locked the registry to "WAL-mode SQLite" citing OpenHands as the production analog. Direct repo inspection (see §2 evidence below) confirmed OpenHands uses per-event JSON files for session and coordination state; SQLite exists in OpenHands only in the enterprise tier for billing/OAuth/user management. The earlier lock was corrected on this evidence. +- The decision space evaluated three tracks: + - **Track A (`better-sqlite3`):** introduces a native runtime dependency, requires ABI rebuild per Node minor version, and `--ignore-scripts` installs silently crash at runtime. Conflicts with the zero-dependency invariant. + - **Track B (`node:sqlite`):** zero external deps but requires `engines >=22.13.0`, raising the floor above Node 20 LTS; binary file format produces irrecoverable git conflicts; Stability 1.2 RC has already shipped one breaking change. + - **Track C (JSON + atomic rename):** zero deps, Node >=20 preserved, copyable with `cp`, git-mergeable line-by-line, inspectable/recoverable in any text editor or `jq`. Validated by §2 evidence below. +- v2.0.0 ships MANUAL orchestration (sequential writes by one human orchestrator). 
Concurrent multi-writer scenarios are explicitly deferred to v2.1+ automated orchestration; the per-PID tmp + fingerprint warning is the v2.0 surface signal for the v2.1 lock-or-CAS work. + +**Decision details:** +- Registry file: `.planning/.local/registry.json` (gitignored, local-only). +- Write pattern: parse existing → modify in memory → `writeFileSync` to `.planning/.local/registry.json..tmp` → `safeRename(tmp, target)` with Windows EPERM/EBUSY retry. Per-PID tmp filenames prevent the .tmp truncation race between two concurrent writers; concurrent renames remain last-writer-wins. +- Read-after-write fingerprint: after rename, re-read the registry and emit a stderr `WARN: write-collision suspected` if the lease count does not match what we wrote. Diagnostic only; operators can re-run on warning. +- Corrupt-JSON handling: `readRegistry` quarantines unparseable or wrong-shape files to `registry.json.broken-`, emits a stderr warning, and returns an empty registry. Forensic evidence preserved. +- Fields per lease: `phase_id`, `worktree_path`, `agent_id`, `branch_name`, `lease_state` (open|closed|crashed; `merging` reserved for P68 phase-close CLI), `granted_at`, `closed_at`, `crashed_at`, `crash_reason`, `write_set` (schema seam owned by P65; advisory logic owned by P69), `provenance_hash` (reserved: SHA-256 of phase plan file; null until plan-file integrity wiring lands in a later phase). +- State machine: `closeLease` hard-errors on closing a non-open lease (audit-trail integrity); callers must use `crashLease` or `clearLease` for non-open transitions. +- Upgrade path: revisit `node:sqlite` when v2.1+ ships automated orchestration with concurrent multi-writer requirements, or `gsdd report` introduces multi-milestone aggregation queries. + +**Evidence:** Three §2 research streams (Sonnet-4.6 subagent, 2026-05-13) inspecting actual upstream sources. 
Persisted artifacts: `.internal-research/p65-section2-spec-framework.md`, `p65-section2-orchestrator.md`, `p65-section2-industry.md`. + +**Spec framework category (§2.1):** Confirmed negative. GSD has no parallel-execution, worktree, lease, or registry concept; phase state lives in `.planning/STATE.md` and `.planning/ROADMAP.md` as plain Markdown. OpenSpec ships a workspace *discovery* YAML registry, not an execution-coordination registry; its "parallel changes" is sequential context-switching. LeanSpec has no coordination state. Track C is therefore novel scope relative to all spec frameworks. + +**Orchestrator category (§2.2):** OpenHands (commit `cae76e54`) stores session/event state as per-event JSON files under `{persistence_dir}/{user_id}/v1_conversations/{conversation_id.hex}/{event_id.hex}.json` (`filesystem_event_service.py:24-36`, `event_service_base.py:70,162`). SQLite is present only in `enterprise/` for billing/OAuth — zero SQLite for session state in either tier. MetaGPT writes cross-run coordination to `{workspace}/storage/team/team.json` via `write_json_file()` (`team.py:59-79`); live coordination is in-process. Conductor OSS uses JSON-blob serialization throughout — Redis is the production backend (`RedisExecutionDAO.java`), SQLite appears only in the scheduler sub-module (`SqliteSchedulerDAO.java`), with JSON as the wire format in every backend. Synthesis: JSON is the universal orchestrator serialization format; backend choice (filesystem, Redis, SQLite) varies by operational tier. GSDD's filesystem JSON sits in the OpenHands-OSS / MetaGPT tier. + +**Industry guidance category (§2.3):** Anthropic Claude Code stores per-session transcripts as append-only JSONL at `~/.claude/projects/<project>/sessions/<session-id>.jsonl` (no database, crash-safe by append).
Shared mutable state lives in `~/.claude.json`, which has a documented race condition under concurrent writes — 8+ filed GitHub issues converge on `write-tmp + rename()` as the correct fix (https://github.com/anthropics/claude-code/issues — search "claude.json race"). This is the strongest direct industry endorsement of Track C's pattern. Cursor 2.0 ships `.cursor/worktrees.json` plus per-task claim files plus atomic `mkdir`-based locking — structurally identical to D64's design intent. OpenAI Codex CLI uses SQLite (`sqlite_home`) for "agent jobs and other resumable runtime state" — an honest counter-example that validates SQLite as the upgrade target for v2.1+, not as the v2.0 starting point. GitHub Copilot Coding Agent delegates coordination to git worktrees and branches; there is no client-side registry, leaving no queryable in-progress state — a gap D64 fills for CLI tooling. + +**Artifacts:** `bin/lib/registry.mjs`, `bin/lib/registry-commands.mjs`, `tests/gsdd.registry.test.cjs`, `bin/gsdd.mjs` (registry-list/show/clear/crash placeholder), `bin/lib/closeout-report.mjs` (registry section with blocking-lease threading). + +**Consequences:** +- Zero-dependency invariant (package.json `dependencies: {}`) preserved. +- Concurrent multi-writer safety: per-PID tmp eliminates truncation; rename is last-writer-wins; fingerprint warning surfaces lost updates. Strong-consistency multi-writer is deferred to v2.1+. +- Upgrade path to SQLite remains open and is the right call when (a) automated orchestration with concurrent rename frequency >10/s, (b) multi-milestone aggregation queries land, or (c) the registry crosses ~10MB. +- The write_set field is shipped as a schema seam only; consumer logic is owned by a later advisory-layer phase (P69 in v2.0.0). 
+ +--- + ## Maintenance This document is updated when: diff --git a/distilled/EVIDENCE-INDEX.md b/distilled/EVIDENCE-INDEX.md index 7839dc5..b6c52b1 100644 --- a/distilled/EVIDENCE-INDEX.md +++ b/distilled/EVIDENCE-INDEX.md @@ -517,6 +517,40 @@ --- +## D64 — JSON+Atomic-Rename for the Coordination Registry +- `bin/lib/registry.mjs`, `bin/lib/registry-commands.mjs`, `bin/gsdd.mjs`, `bin/lib/closeout-report.mjs` +- `tests/gsdd.registry.test.cjs`, `tests/gsdd.closeout-report.test.cjs` +- Persisted §2 research streams (2026-05-13): + - `.internal-research/p65-section2-spec-framework.md` + - `.internal-research/p65-section2-orchestrator.md` + - `.internal-research/p65-section2-industry.md` + +### §2.1 Spec framework (negative-citation confirmed) +- GSD: confirmed no parallel/worktree/lease/registry concept by direct inspection of `agents/_archive/gsd-*.md` (11 archived role files); phase state lives in `.planning/STATE.md` and `.planning/ROADMAP.md` only. `agents/_archive/gsd-plan-checker.md:160` and `distilled/workflows/map-codebase.md:25,88` are the only "parallel" references and both are intra-phase task waves, not cross-phase coordination. +- OpenSpec: https://github.com/Fission-AI/OpenSpec — workspace discovery YAML registry (`getGlobalDataDir()/workspaces/registry.yaml`); not an execution-coordination surface. +- LeanSpec: https://github.com/codervisor/lean-spec — no coordination-state concept. (Note: peakwave-ai/leanspec does not exist; codervisor/lean-spec is the actual repo.) + +### §2.2 Orchestrator +- OpenHands (https://github.com/All-Hands-AI/OpenHands, commit `cae76e54`): per-event JSON files at `{persistence_dir}/{user_id}/v1_conversations/{conversation_id.hex}/{event_id.hex}.json` via `filesystem_event_service.py:24-36` and `event_service_base.py:70,162`. SQLite present only in `enterprise/` for billing/OAuth (100+ Alembic migrations) — zero SQLite for session/event state. 
+- MetaGPT (https://github.com/geekan/MetaGPT or FoundationAgents/MetaGPT): cross-run team state at `{workspace}/storage/team/team.json` (`team.py:59-79`, blob `5a983888`); live coordination via in-process message bus. +- Conductor OSS (Netflix): `RedisExecutionDAO.java` (Jackson JSON in Redis hashes) is the production backend; `SqliteSchedulerDAO.java` (blob `fe0ec389`) only for scheduler sub-module. JSON wire format in every backend. + +### §2.3 Industry guidance +- Anthropic Claude Code: session transcripts JSONL append-only at `~/.claude/projects//sessions/.jsonl`; shared mutable state `~/.claude.json` has a documented race condition under concurrent writes — 8+ filed GitHub issues converge on `write-tmp + rename()` as the correct fix. Strongest direct endorsement of Track C's pattern by the harness vendor. +- Cursor 2.0: `.cursor/worktrees.json` + per-task JSON claim files + atomic `mkdir`-based locking — structurally identical to D64. +- OpenAI Codex CLI (https://github.com/openai/codex): uses SQLite (`sqlite_home`) for "agent jobs and other resumable runtime state"; JSONL for conversation history; TOML for config. Honest counter-example: SQLite is the correct upgrade target for v2.1+ when concurrent multi-writer requirements land, not for v2.0 starting state. +- GitHub Copilot Coding Agent: per-agent isolation via named git worktrees (`--`); no client-side coordination registry. The gap that D64 fills for CLI tooling: a queryable local in-progress state. + +### Production patterns confirmed +- npm/write-file-atomic: https://github.com/npm/write-file-atomic — same tmp+rename pattern in widespread use. +- Git lockfile API: `LockFile.register()` (git/lockfile.h). +- pnpm/yarn: same tmp+rename for `node_modules/.package-lock.json`. + +### Synthesis +JSON-with-atomic-rename for state files is the community-validated standard in 2026 for single-writer or low-frequency multi-writer state. 
SQLite is the industry choice when concurrent reads/queries or multi-writer locking matters (Codex CLI). Pure git delegation is viable only with server infrastructure (Copilot). No-protection JSON writes are universally identified as a bug (Claude Code race condition). D64 sits in the correct tier for GSDD's v2.0 constraints; upgrade to SQLite remains the documented v2.1+ path. + +--- + ## Maintenance Update this file when: diff --git a/package.json b/package.json index 85a0c74..d8b2596 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,7 @@ }, "scripts": { "test": "npm run test:gsdd", - "test:gsdd": "node tests/gsdd.init.test.cjs && node tests/gsdd.models.test.cjs && node tests/gsdd.consumer-ceremony.test.cjs && node tests/gsdd.manifest.test.cjs && node tests/gsdd.plan.adapters.test.cjs && node tests/gsdd.audit-milestone.test.cjs && node tests/gsdd.invariants.test.cjs && node tests/gsdd.guards.test.cjs && node tests/gsdd.health.test.cjs && node tests/gsdd.scenarios.test.cjs && node tests/gsdd.cross-runtime.test.cjs && node tests/gsdd.control-map.test.cjs && node tests/gsdd.closeout-report.test.cjs && node tests/phase.test.cjs && node tests/session-fingerprint.test.cjs", + "test:gsdd": "node tests/gsdd.init.test.cjs && node tests/gsdd.models.test.cjs && node tests/gsdd.consumer-ceremony.test.cjs && node tests/gsdd.manifest.test.cjs && node tests/gsdd.plan.adapters.test.cjs && node tests/gsdd.audit-milestone.test.cjs && node tests/gsdd.invariants.test.cjs && node tests/gsdd.guards.test.cjs && node tests/gsdd.health.test.cjs && node tests/gsdd.scenarios.test.cjs && node tests/gsdd.cross-runtime.test.cjs && node tests/gsdd.control-map.test.cjs && node tests/gsdd.closeout-report.test.cjs && node tests/gsdd.registry.test.cjs && node tests/phase.test.cjs && node tests/session-fingerprint.test.cjs", "prepublishOnly": "node -e \"const ok=process.env.GITHUB_ACTIONS==='true'&&process.env.GITHUB_REF_NAME==='main'&&process.env.GITHUB_WORKFLOW==='Release'; 
if(!ok){console.error('Refusing to publish gsdd-cli outside the GitHub Actions Release workflow on main.'); process.exit(1)}\"" }, "devDependencies": { diff --git a/tests/gsdd.closeout-report.test.cjs b/tests/gsdd.closeout-report.test.cjs index 22e5d85..33c3ee4 100644 --- a/tests/gsdd.closeout-report.test.cjs +++ b/tests/gsdd.closeout-report.test.cjs @@ -3,6 +3,7 @@ const assert = require('node:assert'); const fs = require('fs'); const path = require('path'); const { execFileSync, spawnSync } = require('node:child_process'); +const { pathToFileURL } = require('url'); const { cleanup, createTempProject, runCliAsMain } = require('./gsdd.helpers.cjs'); @@ -271,4 +272,121 @@ describe('closeout-report helper', () => { assert.ok(report.health.warnings.some((entry) => entry.id === 'W_CLOSEOUT_HEALTH_UNAVAILABLE')); assert.strictEqual(report.phase_verification.status, 'passed'); }); + + test('closeout-report omits registry key when no registry file exists', async () => { + await initWorkspace(); + writeRoadmap(); + writeCompletedPhase(1, 'first-closed-phase'); + + const result = await runCliAsMain(tmpDir, ['closeout-report', '--json', '--phase', '1']); + assert.strictEqual(result.exitCode, 0, result.output); + const report = JSON.parse(result.output); + + assert.strictEqual('registry' in report, false, 'registry key must not be present when no registry file exists'); + }); + + test('closeout-report includes registry key with active_leases when an open lease exists', async () => { + await initWorkspace(); + writeRoadmap(); + writeCompletedPhase(1, 'first-closed-phase'); + + // Seed one open lease in the temp project's registry. 
+ const registryDir = path.join(tmpDir, '.planning', '.local'); + fs.mkdirSync(registryDir, { recursive: true }); + const registryFile = path.join(registryDir, 'registry.json'); + const seedData = { + schema_version: 1, + leases: [{ + phase_id: 'test-seed-99', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'test/seed-99', + lease_state: 'open', + granted_at: new Date().toISOString(), + closed_at: null, + crash_reason: null, + write_set: [], + provenance_hash: null, + }], + }; + fs.writeFileSync(registryFile, JSON.stringify(seedData, null, 2), 'utf8'); + + const result = await runCliAsMain(tmpDir, ['closeout-report', '--json', '--phase', '1']); + assert.strictEqual(result.exitCode, 0, result.output); + const report = JSON.parse(result.output); + + assert.ok('registry' in report, 'registry key must be present when a lease exists'); + assert.ok(Array.isArray(report.registry.active_leases), 'active_leases must be an array'); + assert.ok( + report.registry.active_leases.some((l) => l.phase_id === 'test-seed-99'), + 'active_leases must include the seeded open lease', + ); + assert.ok(Array.isArray(report.registry.stale_leases), 'stale_leases must be an array'); + assert.ok(Array.isArray(report.registry.closed_leases), 'closed_leases must be an array'); + }); + + test('closeout-report tags only foreign-phase open leases as blocking when closing a specific phase', async () => { + await initWorkspace(); + writeRoadmap(); + writeCompletedPhase(1, 'first-closed-phase'); + + // Seed two open leases — one for the phase we're closing (1) and one for + // an unrelated phase (99). Only phase 99 should appear in blocking_leases. 
+ const registryDir = path.join(tmpDir, '.planning', '.local'); + fs.mkdirSync(registryDir, { recursive: true }); + const registryFile = path.join(registryDir, 'registry.json'); + const seedData = { + schema_version: 1, + leases: [ + { + phase_id: '1', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/phase-1', + lease_state: 'open', + granted_at: new Date().toISOString(), + closed_at: null, + crashed_at: null, + crash_reason: null, + write_set: [], + provenance_hash: null, + }, + { + phase_id: '99', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/phase-99', + lease_state: 'open', + granted_at: new Date().toISOString(), + closed_at: null, + crashed_at: null, + crash_reason: null, + write_set: [], + provenance_hash: null, + }, + ], + }; + fs.writeFileSync(registryFile, JSON.stringify(seedData, null, 2), 'utf8'); + + const result = await runCliAsMain(tmpDir, ['closeout-report', '--json', '--phase', '1']); + assert.strictEqual(result.exitCode, 0, result.output); + const report = JSON.parse(result.output); + + assert.ok(Array.isArray(report.registry.blocking_leases), 'blocking_leases must be an array'); + assert.ok(Array.isArray(report.registry.own_phase_leases), 'own_phase_leases must be an array'); + + const blockingIds = report.registry.blocking_leases.map((l) => String(l.phase_id)); + const ownIds = report.registry.own_phase_leases.map((l) => String(l.phase_id)); + + assert.deepStrictEqual( + blockingIds.sort(), + ['99'], + `phase 99 (foreign) must be blocking; phase 1 (own) must not. 
Got: ${JSON.stringify(blockingIds)}`, + ); + assert.deepStrictEqual( + ownIds.sort(), + ['1'], + `phase 1 (own) must be own_phase; got: ${JSON.stringify(ownIds)}`, + ); + }); }); diff --git a/tests/gsdd.guards.test.cjs b/tests/gsdd.guards.test.cjs index 27ff711..55a5577 100644 --- a/tests/gsdd.guards.test.cjs +++ b/tests/gsdd.guards.test.cjs @@ -138,8 +138,8 @@ describe('G10 - CLI Module Boundary', () => { test('gsdd.mjs remains a thin facade', () => { const lines = lineCount(GSDD_PATH); - assert.ok(lines <= 140, - `gsdd.mjs is ${lines} lines (max 140). FIX: Keep the entrypoint as a thin composition root.`); + assert.ok(lines <= 145, + `gsdd.mjs is ${lines} lines (max 145). FIX: Keep the entrypoint as a thin composition root.`); }); }); diff --git a/tests/gsdd.registry.test.cjs b/tests/gsdd.registry.test.cjs new file mode 100644 index 0000000..03d5b7c --- /dev/null +++ b/tests/gsdd.registry.test.cjs @@ -0,0 +1,759 @@ +'use strict'; + +const { test, describe, beforeEach, afterEach } = require('node:test'); +const assert = require('node:assert'); +const fs = require('node:fs'); +const path = require('node:path'); +const os = require('node:os'); +const { spawn } = require('node:child_process'); +const { pathToFileURL } = require('url'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function createTempWorkspace() { + return fs.mkdtempSync(path.join(os.tmpdir(), 'gsdd-registry-test-')); +} + +function cleanupWorkspace(dir) { + fs.rmSync(dir, { recursive: true, force: true }); +} + +function registryPath(workspaceRoot) { + return path.join(workspaceRoot, '.planning', '.local', 'registry.json'); +} + +function registryTmpPath(workspaceRoot, pid) { + return path.join(workspaceRoot, '.planning', '.local', `registry.json.${pid}.tmp`); +} + +function findTmpOrphans(workspaceRoot) { + const dir = path.join(workspaceRoot, '.planning', '.local'); + 
if (!fs.existsSync(dir)) return []; + return fs + .readdirSync(dir) + .filter((f) => /^registry\.json\.\d+\.tmp$/.test(f)) + .map((f) => path.join(dir, f)); +} + +function ensurePlanningMarker(workspaceRoot) { + // resolveWorkspaceContext walks up looking for .planning/ — create a marker + // so subdirectory CWD tests resolve the workspace root correctly. + const planning = path.join(workspaceRoot, '.planning'); + fs.mkdirSync(planning, { recursive: true }); + const config = path.join(planning, 'config.json'); + if (!fs.existsSync(config)) { + fs.writeFileSync(config, JSON.stringify({ initVersion: 'test' }, null, 2), 'utf8'); + } +} + +// Load registry module. Because this is a CJS test file and registry.mjs is +// ESM, we use a shared promise to import once and cache it. +let registryModulePromise = null; +function getRegistry() { + if (!registryModulePromise) { + const registryUrl = pathToFileURL( + path.join(__dirname, '..', 'bin', 'lib', 'registry.mjs'), + ).href; + registryModulePromise = import(`${registryUrl}?t=${Date.now()}`); + } + return registryModulePromise; +} + +// --------------------------------------------------------------------------- +// Suite +// --------------------------------------------------------------------------- + +describe('registry module', () => { + let tmpDir; + let registry; + + beforeEach(async () => { + tmpDir = createTempWorkspace(); + // Re-import on each test to avoid module cache with stale state. + const registryUrl = pathToFileURL( + path.join(__dirname, '..', 'bin', 'lib', 'registry.mjs'), + ).href; + registry = await import(`${registryUrl}?t=${Date.now()}-${Math.random()}`); + }); + + afterEach(() => { + cleanupWorkspace(tmpDir); + }); + + // ------------------------------------------------------------------------- + // 1. 
Empty registry (no file on disk) → listLeases returns [] + // ------------------------------------------------------------------------- + test('listLeases returns [] when no registry file exists', () => { + const leases = registry.listLeases(tmpDir); + assert.deepStrictEqual(leases, []); + assert.strictEqual(fs.existsSync(registryPath(tmpDir)), false, 'registry file must not be created by listLeases'); + }); + + // ------------------------------------------------------------------------- + // 2. grantLease → lease with state "open", granted_at set + // ------------------------------------------------------------------------- + test('grantLease creates a lease with lease_state open and granted_at set', () => { + const before = Date.now(); + const lease = registry.grantLease(tmpDir, { + phase_id: 'test-01', + worktree_path: tmpDir, + agent_id: 'agent-1', + branch_name: 'feat/test-01', + write_set: ['bin/gsdd.mjs'], + provenance_hash: 'abc123', + }); + const after = Date.now(); + + assert.strictEqual(lease.phase_id, 'test-01'); + assert.strictEqual(lease.lease_state, 'open'); + assert.strictEqual(lease.branch_name, 'feat/test-01'); + assert.strictEqual(lease.agent_id, 'agent-1'); + assert.deepStrictEqual(lease.write_set, ['bin/gsdd.mjs']); + assert.strictEqual(lease.provenance_hash, 'abc123'); + assert.ok(lease.granted_at, 'granted_at must be set'); + const grantedAtMs = new Date(lease.granted_at).getTime(); + assert.ok(grantedAtMs >= before && grantedAtMs <= after, 'granted_at must be within test bounds'); + assert.strictEqual(lease.closed_at, null); + assert.strictEqual(lease.crash_reason, null); + + // Verify persisted to disk. 
+ const onDisk = JSON.parse(fs.readFileSync(registryPath(tmpDir), 'utf8')); + assert.strictEqual(onDisk.schema_version, 1); + assert.strictEqual(onDisk.leases.length, 1); + assert.strictEqual(onDisk.leases[0].phase_id, 'test-01'); + assert.strictEqual(onDisk.leases[0].lease_state, 'open'); + }); + + // ------------------------------------------------------------------------- + // 3. closeLease → state "closed", closed_at set + // ------------------------------------------------------------------------- + test('closeLease transitions lease to closed with closed_at set', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-02', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-02', + write_set: [], + provenance_hash: null, + }); + + const before = Date.now(); + const updated = registry.closeLease(tmpDir, 'test-02'); + const after = Date.now(); + + assert.strictEqual(updated.lease_state, 'closed'); + assert.ok(updated.closed_at, 'closed_at must be set'); + const closedAtMs = new Date(updated.closed_at).getTime(); + assert.ok(closedAtMs >= before && closedAtMs <= after, 'closed_at must be within test bounds'); + + const leases = registry.listLeases(tmpDir); + assert.strictEqual(leases.find((l) => l.phase_id === 'test-02').lease_state, 'closed'); + }); + + // ------------------------------------------------------------------------- + // 4. 
crashLease → state "crashed", crash_reason set + // ------------------------------------------------------------------------- + test('crashLease transitions lease to crashed with crash_reason set', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-03', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-03', + write_set: [], + provenance_hash: null, + }); + + const updated = registry.crashLease(tmpDir, 'test-03', 'process killed by SIGKILL'); + + assert.strictEqual(updated.lease_state, 'crashed'); + assert.strictEqual(updated.crash_reason, 'process killed by SIGKILL'); + + const leases = registry.listLeases(tmpDir); + assert.strictEqual(leases.find((l) => l.phase_id === 'test-03').lease_state, 'crashed'); + }); + + // ------------------------------------------------------------------------- + // 5. clearLease throws on open lease without force + // ------------------------------------------------------------------------- + test('clearLease throws if lease is open and force is false', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-04', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-04', + write_set: [], + provenance_hash: null, + }); + + assert.throws( + () => registry.clearLease(tmpDir, 'test-04'), + /open lease/i, + 'clearLease must throw an error mentioning "open lease" when force is false', + ); + + // Lease must still be present after the failed clear. + const lease = registry.getLease(tmpDir, 'test-04'); + assert.ok(lease, 'lease must still exist after failed clearLease'); + assert.strictEqual(lease.lease_state, 'open'); + }); + + // ------------------------------------------------------------------------- + // 6. 
clearLease removes closed lease without force + // ------------------------------------------------------------------------- + test('clearLease removes a closed lease without --force', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-05', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-05', + write_set: [], + provenance_hash: null, + }); + registry.closeLease(tmpDir, 'test-05'); + registry.clearLease(tmpDir, 'test-05'); + + const lease = registry.getLease(tmpDir, 'test-05'); + assert.strictEqual(lease, null, 'getLease must return null after clearLease'); + }); + + // ------------------------------------------------------------------------- + // 7. clearLease removes open lease with --force + // ------------------------------------------------------------------------- + test('clearLease removes an open lease when force is true', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-06', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-06', + write_set: [], + provenance_hash: null, + }); + registry.clearLease(tmpDir, 'test-06', { force: true }); + + const lease = registry.getLease(tmpDir, 'test-06'); + assert.strictEqual(lease, null, 'getLease must return null after forced clearLease'); + }); + + // ------------------------------------------------------------------------- + // 8. getLease returns null for unknown phase_id + // ------------------------------------------------------------------------- + test('getLease returns null for unknown phase_id', () => { + const lease = registry.getLease(tmpDir, 'nonexistent-phase'); + assert.strictEqual(lease, null); + }); + + // ------------------------------------------------------------------------- + // 9. 
Duplicate grant → throws if phase_id already open + // ------------------------------------------------------------------------- + test('grantLease throws if phase_id already has an open lease', () => { + registry.grantLease(tmpDir, { + phase_id: 'test-07', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-07', + write_set: [], + provenance_hash: null, + }); + + assert.throws( + () => registry.grantLease(tmpDir, { + phase_id: 'test-07', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/test-07-dup', + write_set: [], + provenance_hash: null, + }), + /already has an open lease/i, + 'grantLease must throw when phase_id already has an open lease', + ); + + // Original lease untouched. + const lease = registry.getLease(tmpDir, 'test-07'); + assert.strictEqual(lease.branch_name, 'feat/test-07'); + }); + + // ------------------------------------------------------------------------- + // Durability fixture: parent-kills-child, cross-platform + // ------------------------------------------------------------------------- + test('registry file survives parent-kills-child mid-write (durability fixture)', { timeout: 10000 }, async (t) => { + // (a) Grant a baseline lease so registry.json has a committed complete write. + registry.grantLease(tmpDir, { + phase_id: '65-fixture-baseline', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/v2-registry', + write_set: [], + provenance_hash: null, + }); + + // Confirm registry file exists before spawning child. + assert.ok(fs.existsSync(registryPath(tmpDir)), 'registry.json must exist after grantLease'); + + // Absolute path to registry.mjs for the child process. + const registryMjsPath = path.join(__dirname, '..', 'bin', 'lib', 'registry.mjs'); + + // (b) Child script: writes per-PID .json..tmp (matching production + // behavior), prints "READY ", then sleeps indefinitely without + // ever calling renameSync — simulating a mid-write crash. 
+ const childScript = ` +import { existsSync, mkdirSync, writeFileSync } from 'node:fs'; +import { join } from 'node:path'; +const workspaceRoot = ${JSON.stringify(tmpDir)}; +const tmpPath = join(workspaceRoot, '.planning', '.local', \`registry.json.\${process.pid}.tmp\`); +const dir = join(workspaceRoot, '.planning', '.local'); +mkdirSync(dir, { recursive: true }); +// Write a new entry to the .tmp file (simulating a mid-write crash). +const newEntry = { + phase_id: '65-fixture-crash', + worktree_path: workspaceRoot, + agent_id: null, + branch_name: 'feat/crash-candidate', + lease_state: 'open', + granted_at: new Date().toISOString(), + closed_at: null, + crashed_at: null, + crash_reason: null, + write_set: [], + provenance_hash: null, +}; +const corrupt = { schema_version: 1, leases: [newEntry] }; +writeFileSync(tmpPath, JSON.stringify(corrupt, null, 2), 'utf8'); +// Signal readiness with our pid — parent will kill us now. +process.stdout.write('READY ' + process.pid + '\\n'); +// Sleep indefinitely — never calls renameSync. +setInterval(() => {}, 60000); +`; + + // (c) Spawn the child. + const child = spawn(process.execPath, ['--input-type=module'], { + cwd: tmpDir, + stdio: ['pipe', 'pipe', 'pipe'], + }); + + // Feed the script to stdin. + child.stdin.write(childScript); + child.stdin.end(); + + // Wait for "READY" on stdout. + await new Promise((resolve, reject) => { + let buffer = ''; + const timeout = setTimeout(() => { + child.kill(); + reject(new Error('Durability fixture: child did not emit READY within 5s')); + }, 5000); + + child.stdout.on('data', (chunk) => { + buffer += chunk.toString(); + if (buffer.includes('READY')) { + clearTimeout(timeout); + // (c) Kill the child immediately after it signals readiness. + const killed = child.kill(); + if (!killed) { + // On Windows, kill() may return false for race reasons; proceed anyway. 
+ t.diagnostic('child.kill() returned false — process may have already exited'); + } + resolve(); + } + }); + + child.on('error', (err) => { + clearTimeout(timeout); + reject(err); + }); + }); + + // (d) Wait for the child to terminate. + await new Promise((resolve) => { + if (child.exitCode !== null) { + resolve(); + return; + } + // Give the process 2s to exit after kill. + const timeout = setTimeout(() => { + t.diagnostic('Child did not terminate within 2s after kill — known Windows limitation'); + resolve(); + }, 2000); + child.once('close', () => { + clearTimeout(timeout); + resolve(); + }); + }); + + // (e) Assert: registry.json is valid JSON and baseline lease is still present. + const registryFile = registryPath(tmpDir); + assert.ok(fs.existsSync(registryFile), 'registry.json must still exist after child kill'); + + let parsed; + try { + parsed = JSON.parse(fs.readFileSync(registryFile, 'utf8')); + } catch (err) { + assert.fail(`registry.json must be valid JSON after child kill: ${err.message}`); + } + + assert.ok(Array.isArray(parsed.leases), 'registry.leases must be an array'); + const baseline = parsed.leases.find((l) => l.phase_id === '65-fixture-baseline'); + assert.ok( + baseline, + 'baseline lease (65-fixture-baseline) must still be present in registry after child kill', + ); + assert.strictEqual(baseline.lease_state, 'open', 'baseline lease state must be open'); + + // (f) Assert: the child left a .tmp orphan (proves the crash was mid-write, + // before renameSync). The orphan filename follows the per-PID pattern + // `registry.json..tmp`. We cannot know the child's pid from the + // parent without extra IPC, so we scan the directory for any matching + // orphan. Tolerance: on Windows, the child may exit before we get here + // and the OS may have already cleaned the file; we diagnose instead of + // hard-failing in that case to keep CI stable across platforms. 
+ const orphans = findTmpOrphans(tmpDir); + if (orphans.length === 0) { + t.diagnostic('No .tmp orphan found — child may have exited before its writeFileSync flushed, or OS cleaned the file. Atomic-rename property is still proven by the registry.json invariant above.'); + } else { + assert.ok( + orphans.length >= 1, + '.tmp orphan must exist after child kill (proves crash was mid-write)', + ); + } + + // Cleanup: remove baseline lease. + registry.clearLease(tmpDir, '65-fixture-baseline', { force: true }); + + // Clean up any .tmp orphans the child left. + for (const tmpFile of findTmpOrphans(tmpDir)) { + fs.rmSync(tmpFile, { force: true }); + } + }); + + // ------------------------------------------------------------------------- + // closeLease state guard: throws on closing a non-open lease + // ------------------------------------------------------------------------- + test('closeLease throws when lease is already closed', () => { + registry.grantLease(tmpDir, { + phase_id: 'state-guard-01', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/state-guard-01', + write_set: [], + provenance_hash: null, + }); + registry.closeLease(tmpDir, 'state-guard-01'); + assert.throws( + () => registry.closeLease(tmpDir, 'state-guard-01'), + /lease_state "closed", expected "open"/, + 'closeLease must throw when lease is already closed', + ); + }); + + test('closeLease throws when lease is crashed', () => { + registry.grantLease(tmpDir, { + phase_id: 'state-guard-02', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/state-guard-02', + write_set: [], + provenance_hash: null, + }); + registry.crashLease(tmpDir, 'state-guard-02', 'simulated'); + assert.throws( + () => registry.closeLease(tmpDir, 'state-guard-02'), + /lease_state "crashed", expected "open"/, + 'closeLease must throw when lease is crashed', + ); + }); + + // ------------------------------------------------------------------------- + // crashLease records crashed_at timestamp (parity with 
closed_at) + // ------------------------------------------------------------------------- + test('crashLease records crashed_at ISO timestamp', () => { + const before = Date.now(); + registry.grantLease(tmpDir, { + phase_id: 'crashed-at-01', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/crashed-at', + write_set: [], + provenance_hash: null, + }); + const lease = registry.crashLease(tmpDir, 'crashed-at-01', 'oom'); + const after = Date.now(); + assert.strictEqual(lease.lease_state, 'crashed'); + assert.strictEqual(lease.crash_reason, 'oom'); + assert.ok(typeof lease.crashed_at === 'string', 'crashed_at must be a string'); + const crashedAtMs = Date.parse(lease.crashed_at); + assert.ok( + crashedAtMs >= before && crashedAtMs <= after, + `crashed_at (${lease.crashed_at}) must be between ${new Date(before).toISOString()} and ${new Date(after).toISOString()}`, + ); + }); + + test('grantLease initializes crashed_at as null alongside closed_at', () => { + const lease = registry.grantLease(tmpDir, { + phase_id: 'crashed-at-init', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/init', + write_set: [], + provenance_hash: null, + }); + assert.strictEqual(lease.crashed_at, null, 'crashed_at must be null on a fresh open lease'); + assert.strictEqual(lease.closed_at, null, 'closed_at must be null on a fresh open lease'); + }); + + // ------------------------------------------------------------------------- + // Corrupt JSON handling: quarantine + warn + empty + // ------------------------------------------------------------------------- + test('readRegistry quarantines unparseable JSON and returns empty', () => { + // Write garbage that JSON.parse will reject. + const dir = path.join(tmpDir, '.planning', '.local'); + fs.mkdirSync(dir, { recursive: true }); + const target = registryPath(tmpDir); + fs.writeFileSync(target, '{ this is not json', 'utf8'); + + // Capture stderr from listLeases. 
+ const originalWrite = process.stderr.write.bind(process.stderr); + const captured = []; + process.stderr.write = (chunk, ...rest) => { + captured.push(chunk.toString()); + return true; + }; + + let leases; + try { + leases = registry.listLeases(tmpDir); + } finally { + process.stderr.write = originalWrite; + } + + assert.deepStrictEqual(leases, []); + const warnText = captured.join(''); + assert.match(warnText, /WARN.*corrupt/, 'expected stderr corruption warning'); + assert.match(warnText, /quarantined to .+broken-/, 'expected quarantine message'); + + // The corrupt file should now be renamed to registry.json.broken-. + const dirEntries = fs.readdirSync(dir); + const quarantined = dirEntries.find((f) => /^registry\.json\.broken-\d+$/.test(f)); + assert.ok(quarantined, `expected a quarantine file in ${dir}, found: ${dirEntries.join(', ')}`); + }); + + test('readRegistry quarantines wrong-shape JSON (leases not an array) and returns empty', () => { + const dir = path.join(tmpDir, '.planning', '.local'); + fs.mkdirSync(dir, { recursive: true }); + const target = registryPath(tmpDir); + fs.writeFileSync(target, JSON.stringify({ schema_version: 1 }), 'utf8'); + + const originalWrite = process.stderr.write.bind(process.stderr); + const captured = []; + process.stderr.write = (chunk) => { + captured.push(chunk.toString()); + return true; + }; + + let leases; + try { + leases = registry.listLeases(tmpDir); + } finally { + process.stderr.write = originalWrite; + } + + assert.deepStrictEqual(leases, []); + assert.match(captured.join(''), /shape invalid/); + }); + + test('readRegistry returns empty on truly empty file', () => { + const dir = path.join(tmpDir, '.planning', '.local'); + fs.mkdirSync(dir, { recursive: true }); + const target = registryPath(tmpDir); + fs.writeFileSync(target, '', 'utf8'); + + const originalWrite = process.stderr.write.bind(process.stderr); + process.stderr.write = () => true; + let leases; + try { + leases = registry.listLeases(tmpDir); + } 
finally { + process.stderr.write = originalWrite; + } + assert.deepStrictEqual(leases, []); + }); + + // ------------------------------------------------------------------------- + // Concurrent writers: per-PID tmp files prevent the .tmp truncation race. + // Both children's grantLease calls must complete without throwing, and the + // final registry must be valid JSON. The exact lease count is non-deterministic + // (last-writer-wins on the rename), but the fingerprint warning surfaces any + // lost update on stderr so an operator can re-run. + // ------------------------------------------------------------------------- + test('concurrent grantLease across two children produces valid JSON (no .tmp truncation)', { timeout: 15000 }, async () => { + const registryMjsAbs = path.join(__dirname, '..', 'bin', 'lib', 'registry.mjs'); + const registryUrl = pathToFileURL(registryMjsAbs).href; + + function spawnGranter(phaseId) { + const script = ` +import { grantLease } from ${JSON.stringify(registryUrl)}; +try { + grantLease(${JSON.stringify(tmpDir)}, { + phase_id: ${JSON.stringify(phaseId)}, + worktree_path: ${JSON.stringify(tmpDir)}, + agent_id: null, + branch_name: 'feat/' + ${JSON.stringify(phaseId)}, + write_set: [], + provenance_hash: null, + }); + process.stdout.write('OK\\n'); +} catch (err) { + process.stderr.write('ERR ' + err.message + '\\n'); + process.exit(1); +} +`; + return new Promise((resolve, reject) => { + const child = spawn(process.execPath, ['--input-type=module'], { + stdio: ['pipe', 'pipe', 'pipe'], + }); + let stdout = ''; + let stderr = ''; + child.stdout.on('data', (c) => (stdout += c.toString())); + child.stderr.on('data', (c) => (stderr += c.toString())); + child.on('close', (code) => resolve({ code, stdout, stderr })); + child.on('error', reject); + child.stdin.write(script); + child.stdin.end(); + }); + } + + const [a, b] = await Promise.all([ + spawnGranter('concur-A'), + spawnGranter('concur-B'), + ]); + + // Both processes finish 
successfully. + assert.strictEqual(a.code, 0, `child A failed: ${a.stderr}`); + assert.strictEqual(b.code, 0, `child B failed: ${b.stderr}`); + + // Final registry must be valid JSON (no truncation corruption). + const raw = fs.readFileSync(registryPath(tmpDir), 'utf8'); + let parsed; + assert.doesNotThrow(() => { parsed = JSON.parse(raw); }); + assert.ok(Array.isArray(parsed.leases), 'registry.leases must be an array'); + // At least one of the two lease writes survived (last-writer-wins + // semantics; fingerprint warning was emitted by the loser). + const haveA = parsed.leases.some((l) => l.phase_id === 'concur-A'); + const haveB = parsed.leases.some((l) => l.phase_id === 'concur-B'); + assert.ok( + haveA || haveB, + 'at least one of the two concurrent leases must be present in the final registry', + ); + + // No stray .tmp orphans should remain — both renameSync calls succeeded. + const orphans = findTmpOrphans(tmpDir); + assert.strictEqual( + orphans.length, + 0, + `expected no .tmp orphans after successful renames; found ${orphans.length}: ${orphans.join(', ')}`, + ); + }); +}); + +// --------------------------------------------------------------------------- +// CLI command smoke tests (via node bin/gsdd.mjs) +// --------------------------------------------------------------------------- + +describe('registry CLI commands', () => { + let tmpDir; + + beforeEach(async () => { + tmpDir = createTempWorkspace(); + }); + + afterEach(() => { + cleanupWorkspace(tmpDir); + }); + + function runCli(args) { + const { spawnSync } = require('node:child_process'); + const cliPath = path.join(__dirname, '..', 'bin', 'gsdd.mjs'); + const result = spawnSync(process.execPath, [cliPath, ...args], { + cwd: tmpDir, + encoding: 'utf-8', + }); + return { + stdout: result.stdout || '', + stderr: result.stderr || '', + exitCode: result.status, + }; + } + + test('registry-list returns empty gracefully on fresh workspace', () => { + const result = runCli(['registry-list']); + 
assert.strictEqual(result.exitCode, 0, `unexpected exit code: ${result.stderr}`); + assert.ok(result.stdout.includes('No leases found.')); + }); + + test('registry-list --json returns [] on fresh workspace', () => { + const result = runCli(['registry-list', '--json']); + assert.strictEqual(result.exitCode, 0, `unexpected exit code: ${result.stderr}`); + const parsed = JSON.parse(result.stdout.trim()); + assert.deepStrictEqual(parsed, []); + }); + + test('registry-show exits 1 with message for unknown phase', () => { + const result = runCli(['registry-show', '99']); + assert.strictEqual(result.exitCode, 1); + assert.ok(result.stderr.includes('No lease found for phase 99')); + }); + + test('registry-crash placeholder exits 1 with P66 deferral message', () => { + const result = runCli(['registry-crash', '65']); + assert.strictEqual(result.exitCode, 1); + assert.match( + result.stderr, + /not yet implemented.*P66/i, + 'registry-crash must surface a P66 deferral message', + ); + }); + + test('registry commands resolve workspace root from a nested cwd', async () => { + // Reproduces the Codex P1 finding: prior to the fix, the registry commands + // read from process.cwd() — so running them from a subdirectory silently + // missed the root registry. After the fix, resolveWorkspaceContext walks + // up looking for a .planning/ marker. + ensurePlanningMarker(tmpDir); + + // Grant a lease at the workspace root using direct module access. + const registryUrl = pathToFileURL( + path.join(__dirname, '..', 'bin', 'lib', 'registry.mjs'), + ).href; + const reg = await import(`${registryUrl}?t=${Date.now()}-${Math.random()}`); + reg.grantLease(tmpDir, { + phase_id: 'cwd-resolve-01', + worktree_path: tmpDir, + agent_id: null, + branch_name: 'feat/cwd-resolve', + write_set: [], + provenance_hash: null, + }); + + // Create a subdirectory and run registry-list from inside it. 
+ const subdir = path.join(tmpDir, 'deeply', 'nested'); + fs.mkdirSync(subdir, { recursive: true }); + + const { spawnSync } = require('node:child_process'); + const cliPath = path.join(__dirname, '..', 'bin', 'gsdd.mjs'); + const result = spawnSync(process.execPath, [cliPath, 'registry-list'], { + cwd: subdir, + encoding: 'utf-8', + env: { ...process.env, GSDD_WORKSPACE_ROOT: '' }, + }); + assert.strictEqual( + result.status, + 0, + `registry-list from subdir failed: ${result.stderr}`, + ); + assert.ok( + result.stdout.includes('cwd-resolve-01'), + `registry-list from subdir must surface the root registry's lease; got:\n${result.stdout}`, + ); + }); +}); diff --git a/tests/phase.test.cjs b/tests/phase.test.cjs index 246f9e8..83dba02 100644 --- a/tests/phase.test.cjs +++ b/tests/phase.test.cjs @@ -3917,6 +3917,140 @@ describe('Phase 32 runtime-freshness helper', () => { }); }); +// ───────────────────────────────────────────────────────────────────────────── +// §1.17 phase-closure artifact gate +// ───────────────────────────────────────────────────────────────────────────── + +describe('phase-status §1.17 phase-closure artifact gate', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createGsddTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + function setupPhase(phaseNumber, slug, { withPlanCheck = false, withVerification = false } = {}) { + const phaseDir = path.join(tmpDir, '.planning', 'phases', `${String(phaseNumber).padStart(2, '0')}-${slug}`); + fs.mkdirSync(phaseDir, { recursive: true }); + const padded = String(phaseNumber).padStart(2, '0'); + fs.writeFileSync(path.join(phaseDir, `${padded}-PLAN.md`), '# plan\n'); + fs.writeFileSync(path.join(phaseDir, `${padded}-SUMMARY.md`), '# summary\n'); + if (withPlanCheck) fs.writeFileSync(path.join(phaseDir, `${padded}-PLAN-CHECK.md`), '# plan-check\n'); + if (withVerification) fs.writeFileSync(path.join(phaseDir, `${padded}-VERIFICATION.md`), '# verification\n'); + fs.writeFileSync( + 
path.join(tmpDir, '.planning', 'ROADMAP.md'), + `# Roadmap\n\n- [-] **Phase ${phaseNumber}: Test Phase** - goal\n`, + ); + } + + function seedLessonsLearned(mtimeOffsetMs = 0) { + const dir = path.join(tmpDir, '.internal-research'); + fs.mkdirSync(dir, { recursive: true }); + const lessons = path.join(dir, 'lessons-learned.md'); + fs.writeFileSync(lessons, '# lessons-learned\n'); + if (mtimeOffsetMs) { + const stamp = new Date(Date.now() - mtimeOffsetMs); + fs.utimesSync(lessons, stamp, stamp); + } + return lessons; + } + + test('refuses done when PLAN-CHECK.md is missing', async () => { + setupPhase(65, 'reg', { withPlanCheck: false, withVerification: true }); + seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done']); + assert.strictEqual(result.exitCode, 1, result.output); + assert.match(result.output, /§1\.17 artifacts missing/); + assert.match(result.output, /65-PLAN-CHECK\.md/); + const roadmap = fs.readFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), 'utf-8'); + assert.match(roadmap, /\[-\]/, 'ROADMAP must NOT be mutated when gate refuses'); + }); + + test('refuses done when VERIFICATION.md is missing', async () => { + setupPhase(65, 'reg', { withPlanCheck: true, withVerification: false }); + seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done']); + assert.strictEqual(result.exitCode, 1, result.output); + assert.match(result.output, /65-VERIFICATION\.md/); + }); + + test('refuses done when lessons-learned.md is stale (>7d)', async () => { + setupPhase(65, 'reg', { withPlanCheck: true, withVerification: true }); + seedLessonsLearned(10 * 86_400_000); // 10 days old + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done']); + assert.strictEqual(result.exitCode, 1, result.output); + assert.match(result.output, /lessons-learned\.md/); + assert.match(result.output, /days ago/); + }); + + test('refuses --force without --reason', async () => { + 
setupPhase(65, 'reg', { withPlanCheck: false, withVerification: false }); + seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done', '--force']); + assert.strictEqual(result.exitCode, 1, result.output); + assert.match(result.output, /--force requires --reason/); + }); + + test('--force --reason bypasses gate and appends LL entry', async () => { + setupPhase(65, 'reg', { withPlanCheck: false, withVerification: false }); + const lessons = seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, [ + 'phase-status', '65', 'done', + '--force', '--reason', 'CI environment cannot run verify; gate bypassed deliberately', + ]); + assert.strictEqual(result.exitCode, 0, result.output); + + const roadmap = fs.readFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), 'utf-8'); + assert.match(roadmap, /\[x\] \*\*Phase 65/, 'ROADMAP marker must update on successful --force'); + + const lessonsContent = fs.readFileSync(lessons, 'utf-8'); + assert.match(lessonsContent, /LL-PHASE-STATUS-FORCE-OVERRIDE-65-/); + assert.match(lessonsContent, /CI environment cannot run verify/); + }); + + test('skips gate when no phase folder exists (roadmap-only entry)', async () => { + // No setupPhase call — only roadmap exists. 
+ fs.mkdirSync(path.join(tmpDir, '.planning'), { recursive: true }); + fs.writeFileSync( + path.join(tmpDir, '.planning', 'ROADMAP.md'), + '# Roadmap\n\n- [-] **Phase 99: Roadmap Only** - goal\n', + ); + seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, ['phase-status', '99', 'done']); + assert.strictEqual(result.exitCode, 0, result.output); + const roadmap = fs.readFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), 'utf-8'); + assert.match(roadmap, /\[x\] \*\*Phase 99/); + }); + + test('skips gate when no .internal-research/ exists (consumer project)', async () => { + setupPhase(65, 'reg', { withPlanCheck: false, withVerification: false }); + // No .internal-research/ directory — consumer-project scenario. + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done']); + assert.strictEqual(result.exitCode, 0, result.output); + }); + + test('passes when all required artifacts exist and lessons-learned is fresh', async () => { + setupPhase(65, 'reg', { withPlanCheck: true, withVerification: true }); + seedLessonsLearned(); + + const result = await runCliAsMain(tmpDir, ['phase-status', '65', 'done']); + assert.strictEqual(result.exitCode, 0, result.output); + + const roadmap = fs.readFileSync(path.join(tmpDir, '.planning', 'ROADMAP.md'), 'utf-8'); + assert.match(roadmap, /\[x\] \*\*Phase 65/); + }); +}); + // ───────────────────────────────────────────────────────────────────────────── // milestone complete command // ─────────────────────────────────────────────────────────────────────────────