diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md
new file mode 100644
index 0000000..7b819b6
--- /dev/null
+++ b/.claude/CLAUDE.md
@@ -0,0 +1,138 @@
+# Global Rules
+
+# Auto-generated from ~/.cursor/rules/ (alwaysApply: true files only).
+# Do not edit manually. Re-generate via convention-sync.
+
+---
+
+## answer-questions-first
+
+# Answer Questions Before Acting
+
+Before using any code editing tools, scan the user's message for `?` characters and determine if it's a question.
+
+- **Ignore** `?` inside code, URLs, or query parameters (e.g. `?param=x`, `?key=value`, `const x = ifTrue ? 'yes' : 'no'`)
+- **Treat all other `?`** as candidate questions, and act on those that genuinely read as questions.
+
+If questions are detected:
+
+1. Read `~/.cursor/skills/q/SKILL.md` and follow its workflow to answer every question.
+2. **Workflow context**: If a skill was invoked earlier in this conversation, note which one. When a question or critique references agent behavior from that execution, load the skill definition before answering and evaluate whether the skill should have governed that behavior. If it should have but didn't, that's a workflow gap — treat it as the primary concern per `fix-workflow-first.mdc`.
+3. Do **not** edit files, create files, or run mutating commands until the user responds.
+4. Only proceed with implementation after the user permits it in a follow-up message.
+
+---
+
+## load-standards-by-filetype
+
+Load language-specific coding standards before editing or investigating lint/type errors in files, without redundant reads.
+
+
+Before using any code editing tool on a file OR investigating lint/type errors in that file type, check if the matching standards rule is already present in `cursor_rules_context`. Only read the rule file if it is NOT already in context.
+If the rule is not in context, read it using the Read tool and follow its contents BEFORE making the edit or investigating the error.
+
+
+
+
+| File glob | Standards file |
+|---|----|
+| `**/*.ts`,`**/*.tsx` | `~/.cursor/rules/typescript-standards.mdc` |
+
+
+
+---
+
+## no-format-lint
+
+# No Manual Formatting or Lint Fixing
+
+- Do NOT run `yarn lint`, `yarn fix`, `yarn verify`, or any lint/format shell commands unless explicitly asked.
+- Do NOT manually fix formatting issues (whitespace, quotes, semicolons, trailing commas, line length). The `lint-commit.sh` script runs `eslint --fix` (including Prettier) before each commit.
+- Only use `ReadLints` to check for logical or type errors, not formatting. If the only lint errors are formatting-related, ignore them.
+- Focus tokens on correctness and logic, not style.
+
+---
+
+## workflow-halt-on-error
+
+
+
+All workflow-related skill definitions (`*.md` / `SKILL.md`) and workflow companion scripts (`*.sh`) are sourced from `~/.cursor/`. When executing skills, prefer explicit `~/.cursor/...` paths and do not assume repo-local workflow files unless the skill explicitly points to one.
+
+When a skill mentions a script path, resolve it under `~/.cursor/skills/<skill-name>/scripts/` unless the skill explicitly specifies an absolute path elsewhere. Do not assume repo-relative `scripts/` paths without verifying the skill directory contents.
+
+When ANY shell command fails (non-zero exit code) while executing an active skill workflow, a delegated subskill from that workflow, or a companion-script step required by that workflow (except where explicitly allowed by `auto-fix-verification-failures` or `companion-script-nonzero-contracts`):
+1. **STOP** — do not retry, work around, substitute, or continue the workflow.
+2. **Report** — show the user the exact command, exit code, and error output.
+3. **Diagnose** — classify the failure: missing tool (`command not found`), wrong path, permissions, or logic error.
+4. **Evaluate workflow** — if the failure reveals a gap in a skill definition, follow the fix-workflow-first rules below.
+5. **Wait** — do not resume until the user responds.
+
+
+When a workflow gap is discovered in an active skill definition:
+1. **Stop immediately** — do not continue the current task or apply any workaround.
+2. **Identify the root cause** in the skill (`~/.cursor/skills/*/SKILL.md`) definition.
+3. **Propose the fix** to the user and wait for approval before proceeding.
+4. **Fix the skill** using `/author` after approval.
+5. **Resume the original task** only after the skill is updated.
+
+Fixing the skill takes **absolute priority** over all other actions — including workarounds, continuing the original task, or applying temporary fixes. Do NOT apply workarounds or manual fixes before proposing the skill update. The correct sequence is: identify gap → propose fix → get approval → apply fix → then resume original task. This applies to all workflow issues — missed steps, incorrect output, wrong tool usage, shell failures, formatting problems, etc. The skill is the source of truth; patching around it creates drift.
+
+
+These workflow halt rules are for skill-driven execution, especially hands-off/orchestrated skills and their dependencies. They do not automatically apply to ad hoc exploration, incidental verification, or low-risk authoring work unless that command is part of an active skill contract.
+
+Exception to `halt-on-error`: For verification/code-quality failures where diagnostics are explicit and local, continue automatically with bounded remediation.
+
+Allowed auto-fix scope:
+- TypeScript/compiler failures (`tsc`) with clear file/line diagnostics
+- Lint failures (`eslint`) with clear file/line diagnostics
+- Test failures (`jest`/`yarn test`) when stack traces or assertion output identify failing test files
+- `verify-repo.sh` code-step failures that resolve to one of the above
+
+Required behavior:
+1. Briefly log rationale: failure type, affected files, and why scope is unambiguous.
+2. Apply the minimal fix in the failing repo.
+3. Re-run the failing verification step.
+4. Limit to 2 remediation attempts; if still failing or scope expands, fall back to `halt-on-error`.
+
+Never auto-fix:
+- Missing tools/auth (`command not found`, `PROMPT_GH_AUTH`)
+- Wrong path/permissions
+- Companion script contract/usage failures
+- Unexpected exit codes from orchestrator scripts
+- Any failure requiring destructive operations or workflow bypasses
+
+
+Respect documented companion script exit-code contracts. Non-zero does NOT always mean fatal.
+
+For `~/.cursor/skills/im/scripts/lint-warnings.sh`:
+- `0` = no remaining lint findings after auto-fix
+- `1` = remaining lint findings after auto-fix (expected actionable state)
+- `2` = execution error (fatal)
+
+Required behavior:
+1. If exit `1`, continue workflow by fixing the remaining lint findings before implementation.
+2. If the script auto-fixes pre-existing lint issues, commit those changes in a separate lint-fix commit immediately before feature commits, even if no findings remain.
+3. If exit `2`, apply `halt-on-error`.
+
+
+Do NOT silently substitute an alternative tool or approach when a command fails. If `rg` is not found, do not fall back to `grep`. If a script exits non-zero, do not manually replicate what the script does. The failure is the signal — report it.
+
+
+
+
+
+Scan the user's message for `/word` tokens. A token is a **command invocation** when ALL of:
+- `/word` is preceded by whitespace, a newline, or is at the start of the message
+- `word` contains only lowercase letters and hyphens (e.g., `/im`, `/pr-create`, `/author`)
+- `/word` is NOT inside a file path, URL, or code block
+
+When detected:
+1. Read `~/.cursor/skills/<word>/SKILL.md` and follow it immediately.
+2. If the file does not exist, inform the user: "Skill `/<word>` not found in `~/.cursor/skills/`."
+
+**Ignore `/`** in: file paths (`/Users/...`, `~/...`), URLs (`https://...`), mid-word (`and/or`), backticks/code blocks.
+
+
+
+
diff --git a/.claude/skills b/.claude/skills
new file mode 120000
index 0000000..8574c4f
--- /dev/null
+++ b/.claude/skills
@@ -0,0 +1 @@
+../.cursor/skills
\ No newline at end of file
diff --git a/.cursor/.syncignore b/.cursor/.syncignore
new file mode 100644
index 0000000..10363f6
--- /dev/null
+++ b/.cursor/.syncignore
@@ -0,0 +1,6 @@
+# Files to exclude from convention-sync (one glob per line)
+# Patterns match against relative paths like: commands/foo.sh, rules/bar.mdc
+
+# WIP commands
+commands/hudl.md
+commands/github-pr-hudl.sh
diff --git a/.cursor/README.md b/.cursor/README.md
new file mode 100644
index 0000000..25761bc
--- /dev/null
+++ b/.cursor/README.md
@@ -0,0 +1,364 @@
+Complete agent-assisted development workflow for Edge repositories — slash skills with companion scripts, coding standards, review standards, and the author skill.
+
+## Installation
+
+**1. Set the required env var** in your `~/.zshrc`:
+```bash
+export GIT_BRANCH_PREFIX=yourname # e.g. jon, paul, sam — used for branch naming and PR discovery
+```
+
+**2. Install files into `~/.cursor/`:**
+```bash
+curl -sL https://github.com/EdgeApp/edge-conventions/archive/refs/heads/jon/agents.tar.gz | \
+ tar -xz --strip-components=2 -C ~/.cursor 'edge-conventions-jon-agents/.cursor' && \
+ find ~/.cursor -type f -name "*.sh" -exec chmod +x {} + && \
+ echo "✓ Installed into ~/.cursor/"
+```
+
+**3. Verify prerequisites:**
+- `gh` CLI — `gh auth login`
+- `jq` — `brew install jq`
+- `ASANA_TOKEN` env var (Asana scripts only)
+
+---
+
+## Table of Contents
+
+- [Architecture](#architecture)
+- [Skills](#skills-slash-skills)
+- [Companion Scripts](#companion-scripts)
+- [Shared Module](#shared-module-edge-repojs)
+- [Rules](#rules-mdc-files)
+- [Author Skill](#author-skill)
+- [Design Principles](#design-principles)
+
+---
+
+## Architecture
+
+```
+.cursor/
+├── skills/ # Primary slash skills (*/SKILL.md) + skill scripts
+├── scripts/ # Shared utility scripts (status dashboard, portability)
+├── commands/ # Minimal legacy command wrappers (if present)
+└── rules/ # Coding/review standards (.mdc)
+```
+
+**Separation of concerns:**
+- **Commands** (`.md`) — Define agent workflows: steps, rules, edge cases. Invoked explicitly via `/command`.
+- **Skills** (`SKILL.md`) — Primary workflow units invoked with `/skill-name` (or selected by context).
+- **Companion scripts** (`.sh`, `.js`) — Handle deterministic operations: API calls, git ops, JSON processing. Skills call scripts; scripts never call skills.
+- **Rules** (`.mdc`) — Persistent coding standards loaded on-demand by file type or command step. Two classes: **editing standards** (loaded when writing code) and **review standards** (loaded during PR review).
+
+All GitHub API operations use **`gh` CLI** (`gh api`, `gh api graphql`, `gh pr`). No raw `curl` + `$GITHUB_TOKEN`.
+
+**User-specific configuration** is driven by the `GIT_BRANCH_PREFIX` env var — set once in `.zshrc`, used by scripts for branch naming (`$GIT_BRANCH_PREFIX/feature-name`) and PR discovery. No hardcoded usernames.
+
+---
+
+## Skills (Slash Skills)
+
+### Core Implementation
+
+| Skill | Description |
+|---------|-------------|
+| [`/im`](.cursor/skills/im/SKILL.md) | Implement an Asana task or ad-hoc feature/fix with clean, structured commits |
+| [`/one-shot`](.cursor/skills/one-shot/SKILL.md) | Legacy-style one-command flow: `/asana-plan` → `/im` → `/pr-create` with default Asana attach/assign |
+| [`/pr-create`](.cursor/skills/pr-create/SKILL.md) | Create a PR from the current branch; optional Asana attach/assign flags |
+| [`/dep-pr`](.cursor/skills/dep-pr/SKILL.md) | Create dependent Asana tasks and run downstream PR workflow |
+| [`/changelog`](.cursor/skills/changelog/SKILL.md) | Update CHANGELOG.md following existing patterns |
+
+### Planning and Context
+
+| Skill | Description |
+|---------|-------------|
+| [`/asana-plan`](.cursor/skills/asana-plan/SKILL.md) | Build implementation plans from Asana tasks or text/file requirements |
+| [`/task-review`](.cursor/skills/task-review/SKILL.md) | Fetch + analyze Asana task context |
+| [`/q`](.cursor/skills/q/SKILL.md) | Answer questions before taking action |
+
+### Review and Landing
+
+| Skill | Description |
+|---------|-------------|
+| [`/pr-review`](.cursor/skills/pr-review/SKILL.md) | Review a PR against coding and review standards |
+| [`/pr-address`](.cursor/skills/pr-address/SKILL.md) | Address PR feedback with fixup commits and replies |
+| [`/pr-land`](.cursor/skills/pr-land/SKILL.md) | Land approved PRs: prepare, merge, publish, and Asana updates |
+
+### Asana and Utility
+
+| Skill | Description |
+|---------|-------------|
+| [`/asana-task-update`](.cursor/skills/asana-task-update/SKILL.md) | Generic Asana mutations (attach PR, assign, status/field updates) |
+| [`/standup`](.cursor/skills/standup/SKILL.md) | Generate daily standup from Asana + GitHub activity |
+| [`/chat-audit`](.cursor/skills/chat-audit/SKILL.md) | Audit chat sessions for workflow/rule issues |
+| [`/convention-sync`](.cursor/skills/convention-sync/SKILL.md) | Sync `~/.cursor` changes with the `edge-conventions` repo and update PR description |
+| [`/author`](.cursor/skills/author/SKILL.md) | Create/update/debug skills and related scripts/rules |
+
+---
+
+## Companion Scripts
+
+### PR Operations
+
+| Script | What it does | API |
+|--------|-------------|-----|
+| [`pr-create.sh`](.cursor/skills/pr-create/scripts/pr-create.sh) | Create PR for current branch with repo-template-aligned title/body | `gh pr create` |
+| [`pr-address.sh`](.cursor/skills/pr-address/scripts/pr-address.sh) | Fetch unresolved feedback, post replies, resolve threads, mark addressed | `gh api` REST + GraphQL |
+| [`github-pr-review.sh`](.cursor/skills/pr-review/scripts/github-pr-review.sh) | Fetch PR context (metadata + patches) and submit reviews | `gh pr view` + `gh api` REST |
+| [`github-pr-activity.sh`](.cursor/skills/standup/scripts/github-pr-activity.sh) | List PRs by activity (recent reviews, comments, CI status) | `gh api graphql` |
+
+### PR Status Dashboard
+
+| Script | What it does | API |
+|--------|-------------|-----|
+| [`pr-status-gql.sh`](.cursor/scripts/pr-status-gql.sh) | PR status with review state, CI checks, new comments (primary) | `gh api graphql` |
+| [`pr-status.sh`](.cursor/scripts/pr-status.sh) | Same as above, REST fallback | `gh api` REST |
+| [`pr-watch.sh`](.cursor/scripts/pr-watch.sh) | TUI wrapper — auto-refresh dashboard with rate limit awareness | Delegates to above |
+
+### PR Landing Pipeline (`/pr-land`)
+
+These scripts run sequentially. Each handles one phase of the landing workflow:
+
+| Script | Phase | What it does | API |
+|--------|-------|-------------|-----|
+| [`pr-land-discover.sh`](.cursor/skills/pr-land/scripts/pr-land-discover.sh) | 1: Discovery | Find all `$GIT_BRANCH_PREFIX/*` PRs with approval status | Single `gh api graphql` query |
+| [`pr-land-comments.sh`](.cursor/skills/pr-land/scripts/pr-land-comments.sh) | 2: Comment check | Detect unaddressed feedback (inline threads, review bodies, top-level comments) | `gh api graphql` per PR |
+| [`pr-land-prepare.sh`](.cursor/skills/pr-land/scripts/pr-land-prepare.sh) | 3: Prepare | Autosquash → rebase → conflict detection → verification | Git only |
+| [`verify-repo.sh`](.cursor/skills/verify-repo.sh) | 3b: Verify | CHANGELOG validation + `prepare`/`tsc`/`lint`/`test` | Git + yarn |
+| [`pr-land-merge.sh`](.cursor/skills/pr-land/scripts/pr-land-merge.sh) | 5: Merge | Sequential merge with auto-rebase, mandatory verification | `gh api` REST |
+| [`pr-land-publish.sh`](.cursor/skills/pr-land/scripts/pr-land-publish.sh) | 6: Publish | Version bump, changelog update, commit + tag (no push) | Git + npm |
+
+**Conflict handling is fully scripted:**
+- Code conflicts → skip PR, continue with remaining
+- CHANGELOG-only (including staging) → agent resolves semantically, re-runs
+
+### Chat Analysis
+
+| Script | What it does |
+|--------|-------------|
+| [`cursor-chat-extract.js`](.cursor/skills/chat-audit/scripts/cursor-chat-extract.js) | Parse Cursor chat export JSON into compact structured summary (messages, tool calls, stats) |
+
+### Asana Integration
+
+| Script | What it does | API |
+|--------|-------------|-----|
+| [`asana-get-context.sh`](.cursor/skills/asana-get-context.sh) | Fetch task details, attachments, subtasks, custom fields | Asana REST |
+| [`asana-task-update.sh`](.cursor/skills/asana-task-update/scripts/asana-task-update.sh) | Generic task updates (attach PR, assign, status, fields) | Asana REST |
+| [`asana-create-dep-task.sh`](.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh) | Create dependent task in another repo's project | Asana REST |
+| [`asana-whoami.sh`](.cursor/skills/asana-whoami.sh) | Get current Asana user info | Asana REST |
+
+### Build & Deps
+
+| Script | What it does |
+|--------|-------------|
+| [`lint-commit.sh`](.cursor/skills/lint-commit.sh) | ESLint `--fix`, localize, graduate warnings, and report effective commit scope before commit |
+| [`lint-warnings.sh`](.cursor/skills/im/scripts/lint-warnings.sh) | Run `eslint --fix`, then summarize any remaining lint findings with matched fix patterns |
+| [`install-deps.sh`](.cursor/skills/install-deps.sh) | Install dependencies and run prepare script |
+| [`upgrade-dep.sh`](.cursor/skills/pr-land/scripts/upgrade-dep.sh) | Upgrade a dependency in the GUI repo |
+
+### Sync & Portability
+
+| Script | What it does |
+|--------|-------------|
+| [`convention-sync.sh`](.cursor/skills/convention-sync/scripts/convention-sync.sh) | Diff and sync `~/.cursor/` files with the edge-conventions repo |
+| [`tool-sync.sh`](.cursor/scripts/tool-sync.sh) | Sync Cursor rules, skills, and scripts to OpenCode and Claude Code formats |
+| [`port-to-opencode.sh`](.cursor/scripts/port-to-opencode.sh) | Convert Cursor `.mdc`/`.md` files to OpenCode-compatible JSON + MD mirrors |
+
+---
+
+## Dependency Graph
+
+### Skill → Skill
+
+```mermaid
+graph LR
+ asanaPlan["/asana-plan"]
+ taskReview["/task-review"]
+ im["/im"]
+ oneShot["/one-shot"]
+ depPr["/dep-pr"]
+ prCreate["/pr-create"]
+ asanaTaskUpdate["/asana-task-update"]
+ author["/author"]
+ conventionSync["/convention-sync"]
+
+ oneShot --> asanaPlan
+ oneShot --> im
+ oneShot --> prCreate
+ asanaPlan --> taskReview
+ im --> asanaPlan
+ depPr --> prCreate
+ prCreate --> asanaTaskUpdate
+ author --> conventionSync
+```
+
+Skills with no skill dependencies:
+
+- `/asana-task-update`
+- `/task-review`
+- `/q`
+- `/pr-review`
+- `/pr-address`
+- `/pr-land`
+- `/standup`
+- `/chat-audit`
+- `/changelog`
+- `/convention-sync`
+
+### Full Skill/Script Dependency Graph
+
+Top-to-bottom organization: skill layer, skill-specific scripts, shared scripts.
+
+```mermaid
+graph TD
+ subgraph skillLayer [Skills]
+ im["/im"]
+ oneShot["/one-shot"]
+ asanaPlan["/asana-plan"]
+ taskReview["/task-review"]
+ depPr["/dep-pr"]
+ prCreate["/pr-create"]
+ asanaTaskUpdate["/asana-task-update"]
+ prLand["/pr-land"]
+ prReview["/pr-review"]
+ prAddress["/pr-address"]
+ standup["/standup"]
+ chatAudit["/chat-audit"]
+ conventionSync["/convention-sync"]
+ author["/author"]
+ q["/q"]
+ changelog["/changelog"]
+ end
+
+ subgraph skillScripts [Skill Scripts]
+ prCreateSh["pr-create.sh"]
+ prAddressSh["pr-address.sh"]
+ prReviewSh["github-pr-review.sh"]
+ depTaskSh["asana-create-dep-task.sh"]
+ asanaTaskUpdateSh["asana-task-update.sh"]
+ prLandDisc["pr-land-discover.sh"]
+ prLandCmts["pr-land-comments.sh"]
+ prLandPrep["pr-land-prepare.sh"]
+ prLandMerge["pr-land-merge.sh"]
+ prLandPublish["pr-land-publish.sh"]
+ prLandExtract["pr-land-extract-asana-task.sh"]
+ standupAsana["asana-standup.sh"]
+ standupGh["github-pr-activity.sh"]
+ chatExtract["cursor-chat-extract.js"]
+ conventionSyncSh["convention-sync.sh"]
+ generateClaude["generate-claude-md.sh"]
+ lintWarn["lint-warnings.sh"]
+ end
+
+ subgraph sharedScripts [Shared Scripts]
+ lintCommit["lint-commit.sh"]
+ verifyRepo["verify-repo.sh"]
+ asanaGetContext["asana-get-context.sh"]
+ asanaWhoAmI["asana-whoami.sh"]
+ installDeps["install-deps.sh"]
+ edgeRepo["edge-repo.js"]
+ end
+
+ oneShot --> asanaPlan
+ oneShot --> im
+ oneShot --> prCreate
+ asanaPlan --> taskReview
+ im --> asanaPlan
+ depPr --> prCreate
+ prCreate --> asanaTaskUpdate
+ author --> conventionSync
+
+ prCreate --> prCreateSh
+ prCreate --> verifyRepo
+ prCreate --> asanaTaskUpdateSh
+ im --> lintWarn
+ im --> lintCommit
+ im --> verifyRepo
+ im --> installDeps
+ depPr --> depTaskSh
+ depPr --> asanaGetContext
+ asanaTaskUpdate --> asanaTaskUpdateSh
+ taskReview --> asanaGetContext
+ prAddress --> prAddressSh
+ prAddress --> lintCommit
+ prReview --> prReviewSh
+ prLand --> prLandDisc
+ prLand --> prLandCmts
+ prLand --> prLandPrep
+ prLand --> prLandMerge
+ prLand --> prLandPublish
+ prLand --> prLandExtract
+ prLand --> asanaTaskUpdateSh
+ prLand --> verifyRepo
+ standup --> standupAsana
+ standup --> standupGh
+ chatAudit --> chatExtract
+ conventionSync --> conventionSyncSh
+ conventionSync --> generateClaude
+
+ depTaskSh --> asanaWhoAmI
+ depTaskSh --> asanaTaskUpdateSh
+ asanaTaskUpdateSh --> asanaWhoAmI
+ standupAsana --> asanaWhoAmI
+ prLandPrep --> edgeRepo
+ prLandMerge --> edgeRepo
+ prLandPublish --> edgeRepo
+```
+
+---
+
+## Shared Module: `edge-repo.js`
+
+[`edge-repo.js`](.cursor/skills/pr-land/scripts/edge-repo.js) eliminates duplication across the `pr-land-*` scripts. Exports:
+
+| Function | Purpose |
+|----------|---------|
+| `getRepoDir(repo)` | Resolve local checkout path (`~/git/`, `~/projects/`, `~/code/`) |
+| `getUpstreamBranch(repo)` | `origin/develop` for GUI, `origin/master` for everything else |
+| `runGit(args, cwd, opts)` | Safe `spawnSync` wrapper with `GIT_EDITOR=true` |
+| `parseConflictFiles(output)` | Extract conflicting file paths from rebase output |
+| `isChangelogOnly(files)` | Check if all conflicts are in CHANGELOG.md |
+| `runVerification(repoDir, baseRef, opts)` | Run the full verify script with scoped lint (supports `{requireChangelog: true}`) |
+| `ghApi(endpoint, opts)` | `gh api` wrapper with method, body, paginate, jq support |
+| `ghGraphql(query, vars)` | `gh api graphql` wrapper with typed variable injection |
+
+---
+
+## Rules (`.mdc` files)
+
+| Rule | Activation | Purpose |
+|------|-----------|---------|
+| [`typescript-standards.mdc`](.cursor/rules/typescript-standards.mdc) | Loaded before editing `.ts`/`.tsx` files | TypeScript + React coding standards for **editing** (includes `simple-selectors` rule, descriptive variable names, biggystring arithmetic) |
+| [`review-standards.mdc`](.cursor/rules/review-standards.mdc) | Loaded by `/pr-review` command | ~50 review-specific diagnostic rules extracted from PR history |
+| [`load-standards-by-filetype.mdc`](.cursor/rules/load-standards-by-filetype.mdc) | Always applied | Auto-loads language-specific standards before editing |
+| [`fix-workflow-first.mdc`](.cursor/rules/fix-workflow-first.mdc) | Always applied | Fix command/skill definitions before patching downstream symptoms |
+| [`answer-questions-first.mdc`](.cursor/rules/answer-questions-first.mdc) | Always applied | Detect `?` in user messages → answer before acting; loads active command context to evaluate workflow gaps |
+| [`no-format-lint.mdc`](.cursor/rules/no-format-lint.mdc) | Always applied | Don't manually fix formatting — auto-format on agent finish handles it |
+| [`eslint-warnings.mdc`](.cursor/rules/eslint-warnings.mdc) | `.ts`/`.tsx` files | ESLint warning handling patterns |
+
+**Editing vs. review separation**: `typescript-standards` contains rules for writing code (prefer `useHandler`, use `InteractionManager`, descriptive variable names, biggystring for numeric calculations). `review-standards` contains diagnostic patterns for catching bugs during review (null `tokenId` fallback, stack trace preservation, module-level cache bugs, etc.). Both are loaded together during `/pr-review`; only `typescript-standards` is loaded during editing.
+
+---
+
+## Author Skill
+
+| Skill | Purpose |
+|-------|---------|
+| [`author/SKILL.md`](.cursor/skills/author/SKILL.md) | Meta-skill for creating/maintaining skills, scripts, and rules. Enforces XML format, `scripts-over-reasoning`, `gh-cli-over-curl`, dependency-audit requirements before script add/update/remove, and convention-sync/CLAUDE sync post-authoring behavior. |
+
+---
+
+## Design Principles
+
+1. **Scripts over reasoning** — Deterministic operations (API calls, git, JSON) go in companion scripts, not inline in commands.
+2. **`gh` CLI over `curl`** — All GitHub API calls use `gh api` / `gh api graphql`. Handles auth, pagination, API versioning automatically.
+3. **GraphQL over REST** — Fetch only required fields in a single request where possible. Fall back to REST only when GraphQL doesn't expose the needed data (e.g., file patches).
+4. **DRY shared modules** — Common utilities extracted into `edge-repo.js` rather than duplicated across scripts.
+5. **XML format** — Skills use XML-tagged sections (e.g. `<objective>`, `<steps>`, `<rules>` — see skill files for exact tags) for reliable LLM instruction-following.
+6. **Standards-first** — Load coding standards before writing or reviewing any code.
+7. **Fix workflow first** — When behavior is wrong, fix the command/skill definition, not the downstream symptom.
+8. **No hardcoded usernames** — All user-specific values come from `GIT_BRANCH_PREFIX` env var, set once in `.zshrc`.
+9. **Minimize context** — Script output must be compact and structured. Never return raw API responses. Every token costs context.
+10. **Small-model conventions** — High-frequency skills that run on faster/cheaper models use verbatim bash, file-over-args, inline guardrails, and explicit parallel instructions for reliability.
+11. **Knowledge base over crawling** — Maintain curated knowledge files (e.g., `eslint-warnings.mdc`) instead of having the agent crawl/grep for information repeatedly. Pre-indexed knowledge reduces tool calls and context consumption.
+12. **Continuous improvement** — Workflows feed back into their own knowledge. PR review feedback updates `review-standards.mdc`, addressed warnings update `eslint-warnings.mdc`, and chat audits surface rule gaps. Each cycle reduces repetitive context gathering by the agent and repetitive review by humans.
diff --git a/.cursor/commands/github-pr-hudl.sh b/.cursor/commands/github-pr-hudl.sh
new file mode 100755
index 0000000..2ec3db7
--- /dev/null
+++ b/.cursor/commands/github-pr-hudl.sh
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+# github-pr-hudl.sh — Fetch comprehensive GitHub PR activity for a given day.
+# Detects multiple action categories for HUDL standup generation.
+#
+# Categories:
+# - created: PRs created by user on target date
+# - committed: PRs where user pushed commits on target date
+# - addressed: PRs with commits after receiving review comments
+# - reviewed: PRs by others that user reviewed on target date
+# - commented: PRs where user posted comments on target date
+# - approved: PRs that have approval (for Goals Today)
+# - blocked: PRs blocked by CI or changes requested (for Handoffs)
+# - open_prs: All open PRs for debug section
+#
+# Usage:
+# github-pr-hudl.sh [--date YYYY-MM-DD]
+#
+# Requires: gh CLI authenticated, ASANA_TOKEN for cross-referencing
+#
+# Output: JSON with date, username, day_label, and category arrays
+set -euo pipefail
+
+TARGET_DATE=""
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --date) TARGET_DATE="$2"; shift 2 ;;
+ *) echo "Unknown: $1" >&2; exit 1 ;;
+ esac
+done
+
+if ! command -v gh &>/dev/null; then
+ echo "Error: gh CLI not installed" >&2; exit 1
+fi
+if ! gh auth status &>/dev/null 2>&1; then
+ echo "PROMPT_GH_AUTH" >&2; exit 2
+fi
+
+USERNAME=$(gh api user --jq '.login')
+ASANA_TOKEN="${ASANA_TOKEN:-}"
+
+export TARGET_DATE USERNAME ASANA_TOKEN
+
+python3 - << 'PYEOF'
+import json, os, re, subprocess, sys, urllib.request, urllib.error
+from datetime import date, timedelta
+
+USERNAME = os.environ["USERNAME"]
+TARGET_DATE_STR = os.environ.get("TARGET_DATE", "")
+ASANA_TOKEN = os.environ.get("ASANA_TOKEN", "")
+
+today = date.today()
+
+if TARGET_DATE_STR:
+ # Explicit date: use single day
+ target_start = date.fromisoformat(TARGET_DATE_STR)
+ target_end = target_start
+ day_label = target_start.strftime("%A")
+else:
+ # Default: from last workday until now
+ if today.weekday() == 0: # Monday
+ target_start = today - timedelta(days=3) # Friday
+ target_end = today
+ day_label = "since Friday"
+ else:
+ target_start = today - timedelta(days=1) # Yesterday
+ target_end = today
+ day_label = "since yesterday"
+
+TARGET_START_STR = target_start.isoformat()
+TARGET_END_STR = target_end.isoformat()
+
+
+def gh_graphql(query, variables):
+ args = ["gh", "api", "graphql", "-f", f"query={query}"]
+ for k, v in variables.items():
+ args.extend(["-f", f"{k}={v}"])
+ result = subprocess.run(args, capture_output=True, text=True)
+ if result.returncode != 0:
+ print(f"GH_ERROR: {result.stderr[:300]}", file=sys.stderr)
+ return {"data": {"search": {"nodes": []}}}
+ parsed = json.loads(result.stdout)
+ if "errors" in parsed:
+ print(f"GQL_ERROR: {json.dumps(parsed['errors'][:2])}", file=sys.stderr)
+ return parsed
+
+
+def extract_asana_gid(body):
+ if not body:
+ return None
+ m = re.search(r'asana\.com/\S*/(\d{10,})', body)
+ return m.group(1) if m else None
+
+
+def fetch_asana_status(gid):
+ """Fetch Asana task status via API."""
+ if not ASANA_TOKEN or not gid:
+ return None
+ try:
+ req = urllib.request.Request(
+ f"https://app.asana.com/api/1.0/tasks/{gid}?opt_fields=custom_fields.gid,custom_fields.display_value",
+ headers={"Authorization": f"Bearer {ASANA_TOKEN}"}
+ )
+ with urllib.request.urlopen(req, timeout=5) as resp:
+ data = json.loads(resp.read())
+ for f in data.get("data", {}).get("custom_fields", []):
+ if f.get("gid") == "1190660107346181": # Status field
+ return f.get("display_value")
+ except Exception as e:
+ print(f"ASANA_ERROR: {e}", file=sys.stderr)
+ return None
+
+
+# --- Main GraphQL query for user's activity ---
+QUERY_USER_PRS = """
+query($search: String!) {
+ search(query: $search, type: ISSUE, first: 100) {
+ nodes {
+ ... on PullRequest {
+ number
+ title
+ url
+ body
+ state
+ createdAt
+ repository { nameWithOwner }
+ reviews(last: 50) {
+ nodes {
+ author { login }
+ state
+ submittedAt
+ }
+ }
+ commits(last: 50) {
+ nodes {
+ commit {
+ committedDate
+ author { user { login } }
+ }
+ }
+ }
+ comments(last: 50) {
+ nodes {
+ author { login }
+ createdAt
+ }
+ }
+ reviewThreads(first: 50) {
+ nodes {
+ comments(first: 10) {
+ nodes {
+ author { login }
+ createdAt
+ }
+ }
+ }
+ }
+ reviewDecision
+ statusCheckRollup {
+ state
+ }
+ }
+ }
+ }
+}
+"""
+
+# Search 1: User's own PRs (open or recently updated)
+search_authored = f"is:pr author:{USERNAME} updated:>={TARGET_START_STR} sort:updated"
+authored_raw = gh_graphql(QUERY_USER_PRS, {"search": search_authored})
+
+# Search 2: PRs reviewed by user
+search_reviewed = f"is:pr reviewed-by:{USERNAME} -author:{USERNAME} updated:>={TARGET_START_STR} sort:updated"
+reviewed_raw = gh_graphql(QUERY_USER_PRS, {"search": search_reviewed})
+
+# Search 3: PRs where user commented
+search_commented = f"is:pr commenter:{USERNAME} -author:{USERNAME} updated:>={TARGET_START_STR} sort:updated"
+commented_raw = gh_graphql(QUERY_USER_PRS, {"search": search_commented})
+
+search_count = 0
+for raw in [authored_raw, reviewed_raw, commented_raw]:
+ search_count += len(raw.get("data", {}).get("search", {}).get("nodes", []))
+
+print(f"Searched {search_count} PR candidates", file=sys.stderr)
+
+# --- Process authored PRs ---
+created = []
+committed = []
+addressed = []
+approved = []
+blocked = []
+open_prs = []
+
+seen_prs = set()
+
+for node in authored_raw.get("data", {}).get("search", {}).get("nodes", []):
+ if not node or "number" not in node:
+ continue
+
+ pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}"
+ if pr_key in seen_prs:
+ continue
+ seen_prs.add(pr_key)
+
+ asana_gid = extract_asana_gid(node.get("body"))
+ asana_status = fetch_asana_status(asana_gid) if asana_gid else None
+
+ pr_entry = {
+ "pr_number": node["number"],
+ "pr_title": node["title"],
+ "pr_url": node["url"],
+ "repo": node["repository"]["nameWithOwner"],
+ "asana_gid": asana_gid,
+ "asana_status": asana_status,
+ }
+
+ # Check if created within target window
+ created_at = (node.get("createdAt") or "")[:10]
+ if TARGET_START_STR <= created_at <= TARGET_END_STR:
+ created.append(pr_entry)
+
+ # Check for human reviews before target window
+ has_prior_review = False
+ for r in (node.get("reviews") or {}).get("nodes", []):
+ if not r or not r.get("author"):
+ continue
+ reviewer = r["author"].get("login", "")
+ if reviewer == USERNAME or "[bot]" in reviewer:
+ continue
+ submitted = (r.get("submittedAt") or "")[:10]
+ if submitted < TARGET_START_STR and r.get("state") in ("CHANGES_REQUESTED", "COMMENTED"):
+ has_prior_review = True
+ break
+
+ # Check for commits within target window
+ commits_in_window = []
+ for c in (node.get("commits") or {}).get("nodes", []):
+ commit = (c or {}).get("commit", {})
+ committed_date = (commit.get("committedDate") or "")[:10]
+ commit_user = ((commit.get("author") or {}).get("user") or {}).get("login", "")
+ if TARGET_START_STR <= committed_date <= TARGET_END_STR and commit_user == USERNAME:
+ commits_in_window.append(commit)
+
+ if commits_in_window:
+ entry_with_count = {**pr_entry, "commit_count": len(commits_in_window)}
+ # Only count as addressed/committed if PR wasn't created in window
+ if not (TARGET_START_STR <= created_at <= TARGET_END_STR):
+ if has_prior_review:
+ addressed.append(entry_with_count)
+ else:
+ committed.append(entry_with_count)
+
+ # Track open PRs for debug and blocked/approved analysis
+ if node.get("state") == "OPEN":
+ review_decision = node.get("reviewDecision")
+ ci_state = (node.get("statusCheckRollup") or {}).get("state")
+
+ # Determine status summary
+ status_parts = []
+ if review_decision:
+ status_parts.append(review_decision.lower().replace("_", " "))
+ if ci_state:
+ status_parts.append(f"CI: {ci_state.lower()}")
+ if asana_status:
+ status_parts.append(f"Asana: {asana_status}")
+
+ open_prs.append({
+ **pr_entry,
+ "review_decision": review_decision,
+ "ci_state": ci_state,
+ "status_summary": ", ".join(status_parts) if status_parts else "open"
+ })
+
+ # Check if approved (GitHub approved OR Asana Publish Needed)
+ if review_decision == "APPROVED" or asana_status == "Publish Needed":
+ approved.append(pr_entry)
+
+ # Check if blocked
+ if ci_state == "FAILURE":
+ blocked.append({**pr_entry, "block_reason": "ci_failure", "detail": "CI failing"})
+ elif review_decision == "CHANGES_REQUESTED":
+ # Find who requested changes
+ changers = []
+ for r in (node.get("reviews") or {}).get("nodes", []):
+ if r and r.get("state") == "CHANGES_REQUESTED":
+ author = (r.get("author") or {}).get("login", "")
+ if author and author not in changers:
+ changers.append(author)
+ blocked.append({
+ **pr_entry,
+ "block_reason": "changes_requested",
+ "detail": ", ".join(changers) if changers else "reviewer"
+ })
+
+# --- Process reviewed PRs ---
+reviewed = []
+for node in reviewed_raw.get("data", {}).get("search", {}).get("nodes", []):
+ if not node or "number" not in node:
+ continue
+
+ pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}"
+ if pr_key in seen_prs:
+ continue
+ seen_prs.add(pr_key)
+
+ # Find user's review within target window
+ review_state = None
+ for r in (node.get("reviews") or {}).get("nodes", []):
+ if not r or not r.get("author"):
+ continue
+ if r["author"].get("login") != USERNAME:
+ continue
+ submitted = (r.get("submittedAt") or "")[:10]
+ if TARGET_START_STR <= submitted <= TARGET_END_STR:
+ review_state = r.get("state", "COMMENTED")
+ break
+
+ if review_state:
+ reviewed.append({
+ "pr_number": node["number"],
+ "pr_title": node["title"],
+ "pr_url": node["url"],
+ "repo": node["repository"]["nameWithOwner"],
+ "asana_gid": extract_asana_gid(node.get("body")),
+ "review_state": review_state,
+ })
+
+# --- Process commented PRs ---
+commented_list = []
+for node in commented_raw.get("data", {}).get("search", {}).get("nodes", []):
+ if not node or "number" not in node:
+ continue
+
+ pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}"
+ if pr_key in seen_prs:
+ continue
+ seen_prs.add(pr_key)
+
+ # Check for comments by user on target date
+ has_comment = False
+
+ # Issue comments
+ for c in (node.get("comments") or {}).get("nodes", []):
+ if not c:
+ continue
+ author = (c.get("author") or {}).get("login", "")
+ comment_date = (c.get("createdAt") or "")[:10]
+ if author == USERNAME and TARGET_START_STR <= comment_date <= TARGET_END_STR:
+ has_comment = True
+ break
+
+ # Review thread comments
+ if not has_comment:
+ for thread in (node.get("reviewThreads") or {}).get("nodes", []):
+ for c in ((thread or {}).get("comments") or {}).get("nodes", []):
+ if not c:
+ continue
+ author = (c.get("author") or {}).get("login", "")
+ comment_date = (c.get("createdAt") or "")[:10]
+ if author == USERNAME and TARGET_START_STR <= comment_date <= TARGET_END_STR:
+ has_comment = True
+ break
+ if has_comment:
+ break
+
+ if has_comment:
+ commented_list.append({
+ "pr_number": node["number"],
+ "pr_title": node["title"],
+ "pr_url": node["url"],
+ "repo": node["repository"]["nameWithOwner"],
+ "asana_gid": extract_asana_gid(node.get("body")),
+ })
+
+print(json.dumps({
+ "date_start": TARGET_START_STR,
+ "date_end": TARGET_END_STR,
+ "day_label": day_label,
+ "username": USERNAME,
+ "search_count": search_count,
+ "created": created,
+ "committed": committed,
+ "addressed": addressed,
+ "reviewed": reviewed,
+ "commented": commented_list,
+ "approved": approved,
+ "blocked": blocked,
+ "open_prs": open_prs,
+}, indent=2))
+PYEOF
diff --git a/.cursor/commands/hudl.md b/.cursor/commands/hudl.md
new file mode 100644
index 0000000..adf630a
--- /dev/null
+++ b/.cursor/commands/hudl.md
@@ -0,0 +1,229 @@
+Generate a daily HUDL document from GitHub PR activity, upload to a single persistent private gist.
+
+
+PR names are the clickable link: `[{title}]({url})`. Never add a separate URL.
+All HUDL files go into ONE gist with description "HUDL Notes". Create on first run, add files on subsequent runs. Never overwrite — append a suffix (`-1`, `-2`, etc.) if the filename exists.
+Delete the local file after successful gist upload.
+Set `block_until_ms: 120000` for the companion script.
+PRs with Asana GIDs in body should have their Asana status fetched to determine true workflow status.
+
+
+
+Run the companion script:
+
+```bash
+~/.cursor/commands/github-pr-hudl.sh
+```
+
+If the user supplies a specific date, pass `--date YYYY-MM-DD`.
+
+Capture stdout (JSON) and stderr (diagnostics) separately.
+
+
+
+The JSON output has these fields:
+- `date_start`, `date_end`: The time window (e.g., Friday to Monday for Monday HUDL)
+- `day_label`: Display label (e.g., "since Friday" or "since yesterday")
+
+And these arrays:
+- `created`: PRs created within window
+- `committed`: PRs where user pushed commits within window
+- `addressed`: PRs with commits after receiving review comments
+- `reviewed`: PRs by others that user reviewed
+- `commented`: PRs where user posted comments
+- `approved`: PRs that have approval (for Goals Today)
+- `blocked`: PRs blocked by CI failure or changes requested (for Handoffs)
+- `open_prs`: All open PRs for debug section
+
+Each entry has: `pr_number`, `pr_title`, `pr_url`, `repo`, `asana_gid` (nullable), plus action-specific fields. `asana_status` (nullable) is present only on the authored-PR arrays (`created`, `committed`, `addressed`, `approved`, `blocked`, `open_prs`), not on `reviewed` or `commented`.
+
+
+
+Build the markdown file with EXACTLY the structure below. Every heading, bullet, and blank line matters.
+
+
+Line 1 of the file. Use `date_end` from the JSON for the header date.
+
+```
+# HUDL Notes — {full_weekday_name} {full_month_name} {day}, {year}
+```
+
+Example: `# HUDL Notes — Monday February 16, 2026`
+
+
+
+```
+## Accomplishments {day_label}
+```
+
+Use `day_label` from the JSON (either `"since yesterday"` or `"since Friday"`).
+
+Categorize each PR into exactly ONE subsection based on its PRIMARY action. Determine the primary action using this priority (highest first):
+
+1. `created` → goes in **PR'd**
+2. `addressed` → goes in **Addressed PR Comments**
+3. `reviewed` → goes in **Reviewed PRs**
+4. `committed` or `commented` → goes in **General**
+
+A PR appears in only ONE subsection — the highest-priority one that matches.
+
+**Subsection: PR'd** — include only if at least one PR qualifies.
+
+```
+### PR'd
+
+- [{pr_title}]({pr_url}) ({repo})
+```
+
+One bullet per PR. No action text — the heading says it.
+
+**Subsection: Addressed PR Comments** — include only if at least one PR qualifies.
+
+```
+### Addressed PR Comments
+
+- [{pr_title}]({pr_url}) ({repo})
+```
+
+**Subsection: Reviewed PRs** — include only if at least one PR qualifies.
+
+```
+### Reviewed PRs
+
+- [{pr_title}]({pr_url}) ({repo}) — approved
+```
+
+Append the review verdict in lowercase after ` — `. Map `review_state`:
+- `APPROVED` → `approved`
+- `CHANGES_REQUESTED` → `changes requested`
+- `COMMENTED` → `commented`
+
+**Subsection: General** — include only if at least one PR qualifies.
+
+```
+### General
+
+- [{pr_title}]({pr_url}) ({repo}) — Committed: 3 commits
+```
+
+Format each action type:
+- `committed` → `Committed: {commit_count} commits`
+- `commented` → `Commented`
+
+If a PR has multiple actions in General, join with `; `.
+
+**Omit any subsection that would have zero bullets.**
+
+
+
+```
+## Goals Today
+```
+
+List PRs from the `approved` array (PRs that are approved and ready to merge/publish):
+
+```
+- Publish [{pr_title}]({pr_url})
+```
+
+After all approved items (or immediately if there are none), add one blank bullet for the user to fill in:
+
+```
+-
+```
+
+
+
+```
+## Handoffs
+```
+
+Group entries from the `blocked` array by block reason.
+
+**CI Failures** — if any PR has `block_reason=ci_failure`:
+
+```
+### Blocked by CI
+
+- [{pr_title}]({pr_url}) — CI failing
+```
+
+**Changes Requested** — if any PR has `block_reason=changes_requested`:
+
+```
+### Changes Requested
+
+- [{pr_title}]({pr_url}) — {reviewer} requested changes
+```
+
+If the blocked array is completely empty, write:
+
+```
+None
+```
+
+
+
+Add a horizontal rule, then a collapsed details block.
+
+```
+---
+
+<details><summary>Debug: {N} open PRs</summary>
+
+```
+
+Where `{N}` is the length of the `open_prs` array.
+
+For each entry in `open_prs`, write:
+
+```
+- [{pr_title}]({pr_url}) — {status_summary}
+```
+
+Where `status_summary` includes: review state, CI status, Asana status (if present).
+
+End with search stats and close the details tag:
+
+```
+
+*Searched {search_count} PRs*
+
+</details>
+```
+
+`search_count` comes from the JSON.
+
+
+
+
+1. Write the markdown to `hudl-{date}.md` in the current working directory.
+2. Upload to gist using this exact bash logic:
+
+```bash
+GIST_ID=$(gh gist list --limit 100 --filter "HUDL Notes" | head -1 | awk '{print $1}')
+FILENAME="hudl-{date}.md"
+
+if [ -n "$GIST_ID" ]; then
+ FILES=$(gh gist view "$GIST_ID" --files)
+ N=0
+ BASE="hudl-{date}"
+ while echo "$FILES" | grep -q "$FILENAME"; do
+ N=$((N + 1))
+ FILENAME="${BASE}-${N}.md"
+ done
+ [ "$FILENAME" != "hudl-{date}.md" ] && mv "hudl-{date}.md" "$FILENAME"
+ gh gist edit "$GIST_ID" --add "$FILENAME"
+else
+ GIST_URL=$(gh gist create --desc "HUDL Notes" "$FILENAME")
+ GIST_ID="${GIST_URL##*/}"
+fi
+
+rm "$FILENAME"
+```
+
+3. Present a brief summary to the user:
+ - Number of accomplishment items
+ - Number of handoffs
+ - Gist URL: `https://gist.github.com/{username}/{GIST_ID}`
+
diff --git a/.cursor/rules/after_each_chat.mdc b/.cursor/rules/after_each_chat.mdc
new file mode 100644
index 0000000..79b7294
--- /dev/null
+++ b/.cursor/rules/after_each_chat.mdc
@@ -0,0 +1,24 @@
+---
+description: Send full chat response to Telegram at end of each turn
+alwaysApply: false
+---
+
+Send the full response of every chat turn to Telegram via chunked messages.
+
+
+Execute at the end of every chat turn without exception.
+Send the complete response content, not an abbreviated summary.
+Do not ask for confirmation or print extra explanation.
+Send in a single Shell command. Do not write intermediate files, call date, or use multiple steps.
+
+
+
+
+Pipe your full response to the send script via a heredoc in a single Shell command:
+```bash
+python3 ~/.cursor-autopilot/telegram-send.py << 'ENDOFMSG'
+
+ENDOFMSG
+```
+The script reads stdin, splits the message into chunks that fit Telegram's 4096-character limit, and sends each sequentially. Credentials are resolved automatically. Do NOT write intermediate files, call date, or pass any arguments.
+
diff --git a/.cursor/rules/answer-questions-first.mdc b/.cursor/rules/answer-questions-first.mdc
new file mode 100644
index 0000000..0cde491
--- /dev/null
+++ b/.cursor/rules/answer-questions-first.mdc
@@ -0,0 +1,18 @@
+---
+description: Detect questions in prompts and answer them before making changes
+alwaysApply: true
+---
+
+# Answer Questions Before Acting
+
+Before using any code editing tools, scan the user's message for `?` characters and determine if it's a question.
+
+- **Ignore** `?` inside code, URLs or query parameters (e.g. `?param=x`, `?key=value` , `const x = ifTrue ? 'yes' : 'no'`)
+- **Treat all other `?`** as question statements, if they appear to be questions.
+
+If questions are detected:
+
+1. Read `~/.cursor/skills/q/SKILL.md` and follow its workflow to answer every question.
+2. **Workflow context**: If a skill was invoked earlier in this conversation, note which one. When a question or critique references agent behavior from that execution, load the skill definition before answering and evaluate whether the skill should have governed that behavior. If it should have but didn't, that's a workflow gap — treat it as the primary concern per `fix-workflow-first.mdc`.
+3. Do **not** edit files, create files, or run mutating commands until the user responds.
+4. Only proceed with implementation after the user permits it in a follow-up message.
diff --git a/.cursor/rules/eslint-warnings.mdc b/.cursor/rules/eslint-warnings.mdc
new file mode 100644
index 0000000..bb30cc9
--- /dev/null
+++ b/.cursor/rules/eslint-warnings.mdc
@@ -0,0 +1,10 @@
+---
+description: Guidance for addressing ESLint warnings in the codebase
+globs: ["**/*.ts", "**/*.tsx"]
+alwaysApply: false
+---
+
+# ESLint Warning Fixes
+
+- Skip deprecation warnings (`@typescript-eslint/no-deprecated`) unless explicitly asked to address them.
+- After addressing warnings, run `yarn update-eslint-warnings` to update the baseline.
diff --git a/.cursor/rules/load-standards-by-filetype.mdc b/.cursor/rules/load-standards-by-filetype.mdc
new file mode 100644
index 0000000..d272c04
--- /dev/null
+++ b/.cursor/rules/load-standards-by-filetype.mdc
@@ -0,0 +1,19 @@
+---
+description:
+alwaysApply: true
+---
+
+Load language-specific coding standards before editing or investigating lint/type errors in files, without redundant reads.
+
+
+Before using any code editing tool on a file OR investigating lint/type errors in that file type, check if the matching standards rule is already present in `cursor_rules_context`. Only read the rule file if it is NOT already in context.
+If the rule is not in context, read it using the Read tool and follow its contents BEFORE making the edit or investigating the error.
+
+
+
+
+| File glob | Standards file |
+|---|----|
+| `**/*.ts`,`**/*.tsx` | `~/.cursor/rules/typescript-standards.mdc` |
+
+
diff --git a/.cursor/rules/no-format-lint.mdc b/.cursor/rules/no-format-lint.mdc
new file mode 100644
index 0000000..76cd248
--- /dev/null
+++ b/.cursor/rules/no-format-lint.mdc
@@ -0,0 +1,11 @@
+---
+description: Prevent agent from spending tokens on formatting and lint fixing
+alwaysApply: true
+---
+
+# No Manual Formatting or Lint Fixing
+
+- Do NOT run `yarn lint`, `yarn fix`, `yarn verify`, or any lint/format shell commands unless explicitly asked.
+- Do NOT manually fix formatting issues (whitespace, quotes, semicolons, trailing commas, line length). The `lint-commit.sh` script runs `eslint --fix` (including Prettier) before each commit.
+- Only use `ReadLints` to check for logical or type errors, not formatting. If the only lint errors are formatting-related, ignore them.
+- Focus tokens on correctness and logic, not style.
diff --git a/.cursor/rules/review-standards.mdc b/.cursor/rules/review-standards.mdc
new file mode 100644
index 0000000..f888efa
--- /dev/null
+++ b/.cursor/rules/review-standards.mdc
@@ -0,0 +1,199 @@
+---
+description: Review-specific coding conventions for Edge codebase PR reviews. Load alongside typescript-standards.mdc during code review.
+globs: []
+alwaysApply: false
+---
+
+Provide project-specific review patterns to detect in PR code — anti-patterns and conventions that go beyond the editing standards in typescript-standards.mdc.
+
+
+
+Don't use shorthand `.catch(showError)` — it loses the calling file from stack traces.
+❌ `doSomething().catch(showError)`
+✅ `doSomething().catch((error: unknown) => showError(error))`
+
+
+Don't double down on `@ts-expect-error` when trivial fixes exist. Use `?? []`, `?? {}`, or explicit type annotations instead of suppressing type errors.
+
+Use `!== undefined` when `null` has semantic meaning (like "delete this field"). `!= null` treats both the same.
+❌ `const changed = value != null` (when null means "delete")
+✅ `const changed = value !== undefined`
+
+
+Always `await` async operations for proper spinners, double-click prevention, and race condition avoidance.
+❌ `wallet.saveTxMetadata(params).catch(showError)`
+✅ `await wallet.saveTxMetadata(params)`
+
+
+When the whole function is async and the caller handles errors, don't add a separate `.catch()`.
+❌ `const handle = async () => { await op().catch(err => showError(err)) }`
+✅ `const handle = async () => { await op() }`
+
+
+When `tokenId` is a non-null string, any dereference using it must succeed or throw. Never fall back to `null` — it silently changes the intended asset from "this specific token" to "native currency."
+
+When a global error handler (e.g., `withExtendedTouchable`) already catches and displays errors, don't add local `.catch(showError)` — it causes errors to display twice. Only add explicit handling when you need specific error types, cleanup, or there's no global handler.
+
+User cancellations (closing modals, pressing back) should exit silently, not show a generic error.
+❌ `try { await modal() } catch (error) { showError(error) }`
+✅ `if (error instanceof UserCancelledError) return; showError(error)`
+
+
+Catch blocks should not always throw the same generic error. Only throw specific messages for expected errors (e.g., API 400); re-throw the original for unexpected ones so users see accurate messages.
+
+Verify arrays have elements before indexing. `vin.addresses[0]` is `undefined` when the array is empty — check before passing to functions that can't handle undefined.
+
+Don't compare tokenIds with currency codes — they are different identifier types that will never match. Use `request.fromTokenId` when checking against a list of tokenIds, not `request.fromCurrencyCode`.
+
+Use optional chaining on lookup tables with dynamic keys.
+❌ `TABLE[pluginId].includes(tokenId)` (TypeError if key missing)
+✅ `TABLE[pluginId]?.includes(tokenId) ?? false`
+
+
+If a validation applies to all code paths, perform it once at function entry rather than repeating in each branch.
+
+
+
+
+
+Prefer `useHandler` (from `hooks/useHandler`) over `useCallback` for event handlers and async functions. Provides better TypeScript inference and handles async more gracefully.
+
+If two `useEffect` hooks update related state from related dependencies, combine them into one effect to avoid redundant renders.
+
+
+
+Use `StyleSheet.compose(baseStyle, customStyle)` for style composition. Handles null automatically — no manual array handling needed.
+
+iOS number-pad keyboards don't support certain `returnKeyType` values ("Can't find keyplane" warning). Conditionally set: `returnKeyType={Platform.OS === 'ios' ? undefined : 'done'}`
+
+When replacing one component with another, ensure all props (color, size, style) are carried over. Check the original component's props before replacing — missing visual props change appearance.
+
+When switching icon libraries, wrap replacement icons in a `View` with the original margin/padding styles if the new component doesn't accept the same style props.
+
+Wrap navigation calls (push, pop, replace) after complex gestures (slider completion, swipe) in `InteractionManager.runAfterInteractions()`. Navigating while the gesture system is active causes crashes on physical devices.
+
+Disable interactive elements during async operations to prevent double-taps and race conditions. Use a `pending` state and pass it to the component for visual feedback.
+
+
+
+
+
+Don't track Redux state locally with `useState(reduxValue)` — it becomes stale when Redux updates. Read from `useSelector` directly.
+
+Module-level cached state that doesn't reset on logout/login leaks data between users. Export a clear function and call it on logout. This is a recurring bug pattern.
+
+Local account settings belong in Redux, not separate module-level caches. Redux is the right place for globally-available account information.
+
+Use `account.dataStore.setItem/getItem` instead of `account.localDisklet` directly. Disklet filenames are stored in plaintext, leaking information the server shouldn't see. DataStore encrypts filenames.
+
+When changing storage formats, always include migration code: read old format, convert, write new format, delete old. Users have existing data on disk.
+
+When updating nested state objects in storage, merge with existing state to avoid overwriting concurrent updates from other parts of the app.
+❌ `notifState: newNotifState` (overwrites sibling keys)
+✅ `notifState: { ...settings.notifState, ...newNotifState }`
+
+
+
+
+
+
+Always use `makePeriodicTask` instead of `setInterval`, especially for async work. Provides proper start/stop lifecycle and handles overlapping invocations.
+
+Background services go in `components/services/` as React components. Component-based mounting ensures clean lifecycle tied to login/logout. Avoid excessive background work — trigger only when needed.
+
+Use a `runOnce` helper or `pending` flag to prevent duplicate parallel calls when functions can be triggered multiple times (button presses, retries).
+
+When implementing cancellable polling, check the cancel flag after every `await`, not just at loop start. The flag can change during any async gap.
+
+In `setTimeout`/interval callbacks, read state fresh inside the callback. Closures capture stale values — especially problematic for callbacks that fire much later.
+
+Track `setTimeout` IDs in services/engines with a `Set` and clear them all in the shutdown method. Stale timeouts fire on cleared/deallocated state.
+
+When async event handlers operate on shared resources (files, git repos, databases), serialize operations per resource using a pending-operation map or queue. Fire-and-forget `.catch()` patterns cause race conditions on rapid events.
+
+
+
+
+
+All network responses and disk reads must be cleaned with the cleaners library before use. Access cleaned values, not raw data.
+
+Derive types from cleaners with `ReturnType`. Don't duplicate type definitions alongside cleaner definitions.
+
+`asOptional` accepts both `undefined` AND `null` despite the name. To preserve the null/undefined distinction, use `asOptional(asEither(asNull, asString), null)` with a default.
+
+New fields added to cleaners for persisted data MUST use `asOptional` unless migration code is included. Existing data on disk won't have the new field — non-optional fields cause load failures.
+
+Remove or comment out unused fields in cleaners. Dead cleaner fields add noise and can mislead.
+
+
+
+
+
+Don't leave dead or unused code "just in case." Git history preserves it. This includes unused variables, unreachable branches, and commented-out blocks.
+
+Don't declare variables just to pass them to a function — inline the parameters. Exception: typed constants for functions with untyped/`any` parameters, where the constant provides compile-time checking.
+
+Before creating a new utility, check for existing helpers: `getTokenId`/`getTokenIdForced` instead of `getWalletTokenId`, `getExchangeDenom` instead of custom multiplier lookups.
+
+Use existing mock data from `src/util/fake/` or consolidate new mocks there. Duplicated half-baked mock data breaks on core changes.
+
+Never commit hardcoded sandbox URLs or debug flags. Use environment configuration (`envConfig.*`, `__DEV__`).
+
+Don't use local file paths (`file:../my-package`) in package.json dependencies. Breaks builds for other developers and CI.
+
+No unguarded `console.log` in production code. Guard with `ENV.DEBUG_VERBOSE_LOGGING` or remove entirely.
+
+Use a single validation function for both real-time and submit-time checks. Duplicated validation with different thresholds lets users submit invalid forms.
+
+Use local synchronous helpers (`div` from biggystring + `getExchangeDenom`) for amount conversions instead of async wallet API calls that cross an expensive bridge. Always specify decimal precision to avoid integer truncation: `div(native, multiplier, 18)` not `div(native, multiplier)`.
+
+Use established libraries (e.g., `rfc4648` for base64) instead of hand-rolling standard algorithms. Hand-rolled implementations miss edge cases and add maintenance burden.
+
+When a value appears in multiple configuration locations, ensure they match. Extract shared constants to prevent silent drift.
+
+Delete style properties from `StyleSheet.create` that aren't referenced by any component. Unused styles add noise.
+
+
+
+
+
+Search the localization file (`en_US.json`) before adding new keys. Don't create duplicates of existing strings.
+
+String keys describe semantic meaning, not UI location.
+❌ `signup_screen_get_started`
+✅ `get_started_button`
+
+
+Prompts describe the action, not the gesture. Doesn't translate well across platforms.
+❌ `"Tap to select a country"`
+✅ `"Select a country"`
+
+
+Error messages and user-facing strings are localized in the GUI layer, not in API/plugin code. API layers throw structured errors (e.g., `NetworkError('CONNECTION_FAILED')`) that the GUI translates for display.
+
+
+
+
+
+Document constraints that aren't obvious from the code: `// EVM-only: assumes EVM contract address format`
+
+
+
+
+
+
+
+
+
+Place all dependencies in `devDependencies` except cleaner packages (which may be exported as types to NPM consumers).
+
+Server and client configuration in separate files (`serverConfig.json`, `clientConfig.json`), both validated with cleaners via `cleaner-config`. Prevents accidentally exposing server secrets to clients.
+
+Server processes use PM2 with `pm2.json` at repo root. API processes in cluster mode (`"instances": "max"`); engine processes as single instances to avoid duplicate background work.
+
+When a server repo has both backend and frontend, the `build` script must build both. Use `npm-run-all -p build.*` to run in parallel.
+
+
diff --git a/.cursor/rules/typescript-standards.mdc b/.cursor/rules/typescript-standards.mdc
new file mode 100644
index 0000000..4700608
--- /dev/null
+++ b/.cursor/rules/typescript-standards.mdc
@@ -0,0 +1,269 @@
+---
+description: TypeScript/React coding standards for error handling, types, and patterns
+globs: ["**/*.ts","**/*.tsx"]
+alwaysApply: false
+---
+
+Enforce TypeScript and React coding standards in all `.ts`/`.tsx` file edits.
+
+
+
+NEVER use hard-coded user-facing strings. All display text MUST come from localized string resources (`lstrings.*`). This includes error messages, labels, placeholders, and any text visible to users.
+❌ `setError('Something went wrong')`
+❌ `Loading...`
+✅ `setError(lstrings.generic_error)`
+✅ `{lstrings.loading}`
+
+
+Localized strings with placeholders MUST use numbered suffixes (`_1s`, `_2s`, etc.) and positional `sprintf` args (`%1$s`, `%2$s`).
+❌ `warning_message: 'Amount %s exceeds limit of %s'`
+✅ `warning_message_2s: 'Amount %1$s exceeds limit of %2$s'`
+❌ `sprintf(lstrings.warning_header, 'this item')`
+✅ `sprintf(lstrings.warning_header_1s, itemName)`
+
+
+NEVER use `any` types. Define an interface, type, or cleaner. If truly unavoidable, add a comment explaining why.
+
+NEVER use optional chaining results directly in conditions.
+❌ `if (obj?.prop)` → ✅ `if (obj?.prop != null)`
+❌ `if (obj?.arr?.length > 0)` → ✅ `if (obj?.arr != null && obj.arr.length > 0)`
+
+
+NEVER use empty rejection handlers that silently swallow errors.
+❌ `.catch(() => {})`
+✅ `.catch((err: unknown) => { showError(err) })`
+Exception: Empty handlers are acceptable ONLY when the rejection is an expected user action (e.g., user cancelled a modal) AND there's nothing to clean up.
+
+
+Catch blocks MUST use `(error: unknown) => {...}` format.
+
+Do not use the `void` operator to silence Promise returns. Create a non-async handler wrapping the async call with explicit error handling.
+❌ `onSwipe={() => { void doAsync() }}`
+✅ `const onSwipe = useHandler(() => { doAsync().catch((err: unknown) => { showError(err) }) })`
+
+
+Do not use inline styles in JSX. Use `getStyles`/`cacheStyles` (static) and memoized (derived) for style definitions.
+
+JSX event handler props MUST NOT use inline arrow functions. Create named handlers.
+
+
+
+
+
+Use `??` instead of `||` for default values. `??` only treats `null`/`undefined` as missing; `||` treats all falsy values as missing.
+❌ `config.timeout || 5000` → ✅ `config.timeout ?? 5000` (preserves `0`)
+❌ `user.name || 'Anonymous'` → ✅ `user.name ?? 'Anonymous'` (preserves `''`)
+
+
+Prefer flat boolean expressions over nested if/return in filter/predicate functions.
+❌ `if (x != null) { if (f(x).match(y)) { return true } }; return otherResult`
+✅ `return (x != null && f(x).match(y)) || otherResult`
+
+
+Do not add branches that return the same value as the final return.
+❌ `if (node.type === 'TSNullKeyword') { return false }; return false`
+✅ `return false`
+
+
+When a handler only forwards to another function with no additional logic, pass the function directly.
+❌ `const handleComplete = useHandler(() => { onComplete?.() })`
+✅ `onPress: onComplete`
+
+
+
+
+Avoid calling expensive transformation functions (like `normalizeForSearch`, `toLowerCase`) inside loops when the input doesn't change per iteration. Pre-compute outside the loop.
+❌ `items.filter(item => searchTerms.every(term => normalize(item.name).includes(term)))`
+✅ `items.filter(item => { const n = normalize(item.name); return searchTerms.every(term => n.includes(term)) })`
+
+
+Use `asJSON` cleaner instead of manual `JSON.parse`.
+❌ `const data = asMyCleaner(JSON.parse(text))`
+✅ `const data = asJSON(asMyCleaner)(text)`
+
+
+Use TanStack Query (`useQuery`) for async data fetching instead of `useEffect`/`useState` patterns.
+❌ `const [data, setData] = useState(null); useEffect(() => { fetchData().then(setData) }, [])`
+✅ `const { data } = useQuery({ queryKey: ['myData', deps], queryFn: fetchData, enabled: deps != null })`
+
+
+Use specific Redux selectors to avoid unnecessary re-renders.
+❌ `const { countryCode } = useSelector(state => state.ui.settings)`
+✅ `const countryCode = useSelector(state => state.ui.settings.countryCode)`
+
+
+Keep `useSelector` callbacks simple — only access state, never derive. Derivation logic belongs in `useMemo` (or inline) after all referenced variables are declared. Selector callbacks run on every store update and can reference hoisted-but-uninitialized variables, causing silent bugs.
+❌ `const result = useSelector(state => { const x = expensiveFn(someVar, state.foo); return x })`
+✅ `const foo = useSelector(state => state.foo)` then `const result = useMemo(() => expensiveFn(someVar, foo), [someVar, foo])`
+
+
+Use `React.FC` for component exports. Use `React.ReactElement` for non-component render functions.
+❌ `const Component = (props: Props): React.JSX.Element => {`
+✅ `const Component: React.FC<Props> = props => {`
+
+
+Use descriptive variable names that clearly indicate their purpose. Avoid single/few-letter variables except in trivial cases (loop counters, mathematical formulas).
+❌ `const s = asMaybePrivateNetworkingSetting(cfg.userSettings)`
+❌ `const ds = asMaybePrivateNetworkingSetting(cfg.currencyInfo.defaultSettings)`
+❌ `return (s ?? ds)?.networkPrivacy === 'nym'`
+✅ `const userSettings = asMaybePrivateNetworkingSetting(currencyConfig.userSettings)`
+✅ `return userSettings?.networkPrivacy === 'nym'`
+
+
+Always include cleanup functions in `useEffect` hooks that create timers, intervals, subscriptions, or other side effects.
+
+
+
+Use `biggystring` for all numeric calculations involving crypto amounts, fiat values, or exchange rates. Native JS floating-point math loses precision. Values from `convertCurrency`, `convertNativeToExchange`, and similar helpers are already biggystring-compatible strings.
+❌ `const impact = (parseFloat(from) - parseFloat(to)) / parseFloat(from)`
+✅ `const impact = div(sub(from, to), from, 8)`
+
+
+When deriving arrays or objects from props/state (e.g. `Object.values()`, `Object.keys()`, `.filter()`, `.map()`), wrap in `React.useMemo` if the result is used in a dependency array or passed as a prop. Bare derivations create new references every render.
+❌ `const wallets = Object.values(currencyWallets)` (used in effect deps)
+✅ `const wallets = React.useMemo(() => Object.values(currencyWallets), [currencyWallets])`
+
+
+When guarding against re-fetching with nullable map lookups, check for the success payload specifically — not just entry existence. Storing error results as non-null entries permanently blocks retry if the guard only checks `== null`.
+❌ `if (resultMap[id] == null) fetchData(id)` (error entries block retry)
+✅ `if (resultMap[id]?.data == null) fetchData(id)` (only skip when data is present)
+Exception: Auto-load effects where infinite retry on persistent failure is undesirable — keep `== null` there and allow retry only via explicit user action.
+
+
+Component files (`.tsx`) and utility files (`.ts`) follow a consistent section ordering.
+
+**File-level ordering:**
+1. Imports
+2. Types / Interfaces — exported types first, then internal `Props`
+3. Constants
+4. Main component (`export const Scene: React.FC`)
+5. Sub-components (internal, non-exported)
+6. Styles (`getStyles` / `cacheStyles`)
+7. Helpers / utility functions — pure functions at the very end of the file
+
+**Component body ordering:**
+1. Props destructuring
+2. Theme / styles (`useTheme`, `getStyles`)
+3. State (`useState`)
+4. Refs (`useRef`)
+5. Selectors (`useSelector`, `useWatch`)
+6. Derived values / `useMemo`
+7. Handlers (`useHandler`)
+8. Effects (`useEffect`, `useBackEvent`)
+9. Return JSX
+
+
+
+
+
+
+`@typescript-eslint/strict-boolean-expressions` on `any`-typed value.
+Cause: Variable is `any` because it comes from an untyped method or third-party code.
+Fix: Type-annotate the variable to remove `any`. Do NOT use explicit comparisons — they don't help when the value itself is `any`.
+❌ `if (!result.ok)` where `result` is `any`
+✅ `const results: Array> = await wallet.split(items)`
+Known untyped methods: `EdgeCurrencyWallet.split()` returns `Array>`.
+
+
+Strict boolean on nullable/optional values.
+Cause: Using truthy check on value that could be `null`, `undefined`, `0`, or `''`.
+Fix: Use explicit nullish comparison (`!= null`, `!== ''`, `> 0`).
+❌ `if (value)` where `value` is `string | undefined`
+✅ `if (value != null && value !== '')`
+❌ `if (array.length)` → ✅ `if (array.length > 0)`
+
+
+Type-only imports MUST use `import type` at the top level, not inline `type` keyword within a value import.
+❌ `import { type Foo, type Bar } from 'module'` (when importing ONLY types)
+✅ `import type { Foo, Bar } from 'module'`
+OK: `import { someValue, type Foo } from 'module'` (mixed value + type import)
+
+
+Imports are auto-sorted by `simple-import-sort/imports`. When adding new imports, place them roughly in alphabetical order — the formatter will fix the exact order. If the pre-commit hook fails with "Run autofix to sort these imports!", the imports just need reordering.
+
+
+Floating promises must have `.catch()` handlers.
+✅ `.catch((err: unknown) => { showError(err) })` — standard for unexpected errors
+✅ `.catch(() => {})` — ONLY for expected rejections (user cancelled modal, expected race condition)
+The `(err: unknown)` typing is required by `@typescript-eslint/use-unknown-in-catch-callback-variable`.
+
+
+Catch callbacks must type error as `unknown`.
+❌ `.catch(err => ...)` or `.catch((err: any) => ...)`
+✅ `.catch((err: unknown) => { showError(err) })`
+For try/catch blocks, use `catch (e: unknown)` and narrow with type guards or assertions.
+
+
+Functions must have explicit return types.
+Fix: Add return type annotation. Common types:
+- `void` for side-effect-only functions
+- `React.ReactElement` or `React.ReactElement | null` for render helpers
+- `Promise<void>` for async functions with no return
+- Specific type for functions that return values
+❌ `function foo() { return 1 }`
+✅ `function foo(): number { return 1 }`
+
+
+Using deprecated API.
+Fix: Check the deprecation message for the replacement API. Common replacements:
+- `NavigationBase` → Read `/fix-eslint` skill `navigation-base` pattern for category-based fix guidance
+- `uniqueIdentifier` → `EdgeSpendInfo.memos`
+- `memo` → `EdgeSpendInfo.memos`
+- `networkFee` / `parentNetworkFee` → `networkFees`
+- `currencyCode` → `tokenId`
+If no clear replacement exists, flag to user for guidance.
+
+
+Event handler props must follow naming convention.
+Fix: Rename handler to match the prop pattern.
+- Props starting with `on` expect handlers starting with `handle`
+❌ `onPress={openModal}` → ✅ `onPress={handleOpenModal}`
+❌ `onChange={updateValue}` → ✅ `onChange={handleUpdateValue}`
+
+
+Components should use `React.FC` pattern.
+❌ `const Component = (props: Props): React.ReactElement => {`
+✅ `export const Component: React.FC<Props> = props => {`
+
+
+Generic components cannot use `React.FC` because it does not support type parameters.
+If the generic is not essential (type param only used internally, can be collapsed into a concrete type), remove the generic and convert to `React.FC`.
+If the generic is essential (callers rely on type inference, e.g. ` ...>`), keep the function declaration form with an explicit return type. The warning is accepted.
+✅ `export function MyComponent(props: Props): React.ReactElement {`
+❌ Converting an essential generic to `React.FC` — this loses type safety for callers.
+
+
+Avoid `styled()` wrapper components.
+Fix: Convert to regular component using `useTheme()` and `cacheStyles()`.
+❌ `const StyledView = styled(View)(theme => ({ ... }))`
+✅ Create a regular component:
+```tsx
+const MyView: React.FC = props => {
+ const theme = useTheme()
+ const styles = getStyles(theme)
+ return <View style={styles.container}>{props.children}</View>
+}
+const getStyles = cacheStyles((theme: Theme) => ({
+ container: { ... }
+}))
+```
+Note: This is an architectural change. If the file has many `styled()` usages, flag to user rather than refactoring inline.
+
+
+When catching unknown errors that need property inspection, use `cleaners` instead of type assertions.
+❌ `const err = e as { code?: string; message?: string }`
+✅ Define a cleaner and use `asMaybe`:
+```ts
+const asFooError = asObject({
+ code: asValue(FOO_CODE),
+ message: asOptional(asString, '')
+})
+const fooError = asMaybe(asFooError)(e)
+if (fooError != null) { ... }
+```
+For generic error message extraction:
+❌ `err.message ?? ''` (unsafe on `unknown`)
+✅ `e instanceof Error ? e.message : String(e)`
+
+
+
diff --git a/.cursor/rules/workflow-halt-on-error.mdc b/.cursor/rules/workflow-halt-on-error.mdc
new file mode 100644
index 0000000..d5424d3
--- /dev/null
+++ b/.cursor/rules/workflow-halt-on-error.mdc
@@ -0,0 +1,84 @@
+---
+description: Halt on workflow errors and detect slash-command invocations in user messages
+alwaysApply: true
+---
+
+
+
+All workflow-related skill definitions (`*.md` / `SKILL.md`) and workflow companion scripts (`*.sh`) are sourced from `~/.cursor/`. When executing skills, prefer explicit `~/.cursor/...` paths and do not assume repo-local workflow files unless the skill explicitly points to one.
+
+When a skill mentions a script path, resolve it under `~/.cursor/skills/<skill-name>/scripts/` unless the skill explicitly specifies an absolute path elsewhere. Do not assume repo-relative `scripts/` paths without verifying the skill directory contents.
+
+When ANY shell command fails (non-zero exit code) while executing an active skill workflow, a delegated subskill from that workflow, or a companion-script step required by that workflow (except where explicitly allowed by `auto-fix-verification-failures` or `companion-script-nonzero-contracts`):
+1. **STOP** — do not retry, work around, substitute, or continue the workflow.
+2. **Report** — show the user the exact command, exit code, and error output.
+3. **Diagnose** — classify the failure: missing tool (`command not found`), wrong path, permissions, or logic error.
+4. **Evaluate workflow** — if the failure reveals a gap in a skill definition, follow the fix-workflow-first rules below.
+5. **Wait** — do not resume until the user responds.
+
+
+When a workflow gap is discovered in an active skill definition:
+1. **Stop immediately** — do not continue the current task or apply any workaround.
+2. **Identify the root cause** in the skill (`.cursor/skills/*/SKILL.md`) definition.
+3. **Propose the fix** to the user and wait for approval before proceeding.
+4. **Fix the skill** using `/author` after approval.
+5. **Resume the original task** only after the skill is updated.
+
+Fixing the skill takes **absolute priority** over all other actions — including workarounds, continuing the original task, or applying temporary fixes. Do NOT apply workarounds or manual fixes before proposing the skill update. The correct sequence is: identify gap → propose fix → get approval → apply fix → then resume original task. This applies to all workflow issues — missed steps, incorrect output, wrong tool usage, shell failures, formatting problems, etc. The skill is the source of truth; patching around it creates drift.
+
+
+These workflow halt rules are for skill-driven execution, especially hands-off/orchestrated skills and their dependencies. They do not automatically apply to ad hoc exploration, incidental verification, or low-risk authoring work unless that command is part of an active skill contract.
+
+Exception to `halt-on-error`: For verification/code-quality failures where diagnostics are explicit and local, continue automatically with bounded remediation.
+
+Allowed auto-fix scope:
+- TypeScript/compiler failures (`tsc`) with clear file/line diagnostics
+- Lint failures (`eslint`) with clear file/line diagnostics
+- Test failures (`jest`/`yarn test`) when stack traces or assertion output identify failing test files
+- `verify-repo.sh` code-step failures that resolve to one of the above
+
+Required behavior:
+1. Briefly log rationale: failure type, affected files, and why scope is unambiguous.
+2. Apply the minimal fix in the failing repo.
+3. Re-run the failing verification step.
+4. Limit to 2 remediation attempts; if still failing or scope expands, fall back to `halt-on-error`.
+
+Never auto-fix:
+- Missing tools/auth (`command not found`, `PROMPT_GH_AUTH`)
+- Wrong path/permissions
+- Companion script contract/usage failures
+- Unexpected exit codes from orchestrator scripts
+- Any failure requiring destructive operations or workflow bypasses
+
+
+Respect documented companion script exit-code contracts. Non-zero does NOT always mean fatal.
+
+For `~/.cursor/skills/im/scripts/lint-warnings.sh`:
+- `0` = no remaining lint findings after auto-fix
+- `1` = remaining lint findings after auto-fix (expected actionable state)
+- `2` = execution error (fatal)
+
+Required behavior:
+1. If exit `1`, continue workflow by fixing the remaining lint findings before implementation.
+2. If the script auto-fixes pre-existing lint issues, commit those changes in a separate lint-fix commit immediately before feature commits, even if no findings remain.
+3. If exit `2`, apply `halt-on-error`.
+
+
+Do NOT silently substitute an alternative tool or approach when a command fails. If `rg` is not found, do not fall back to `grep`. If a script exits non-zero, do not manually replicate what the script does. The failure is the signal — report it.
+
+
+
+
+
+Scan the user's message for `/word` tokens. A token is a **command invocation** when ALL of:
+- `/word` is preceded by whitespace, a newline, or is at the start of the message
+- `word` contains only lowercase letters and hyphens (e.g., `/im`, `/pr-create`, `/author`)
+- `/word` is NOT inside a file path, URL, or code block
+
+When detected:
+1. Read `~/.cursor/skills/<command>/SKILL.md` and follow it immediately.
+2. If the file does not exist, inform the user: "Skill `/<command>` not found in `~/.cursor/skills/`."
+
+**Ignore `/`** in: file paths (`/Users/...`, `~/...`), URLs (`https://...`), mid-word (`and/or`), backticks/code blocks.
+
+
diff --git a/.cursor/scripts/port-to-opencode.sh b/.cursor/scripts/port-to-opencode.sh
new file mode 100755
index 0000000..8c7599b
--- /dev/null
+++ b/.cursor/scripts/port-to-opencode.sh
@@ -0,0 +1,224 @@
+#!/usr/bin/env bash
+# port-to-opencode.sh — Convert Cursor .mdc/.md files to OpenCode-compatible JSON + MD mirrors.
+# Single self-contained script (bash + inline node). No Python dependency.
+#
+# Usage:
+# port-to-opencode.sh # Convert all rules and skills
+# port-to-opencode.sh --dry-run # Show what would be done
+# port-to-opencode.sh --validate # Validate existing JSON mirrors
+# port-to-opencode.sh file1.mdc file2.md # Convert specific files
+set -euo pipefail
+
+# CLI state. Note: anything that is not a recognized flag falls into FILES,
+# so an unknown "--flag" is silently treated as an input file path rather
+# than rejected.
+DRY_RUN=false   # --dry-run: report planned outputs without writing
+VALIDATE=false  # --validate: check existing JSON mirrors instead of converting
+FILES=()        # explicit input files; empty means "scan ~/.cursor"
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --dry-run) DRY_RUN=true; shift ;;
+    --validate) VALIDATE=true; shift ;;
+    --sync) shift ;; # accepted for compat, no-op
+    *) FILES+=("$1"); shift ;;
+  esac
+done
+# Hand off to an inline node program. exec replaces the shell process; flag
+# values and any explicit file list are forwarded as positional arguments
+# (with node -e they appear from process.argv[1] onward).
+exec node -e '
+const fs = require("fs")
+const pathMod = require("path")
+const os = require("os")
+
+// Source tree (Cursor rules/skills) and mirror destination (OpenCode).
+const CURSOR_DIR = pathMod.join(os.homedir(), ".cursor")
+const OPENCODE_DIR = pathMod.join(os.homedir(), ".config", "opencode")
+// Flags arrive as the literal strings "true"/"false" from the bash wrapper.
+const DRY_RUN = process.argv[1] === "true"
+const VALIDATE = process.argv[2] === "true"
+const inputFiles = process.argv.slice(3).filter(f => f)
+
+// Minimal YAML front-matter reader: only flat "key: value" pairs between the
+// leading --- fences. Inline [...] arrays are parsed by swapping single
+// quotes (\x27) for double quotes (\x22) and feeding JSON.parse (parse
+// failures leave the raw string); bare true/false become booleans. Nested
+// YAML is NOT supported. Returns {} when no front matter is present.
+function parseYamlFrontmatter(content) {
+  const match = content.match(/^---\s*\n([\s\S]*?)\n---\s*\n/)
+  if (!match) return {}
+  const fm = {}
+  for (const line of match[1].split("\n")) {
+    const idx = line.indexOf(":")
+    if (idx === -1) continue // not a key: value line — skipped silently
+    const key = line.substring(0, idx).trim()
+    let value = line.substring(idx + 1).trim()
+    if (value.startsWith("[") && value.endsWith("]")) {
+      try { value = JSON.parse(value.replace(/\x27/g, "\x22")) } catch {}
+    } else if (value === "true" || value === "false") {
+      value = value === "true"
+    }
+    fm[key] = value
+  }
+  return fm
+}
+
+function extractTagContent(content, tag) {
+ const re = new RegExp("<" + tag + "[^>]*>([\\s\\S]*?)" + tag + ">")
+ const m = content.match(re)
+ return m ? m[1].trim() : ""
+}
+
+function extractGoal(content) { return extractTagContent(content, "goal") }
+
+function extractRules(content) {
+ const section = extractTagContent(content, "rules")
+ if (!section) return []
+ const rules = []
+ const re = /]*>([\s\S]*?)<\/rule>/g
+ let m
+ while ((m = re.exec(section)) !== null) {
+ let instruction = m[2].trim().replace(/\*\*/g, "").replace(/\s+/g, " ")
+ rules.push({ id: m[1], instruction })
+ }
+ return rules
+}
+
+function extractSteps(content) {
+ const steps = []
+ const re = /]*>([\s\S]*?)<\/step>/g
+ let m
+ while ((m = re.exec(content)) !== null) {
+ steps.push({ id: m[1], name: m[2], instruction: m[3].trim() })
+ }
+ return steps
+}
+
+// Collect every absolute or ~-prefixed path ending in .sh or .js mentioned
+// anywhere in the document. Deduplicated via Set, returned sorted.
+function extractScriptRefs(content) {
+  const refs = new Set()
+  const re = /[~]?\/[\w/\-.]+\.(sh|js)/g
+  let m
+  while ((m = re.exec(content)) !== null) refs.add(m[0])
+  return [...refs].sort()
+}
+
+// Convert one .mdc rule file to the OpenCode JSON shape. id/title come from
+// the file basename; description prefers front-matter but falls back to the
+// <goal> text (|| is intentional here: an empty description should fall
+// through). globs/alwaysApply default when the front matter omits them.
+function convertMdcToJson(filePath) {
+  const content = fs.readFileSync(filePath, "utf8")
+  const fm = parseYamlFrontmatter(content)
+  const basename = pathMod.basename(filePath, ".mdc")
+  return {
+    id: basename, title: basename,
+    description: fm.description || extractGoal(content),
+    globs: fm.globs || [], alwaysApply: fm.alwaysApply || false,
+    goal: extractGoal(content), rules: extractRules(content),
+    steps: extractSteps(content), scripts: extractScriptRefs(content)
+  }
+}
+
+// Convert a command .md file (no front matter) to the JSON shape; the <goal>
+// text doubles as the description.
+// NOTE(review): processFile below only routes /rules/*.mdc and
+// skills/SKILL.md, so this converter appears unreachable from the main
+// driver — confirm whether command files should get their own branch there.
+function convertCommandToJson(filePath) {
+  const content = fs.readFileSync(filePath, "utf8")
+  const basename = pathMod.basename(filePath, ".md")
+  const goal = extractGoal(content)
+  return {
+    id: basename, title: basename, description: goal, goal,
+    rules: extractRules(content), steps: extractSteps(content),
+    scripts: extractScriptRefs(content)
+  }
+}
+
+// Convert a skills/<name>/SKILL.md file to the JSON shape. The id is the
+// parent directory name (every skill file is literally named SKILL.md);
+// title/name prefer the front-matter name field.
+function convertSkillToJson(filePath) {
+  const content = fs.readFileSync(filePath, "utf8")
+  const fm = parseYamlFrontmatter(content)
+  const basename = pathMod.basename(pathMod.dirname(filePath))
+  return {
+    id: basename, title: fm.name || basename, name: fm.name || basename,
+    description: fm.description || extractGoal(content),
+    goal: extractGoal(content), rules: extractRules(content),
+    steps: extractSteps(content), scripts: extractScriptRefs(content)
+  }
+}
+
+function convertToMd(content) {
+ let r = content
+ r = r.replace(/([\s\S]*?)<\/goal>/g, "## Goal\n\n$1\n")
+ r = r.replace(/]*>/g, "## Rules\n\n")
+ r = r.replace(/<\/rules>/g, "")
+ r = r.replace(//g, "- **$1**: ")
+ r = r.replace(/<\/rule>/g, "")
+ r = r.replace(//g, "### Step $1: $2\n\n")
+ r = r.replace(/<\/step>/g, "")
+ r = r.replace(//g, "#### $1\n\n")
+ r = r.replace(/<\/sub-step>/g, "")
+ r = r.replace(//g, "## Edge Cases\n\n")
+ r = r.replace(/<\/edge-cases>/g, "")
+ r = r.replace(//g, "### $1\n\n")
+ r = r.replace(/<\/case>/g, "")
+ r = r.replace(//g, "## Sequence: $1\n\n")
+ r = r.replace(/<\/sequence>/g, "")
+ r = r.replace(//g, "## Scope\n\n")
+ r = r.replace(/<\/scope>/g, "")
+ r = r.replace(/]*>/g, "## Standards\n\n")
+ r = r.replace(/<\/standards>/g, "")
+ r = r.replace(//g, "- **$1**: ")
+ r = r.replace(/<\/standard>/g, "")
+ while (r.includes("\n\n\n")) r = r.replace(/\n\n\n/g, "\n\n")
+ return r
+}
+
+// Route one input file to its converter and write the JSON + MD mirror pair.
+// Returns a human-readable status line (the driver prints it). Only
+// /rules/*.mdc and skills SKILL.md files are handled; anything else is
+// reported as skipped.
+function processFile(filePath) {
+  let outputDir, outputBase, converter
+  if (filePath.includes("/rules/") && filePath.endsWith(".mdc")) {
+    outputDir = pathMod.join(OPENCODE_DIR, "rules")
+    outputBase = pathMod.basename(filePath, ".mdc")
+    converter = convertMdcToJson
+  } else if (filePath.includes("/skills/") && pathMod.basename(filePath) === "SKILL.md") {
+    // Mirror keeps the skill directory name: skills/<name>/SKILL.{json,md}
+    outputDir = pathMod.join(OPENCODE_DIR, "skills", pathMod.basename(pathMod.dirname(filePath)))
+    outputBase = "SKILL"
+    converter = convertSkillToJson
+  } else {
+    return "Skipping: " + filePath + " (unknown type)"
+  }
+
+  const jsonPath = pathMod.join(outputDir, outputBase + ".json")
+  const mdPath = pathMod.join(outputDir, outputBase + ".md")
+
+  // Dry-run mode reports targets without touching the filesystem.
+  if (DRY_RUN) return "Would create: " + jsonPath + "\n Would create: " + mdPath
+
+  fs.mkdirSync(outputDir, { recursive: true })
+  const jsonData = converter(filePath)
+  const content = fs.readFileSync(filePath, "utf8")
+  fs.writeFileSync(jsonPath, JSON.stringify(jsonData, null, 2) + "\n")
+  fs.writeFileSync(mdPath, convertToMd(content))
+  return "Converted: " + filePath + " -> " + jsonPath
+}
+
+// Validate one mirror file: must parse as JSON and carry the three required
+// top-level fields. Returns a VALID/INVALID status line; never throws.
+function validateJson(jsonPath) {
+  try {
+    const data = JSON.parse(fs.readFileSync(jsonPath, "utf8"))
+    const missing = ["id", "title", "description"].filter(f => !(f in data))
+    if (missing.length) return "INVALID: " + jsonPath + " (missing: " + missing.join(", ") + ")"
+    return "VALID: " + jsonPath
+  } catch (e) {
+    return "INVALID: " + jsonPath + " (not valid JSON: " + e.message + ")"
+  }
+}
+
+// Recursively collect files under dir for which predicate(fullPath, name)
+// is true. Read errors (missing or unreadable directories) are swallowed —
+// deliberate best-effort, so a partial tree yields partial results.
+function walkDir(dir, predicate) {
+  const results = []
+  try {
+    for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
+      const full = pathMod.join(dir, entry.name)
+      if (entry.isDirectory()) results.push(...walkDir(full, predicate))
+      else if (predicate(full, entry.name)) results.push(full)
+    }
+  } catch {}
+  return results
+}
+
+// --validate short-circuits: check every .json mirror and exit.
+if (VALIDATE) {
+  console.log("Validating JSON mirrors...")
+  for (const f of walkDir(OPENCODE_DIR, (fp, n) => n.endsWith(".json"))) console.log(validateJson(f))
+  process.exit(0)
+}
+
+// Explicit file arguments win (with ~ expanded to the home directory);
+// otherwise scan the whole Cursor rules + skills trees.
+const files = inputFiles.length > 0
+  ? inputFiles.map(f => f.startsWith("~") ? f.replace("~", os.homedir()) : f)
+  : [
+    ...walkDir(pathMod.join(CURSOR_DIR, "rules"), (fp, n) => n.endsWith(".mdc")),
+    ...walkDir(pathMod.join(CURSOR_DIR, "skills"), (fp, n) => n === "SKILL.md")
+  ]
+
+console.log("Found " + files.length + " files to process")
+for (const f of files) console.log(processFile(f))
+console.log("\nDone. Processed " + files.length + " files.")
+if (DRY_RUN) console.log("Run without --dry-run to write files.")
+' "$DRY_RUN" "$VALIDATE" ${FILES[@]+"${FILES[@]}"} # ${arr[@]+...} keeps empty arrays safe under set -u on pre-4.4 bash
diff --git a/.cursor/scripts/pr-status-gql.sh b/.cursor/scripts/pr-status-gql.sh
new file mode 100755
index 0000000..b21c3ca
--- /dev/null
+++ b/.cursor/scripts/pr-status-gql.sh
@@ -0,0 +1,429 @@
+#!/usr/bin/env bash
+# pr-status-gql.sh — Fetch status of open PRs for a user (GraphQL API).
+# Single run, no TUI. "New" comments = posted after the PR's last commit.
+#
+# Uses a single GraphQL query per poll. Separate rate limit budget from REST.
+#
+# Usage:
+# pr-status-gql.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge] [--format text|json]
+# pr-status-gql.sh # All repos for user in EdgeApp org
+# pr-status-gql.sh --budget 0.5 # Reserve 50% of rate limit for other tools
+#
+# Requires: gh CLI (authenticated).
+set -euo pipefail
+
+# Defaults. NOTE(review): assigning USER shadows the login-name environment
+# variable for the rest of this script — harmless here since the script only
+# forwards it to node, but worth renaming if anything later reads $USER.
+OWNER="EdgeApp" REPO="" USER="" FORMAT="text" BUDGET="0.67"
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --owner) OWNER="$2"; shift 2 ;;
+    --repo) REPO="$2"; shift 2 ;;
+    --user) USER="$2"; shift 2 ;;
+    --format) FORMAT="$2"; shift 2 ;;
+    --budget) BUDGET="$2"; shift 2 ;;
+    *) echo "Unknown arg: $1" >&2; exit 1 ;;
+  esac
+done
+
+# Per-(owner,repo) scratch dir for new-PR tracking between runs; "all" when
+# no repo filter is given.
+STATE_DIR="${TMPDIR:-/tmp}/pr-watch-gql-${OWNER}-${REPO:-all}"
+mkdir -p "$STATE_DIR"
+export STATE_DIR # also passed positionally to node below; export is belt-and-braces
+
+# Build the GraphQL query based on mode (single repo vs all repos)
+# Shared per-PR field selection: head ref/commit, review decision, last 30
+# reviews, last 100 issue comments, first 100 inline review threads, and the
+# last commit with its status-check rollup. The window sizes are hard caps —
+# items beyond them are silently dropped.
+PR_FIELDS='
+  number title isDraft url headRefName updatedAt
+  repository { name nameWithOwner }
+  headRefOid
+  reviewDecision
+  reviews(last: 30) {
+    nodes { author { login } state submittedAt }
+  }
+  comments(last: 100) {
+    totalCount
+    nodes { author { login } createdAt bodyText }
+  }
+  reviewThreads(first: 100) {
+    nodes {
+      isResolved
+      comments(first: 5) {
+        nodes { author { login } createdAt bodyText path line }
+      }
+    }
+  }
+  commits(last: 1) {
+    nodes {
+      commit {
+        committedDate
+        oid
+        statusCheckRollup {
+          contexts(first: 20) {
+            nodes {
+              ... on CheckRun {
+                __typename name status conclusion
+              }
+              ... on StatusContext {
+                __typename context state
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+'
+
+# Assemble the final query. Single-repo mode pulls the repository PR list
+# (author filtering happens later in node); all-repo mode uses
+# viewer.pullRequests, which is already scoped to the authenticated user.
+# Both request rateLimit so the caller can pace subsequent polls.
+if [[ -n "$REPO" ]]; then
+  QUERY="
+  {
+    viewer { login }
+    repository(owner: \"${OWNER}\", name: \"${REPO}\") {
+      pullRequests(first: 50, states: OPEN, orderBy: {field: UPDATED_AT, direction: DESC}) {
+        nodes {
+          author { login }
+          ${PR_FIELDS}
+        }
+      }
+    }
+    rateLimit { cost remaining resetAt limit }
+  }"
+else
+  QUERY="
+  {
+    viewer {
+      login
+      pullRequests(first: 50, states: OPEN, orderBy: {field: UPDATED_AT, direction: DESC}) {
+        nodes {
+          ${PR_FIELDS}
+        }
+      }
+    }
+    rateLimit { cost remaining resetAt limit }
+  }"
+fi
+
+# Execute query via gh CLI.
+# FIX: dropped the former `2>&1` — gh writes warnings/auth notices to
+# stderr, and merging them into the captured output corrupts the JSON that
+# node later feeds to JSON.parse. Under `set -e` a failing gh invocation
+# aborts the script before the capture is ever consumed, so the merged
+# stderr text was never usable for error handling anyway.
+GQL_RESULT=$(gh api graphql -f query="$QUERY")
+
+# Process the result with Node.js
+exec node -e '
+const fs = require("fs")
+// Positional arguments from the bash wrapper, destructured for readability.
+const { OWNER, REPO, USER_ARG, FORMAT, BUDGET, STATE_DIR } = {
+  OWNER: process.argv[1],
+  REPO: process.argv[2] || "",
+  USER_ARG: process.argv[3],
+  FORMAT: process.argv[4],
+  BUDGET: parseFloat(process.argv[5]) || 0.67, // fraction of rate limit this tool may consume
+  STATE_DIR: process.argv[6]
+}
+// The raw gh response; throws (and exits non-zero) if it is not valid JSON.
+const gqlResult = JSON.parse(process.argv[7])
+
+// GraphQL errors arrive with HTTP 200 — surface them explicitly and bail.
+if (gqlResult.errors) {
+  process.stderr.write("GraphQL errors: " + JSON.stringify(gqlResult.errors) + "\n")
+  process.exit(1)
+}
+
+// --- Determine user and extract raw PR nodes ---
+let user
+let rawNodes
+
+if (REPO) {
+ // Single-repo mode: repository.pullRequests, filtered by viewer login
+ user = USER_ARG || data.viewer?.login || "unknown"
+ rawNodes = (data.repository?.pullRequests?.nodes || [])
+ .filter(n => n.author?.login === user)
+} else {
+ // All-repo mode: viewer.pullRequests (already scoped to authenticated user)
+ user = data.viewer?.login || USER_ARG || "unknown"
+ rawNodes = data.viewer?.pullRequests?.nodes || []
+}
+
+// --- Rate limit ---
+const rateLimit = data.rateLimit || {}
+const rlCost = rateLimit.cost || 1
+const rlRemaining = rateLimit.remaining
+const rlLimit = rateLimit.limit
+const rlResetAt = rateLimit.resetAt
+
+// --- NEW PR tracking ---
+function loadPreviousPrNumbers() {
+ try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/known-prs.json`, "utf8")) } catch { return [] }
+}
+function savePrNumbers(numbers) {
+ fs.writeFileSync(`${STATE_DIR}/known-prs.json`, JSON.stringify(numbers))
+}
+
+const previousPrNumbers = loadPreviousPrNumbers()
+const currentPrNumbers = rawNodes.map(n => n.number)
+const newPrNumbers = new Set(currentPrNumbers.filter(n => !previousPrNumbers.includes(n)))
+savePrNumbers(currentPrNumbers)
+
+// --- Transform GQL nodes to result format ---
+// Look up one named CheckRun in a status-check rollup; StatusContext nodes
+// are ignored by design. Returns lower-cased status/conclusion, or a
+// { status: "none" } placeholder when the check is absent.
+function checkInfo(contexts, name) {
+  const run = (contexts || []).find(c => c.__typename === "CheckRun" && c.name === name)
+  if (!run) return { status: "none", conclusion: null }
+  return { status: run.status?.toLowerCase() || "none", conclusion: run.conclusion?.toLowerCase() || null }
+}
+
+// Coarse relative-age formatter for ISO timestamps: "Nm ago" under an hour,
+// then hours, then days. "-" for a missing timestamp.
+function relTime(iso) {
+  if (!iso) return "-"
+  const ms = Date.now() - new Date(iso).getTime()
+  const m = Math.floor(ms / 60000)
+  if (m < 60) return m + "m ago"
+  const h = Math.floor(m / 60)
+  if (h < 24) return h + "h ago"
+  return Math.floor(h / 24) + "d ago"
+}
+
+// Flatten each raw PR node into the report shape: comment counts split into
+// new (after last commit) vs old, deduped human review states, and the three
+// tracked CI checks. Comments authored by the PR owner are excluded
+// everywhere; comment bodies are truncated to 120 chars.
+const results = rawNodes.map(pr => {
+  const repo = pr.repository?.name || REPO
+  const n = pr.number
+  const sha = pr.headRefOid?.substring(0, 7) || "?"
+  const lastCommitNode = pr.commits?.nodes?.[0]?.commit
+  const lastCommitDate = lastCommitNode?.committedDate || null
+  const contexts = lastCommitNode?.statusCheckRollup?.contexts?.nodes || []
+
+  // Collect review thread comments (inline review comments)
+  const reviewThreadComments = []
+  for (const thread of (pr.reviewThreads?.nodes || [])) {
+    for (const c of (thread.comments?.nodes || [])) {
+      if (c.author?.login !== user) {
+        reviewThreadComments.push({
+          user: c.author?.login,
+          body: c.bodyText?.substring(0, 120),
+          at: c.createdAt,
+          path: c.path,
+          line: c.line,
+          type: "review"
+        })
+      }
+    }
+  }
+
+  // Issue comments
+  const issueComments = (pr.comments?.nodes || [])
+    .filter(c => c.author?.login !== user)
+    .map(c => ({
+      user: c.author?.login,
+      body: c.bodyText?.substring(0, 120),
+      at: c.createdAt,
+      type: "issue"
+    }))
+
+  // Merge both streams, newest first (ISO timestamps compare lexically).
+  const allComments = [...reviewThreadComments, ...issueComments]
+    .sort((a, b) => b.at.localeCompare(a.at))
+
+  // Split into new (after last commit) and old
+  const newComments = lastCommitDate
+    ? allComments.filter(c => c.at > lastCommitDate)
+    : []
+  const oldComments = lastCommitDate
+    ? allComments.filter(c => c.at <= lastCommitDate)
+    : allComments
+
+  // Review approval status — dedupe to latest review per human user
+  // (bots and self-reviews excluded; only the most recent state counts).
+  const latestByUser = {}
+  for (const r of (pr.reviews?.nodes || [])) {
+    const login = r.author?.login
+    if (!login || login.endsWith("[bot]")) continue
+    if (login === user) continue
+    if (!latestByUser[login] || r.submittedAt > latestByUser[login].submittedAt) {
+      latestByUser[login] = r
+    }
+  }
+  const approvals = Object.values(latestByUser).filter(r => r.state === "APPROVED").map(r => r.author.login)
+  const changesRequested = Object.values(latestByUser).filter(r => r.state === "CHANGES_REQUESTED").map(r => r.author.login)
+  const reviewerCount = Object.keys(latestByUser).length
+
+  return {
+    number: n,
+    repo,
+    title: pr.title,
+    branch: pr.headRefName,
+    draft: pr.isDraft,
+    isNew: newPrNumbers.has(n),
+    lastCommitSha: sha,
+    lastCommitDate,
+    comments: {
+      total: allComments.length,
+      new: newComments.length,
+      old: oldComments.length,
+      newComments: newComments.map(c => ({ user: c.user, at: c.at, path: c.path, line: c.line, body: c.body })),
+      latest: allComments[0] ? { user: allComments[0].user, at: allComments[0].at } : null
+    },
+    reviews: {
+      approvals,
+      changesRequested,
+      reviewerCount
+    },
+    checks: {
+      // Named CI checks tracked for this project; absent checks report "none".
+      bugbot: checkInfo(contexts, "Cursor Bugbot"),
+      ci: checkInfo(contexts, "Travis CI - Pull Request"),
+      codeql: checkInfo(contexts, "Analyze (javascript-typescript)")
+    }
+  }
+})
+
+// Calculate recommended interval
+const secsUntilReset = rlResetAt ? Math.max(1, Math.floor((new Date(rlResetAt).getTime() - Date.now()) / 1000)) : 3600
+const budgetCalls = rlRemaining != null ? Math.floor(rlRemaining * BUDGET) : 2500
+const pollsAvailable = budgetCalls > 0 ? Math.floor(budgetCalls / rlCost) : 1
+const recommendedInterval = Math.max(30, Math.ceil(secsUntilReset / pollsAvailable))
+
+// Poll metadata surfaced to consumers; pr-watch.sh reads recommendedInterval.
+const meta = {
+  backend: "graphql",
+  queryCost: rlCost,
+  rateLimitRemaining: rlRemaining,
+  rateLimitLimit: rlLimit,
+  rateLimitResetAt: rlResetAt,
+  recommendedInterval
+}
+
+// JSON mode: emit the full machine-readable document and stop before any
+// ANSI/text rendering.
+if (FORMAT === "json") {
+  console.log(JSON.stringify({ user, owner: OWNER, repo: REPO || null, timestamp: new Date().toISOString(), meta, prs: results }, null, 2))
+  process.exit(0)
+}
+
+// Text output — FORCE_COLOR env var overrides TTY detection (for pr-watch subshell)
+const IS_TTY = process.env.FORCE_COLOR === "1" || process.stdout.isTTY
+// ANSI SGR codes; all empty strings when not a TTY so plain text stays clean.
+const B = IS_TTY ? "\x1b[1m" : ""   // bold
+const D = IS_TTY ? "\x1b[2m" : ""   // dim
+const R = IS_TTY ? "\x1b[0m" : ""   // reset
+const GR = IS_TTY ? "\x1b[32m" : "" // green
+const YL = IS_TTY ? "\x1b[33m" : "" // yellow
+const RD = IS_TTY ? "\x1b[31m" : "" // red
+const CY = IS_TTY ? "\x1b[36m" : "" // cyan
+const MG = IS_TTY ? "\x1b[35m" : "" // magenta
+const LINE = "─".repeat(72)
+const multiRepo = !REPO
+
+// Render one check's status: dash when absent, hourglass while running,
+// then an icon per conclusion once completed.
+function fmtCheck(label, c) {
+  if (c.status === "none") return D + label + " —" + R
+  if (c.status !== "completed") return YL + "⏳ " + label + R
+  if (c.conclusion === "success") return GR + "✅ " + label + R
+  if (c.conclusion === "neutral") return YL + "⚠️ " + label + R
+  if (c.conclusion === "failure") return RD + "❌ " + label + R
+  return label + " " + (c.conclusion || "?")
+}
+
+// One-line review summary. Precedence: changes requested > fully approved
+// (every deduped reviewer approved) > partially approved > awaiting > none.
+function fmtReview(pr) {
+  const { approvals, changesRequested, reviewerCount } = pr.reviews
+  if (changesRequested.length > 0)
+    return `${RD}❌ Changes requested${R} ${D}(${changesRequested.join(", ")})${R}`
+  if (approvals.length > 0 && approvals.length >= reviewerCount && reviewerCount > 0)
+    return `${GR}✅ Approved${R} ${D}(${approvals.join(", ")})${R}`
+  if (approvals.length > 0)
+    return `${GR}👍 ${approvals.length}/${reviewerCount} approved${R} ${D}(${approvals.join(", ")})${R}`
+  if (reviewerCount > 0)
+    return `${YL}👀 Awaiting review${R}`
+  return `${D}No reviews${R}`
+}
+
+// Classify a PR into a display tier (0 = best, sorts first) with a colored
+// tag and emoji. Note: a "neutral" Bugbot conclusion is treated as
+// attention-worthy (bugbotIssues) rather than passing.
+function prState(pr) {
+  const hasApproval = pr.reviews.approvals.length > 0
+  const hasChangesRequested = pr.reviews.changesRequested.length > 0
+  const hasNew = pr.comments.new > 0
+  // "none" counts as OK so repos without a given check aren't penalized.
+  const bugbotOk = pr.checks.bugbot.conclusion === "success" || pr.checks.bugbot.status === "none"
+  const ciOk = pr.checks.ci.conclusion === "success" || pr.checks.ci.status === "none"
+  const ciFail = pr.checks.ci.conclusion === "failure"
+  const ciPending = pr.checks.ci.status !== "completed" && pr.checks.ci.status !== "none"
+  const bugbotPending = pr.checks.bugbot.status !== "completed" && pr.checks.bugbot.status !== "none"
+  const bugbotIssues = pr.checks.bugbot.conclusion === "neutral"
+  const checksGreen = bugbotOk && ciOk
+
+  if (ciFail || hasChangesRequested)
+    return { tier: 5, tag: `${RD}${B}BLOCKED${R}`, emoji: "🔴" }
+  if (hasNew || bugbotIssues)
+    return { tier: 4, tag: `${YL}${B}ATTENTION${R}`, emoji: "🟡" }
+  if (ciPending || bugbotPending)
+    return { tier: 3, tag: `${YL}PENDING${R}`, emoji: "⏳" }
+  if (hasApproval && checksGreen)
+    return { tier: 0, tag: `${GR}${B}READY${R}`, emoji: "🚀" }
+  if (hasApproval)
+    return { tier: 1, tag: `${GR}APPROVED${R}`, emoji: "👍" }
+  if (checksGreen)
+    return { tier: 2, tag: `${GR}CLEAR${R}`, emoji: "🟢" }
+  // Fallback shares tier 3 with PENDING by design: both are "waiting" states.
+  return { tier: 3, tag: `${D}OPEN${R}`, emoji: "⚪" }
+}
+
+// Non-mutating sort: by tier ascending, then by most recent activity
+// (latest comment, falling back to last commit date) descending.
+function sortedPRs(list) {
+  return [...list].sort((a, b) => {
+    const ta = prState(a).tier, tb = prState(b).tier
+    if (ta !== tb) return ta - tb
+    const da = a.comments.latest?.at || a.lastCommitDate || ""
+    const db = b.comments.latest?.at || b.lastCommitDate || ""
+    return db.localeCompare(da)
+  })
+}
+
+// Render one PR as five display lines (header, branch/URL, reviews,
+// comments, checks), each indented by `indent` spaces for repo grouping.
+function renderPR(pr, indent) {
+  const state = prState(pr)
+  const draft = pr.draft ? ` ${D}[draft]${R}` : ""
+  const newPrTag = pr.isNew ? ` ${MG}${B}NEW${R}` : ""
+  // Truncate long titles so header lines stay within the 72-col rule width.
+  const title = pr.title.length > 45 ? pr.title.substring(0, 42) + "..." : pr.title
+  const newTag = pr.comments.new > 0
+    ? ` ${RD}${B}🔔 +${pr.comments.new} new${R}`
+    : ""
+  const latestInfo = pr.comments.latest
+    ? `${D}${pr.comments.latest.user} ${relTime(pr.comments.latest.at)}${R}`
+    : `${D}none${R}`
+  const pad = " ".repeat(indent)
+  const prUrl = `https://github.com/${OWNER}/${pr.repo}/pull/${pr.number}`
+
+  const lines = []
+  lines.push(`${pad}${state.emoji} ${state.tag} ${B}#${pr.number}${R}${draft}${newPrTag} ${CY}${title}${R}`)
+  lines.push(`${pad}   ${D}↳${R} ${MG}${pr.branch}${R} ${D}${prUrl}${R}`)
+  lines.push(`${pad}   ${fmtReview(pr)}`)
+  lines.push(`${pad}   💬 ${pr.comments.total}${newTag} ${D}latest:${R} ${latestInfo}`)
+  lines.push(`${pad}   ${fmtCheck("Bugbot", pr.checks.bugbot)} ${fmtCheck("CI", pr.checks.ci)} ${fmtCheck("CodeQL", pr.checks.codeql)}`)
+  return lines
+}
+
+// Assemble the text report: header, then PRs (grouped by repo in org-wide
+// mode, repos ordered by their most active PR), then a rate-limit footer.
+const scope = REPO ? `${OWNER}/${REPO}` : `${OWNER}/*`
+const out = []
+out.push(`${B}${scope}${R} ${D}— ${user} — ${results.length} open PR(s)${R}`)
+out.push(`${D}${LINE}${R}`)
+
+if (!results.length) {
+  out.push(`${D}No open PRs by ${user}${R}`)
+} else if (multiRepo) {
+  const byRepo = {}
+  for (const pr of results) {
+    if (!byRepo[pr.repo]) byRepo[pr.repo] = []
+    byRepo[pr.repo].push(pr)
+  }
+  // Order repos by the activity timestamp of each repo's top-sorted PR.
+  const repoOrder = Object.keys(byRepo).sort((a, b) => {
+    const latestA = sortedPRs(byRepo[a])[0]
+    const latestB = sortedPRs(byRepo[b])[0]
+    const da = latestA.comments.latest?.at || latestA.lastCommitDate || ""
+    const db = latestB.comments.latest?.at || latestB.lastCommitDate || ""
+    return db.localeCompare(da)
+  })
+  for (const repo of repoOrder) {
+    out.push(``)
+    out.push(`${B}${repo}${R} ${D}(${byRepo[repo].length})${R}`)
+    for (const pr of sortedPRs(byRepo[repo])) {
+      out.push("")
+      out.push(...renderPR(pr, 2))
+    }
+  }
+} else {
+  for (const pr of sortedPRs(results)) {
+    out.push("")
+    out.push(...renderPR(pr, 0))
+  }
+}
+
+// Footer with rate limit info
+out.push("")
+const rlInfo = rlRemaining != null
+  ? `GQL: ${rlRemaining}/${rlLimit} remaining (cost ${rlCost})`
+  : "GQL: unknown"
+out.push(`${D}${LINE}${R}`)
+out.push(`${D}${rlInfo} | next: ${recommendedInterval}s${R}`)
+
+// Machine-readable line for pr-watch.sh to parse
+out.push(`# interval:${recommendedInterval}`)
+
+console.log(out.join("\n"))
+' "$OWNER" "$REPO" "$USER" "$FORMAT" "$BUDGET" "$STATE_DIR" "$GQL_RESULT"
diff --git a/.cursor/scripts/pr-status.sh b/.cursor/scripts/pr-status.sh
new file mode 100755
index 0000000..44519c7
--- /dev/null
+++ b/.cursor/scripts/pr-status.sh
@@ -0,0 +1,407 @@
+#!/usr/bin/env bash
+# pr-status.sh — Fetch status of open PRs for a user via gh CLI.
+# Single run, no TUI. "New" comments = posted after the PR's last commit.
+#
+# Uses gh CLI for all API access (no GITHUB_TOKEN needed).
+# Per-PR updated_at caching to skip detail fetches for unchanged PRs.
+#
+# Usage:
+#   pr-status.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge] [--format text|json]
+#   pr-status.sh                       # All repos for user in EdgeApp org
+#   pr-status.sh --user Jon-edge       # All repos for specific user in EdgeApp org
+#
+# Requires: gh CLI (authenticated), node.
+set -euo pipefail
+
+# NOTE(review): assigning USER here shadows the shell's exported $USER for this
+# process and its children — consider renaming to PR_USER; confirm nothing
+# invoked below relies on the login name.
+OWNER="EdgeApp" REPO="" USER="" FORMAT="text"
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --owner) OWNER="$2"; shift 2 ;;
+    --repo) REPO="$2"; shift 2 ;;
+    --user) USER="$2"; shift 2 ;;
+    --format) FORMAT="$2"; shift 2 ;;
+    *) echo "Unknown arg: $1" >&2; exit 1 ;;
+  esac
+done
+
+# Fail fast when gh is missing or unauthenticated.
+# (The `2>&1` after `&>/dev/null` on the next line is redundant but harmless.)
+command -v gh &>/dev/null || { echo "Error: gh CLI not found. Install: https://cli.github.com" >&2; exit 2; }
+gh auth status &>/dev/null 2>&1 || { echo "Error: gh not authenticated. Run: gh auth login" >&2; exit 2; }
+
+# Per-scope cache directory, shared with pr-watch.sh across polls.
+STATE_DIR="${TMPDIR:-/tmp}/pr-watch-${OWNER}-${REPO:-all}"
+mkdir -p "$STATE_DIR"
+export STATE_DIR
+
+exec node -e '
+const { execFile } = require("child_process")
+const fs = require("fs")
+// Positional args passed by the bash wrapper after the script string.
+const { OWNER, REPO, USER, FORMAT } = {
+  OWNER: process.argv[1],
+  REPO: process.argv[2] || "",
+  USER: process.argv[3],
+  FORMAT: process.argv[4]
+}
+const STATE_DIR = process.env.STATE_DIR
+
+// Running total of gh API invocations, reported in meta/footer.
+let apiCallCount = 0
+
+// GET a REST path via `gh api`, resolving parsed JSON, or null on any
+// error (spawn failure, non-zero exit, unparsable output) — callers must
+// null-check. Never rejects.
+function ghFetch(path, extraArgs) {
+  return new Promise((resolve) => {
+    apiCallCount++
+    const args = ["api", path]
+    if (extraArgs) args.push(...extraArgs)
+    execFile("gh", args, { encoding: "utf8", maxBuffer: 10 * 1024 * 1024 }, (err, stdout) => {
+      if (err) { resolve(null); return }
+      try { resolve(JSON.parse(stdout)) } catch { resolve(null) }
+    })
+  })
+}
+
+// --- Per-PR updated_at caching ---
+// Each PR's computed result is stored alongside the updated_at it was built
+// from; a matching timestamp on the next poll skips the detail fetches.
+function loadPrCache(number) {
+  try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/pr-${number}.json`, "utf8")) } catch { return null }
+}
+
+function savePrCache(number, result, updatedAt) {
+  fs.writeFileSync(`${STATE_DIR}/pr-${number}.json`, JSON.stringify({ updatedAt, result }))
+}
+
+// Snapshot of PR numbers from the previous poll, for NEW-PR detection.
+function loadPreviousPrNumbers() {
+  try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/known-prs.json`, "utf8")) } catch { return [] }
+}
+
+function savePrNumbers(numbers) {
+  fs.writeFileSync(`${STATE_DIR}/known-prs.json`, JSON.stringify(numbers))
+}
+
+// --- Concurrency limiter ---
+// Run fn over items with at most `concurrency` in flight; preserves order.
+async function pool(items, concurrency, fn) {
+  const results = new Array(items.length)
+  let next = 0
+  async function worker() {
+    while (next < items.length) {
+      const i = next++
+      results[i] = await fn(items[i], i)
+    }
+  }
+  await Promise.all(Array.from({ length: Math.min(concurrency, items.length) }, () => worker()))
+  return results
+}
+
+// --- Utilities ---
+// Human-readable age of an ISO timestamp ("5m ago", "3h ago", "2d ago").
+function relTime(iso) {
+  if (!iso) return "-"
+  const ms = Date.now() - new Date(iso).getTime()
+  const m = Math.floor(ms / 60000)
+  if (m < 60) return m + "m ago"
+  const h = Math.floor(m / 60)
+  if (h < 24) return h + "h ago"
+  return Math.floor(h / 24) + "d ago"
+}
+
+// Find a named check-run in a REST check-runs list. Unlike the GraphQL
+// backend, status/conclusion are passed through as-is (REST already
+// reports them lowercase).
+function checkInfo(runs, name) {
+  const run = (runs || []).find(c => c.name === name)
+  if (!run) return { status: "none", conclusion: null }
+  return { status: run.status, conclusion: run.conclusion }
+}
+
+// Orchestrates one poll: enumerate the user's open PRs, build (or reuse
+// cached) per-PR status results, compute a rate-limit-aware poll interval,
+// and print either JSON or the ANSI text report. Exits non-zero on API errors.
+async function main() {
+  // Resolve the acting user: --user flag, else the authenticated gh account.
+  let user = USER
+  if (!user) {
+    const me = await ghFetch("/user")
+    user = me?.login || "unknown"
+  }
+
+  const previousPrNumbers = loadPreviousPrNumbers()
+
+  // Enumerate open PRs: direct listing when --repo is given, otherwise an
+  // org-wide issue search followed by per-PR hydration (search results lack
+  // head/updated_at detail).
+  let prs
+  if (REPO) {
+    const allPRs = await ghFetch(`/repos/${OWNER}/${REPO}/pulls?state=open&per_page=30`)
+    if (!Array.isArray(allPRs)) {
+      process.stderr.write("API error fetching PRs\n")
+      process.exit(1)
+    }
+    prs = allPRs
+      .filter(p => p.user.login === user)
+      .map(p => ({ ...p, _repo: REPO }))
+  } else {
+    const q = encodeURIComponent(`type:pr state:open author:${user} org:${OWNER}`)
+    const search = await ghFetch(`/search/issues?q=${q}&per_page=50&sort=updated&order=desc`)
+    if (!search?.items) {
+      process.stderr.write("API error searching PRs\n")
+      process.exit(1)
+    }
+    prs = await pool(search.items, 4, async item => {
+      const repo = item.repository_url.split("/").pop()
+      const full = await ghFetch(`/repos/${OWNER}/${repo}/pulls/${item.number}`)
+      return { ...full, _repo: repo }
+    })
+  }
+
+  // PRs absent from the previous snapshot are flagged NEW for one cycle.
+  const currentPrNumbers = prs.map(p => p.number)
+  const newPrNumbers = new Set(currentPrNumbers.filter(n => !previousPrNumbers.includes(n)))
+  savePrNumbers(currentPrNumbers)
+
+  let changedPrCount = 0
+
+  // Build each PR result, 4 concurrently.
+  const results = await pool(prs, 4, async pr => {
+    const repo = pr._repo
+    const n = pr.number
+    const sha = pr.head.sha
+    const updatedAt = pr.updated_at
+
+    // Cache hit: unchanged since last poll — reuse the stored result and
+    // skip the five detail fetches below. isNew is forced false since the
+    // PR was, by definition, seen before.
+    const cached = loadPrCache(n)
+    if (cached && cached.updatedAt === updatedAt && !newPrNumbers.has(n)) {
+      return { ...cached.result, isNew: false }
+    }
+
+    changedPrCount++
+
+    const [inline, issue, checks, commits, reviews] = await Promise.all([
+      ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/comments?per_page=100`),
+      ghFetch(`/repos/${OWNER}/${repo}/issues/${n}/comments?per_page=100`),
+      ghFetch(`/repos/${OWNER}/${repo}/commits/${sha}/check-runs`),
+      ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/commits?per_page=100`),
+      ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/reviews?per_page=100`)
+    ])
+
+    // The commits endpoint lists oldest-first, so the last entry is the
+    // newest commit; prefer committer date, fall back to author date.
+    const commitList = Array.isArray(commits) ? commits : []
+    const lastCommit = commitList.length > 0 ? commitList[commitList.length - 1] : null
+    const lastCommitDate = lastCommit?.commit?.committer?.date
+      || lastCommit?.commit?.author?.date
+      || null
+
+    // Merge inline review comments and issue comments (excluding the PR
+    // author's own), newest first.
+    const allComments = [
+      ...(Array.isArray(inline) ? inline : [])
+        .filter(c => c.user?.login !== user)
+        .map(c => ({ id: c.id, user: c.user?.login, body: c.body?.substring(0, 120), at: c.created_at, path: c.path, line: c.line, type: "review" })),
+      ...(Array.isArray(issue) ? issue : [])
+        .filter(c => c.user?.login !== user)
+        .map(c => ({ id: c.id, user: c.user?.login, body: c.body?.substring(0, 120), at: c.created_at, type: "issue" }))
+    ].sort((a, b) => b.at.localeCompare(a.at))
+
+    // "New" = posted after the last commit; with no commit date everything
+    // counts as old.
+    const newComments = lastCommitDate
+      ? allComments.filter(c => c.at > lastCommitDate)
+      : []
+    const oldComments = lastCommitDate
+      ? allComments.filter(c => c.at <= lastCommitDate)
+      : allComments
+
+    const checkRuns = checks?.check_runs || []
+
+    // Dedupe to each human reviewer's latest review (bots and self skipped).
+    const reviewList = Array.isArray(reviews) ? reviews : []
+    const latestByUser = {}
+    for (const r of reviewList) {
+      const login = r.user?.login
+      if (!login || login.endsWith("[bot]")) continue
+      if (login === user) continue
+      if (!latestByUser[login] || r.submitted_at > latestByUser[login].submitted_at) {
+        latestByUser[login] = r
+      }
+    }
+    const approvals = Object.values(latestByUser).filter(r => r.state === "APPROVED").map(r => r.user.login)
+    const changesRequested = Object.values(latestByUser).filter(r => r.state === "CHANGES_REQUESTED").map(r => r.user.login)
+    const reviewerCount = Object.keys(latestByUser).length
+
+    // Result shape shared with the GraphQL backend (pr-status-gql.sh).
+    const result = {
+      number: n,
+      repo,
+      title: pr.title,
+      branch: pr.head.ref,
+      draft: pr.draft,
+      isNew: newPrNumbers.has(n),
+      lastCommitSha: sha.substring(0, 7),
+      lastCommitDate,
+      comments: {
+        total: allComments.length,
+        new: newComments.length,
+        old: oldComments.length,
+        newComments: newComments.map(c => ({ user: c.user, at: c.at, path: c.path, line: c.line, body: c.body })),
+        latest: allComments[0] ? { user: allComments[0].user, at: allComments[0].at } : null
+      },
+      reviews: {
+        approvals,
+        changesRequested,
+        reviewerCount
+      },
+      checks: {
+        bugbot: checkInfo(checkRuns, "Cursor Bugbot"),
+        ci: checkInfo(checkRuns, "Travis CI - Pull Request"),
+        codeql: checkInfo(checkRuns, "Analyze (javascript-typescript)")
+      }
+    }
+
+    savePrCache(n, result, updatedAt)
+    return result
+  })
+
+  // Fetch rate limit info
+  const rateLimit = await ghFetch("/rate_limit")
+  const rateLimitRemaining = rateLimit?.resources?.core?.remaining ?? null
+  const rateLimitLimit = rateLimit?.resources?.core?.limit ?? null
+  const rateLimitReset = rateLimit?.resources?.core?.reset ?? null
+
+  // Recommend an interval that spends at most ~2/3 of the remaining core
+  // quota before reset, never polling more often than every 30s.
+  const callsPerPoll = apiCallCount
+  const secsUntilReset = rateLimitReset ? Math.max(1, rateLimitReset - Math.floor(Date.now() / 1000)) : 3600
+  const budgetCalls = rateLimitRemaining != null ? Math.floor(rateLimitRemaining * 0.67) : 2500
+  const recommendedInterval = budgetCalls > 0 ? Math.max(30, Math.ceil(secsUntilReset / (budgetCalls / callsPerPoll))) : 300
+
+  const meta = {
+    apiCalls: apiCallCount,
+    changedPrs: changedPrCount,
+    rateLimitRemaining,
+    rateLimitLimit,
+    rateLimitReset,
+    recommendedInterval
+  }
+
+  if (FORMAT === "json") {
+    console.log(JSON.stringify({ user, owner: OWNER, repo: REPO || null, timestamp: new Date().toISOString(), meta, prs: results }, null, 2))
+    return
+  }
+
+  // Text output — FORCE_COLOR env var overrides TTY detection (for pr-watch subshell)
+  // Rendering below mirrors the GraphQL backend so pr-watch.sh output is
+  // identical regardless of backend.
+  const IS_TTY = process.env.FORCE_COLOR === "1" || process.stdout.isTTY
+  const B = IS_TTY ? "\x1b[1m" : ""
+  const D = IS_TTY ? "\x1b[2m" : ""
+  const R = IS_TTY ? "\x1b[0m" : ""
+  const GR = IS_TTY ? "\x1b[32m" : ""
+  const YL = IS_TTY ? "\x1b[33m" : ""
+  const RD = IS_TTY ? "\x1b[31m" : ""
+  const CY = IS_TTY ? "\x1b[36m" : ""
+  const MG = IS_TTY ? "\x1b[35m" : ""
+  const LINE = "─".repeat(72)
+  const multiRepo = !REPO
+
+  // Render one check's status icon per status/conclusion.
+  function fmtCheck(label, c) {
+    if (c.status === "none") return D + label + " —" + R
+    if (c.status !== "completed") return YL + "⏳ " + label + R
+    if (c.conclusion === "success") return GR + "✅ " + label + R
+    if (c.conclusion === "neutral") return YL + "⚠️ " + label + R
+    if (c.conclusion === "failure") return RD + "❌ " + label + R
+    return label + " " + (c.conclusion || "?")
+  }
+
+  // One-line review summary; changes-requested wins over approvals.
+  function fmtReview(pr) {
+    const { approvals, changesRequested, reviewerCount } = pr.reviews
+    if (changesRequested.length > 0)
+      return `${RD}❌ Changes requested${R} ${D}(${changesRequested.join(", ")})${R}`
+    if (approvals.length > 0 && approvals.length >= reviewerCount && reviewerCount > 0)
+      return `${GR}✅ Approved${R} ${D}(${approvals.join(", ")})${R}`
+    if (approvals.length > 0)
+      return `${GR}👍 ${approvals.length}/${reviewerCount} approved${R} ${D}(${approvals.join(", ")})${R}`
+    if (reviewerCount > 0)
+      return `${YL}👀 Awaiting review${R}`
+    return `${D}No reviews${R}`
+  }
+
+  // Classify into display tiers (0 best); neutral Bugbot = needs attention.
+  function prState(pr) {
+    const hasApproval = pr.reviews.approvals.length > 0
+    const hasChangesRequested = pr.reviews.changesRequested.length > 0
+    const hasNew = pr.comments.new > 0
+    const bugbotOk = pr.checks.bugbot.conclusion === "success" || pr.checks.bugbot.status === "none"
+    const ciOk = pr.checks.ci.conclusion === "success" || pr.checks.ci.status === "none"
+    const ciFail = pr.checks.ci.conclusion === "failure"
+    const ciPending = pr.checks.ci.status !== "completed" && pr.checks.ci.status !== "none"
+    const bugbotPending = pr.checks.bugbot.status !== "completed" && pr.checks.bugbot.status !== "none"
+    const bugbotIssues = pr.checks.bugbot.conclusion === "neutral"
+    const checksGreen = bugbotOk && ciOk
+
+    if (ciFail || hasChangesRequested)
+      return { tier: 5, tag: `${RD}${B}BLOCKED${R}`, emoji: "🔴" }
+    if (hasNew || bugbotIssues)
+      return { tier: 4, tag: `${YL}${B}ATTENTION${R}`, emoji: "🟡" }
+    if (ciPending || bugbotPending)
+      return { tier: 3, tag: `${YL}PENDING${R}`, emoji: "⏳" }
+    if (hasApproval && checksGreen)
+      return { tier: 0, tag: `${GR}${B}READY${R}`, emoji: "🚀" }
+    if (hasApproval)
+      return { tier: 1, tag: `${GR}APPROVED${R}`, emoji: "👍" }
+    if (checksGreen)
+      return { tier: 2, tag: `${GR}CLEAR${R}`, emoji: "🟢" }
+    return { tier: 3, tag: `${D}OPEN${R}`, emoji: "⚪" }
+  }
+
+  // Non-mutating sort: tier asc, then latest activity desc.
+  function sortedPRs(list) {
+    return [...list].sort((a, b) => {
+      const ta = prState(a).tier, tb = prState(b).tier
+      if (ta !== tb) return ta - tb
+      const da = a.comments.latest?.at || a.lastCommitDate || ""
+      const db = b.comments.latest?.at || b.lastCommitDate || ""
+      return db.localeCompare(da)
+    })
+  }
+
+  // Render one PR as five display lines, indented for repo grouping.
+  function renderPR(pr, indent) {
+    const state = prState(pr)
+    const draft = pr.draft ? ` ${D}[draft]${R}` : ""
+    const newPrTag = pr.isNew ? ` ${MG}${B}NEW${R}` : ""
+    const title = pr.title.length > 45 ? pr.title.substring(0, 42) + "..." : pr.title
+    const newTag = pr.comments.new > 0
+      ? ` ${RD}${B}🔔 +${pr.comments.new} new${R}`
+      : ""
+    const latestInfo = pr.comments.latest
+      ? `${D}${pr.comments.latest.user} ${relTime(pr.comments.latest.at)}${R}`
+      : `${D}none${R}`
+    const pad = " ".repeat(indent)
+    const prUrl = `https://github.com/${OWNER}/${pr.repo}/pull/${pr.number}`
+
+    const lines = []
+    lines.push(`${pad}${state.emoji} ${state.tag} ${B}#${pr.number}${R}${draft}${newPrTag} ${CY}${title}${R}`)
+    lines.push(`${pad}   ${D}↳${R} ${MG}${pr.branch}${R} ${D}${prUrl}${R}`)
+    lines.push(`${pad}   ${fmtReview(pr)}`)
+    lines.push(`${pad}   💬 ${pr.comments.total}${newTag} ${D}latest:${R} ${latestInfo}`)
+    lines.push(`${pad}   ${fmtCheck("Bugbot", pr.checks.bugbot)} ${fmtCheck("CI", pr.checks.ci)} ${fmtCheck("CodeQL", pr.checks.codeql)}`)
+    return lines
+  }
+
+  // Assemble the report: header, grouped/sorted PRs, rate-limit footer.
+  const scope = REPO ? `${OWNER}/${REPO}` : `${OWNER}/*`
+  const out = []
+  out.push(`${B}${scope}${R} ${D}— ${user} — ${results.length} open PR(s)${R}`)
+  out.push(`${D}${LINE}${R}`)
+
+  if (!results.length) {
+    out.push(`${D}No open PRs by ${user}${R}`)
+  } else if (multiRepo) {
+    const byRepo = {}
+    for (const pr of results) {
+      if (!byRepo[pr.repo]) byRepo[pr.repo] = []
+      byRepo[pr.repo].push(pr)
+    }
+    // Order repos by the activity timestamp of each repo's top-sorted PR.
+    const repoOrder = Object.keys(byRepo).sort((a, b) => {
+      const latestA = sortedPRs(byRepo[a])[0]
+      const latestB = sortedPRs(byRepo[b])[0]
+      const da = latestA.comments.latest?.at || latestA.lastCommitDate || ""
+      const db = latestB.comments.latest?.at || latestB.lastCommitDate || ""
+      return db.localeCompare(da)
+    })
+    for (const repo of repoOrder) {
+      out.push(``)
+      out.push(`${B}${repo}${R} ${D}(${byRepo[repo].length})${R}`)
+      for (const pr of sortedPRs(byRepo[repo])) {
+        out.push("")
+        out.push(...renderPR(pr, 2))
+      }
+    }
+  } else {
+    for (const pr of sortedPRs(results)) {
+      out.push("")
+      out.push(...renderPR(pr, 0))
+    }
+  }
+
+  // Footer with rate limit info
+  out.push("")
+  const rlInfo = rateLimitRemaining != null
+    ? `API: ${rateLimitRemaining}/${rateLimitLimit} remaining`
+    : "API: unknown"
+  out.push(`${D}${LINE}${R}`)
+  out.push(`${D}${rlInfo} | ${apiCallCount} calls | next: ${recommendedInterval}s${R}`)
+
+  // Machine-readable line for pr-watch.sh to parse
+  out.push(`# interval:${recommendedInterval}`)
+
+  console.log(out.join("\n"))
+}
+
+main().catch(e => { process.stderr.write("Error: " + e.message + "\n"); process.exit(1) })
+' "$OWNER" "$REPO" "$USER" "$FORMAT"
diff --git a/.cursor/scripts/pr-watch.sh b/.cursor/scripts/pr-watch.sh
new file mode 100755
index 0000000..e257d5b
--- /dev/null
+++ b/.cursor/scripts/pr-watch.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+# pr-watch.sh — TUI wrapper around pr-status scripts.
+# Redraws in-place on each poll. Ctrl+C to stop.
+#
+# Usage:
+#   pr-watch.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge]
+#   pr-watch.sh                  # All repos, auto interval, GQL backend
+#   pr-watch.sh --backend rest   # Force REST backend
+#   pr-watch.sh --interval 60    # Override interval (clamped to safe minimum)
+#   pr-watch.sh --budget 0.5     # Reserve 50% of rate limit budget
+#   pr-watch.sh --once [...]     # Single poll, no clear, no loop. For agent/script use.
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+# Wrapper-only flags are consumed here; everything else is forwarded verbatim
+# to the status script via ARGS.
+ARGS=() INTERVAL="" ONCE=false BACKEND="" BUDGET=""
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --interval) INTERVAL="$2"; shift 2 ;;
+    --once) ONCE=true; shift ;;
+    --backend) BACKEND="$2"; shift 2 ;;
+    --budget) BUDGET="$2"; shift 2 ;;
+    *) ARGS+=("$1"); shift ;;
+  esac
+done
+
+# Inject --owner default if not already in ARGS
+# NOTE(review): the grep is a substring match, so any argument merely
+# containing "--owner" suppresses the default — confirm exact-match is not
+# needed here.
+if [[ ${#ARGS[@]} -eq 0 ]] || ! printf '%s\n' "${ARGS[@]}" | grep -q -- '--owner'; then
+  ARGS+=(--owner EdgeApp)
+fi
+
+# Auto-detect backend: prefer gql if gh CLI is available
+if [[ -z "$BACKEND" ]]; then
+  if command -v gh &>/dev/null && gh auth status &>/dev/null; then
+    BACKEND="gql"
+  else
+    BACKEND="rest"
+  fi
+fi
+
+# Select the status script
+if [[ "$BACKEND" == "gql" ]]; then
+  STATUS_SCRIPT="$SCRIPT_DIR/pr-status-gql.sh"
+else
+  STATUS_SCRIPT="$SCRIPT_DIR/pr-status.sh"
+fi
+
+# Pass budget through if specified
+if [[ -n "$BUDGET" ]]; then
+  ARGS+=(--budget "$BUDGET")
+fi
+
+# --once: single poll with a timestamp header, no screen clearing, no loop.
+if $ONCE; then
+  NOW=$(date '+%H:%M:%S')
+  printf '%s\n' "PR Watch — ${NOW} (${BACKEND})"
+  "$STATUS_SCRIPT" "${ARGS[@]}" --format text
+  exit $?
+fi
+
+# TUI loop: poll, parse the recommended interval from the status output,
+# clear the screen, redraw, sleep, repeat. Ctrl+C to stop.
+CURRENT_INTERVAL="${INTERVAL:-60}"
+
+while true; do
+  # `|| true` keeps the loop alive when a poll fails; errors land in OUTPUT.
+  OUTPUT=$(FORCE_COLOR=1 "$STATUS_SCRIPT" "${ARGS[@]}" --format text 2>&1) || true
+  NOW=$(date '+%H:%M:%S')
+
+  # Parse recommended interval from script output.
+  # Fix: `grep -oP` (PCRE lookbehind) is GNU-only and fails on macOS/BSD
+  # grep — use portable sed instead. Empty when the marker line is absent.
+  RECOMMENDED=$(printf '%s\n' "$OUTPUT" | sed -n 's/^# interval:\([0-9][0-9]*\)$/\1/p' | head -n 1)
+
+  # Determine actual sleep interval
+  if [[ -n "$INTERVAL" ]]; then
+    # User-specified interval: clamp to at least the recommended minimum
+    if [[ -n "$RECOMMENDED" ]] && [[ "$INTERVAL" -lt "$RECOMMENDED" ]]; then
+      CURRENT_INTERVAL="$RECOMMENDED"
+    else
+      CURRENT_INTERVAL="$INTERVAL"
+    fi
+  elif [[ -n "$RECOMMENDED" ]]; then
+    CURRENT_INTERVAL="$RECOMMENDED"
+  fi
+
+  # Strip the machine-readable line from display output.
+  # printf (not echo) so content starting with "-" is never treated as flags.
+  DISPLAY_OUTPUT=$(printf '%s\n' "$OUTPUT" | grep -v '^# interval:')
+
+  # Home the cursor and clear the screen, then redraw header + report.
+  printf '\033[H\033[2J'
+  printf '%s\n' "PR Watch — ${NOW} (${BACKEND}, next in ${CURRENT_INTERVAL}s, Ctrl+C to stop)"
+  printf '%s\n' "$DISPLAY_OUTPUT"
+  sleep "$CURRENT_INTERVAL"
+done
diff --git a/.cursor/scripts/push-env-key.sh b/.cursor/scripts/push-env-key.sh
new file mode 100755
index 0000000..fceb8d5
--- /dev/null
+++ b/.cursor/scripts/push-env-key.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+# push-env-key.sh — Update a single key in the server's env.json and push
+#
+# Usage: push-env-key.sh KEY VALUE [-m "commit message"]
+#
+# Examples:
+# push-env-key.sh EDGE_API_KEY abc123
+# push-env-key.sh EDGE_API_KEY abc123 -m "Rotate Edge API key"
+
+set -euo pipefail
+
+SERVER="jack"
+REMOTE_REPO="/home/jon/jenkins-files/master"
+
+KEY=""
+VALUE=""
+COMMIT_MSG=""
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -m) COMMIT_MSG="$2"; shift 2 ;;
+ *)
+ if [[ -z "$KEY" ]]; then KEY="$1"
+ elif [[ -z "$VALUE" ]]; then VALUE="$1"
+ else echo "Unexpected argument: $1" >&2; exit 1
+ fi
+ shift ;;
+ esac
+done
+
+if [[ -z "$KEY" || -z "$VALUE" ]]; then
+ echo "Usage: push-env-key.sh [-m \"commit message\"]" >&2
+ exit 1
+fi
+
+if [[ -z "$COMMIT_MSG" ]]; then
+ COMMIT_MSG="Update $KEY in env.json"
+fi
+
+# Run the update on the server. The heredoc delimiter 'REMOTE' is quoted, so
+# nothing expands locally — KEY/VALUE/MSG/REPO travel as positional args to
+# the remote bash.
+ssh "$SERVER" bash -s -- "$KEY" "$VALUE" "$COMMIT_MSG" "$REMOTE_REPO" <<'REMOTE'
+  set -euo pipefail
+  KEY="$1"
+  VALUE="$2"
+  MSG="$3"
+  REPO="$4"
+
+  cd "$REPO"
+  git pull --ff-only
+
+  # Skip the commit entirely when the key already holds the target value.
+  CURRENT=$(jq -r --arg k "$KEY" '.[$k] // empty' env.json)
+  if [[ "$CURRENT" == "$VALUE" ]]; then
+    echo "No change: $KEY is already set to that value."
+    exit 0
+  fi
+
+  # Write via a temp file so a failed jq never truncates env.json.
+  jq --arg k "$KEY" --arg v "$VALUE" '.[$k] = $v' env.json > env.json.tmp
+  mv env.json.tmp env.json
+
+  git add env.json
+  git commit -m "$MSG"
+  git push
+  echo "Done: $KEY updated and pushed."
+REMOTE
diff --git a/.cursor/scripts/tool-sync.sh b/.cursor/scripts/tool-sync.sh
new file mode 100755
index 0000000..b8c3d71
--- /dev/null
+++ b/.cursor/scripts/tool-sync.sh
@@ -0,0 +1,406 @@
+#!/usr/bin/env bash
+# tool-sync.sh — Sync Cursor rules, skills, and scripts to OpenCode and Claude Code.
+# Source of truth: ~/.cursor/
+# Targets: ~/.config/opencode/, ~/.claude/
+#
+# Usage: tool-sync.sh [--dry-run] [--target opencode|claude|all]
+#   --dry-run   Show what would change without writing files
+#   --target    Sync to a specific target (default: all)
+
+set -euo pipefail
+
+CURSOR_DIR="$HOME/.cursor"
+OPENCODE_DIR="$HOME/.config/opencode"
+CLAUDE_DIR="$HOME/.claude"
+DRY_RUN=false
+TARGET="all"
+
+# Parse CLI flags; any unrecognized flag aborts immediately.
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --dry-run) DRY_RUN=true; shift ;;
+    --target) TARGET="$2"; shift 2 ;;
+    *) echo "Unknown option: $1" >&2; exit 1 ;;
+  esac
+done
+
+# Counters reported in the final summary line.
+created=0
+updated=0
+removed=0
+skipped=0
+
+# Indented logger for per-item output.
+log() { echo " $1"; }
+# Log a file action, tagging the line when running in --dry-run mode.
+log_action() {
+  local action="$1" file="$2"
+  if [[ "$DRY_RUN" == true ]]; then
+    echo " [DRY-RUN] $action: $file"
+  else
+    echo " $action: $file"
+  fi
+}
+
+# ─── Helpers ──────────────────────────────────────────────────────────────────
+
+# Convert .mdc to .md: strip Cursor-specific XML tags, keep content
+# NOTE(review): this function appears GARBLED — the XML-tag literals that the
+# sed expressions matched (and the tag names listed in the comment below)
+# seem to have been stripped from the file. As written, several expressions
+# are no-ops (s|^||) and others anchor on bare ^, which would prepend text
+# to EVERY line of the input. Recover the original tag patterns (e.g.
+# <goal>…</goal>, <rules>, <rule id=…>, <step n=… name=…>) before relying
+# on this conversion.
+mdc_to_md() {
+  local src="$1"
+  # .mdc files are already valid markdown with YAML frontmatter.
+  # Some use , , , XML tags — convert to markdown.
+  sed \
+    -e 's|^\(.*\)|## Goal\n\n\1|' \
+    -e 's|^|## Goal\n|' \
+    -e 's|^||' \
+    -e 's|^|## Rules\n|' \
+    -e 's|^||' \
+    -e 's|^\(.*\)|- **\1**: \2|' \
+    -e 's|^|- **\1**:|' \
+    -e 's|^||' \
+    -e 's|^|### Step \1: \2\n|' \
+    -e 's|^||' \
+    -e '/^$/N;/^\n$/d' \
+    "$src"
+}
+
+# Generate OpenCode JSON metadata from a .mdc rule file
+# Reads the YAML frontmatter (between the first pair of `---` markers) and
+# emits one JSON object: {id, title, description, globs, alwaysApply}.
+generate_rule_json() {
+  local src="$1" name="$2"
+  local description="" always_apply="false" globs="[]"
+
+  # Parse YAML frontmatter line by line; stop at the closing `---`.
+  local in_frontmatter=false
+  while IFS= read -r line; do
+    if [[ "$line" == "---" ]]; then
+      if [[ "$in_frontmatter" == true ]]; then break; fi
+      in_frontmatter=true
+      continue
+    fi
+    if [[ "$in_frontmatter" == true ]]; then
+      case "$line" in
+        description:*) description="${line#description: }" ;;
+        alwaysApply:*) always_apply="${line#alwaysApply: }" ;;
+        globs:*) globs="${line#globs: }" ;;
+      esac
+    fi
+  done < "$src"
+
+  # NOTE(review): --argjson requires `globs` and `alwaysApply` frontmatter
+  # values to already be valid JSON (e.g. ["**/*.ts"] / true). A bare glob
+  # string such as **/*.ts would make jq abort here — confirm the .mdc files
+  # always use JSON-style values.
+  jq -n \
+    --arg id "$name" \
+    --arg title "$name" \
+    --arg description "$description" \
+    --argjson globs "$globs" \
+    --argjson alwaysApply "$always_apply" \
+    '{id: $id, title: $title, description: $description, globs: $globs, alwaysApply: $alwaysApply}'
+}
+
+# Generate OpenCode JSON metadata from a command .md file
+# NOTE(review): both awk programs below use the three-argument
+# match(string, regexp, array) form, which is a GNU awk (gawk) extension —
+# on BSD/macOS awk they fail, in which case the `jq -s` fallback quietly
+# yields empty rules/steps arrays. Confirm gawk is the target awk.
+generate_command_json() {
+  local src="$1" name="$2"
+
+  # Extract goal line (first paragraph after ## Goal)
+  local goal=""
+  goal=$(awk '/^## Goal/{getline; getline; print; exit}' "$src")
+
+  # Extract rules as JSON array
+  local rules="[]"
+  rules=$(awk '
+    /^## Rules/,/^## |^### Step/ {
+      if (/^- \*\*([^*]+)\*\*: (.+)/) {
+        match($0, /\*\*([^*]+)\*\*: (.+)/, m)
+        if (m[1] != "") {
+          printf "{\"id\":\"%s\",\"instruction\":\"%s\"}\n", m[1], m[2]
+        }
+      }
+    }
+  ' "$src" | jq -s '.' 2>/dev/null || echo "[]")
+
+  # Extract steps as JSON array
+  local steps="[]"
+  steps=$(awk '
+    /^### Step [0-9]+:/ {
+      match($0, /^### Step ([0-9]+): (.+)/, m)
+      if (m[1] != "") {
+        if (step_id != "") { printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction }
+        step_id = m[1]; step_name = m[2]; instruction = ""
+      }
+      next
+    }
+    /^## / { if (step_id != "") { printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction; step_id="" } next }
+    step_id != "" { gsub(/"/, "\\\""); instruction = instruction ($0 != "" ? (instruction != "" ? "\\n" : "") $0 : "") }
+    END { if (step_id != "") printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction }
+  ' "$src" | jq -s '.' 2>/dev/null || echo "[]")
+
+  # Assemble the final command descriptor.
+  jq -n \
+    --arg id "$name" \
+    --arg title "$name" \
+    --arg description "$goal" \
+    --arg goal "$goal" \
+    --argjson rules "$rules" \
+    --argjson steps "$steps" \
+    '{id: $id, title: $title, description: $description, goal: $goal, rules: $rules, steps: $steps, scripts: ["sh"]}'
+}
+
+# Copy file only if changed, respecting --dry-run
+# Creates the destination directory on first copy and increments exactly one
+# of the created/updated/skipped counters.
+sync_file() {
+  local src="$1" dest="$2"
+  if [[ ! -f "$dest" ]]; then
+    log_action "CREATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      mkdir -p "$(dirname "$dest")"
+      cp "$src" "$dest"
+    fi
+    # `|| true`: ((x++)) returns status 1 when x was 0, which would trip set -e.
+    ((created++)) || true
+  elif ! diff -q "$src" "$dest" >/dev/null 2>&1; then
+    log_action "UPDATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      cp "$src" "$dest"
+    fi
+    ((updated++)) || true
+  else
+    ((skipped++)) || true
+  fi
+}
+
+# Write content to file only if changed
+# Stages the content in a temp file so the comparison and the move are
+# race-free; the temp file is always cleaned up on every path.
+sync_content() {
+  local content="$1" dest="$2"
+  local tmp
+  tmp=$(mktemp)
+  cat <<< "$content" > "$tmp"
+  if [[ ! -f "$dest" ]]; then
+    log_action "CREATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      mkdir -p "$(dirname "$dest")"
+      mv "$tmp" "$dest"
+    else
+      rm "$tmp"
+    fi
+    ((created++)) || true
+  elif ! diff -q "$tmp" "$dest" >/dev/null 2>&1; then
+    log_action "UPDATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      mv "$tmp" "$dest"
+    else
+      rm "$tmp"
+    fi
+    ((updated++)) || true
+  else
+    rm "$tmp"
+    ((skipped++)) || true
+  fi
+}
+
+# Create symlink, replacing if target changed
+# Three cases: existing symlink (retarget if needed), existing regular file
+# (replace with a symlink), or nothing (create fresh).
+sync_symlink() {
+  local src="$1" dest="$2"
+  if [[ -L "$dest" ]]; then
+    local current
+    current=$(readlink "$dest")
+    # Already points at the right target — nothing to do.
+    if [[ "$current" == "$src" ]]; then
+      ((skipped++)) || true
+      return
+    fi
+    log_action "UPDATE" "$dest -> $src"
+    if [[ "$DRY_RUN" == false ]]; then
+      ln -sf "$src" "$dest"
+    fi
+    ((updated++)) || true
+  elif [[ -f "$dest" ]]; then
+    # A plain file is in the way; promote it to a symlink.
+    log_action "REPLACE" "$dest (file -> symlink)"
+    if [[ "$DRY_RUN" == false ]]; then
+      rm "$dest"
+      ln -s "$src" "$dest"
+    fi
+    ((updated++)) || true
+  else
+    log_action "CREATE" "$dest -> $src"
+    if [[ "$DRY_RUN" == false ]]; then
+      mkdir -p "$(dirname "$dest")"
+      ln -s "$src" "$dest"
+    fi
+    ((created++)) || true
+  fi
+}
+
+# ─── OpenCode Sync ────────────────────────────────────────────────────────────
+
+# Sync Cursor rules/skills/scripts into the OpenCode config tree, then
+# remove OpenCode-side files whose Cursor source no longer exists.
+sync_opencode() {
+  echo "━━━ Syncing to OpenCode ━━━"
+
+  # Rules: .mdc → .md + .json
+  echo " Rules:"
+  for mdc in "$CURSOR_DIR"/rules/*.mdc; do
+    [[ -f "$mdc" ]] || continue
+    local name
+    name=$(basename "$mdc" .mdc)
+
+    # Convert .mdc to .md
+    local tmp_md
+    tmp_md=$(mktemp)
+    mdc_to_md "$mdc" > "$tmp_md"
+    sync_file "$tmp_md" "$OPENCODE_DIR/rules/$name.md"
+    rm -f "$tmp_md"
+
+    # Generate .json
+    local json
+    json=$(generate_rule_json "$mdc" "$name")
+    sync_content "$json" "$OPENCODE_DIR/rules/$name.json"
+  done
+
+  # Skills: SKILL.md + scripts/ subdirs
+  echo " Skills:"
+  if [[ -d "$CURSOR_DIR/skills" ]]; then
+    # Shared scripts at skills/ top level
+    for shared in "$CURSOR_DIR"/skills/*.sh; do
+      [[ -f "$shared" ]] || continue
+      local name
+      name=$(basename "$shared")
+      sync_file "$shared" "$OPENCODE_DIR/skills/$name"
+    done
+    # Skill dirs with SKILL.md + scripts/
+    for skill_dir in "$CURSOR_DIR"/skills/*/; do
+      [[ -d "$skill_dir" ]] || continue
+      local name
+      name=$(basename "$skill_dir")
+      if [[ -f "$skill_dir/SKILL.md" ]]; then
+        sync_file "$skill_dir/SKILL.md" "$OPENCODE_DIR/skills/$name/SKILL.md"
+      fi
+      if [[ -d "$skill_dir/scripts" ]]; then
+        for script in "$skill_dir"/scripts/*; do
+          [[ -f "$script" ]] || continue
+          local fname
+          fname=$(basename "$script")
+          sync_file "$script" "$OPENCODE_DIR/skills/$name/scripts/$fname"
+        done
+      fi
+    done
+  fi
+
+  # Standalone scripts
+  echo " Scripts:"
+  for script in "$CURSOR_DIR"/scripts/*.sh "$CURSOR_DIR"/scripts/*.js; do
+    [[ -f "$script" ]] || continue
+    local name
+    name=$(basename "$script")
+    sync_file "$script" "$OPENCODE_DIR/scripts/$name"
+  done
+
+  # Clean up stale files in OpenCode that no longer exist in Cursor
+  # (removes both the .md and its companion .json in one pass).
+  echo " Cleanup:"
+  for oc_rule in "$OPENCODE_DIR"/rules/*.md; do
+    [[ -f "$oc_rule" ]] || continue
+    local name
+    name=$(basename "$oc_rule" .md)
+    if [[ ! -f "$CURSOR_DIR/rules/$name.mdc" ]]; then
+      log_action "REMOVE" "$oc_rule"
+      if [[ "$DRY_RUN" == false ]]; then
+        rm -f "$oc_rule" "$OPENCODE_DIR/rules/$name.json"
+      fi
+      ((removed++)) || true
+    fi
+  done
+
+  # Remove whole skill directories whose Cursor source dir is gone.
+  for oc_skill_dir in "$OPENCODE_DIR"/skills/*/; do
+    [[ -d "$oc_skill_dir" ]] || continue
+    local name
+    name=$(basename "$oc_skill_dir")
+    if [[ ! -d "$CURSOR_DIR/skills/$name" ]]; then
+      log_action "REMOVE" "$oc_skill_dir"
+      if [[ "$DRY_RUN" == false ]]; then
+        rm -rf "$oc_skill_dir"
+      fi
+      ((removed++)) || true
+    fi
+  done
+}
+
+# ─── Claude Code Sync ─────────────────────────────────────────────────────────
+
+# Sync skills into Claude Code as symlinks and regenerate ~/.claude/CLAUDE.md
+# as a list of @-imports of the Cursor rule files.
+sync_claude() {
+  echo "━━━ Syncing to Claude Code ━━━"
+
+  # Skills: symlink SKILL.md files
+  echo " Skills (symlinks):"
+  if [[ -d "$CURSOR_DIR/skills" ]]; then
+    for skill_dir in "$CURSOR_DIR"/skills/*/; do
+      [[ -d "$skill_dir" ]] || continue
+      local name
+      name=$(basename "$skill_dir")
+      if [[ -f "$skill_dir/SKILL.md" ]]; then
+        sync_symlink "$skill_dir/SKILL.md" "$CLAUDE_DIR/skills/$name/SKILL.md"
+      fi
+    done
+  fi
+
+  # Clean up stale symlinks.
+  # FIX: the previous guard `[[ -e "$link" ]] || continue` skipped broken
+  # symlinks entirely (-e follows the link and fails when the target is
+  # gone), so the dead-symlink removal below could never fire. Accept -L as
+  # well so dangling links are actually inspected and removed.
+  if [[ -d "$CLAUDE_DIR/skills" ]]; then
+    for link in "$CLAUDE_DIR"/skills/*/SKILL.md; do
+      [[ -e "$link" || -L "$link" ]] || continue
+      if [[ -L "$link" ]]; then
+        local target
+        target=$(readlink "$link")
+        if [[ ! -f "$target" ]]; then
+          log_action "REMOVE" "$link (dead symlink)"
+          if [[ "$DRY_RUN" == false ]]; then rm "$link"; fi
+          ((removed++)) || true
+        fi
+      fi
+    done
+  fi
+
+  # CLAUDE.md: generate with @import for each rule
+  echo " CLAUDE.md:"
+  local dest="$CLAUDE_DIR/CLAUDE.md"
+  local tmp
+  tmp=$(mktemp)
+
+  {
+    echo "# Rules"
+    echo ""
+    echo "# Imported from ~/.cursor/rules/ — do not edit manually."
+    echo "# Re-generate with: ~/.cursor/scripts/tool-sync.sh"
+    echo ""
+    for mdc in "$CURSOR_DIR"/rules/*.mdc; do
+      [[ -f "$mdc" ]] || continue
+      echo "@$mdc"
+    done
+  } > "$tmp"
+
+  # Install the generated file only when it differs from what's on disk.
+  if [[ ! -f "$dest" ]]; then
+    log_action "CREATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      mv "$tmp" "$dest"
+    else
+      rm "$tmp"
+    fi
+    ((created++)) || true
+  elif ! diff -q "$tmp" "$dest" >/dev/null 2>&1; then
+    log_action "UPDATE" "$dest"
+    if [[ "$DRY_RUN" == false ]]; then
+      mv "$tmp" "$dest"
+    else
+      rm "$tmp"
+    fi
+    ((updated++)) || true
+  else
+    rm "$tmp"
+    ((skipped++)) || true
+  fi
+}
+
+# ─── Main ─────────────────────────────────────────────────────────────────────
+
+echo "tool-sync: Cursor → ${TARGET}"
+if [[ "$DRY_RUN" == true ]]; then
+  echo "(dry run — no files will be modified)"
+fi
+echo ""
+
+# Dispatch to the requested sync target(s).
+case "$TARGET" in
+  opencode) sync_opencode ;;
+  claude) sync_claude ;;
+  all) sync_opencode; echo ""; sync_claude ;;
+  *) echo "Unknown target: $TARGET" >&2; exit 1 ;;
+esac
+
+echo ""
+echo "Done: $created created, $updated updated, $removed removed, $skipped unchanged"
diff --git a/.cursor/skills/asana-get-context.sh b/.cursor/skills/asana-get-context.sh
new file mode 100755
index 0000000..87f3792
--- /dev/null
+++ b/.cursor/skills/asana-get-context.sh
@@ -0,0 +1,232 @@
+#!/usr/bin/env bash
+# asana-get-context.sh
+# Fetch concise context from an Asana task for implementation or PR creation.
+#
+# Usage:
+#   asana-get-context.sh <task-gid-or-url>
+#   asana-get-context.sh --task-url <url>
+#   asana-get-context.sh --task <gid>
+#
+# Accepts a raw task GID or a full Asana URL. URL formats supported:
+#   https://app.asana.com/0/<project>/<task>[/f]
+#   https://app.asana.com/1/<workspace>/task/<task>[/f]
+#
+# Requires env var: ASANA_TOKEN
+#
+# Output (compact, agent-friendly):
+#   TASK_NAME: <name>
+#   TASK_DESCRIPTION: <notes, truncated>
+#   PRIORITY: <value>
+#   STATUS: <value>
+#   IMPLEMENTOR: <value>
+#   REVIEWER: <value>
+#   COMMENTS: (most recent 5, one per block)
+#   ATTACHMENTS: <n> files
+#   DOWNLOADED: <n> files to <dir>
+#   UNPACKED: <zip> -> <dir> (<n> files)          [if ZIPs present]
+#   PDF_TEXT: <txt path> (from <pdf>, <n> chars)  [if PDF has text]
+#   PDF_PAGES: <dir> (<n> pages from <pdf>)       [if PDF is image-based]
+set -euo pipefail
+
+# Parse arguments: accept positional, --task, or --task-url
+RAW_INPUT=""
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --task-url|--task)
+ RAW_INPUT="${2:-}"
+ shift 2
+ ;;
+ -*)
+ echo "Unknown flag: $1" >&2
+ exit 1
+ ;;
+ *)
+ RAW_INPUT="$1"
+ shift
+ ;;
+ esac
+done
+
+if [[ -z "$RAW_INPUT" ]]; then
+ echo "Usage: asana-get-context.sh " >&2
+ exit 1
+fi
+
+# Extract task GID: accept a raw numeric GID or any Asana URL containing one.
+# Strips trailing path segments (/f, /subtask/…) and query strings.
+if [[ "$RAW_INPUT" =~ /task/([0-9]+) ]]; then
+ TASK_GID="${BASH_REMATCH[1]}"
+elif [[ "$RAW_INPUT" =~ /([0-9]+)(/f)?([?#].*)?$ ]]; then
+ TASK_GID="${BASH_REMATCH[1]}"
+elif [[ "$RAW_INPUT" =~ ^[0-9]+$ ]]; then
+ TASK_GID="$RAW_INPUT"
+else
+ echo "Error: could not extract task GID from: $RAW_INPUT" >&2
+ exit 1
+fi
+if [[ -z "${ASANA_TOKEN:-}" ]]; then
+ echo "Error: ASANA_TOKEN not set" >&2
+ exit 1
+fi
+
+API="https://app.asana.com/api/1.0"
+AUTH="Authorization: Bearer $ASANA_TOKEN"
+
+# Fetch task + custom fields
+# NOTE: the custom-field GIDs inside the python snippet are workspace-specific
+# and will not resolve in any other Asana workspace.
+curl -s "$API/tasks/$TASK_GID?opt_fields=name,notes,custom_fields.gid,custom_fields.name,custom_fields.display_value" \
+  -H "$AUTH" | python3 -c "
+import sys, json
+data = json.load(sys.stdin)['data']
+
+print(f\"TASK_NAME: {data['name']}\")
+
+notes = (data.get('notes') or '').strip()
+if len(notes) > 500:
+    notes = notes[:500] + '...'
+print(f\"TASK_DESCRIPTION: {notes or '(empty)'}\")
+
+FIELDS = {
+    '795866930204488': 'PRIORITY',
+    '1190660107346181': 'STATUS',
+    '1203334386796983': 'IMPLEMENTOR',
+    '1203334388004673': 'REVIEWER',
+}
+for f in data.get('custom_fields', []):
+    label = FIELDS.get(f['gid'])
+    if label:
+        val = f.get('display_value') or '(not set)'
+        print(f'{label}: {val}')
+"
+
+# Fetch project memberships — look for version project (e.g. "4.44.0")
+# A project whose name is a plain semver string is treated as the release.
+curl -s "$API/tasks/$TASK_GID?opt_fields=memberships.project.name" \
+  -H "$AUTH" | python3 -c "
+import sys, json, re
+data = json.load(sys.stdin)['data']
+for m in data.get('memberships', []):
+    name = m.get('project', {}).get('name', '')
+    if re.match(r'^\d+\.\d+\.\d+$', name):
+        print(f'VERSION_PROJECT: {name}')
+        break
+else:
+    print('VERSION_PROJECT: (not set)')
+"
+
+# Fetch recent comments (last 5)
+# Stories include system events; only resource_subtype == comment_added kept.
+curl -s "$API/tasks/$TASK_GID/stories?opt_fields=resource_subtype,text,created_by.name,created_at&limit=100" \
+  -H "$AUTH" | python3 -c "
+import sys, json
+data = json.load(sys.stdin)['data']
+comments = [s for s in data if s.get('resource_subtype') == 'comment_added'][-5:]
+if not comments:
+    print('COMMENTS: (none)')
+else:
+    print('COMMENTS:')
+    for c in comments:
+        author = c.get('created_by', {}).get('name', 'unknown')
+        text = (c.get('text') or '').strip().replace('\n', ' ')
+        if len(text) > 200:
+            text = text[:200] + '...'
+        date = c.get('created_at', '')[:10]
+        print(f' [{date}] {author}: {text}')
+"
+
+# Fetch attachments — download all supported types, then post-process
+# DOWNLOAD_DIR is interpolated into the python source below; safe because
+# TASK_GID was validated as purely numeric above.
+DOWNLOAD_DIR="/tmp/asana-task-$TASK_GID"
+
+# Phase 1: Download all supported attachments
+curl -s "$API/tasks/$TASK_GID/attachments?opt_fields=name,resource_subtype,download_url" \
+  -H "$AUTH" | python3 -c "
+import sys, json, os, urllib.request
+
+data = json.load(sys.stdin)['data']
+if not data:
+    print('ATTACHMENTS: (none)')
+    sys.exit(0)
+
+DOWNLOAD_EXTS = {
+    '.md', '.txt', '.json', '.csv', '.log', '.yaml', '.yml',
+    '.pdf',
+    '.zip',
+    '.png', '.jpg', '.jpeg', '.gif', '.webp',
+}
+download_dir = '$DOWNLOAD_DIR'
+downloaded = []
+
+print(f'ATTACHMENTS: {len(data)} files')
+for a in data:
+    name = a.get('name', 'unnamed')
+    url = a.get('download_url')
+    ext = os.path.splitext(name)[1].lower()
+    if ext in DOWNLOAD_EXTS and url:
+        os.makedirs(download_dir, exist_ok=True)
+        dest = os.path.join(download_dir, name)
+        try:
+            urllib.request.urlretrieve(url, dest)
+            downloaded.append(dest)
+            print(f' - {name} (downloaded)')
+        except Exception as e:
+            print(f' - {name} (download failed: {e})')
+    else:
+        print(f' - {name}')
+
+if downloaded:
+    print(f'DOWNLOADED: {len(downloaded)} files to {download_dir}')
+    for d in downloaded:
+        print(f' {d}')
+"
+
+# Phase 2: Unpack ZIP archives (may produce more files to process)
+# nullglob makes the loop a no-op when no ZIPs were downloaded.
+shopt -s nullglob
+for zip_file in "$DOWNLOAD_DIR"/*.zip; do
+  subdir="$DOWNLOAD_DIR/$(basename "$zip_file" .zip)"
+  if unzip -o -q "$zip_file" -d "$subdir" 2>/dev/null; then
+    file_count=$(find "$subdir" -type f 2>/dev/null | wc -l | tr -d ' ')
+    echo "UNPACKED: $(basename "$zip_file") -> $subdir ($file_count files)"
+    # Remove the archive so later phases only see extracted content.
+    rm "$zip_file"
+  else
+    echo "UNPACK_FAILED: $(basename "$zip_file")"
+  fi
+done
+shopt -u nullglob
+
+# Phase 3: Process PDFs (text extraction first, image fallback)
+# Tries pdftotext first; if the PDF yields too little text (likely scanned),
+# renders pages to PNG via pdftoppm. Both tools come from poppler-utils.
+process_pdf() {
+  local pdf="$1"
+  local base="${pdf%.pdf}"
+  local fname
+  fname="$(basename "$pdf")"
+
+  if command -v pdftotext &>/dev/null; then
+    local text
+    text=$(pdftotext "$pdf" - 2>/dev/null || true)
+    local char_count
+    # Count non-whitespace chars: a scanned PDF produces near-zero text.
+    char_count=$(printf '%s' "$text" | tr -d '[:space:]' | wc -c | tr -d ' ')
+    if [[ "$char_count" -gt 100 ]]; then
+      printf '%s' "$text" > "${base}.txt"
+      echo "PDF_TEXT: ${base}.txt (from $fname, ${char_count} chars)"
+      return
+    fi
+  fi
+
+  # Image fallback: render at 150 DPI, one PNG per page.
+  if command -v pdftoppm &>/dev/null; then
+    local pages_dir="${base}_pages"
+    mkdir -p "$pages_dir"
+    pdftoppm -png -r 150 "$pdf" "$pages_dir/page" 2>/dev/null
+    local page_count
+    page_count=$(find "$pages_dir" -name 'page-*.png' 2>/dev/null | wc -l | tr -d ' ')
+    if [[ "$page_count" -gt 0 ]]; then
+      echo "PDF_PAGES: $pages_dir ($page_count pages from $fname)"
+    else
+      echo "PDF_CONVERT_FAILED: $fname"
+    fi
+  else
+    echo "PDF_SKIPPED: $fname (install poppler-utils for text/image extraction)"
+  fi
+}
+
+# Process every downloaded PDF, including ones extracted from ZIPs.
+if [[ -d "$DOWNLOAD_DIR" ]]; then
+  while IFS= read -r pdf; do
+    process_pdf "$pdf"
+  done < <(find "$DOWNLOAD_DIR" -name '*.pdf' -type f 2>/dev/null)
+fi
diff --git a/.cursor/skills/asana-plan/SKILL.md b/.cursor/skills/asana-plan/SKILL.md
new file mode 100644
index 0000000..2f8a519
--- /dev/null
+++ b/.cursor/skills/asana-plan/SKILL.md
@@ -0,0 +1,60 @@
+---
+name: asana-plan
+description: Create an implementation plan from either an Asana task URL or ad-hoc text/file requirements, then wait for user confirmation before implementation.
+compatibility: Requires jq. ASANA_TOKEN for Asana context when task URLs are provided.
+metadata:
+ author: j0ntz
+---
+
+Produce a plan document via Cursor planning flow from Asana or text requirements, and hand off approved context to implementation skills.
+
+
+If input is an Asana task URL, read and follow `~/.cursor/skills/task-review/SKILL.md` steps 1-3 before planning.
+Do not start implementation while in this skill. End by asking for confirmation.
+Use Cursor's plan tool to output the plan document to the normal planning location.
+
+
+
+Accept two input forms:
+
+1. **Asana URL mode**: Task URL is provided
+2. **Text/file mode**: Ad-hoc text requirement or file reference is provided
+
+If input is ambiguous, ask the user to clarify which mode applies.
+
+
+
+
+Read `~/.cursor/skills/task-review/SKILL.md` and run its steps 1-3 to fetch and summarize task context.
+
+
+
+Read the provided description and any referenced file(s), then summarize scope, target areas, and assumptions.
+
+
+
+
+Create a concise actionable implementation plan using Cursor's plan flow. Include:
+
+- Summary
+- Goal / Definition of Done
+- Likely relevant files
+- Findings so far
+- Numbered implementation steps
+- Constraints
+
+
+
+Return:
+
+1. Plan file path
+2. Short execution summary (what will be changed)
+
+Then ask for confirmation before implementation:
+
+> Does this match your understanding? Any adjustments before I start?
+
+
+
+`/im` consumes this output and starts only after user confirmation. `/im` should not re-run a second independent confirmation flow for the same plan.
+
diff --git a/.cursor/skills/asana-task-update/SKILL.md b/.cursor/skills/asana-task-update/SKILL.md
new file mode 100644
index 0000000..91126f1
--- /dev/null
+++ b/.cursor/skills/asana-task-update/SKILL.md
@@ -0,0 +1,89 @@
+---
+name: asana-task-update
+description: Update Asana tasks via one reusable workflow (attach PRs, assign/unassign, set status, and update task fields). Use when any skill needs to modify Asana task state.
+compatibility: Requires jq. ASANA_TOKEN for Asana API updates. ASANA_GITHUB_SECRET for PR attach operations.
+metadata:
+ author: j0ntz
+---
+
+Perform Asana task mutations through one shared command and one shared script, so all callers use the same field mappings and prompts.
+
+
+Use `~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh` for all Asana task mutations. Do not call raw Asana APIs directly from skills that can delegate here.
+Every operation requires `--task <TASK_GID>`.
+`--attach-pr` requires `ASANA_GITHUB_SECRET`. Other operations require `ASANA_TOKEN`.
+If the script exits code 2 with `PROMPT_REVIEWER` or `PROMPT_IMPLEMENTOR`, ask the user and re-run with explicit `--reviewer` or `--implementor`. Hands-off callers may instead pass `--skip-assign-if-missing` to convert missing-reviewer assignment into a non-blocking skip.
+Asana updates can take time. Use `block_until_ms: 120000` for script calls.
+
+
+
+```bash
+# Attach only
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <TASK_GID> \
+  --attach-pr --pr-url <PR_URL> --pr-title "<PR_TITLE>" --pr-number <PR_NUMBER>
+
+# Attach + assign reviewer + set review-needed status + estimate review hours
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <TASK_GID> \
+  --attach-pr --pr-url <PR_URL> --pr-title "<PR_TITLE>" --pr-number <PR_NUMBER> \
+  --assign --set-status "Review Needed" --auto-est-review-hrs
+
+# Hands-off attach + best-effort assign (skip if reviewer missing)
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <TASK_GID> \
+  --attach-pr --pr-url <PR_URL> --pr-title "<PR_TITLE>" --pr-number <PR_NUMBER> \
+  --assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs
+
+# Publish Needed -> Verification Needed (and unassign)
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <TASK_GID> \
+  --set-status "Verification Needed" --unassign
+```
+
+
+
+Determine which updates are needed by the caller and build one command with all flags:
+
+- `--attach-pr --pr-url <url> --pr-title <title> --pr-number <n>`
+- `--assign` or `--assign <user-gid>`
+- `--skip-assign-if-missing`
+- `--unassign`
+- `--set-status "Review Needed|Publish Needed|Verification Needed"`
+- `--set-reviewer <user-gid>`
+- `--set-implementor <user-gid>`
+- `--set-priority <enum-option-gid>`
+- `--set-planned <enum-option-gid>`
+- `--auto-est-review-hrs`
+
+
+
+Run `asana-task-update.sh` with the built flags. Prefer one call with combined operations over multiple calls.
+
+
+
+If exit code is 2:
+
+- `PROMPT_REVIEWER`: ask who to assign, then re-run with `--reviewer <user-gid>` and `--assign`
+- `PROMPT_IMPLEMENTOR`: ask who to set as implementor, then re-run with `--implementor <user-gid>`
+
+If the caller used `--skip-assign-if-missing`, do not ask about `PROMPT_REVIEWER` because the script will not emit it for missing-reviewer cases.
+
+
+
+Summarize one line per action from script output (attach result, assignment, status change, field updates).
+
+
+
+1. Jon Tzeng — `1200972350160586`
+2. William Swanson — `10128869002320`
+3. Paul Puey — `9976421903322`
+4. Sam Holmes — `1198904591136142`
+5. Matthew Piche — `522823585857811`
+
+
+
+- `0`: success
+- `1`: error
+- `2`: needs user input (`PROMPT_REVIEWER`, `PROMPT_IMPLEMENTOR`)
+
diff --git a/.cursor/skills/asana-task-update/scripts/asana-task-update.sh b/.cursor/skills/asana-task-update/scripts/asana-task-update.sh
new file mode 100755
index 0000000..7e87d3d
--- /dev/null
+++ b/.cursor/skills/asana-task-update/scripts/asana-task-update.sh
@@ -0,0 +1,268 @@
+#!/usr/bin/env bash
+# asana-task-update.sh
+# Unified Asana task mutation script.
+#
+# Exit codes:
+#   0 = success
+#   1 = error
+#   2 = needs user input (PROMPT_REVIEWER, PROMPT_IMPLEMENTOR)
+set -euo pipefail
+
+TASK_GID=""
+DO_ATTACH=false
+PR_URL=""
+PR_TITLE=""
+PR_NUMBER=""
+
+DO_ASSIGN=false
+ASSIGN_GID=""
+SKIP_ASSIGN_IF_MISSING=false
+DO_UNASSIGN=false
+
+SET_STATUS=""
+SET_REVIEWER_GID=""
+SET_IMPLEMENTOR_GID=""
+SET_PRIORITY_GID=""
+SET_PLANNED_GID=""
+AUTO_EST_REVIEW=false
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --task) TASK_GID="$2"; shift 2 ;;
+    --attach-pr) DO_ATTACH=true; shift ;;
+    --pr-url) PR_URL="$2"; shift 2 ;;
+    --pr-title) PR_TITLE="$2"; shift 2 ;;
+    --pr-number) PR_NUMBER="$2"; shift 2 ;;
+    --assign)
+      DO_ASSIGN=true
+      # --assign takes an OPTIONAL GID: consume the next arg only when it
+      # does not itself start with "--" (i.e. it is not another flag).
+      if [[ $# -ge 2 && "${2:0:2}" != "--" ]]; then
+        ASSIGN_GID="$2"
+        shift 2
+      else
+        shift
+      fi
+      ;;
+    --skip-assign-if-missing) SKIP_ASSIGN_IF_MISSING=true; shift ;;
+    --unassign) DO_UNASSIGN=true; shift ;;
+    --set-status) SET_STATUS="$2"; shift 2 ;;
+    --set-reviewer|--reviewer) SET_REVIEWER_GID="$2"; shift 2 ;;
+    --set-implementor|--implementor) SET_IMPLEMENTOR_GID="$2"; shift 2 ;;
+    --set-priority) SET_PRIORITY_GID="$2"; shift 2 ;;
+    --set-planned) SET_PLANNED_GID="$2"; shift 2 ;;
+    --auto-est-review-hrs) AUTO_EST_REVIEW=true; shift ;;
+    *) echo "Unknown flag: $1" >&2; exit 1 ;;
+  esac
+done
+
+if [[ -z "$TASK_GID" ]]; then
+  echo "Error: --task is required" >&2
+  exit 1
+fi
+
+# Refuse to run when no mutation flag was given at all.
+if ! $DO_ATTACH && ! $DO_ASSIGN && ! $DO_UNASSIGN && [[ -z "$SET_STATUS" ]] && [[ -z "$SET_REVIEWER_GID" ]] && [[ -z "$SET_IMPLEMENTOR_GID" ]] && [[ -z "$SET_PRIORITY_GID" ]] && [[ -z "$SET_PLANNED_GID" ]] && ! $AUTO_EST_REVIEW; then
+  echo "Error: No operations specified" >&2
+  exit 1
+fi
+
+if [[ -z "${ASANA_TOKEN:-}" ]]; then
+  echo "Error: ASANA_TOKEN not set" >&2
+  exit 1
+fi
+
+# The PR-attach widget uses a separate integration secret, not ASANA_TOKEN.
+if $DO_ATTACH && [[ -z "${ASANA_GITHUB_SECRET:-}" ]]; then
+  echo "Error: ASANA_GITHUB_SECRET not set (required for --attach-pr)" >&2
+  exit 1
+fi
+
+ASANA_API="https://app.asana.com/api/1.0"
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
+# Airbitz.co workspace field GIDs
+# NOTE: these are hard-coded for one workspace and will not resolve elsewhere.
+STATUS_FIELD="1190660107346181"
+REVIEW_NEEDED_OPTION="1190660107348334"
+PUBLISH_NEEDED_OPTION="1191304757575656"
+VERIFICATION_NEEDED_OPTION="1190660107348340"
+REVIEWER_FIELD="1203334388004673"
+IMPLEMENTOR_FIELD="1203334386796983"
+SPENT_DEV_HRS_FIELD="1202996660964169"
+EST_REVIEW_HRS_FIELD="1203002792997295"
+
+# Map a human status label to its enum-option GID; unknown labels are passed
+# through unchanged so callers may supply a raw option GID directly.
+status_to_gid() {
+  case "$1" in
+    "Review Needed") echo "$REVIEW_NEEDED_OPTION" ;;
+    "Publish Needed") echo "$PUBLISH_NEEDED_OPTION" ;;
+    "Verification Needed") echo "$VERIFICATION_NEEDED_OPTION" ;;
+    *) echo "$1" ;;
+  esac
+}
+
+# Lazily fetch the task's fields once; cached in TASK_FIELDS for reuse.
+TASK_FIELDS=""
+load_task_fields() {
+  if [[ -n "$TASK_FIELDS" ]]; then
+    return 0
+  fi
+  TASK_FIELDS=$(curl -sf "$ASANA_API/tasks/$TASK_GID?opt_fields=name,assignee.name,custom_fields.gid,custom_fields.name,custom_fields.people_value.gid,custom_fields.people_value.name,custom_fields.number_value,custom_fields.enum_value.gid,custom_fields.enum_value.name" \
+    -H "Authorization: Bearer $ASANA_TOKEN")
+}
+
+# Print the first person GID stored in a people-type custom field ("" if none).
+read_people_field() {
+  local field_gid="$1"
+  echo "$TASK_FIELDS" | jq -r --arg gid "$field_gid" '
+    .data.custom_fields[]
+    | select(.gid == $gid)
+    | (.people_value[0].gid // "")
+  ' | head -n 1
+}
+
+if $DO_ATTACH; then
+  if [[ -z "$PR_URL" || -z "$PR_TITLE" || -z "$PR_NUMBER" ]]; then
+    echo "Error: --attach-pr requires --pr-url, --pr-title, and --pr-number" >&2
+    exit 1
+  fi
+  # The PR number is embedded as a bare JSON number; reject non-numeric
+  # values rather than sending invalid JSON to the integration endpoint.
+  if ! [[ "$PR_NUMBER" =~ ^[0-9]+$ ]]; then
+    echo "Error: --pr-number must be numeric (got: $PR_NUMBER)" >&2
+    exit 1
+  fi
+
+  # Build the payload with jq so the title and URL are always correctly
+  # JSON-escaped (the previous string-interpolated body broke on quotes).
+  ATTACH_BODY=$(jq -n \
+    --arg desc "https://app.asana.com/0/0/$TASK_GID" \
+    --arg name "$PR_TITLE" \
+    --argjson num "$PR_NUMBER" \
+    --arg url "$PR_URL" \
+    '{allowedProjects: [], blockedProjects: [], pullRequestDescription: $desc, pullRequestName: $name, pullRequestNumber: $num, pullRequestURL: $url}')
+
+  ATTACH_RESULT=$(curl -s -X POST "https://github.integrations.asana.plus/custom/v1/actions/widget" \
+    -H "Authorization: Bearer $ASANA_GITHUB_SECRET" \
+    -H "Content-Type: application/json" \
+    -d "$ATTACH_BODY" 2>&1)
+
+  # The widget returns a JSON array; surface its first result (or the raw
+  # response when it is not parseable JSON).
+  ATTACH_STATUS=$(echo "$ATTACH_RESULT" | python3 -c "import sys,json; r=json.load(sys.stdin); print(r[0].get('result','unknown'))" 2>/dev/null || echo "error: $ATTACH_RESULT")
+  echo ">> PR attach: $ATTACH_STATUS"
+fi
+
+# Any operation that needs current field values triggers the one-time fetch.
+if $DO_ASSIGN || [[ -n "$SET_REVIEWER_GID" ]] || [[ -n "$SET_IMPLEMENTOR_GID" ]] || $AUTO_EST_REVIEW || [[ -n "$SET_PRIORITY_GID" ]] || [[ -n "$SET_PLANNED_GID" ]]; then
+  load_task_fields
+fi
+
+if $DO_ASSIGN; then
+  # Resolution order for the assignee: explicit --assign GID, then
+  # --reviewer flag, then the task's existing Reviewer field.
+  if [[ -z "$ASSIGN_GID" ]]; then
+    ASSIGN_GID="${SET_REVIEWER_GID:-$(read_people_field "$REVIEWER_FIELD")}"
+  fi
+  if [[ -z "$ASSIGN_GID" ]]; then
+    if $SKIP_ASSIGN_IF_MISSING; then
+      echo ">> Assignee: skipped (no reviewer provided or found on task)"
+      DO_ASSIGN=false
+    else
+      # Exit 2 signals the caller to prompt the user for a reviewer.
+      echo ">> PROMPT_REVIEWER"
+      exit 2
+    fi
+  fi
+
+  if $DO_ASSIGN; then
+    # Keep the Reviewer custom field in sync with the new assignee.
+    if [[ -z "$SET_REVIEWER_GID" ]]; then
+      SET_REVIEWER_GID="$ASSIGN_GID"
+    fi
+
+    # Implementor: existing field value, else the current API user.
+    if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then
+      SET_IMPLEMENTOR_GID="$(read_people_field "$IMPLEMENTOR_FIELD")"
+    fi
+    if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then
+      SET_IMPLEMENTOR_GID="$("$SCRIPT_DIR/../../asana-whoami.sh" 2>/dev/null || true)"
+      if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then
+        echo ">> Implementor: auto-resolved to current user ($SET_IMPLEMENTOR_GID)"
+      fi
+    fi
+    if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then
+      echo ">> PROMPT_IMPLEMENTOR"
+      exit 2
+    fi
+  fi
+fi
+
+# Accumulate all custom-field changes into one JSON object so the task is
+# updated with a single PUT.
+CUSTOM_FIELDS_PATCH='{}'
+
+if [[ -n "$SET_STATUS" ]]; then
+  STATUS_GID="$(status_to_gid "$SET_STATUS")"
+  CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$STATUS_FIELD" --arg v "$STATUS_GID" '. + {($k): $v}')
+fi
+# People-type fields take an ARRAY of user GIDs.
+if [[ -n "$SET_REVIEWER_GID" ]]; then
+  CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$REVIEWER_FIELD" --arg v "$SET_REVIEWER_GID" '. + {($k): [$v]}')
+fi
+if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then
+  CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$IMPLEMENTOR_FIELD" --arg v "$SET_IMPLEMENTOR_GID" '. + {($k): [$v]}')
+fi
+# Priority/Planned field GIDs are discovered by name from the fetched task;
+# if the field is absent on this task the update is silently skipped.
+if [[ -n "$SET_PRIORITY_GID" ]]; then
+  PRIORITY_FIELD_GID=$(echo "$TASK_FIELDS" | jq -r '.data.custom_fields[] | select(.name == "Priority") | .gid' | head -n 1)
+  if [[ -n "$PRIORITY_FIELD_GID" ]]; then
+    CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$PRIORITY_FIELD_GID" --arg v "$SET_PRIORITY_GID" '. + {($k): $v}')
+  fi
+fi
+if [[ -n "$SET_PLANNED_GID" ]]; then
+  PLANNED_FIELD_GID=$(echo "$TASK_FIELDS" | jq -r '.data.custom_fields[] | select(.name == "Planned") | .gid' | head -n 1)
+  if [[ -n "$PLANNED_FIELD_GID" ]]; then
+    CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$PLANNED_FIELD_GID" --arg v "$SET_PLANNED_GID" '. + {($k): $v}')
+  fi
+fi
+
+UPDATE_BODY='{"data":{}}'
+HAS_UPDATE=false
+
+if [[ "$CUSTOM_FIELDS_PATCH" != "{}" ]]; then
+  UPDATE_BODY=$(echo "$UPDATE_BODY" | jq --argjson cf "$CUSTOM_FIELDS_PATCH" '.data.custom_fields = $cf')
+  HAS_UPDATE=true
+fi
+
+# Assignee: null clears it; --unassign wins over --assign when both given.
+if $DO_UNASSIGN; then
+  UPDATE_BODY=$(echo "$UPDATE_BODY" | jq '.data.assignee = null')
+  HAS_UPDATE=true
+elif $DO_ASSIGN; then
+  UPDATE_BODY=$(echo "$UPDATE_BODY" | jq --arg a "$ASSIGN_GID" '.data.assignee = $a')
+  HAS_UPDATE=true
+fi
+
+# Single PUT carrying every accumulated change.
+if $HAS_UPDATE; then
+  curl -sf -X PUT "$ASANA_API/tasks/$TASK_GID" \
+    -H "Authorization: Bearer $ASANA_TOKEN" \
+    -H "Content-Type: application/json" \
+    -d "$UPDATE_BODY" > /dev/null
+  echo ">> Task fields: updated"
+fi
+
+# One summary line per action for the calling agent to report.
+if $DO_ASSIGN; then
+  echo ">> Assigned to reviewer: $ASSIGN_GID"
+fi
+if $DO_UNASSIGN; then
+  echo ">> Assignee: unset"
+fi
+if [[ -n "$SET_STATUS" ]]; then
+  echo ">> Status: $SET_STATUS"
+fi
+if [[ -n "$SET_REVIEWER_GID" ]]; then
+  echo ">> Reviewer field: set"
+fi
+if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then
+  echo ">> Implementor field: set"
+fi
+if [[ -n "$SET_PRIORITY_GID" ]]; then
+  echo ">> Priority field: set"
+fi
+if [[ -n "$SET_PLANNED_GID" ]]; then
+  echo ">> Planned field: set"
+fi
+
+# Auto-estimate review hours: 10% of Spent Dev Hrs (0.1 minimum), but never
+# overwrite a value that is already set.
+if $AUTO_EST_REVIEW; then
+  load_task_fields
+  EST_REVIEW=$(echo "$TASK_FIELDS" | jq -r --arg gid "$EST_REVIEW_HRS_FIELD" '.data.custom_fields[] | select(.gid == $gid) | (.number_value // empty)' | head -n 1)
+  if [[ -n "$EST_REVIEW" ]]; then
+    echo ">> Est. Review Hrs: already set ($EST_REVIEW)"
+  else
+    SPENT_DEV=$(echo "$TASK_FIELDS" | jq -r --arg gid "$SPENT_DEV_HRS_FIELD" '.data.custom_fields[] | select(.gid == $gid) | (.number_value // empty)' | head -n 1)
+    if [[ -z "$SPENT_DEV" ]]; then
+      echo ">> Est. Review Hrs: skipped (no Spent Dev Hrs)"
+    else
+      # SPENT_DEV comes from a jq number_value, so interpolating it into the
+      # python expression is safe (numeric or empty only).
+      EST_VAL=$(python3 -c "v=float('$SPENT_DEV'); x=round(v*0.1,1); print(x if x >= 0.1 else 0.1)")
+      REVIEW_PATCH=$(jq -n --arg f "$EST_REVIEW_HRS_FIELD" --argjson v "$EST_VAL" '{data:{custom_fields:{($f):$v}}}')
+      curl -sf -X PUT "$ASANA_API/tasks/$TASK_GID" \
+        -H "Authorization: Bearer $ASANA_TOKEN" \
+        -H "Content-Type: application/json" \
+        -d "$REVIEW_PATCH" > /dev/null
+      echo ">> Est. Review Hrs: set to $EST_VAL (10% of Spent Dev Hrs)"
+    fi
+  fi
+fi
diff --git a/.cursor/skills/asana-whoami.sh b/.cursor/skills/asana-whoami.sh
new file mode 100755
index 0000000..62b73ff
--- /dev/null
+++ b/.cursor/skills/asana-whoami.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# asana-whoami.sh
+# Resolve the current Asana user's GID from $ASANA_TOKEN.
+# Caches the result in /tmp (persists until the token changes or /tmp is cleared).
+#
+# Usage:
+# asana-whoami.sh # prints GID
+# asana-whoami.sh --name # prints "GID NAME"
+#
+# Requires env var: ASANA_TOKEN
+#
+# Output:
+#   (default)      <gid>
+#   (with --name)  <gid> <name>
+set -euo pipefail
+
+SHOW_NAME=false
+if [[ "${1:-}" == "--name" ]]; then
+  SHOW_NAME=true
+fi
+
+if [[ -z "${ASANA_TOKEN:-}" ]]; then
+  echo "Error: ASANA_TOKEN not set" >&2
+  exit 1
+fi
+
+CACHE_FILE="/tmp/asana-whoami-$(echo "$ASANA_TOKEN" | shasum -a 256 | cut -c1-16).json"
+
+if [[ -f "$CACHE_FILE" ]]; then
+  cached=$(cat "$CACHE_FILE")
+else # fetch with -f so HTTP errors (e.g. a bad token) exit instead of being cached
+  cached=$(curl -sf "https://app.asana.com/api/1.0/users/me?opt_fields=gid,name" \
+    -H "Authorization: Bearer $ASANA_TOKEN")
+  echo "$cached" > "$CACHE_FILE"
+fi
+
+if [[ "$SHOW_NAME" == "true" ]]; then
+  echo "$cached" | python3 -c "
+import sys, json
+d = json.load(sys.stdin)['data']
+print(f\"{d['gid']} {d['name']}\")
+"
+else
+  echo "$cached" | python3 -c "
+import sys, json
+print(json.load(sys.stdin)['data']['gid'])
+"
+fi
diff --git a/.cursor/skills/author/SKILL.md b/.cursor/skills/author/SKILL.md
new file mode 100644
index 0000000..2b6ae0f
--- /dev/null
+++ b/.cursor/skills/author/SKILL.md
@@ -0,0 +1,145 @@
+---
+name: author
+description: Create, edit, revise, or debug Cursor skills (~/.cursor/skills/*/SKILL.md). Use when the user wants to make a new skill, update an existing skill, fix a skill, or asks about .cursor/skills/ files. Also use when the user says "new command", "create command", "create skill", "edit command", "new skill", "update skill", "update command", or references SKILL.md. NOT for general markdown editing (READMEs, CHANGELOGs, docs, AGENTS.md).
+---
+
+Write or revise Cursor commands and skills with maximum agent compliance.
+
+
+Skills (`~/.cursor/skills/*/SKILL.md`): The standard unit. Can be invoked explicitly via `/skill-name` or agent-triggered based on task matching against the description. Companion scripts live in `/scripts/`. Shared scripts live at `~/.cursor/skills/` top-level.
+
+
+
+Be prescriptive, not descriptive. Commands tell the agent what to DO, not what things ARE.
+Examples must be brief and hypothetical. Never use real data from conversations. Keep examples to 3-5 lines max.
+DRY across commands. If two commands share logic, extract it into a shared file and have both reference it.
+Order of operations matters. The agent reads top-to-bottom. Put context-setting steps before action steps.
+Hard rules at the top. Non-negotiable constraints go right after the Goal so they're read before any steps.
+Escape hatches over assumptions. When ambiguity exists, tell the agent to ask — don't let it guess.
+Offload all deterministic logic to companion scripts. If an operation has a known, repeatable sequence of steps (API calls, git commands, file parsing, linting, data fetching), it belongs in a `.sh` script — not inline in the `.md` as shell blocks the agent must reason about. The `.md` file should only handle semantic decisions, user interaction, and interpreting script output. This eliminates context bloat and prevents the agent from re-deriving logic it doesn't need to understand.
+Minimize round-trips. When a step requires multiple independent pieces of information (e.g., git status + git log + git diff), instruct the agent to gather them all in parallel tool calls within a single message/script — not sequentially. Group independent reads, searches, and shell commands together. Only sequence calls when one depends on the output of another.
+Don't duplicate in semantic rules what companion scripts already automate. If a script handles linting, formatting, localization, or other post-processing, the command should reference the script — not also instruct the agent to perform those steps. Duplication risks the agent running a step twice or conflicting with the script's output.
+For GitHub API operations in companion scripts, use `gh api` and `gh api graphql` over raw `curl` + `$GITHUB_TOKEN`. `gh` handles authentication, pagination (`--paginate`), and API versioning automatically. Use GraphQL (`gh api graphql -f query="..."`) to fetch only required fields in a single request, reducing API calls and context size. Fall back to REST (`gh api repos/...`) only when GraphQL doesn't expose the needed data (e.g., file patches).
+When companion scripts need capabilities beyond bash (JSON manipulation, complex regex, structured data processing, async I/O), embed Node.js inline via `exec node -e '...'` rather than depending on Python. Node is already a required dependency for other scripts; adding Python creates an unnecessary second runtime dependency. This keeps scripts as single `.sh` files while unlocking full-featured processing. Avoid single quotes inside the inline node code (bash single-quoted string boundary); use `\x27` in regex to match literal single quotes.
+Companion scripts must minimize context consumption. Return structured, filtered summaries — never raw API responses or full file contents. When a script processes large inputs (logs, exports, API payloads), extract only the fields the command needs and discard the rest. Commands should instruct the agent to use targeted reads (grep, line ranges) over full file reads for large files. Every token of script output that the agent reads costs context — design outputs to be as compact as possible while remaining parseable.
+
+
+
+Use XML tags to structure commands and skills. XML outperforms markdown for LLM instruction-following:
+
+- Anthropic, OpenAI, and Google all recommend XML tags for structuring prompts.
+- Claude is specifically tuned to attend to XML tag boundaries.
+- Empirical tests show up to 40% performance variance based on prompt format alone, with XML consistently outperforming markdown.
+
+Source: https://docs.claude.com/en/docs/use-xml-tags
+
+
+- Use semantic tag names that describe their content (e.g., `<rules>`, `<steps>`, `<context>`).
+- Use attributes for metadata: `id`, `name`, `description`.
+- Nest tags for hierarchy: `<steps><step>...</step></steps>`.
+- Be consistent — use the same tag names throughout a command.
+- Markdown is still fine for inline formatting within XML tags (bold, code, lists).
+
+
+
+```xml
+<goal>One sentence. What does this command accomplish?</goal>
+
+<rules>
+  <rule id="rule-one">...</rule>
+  <rule id="rule-two">...</rule>
+</rules>
+
+<steps>
+  <step name="first-step">
+    Instructions for this step.
+  </step>
+  <step name="second-step">
+    Instructions for this step.
+  </step>
+</steps>
+<edge-cases>
+  <case name="ambiguous-input">How to handle it.</case>
+</edge-cases>
+```
+
+
+
+
+
+Give exact shell commands to copy-paste, not descriptions of what to run. Smaller models copy verbatim; they struggle to construct commands from prose. Include placeholders like `<PLACEHOLDER>` only where the agent must substitute a value.
+
+Pass multi-line content (PR bodies, commit messages, JSON payloads) via temp files, not shell arguments. Write content using the Write tool, then pass `--body-file /tmp/foo.md` to the script. This avoids shell escaping failures that smaller models cannot debug.
+
+When the command produces formatted output (markdown, JSON, reports), show the exact template line-by-line with placeholders. Include blank lines and heading levels explicitly. Example: show `## Accomplishments {day_label}` not "add a heading for accomplishments."
+
+Spell out parallel tool calls: "Run both scripts **in parallel** (two Shell tool calls in one message)." Smaller models default to sequential unless explicitly told otherwise.
+
+When the agent must categorize or choose between options, use a numbered priority list — not prose. Example: "1. If X → do A. 2. If Y → do B. 3. Otherwise → do C." Smaller models follow numbered sequences reliably; they lose track of nested if/else prose.
+
+Duplicate critical rules from cross-referenced files as top-level `` tags. Smaller models skip "Read file X now" instructions despite explicit language. One-liner guardrails (e.g., `commit-script`, `changelog-required`) catch the failure mode where the cross-read is skipped entirely.
+
+Every action needs an explicit instruction. Never rely on "follow best practices" or "use appropriate patterns." If the agent should run `git push -u origin HEAD`, write that exact command — don't say "push the branch."
+
+Where possible, design steps so each step is ONE tool call. Smaller models lose track of multi-tool steps. If a step requires multiple calls, break it into sub-steps with explicit sequencing ("After step 2a completes, run step 2b").
+
+
+
+When revising an existing command, **every item below is mandatory** — not a suggestion. Older commands may predate current best practices; touching a command is an opportunity to bring it up to spec.
+
+1. Read the full file before making changes
+2. Check for duplicated logic across other commands — consolidate if found
+3. **Check behavioral dependencies**: Search for other commands, skills, and rules that perform similar operations or share domain overlap with the one being edited. If command A has a step that is a lightweight version of command B's core behavior (e.g., `/pr-land` addressing comments vs `/pr-address`), verify that A's step is consistent with B's rules — missing rules in A are likely bugs.
+ - Extract domain-specific verbs and nouns from the step being edited (e.g., a step about handling PR comments yields: `comment`, `reply`, `resolve`, `address`, `fixup`, `thread`)
+ - Search each term across commands, skills, and rules:
+ ```bash
+ rg -l "" ~/.cursor/skills/*/SKILL.md ~/.cursor/rules/*.mdc
+ ```
+ - Read any hits that share domain overlap and check for consistency
+ - If overlap is found, evaluate whether to consolidate per the `dry` principle: can A reference B's rules or a shared file instead of reimplementing? Propose consolidation to the user when the shared logic is non-trivial.
+4. **Check dependent callers before any script/command change**: Before adding, updating, renaming, or removing any command, skill, script, step ID, flag, or output contract, search for direct callers/references and update them in the same change.
+ - Search by skill name, script filename, flag names, and any removed/renamed identifiers:
+ ```bash
+ rg -n "" ~/.cursor/skills ~/.cursor/rules
+ ```
+ - Do not add/update/remove script behavior until caller impacts are audited and required updates are planned.
+ - Do not delete or rename a referenced target until all callers are updated.
+ - In the final response, list which callers were updated.
+5. Verify step ordering matches the agent's decision flow
+6. Ensure examples are brief and generic (no real repo names, PR numbers, or user data)
+7. Check that escape hatches exist for ambiguous cases
+8. Confirm companion scripts match the `.md` expectations
+9. Convert markdown-structured commands to XML format (this is the most commonly skipped item — `##` headers and bullet lists must become `<goal>`, `<rules>`, `<steps>` tags)
+10. Apply all current authoring principles (rules-first, scripts-over-reasoning, batch-tool-calls, etc.) even if the original command predates them
+11. If the command may run on smaller/faster models, apply `` — especially `file-over-args`, `inline-guardrails`, and `verbatim-bash`
+
+
+
+After any authoring change (skills/scripts/rules), ask:
+
+> Run `/convention-sync` to sync files and update PR conventions/description?
+
+When `.cursor/rules/*.mdc` files changed, run:
+
+```bash
+~/.cursor/skills/convention-sync/scripts/generate-claude-md.sh
+```
+
+This keeps `~/.claude/CLAUDE.md` aligned with always-apply rules via the existing convention-sync flow.
+
+
+
+Skill-specific scripts go in `/scripts/`. Shared scripts go in `~/.cursor/skills/` top-level. Conventions:
+
+- `set -euo pipefail` at the top
+- Parse args with a `while/case` loop
+- Output structured, one-line-per-action summaries the agent can parse
+- Exit code 0 = success, 1 = error, 2 = needs user input
+- **Naming**: Name scripts by what they DO, not which command they serve. Scripts will likely be reused by multiple commands. Prefer descriptive, domain-scoped names over command-coupled names:
+ - `lint-commit.sh` — good (describes the operation)
+ - `asana-task-update.sh` — good (describes the operation)
+ - `github-pr-comments.sh` — good (describes the domain + operation)
+ - `pr-address.sh` — bad (coupled to the `/pr-address` command name)
+- Before creating a new script, check if an existing script already covers the operation. Extend it with a new subcommand rather than creating a duplicate.
+- **GitHub API**: Default to `gh api` and `gh api graphql` — never raw `curl`. See `gh-cli-over-curl` principle.
+
diff --git a/.cursor/skills/changelog/SKILL.md b/.cursor/skills/changelog/SKILL.md
new file mode 100644
index 0000000..9a48ec3
--- /dev/null
+++ b/.cursor/skills/changelog/SKILL.md
@@ -0,0 +1,10 @@
+---
+name: changelog
+description: Update CHANGELOG.md(s) with new entries describing changes made in the repo(s). Use when the user wants to update changelogs.
+metadata:
+ author: j0ntz
+---
+
+# changelog
+
+Update the CHANGELOG.md(s) with at most a few new entries describing the changes made in the repo(s). Documented changes should ONLY describe the final state of all the current changes, not the journey, and follow the existing patterns (reading only the first ~100 lines of each changelog to minimize context) for length and formatting, including no word wrapping.
\ No newline at end of file
diff --git a/.cursor/skills/chat-audit/SKILL.md b/.cursor/skills/chat-audit/SKILL.md
new file mode 100644
index 0000000..89411e3
--- /dev/null
+++ b/.cursor/skills/chat-audit/SKILL.md
@@ -0,0 +1,102 @@
+---
+name: chat-audit
+description: Analyze a Cursor chat export to identify inefficiencies, rule violations, and wasted tool calls. Use when the user wants to audit a chat session.
+compatibility: Requires node.
+metadata:
+ author: j0ntz
+---
+
+Analyze current chat or provided Cursor chat export to identify inefficiencies, rule violations, and wasted tool calls against the invoked command's workflow.
+
+
+Use `scripts/cursor-chat-extract.js` to parse the export. Do NOT parse the raw JSON inline — it is deeply nested and will consume excessive context.
+Default to `--tools-only` mode. Only omit the flag if the user asks for full assistant message analysis.
+Do NOT read the export JSON file directly. All data comes from the script output.
+Keep the final report under 50 lines. Use a numbered list for findings, not verbose paragraphs.
+
+
+
+If no chat export file is provided, assume the user is asking for a chat audit of the current chat session.
+
+If chat export file is provided, run the companion script on the user-provided export file:
+
+```bash
+scripts/cursor-chat-extract.js <export-file> --tools-only
+```
+
+Parse the JSON output. Note the `invokedCommand`, `stats`, and `sequence` fields.
+
+If `invokedCommand` is null, check the first user message for a command reference and ask the user which command was intended.
+
+
+
+If `invokedCommand` is identified, read the command file:
+
+```bash
+Read ~/.cursor/skills/<command>/SKILL.md
+```
+
+Extract the command's:
+- **Rules** (the `<rules>` tags)
+- **Steps** (the `<step>` tags — just names and key instructions, not full content)
+- **Companion scripts** referenced (filenames only)
+
+
+
+Walk through the `sequence` array and check each tool call against the command's prescribed workflow:
+
+
+For each rule in the command, check if the tool sequence violates it:
+- `commit-script`: Did the agent use raw `git add` + `git commit` instead of `lint-commit.sh`?
+- `use-companion-script`: Did the agent call `gh`, `curl`, or API tools directly instead of the prescribed script?
+- `no-script-bypass`: Did the agent fall back to raw tools after a script error?
+- Cross-reference rules: Did the agent read files referenced with "Read ... now (do NOT skip)"?
+
+
+
+Flag calls that consumed context without contributing to the workflow:
+- **Errors followed by retries** — the error was avoidable (e.g., reading a directory as a file)
+- **Redundant reads** — same information gathered multiple times (e.g., `git status` called twice)
+- **Unnecessary exploration** — reading code files when the user said the change was already done
+- **Sleep-based polling** — `sleep N && tail` instead of using `block_until_ms`
+- **Sequential calls that could be parallel** — independent operations run one at a time
+
+
+
+For each step in the command, check if the tool sequence includes the corresponding action:
+- Missing verification step
+- Missing CHANGELOG entry
+- Missing Asana linking
+- Skipped cross-file reads (e.g., never read `im.md` when step 3 requires it)
+
+
+
+
+Output a structured report:
+
+```
+## Chat Audit: /<command>
+
+**Stats:** N tool calls (M errors, K cancelled) across L user messages
+
+### Rule Violations
+1. [rule-id] Description of what happened
+
+### Wasted Tool Calls
+1. [#N] tool_name — why it was wasteful
+
+### Skipped Steps
+1. [step N] What was skipped
+
+### Recommendations
+1. Specific change to the command file that would prevent this
+```
+
+If the user hasn't asked for command file changes, stop here. If they ask, apply the recommendations using the `/author` skill.
+
+
+
+Ask the user which command was being executed, or analyze without a reference command (just flag errors and wasted calls).
+The conversation may span multiple turns. The first user message typically invokes the command; subsequent ones are follow-ups. Analyze the full sequence but weight findings toward the initial command execution.
+If no `/command` was invoked, still analyze for general inefficiencies (redundant reads, errors, unnecessary exploration) but skip the rule/step compliance checks.
+
diff --git a/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js b/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js
new file mode 100755
index 0000000..6908d20
--- /dev/null
+++ b/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js
@@ -0,0 +1,142 @@
+#!/usr/bin/env node
+// cursor-chat-extract.js — Extract structured conversation data from Cursor chat export JSON.
+// Usage: ./cursor-chat-extract.js [--tools-only]
+// Output: Compact JSON summary of messages and tool calls for agent analysis.
+
+const fs = require("fs");
+const path = require("path");
+
+const file = process.argv[2];
+const toolsOnly = process.argv.includes("--tools-only");
+
+if (!file) {
+ console.error("Usage: cursor-chat-extract.js [--tools-only]");
+ process.exit(1);
+}
+
+let data;
+try {
+ data = JSON.parse(fs.readFileSync(path.resolve(file), "utf8"));
+} catch (e) {
+ console.error(`Failed to parse ${file}: ${e.message}`);
+ process.exit(1);
+}
+
+const composerId = Object.keys(data.bubbles || {})[0];
+if (!composerId) {
+ console.error("No conversation found in export.");
+ process.exit(1);
+}
+
+const entries = data.bubbles[composerId] || [];
+
+function extractText(val) {
+ if (val.text && typeof val.text === "string") return val.text;
+ if (!val.richText) return "";
+ try {
+ const rt = JSON.parse(val.richText);
+ return walkLexical(rt.root);
+ } catch {
+ return "";
+ }
+}
+
+function walkLexical(node) {
+ let out = "";
+ if (node.text) out += node.text;
+ if (node.children) for (const c of node.children) out += walkLexical(c);
+ return out;
+}
+
+function parseToolData(raw) {
+ if (!raw) return null;
+ const d = typeof raw === "string" ? JSON.parse(raw) : raw;
+ if (!d.name) return null;
+
+ const result = { name: d.name, status: d.status || "unknown" };
+
+ try {
+ const params = JSON.parse(d.params || "{}");
+ if (params.command) {
+ result.arg = params.command.length > 150
+ ? params.command.substring(0, 150) + "..."
+ : params.command;
+ } else if (params.targetFile) {
+ result.arg = params.targetFile;
+ } else if (params.globPattern) {
+ result.arg = `glob: ${params.globPattern}`;
+ } else if (params.pattern) {
+ result.arg = `pattern: ${params.pattern}`;
+ } else if (params.query) {
+ result.arg = `query: ${params.query.substring(0, 100)}`;
+ }
+ } catch {
+ // Ignore parse failures
+ }
+
+ return result;
+}
+
+function truncate(text, max) {
+ if (!text || text.length <= max) return text;
+ return text.substring(0, max) + "...";
+}
+
+const messages = [];
+let totalTools = 0;
+let errors = 0;
+let cancellations = 0;
+
+for (const entry of entries) {
+ let val;
+ try {
+ val = JSON.parse(entry.value);
+ } catch {
+ continue;
+ }
+
+ const type = val.type === 1 ? "user" : "assistant";
+ const text = extractText(val);
+
+ const tool = parseToolData(val.toolFormerData);
+ if (tool) {
+ totalTools++;
+ if (tool.status === "error") errors++;
+ if (tool.status === "cancelled") cancellations++;
+ messages.push({ type: "tool", ...tool });
+ continue;
+ }
+
+ if (!text.trim()) continue;
+
+ if (type === "user") {
+ messages.push({ type: "user", text: text.trim() });
+ } else if (!toolsOnly) {
+ messages.push({
+ type: "assistant",
+ text: truncate(text.trim(), 200),
+ });
+ }
+}
+
+// Detect invoked command from first user message
+let invokedCommand = null;
+const firstUser = messages.find((m) => m.type === "user");
+if (firstUser) {
+ const match = firstUser.text.match(/^\/([\w-]+)/);
+ if (match) invokedCommand = match[1];
+}
+
+const output = {
+ invokedCommand,
+ stats: {
+ messages: messages.filter((m) => m.type === "user").length,
+ assistantTurns: messages.filter((m) => m.type === "assistant").length,
+ toolCalls: totalTools,
+ errors,
+ cancellations,
+ },
+ sequence: messages,
+};
+
+console.log(JSON.stringify(output, null, 2));
diff --git a/.cursor/skills/convention-sync/SKILL.md b/.cursor/skills/convention-sync/SKILL.md
new file mode 100644
index 0000000..4ec3b9b
--- /dev/null
+++ b/.cursor/skills/convention-sync/SKILL.md
@@ -0,0 +1,85 @@
+---
+name: convention-sync
+description: Sync cursor files between ~/.cursor/ and the edge-dev-agents repo, commit, push, and update PR description. Use when the user wants to sync conventions.
+compatibility: Requires git, gh.
+metadata:
+ author: j0ntz
+---
+
+Sync cursor files between `~/.cursor/` and the `edge-dev-agents` repo, commit, push, and update PR description from README. Also maintains cross-tool compatibility: symlinks `~/.claude/skills` → `~/.cursor/skills` and generates `~/.claude/CLAUDE.md` from always-apply rules.
+
+
+`~/.cursor/` is the canonical source. Edits happen locally; the repo is the distribution copy. Default direction is `user-to-repo`. Use `--repo-to-user` only for onboarding or pulling changes authored by others. The script does not detect bidirectional conflicts — whichever direction you run overwrites the other side.
+Use `~/.cursor/skills/convention-sync/scripts/convention-sync.sh` for diffing and syncing. Do NOT manually diff or copy files.
+Always run without `--stage` first to show the summary. Only stage/commit after user confirms.
+If the script fails, report the error and STOP.
+`.cursor/README.md` is the source of truth for documentation. The script mirrors it to the PR description automatically.
+Every run ensures `~/.claude/skills` symlinks to `~/.cursor/skills` and regenerates `~/.claude/CLAUDE.md` from `alwaysApply: true` rules. This enables OpenCode and Claude Code to discover skills and rules without separate config.
+For user-to-repo sync, target the `edge-dev-agents` checkout. Do NOT assume the current repo is correct just because it contains a `.cursor/` folder. Let the companion script resolve and validate the repo path.
+
+
+
+Use the companion script's default repo resolution first. It targets the `edge-dev-agents` checkout and fails if the resolved or provided repo is not actually `edge-dev-agents`.
+
+Run the sync script in dry-run mode:
+
+```bash
+~/.cursor/skills/convention-sync/scripts/convention-sync.sh
+```
+
+Parse the JSON output and extract `repoDir`. Then check for an open PR:
+
+```bash
+cd <repoDir> && gh pr view --json number,url --jq '{number: .number, url: .url}' 2>/dev/null || echo '{}'
+```
+
+Use the resolved repo path from the script for subsequent git and PR commands. If the script reports `total` as 0, report "Everything is in sync" and stop.
+
+
+
+Show the user a concise summary including PR update status:
+
+```
+Sync summary (user → repo):
+ New: file1, file2
+ Modified: file3, file4
+ Deleted: file5
+ Ignored: file6, file7 (via .syncignore)
+
+PR #N: Will update description from README.md (or "No open PR")
+
+Commit and push? [y/N]
+```
+
+If `ignored` array is empty, omit the Ignored line.
+
+If the user provided a commit message in their prompt, skip the confirmation and proceed.
+
+
+
+Run the script with `--commit`:
+
+```bash
+~/.cursor/skills/convention-sync/scripts/convention-sync.sh --commit -m "<commit message>"
+```
+
+Then push:
+
+```bash
+cd <repoDir> && git push origin HEAD
+```
+
+If an open PR exists, update the PR description from README:
+
+```bash
+cd <repoDir> && gh pr edit --body-file .cursor/README.md
+```
+
+
+
+If the user says "pull from repo" or "update my local", run with `--repo-to-user --stage` instead. No git operations needed.
+If the current directory contains a `.cursor/` folder but is not the `edge-dev-agents` checkout, do not sync into that repo. Fall back to `~/git/edge-dev-agents` or ask for the correct repo path.
+Reuse the `repoDir` value from the script's JSON output for the PR query, commit run, push, and PR edit steps.
+To permanently exclude files, add glob patterns to `~/.cursor/.syncignore` (one per line, `#` comments). The script skips matching entries and reports them in the `ignored` array. To exclude ad-hoc, remove files from staging with `git reset HEAD .cursor/` before committing.
+If `.cursor/README.md` doesn't exist, skip PR description update and warn the user.
+
diff --git a/.cursor/skills/convention-sync/scripts/convention-sync.sh b/.cursor/skills/convention-sync/scripts/convention-sync.sh
new file mode 100755
index 0000000..2cd56c1
--- /dev/null
+++ b/.cursor/skills/convention-sync/scripts/convention-sync.sh
@@ -0,0 +1,259 @@
+#!/usr/bin/env bash
+# convention-sync.sh — Sync ~/.cursor/ files with the edge-dev-agents repo.
+# Usage: ./convention-sync.sh [repo-dir] [--stage] [--commit -m "message"] [--repo-to-user]
+# Compares ~/.cursor/{skills,rules,scripts} against /.cursor/ and
+# outputs a structured JSON summary of new, modified, and deleted files.
+# With --stage: copies changed files and stages them in git (or copies to user dir with --repo-to-user).
+# With --commit: stages + commits (requires -m). Only valid for user-to-repo direction.
+#
+# Sync model: ~/.cursor/ is canonical. Default direction (user-to-repo) copies local
+# files into the repo. --repo-to-user is for onboarding or pulling others' changes.
+# No bidirectional conflict detection — the chosen direction overwrites the other side.
+
+set -euo pipefail
+
+REPO_DIR=""
+DO_STAGE=false
+DO_COMMIT=false
+COMMIT_MSG=""
+DIRECTION="user-to-repo"
+
+resolve_default_repo_dir() {
+ local cwd remote_url default_repo
+
+ cwd="$(pwd)"
+ if [[ "$(basename "$cwd")" == "edge-dev-agents" ]]; then
+ printf '%s\n' "$cwd"
+ return 0
+ fi
+
+ if git -C "$cwd" rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+ remote_url="$(git -C "$cwd" remote get-url origin 2>/dev/null || true)"
+ if [[ "$remote_url" == *"edge-dev-agents"* ]]; then
+ printf '%s\n' "$cwd"
+ return 0
+ fi
+ fi
+
+ default_repo="$HOME/git/edge-dev-agents"
+ if [[ -d "$default_repo/.git" || -f "$default_repo/.git" ]]; then
+ printf '%s\n' "$default_repo"
+ return 0
+ fi
+
+ return 1
+}
+
+validate_repo_dir() {
+ local repo_dir remote_url
+ repo_dir="$1"
+
+ if [[ ! -d "$repo_dir/.cursor" ]]; then
+ echo "ERROR: Repo directory must contain .cursor/: $repo_dir" >&2
+ return 1
+ fi
+
+ if [[ "$(basename "$repo_dir")" == "edge-dev-agents" ]]; then
+ return 0
+ fi
+
+ if git -C "$repo_dir" rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+ remote_url="$(git -C "$repo_dir" remote get-url origin 2>/dev/null || true)"
+ if [[ "$remote_url" == *"edge-dev-agents"* ]]; then
+ return 0
+ fi
+ fi
+
+ echo "ERROR: Repo directory does not appear to be the edge-dev-agents checkout: $repo_dir" >&2
+ return 1
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --stage) DO_STAGE=true; shift ;;
+ --commit) DO_COMMIT=true; DO_STAGE=true; shift ;;
+ -m) COMMIT_MSG="$2"; shift 2 ;;
+ --repo-to-user) DIRECTION="repo-to-user"; shift ;;
+ *) REPO_DIR="$1"; shift ;;
+ esac
+done
+
+if [[ -z "$REPO_DIR" ]]; then
+ if ! REPO_DIR="$(resolve_default_repo_dir)"; then
+ echo "ERROR: Could not resolve the edge-dev-agents repo. Run with an explicit repo path." >&2
+ echo "Usage: convention-sync.sh [repo-dir] [--stage] [--commit -m \"message\"]" >&2
+ exit 1
+ fi
+fi
+
+if ! validate_repo_dir "$REPO_DIR"; then
+ exit 1
+fi
+
+if [[ "$DO_COMMIT" == true && -z "$COMMIT_MSG" ]]; then
+ echo "ERROR: --commit requires -m \"message\"" >&2
+ exit 1
+fi
+
+USER_DIR="$HOME/.cursor"
+REPO_CURSOR="$REPO_DIR/.cursor"
+DIRS="skills rules scripts"
+SYNCIGNORE="$USER_DIR/.syncignore"
+
+# Load ignore patterns from .syncignore (one glob per line, # comments, blank lines skipped)
+ignore_patterns=()
+if [[ -f "$SYNCIGNORE" ]]; then
+ while IFS= read -r line; do
+ line="${line%%#*}" # strip comments
+ line="${line%"${line##*[![:space:]]}"}" # strip trailing whitespace
+ [[ -z "$line" ]] && continue
+ ignore_patterns+=("$line")
+ done < "$SYNCIGNORE"
+fi
+
+is_ignored() {
+ local entry="$1"
+ for pattern in "${ignore_patterns[@]+"${ignore_patterns[@]}"}"; do
+ # shellcheck disable=SC2254
+ if [[ "$entry" == $pattern ]]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+new_json="[]"
+mod_json="[]"
+del_json="[]"
+ignored_json="[]"
+
+# Check README.md separately (single file, not a directory)
+if [[ -f "$USER_DIR/README.md" ]] && ! is_ignored "README.md"; then
+ if [[ ! -f "$REPO_CURSOR/README.md" ]]; then
+ new_json=$(echo "$new_json" | jq '. + ["README.md"]')
+ elif ! diff -q "$USER_DIR/README.md" "$REPO_CURSOR/README.md" >/dev/null 2>&1; then
+ mod_json=$(echo "$mod_json" | jq '. + ["README.md"]')
+ fi
+elif [[ -f "$REPO_CURSOR/README.md" ]] && ! is_ignored "README.md"; then
+ del_json=$(echo "$del_json" | jq '. + ["README.md"]')
+fi
+
+for dir in $DIRS; do
+ user_path="$USER_DIR/$dir"
+ repo_path="$REPO_CURSOR/$dir"
+
+ [[ -d "$user_path" ]] || continue
+
+ while IFS= read -r rel; do
+ [[ -z "$rel" ]] && continue
+ entry="$dir/$rel"
+ if is_ignored "$entry"; then
+ ignored_json=$(echo "$ignored_json" | jq --arg f "$entry" '. + [$f]')
+ continue
+ fi
+ repo_file="$repo_path/$rel"
+ if [[ ! -f "$repo_file" ]]; then
+ new_json=$(echo "$new_json" | jq --arg f "$entry" '. + [$f]')
+ elif ! diff -q "$user_path/$rel" "$repo_file" >/dev/null 2>&1; then
+ mod_json=$(echo "$mod_json" | jq --arg f "$entry" '. + [$f]')
+ fi
+ done < <(cd "$user_path" && find . -type f ! -name '.DS_Store' | sed 's|^\./||')
+
+ if [[ -d "$repo_path" ]]; then
+ while IFS= read -r rel; do
+ [[ -z "$rel" ]] && continue
+ entry="$dir/$rel"
+ is_ignored "$entry" && continue
+ user_file="$user_path/$rel"
+ if [[ ! -f "$user_file" ]]; then
+ del_json=$(echo "$del_json" | jq --arg f "$entry" '. + [$f]')
+ fi
+ done < <(cd "$repo_path" && find . -type f ! -name '.DS_Store' | sed 's|^\./||')
+ fi
+done
+
+total=$(echo "$new_json $mod_json $del_json" | jq -s '.[0] + .[1] + .[2] | length')
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Ensure ~/.claude/skills symlink points to ~/.cursor/skills
+CLAUDE_SKILLS="$HOME/.claude/skills"
+if [[ -L "$CLAUDE_SKILLS" ]]; then
+ link_target="$(readlink "$CLAUDE_SKILLS")"
+ if [[ "$link_target" != "$USER_DIR/skills" ]]; then
+ rm "$CLAUDE_SKILLS"
+ ln -s "$USER_DIR/skills" "$CLAUDE_SKILLS"
+ fi
+elif [[ ! -e "$CLAUDE_SKILLS" ]]; then
+ mkdir -p "$(dirname "$CLAUDE_SKILLS")"
+ ln -s "$USER_DIR/skills" "$CLAUDE_SKILLS"
+fi
+
+# Regenerate ~/.claude/CLAUDE.md from alwaysApply rules
+if [[ -x "$SCRIPT_DIR/generate-claude-md.sh" ]]; then
+ "$SCRIPT_DIR/generate-claude-md.sh" >/dev/null
+fi
+
+if [[ "$DO_STAGE" == true && "$total" -gt 0 ]]; then
+ all_copy=$(echo "$new_json $mod_json" | jq -sr '.[0] + .[1] | .[]')
+ all_del=$(echo "$del_json" | jq -r '.[]')
+
+ if [[ "$DIRECTION" == "user-to-repo" ]]; then
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ # README.md is at .cursor/ root, others are in subdirs
+ if [[ "$f" == "README.md" ]]; then
+ cp "$USER_DIR/$f" "$REPO_CURSOR/$f"
+ else
+ mkdir -p "$(dirname "$REPO_CURSOR/$f")"
+ cp "$USER_DIR/$f" "$REPO_CURSOR/$f"
+ fi
+ done <<< "$all_copy"
+
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ rm -f "$REPO_CURSOR/$f"
+ done <<< "$all_del"
+
+ cd "$REPO_DIR"
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ git add ".cursor/$f"
+ done <<< "$all_copy"
+
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ git rm -f --quiet ".cursor/$f" 2>/dev/null || true
+ done <<< "$all_del"
+
+ if [[ "$DO_COMMIT" == true ]]; then
+ git commit -m "$COMMIT_MSG"
+ fi
+ else
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ if [[ "$f" == "README.md" ]]; then
+ cp "$REPO_CURSOR/$f" "$USER_DIR/$f"
+ else
+ mkdir -p "$(dirname "$USER_DIR/$f")"
+ cp "$REPO_CURSOR/$f" "$USER_DIR/$f"
+ fi
+ done <<< "$all_copy"
+
+ while IFS= read -r f; do
+ [[ -z "$f" ]] && continue
+ rm -f "$USER_DIR/$f"
+ done <<< "$all_del"
+ fi
+fi
+
+jq -n \
+ --arg repoDir "$REPO_DIR" \
+ --argjson new "$new_json" \
+ --argjson modified "$mod_json" \
+ --argjson deleted "$del_json" \
+ --argjson ignored "$ignored_json" \
+ --argjson total "$total" \
+ --arg staged "$DO_STAGE" \
+ --arg committed "$DO_COMMIT" \
+ '{repoDir: $repoDir, total: $total, new: $new, modified: $modified, deleted: $deleted, ignored: $ignored, staged: ($staged == "true"), committed: ($committed == "true")}'
diff --git a/.cursor/skills/convention-sync/scripts/generate-claude-md.sh b/.cursor/skills/convention-sync/scripts/generate-claude-md.sh
new file mode 100755
index 0000000..3f793b8
--- /dev/null
+++ b/.cursor/skills/convention-sync/scripts/generate-claude-md.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+# generate-claude-md.sh — Generate ~/.claude/CLAUDE.md from alwaysApply .mdc rules.
+# Usage: ./generate-claude-md.sh [--dry-run]
+#
+# Reads all .mdc files in ~/.cursor/rules/ that have alwaysApply: true,
+# strips YAML frontmatter, and concatenates them into ~/.claude/CLAUDE.md.
+# Prints a JSON summary: {collected, skipped, output, dry_run}.
+
+set -euo pipefail
+
+RULES_DIR="$HOME/.cursor/rules"
+OUTPUT="$HOME/.claude/CLAUDE.md"
+DRY_RUN=false
+
+[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
+
+if [[ ! -d "$RULES_DIR" ]]; then
+  echo "ERROR: $RULES_DIR does not exist" >&2
+  exit 1
+fi
+
+mkdir -p "$(dirname "$OUTPUT")"
+
+collected=()
+skipped=()
+
+# Partition rule files by their alwaysApply frontmatter flag (checked in the
+# first 20 lines only, where frontmatter lives).
+for mdc in "$RULES_DIR"/*.mdc; do
+  [[ -f "$mdc" ]] || continue
+  base="$(basename "$mdc")"
+
+  if head -20 "$mdc" | grep -q '^alwaysApply: true'; then
+    collected+=("$base")
+  else
+    skipped+=("$base")
+  fi
+done
+
+# json_array ARGS... — print args as a JSON string array; [] when no args.
+# (printf over an empty array would emit one blank line, which jq would turn
+# into [""] instead of [].)
+json_array() {
+  if [[ $# -eq 0 ]]; then
+    echo '[]'
+  else
+    printf '%s\n' "$@" | jq -R . | jq -s .
+  fi
+}
+
+if [[ ${#collected[@]} -eq 0 ]]; then
+  # Nothing to generate — still report the real skipped list, output path,
+  # and actual dry-run flag (previously hardcoded to []/""/true).
+  # ${arr[@]+...} guards empty-array expansion under set -u on older bash.
+  jq -n \
+    --argjson skipped "$(json_array ${skipped[@]+"${skipped[@]}"})" \
+    --arg output "$OUTPUT" \
+    --arg dry_run "$DRY_RUN" \
+    '{collected: [], skipped: $skipped, output: $output, dry_run: ($dry_run == "true")}'
+  exit 0
+fi
+
+# Build the output with literal newlines so rule bodies never pass through
+# echo -e, which would expand backslash sequences (\n, \t, ...) that appear
+# verbatim inside rule text.
+nl=$'\n'
+content="# Global Rules${nl}${nl}"
+content+="# Auto-generated from ~/.cursor/rules/ (alwaysApply: true files only).${nl}"
+content+="# Do not edit manually. Re-generate via convention-sync.${nl}${nl}"
+
+for base in "${collected[@]}"; do
+  mdc="$RULES_DIR/$base"
+  name="${base%.mdc}"
+
+  # Strip YAML frontmatter (everything between first --- and second ---)
+  body=$(awk '
+    BEGIN { in_front=0; past_front=0 }
+    /^---$/ {
+      if (!past_front) {
+        if (in_front) { past_front=1; next }
+        else { in_front=1; next }
+      }
+    }
+    past_front { print }
+  ' "$mdc")
+
+  # Trim leading blank lines (delete up to the first non-empty line)
+  body=$(printf '%s\n' "$body" | sed '/./,$!d')
+
+  content+="---${nl}${nl}"
+  content+="## ${name}${nl}${nl}"
+  content+="${body}${nl}${nl}"
+done
+
+if [[ "$DRY_RUN" != true ]]; then
+  # printf '%s' (not echo -e): write the content verbatim, no escape expansion.
+  printf '%s' "$content" > "$OUTPUT"
+fi
+
+# Output JSON summary
+jq -n \
+  --argjson collected "$(json_array "${collected[@]}")" \
+  --argjson skipped "$(json_array ${skipped[@]+"${skipped[@]}"})" \
+  --arg output "$OUTPUT" \
+  --arg dry_run "$DRY_RUN" \
+  '{collected: $collected, skipped: $skipped, output: $output, dry_run: ($dry_run == "true")}'
diff --git a/.cursor/skills/dep-pr/SKILL.md b/.cursor/skills/dep-pr/SKILL.md
new file mode 100644
index 0000000..10f02fb
--- /dev/null
+++ b/.cursor/skills/dep-pr/SKILL.md
@@ -0,0 +1,103 @@
+---
+name: dep-pr
+description: Create a dependent Asana task in another repo and run the full PR workflow for it. Use when the user needs cross-repo dependent task creation.
+compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration.
+metadata:
+ author: j0ntz
+---
+
+Create a dependent Asana task in another repo and run the full PR workflow for it — automating cross-repo task creation, dependency linking, implementation, and PR creation.
+
+
+A parent Asana task URL is always required. It provides context, project placement, and dependency linking.
+Always check if a dependent task already exists before creating one. The script handles this — respect the `CREATED: false` output.
+Asana scripts can take up to 90s. Always set `block_until_ms: 120000`.
+Do NOT begin implementation until the dependent task is created and linked.
+The dependent task MUST be created in the same project(s) as the parent task, including release-version project tags (for example `4.46.0`). The script handles this automatically by copying all parent project memberships.
+The dependent task is automatically assigned to the current user (resolved via `asana-whoami.sh`). Do NOT hardcode a user GID — omit `--assignee` to let the script auto-resolve.
+
+
+
+The Edge repos have a layered dependency structure:
+
+```
+core (lowest — types, APIs, runtime)
+ ↑
+accb / exch (middle — currency and exchange plugins, depend on core)
+ ↑
+gui (highest — UI, depends on all others)
+```
+
+**Dependency direction rule**: When creating a dependent task for a repo at a **lower or equal** level, the new task **blocks** the parent task. This is the standard case — e.g., an `accb:` task blocks the `gui:` parent because the plugin change must land first.
+
+If the target repo is at a **higher** level than the parent (e.g., creating a `gui:` task from an `accb:` parent), this is unusual. Ask the user to confirm before proceeding — the dependency direction may need to be reversed (parent blocks the new task instead).
+
+| Level | Repos |
+|-------|-------|
+| 3 (highest) | `gui` |
+| 2 | `accb`, `exch` |
+| 1 (lowest) | `core` |
+
+
+
+
+
+| Prefix | Repository | Directory | Branch from |
+|--------|-----------|-----------|-------------|
+| `gui` | `edge-react-gui` | `~/git/edge-react-gui` | `develop` |
+| `exch` | `edge-exchange-plugins` | `~/git/edge-exchange-plugins` | `master` |
+| `accb` | `edge-currency-accountbased` | `~/git/edge-currency-accountbased` | `master` |
+| `core` | `edge-core-js` | `~/git/edge-core-js` | `master` |
+
+
+
+
+The user provides a parent Asana task URL and a target repo (as a prefix or full name).
+
+1. **Extract the parent task GID** from the URL.
+2. **Fetch parent task context** using `asana-get-context.sh` to understand what work is needed.
+3. **Determine the target repo** from the user's input. If not specified, ask.
+4. **Validate dependency direction** using the hierarchy table. If the target is at a higher level than the parent, warn and ask for confirmation.
+
+
+
+Derive the dependent task name from the parent: `<target-prefix>: <task description>`.
+
+If the parent task name already has a prefix (e.g. `gui: Some feature`), strip it and replace with the target prefix. If no prefix, prepend the target prefix.
+
+```bash
+~/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh \
+ --parent <parent-task-gid> \
+ --name "<target-prefix>: <task name>" \
+ --notes "<implementation notes>"
+```
+
+The script:
+- Checks if a matching dependency already exists (by name) — if so, outputs `CREATED: false` and the existing GID
+- Creates the task in all parent project memberships (including release-version tags)
+- Copies priority, status, and `Planned` from the parent
+- Assigns to the current user (auto-resolved via `asana-whoami.sh`)
+- Sets the new task as a blocking dependency of the parent
+
+If `CREATED: false`, report the existing task to the user and continue with the existing GID.
+
+
+
+Delegate to the `pr-create.md` workflow using the **new** (or existing) task URL:
+
+1. `cd` to the target repo directory (see repo-map).
+2. **Read `~/.cursor/skills/pr-create/SKILL.md` now** (use the Read tool — do NOT skip this). Then follow its steps 1-6 (push, verify, build PR description, create PR, optional Asana updates, report).
+
+The Asana task context from step 1 provides the implementation requirements. The agent already has full context from the parent task.
+
+
+
+Display both the new Asana task and the PR as clickable links. Note the dependency relationship.
+
+
+
+The script detects this. Report: "Found existing dependent task: [link]. Continuing with PR workflow." Then proceed to step 3.
+The script falls back to the first available project. Warn the user if the placement looks wrong.
+Step 3 delegates to `pr-create.md` which handles branch state assessment.
+Ask: "Creating a [gui] task from a [core] parent is unusual — the dependency direction would be reversed. Confirm? (yes/no)"
+
diff --git a/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh b/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh
new file mode 100755
index 0000000..968627c
--- /dev/null
+++ b/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh
@@ -0,0 +1,245 @@
+#!/usr/bin/env bash
+# asana-create-dep-task.sh
+# Create a dependent Asana task that blocks a parent task.
+# Checks for existing dependencies first to avoid duplicates.
+#
+# Usage:
+# asana-create-dep-task.sh --parent <parent_gid> --name "task name" [--notes "description"] [--assignee <user_gid>]
+#
+# If --assignee is omitted, the task is assigned to the current user
+# (resolved via asana-whoami.sh).
+#
+# Requires env var: ASANA_TOKEN
+#
+# Output:
+# TASK_GID: <gid>
+# TASK_URL: <url>
+# CREATED: true|false (false if task already existed)
+# ASSIGNED_TO: <user_gid>
+# FIELDS_SET: priority=<name>, status=<name>, planned=<name>, reviewer=<name>, implementor=<name>
+# DEPENDENCY_SET: <new_gid> blocks <parent_gid>
+#
+# Exit codes: 0 = success, 1 = error
+set -euo pipefail
+
+PARENT_GID=""
+TASK_NAME=""
+TASK_NOTES=""
+ASSIGNEE_GID=""
+
+# Parse command-line flags; unknown flags are a hard error.
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --parent) PARENT_GID="$2"; shift 2 ;;
+ --name) TASK_NAME="$2"; shift 2 ;;
+ --notes) TASK_NOTES="$2"; shift 2 ;;
+ --assignee) ASSIGNEE_GID="$2"; shift 2 ;;
+ *) echo "Unknown flag: $1" >&2; exit 1 ;;
+ esac
+done
+
+# --parent and --name are mandatory.
+if [[ -z "$PARENT_GID" || -z "$TASK_NAME" ]]; then
+ echo "Usage: asana-create-dep-task.sh --parent --name [--notes ] [--assignee ]" >&2
+ exit 1
+fi
+
+if [[ -z "${ASANA_TOKEN:-}" ]]; then
+ echo "Error: ASANA_TOKEN not set" >&2
+ exit 1
+fi
+
+API="https://app.asana.com/api/1.0"
+AUTH="Authorization: Bearer $ASANA_TOKEN"
+
+# Absolute directory of this script, used to locate sibling skill scripts.
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
+# Auto-resolve current user GID (used for assignee and implementor)
+# Failure is tolerated (|| true): downstream code handles an empty GID.
+CURRENT_USER_GID=$("$SCRIPT_DIR/../../asana-whoami.sh" 2>/dev/null || true)
+
+# Auto-resolve assignee to current user if not provided
+if [[ -z "$ASSIGNEE_GID" ]]; then
+ ASSIGNEE_GID="$CURRENT_USER_GID"
+fi
+
+# Phase 1: Check if a dependency with a matching name already exists
+# (case-insensitive, whitespace-trimmed name match against the parent's
+# current dependencies; limit=100 assumed sufficient).
+# NOTE(review): $TASK_NAME is spliced into the Python source through a
+# triple-quoted string — a name containing ''' or backslash escapes would
+# break or alter the generated script. Consider passing it via an env var.
+existing=$(curl -s "$API/tasks/$PARENT_GID/dependencies?opt_fields=name&limit=100" \
+ -H "$AUTH" | python3 -c "
+import sys, json
+data = json.load(sys.stdin).get('data', [])
+target = '''$TASK_NAME'''
+for dep in data:
+ if dep.get('name', '').strip().lower() == target.strip().lower():
+ print(dep['gid'])
+ sys.exit(0)
+print('')
+")
+
+# Duplicate found: report the existing task and stop without creating one.
+if [[ -n "$existing" ]]; then
+ echo "TASK_GID: $existing"
+ echo "TASK_URL: https://app.asana.com/0/0/$existing"
+ echo "CREATED: false"
+ exit 0
+fi
+
+# Phase 2: Get parent task's project and custom fields to copy
+parent_info=$(curl -s "$API/tasks/$PARENT_GID?opt_fields=workspace.gid,memberships.project.gid,memberships.project.name,custom_fields.gid,custom_fields.enum_value.gid,custom_fields.enum_value.name,custom_fields.people_value.gid,custom_fields.people_value.name" \
+ -H "$AUTH")
+
+# Parse workspace gid, comma-joined project gids, and one
+# "fieldGid:valueGid:name" triple per copied field ("::" when unset) out of
+# the parent task JSON. Fields are TAB-separated and read with IFS=$'\t' so
+# that display names containing spaces (e.g. a status of "In Progress") cannot
+# shift the positional fields, and an empty project list stays an empty field
+# instead of collapsing under whitespace splitting.
+IFS=$'\t' read -r WORKSPACE_GID PROJECT_GIDS PRIORITY_INFO STATUS_INFO PLANNED_INFO REVIEWER_INFO < <(echo "$parent_info" | python3 -c "
+import sys, json
+data = json.load(sys.stdin)['data']
+ws = data.get('workspace', {}).get('gid', '')
+
+# Collect all parent projects (including release-version projects like 4.46.0)
+projects = []
+for m in data.get('memberships', []):
+    p = m.get('project', {})
+    gid = p.get('gid', '')
+    if gid:
+        projects.append(gid)
+if not projects and data.get('memberships'):
+    projects.append(data['memberships'][0]['project']['gid'])
+proj_str = ','.join(projects)
+
+# Field GIDs (stable known fields)
+ENUM_FIELDS = {
+    '795866930204488': 'priority',
+    '1190660107346181': 'status',
+}
+PEOPLE_FIELDS = {
+    '1203334388004673': 'reviewer',
+}
+
+enum_results = {}
+people_results = {}
+
+for f in data.get('custom_fields', []):
+    fgid = f['gid']
+    if fgid in ENUM_FIELDS and f.get('enum_value'):
+        label = ENUM_FIELDS[fgid]
+        enum_results[label] = (fgid, f['enum_value']['gid'], f['enum_value'].get('name', ''))
+    # 'Planned' is workspace-specific, so detect by field name:
+    if f.get('name') == 'Planned' and f.get('enum_value'):
+        enum_results['planned'] = (
+            fgid,
+            f['enum_value']['gid'],
+            f['enum_value'].get('name', '')
+        )
+    if fgid in PEOPLE_FIELDS:
+        label = PEOPLE_FIELDS[fgid]
+        pv = f.get('people_value', [])
+        if pv:
+            people_results[label] = (fgid, pv[0]['gid'], pv[0].get('name', ''))
+
+def fmt_enum(key):
+    if key in enum_results:
+        return ':'.join(enum_results[key])
+    return '::'
+
+def fmt_people(key):
+    if key in people_results:
+        return ':'.join(people_results[key])
+    return '::'
+
+# Tab-join so multi-word names cannot break the shell-side field split.
+print('\t'.join([ws, proj_str, fmt_enum('priority'), fmt_enum('status'), fmt_enum('planned'), fmt_people('reviewer')]))
+")
+
+# Split each "fieldGid:valueGid:name" triple. A name containing ':' would be
+# truncated here — display-only impact, since only *_NAME is cosmetic.
+PRIORITY_FIELD=$(echo "$PRIORITY_INFO" | cut -d: -f1)
+PRIORITY_ENUM=$(echo "$PRIORITY_INFO" | cut -d: -f2)
+PRIORITY_NAME=$(echo "$PRIORITY_INFO" | cut -d: -f3)
+STATUS_FIELD=$(echo "$STATUS_INFO" | cut -d: -f1)
+STATUS_ENUM=$(echo "$STATUS_INFO" | cut -d: -f2)
+STATUS_NAME=$(echo "$STATUS_INFO" | cut -d: -f3)
+PLANNED_FIELD=$(echo "$PLANNED_INFO" | cut -d: -f1)
+PLANNED_ENUM=$(echo "$PLANNED_INFO" | cut -d: -f2)
+PLANNED_NAME=$(echo "$PLANNED_INFO" | cut -d: -f3)
+REVIEWER_FIELD=$(echo "$REVIEWER_INFO" | cut -d: -f1)
+REVIEWER_GID=$(echo "$REVIEWER_INFO" | cut -d: -f2)
+REVIEWER_NAME=$(echo "$REVIEWER_INFO" | cut -d: -f3)
+
+# Auto-resolve implementor to current user
+IMPLEMENTOR_FIELD="1203334386796983"
+IMPLEMENTOR_GID="$CURRENT_USER_GID"
+IMPLEMENTOR_NAME="current user"
+
+# Phase 3: Create the task
+NOTES_JSON=$(python3 -c "import json; print(json.dumps('''$TASK_NOTES'''))")
+
+# Build projects list from comma-separated GIDs
+IFS=',' read -ra PROJECT_ARR <<< "$PROJECT_GIDS"
+
+new_task=$(curl -s "$API/tasks" \
+ -H "$AUTH" \
+ -H "Content-Type: application/json" \
+ -d "$(python3 -c "
+import json
+projects = '''$PROJECT_GIDS'''.split(',')
+assignee = '''$ASSIGNEE_GID''' or None
+data = {
+ 'data': {
+ 'name': '''$TASK_NAME''',
+ 'notes': $NOTES_JSON,
+ 'projects': [p for p in projects if p],
+ 'workspace': '$WORKSPACE_GID'
+ }
+}
+if assignee:
+ data['data']['assignee'] = assignee
+print(json.dumps(data))
+")")
+
+NEW_GID=$(echo "$new_task" | python3 -c "
+import sys, json
+data = json.load(sys.stdin)
+if 'errors' in data:
+ print('ERROR: ' + json.dumps(data['errors']), file=sys.stderr)
+ sys.exit(1)
+print(data['data']['gid'])
+")
+
+if [[ -z "$NEW_GID" || "$NEW_GID" == "ERROR"* ]]; then
+ echo "Error creating task" >&2
+ exit 1
+fi
+
+# Phase 3b: Set copied fields via shared updater script
+UPDATE_CMD=("$SCRIPT_DIR/../../asana-task-update/scripts/asana-task-update.sh" "--task" "$NEW_GID")
+if [[ -n "$PRIORITY_ENUM" ]]; then
+ UPDATE_CMD+=("--set-priority" "$PRIORITY_ENUM")
+fi
+if [[ -n "$STATUS_ENUM" ]]; then
+ UPDATE_CMD+=("--set-status" "$STATUS_ENUM")
+fi
+if [[ -n "$PLANNED_ENUM" ]]; then
+ UPDATE_CMD+=("--set-planned" "$PLANNED_ENUM")
+fi
+if [[ -n "$REVIEWER_GID" ]]; then
+ UPDATE_CMD+=("--set-reviewer" "$REVIEWER_GID")
+fi
+if [[ -n "$IMPLEMENTOR_GID" ]]; then
+ UPDATE_CMD+=("--set-implementor" "$IMPLEMENTOR_GID")
+fi
+if [[ ${#UPDATE_CMD[@]} -gt 3 ]]; then
+ "${UPDATE_CMD[@]}" > /dev/null
+fi
+
+FIRST_PROJECT=$(echo "$PROJECT_GIDS" | cut -d, -f1)
+echo "TASK_GID: $NEW_GID"
+echo "TASK_URL: https://app.asana.com/0/$FIRST_PROJECT/$NEW_GID"
+echo "CREATED: true"
+[[ -n "$ASSIGNEE_GID" ]] && echo "ASSIGNED_TO: $ASSIGNEE_GID"
+
+# Phase 4: Set as blocking dependency
+curl -s -X POST "$API/tasks/$PARENT_GID/addDependencies" \
+ -H "$AUTH" \
+ -H "Content-Type: application/json" \
+ -d "{\"data\": {\"dependencies\": [\"$NEW_GID\"]}}" > /dev/null
+
+echo "DEPENDENCY_SET: $NEW_GID blocks $PARENT_GID"
+
+fields_msg=""
+[[ -n "$PRIORITY_NAME" ]] && fields_msg="priority=$PRIORITY_NAME"
+[[ -n "$STATUS_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, }status=$STATUS_NAME"
+[[ -n "$PLANNED_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, }planned=$PLANNED_NAME"
+[[ -n "$REVIEWER_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, }reviewer=$REVIEWER_NAME"
+[[ -n "$IMPLEMENTOR_GID" ]] && fields_msg="${fields_msg:+$fields_msg, }implementor=$IMPLEMENTOR_NAME"
+[[ -n "$fields_msg" ]] && echo "FIELDS_SET: $fields_msg"
diff --git a/.cursor/skills/fix-eslint/SKILL.md b/.cursor/skills/fix-eslint/SKILL.md
new file mode 100644
index 0000000..8904d9d
--- /dev/null
+++ b/.cursor/skills/fix-eslint/SKILL.md
@@ -0,0 +1,108 @@
+---
+name: fix-eslint
+description: Fix ESLint warnings by applying documented patterns. Use when addressing @typescript-eslint/no-deprecated warnings for NavigationBase, RouteProp, or other deprecated types in edge-react-gui.
+---
+
+Resolve ESLint `@typescript-eslint/no-deprecated` warnings by replacing deprecated type references with their non-deprecated equivalents.
+
+
+Run `npx tsc --noEmit` after every type change to verify no new type errors are introduced.
+Do not suppress deprecation warnings with `eslint-disable` comments. Fix the underlying type reference.
+Exception: `NavigationBase` deprecation in shared cross-navigator code (Categories C, D, F below) is accepted — not suppressed, genuinely not fixable without a broader v7 navigation migration. When the fix scope is too broad, add a TODO comment documenting the required migration pattern and accept the warning.
+Only modify files with deprecation warnings. Do not refactor downstream declarations unless required for the fix to compile.
+
+
+
+
+
+`NavigationBase` is a flat navigation type hack in `routerTypes.tsx` that unions all navigator param lists (`RootParamList & DrawerParamList & EdgeAppStackParamList & ...`) to pretend the app is flat. It is deprecated because it tracks **react-navigation v7 breaking changes**:
+
+1. `navigate()` no longer crosses nested navigator boundaries at runtime.
+2. `navigate()` no longer goes back to an existing screen to update params — use `popTo()` or `navigate(screen, params, { pop: true })` instead.
+
+v7 provides `navigateDeprecated()` and `navigationInChildEnabled` as temporary bridges, both removed in v8. **Do NOT create non-deprecated aliases** (like `AppNavigation`) — this hides a real migration requirement.
+
+Fix `NavigationBase` deprecation by identifying which category the usage falls into:
+
+**Category A — Pass-through props** (component accepts `NavigationBase` only to forward it to children or actions):
+- Fix: Remove the `navigation` prop. Callers already have navigation in scope. If the child needs navigation, it should use `useNavigation()` or accept specific callbacks.
+```typescript
+// Before — CancellableProcessingScene accepts navigation to forward to onError
+interface Props { navigation: NavigationBase; onError: (nav: NavigationBase, err: unknown) => void }
+
+// After — remove navigation prop, callers handle navigation in callbacks
+interface Props { onError: (err: unknown) => Promise<void> }
+```
+
+**Category B — Direct navigation in non-scene components** (component accepts `NavigationBase`, calls `navigate()`/`push()` directly):
+- Fix: Replace `navigation: NavigationBase` prop with `useNavigation()` hook typed to the navigator context the component lives in. Or replace with specific navigation callbacks from the parent scene.
+```typescript
+// Before — BalanceCard accepts NavigationBase, calls navigate directly
+interface Props { navigation: NavigationBase }
+const BalanceCard: React.FC<Props> = props => {
+ props.navigation.push('send2', { walletId, tokenId })
+}
+
+// After (option 1) — useNavigation hook
+const BalanceCard: React.FC<Props> = props => {
+  const navigation = useNavigation<WalletsTabSceneProps<'walletList'>['navigation']>()
+ navigation.push('send2', { walletId, tokenId })
+}
+
+// After (option 2) — navigation callbacks
+interface Props { onSend: (walletId: string, tokenId: EdgeTokenId) => void }
+```
+- If the fix would cascade to many callers or require determining the correct navigator context across multiple usages, add a `// TODO: Replace NavigationBase with useNavigation() or callbacks. Requires v7 navigation migration.` comment and move on.
+
+**Category C — Shared action/thunk functions** (functions in `src/actions/` accept `NavigationBase`):
+- Fix: Invert control. Replace the `navigation: NavigationBase` parameter with a callback for the navigation action the function needs.
+```typescript
+// Before — function navigates internally
+function activateWalletTokens(navigation: NavigationBase, wallet, tokenIds): ThunkAction<Promise<void>> {
+ // ... calls navigation.navigate('editToken', ...) internally
+}
+
+// After — caller provides the navigate action
+function activateWalletTokens(wallet, tokenIds, onNavigate: (route: string, params: object) => void): ThunkAction<Promise<void>> {
+ // ... calls onNavigate('editToken', ...) instead
+}
+```
+- Simpler alternative for single-navigate functions: Return the target route + params instead of navigating; let the caller dispatch.
+- If the function has many navigate calls to different screens or the refactoring would touch many callers, add a `// TODO: Remove NavigationBase dependency. Requires inversion of navigation control for v7 migration.` comment and move on.
+
+**Category D — Shared modal components** (modals accept `NavigationBase`, navigate after user interaction):
+- Fix: Modal returns a result via Airship bridge resolve; caller handles navigation based on the result. Or modal accepts navigation callbacks.
+- If the modal's navigation logic is complex (multiple paths), add a comment and move on.
+
+**Category E — Scene component casts** (`navigation as NavigationBase`):
+- These casts exist because the scene passes navigation to a Category A-D consumer.
+- Fix: No direct fix needed — casts disappear automatically when the consumer is migrated.
+- If the scene has its own `NavigationBase` usage unrelated to shared code, apply Category B fix.
+
+**Category F — Service components** (non-scene services: `DeepLinkingManager`, `AccountCallbackManager`, etc.):
+- These are the broadest migration cases. Always add: `// TODO: Remove NavigationBase dependency. Requires broader v7 navigation migration for service-level navigation.`
+- Do not attempt to fix these incrementally — they are cross-cutting and require dedicated migration work.
+
+
+
+Replace deprecated `RouteProp<'routeName'>` with the scene-specific route type.
+
+```typescript
+// Before
+import type { RouteProp } from '../../types/routerTypes'
+const route = useRoute<RouteProp<'walletList'>>()
+
+// After
+import type { WalletsTabSceneProps } from '../../types/routerTypes'
+const route = useRoute<WalletsTabSceneProps<'walletList'>['route']>()
+```
+
+Choose the scene props type that matches the navigator the component lives in:
+- `WalletsTabSceneProps` for walletList, walletDetails, transactionList, transactionDetails
+- `EdgeAppSceneProps` for routes in EdgeAppStackParamList
+- `SwapTabSceneProps` for swap routes
+- `BuySellTabSceneProps` for buy/sell routes
+- `RootSceneProps` for login, home, etc.
+
+
+
diff --git a/.cursor/skills/im/SKILL.md b/.cursor/skills/im/SKILL.md
new file mode 100644
index 0000000..7094d19
--- /dev/null
+++ b/.cursor/skills/im/SKILL.md
@@ -0,0 +1,164 @@
+---
+name: im
+description: Implement an Asana task or ad-hoc feature/fix with clean, structured commits. Use when the user wants to implement a task, build a feature, or fix a bug in an Edge repository.
+compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration.
+metadata:
+ author: j0ntz
+---
+
+Implement an Asana task or ad-hoc feature/fix with clean, well-structured commits.
+
+
+Before writing ANY code, read `.cursor/rules/typescript-standards.mdc` and follow all rules and standards in it throughout the implementation.
+Do NOT begin implementation until the user confirms the `/asana-plan` output (Step 0).
+Before the first edit to ANY file, run `~/.cursor/skills/im/scripts/lint-warnings.sh ` to auto-fix auto-fixable lint issues, then load any remaining lint findings and matching fix patterns into context. If the script changes files or leaves findings, handle those in a separate lint-fix commit IMMEDIATELY BEFORE the commit with actual changes. This applies to every file you touch, including ones discovered mid-implementation — not just the files you planned upfront.
+Do not manually fix formatting. `lint-commit.sh` runs `eslint --fix` (which includes Prettier) before committing. If you see a formatting lint after editing, do NOT make another edit to fix it.
+Always commit using `~/.cursor/skills/lint-commit.sh -m "message" [files...]` or `--fixup ` for fixup commits.
+When committing with scoped file arguments, treat `src/locales/strings`, `eslint.config.mjs`, and snapshot files as expected auto-generated companion files in the same commit. If `lint-commit.sh` reports additional non-generated files outside the intended scope, evaluate whether the commit plan is wrong before continuing.
+The final commit history must read as a clean, straight-line progression — as if every decision was made correctly up front. Never preserve the "squiggly path" of development (adding then removing code, temporary scaffolding, exploratory commits). If you introduce something in commit A and remove it in commit B, restructure so the final history never contains it. Plan commits proactively to avoid this; when it happens anyway, restructure the branch before finishing.
+If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or other workarounds.
+`asana-get-context.sh` can take up to 90s and `install-deps.sh` can exceed 10s on repo prepare steps. Always use at least a 120000ms timeout for these scripts to avoid false failures from client-side time limits.
+
+
+
+Always delegate planning to `~/.cursor/skills/asana-plan/SKILL.md` first:
+
+- If user provided an Asana URL, run `/asana-plan` in Asana mode.
+- If user provided ad-hoc text or file references, run `/asana-plan` in text/file mode.
+
+`/asana-plan` returns a plan file path + short execution summary and waits for user confirmation. Start implementation only after that confirmation.
+
+### Regression analysis
+
+If the task describes a regression (e.g. "broke in version X", "stopped working after update"):
+
+1. **Identify the breaking commit** using `git log`, `git bisect`, or version tag comparison. Don't take the reported version from the task at face value — verify by examining the actual commit history.
+2. **Review the original change's full intent.** Find the associated PR and any linked tasks/discussions. The regression-causing commit likely had legitimate goals (performance, refactoring, new features). Understand ALL of its intended effects, not just the one that broke.
+3. **Ensure the fix preserves the original intent.** The fix must not undo the beneficial changes introduced by the regression commit. If the fix conflicts with the original intent, flag this to the user with tradeoffs before proceeding.
+
+
+
+After Step 0 determines the target repo (or if no Asana task, use the current repo):
+
+1. **Stash any uncommitted changes** (including untracked files) before switching branches: `git stash -u`
+2. Determine the correct branch state:
+ - **Wrong repo**: `cd` to the correct workspace repo directory.
+ - **On an unrelated feature branch**: Switch to the base branch (see "Branch from" column in `task-review.md`), then create a new feature branch.
+ - **On the base branch**: Create a new feature branch.
+ - **On the correct feature branch**: Continue.
+3. **Branch naming**: `$GIT_BRANCH_PREFIX/<feature-name>` or `$GIT_BRANCH_PREFIX/fix/<bug-name>` for bug fixes. Use kebab-case. Example: `<prefix>/some-feature` or `<prefix>/fix/some-bug`
+4. **Assume a new branch is needed** unless the current branch clearly matches the task. Do NOT ask for confirmation — the existing branch has its own committed work and is unaffected.
+5. **Install dependencies**: After creating or switching to the feature branch, run `~/.cursor/skills/install-deps.sh` with a timeout of at least 120000ms to ensure dependencies match the base branch state without false timeout failures.
+
+If the task spans multiple repos, note the additional repos but implement in the primary repo first.
+
+
+
+**Before writing ANY code**, run `lint-warnings.sh` on every file you plan to modify:
+
+```bash
+~/.cursor/skills/im/scripts/lint-warnings.sh ...
+```
+
+This script:
+
+1. Runs `eslint --fix`
+2. Detects files that will be "graduated" from the warning suppression list on commit, promoting their suppressed-rule warnings to errors in the output
+3. Shows any remaining findings grouped by rule (with graduation promotions already applied)
+4. Outputs matching fix patterns from `~/.cursor/rules/typescript-standards.mdc`
+5. Flags unmatched rules that need new patterns added
+
+If the script auto-fixes files or remaining findings exist:
+
+1. Fix all reported **errors** first — these include graduation-promoted warnings that will block `lint-commit.sh` after the file is removed from the suppression list
+2. Fix remaining **warnings** using the matched patterns in the output
+3. For **unmatched rules**: After fixing, add a new `<pattern>` entry to `typescript-standards.mdc` so future occurrences have guidance
+4. Commit the pre-existing lint changes separately:
+ ```bash
+ ~/.cursor/skills/lint-commit.sh -m "Fix lint warnings in " ...
+ ```
+
+**Architectural vs mechanical fixes**: If a pattern notes "architectural change" (e.g., `styled()` refactoring), flag to user rather than fixing inline — these changes have broader impact and may warrant separate discussion.
+
+`lint-commit.sh` treats passed file arguments as the primary commit scope and only stages those files plus generated companion files (`src/locales/strings`, `eslint.config.mjs`, snapshots). It does not stage unrelated dirty files in the working tree.
+
+This ensures the subsequent feature commit introduces zero pre-existing lint findings. This is the initial pass — if you discover additional files to modify during Step 3, the same check applies (see Step 3).
+
+
+
+1. **Lint-check newly discovered files**: If you need to modify a file not covered in Step 2, run `~/.cursor/skills/im/scripts/lint-warnings.sh ` before editing it. If the script auto-fixes the file or leaves remaining pre-existing findings, commit those changes as a `--fixup` to the lint-fix commit from Step 2 (use `git log --oneline` to find the hash). If no lint-fix commit exists yet, create one.
+2. Break up the feature into multiple commits if necessary. Commit messages should be a concise title without tags like "feat" and a short body.
+3. Open relevant ts/tsx files before writing code.
+4. Commit using `lint-commit.sh`:
+ ```bash
+ ~/.cursor/skills/lint-commit.sh -m "commit message" [files...]
+ ```
+ You can optionally pass specific files to scope the commit.
+5. **Fixup commits**: When a change logically amends an earlier commit on the branch (e.g. fixing a typo from commit A, adding a missed import for commit B, adjusting behavior introduced in a prior commit), use a fixup commit instead of a standalone commit:
+ ```bash
+ ~/.cursor/skills/lint-commit.sh --fixup [files...]
+ ```
+ This marks the commit for automatic squashing into the target commit. Use `git log --oneline` to find the target hash.
+6. Include a `CHANGELOG.md` entry in the **last feature commit** (not a separate commit) using format: `- type: description`
+ - Types: `added`, `changed`, `fixed`
+ - Example: `- added: New short feature description`
+ - Entries are grouped by type in order: all `added`, then all `changed`, then all `fixed`
+ - CHANGELOG.md must ONLY appear in the last commit — never in intermediate feature commits
+ - Avoid reading more than 50 lines of the file
+ - **Which section** (see CHANGELOG placement rules below)
+
+
+
+The following apply only when working in the `edge-react-gui` repo:
+
+- New string literals should be added to `en_US.ts` in the SAME commit that uses them, not in a separate commit. The `lint-commit.sh` script handles `yarn localize` automatically when `en_US.ts` is in the changeset.
+- **Editing `en_US.ts`**: Use grep to find exact insertion points rather than reading the file in chunks. The file is ~2500 lines; reading it piecemeal wastes context. Example:
+ ```bash
+ rg -n "nearby_string_key" src/locales/en_US.ts
+ ```
+ Then use StrReplace with minimal context — only enough surrounding lines to make the match unique. Do NOT reformat existing lines in the replacement.
+
+### CHANGELOG placement (edge-react-gui)
+
+`edge-react-gui` has two active CHANGELOG sections: `## Unreleased (develop)` and `## X.Y.Z (staging)`. Which section to target depends on the Asana task's version project:
+
+1. **Read the staging version** from CHANGELOG: grep for `^## [0-9].*staging` to get the version (e.g. `4.43.0`).
+2. **Read the task's version project** from the `VERSION_PROJECT` field in the Asana context output (e.g. `4.44.0`).
+3. **Compare**:
+ - If `VERSION_PROJECT` matches the staging version → add entry under the `## X.Y.Z (staging)` heading.
+ - If `VERSION_PROJECT` does NOT match (or is not set) → add entry under `## Unreleased (develop)`.
+4. If no Asana context was fetched, default to `## Unreleased`.
+
+Other repos only have `## Unreleased` — no staging distinction.
+
+
+
+**Always run this step** — do not skip it and do not ask for permission. Review the branch history against the `clean-history` rule and automatically fix any issues found.
+
+1. **Check for an open PR**: Run `gh pr view --json url,reviews 2>/dev/null` to determine if a PR exists and whether it has human review comments.
+2. **If a PR exists with human review comments**, skip cleanup — rewriting history would lose review context. Note the pending cleanup in the retrospective.
+3. **Otherwise (no PR, or PR with no human reviews)**, always perform ALL applicable cleanup automatically:
+ - **Fixup commits exist**: Autosquash with `rm -f .git/index.lock && GIT_SEQUENCE_EDITOR=true git rebase -i --autosquash `. Do this immediately — never leave fixup commits unsquashed.
+ - **Reorder commits**: Use the companion script to reorder commits to the desired order. Hashes are oldest-to-newest:
+ ```bash
+ ~/.cursor/skills/im/scripts/reorder-commits.sh ...
+ ```
+ The script handles index lock cleanup, awk-based reordering, and verifies the tree is unchanged afterward.
+ - **Structural issues** (add-then-remove cycles, misplaced changes, commits that should be squashed, CHANGELOG in intermediate commits): Use `reorder-commits.sh` for reordering. For squash/drop operations, use `rm -f .git/index.lock && GIT_SEQUENCE_EDITOR="..." git rebase -i ` with an awk or sed script. Verify the final tree matches the pre-restructure state with `git diff`.
+
+
+
+Run full verification to catch issues that per-commit checks (`lint-commit.sh`) may have missed (e.g. transitive snapshot breakage, type errors across files):
+
+```bash
+~/.cursor/skills/verify-repo.sh . --base
+```
+
+Where `` is `origin/develop` for `edge-react-gui` or `origin/master` for other repos. Set `block_until_ms: 120000`.
+
+If verification fails, fix the issue with a fixup commit targeting the responsible commit, then re-run history cleanup (step 4) and verification.
+
+
+
+When finished, evaluate the context and propose potential improvements to this process — mistakes or errors in the tool calls, ways to improve excessive context bloat, etc.
+
diff --git a/.cursor/skills/im/scripts/lint-warnings.sh b/.cursor/skills/im/scripts/lint-warnings.sh
new file mode 100755
index 0000000..c124dc2
--- /dev/null
+++ b/.cursor/skills/im/scripts/lint-warnings.sh
@@ -0,0 +1,260 @@
+#!/usr/bin/env bash
+# lint-warnings.sh
+# Run eslint --fix on files and match any remaining findings to documented fix
+# patterns. Detects files that will be "graduated" from the ESLint warning
+# suppression list when committed, promoting their suppressed-rule warnings to
+# errors so they can be fixed before commit.
+#
+# Usage:
+#   lint-warnings.sh <file1> [file2] ...
+#
+# Output:
+# 1. Summary of auto-fixes applied (if any)
+# 2. Graduation warnings (files that will be promoted to error severity)
+# 3. Summary of remaining findings per rule/severity
+# 4. Matched patterns from typescript-standards.mdc (full XML blocks)
+# 5. Unmatched rules (need new patterns added)
+#
+# Exit codes:
+# 0 - No remaining lint findings after auto-fix
+# 1 - Remaining lint findings after auto-fix
+# 2 - Error (missing files, eslint runtime/config failure, etc.)
+set -euo pipefail
+
+PATTERNS_FILE="$HOME/.cursor/rules/typescript-standards.mdc"
+
+if [[ $# -eq 0 ]]; then
+ echo "Usage: lint-warnings.sh [file2] ..." >&2
+ exit 2
+fi
+
+# Filter to existing .ts/.tsx files
+FILES=()
+for f in "$@"; do
+ if [[ ("$f" == *.ts || "$f" == *.tsx) && -f "$f" ]]; then
+ FILES+=("$f")
+ fi
+done
+
+if [[ ${#FILES[@]} -eq 0 ]]; then
+ echo "No .ts/.tsx files found" >&2
+ exit 2
+fi
+
+# Run eslint with --fix, then classify any remaining lint findings.
+TMP_JSON="$(mktemp)"
+TMP_ERR="$(mktemp)"
+trap 'rm -f "$TMP_JSON" "$TMP_ERR"' EXIT
+
+set +e
+./node_modules/.bin/eslint --fix --format json "${FILES[@]}" >"$TMP_JSON" 2>"$TMP_ERR"
+ESLINT_EXIT=$?
+set -e
+
+node -e '
+const fs = require("fs");
+const path = require("path");
+
+const patternsFile = process.argv[1];
+const jsonFile = process.argv[2];
+const errFile = process.argv[3];
+const eslintExit = Number(process.argv[4]);
+
+let input = "";
+let stderrText = "";
+try {
+ input = fs.readFileSync(jsonFile, "utf8");
+} catch (error) {
+ console.error("Failed to read eslint JSON output");
+ process.exit(2);
+}
+
+try {
+ stderrText = fs.readFileSync(errFile, "utf8").trim();
+} catch (error) {
+ stderrText = "";
+}
+
+if (input.trim() === "") {
+ if (stderrText !== "") console.error(stderrText);
+ console.error("ESLint produced no JSON output");
+ process.exit(2);
+}
+
+let results;
+try {
+ results = JSON.parse(input);
+} catch (error) {
+ if (stderrText !== "") console.error(stderrText);
+ console.error("Failed to parse eslint output");
+ process.exit(2);
+}
+
+if (!Array.isArray(results)) {
+ console.error("Unexpected eslint JSON format");
+ process.exit(2);
+}
+
+// --- Graduation detection ---
+// Parse eslint.config.mjs to find files in the warning-suppression list.
+// These files currently have certain rules at "warn" severity, but committing
+// them removes them from the list (via update-eslint-warnings), promoting
+// those rules to "error". We detect this ahead of time so the agent can fix
+// them in a lint-fix commit before the feature commit.
+const GRADUATED_RULES = new Set([
+ "@typescript-eslint/ban-ts-comment",
+ "@typescript-eslint/explicit-function-return-type",
+ "@typescript-eslint/strict-boolean-expressions",
+ "@typescript-eslint/use-unknown-in-catch-callback-variable"
+]);
+
+const suppressedFiles = new Set();
+try {
+ const configPath = path.join(process.cwd(), "eslint.config.mjs");
+ const configContent = fs.readFileSync(configPath, "utf8");
+ // Extract file paths from the suppression block (single-quoted strings)
+ for (const m of configContent.matchAll(/^\s+\x27([^\x27]+)\x27,?\s*$/gm)) {
+ suppressedFiles.add(m[1]);
+ }
+} catch (error) {
+ // No eslint.config.mjs or parse failure — skip graduation detection
+}
+
+const findingsBySeverity = new Map([
+ [2, new Map()],
+ [1, new Map()]
+]);
+let totalErrors = 0;
+let totalWarnings = 0;
+let graduatedCount = 0;
+let autoFixedFiles = 0;
+
+for (const file of results) {
+ if (file != null && typeof file.output === "string") autoFixedFiles += 1;
+
+ const rel = path.relative(process.cwd(), file.filePath);
+ const willGraduate = suppressedFiles.has(rel);
+
+ for (const message of file.messages) {
+ if (message.severity !== 1 && message.severity !== 2) continue;
+
+ const rule = message.ruleId || "unknown";
+
+ // Promote suppressed-rule warnings to errors for files that will graduate
+ let effectiveSeverity = message.severity;
+ if (willGraduate && message.severity === 1 && GRADUATED_RULES.has(rule)) {
+ effectiveSeverity = 2;
+ graduatedCount += 1;
+ }
+
+ const findingsForSeverity = findingsBySeverity.get(effectiveSeverity);
+ if (!findingsForSeverity.has(rule)) {
+ findingsForSeverity.set(rule, []);
+ }
+ findingsForSeverity.get(rule).push({
+ file: rel,
+ line: message.line,
+ message: message.message
+ });
+
+ if (effectiveSeverity === 2) totalErrors += 1;
+ else totalWarnings += 1;
+ }
+}
+
+if (eslintExit > 1 && totalErrors === 0 && totalWarnings === 0) {
+ if (stderrText !== "") console.error(stderrText);
+ console.error("ESLint failed before reporting lint findings");
+ process.exit(2);
+}
+
+if (autoFixedFiles > 0) {
+ console.log(`>> Auto-fixed ${autoFixedFiles} file(s)`);
+}
+
+if (graduatedCount > 0) {
+ console.log(`>> ${graduatedCount} warning(s) promoted to errors (graduation: file will be removed from suppression list on commit)`);
+}
+
+if (totalErrors === 0 && totalWarnings === 0) {
+ console.log(">> No remaining lint findings");
+ process.exit(0);
+}
+
+let patternsContent = "";
+try {
+ patternsContent = fs.readFileSync(patternsFile, "utf8");
+} catch (error) {
+ console.error("Warning: Could not read patterns file:", patternsFile);
+}
+
+const patternRegex = /<pattern id="([^"]+)" rule="([^"]+)">([\s\S]*?)<\/pattern>/g;
+const patterns = new Map();
+let match;
+while ((match = patternRegex.exec(patternsContent)) !== null) {
+ const [fullMatch, id, rule] = match;
+ if (!patterns.has(rule)) {
+ patterns.set(rule, []);
+ }
+ patterns.get(rule).push({ id, fullMatch });
+}
+
+if (totalErrors > 0) {
+ console.log(`>> ${totalErrors} remaining error(s)`);
+}
+if (totalWarnings > 0) {
+ console.log(`>> ${totalWarnings} remaining warning(s)`);
+}
+
+const printFindings = (heading, findingsByRule) => {
+ if (findingsByRule.size === 0) return;
+
+ console.log(`\n=== ${heading} ===`);
+ for (const [rule, instances] of [...findingsByRule.entries()].sort((left, right) => right[1].length - left[1].length)) {
+ console.log(`\n${rule} (${instances.length}x):`);
+ for (const inst of instances.slice(0, 3)) {
+ console.log(` ${inst.file}:${inst.line} - ${inst.message}`);
+ }
+ if (instances.length > 3) {
+ console.log(` ... and ${instances.length - 3} more`);
+ }
+ }
+};
+
+printFindings("Remaining Errors by Rule", findingsBySeverity.get(2));
+printFindings("Remaining Warnings by Rule", findingsBySeverity.get(1));
+
+const matchedRules = [];
+const unmatchedRules = [];
+const seenRules = new Set();
+for (const findingsByRule of findingsBySeverity.values()) {
+ for (const rule of findingsByRule.keys()) {
+ if (seenRules.has(rule)) continue;
+ seenRules.add(rule);
+ if (patterns.has(rule)) {
+ matchedRules.push(rule);
+ } else {
+ unmatchedRules.push(rule);
+ }
+ }
+}
+
+if (matchedRules.length > 0) {
+ console.log("\n\n=== Matched Fix Patterns ===");
+ for (const rule of matchedRules) {
+ for (const pattern of patterns.get(rule)) {
+ console.log(`\n${pattern.fullMatch}`);
+ }
+ }
+}
+
+if (unmatchedRules.length > 0) {
+ console.log("\n\n=== Unmatched Rules (need patterns added) ===");
+ for (const rule of unmatchedRules) {
+ console.log(`- ${rule}`);
+ }
+ console.log("\nAfter fixing these, add patterns to ~/.cursor/rules/typescript-standards.mdc");
+}
+
+process.exit(1);
+' -- "$PATTERNS_FILE" "$TMP_JSON" "$TMP_ERR" "$ESLINT_EXIT"
diff --git a/.cursor/skills/im/scripts/reorder-commits.sh b/.cursor/skills/im/scripts/reorder-commits.sh
new file mode 100755
index 0000000..700c285
--- /dev/null
+++ b/.cursor/skills/im/scripts/reorder-commits.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+# reorder-commits.sh
+# Reorder commits on a branch to a specified order using non-interactive rebase.
+#
+# Usage:
+#   reorder-commits.sh <base-branch> <hash1> <hash2> ...
+#
+# Arguments:
+# base-branch The branch/ref to rebase onto (e.g., origin/develop)
+# hash1..N Commit hashes in desired order (oldest to newest)
+#
+# The script verifies all hashes exist in base..HEAD, writes an awk-based
+# GIT_SEQUENCE_EDITOR to reorder the pick lines, and runs git rebase -i.
+# It verifies the tree is unchanged after rebase.
+#
+# Exit codes:
+# 0 - Reorder successful
+# 1 - Reorder failed (conflict, missing commits, tree mismatch)
+set -euo pipefail
+
+if [[ $# -lt 3 ]]; then
+ echo "Usage: reorder-commits.sh ..." >&2
+ exit 1
+fi
+
+BASE="$1"
+shift
+DESIRED_ORDER=("$@")
+
+# Remove stale index locks
+rm -f .git/index.lock
+
+# Get short hashes for matching rebase todo lines
+BRANCH_COMMITS=$(git log --reverse --format='%h' "$BASE..HEAD")
+BRANCH_COUNT=$(echo "$BRANCH_COMMITS" | wc -l | tr -d ' ')
+DESIRED_COUNT=${#DESIRED_ORDER[@]}
+
+if [[ "$BRANCH_COUNT" -ne "$DESIRED_COUNT" ]]; then
+ echo "Error: Branch has $BRANCH_COUNT commits but $DESIRED_COUNT hashes were provided" >&2
+ echo "Branch commits: $BRANCH_COMMITS" >&2
+ exit 1
+fi
+
+# Resolve desired hashes to short hashes and verify they're on the branch
+DESIRED_SHORT=()
+for hash in "${DESIRED_ORDER[@]}"; do
+ short=$(git rev-parse --short "$hash" 2>/dev/null) || {
+ echo "Error: Cannot resolve hash '$hash'" >&2
+ exit 1
+ }
+ if ! echo "$BRANCH_COMMITS" | grep -q "^${short}$"; then
+ echo "Error: Commit $short is not in $BASE..HEAD" >&2
+ exit 1
+ fi
+ DESIRED_SHORT+=("$short")
+done
+
+# Capture pre-rebase tree for verification
+PRE_TREE=$(git rev-parse HEAD^{tree})
+
+# Build awk script that reorders pick lines to match desired order
+# The awk program collects all pick lines, then outputs them in the order
+# specified by the DESIRED env var (space-separated short hashes)
+EDITOR_SCRIPT=$(mktemp)
+trap 'rm -f "$EDITOR_SCRIPT"' EXIT
+
+cat > "$EDITOR_SCRIPT" << 'AWKSCRIPT'
+#!/usr/bin/env bash
+exec awk -v desired="$DESIRED" '
+BEGIN {
+ n = split(desired, order, " ")
+}
+/^pick / {
+ hash = $2
+ lines[hash] = $0
+ next
+}
+/^$/ || /^#/ { next }
+END {
+ for (i = 1; i <= n; i++) {
+ for (h in lines) {
+ if (index(h, order[i]) == 1 || index(order[i], h) == 1) {
+ print lines[h]
+ break
+ }
+ }
+ }
+}
+' "$1" > "$1.tmp" && mv "$1.tmp" "$1"
+AWKSCRIPT
+chmod +x "$EDITOR_SCRIPT"
+
+export DESIRED="${DESIRED_SHORT[*]}"
+if GIT_SEQUENCE_EDITOR="$EDITOR_SCRIPT" git rebase -i "$BASE" 2>/dev/null; then
+ POST_TREE=$(git rev-parse HEAD^{tree})
+ if [[ "$PRE_TREE" == "$POST_TREE" ]]; then
+ echo ">> Commits reordered successfully"
+ git log --oneline "$BASE..HEAD"
+ else
+ echo "Error: Tree changed after reorder (pre: $PRE_TREE, post: $POST_TREE)" >&2
+ echo "This indicates content was lost or modified during rebase." >&2
+ exit 1
+ fi
+else
+ git rebase --abort 2>/dev/null || true
+ echo "Error: Rebase failed (likely conflict). Aborted." >&2
+ exit 1
+fi
diff --git a/.cursor/skills/install-deps.sh b/.cursor/skills/install-deps.sh
new file mode 100755
index 0000000..fbe959e
--- /dev/null
+++ b/.cursor/skills/install-deps.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# install-deps.sh — Install dependencies and run prepare script.
+# Usage: install-deps.sh [repo-dir]
+#
+# Runs `yarn install` and `yarn prepare` (if prepare script exists in package.json).
+# Use after: branch creation, rebase onto upstream, checkout.
+#
+# Exit codes:
+# 0 = Success (or no package.json — skipped)
+# 1 = Install or prepare failed
+
+repo_dir="${1:-.}"
+
+if [ ! -f "$repo_dir/package.json" ]; then
+ echo "⏭ No package.json — skipping dependency install" >&2
+ exit 0
+fi
+
+echo "Installing dependencies..." >&2
+(cd "$repo_dir" && yarn install)
+
+if (cd "$repo_dir" && node -e "process.exit(require('./package.json').scripts?.prepare ? 0 : 1)" 2>/dev/null); then
+ echo "Running prepare..." >&2
+ (cd "$repo_dir" && yarn prepare)
+fi
+
+echo "✓ Dependencies installed and prepared" >&2
diff --git a/.cursor/skills/lint-commit.sh b/.cursor/skills/lint-commit.sh
new file mode 100755
index 0000000..8b3d993
--- /dev/null
+++ b/.cursor/skills/lint-commit.sh
@@ -0,0 +1,327 @@
+#!/usr/bin/env bash
+# lint-commit.sh
+# Lint-fix, verify, localize (if needed), and commit in one atomic step.
+#
+# Usage:
+# lint-commit.sh -m "commit message" [file ...]
+#   lint-commit.sh --fixup <commit> [file ...]
+# lint-commit.sh -m "fixup! Original commit" [file ...] # Auto-reorders
+#
+# Options:
+# -m "msg" Commit message (mutually exclusive with --fixup)
+#   --fixup <commit>   Create a fixup commit targeting <commit>
+# --reorder After fixup commit, rebase to place it after its target (default: true)
+# --no-reorder Skip the reorder rebase
+#
+# If files are given, they are the primary scope for linting/committing.
+# The script may also auto-include generated companion files like:
+# - src/locales/strings
+# - eslint.config.mjs
+# - __snapshots__/*.snap
+# Any additional non-generated files are reported before commit.
+# If no files are given, all staged + unstaged + untracked changes are used.
+# The script will:
+# 1. Run eslint --fix on .ts/.tsx files
+# 2. Run eslint --quiet to verify no remaining errors (exits 1 if any)
+# 2b. Check for new warnings on changed lines (exits 1 if any)
+# 3. Run yarn localize if the project has a localize script
+# 4. git add -A && git commit --no-verify
+# 5. Run yarn test --findRelatedTests -u on committed .ts/.tsx files
+# 6. If snapshots changed, amend the commit to include them
+# 7. If commit is a fixup (--fixup or -m "fixup! ..."), reorder via rebase
+set -euo pipefail
+
+MESSAGE=""
+FIXUP=""
+REORDER="true" # Default to reordering fixups
+FILES=()
+PRIMARY_SCOPE_DECLARED="false"
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -m)
+ MESSAGE="$2"
+ shift 2
+ ;;
+ --fixup)
+ FIXUP="$2"
+ shift 2
+ ;;
+ --reorder)
+ REORDER="true"
+ shift
+ ;;
+ --no-reorder)
+ REORDER="false"
+ shift
+ ;;
+ *)
+ FILES+=("$1")
+ shift
+ ;;
+ esac
+done
+
+if [[ ${#FILES[@]} -gt 0 ]]; then
+ PRIMARY_SCOPE_DECLARED="true"
+fi
+
+if [[ -z "$MESSAGE" && -z "$FIXUP" ]]; then
+ echo "Error: -m \"commit message\" or --fixup is required" >&2
+ exit 1
+fi
+if [[ -n "$MESSAGE" && -n "$FIXUP" ]]; then
+ echo "Error: -m and --fixup are mutually exclusive" >&2
+ exit 1
+fi
+
+# If no files specified, collect all changed/untracked files
+if [[ ${#FILES[@]} -eq 0 ]]; then
+ while IFS= read -r f; do
+ [[ -n "$f" ]] && FILES+=("$f")
+ done < <(git diff --name-only HEAD 2>/dev/null; git diff --name-only --cached 2>/dev/null; git ls-files --others --exclude-standard 2>/dev/null)
+
+ # Deduplicate (compatible with macOS Bash 3.2 — no mapfile)
+ if [[ ${#FILES[@]} -gt 0 ]]; then
+ DEDUPED=()
+ while IFS= read -r f; do
+ [[ -n "$f" ]] && DEDUPED+=("$f")
+ done < <(printf '%s\n' "${FILES[@]}" | sort -u)
+ FILES=("${DEDUPED[@]}")
+ fi
+fi
+
+if [[ ${#FILES[@]} -eq 0 ]]; then
+ echo "Error: No changed files found" >&2
+ exit 1
+fi
+
+# Filter to lintable files (.ts/.tsx) that exist on disk
+LINT_FILES=()
+for f in "${FILES[@]}"; do
+ if [[ ("$f" == *.ts || "$f" == *.tsx) && -f "$f" ]]; then
+ LINT_FILES+=("$f")
+ fi
+done
+
+# Step 1: eslint --fix
+if [[ ${#LINT_FILES[@]} -gt 0 ]]; then
+ echo ">> eslint --fix (${#LINT_FILES[@]} files)"
+ ./node_modules/.bin/eslint --fix "${LINT_FILES[@]}" || true
+
+ # Step 2: eslint --quiet (must pass)
+ echo ">> eslint --quiet (verify)"
+ if ! ./node_modules/.bin/eslint --quiet "${LINT_FILES[@]}"; then
+ echo "Error: Lint errors remain after --fix. Aborting commit." >&2
+ exit 1
+ fi
+ echo ">> Lint clean"
+
+ # Step 2b: Detect new warnings introduced on changed lines.
+ # Runs eslint (with warnings) and cross-references against git diff to
+ # only flag warnings on lines the developer actually touched.
+ NEW_WARN=$(node -e '
+const { execSync } = require("child_process")
+const path = require("path")
+
+const files = process.argv.slice(1)
+const cmd = "./node_modules/.bin/eslint --format json " + files.map(f => JSON.stringify(f)).join(" ")
+
+let results
+try {
+ results = JSON.parse(execSync(cmd, { encoding: "utf8", maxBuffer: 10 * 1024 * 1024 }))
+} catch (e) {
+ if (e.stdout) try { results = JSON.parse(e.stdout) } catch { process.exit(0) }
+ else process.exit(0)
+}
+
+const cwd = process.cwd()
+const out = []
+
+for (const r of results) {
+ const rel = path.relative(cwd, r.filePath)
+ const warns = r.messages.filter(m => m.severity === 1)
+ if (warns.length === 0) continue
+
+ // Determine which lines were changed in this file
+ let changed
+ try {
+ execSync("git cat-file -e HEAD:" + JSON.stringify(rel), { stdio: "pipe" })
+ const diff = execSync("git diff -U0 HEAD -- " + JSON.stringify(rel), { encoding: "utf8" })
+ changed = new Set()
+ for (const m of diff.matchAll(/@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@/g)) {
+ const start = +m[1]
+ const count = m[2] != null ? +m[2] : 1
+ for (let i = start; i < start + count; i++) changed.add(i)
+ }
+ } catch {
+ changed = null // New file — all lines count as changed
+ }
+
+ for (const w of warns) {
+ if (changed == null || changed.has(w.line)) {
+ out.push(rel + ":" + w.line + ":" + w.column + " warning " + w.message + " " + w.ruleId)
+ }
+ }
+}
+
+if (out.length > 0) console.log(out.join("\n"))
+' -- "${LINT_FILES[@]}" 2>/dev/null || true)
+
+ if [[ -n "$NEW_WARN" ]]; then
+ echo ">> New warnings on changed lines:" >&2
+ echo "$NEW_WARN" >&2
+ echo "Error: Fix new warnings before committing." >&2
+ exit 1
+ fi
+fi
+
+# Step 3: yarn localize if the project has a localize script
+if node -e "process.exit(require('./package.json').scripts?.localize ? 0 : 1)" 2>/dev/null; then
+ echo ">> yarn localize"
+ yarn localize
+fi
+
+# Step 4: Stage files and report effective commit scope
+if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then
+ echo ">> git add (scoped) && git commit"
+ git add -- "${FILES[@]}"
+ # Stage generated companion files if they have changes
+ for companion in eslint.config.mjs; do
+ if [[ -f "$companion" ]] && ! git diff --quiet -- "$companion" 2>/dev/null; then
+ git add -- "$companion"
+ fi
+ done
+ # Stage locales/strings if yarn localize changed them (already git-added by
+ # yarn localize in some repos, but ensure they're staged)
+ if git diff --quiet --cached -- src/locales/strings 2>/dev/null; then
+ git diff --quiet -- src/locales/strings 2>/dev/null || git add -- src/locales/strings/ 2>/dev/null || true
+ fi
+else
+ echo ">> git add -A && git commit"
+ git add -A
+fi
+
+# Graduate files from eslint warning-override list if the repo has the script
+if node -e "process.exit(require('./package.json').scripts?.['update-eslint-warnings'] ? 0 : 1)" 2>/dev/null; then
+ echo ">> update-eslint-warnings"
+ npm run --silent update-eslint-warnings
+fi
+
+if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then
+ echo ">> commit scope report"
+ node -e '
+const { execSync } = require("child_process")
+
+const requested = [...new Set(process.argv.slice(1))].sort()
+const staged = execSync("git diff --cached --name-only --diff-filter=ACMRD", {
+ encoding: "utf8"
+})
+ .split("\n")
+ .map(line => line.trim())
+ .filter(Boolean)
+ .sort()
+
+const requestedSet = new Set(requested)
+const isGeneratedCompanion = file => {
+ return (
+ file === "eslint.config.mjs" ||
+ file === "src/locales/strings" ||
+ /(^|\/)__snapshots__\/.*\.snap$/.test(file)
+ )
+}
+
+const requestedStaged = []
+const generatedStaged = []
+const extraStaged = []
+for (const file of staged) {
+ if (requestedSet.has(file)) {
+ requestedStaged.push(file)
+ } else if (isGeneratedCompanion(file)) {
+ generatedStaged.push(file)
+ } else {
+ extraStaged.push(file)
+ }
+}
+
+const missingRequested = requested.filter(file => !staged.includes(file))
+
+const printGroup = (title, files) => {
+ if (files.length === 0) return
+ console.log(title)
+ for (const file of files) console.log("- " + file)
+}
+
+printGroup("Primary scope staged:", requestedStaged)
+printGroup("Auto-generated companion files staged:", generatedStaged)
+printGroup("Additional non-generated files staged:", extraStaged)
+printGroup("Requested files not staged:", missingRequested)
+
+if (extraStaged.length > 0) {
+ console.log("Proceeding with additional non-generated files by default.")
+}
+' -- "${FILES[@]}"
+fi
+
+if [[ -n "$FIXUP" ]]; then
+ git commit --no-verify --fixup "$FIXUP"
+else
+ git commit --no-verify -m "$MESSAGE"
+fi
+
+# Step 5: Update snapshots for related tests (Jest only)
+if [[ ${#LINT_FILES[@]} -gt 0 && -x ./node_modules/.bin/jest ]]; then
+ echo ">> jest --findRelatedTests -u (${#LINT_FILES[@]} files)"
+ ./node_modules/.bin/jest --findRelatedTests "${LINT_FILES[@]}" -u 2>&1 || true
+
+ # Step 6: If snapshots changed, amend the commit
+ SNAP_CHANGES=$(git diff --name-only -- '**/__snapshots__/**' 2>/dev/null || true)
+ if [[ -n "$SNAP_CHANGES" ]]; then
+ echo ">> Snapshots updated, amending commit:"
+ echo "$SNAP_CHANGES"
+ if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then
+ echo ">> Auto-generated companion files staged:"
+ echo "$SNAP_CHANGES"
+ fi
+ git add -- $SNAP_CHANGES
+ git commit --amend --no-edit --no-verify
+ else
+ echo ">> No snapshot changes"
+ fi
+fi
+
+# Step 7: Reorder fixup commits to be adjacent to their targets
+# Detects fixup commits by --fixup flag or "fixup! " prefix in message
+IS_FIXUP="false"
+if [[ -n "$FIXUP" ]]; then
+ IS_FIXUP="true"
+elif [[ "$MESSAGE" == fixup!* ]]; then
+ IS_FIXUP="true"
+fi
+
+if [[ "$IS_FIXUP" == "true" && "$REORDER" == "true" ]]; then
+ echo ">> Reordering fixup commit..."
+
+ # Find the merge-base with the default upstream branch
+ DEFAULT_UPSTREAM=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null \
+ || echo "origin/$(git remote show origin 2>/dev/null | sed -n '/HEAD branch/s/.*: //p')" \
+ || echo "origin/master")
+
+ BASE=$(git merge-base "$DEFAULT_UPSTREAM" HEAD 2>/dev/null || echo "")
+
+ if [[ -n "$BASE" ]]; then
+ # Interactive rebase with autosquash to reorder (editor does nothing, so commits aren't squashed)
+ if GIT_EDITOR=true git -c sequence.editor=: rebase -i "$BASE" --autosquash 2>/dev/null; then
+ echo ">> Fixup reordered successfully"
+ else
+ # Rebase failed (likely conflict) - abort and warn
+ git rebase --abort 2>/dev/null || true
+ echo ">> Warning: Could not reorder fixup (conflict). Fixup remains at HEAD." >&2
+ echo ">> Run 'git rebase -i --autosquash $BASE' manually to reorder." >&2
+ fi
+ else
+ echo ">> Warning: Could not determine merge-base for reorder" >&2
+ fi
+fi
+
+echo ">> Done"
diff --git a/.cursor/skills/one-shot/SKILL.md b/.cursor/skills/one-shot/SKILL.md
new file mode 100644
index 0000000..e2b84c1
--- /dev/null
+++ b/.cursor/skills/one-shot/SKILL.md
@@ -0,0 +1,70 @@
+---
+name: one-shot
+description: "End-to-end flow for a task: plan/context, implementation, PR creation, and Asana attach/assign in one command."
+compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration. ASANA_GITHUB_SECRET for PR attachment.
+metadata:
+ author: j0ntz
+---
+
+Run the full legacy-style task-to-PR workflow in one command by orchestrating `/asana-plan`, `/im`, and `/pr-create`.
+
+
+Do not re-implement logic already defined in `/asana-plan`, `/im`, or `/pr-create`. Delegate to those skills.
+By default, invoke `/pr-create` with both `--asana-attach` and `--asana-assign`.
+This workflow is hands-off. If reviewer assignment cannot be resolved from task state or explicit input, let `/pr-create` skip assignment rather than pausing for reviewer input.
+If Asana attach/assign flags are active, a task GID must be available from the Asana URL input or explicit `--asana-task` flag; otherwise fail fast.
+If any delegated skill or companion script fails, report and stop. Do not bypass with manual alternatives.
+Do not draft alternate PR markdown formats inside this workflow. `/pr-create` owns PR body generation and template compliance.
+
+
+
+Accept one of:
+
+1. Asana task URL
+2. Text/file requirements
+
+Optional flags:
+
+- `--asana-task <gid>` (explicit Asana GID override)
+- `--no-asana-attach`
+- `--no-asana-assign`
+
+
+
+Run `/asana-plan` with the provided input mode:
+
+- Asana URL mode: fetch task context and create plan
+- Text/file mode: create plan from provided requirements
+
+Wait for user confirmation handled by `/asana-plan`.
+
+
+
+Run `/im` using the approved `/asana-plan` output.
+
+
+
+Run `/pr-create` with defaults:
+
+- include `--asana-attach` unless `--no-asana-attach`
+- include `--asana-assign` unless `--no-asana-assign`
+
+Task GID source priority:
+
+1. explicit `--asana-task <gid>`
+2. Asana task URL from step 1
+3. chat context from prior steps
+
+
+
+Return the final PR URL and which delegated phases ran:
+
+- planning: `/asana-plan`
+- implementation: `/im`
+- PR creation: `/pr-create`
+
+
+
+Fail fast and ask for `--asana-task <gid>` or disable flags with `--no-asana-attach` / `--no-asana-assign`.
+Allow workflow with `--no-asana-attach --no-asana-assign` when no task link/GID exists.
+
diff --git a/.cursor/skills/pr-address/SKILL.md b/.cursor/skills/pr-address/SKILL.md
new file mode 100644
index 0000000..596c2a8
--- /dev/null
+++ b/.cursor/skills/pr-address/SKILL.md
@@ -0,0 +1,213 @@
+---
+name: pr-address
+description: Address PR feedback with fixup commits, resolving each comment after replying. Use when the user wants to address review comments on a pull request.
+compatibility: Requires git, gh.
+metadata:
+ author: j0ntz
+---
+
+Address PR feedback with fixup commits, resolving each comment after replying with how it was addressed.
+
+
+Do NOT call `gh` directly. Use `~/.cursor/skills/pr-address/scripts/pr-address.sh` for all GitHub API interactions (it uses `gh` internally).
+If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or other workarounds.
+All git commands that may open an editor (`rebase --continue`, `commit` without `-m`) MUST be prefixed with `GIT_EDITOR=true` to prevent blocking on `COMMIT_EDITMSG` in the IDE.
+NEVER use `git_log_or_diff:GitKraken`. Use local `git` commands directly.
+If any other instruction conflicts with this file, **this file wins** for `pr-address`.
+Commit fixups using `~/.cursor/skills/lint-commit.sh --no-reorder -m "fixup! {headline}" [files...]`. `--no-reorder` is required — the default reorder runs `rebase --autosquash` which squashes fixups immediately, conflicting with step 4's conditional autosquash. Do NOT manually run eslint — the commit script handles it.
+GitHub API scripts can take up to 30s. Set `block_until_ms: 60000` when invoking `pr-address.sh`.
+ALWAYS reply explaining how a comment was addressed BEFORE resolving or marking it. No silent resolutions.
+Only explicitly resolved threads (`isResolved: true`) or machine-readable addressed markers (posted via `mark-addressed`) count as resolved. Recency (commits after a comment) does NOT mean resolved.
+
+
+
+Before any other work, ensure the PR's branch is checked out and up to date:
+
+```bash
+~/.cursor/skills/pr-address/scripts/pr-address.sh ensure-branch --owner <owner> --repo <repo> --pr <number>
+```
+
+The script:
+- If already on the PR branch → pulls latest
+- If on a different branch → stashes uncommitted changes (if any), checks out the PR branch, pulls latest
+
+Output includes `BRANCH_READY`, `STASHED`, and (if switched) `PREVIOUS_BRANCH`. If `STASHED=true`, inform the user that changes were stashed on the previous branch.
+
+
+
+Always fetch live from GitHub. Run both in parallel:
+
+```bash
+# Fetch unresolved feedback
+~/.cursor/skills/pr-address/scripts/pr-address.sh fetch --owner <owner> --repo <repo> --pr <number>
+
+# Populate /tmp/pr-body.md from the live PR body (source of truth)
+~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-pr-body --owner <owner> --repo <repo> --pr <number>
+```
+
+If either script exits code 2 with `PROMPT_GH_AUTH`, prompt: "`gh` CLI is not authenticated. Please run: `gh auth login`"
+
+The `fetch` output contains:
+- **prAuthor**: The PR author's GitHub username
+- **currentUser**: Your GitHub username (the authenticated `gh` user)
+- **hasHumanReviewers**: `true` if any external human reviewer (not `currentUser`, not bots) has commented — used for autosquash decision
+- **humanReviewers**: List of external human reviewer usernames
+- **threads**: All unresolved inline review threads (includes comments from `currentUser` for context)
+- **reviewBodies**: Latest review body per non-author reviewer (excludes `prAuthor` and bots)
+- **topLevel**: Top-level comments (excludes `prAuthor` and bots)
+
+To inspect a specific inline thread, including an already-resolved one, use:
+
+```bash
+~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-thread \
+ --owner --repo --pr \
+ --thread-id ""
+```
+
+The `fetch-pr-body` call writes the current PR body to `/tmp/pr-body.md`. This file is available for editing throughout the session. If you need to update the PR body (e.g. to revise the description after addressing feedback), edit `/tmp/pr-body.md` via the Write tool and push it back:
+
+```bash
+gh pr edit --body-file /tmp/pr-body.md
+```
+
+
+
+Address every item returned by `fetch`. Group inline threads by file. If the user provided specific files, scope to those only.
+
+
+Ask: **"Which commit introduced the behavior/code this comment is about?"**
+
+- List commits touching the file: `git log --oneline -- `
+- A specific line/function → fixup the commit that introduced it
+- A missing feature/behavior → fixup the commit that should have included it
+- A pattern/style issue → fixup the earliest commit where it appears
+- Ambiguous → ask the user
+
+Get the target commit headline:
+```bash
+git log -1 --format='%s' <target-commit>
+```
+
+
+
+1. Read each file with comments
+2. Apply changes — comment hunks can be narrower than intent; apply consistently within the function/file
+3. Commit using `lint-commit.sh`:
+ ```bash
+ ~/.cursor/skills/lint-commit.sh --no-reorder -m "fixup! {targetHeadline}" [files...]
+ ```
+
+
+
+After all fixup commits are created, push to the remote so the reviewer can see the changes referenced in replies:
+
+```bash
+git push
+```
+
+
+
+
+After fixing, reply to every processed comment — addressed or rejected — then resolve it.
+
+
+If a later fix may affect an already-addressed inline thread, inspect the thread first:
+
+```bash
+~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-thread \
+ --owner --repo --pr \
+ --thread-id ""
+```
+
+Use the returned history to decide whether the existing reply still fully reflects the latest fix. If it does not, add one new factual follow-up reply. Multiple replies in the same thread are acceptable when they capture materially new fixes.
+
+1. Reply to the first comment in the thread:
+ ```bash
+ ~/.cursor/skills/pr-address/scripts/pr-address.sh reply \
+ --owner --repo --pr \
+ --comment-id --body ""
+ ```
+
+ If the comment ID is a GraphQL node ID, resolve to numeric first:
+ ```bash
+ ~/.cursor/skills/pr-address/scripts/pr-address.sh resolve-id \
+ --owner --repo --pr \
+ --node-id ""
+ ```
+
+2. Then mark the thread as resolved:
+ ```bash
+   ~/.cursor/skills/pr-address/scripts/pr-address.sh resolve-thread --thread-id "<thread-id>"
+ ```
+
+
+
+These have no native resolution mechanism. Post a top-level comment with a machine-readable marker:
+
+```bash
+~/.cursor/skills/pr-address/scripts/pr-address.sh mark-addressed \
+ --owner --repo --pr \
+ --type --target-id \
+ --body ""
+```
+
+The script appends a machine-readable addressed marker (identifying the review or comment) to the body. Subsequent `fetch` calls detect these markers and exclude already-addressed items.
+
+**Skip bot-only no-op items**: If a review body or top-level comment is from a bot user (e.g., `cursor`, `chatgpt-codex-connector`) AND contains no inline threads with actionable suggestions — only a summary or status message — do NOT post a `mark-addressed` comment. Human reviewer items must always be addressed or rejected, even terse ones like "This needs work".
+
+
+
+- **Addressed**: State what was fixed. Factual, 1 sentence.
+- **Invalid/false-positive**: Brief evidence citing code paths or logic. 1-3 sentences.
+- No pleasantries. Factual tone only.
+
+
+
+
+Only autosquash if `hasHumanReviewers` is `false`. This means no external human reviewer (someone other than `currentUser`) has commented.
+
+Autosquash is **allowed** when only:
+- Automated reviewers (`cursor`, `chatgpt-codex-connector`, or other bots) commented, OR
+- `currentUser` commented (your own notes/action items)
+
+Autosquash is **blocked** when:
+- Any external human reviewer has commented — they are actively reviewing and need to see the fixup commits
+
+If `hasHumanReviewers` is `true`, **do NOT autosquash**. Leave fixup commits visible for human reviewers to verify before squashing on merge.
+
+When autosquashing is allowed:
+```bash
+~/.cursor/skills/pr-address/scripts/pr-address.sh autosquash
+```
+
+If conflicts occur, resolve them, then: `GIT_EDITOR=true git rebase --continue`. If a commit becomes empty after squashing: `git rebase --skip`.
+
+Force push is required after autosquash because the rebase rewrites history:
+```bash
+git push --force-with-lease
+```
+
+
+
+Run full verification to catch issues introduced by fixup commits:
+
+```bash
+~/.cursor/skills/verify-repo.sh . --base <base>
+```
+
+Where `<base>` is `origin/develop` for `edge-react-gui` or `origin/master` for other repos. Set `block_until_ms: 120000`.
+
+If verification fails, fix the issue with another fixup commit, then re-run verification.
+
+
+
+Propose modifications to `~/.cursor/rules/typescript-standards.mdc` to prevent similar review comments in the future. Prompt for confirmation before applying.
+
+
+
+Script exits code 2 with `PROMPT_GH_AUTH`. Prompt user to run `gh auth login` and STOP.
+Report "No unresolved comments on this PR" and STOP.
+Do NOT autosquash when `hasHumanReviewers` is true. Leave fixup commits for the external reviewer to verify, then squash on merge.
+If the current code already handles the feedback (e.g., from a previous fixup), still reply explaining this and resolve/mark the comment. Do not leave it unresolved.
+Fetch the thread history first. If the prior reply no longer reflects the latest fix, post one additional factual follow-up reply. Do not edit or delete prior replies in this workflow.
+
diff --git a/.cursor/skills/pr-address/scripts/pr-address.sh b/.cursor/skills/pr-address/scripts/pr-address.sh
new file mode 100755
index 0000000..ff0e182
--- /dev/null
+++ b/.cursor/skills/pr-address/scripts/pr-address.sh
@@ -0,0 +1,359 @@
+#!/usr/bin/env bash
+# pr-address.sh
+# Companion script for pr-address.md
+# Handles deterministic operations: comment fetching, replies, thread resolution, autosquash.
+#
+# Subcommands:
+# fetch --owner --repo --pr Fetch all unresolved feedback via GraphQL
+# fetch-thread --owner --repo --pr --thread-id
+# reply --owner --repo --pr --comment-id --body
+# resolve-thread --thread-id Mark inline thread as resolved (GraphQL)
+# mark-addressed --owner --repo --pr --type --target-id --body
+# resolve-id --owner --repo --pr --node-id
+# headline --owner --repo --sha
+# fetch-pr-body --owner --repo --pr Fetch current PR body → /tmp/pr-body.md
+# ensure-branch --owner --repo --pr Checkout PR branch, stash if needed, pull
+# autosquash Rebase --autosquash from merge-base
+#
+# Exit codes: 0 = success, 1 = error, 2 = needs user input (e.g. gh not authenticated)
+set -euo pipefail
+
+CMD="${1:-}"
+shift || true
+
+OWNER="" REPO="" PR="" COMMENT_ID="" NODE_ID="" BODY="" SHA="" THREAD_ID="" TARGET_TYPE="" TARGET_ID=""
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --owner) OWNER="$2"; shift 2 ;;
+ --repo) REPO="$2"; shift 2 ;;
+ --pr) PR="$2"; shift 2 ;;
+ --comment-id) COMMENT_ID="$2"; shift 2 ;;
+ --node-id) NODE_ID="$2"; shift 2 ;;
+ --body) BODY="$2"; shift 2 ;;
+ --sha) SHA="$2"; shift 2 ;;
+ --thread-id) THREAD_ID="$2"; shift 2 ;;
+ --type) TARGET_TYPE="$2"; shift 2 ;;
+ --target-id) TARGET_ID="$2"; shift 2 ;;
+ *) echo "Unknown arg: $1" >&2; exit 1 ;;
+ esac
+done
+
+require_gh() {
+ if ! command -v gh &>/dev/null; then
+ echo "PROMPT_GH_INSTALL" >&2; exit 2
+ fi
+ if ! gh auth status &>/dev/null 2>&1; then
+ echo "PROMPT_GH_AUTH" >&2; exit 2
+ fi
+}
+
+case "$CMD" in
+ fetch)
+ require_gh
+ if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then
+ echo "Error: --owner, --repo, --pr required" >&2; exit 1
+ fi
+
+ gh api graphql \
+ -f query='query($owner: String!, $repo: String!, $number: Int!) {
+ repository(owner: $owner, name: $repo) {
+ pullRequest(number: $number) {
+ author { login }
+ headRefName
+ baseRefName
+ reviewThreads(first: 100) {
+ nodes {
+ id
+ isResolved
+ comments(first: 50) {
+ nodes {
+ databaseId
+ createdAt
+ author { login }
+ path
+ line
+ body
+ }
+ }
+ }
+ }
+ reviews(last: 50) {
+ nodes {
+ databaseId
+ author { login }
+ state
+ body
+ submittedAt
+ }
+ }
+ comments(last: 50) {
+ nodes {
+ databaseId
+ createdAt
+ author { login }
+ body
+ }
+ }
+ }
+ }
+ }' \
+ -f owner="$OWNER" -f repo="$REPO" -F number="$PR" \
+ | GH_USER=$(gh api user --jq '.login') node -e "
+ const fs = require('fs')
+ const data = JSON.parse(fs.readFileSync('/dev/stdin', 'utf8'))
+ const pr = data.data.repository.pullRequest
+ const prAuthor = pr.author?.login
+ const currentUser = process.env.GH_USER
+
+ const addressedIds = new Set()
+ for (const c of pr.comments.nodes) {
+          for (const m of (c.body || '').matchAll(/<!-- addressed: (\d+) -->/g)) { // NOTE(review): marker regex reconstructed — the HTML-comment marker was stripped from this patch; confirm exact marker text against mark-addressed
+ addressedIds.add(Number(m[1]))
+ }
+ }
+
+ const isBot = u => !u || u.includes('[bot]') || u === 'cursor'
+ const isAutomatedReviewer = u => isBot(u) || u === 'chatgpt-codex-connector'
+
+ const threads = pr.reviewThreads.nodes
+ .filter(t => !t.isResolved)
+ .map(t => ({
+ threadId: t.id,
+ path: t.comments.nodes[0]?.path,
+ line: t.comments.nodes[0]?.line,
+ comments: t.comments.nodes.map(c => ({
+ id: c.databaseId,
+ user: c.author?.login,
+ body: c.body,
+ createdAt: c.createdAt
+ }))
+ }))
+
+ // Check if any human (non-bot, non-automated, non-currentUser) reviewer has commented
+ // prAuthor CAN be an external human reviewer if they're not currentUser
+ const humanCommenters = new Set()
+ for (const t of threads) {
+ for (const c of t.comments) {
+ if (c.user && !isAutomatedReviewer(c.user) && c.user !== currentUser) {
+ humanCommenters.add(c.user)
+ }
+ }
+ }
+
+ const latestByUser = {}
+ for (const r of pr.reviews.nodes) {
+ const user = r.author?.login
+ if (!user || user === prAuthor || r.state === 'PENDING' || isBot(user)) continue
+ const prev = latestByUser[user]
+ if (!prev || new Date(r.submittedAt) > new Date(prev.submittedAt)) {
+ latestByUser[user] = r
+ }
+ if (!isAutomatedReviewer(user) && user !== currentUser) {
+ humanCommenters.add(user)
+ }
+ }
+ const reviewBodies = Object.entries(latestByUser)
+ .filter(([, r]) => r.body?.trim() && !addressedIds.has(r.databaseId))
+ .map(([user, r]) => ({
+ reviewId: r.databaseId, user, state: r.state,
+ body: r.body, submittedAt: r.submittedAt
+ }))
+
+ const topLevel = pr.comments.nodes.filter(c => {
+ const user = c.author?.login
+ if (!user || user === prAuthor || isBot(user)) return false
+ if ((c.body || '').includes('"
+ FULL_BODY="${BODY} ${MARKER}"
+ RESULT=$(echo '{}' | jq --arg body "$FULL_BODY" '{body: $body}' | \
+ gh api "repos/$OWNER/$REPO/issues/$PR/comments" -X POST --input -)
+ ID=$(echo "$RESULT" | jq -r '.id // empty')
+ if [[ -n "$ID" ]]; then
+ echo "marked: $ID ($MARKER)"
+ else
+ echo "Mark failed: $RESULT" >&2; exit 1
+ fi
+ ;;
+
+ resolve-id)
+ require_gh
+ if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" || -z "$NODE_ID" ]]; then
+ echo "Error: --owner, --repo, --pr, --node-id required" >&2; exit 1
+ fi
+ RESULT=$(gh api "repos/$OWNER/$REPO/pulls/$PR/comments" --paginate \
+ --jq ".[] | select(.node_id == \"$NODE_ID\") | .id")
+ if [[ -n "$RESULT" ]]; then
+ echo "$RESULT"
+ else
+ echo "Comment not found for node_id: $NODE_ID" >&2; exit 1
+ fi
+ ;;
+
+ headline)
+ require_gh
+ if [[ -z "$OWNER" || -z "$REPO" || -z "$SHA" ]]; then
+ echo "Error: --owner, --repo, --sha required" >&2; exit 1
+ fi
+ gh api "repos/$OWNER/$REPO/commits/$SHA" --jq '.commit.message | split("\n") | .[0]'
+ ;;
+
+ fetch-pr-body)
+ require_gh
+ if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then
+ echo "Error: --owner, --repo, --pr required" >&2; exit 1
+ fi
+ BODY=$(gh api "repos/$OWNER/$REPO/pulls/$PR" --jq '.body // ""')
+ echo "$BODY" > /tmp/pr-body.md
+ echo ">> Wrote PR body to /tmp/pr-body.md ($(wc -c < /tmp/pr-body.md | tr -d ' ') bytes)"
+ ;;
+
+ ensure-branch)
+ require_gh
+ if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then
+ echo "Error: --owner, --repo, --pr required" >&2; exit 1
+ fi
+
+ PR_BRANCH=$(gh api "repos/$OWNER/$REPO/pulls/$PR" --jq '.head.ref')
+ CURRENT_BRANCH=$(git branch --show-current)
+
+ if [[ "$CURRENT_BRANCH" == "$PR_BRANCH" ]]; then
+ echo ">> Already on $PR_BRANCH — pulling latest"
+ git pull --ff-only 2>&1 || git pull --rebase 2>&1
+ echo ">> BRANCH_READY=$PR_BRANCH STASHED=false"
+ else
+ STASHED=false
+ if ! git diff --quiet HEAD 2>/dev/null || ! git diff --cached --quiet HEAD 2>/dev/null || [[ -n "$(git ls-files --others --exclude-standard)" ]]; then
+ echo ">> Stashing uncommitted changes on $CURRENT_BRANCH"
+ git stash -u
+ STASHED=true
+ fi
+ echo ">> Switching from $CURRENT_BRANCH to $PR_BRANCH"
+ git checkout "$PR_BRANCH" 2>&1
+ git pull --ff-only 2>&1 || git pull --rebase 2>&1
+ echo ">> BRANCH_READY=$PR_BRANCH STASHED=$STASHED PREVIOUS_BRANCH=$CURRENT_BRANCH"
+ fi
+ ;;
+
+ autosquash)
+ DEFAULT_UPSTREAM=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null \
+ || echo "origin/$(git remote show origin | sed -n '/HEAD branch/s/.*: //p')")
+ BASE=$(git merge-base "$DEFAULT_UPSTREAM" HEAD)
+ GIT_EDITOR=true git -c sequence.editor=: rebase -i "$BASE" --autosquash
+ echo ">> Autosquash complete"
+ ;;
+
+ *)
+ echo "Usage: pr-address.sh {fetch|fetch-thread|reply|resolve-thread|mark-addressed|resolve-id|headline|fetch-pr-body|ensure-branch|autosquash} [args]" >&2
+ exit 1
+ ;;
+esac
diff --git a/.cursor/skills/pr-create/SKILL.md b/.cursor/skills/pr-create/SKILL.md
new file mode 100644
index 0000000..1c3428a
--- /dev/null
+++ b/.cursor/skills/pr-create/SKILL.md
@@ -0,0 +1,106 @@
+---
+name: pr-create
+description: Create a pull request from the current branch, with optional Asana attach/assign updates.
+compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana updates. ASANA_GITHUB_SECRET for Asana PR attachment.
+metadata:
+ author: j0ntz
+---
+
+Create a PR from the current branch, optionally attach it to Asana and assign reviewer.
+
+
+Do NOT call `gh` directly for PR creation. Use `~/.cursor/skills/pr-create/scripts/pr-create.sh`.
+If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or workarounds.
+If script exits code 2 with `PROMPT_GH_AUTH`, prompt user to run `gh auth login` and STOP.
+Do NOT create a PR when there are uncommitted changes.
+Do NOT push to `master`/`develop` directly.
+Run verification before creating the PR.
+`--asana-attach`/`--asana-assign` only run when a task GID is available from chat context or explicit `--asana-task <task-gid>`. If no task GID is available, fail fast and skip Asana updates.
+When `--asana-assign` is requested from a hands-off flow, pass `--skip-assign-if-missing` to `asana-task-update.sh` so missing reviewers do not block PR completion.
+Asana updates can take up to 90s. Use `block_until_ms: 120000` for `asana-task-update.sh` calls.
+If the repo has `.github/PULL_REQUEST_TEMPLATE.md`, the PR body must preserve that template's section headings. Do NOT substitute generic sections like `Summary` or `Test plan`.
+
+
+
+Push current branch if needed:
+
+```bash
+git push -u origin HEAD
+```
+
+If tracking is already configured and branch is up to date, skip.
+
+
+
+Run:
+
+```bash
+~/.cursor/skills/verify-repo.sh . --base <base>
+```
+
+Use `origin/develop` for `edge-react-gui` and `origin/master` for other repos.
+
+
+
+Gather context in parallel:
+
+```bash
+DEFAULT_BRANCH=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null | sed 's|origin/||' || git remote show origin 2>/dev/null | sed -n '/HEAD branch/s/.*: //p' || echo master)
+git log origin/$DEFAULT_BRANCH..HEAD --format=%B---
+```
+
+If `.github/PULL_REQUEST_TEMPLATE.md` exists, read it now and use it as the source of truth for the PR body structure. Fill in its existing sections and only append `### Description` if the template has no description section and branch context needs a place to live.
+
+If Asana context is available from chat or fetched via `--asana-task`, add it inside `### Description`. Do not invent alternate section sets such as `Summary` / `Test plan`.
+
+
+
+Write body to `/tmp/pr-body.md`, then run:
+
+```bash
+~/.cursor/skills/pr-create/scripts/pr-create.sh \
+  --title "<title>" \
+  --body-file /tmp/pr-body.md \
+  [--asana-task <task-gid>]
+```
+
+The companion script validates body files against the repo template and rejects generic fallback sections on templated repos. Capture PR URL and number from JSON output.
+
+
+
+If neither `--asana-attach` nor `--asana-assign` was requested, skip.
+
+If either flag is requested, resolve `task_gid` from:
+
+1. explicit `--asana-task <task-gid>` argument
+2. chat context (previous task-review/im context)
+
+If no task GID is available, fail fast and report:
+
+> Asana flags were requested but no task GID was found in flags or chat context.
+
+Then call:
+
+```bash
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <task-gid> \
+  [--attach-pr --pr-url <pr-url> --pr-title "<pr-title>" --pr-number <pr-number>] \
+ [--assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs]
+```
+
+- `--asana-attach` maps to `--attach-pr ...`
+- `--asana-assign` maps to `--assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs`
+- If both are set, combine in one command.
+
+
+
+Display PR URL as a clickable markdown link:
+
+`[owner/repo#123](https://github.com/owner/repo/pull/123)`
+
+
+
+Report the existing PR URL and stop.
+Prompt user to run `gh auth login` and stop.
+Ask user before rebasing and force-pushing.
+
diff --git a/.cursor/skills/pr-create/scripts/pr-create.sh b/.cursor/skills/pr-create/scripts/pr-create.sh
new file mode 100755
index 0000000..eb40ead
--- /dev/null
+++ b/.cursor/skills/pr-create/scripts/pr-create.sh
@@ -0,0 +1,331 @@
+#!/usr/bin/env node
+// pr-create.sh — Creates a PR for the current branch using gh CLI.
+// Usage: ./pr-create.sh [--title "PR title"] [--body-file ] [--draft]
+// Reads from git context: repo owner/name, current branch, default branch.
+// Outputs JSON with PR URL and number on success.
+
+const { execSync, spawnSync } = require("child_process");
+const fs = require("fs");
+const os = require("os");
+const path = require("path");
+
+// Parse args
+const args = process.argv.slice(2);
+let title = null;
+let bodyFile = null;
+let draft = false;
+let asanaTask = null;
+
+for (let i = 0; i < args.length; i++) {
+ if (args[i] === "--title" && args[i + 1]) title = args[++i];
+ else if (args[i] === "--body-file" && args[i + 1]) bodyFile = args[++i];
+ else if (args[i] === "--asana-task" && args[i + 1]) asanaTask = args[++i];
+ else if (args[i] === "--draft") draft = true;
+}
+
+function git(cmd) {
+ return execSync(`git ${cmd}`, { encoding: "utf8" }).trim();
+}
+
+function escapeRegExp(value) {
+ return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+}
+
+function countOccurrences(haystack, needle) {
+ const matches = haystack.match(new RegExp(escapeRegExp(needle), "g"));
+ return matches == null ? 0 : matches.length;
+}
+
+function hasSection(bodyText, heading) {
+ return new RegExp(`^${escapeRegExp(heading)}$`, "m").test(bodyText);
+}
+
+function extractTemplateHeadings(templateBody) {
+ return Array.from(templateBody.matchAll(/^### .+$/gm), match => match[0]);
+}
+
+function setChecklistValue(bodyText, label, checked) {
+ const pattern = new RegExp(
+ `^- \\[[ x]\\] ${escapeRegExp(label)}$`,
+ "m"
+ );
+ return bodyText.replace(pattern, `- [${checked ? "x" : " "}] ${label}`);
+}
+
+function appendDescriptionSection(bodyText, description) {
+ if (description === "") return bodyText.trimEnd();
+ return `${bodyText.trimEnd()}\n\n### Description\n\n${description}`;
+}
+
+function insertAfterHeading(bodyText, heading, insertText) {
+ const headingPattern = new RegExp(
+ `^${escapeRegExp(heading)}\\n`,
+ "m"
+ );
+ const match = headingPattern.exec(bodyText);
+ if (match == null) return null;
+
+ const afterHeading = match.index + match[0].length;
+ const rest = bodyText.slice(afterHeading).replace(/^\n*/, "");
+ return (
+ bodyText.slice(0, afterHeading) +
+ `\n${insertText}\n\n` +
+ rest
+ );
+}
+
+function buildDescriptionFromCommits() {
+ try {
+ const log = git(`log origin/${defaultBranch}..HEAD --format=%B---`);
+ const messages = log
+ .split("---")
+ .map(message => message.trim())
+ .filter(Boolean);
+
+ if (messages.length === 1) {
+ const parts = messages[0].split("\n").filter(Boolean);
+ return parts.length > 1 ? parts.slice(1).join("\n") : "none";
+ }
+
+ return "none";
+ } catch {
+ return "none";
+ }
+}
+
+function loadRepoTemplate() {
+ const templatePath = path.join(process.cwd(), ".github", "PULL_REQUEST_TEMPLATE.md");
+ if (!fs.existsSync(templatePath)) return null;
+
+ return {
+ path: templatePath,
+ body: fs.readFileSync(templatePath, "utf8").replace(/\r\n/g, "\n").trim()
+ };
+}
+
+function buildBodyFromTemplate(templateBody) {
+ let rendered = templateBody;
+
+ if (hasSection(rendered, "### CHANGELOG")) {
+ rendered = setChecklistValue(rendered, "Yes", hasChangelog);
+ rendered = setChecklistValue(rendered, "No", !hasChangelog);
+ }
+
+ const description = buildDescriptionFromCommits();
+ return hasSection(rendered, "### Description")
+ ? rendered
+ : appendDescriptionSection(rendered, description);
+}
+
+function validateBodyForTemplate(bodyText, templateInfo) {
+ if (templateInfo == null) return;
+
+ const templateHeadings = extractTemplateHeadings(templateInfo.body);
+ const missingHeadings = templateHeadings.filter(
+ heading => !hasSection(bodyText, heading)
+ );
+ if (missingHeadings.length > 0) {
+ console.error(
+ "ERROR: PR body is missing required template headings from " +
+ `${templateInfo.path}: ${missingHeadings.join(", ")}`
+ );
+ process.exit(1);
+ }
+
+ const genericSections = [];
+ if (/^## Summary$/m.test(bodyText)) genericSections.push("## Summary");
+ if (/^## Test plan$/m.test(bodyText)) genericSections.push("## Test plan");
+ if (genericSections.length > 0) {
+ console.error(
+ "ERROR: PR body uses generic sections for a repo with a PR template: " +
+ genericSections.join(", ")
+ );
+ process.exit(1);
+ }
+}
+
+function requireGh() {
+ const check = spawnSync("gh", ["auth", "status"], { encoding: "utf8" });
+ if (check.status !== 0) {
+ console.error("PROMPT_GH_AUTH");
+ process.exit(2);
+ }
+}
+
+requireGh();
+
+// Detect repo info from git
+const remoteUrl = git("remote get-url origin");
+const normalizedRemoteUrl = remoteUrl.replace(/\/+$/, "");
+const match = normalizedRemoteUrl.match(/[:/]([^/]+)\/([^/.]+?)(?:\.git)?$/);
+if (!match) {
+ console.error("ERROR: Could not parse owner/repo from remote:", remoteUrl);
+ process.exit(1);
+}
+const [, owner, repo] = match;
+
+const branch = git("rev-parse --abbrev-ref HEAD");
+if (["master", "develop", "HEAD"].includes(branch)) {
+ console.error(
+ `ERROR: Cannot create PR from '${branch}'. Switch to a feature branch.`
+ );
+ process.exit(1);
+}
+
+// Detect default branch
+let defaultBranch;
+try {
+ defaultBranch = git(
+ "symbolic-ref --quiet --short refs/remotes/origin/HEAD"
+ ).replace("origin/", "");
+} catch {
+ try {
+ const show = execSync("git remote show origin", { encoding: "utf8" });
+ defaultBranch =
+ show.match(/HEAD branch:\s*(.+)/)?.[1]?.trim() || "master";
+ } catch {
+ defaultBranch = "master";
+ }
+}
+
+let hasChangelog = false;
+try {
+ const diff = git(`diff origin/${defaultBranch}..HEAD -- CHANGELOG.md`);
+ hasChangelog =
+ diff.includes("## Unreleased") ||
+ /^\+- (added|changed|fixed):/m.test(diff);
+} catch {}
+
+const templateInfo = loadRepoTemplate();
+
+// Build title from commits/branch if not provided
+if (!title) {
+ try {
+ const commits = git(`log origin/${defaultBranch}..HEAD --oneline`)
+ .split("\n")
+ .filter(Boolean);
+ if (commits.length === 1) {
+ title = commits[0].replace(/^[a-f0-9]+\s+/, "");
+ } else {
+ title = branch
+ .replace(/^jon\//, "")
+ .replace(/^fix\//, "Fix: ")
+ .replace(/^feat\//, "")
+ .replace(/[-_]/g, " ")
+ .replace(/^\w/, (c) => c.toUpperCase());
+ }
+ } catch {
+ title = branch;
+ }
+}
+
+// Read body from file if provided
+let body = bodyFile ? fs.readFileSync(bodyFile, "utf8") : null;
+
+// Build body from template if not provided
+if (!body) {
+ body =
+ templateInfo == null
+ ? `### CHANGELOG\n\n` +
+ `Does this branch warrant an entry to the CHANGELOG?\n\n` +
+ `- [${hasChangelog ? "x" : " "}] Yes\n` +
+ `- [${hasChangelog ? " " : "x"}] No\n\n` +
+ `### Dependencies\n\nnone\n\n### Description\n\n${buildDescriptionFromCommits()}`
+ : buildBodyFromTemplate(templateInfo.body);
+}
+
+validateBodyForTemplate(body, templateInfo);
+
+// Guardrail: fail fast if the body appears to include duplicate templates.
+// This prevents accidental append/concatenation from creating malformed PR descriptions.
+const templateSectionCounts = {
+ changelog: countOccurrences(body, "### CHANGELOG"),
+ dependencies: countOccurrences(body, "### Dependencies"),
+ description: countOccurrences(body, "### Description")
+};
+if (
+ templateSectionCounts.changelog > 1 ||
+ templateSectionCounts.dependencies > 1 ||
+ templateSectionCounts.description > 1
+) {
+ console.error(
+ "ERROR: PR body contains duplicated template sections. Regenerate /tmp/pr-body.md and retry."
+ );
+ console.error(JSON.stringify(templateSectionCounts));
+ process.exit(1);
+}
+
+// NOTE(review): this guardrail block (sectionCounts) duplicates the
+// templateSectionCounts check immediately above — identical in effect; remove one.
+const sectionCounts = {
+ changelog: countOccurrences(body, "### CHANGELOG"),
+ dependencies: countOccurrences(body, "### Dependencies"),
+ description: countOccurrences(body, "### Description"),
+};
+if (
+ sectionCounts.changelog > 1 ||
+ sectionCounts.dependencies > 1 ||
+ sectionCounts.description > 1
+) {
+ console.error(
+ "ERROR: PR body appears to contain duplicated template sections. " +
+ "Regenerate the body file and retry."
+ );
+ console.error(JSON.stringify(sectionCounts));
+ process.exit(1);
+}
+
+// Inject Asana link if provided and not already present
+if (asanaTask) {
+ const asanaUrl = `https://app.asana.com/0/0/${asanaTask}/f`;
+ const asanaRegex = new RegExp(`https://app\\.asana\\.com/\\d+/\\d+/(?:task/)?${asanaTask}`, "i");
+ if (!asanaRegex.test(body)) {
+ const link = `[Asana task](${asanaUrl})`;
+ body =
+ insertAfterHeading(body, "### Description", link) ??
+ appendDescriptionSection(body, link);
+ }
+}
+
+// Create PR via gh CLI — write body to a temp file to avoid arg length issues
+const tmpBody = path.join(os.tmpdir(), `pr-body-${process.pid}.md`);
+fs.writeFileSync(tmpBody, body, "utf8");
+const ghArgs = ["pr", "create", "--title", title, "--body-file", tmpBody];
+if (draft) ghArgs.push("--draft");
+
+const result = spawnSync("gh", ghArgs, { encoding: "utf8" });
+try { fs.unlinkSync(tmpBody); } catch {}
+if (bodyFile && bodyFile.startsWith(os.tmpdir())) {
+ try {
+ fs.unlinkSync(bodyFile);
+ } catch {}
+}
+if (result.status !== 0) {
+ console.error("ERROR:", (result.stderr || "").trim());
+ process.exit(1);
+}
+
+// gh pr create outputs the PR URL on stdout (--json not supported in older gh)
+const prUrl = (result.stdout || "").trim();
+const prMatch = prUrl.match(/\/pull\/(\d+)$/);
+if (!prMatch) {
+ console.error("ERROR: Could not parse PR URL from output:", prUrl);
+ process.exit(1);
+}
+
+console.log(
+ JSON.stringify(
+ {
+ url: prUrl,
+ number: parseInt(prMatch[1], 10),
+ title,
+ base: defaultBranch,
+ head: branch,
+ draft,
+ owner,
+ repo,
+ },
+ null,
+ 2
+ )
+);
diff --git a/.cursor/skills/pr-land/SKILL.md b/.cursor/skills/pr-land/SKILL.md
new file mode 100644
index 0000000..777d010
--- /dev/null
+++ b/.cursor/skills/pr-land/SKILL.md
@@ -0,0 +1,368 @@
+---
+name: pr-land
+description: Land approved PRs by autosquashing fixups, rebasing onto the default upstream branch, and merging. Use when the user wants to merge/land pull requests.
+compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana updates.
+metadata:
+ author: j0ntz
+---
+
+Land approved PRs by autosquashing fixups, rebasing onto the default upstream branch, and merging. Accepts repo names, explicit PR references, or Asana task URLs.
+
+
+```
+/pr-land # All EdgeApp repos with $GIT_BRANCH_PREFIX/* PRs
+/pr-land edge-react-gui # Specific repo
+/pr-land edge-react-gui edge-core-js # Multiple repos
+/pr-land edge-react-gui#123 # Specific PR (shorthand)
+/pr-land https://github.com/EdgeApp/edge-react-gui/pull/123 # Specific PR (URL)
+/pr-land https://app.asana.com/0/1234/5678 # Asana task → resolves linked PRs
+/pr-land edge-react-gui#123 edge-core-js # Mix: explicit PR + repo scan
+```
+
+Arguments are classified automatically:
+- **Repo names** → branch-prefix scan (original behavior)
+- **PR URLs / shorthand** (`repo#N`) → fetched directly, no branch-prefix filter
+- **Asana task URLs** → resolved to linked GitHub PRs via Asana API (requires `ASANA_TOKEN`)
+- **No args** → scans all EdgeApp repos
+
+
+
+All GitHub API calls go through companion scripts that use `gh` CLI internally. Do NOT call `gh` or `curl` directly for GitHub operations — use the scripts.
+If a script exits code 2 with `PROMPT_GH_AUTH`, prompt the user to run `gh auth login`.
+Code conflicts → Skip PR. Abort the rebase to leave the repo clean, continue with remaining PRs. Report all skipped PRs at the end.
+Stale PRs → Skip and report. Old PRs with multiple conflicts should be skipped like code conflicts. Don't block the flow.
+CHANGELOG conflicts (any section, including staging): Agent resolves semantically, scripts verify the result.
+Verification is mandatory. Built into scripts, no bypass.
+Do NOT force-push without explicit user confirmation.
+Never open editors. All git operations must be non-interactive: `GIT_EDITOR=true` for commit messages, `GIT_SEQUENCE_EDITOR=:` for rebase todo lists.
+Unexpected exit codes → STOP immediately. If any script returns an exit code not documented in this file, STOP and report to user. Do NOT attempt to interpret, retry, or work around unexpected errors.
+Sequential merging requires rebase. Each subsequent PR MUST be rebased onto the updated base branch after the previous merge.
+Don't publish if outstanding PRs remain. Only offer to publish a repo when ALL approved PRs for that repo are merged. If any were skipped or held back, do NOT publish that repo.
+Step 7 CANNOT begin until the user explicitly confirms npm publish succeeded. `npm publish` requires interactive 2FA — the agent cannot run it. Do NOT infer publish completion from git push or tagging. STOP and WAIT for user confirmation.
+Asana updates are LAST. Do NOT update Asana tasks until ALL merges, publishes, and GUI dependency upgrades are complete. Only update status for PRs that are fully landed (merged, and if non-GUI: published + GUI deps updated).
+
+
+
+
+| Script | Purpose |
+|--------|---------|
+| `pr-land-discover.sh` | Discover PRs and approval status |
+| `pr-land-comments.sh` | Check for recent unaddressed feedback (inline threads, review bodies, top-level comments) |
+| `pr-land-prepare.sh` | Rebase + conflict detection + verification |
+| `verify-repo.sh` | Verification (CHANGELOG + code; lint scoped to changed files when `--base` given) |
+| `pr-land-merge.sh` | Rebase + verify + merge via GitHub API |
+| `pr-land-publish.sh` | Version bump, changelog update, commit + tag (no push) |
+| `staging-cherry-pick.sh` | Cherry-pick merged PR commits onto staging (see `/staging-cherry-pick` skill) |
+| `asana-task-update.sh` | Update linked Asana tasks after merge |
+
+| Script | Exit 0 | Exit 1 | Exit 2 | Exit 3 | Exit 4 |
+|--------|--------|--------|--------|--------|--------|
+| `pr-land-discover.sh` | Success | Error | Auth needed | - | - |
+| `pr-land-comments.sh` | Success | Error | - | - | - |
+| `pr-land-prepare.sh` | Ready | All failed | - | - | - |
+| `verify-repo.sh` | Pass | Code fail | CHANGELOG fail | - | - |
+| `pr-land-merge.sh` | Merged | Verify fail | - | - | CHANGELOG conflict |
+| `staging-cherry-pick.sh` | All cherry-picked | Error | Auth needed | CHANGELOG conflict | - |
+| `pr-land-publish.sh` | Ready (needs push) | Verify fail | No unreleased | - | - |
+| `asana-task-update.sh` | Success | Error | Needs user input | - | - |
+
+**Any exit code not in this table = STOP immediately and report to user.**
+
+
+
+ONE tool call:
+
+```bash
+~/.cursor/skills/pr-land/scripts/pr-land-discover.sh [args...]
+```
+
+Args can be repo names, PR URLs, PR shorthand (`repo#N`), or Asana task URLs (mixed freely).
+No args = scan all EdgeApp repos for `$GIT_BRANCH_PREFIX/*` PRs.
+
+Returns JSON: `{ "prs": [...], "errors": [...] }`. Each PR has `repo`, `prNumber`, `branch`, `title`, `approved`, `changesRequested`, `reviewers`. Errors include Asana resolution failures or PR fetch failures.
+
+
+
+```bash
+echo '[{"repo":"...","prNumber":123,"branch":"/..."}]' | ~/.cursor/skills/pr-land/scripts/pr-land-comments.sh
+```
+
+Returns PRs with unaddressed feedback posted after the last commit. The script checks **three sources**:
+
+1. **Unresolved inline review threads** — threads where `isResolved: false` with comments newer than last commit
+2. **Review bodies** — the latest review from each non-author/non-bot reviewer, if it has a non-empty body newer than last commit (catches feedback written in the approve/reject dialog, regardless of review state)
+3. **Top-level PR comments** — non-author/non-bot comments newer than last commit
+
+Items previously marked with `<!-- addressed: ... -->`-style HTML-comment markers (exact marker text was stripped from this patch — confirm against the companion scripts) are automatically excluded.
+
+
+1. AI/bot comments: Already filtered out by the script.
+2. Human reviewer comments on approved PRs — address and set aside:
+ 1. Read the comment and understand the requested change
+ 2. Make the fix as a fixup commit: `~/.cursor/skills/im/scripts/lint-commit.sh --fixup [files...]`
+ 3. Push the fixup to the branch
+ 4. Reply on the PR thread explaining what was fixed (1 sentence, factual). Use `gh pr comment --repo EdgeApp/ --body "..."` for top-level comments, or reply to the specific thread if the feedback was inline.
+ 5. **Remove this PR from the merge set** — it needs re-review after the fixup
+ 6. Continue with remaining PRs that have no outstanding comments
+ 7. Report addressed PRs to the user at the end of the workflow
+
+**Do NOT block the rest of the flow** for PRs with comments.
+
+
+
+
+ONE tool call per batch:
+
+```bash
+echo '[{"repo":"...","branch":"/feature"}]' | ~/.cursor/skills/pr-land/scripts/pr-land-prepare.sh
+```
+
+The prepare script handles: clone/checkout, autosquash fixups, rebase onto upstream, conflict detection, and verification.
+
+**Exit codes:**
+- `0` = At least one PR ready to push (skipped PRs reported in JSON output)
+- `1` = All PRs failed (verification or other errors, none ready)
+
+PR is skipped and reported in the `skipped` array. Rebase is aborted to leave repo clean. Other PRs continue.
+
+Agent resolves semantically (upstream entries first, then ours), then re-runs prepare.
+
+
+
+After prepare succeeds, push with `--force-with-lease`.
+
+
+
+Ask for user confirmation, then:
+
+```bash
+echo '[{"repo":"...","prNumber":123,"branch":"/..."}]' | ~/.cursor/skills/pr-land/scripts/pr-land-merge.sh [method]
+```
+
+The merge script processes PRs **sequentially** with automatic rebase-before-merge:
+
+1. **Check if already merged** — skip (handles re-runs after CHANGELOG resolution)
+2. **Fetch + rebase onto upstream** — ALWAYS done, even for first PR
+3. **Conflict handling during rebase:**
+ - No conflict → continue
+ - CHANGELOG-only (any section) → **exit 4** (agent resolves, re-runs)
+ - Code conflict → **skip PR**, abort rebase, continue
+4. **Push `--force-with-lease`**
+5. **Run local verification** (MANDATORY)
+6. **Merge via GitHub API**
+
+**Exit codes:**
+- `0` = All (non-skipped) PRs merged
+- `1` = Verification failed
+- `4` = CHANGELOG-only conflict (agent resolves, re-runs)
+
+**On exit 4:** Agent resolves semantically, pushes, re-runs merge. Script detects already-merged PRs and skips them.
+
+
+
+**Gating:** Only non-GUI repos. Only when ALL approved PRs for the repo are merged. Skip if any were skipped/held back.
+
+Ask for user confirmation:
+```
+Merged repos ready to publish (all PRs landed):
+ - ()
+
+Repos with outstanding PRs (not ready to publish):
+ - (N PRs skipped)
+
+Publish ready repos to npm? [y/N]
+```
+
+If confirmed:
+
+```bash
+echo '[{"repo":"...","branch":"master"}]' | ~/.cursor/skills/pr-land/scripts/pr-land-publish.sh
+```
+
+**Exit codes:**
+- `0` = Version bumped, committed, tagged (check `needsPush` in JSON output)
+- `1` = Verification failed
+- `2` = No unreleased changes in CHANGELOG
+
+After script completes:
+1. Show version bump details to user
+2. If confirmed, push master and tag: `git push origin master && git push origin v`
+3. Prompt user to run `npm login` and `npm publish` in a real terminal. Both commands require browser-based authentication (npm opens a URL for web login + 2FA approval) that cannot be automated.
+
+**STOP HERE. Do NOT proceed to step 7 until the user confirms npm publish succeeded.**
+
+
+
+**Trigger:** Only if non-`edge-react-gui` repos were merged and published in step 6. All non-GUI EdgeApp repos are GUI dependencies, so publishing always requires a GUI dep upgrade.
+
+Ask user to confirm `npm publish` completed, then:
+
+1. Save current branch and switch to develop:
+ ```bash
+ cd
+ ORIG_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+ git checkout develop && git pull origin develop
+ ```
+
+2. Run `upgrade-dep.sh` for each published package (sequentially):
+ ```bash
+ cd && ~/.cursor/skills/pr-land/scripts/upgrade-dep.sh
+ ```
+ If any fails, STOP and report. Ask user how to proceed.
+
+3. Restore original branch:
+ ```bash
+ cd
+ git checkout $ORIG_BRANCH
+ git stash pop
+ ```
+ If stash pop fails with conflicts, STOP and report. If "No stash entries", that's fine.
+
+
+
+**Trigger:** Only for `edge-react-gui` commits that target the `## X.Y.Z (staging)` CHANGELOG section (not `## Unreleased`). This includes both merged PR commits and GUI dependency upgrade commits from step 7.
+
+Check CHANGELOG diffs to determine which commits qualify — if the entry was added under a `(staging)` heading, it needs cherry-picking.
+
+**Skip** this step entirely if no commits have staging CHANGELOG entries.
+
+For qualifying PRs/commits, invoke the `/staging-cherry-pick` skill:
+
+```bash
+echo '[{"repo":"edge-react-gui","prNumber":123,"mergeSha":"abc123"}]' | ~/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh
+```
+
+Pass the `mergeSha` from the merge step's JSON output. For dep upgrade commits, pass the commit SHA from step 7. The script cherry-picks individual (non-merge) commits onto the staging branch.
+
+**On exit 3 (CHANGELOG conflict):** Resolve semantically (existing staging entries first, then the new entry), then `git add CHANGELOG.md && GIT_EDITOR=true git cherry-pick --continue`. Re-run for remaining PRs.
+
+**On exit 1 (code conflict):** STOP and report to user.
+
+After cherry-picks succeed, ask user to confirm push:
+```bash
+git push origin staging
+```
+
+Then restore the previous branch.
+
+
+
+**Runs ONLY after ALL merges, cherry-picks, publishes, and GUI dep upgrades are complete.**
+
+Only update for fully landed PRs:
+- GUI PRs: merged
+- Non-GUI PRs: merged AND published AND GUI deps updated
+
+Do NOT update for: skipped PRs, addressed-but-not-re-reviewed PRs, or repos not published.
+
+
+Pipe the PR metadata through the new helper so you only consume the Asana link once per PR:
+
+```bash
+printf '[{"repo":"edge-react-gui","prNumber":123}]' | ~/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh > /tmp/asana.json
+```
+
+The helper outputs JSON like `{ "tasks": [{ "taskGid": "...", "label": "repo#123" }], "missing": [{ "label": "...", "reason": "..." }] }`.
+Review the `missing` array, report any entries lacking an Asana link, and skip those PRs for Asana updates.
+
+
+
+For each task in `.tasks`, run:
+
+```bash
+~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \
+  --task <taskGid> \
+ --set-status "Verification Needed" \
+ --unassign
+```
+
+This replaces the old dedicated verification updater behavior.
+
+**Exit codes per call:**
+- `0` = success
+- `1` = error
+- `2` = needs user input
+
+
+
+
+```
+=== PR Land Summary ===
+
+Fully landed:
+ ✓ <repo>#<pr> (<title>) — merged, cherry-picked to staging, Asana updated
+ ✓ <repo>#<pr> (<title>) — merged, Asana updated
+ ✓ <repo>#<pr> (<title>) — merged, published v<version>, GUI deps updated, Asana updated
+
+Addressed but needs re-review:
+ ⚠ <repo>#<pr> (<title>) — fixup pushed, awaiting review
+
+Skipped (conflicts):
+ ⚠ <repo>#<pr> (<title>) — stale / code conflict in <file>
+
+Not published (outstanding PRs):
+ ⚠ <repo> — <N> PRs skipped, publish deferred
+```
+
+
+
+
+| Conflict Type | Script Behavior | Agent Action |
+|---|---|---|
+| Code files | Skip PR, abort rebase, continue | Report to user at end |
+| CHANGELOG only (prepare) | Report conflict | Resolve semantically, re-run prepare |
+| CHANGELOG only (merge) | **exit 4** with instructions | Resolve semantically, push, re-run merge |
+
+Both prepare and merge scripts can detect CHANGELOG-only conflicts. In either case:
+1. Script outputs clear resolution instructions
+2. Agent resolves semantically (upstream entries first)
+3. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue`
+4. Push with `--force-with-lease`
+5. Re-run the script to verify and proceed
+
+
+
+```
+# Typical conflict:
+<<<<<<< HEAD
+- added: Feature from upstream
+=======
+- changed: Our feature
+>>>>>>> our-commit
+
+# Resolution: Upstream first, then ours:
+- added: Feature from upstream
+- changed: Our feature
+```
+
+
+1. Read CHANGELOG.md with conflict markers
+2. Resolve semantically using StrReplace
+3. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue`
+4. Re-run `~/.cursor/skills/pr-land/scripts/pr-land-prepare.sh`
+
+
+
+1. `cd <repo-dir>`
+2. `git fetch origin && git rebase origin/master` (or `origin/develop`)
+3. Read CHANGELOG.md with conflict markers
+4. Resolve semantically using StrReplace
+5. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue`
+6. `git push --force-with-lease`
+7. Re-run `~/.cursor/skills/pr-land/scripts/pr-land-merge.sh` — verification runs automatically
+
+
+Verification checks: no conflict markers remaining, proper entry format (`- type: description`), no malformed entries. If verification fails after resolution, the script prompts the user.
+
+
+
+1. Code conflicts skip cleanly — scripts abort rebase and skip, no dirty state
+2. CHANGELOG conflicts are scripted — agent resolves semantically (any section including staging), verification validates
+3. Verification is mandatory — built into merge script, physically blocks merge on failure
+4. Pre-merge is safe — can force-push as many times as needed
+5. Sequential merging with auto-rebase — each PR rebased onto updated base
+6. No bypasses — scripts enforce rules, agent cannot skip steps
+7. Unexpected errors halt execution — undocumented exit codes stop immediately
+8. Publish gating — repos with outstanding PRs are not published
+9. Asana is last — task updates only after full pipeline completes
+
diff --git a/.cursor/skills/pr-land/scripts/edge-repo.js b/.cursor/skills/pr-land/scripts/edge-repo.js
new file mode 100644
index 0000000..af85b4c
--- /dev/null
+++ b/.cursor/skills/pr-land/scripts/edge-repo.js
@@ -0,0 +1,152 @@
+// edge-repo.js — Shared Edge repository utilities.
+// Common functions for repo discovery, git operations, and conflict handling.
+// Used by: pr-land-prepare.sh, pr-land-merge.sh, pr-land-publish.sh
+const { spawnSync, execSync } = require("child_process");
+const { existsSync } = require("fs");
+const path = require("path");
+const os = require("os");
+
function getRepoDir(repo) {
  // Locate an existing clone of `repo` under the common checkout roots.
  // Falls back to ~/git/<repo> when no clone is found, so callers get a
  // deterministic path either way.
  const home = os.homedir();
  const found = ["git", "projects", "code"]
    .map((root) => path.join(home, root, repo))
    .find((dir) => existsSync(path.join(dir, ".git")));
  return found ?? path.join(home, "git", repo);
}
+
function getUpstreamBranch(repo) {
  // Only the GUI repo develops against `develop`; every other Edge repo
  // lands on `master`.
  if (repo === "edge-react-gui") return "origin/develop";
  return "origin/master";
}
+
function runGit(args, cwd, options = {}) {
  // Run a git command in `cwd` with interactive editors neutralized, so
  // rebases/cherry-picks never block on a prompt.
  //
  // args    - argv array, or a single space-separated string (no quoting).
  // cwd     - working directory for the git process.
  // options - { allowFailure } — when false (default), a nonzero exit throws.
  //
  // Returns { success, stdout, stderr } with trimmed output.
  const { allowFailure = false } = options;
  const argv = Array.isArray(args) ? args : args.split(" ");
  const env = { ...process.env, GIT_EDITOR: "true", GIT_SEQUENCE_EDITOR: ":" };

  const res = spawnSync("git", argv, { cwd, encoding: "utf8", env });
  const ok = res.status === 0;

  if (!ok && !allowFailure) {
    const message = (res.stderr || res.stdout || "Unknown git error").trim();
    throw new Error(message);
  }

  return {
    success: ok,
    stdout: res.stdout?.trim() || "",
    stderr: res.stderr?.trim() || "",
  };
}
+
function parseConflictFiles(output) {
  // Extract conflicted file paths from git rebase/merge/status output,
  // deduplicated while preserving first-seen order.
  const seen = new Set();
  for (const line of output.split("\n")) {
    const conflict = line.match(/CONFLICT.*in (.+)$/);
    if (conflict) seen.add(conflict[1]);
    const modified = line.match(/^\s+both modified:\s+(.+)$/);
    if (modified) seen.add(modified[1]);
  }
  return [...seen];
}
+
function isChangelogOnly(files) {
  // True only when there is at least one conflicted path and every one of
  // them is a CHANGELOG.md (at the repo root or nested in a subdirectory).
  if (files.length === 0) return false;
  const isChangelog = (f) => f === "CHANGELOG.md" || f.endsWith("/CHANGELOG.md");
  return files.every(isChangelog);
}
+
function runVerification(repoDir, baseRef, options = {}) {
  // Run the shared verify-repo.sh (a Node script) against `repoDir`,
  // streaming its output to the caller's terminal.
  //
  // repoDir - path of the repository to verify.
  // baseRef - optional git ref to diff against (forwarded as --base).
  // options - { requireChangelog, skipInstall } feature flags.
  //
  // Returns { success: true } on exit 0, otherwise
  // { success: false, exitCode } with the script's exit status.
  const verifyScript = path.join(
    os.homedir(),
    ".cursor",
    "skills",
    "verify-repo.sh"
  );
  const args = [verifyScript, repoDir];
  if (baseRef != null) args.push("--base", String(baseRef));
  if (options.requireChangelog) args.push("--require-changelog");
  if (options.skipInstall) args.push("--skip-install");

  // spawnSync with an argv array avoids the shell entirely, so paths
  // containing spaces, quotes, or `$` can neither break nor inject into
  // the command line (the previous execSync string interpolation could).
  const result = spawnSync("node", args, { stdio: "inherit", encoding: "utf8" });
  if (result.status === 0) return { success: true };
  return { success: false, exitCode: result.status ?? undefined };
}
+
// Thin wrapper around `gh api` for GitHub REST calls.
function ghApi(endpoint, options = {}) {
  // Assemble the argv, only adding flags the caller asked for.  A request
  // body is streamed via stdin (`--input -`) as JSON.
  const { method, body, paginate, jq } = options;
  const args = ["api", endpoint];
  if (method && method !== "GET") args.push("-X", method);
  if (paginate) args.push("--paginate");
  if (jq) args.push("--jq", jq);
  if (body) args.push("--input", "-");

  const input = body ? JSON.stringify(body) : undefined;
  const result = spawnSync("gh", args, { encoding: "utf8", input });

  if (result.status !== 0) {
    const detail = (result.stderr || "").trim();
    throw new Error(`gh api ${endpoint} failed: ${detail}`);
  }

  // gh may emit JSON, plain text (e.g. with --jq), or nothing at all.
  const out = result.stdout.trim();
  if (!out) return null;
  try {
    return JSON.parse(out);
  } catch {
    return out;
  }
}
+
function ghGraphql(query, variables = {}) {
  // Invoke the GitHub GraphQL API through `gh api graphql`.
  // Numeric variables go through -F (typed); everything else through -f.
  const args = ["api", "graphql", "-f", `query=${query}`];
  for (const [key, value] of Object.entries(variables)) {
    const flag = typeof value === "number" ? "-F" : "-f";
    args.push(flag, `${key}=${value}`);
  }

  const result = spawnSync("gh", args, { encoding: "utf8" });
  if (result.status !== 0) {
    throw new Error(
      `gh api graphql failed: ${(result.stderr || "").trim()}`
    );
  }

  // gh exits 0 even when the query itself errored, so check the payload.
  const payload = JSON.parse(result.stdout);
  if (payload.errors) {
    throw new Error(`GraphQL errors: ${JSON.stringify(payload.errors)}`);
  }
  return payload.data;
}
+
function installAndPrepare(repoDir) {
  // Run the shared install-deps.sh for `repoDir`, streaming its output.
  // Throws on a nonzero exit or when the script cannot be launched,
  // matching the execSync contract callers rely on.
  //
  // Executing the script directly (argv array, no shell) means a repoDir
  // containing spaces, quotes, or `$` cannot break the command line the
  // way the previous execSync string interpolation could.  Assumes
  // install-deps.sh is executable with a shebang — TODO confirm.
  const script = path.join(__dirname, "..", "..", "install-deps.sh");
  const result = spawnSync(script, [repoDir], { stdio: "inherit" });
  if (result.error) throw result.error;
  if (result.status !== 0) {
    throw new Error(
      `install-deps.sh failed for ${repoDir} (exit ${result.status})`
    );
  }
}
+
// Public API shared by the pr-land scripts (prepare / merge / publish).
module.exports = {
  getRepoDir,
  getUpstreamBranch,
  runGit,
  parseConflictFiles,
  isChangelogOnly,
  runVerification,
  installAndPrepare,
  ghApi,
  ghGraphql,
};
diff --git a/.cursor/skills/pr-land/scripts/pr-land-comments.sh b/.cursor/skills/pr-land/scripts/pr-land-comments.sh
new file mode 100755
index 0000000..4a3f23d
--- /dev/null
+++ b/.cursor/skills/pr-land/scripts/pr-land-comments.sh
@@ -0,0 +1,189 @@
+#!/usr/bin/env node
+// pr-land-comments.sh — Landing gate: checks for recent unaddressed feedback.
+// Surfaces unresolved inline threads, review bodies, and top-level comments
+// posted after the last commit. Uses a single GraphQL query per PR.
+//
+// Skips: resolved threads, bot/author comments, items with addressed markers.
+//
+// Usage: echo '[{"repo":"...","prNumber":123,"branch":"..."}]' | ./pr-land-comments.sh
+
+const { spawnSync } = require("child_process")
+
function requireGh() {
  // Gate the whole run on a valid gh auth session.  On failure, print the
  // PROMPT_GH_AUTH sentinel (the agent asks the user to log in) and exit 2.
  const auth = spawnSync("gh", ["auth", "status"], { encoding: "utf8" })
  if (auth.status === 0) return
  console.error("PROMPT_GH_AUTH")
  process.exit(2)
}
+
function ghGraphql(query, variables = {}) {
  // Run a GraphQL query via `gh api graphql`.  Numeric variables are
  // passed typed (-F); everything else as a string field (-f).
  const args = ["api", "graphql", "-f", `query=${query}`]
  for (const [key, value] of Object.entries(variables)) {
    const flag = typeof value === "number" ? "-F" : "-f"
    args.push(flag, `${key}=${value}`)
  }

  const result = spawnSync("gh", args, { encoding: "utf8" })
  if (result.status !== 0) {
    throw new Error(`GraphQL failed: ${(result.stderr || "").trim()}`)
  }

  // gh can exit 0 with query-level errors; surface those explicitly.
  const parsed = JSON.parse(result.stdout)
  if (parsed.errors) {
    throw new Error(`GraphQL errors: ${JSON.stringify(parsed.errors)}`)
  }
  return parsed.data
}
+
// Single GraphQL query fetching everything the landing gate needs per PR:
// the author, the last commit date (the feedback cutoff), unresolved review
// threads, the latest review bodies, and top-level comments (which double
// as the source of "addressed" markers).
const QUERY = `
query($owner: String!, $repo: String!, $number: Int!) {
  repository(owner: $owner, name: $repo) {
    pullRequest(number: $number) {
      author { login }
      commits(last: 1) {
        nodes { commit { committedDate } }
      }
      reviewThreads(first: 100) {
        nodes {
          id
          isResolved
          comments(first: 50) {
            nodes {
              createdAt
              author { login }
              path
              body
            }
          }
        }
      }
      reviews(last: 50) {
        nodes {
          databaseId
          author { login }
          state
          body
          submittedAt
        }
      }
      comments(last: 50) {
        nodes {
          databaseId
          createdAt
          author { login }
          body
        }
      }
    }
  }
}`
+
+requireGh()
+
function extractAddressedIds(comments) {
  // Collect the review databaseIds that top-level PR comments mark as
  // addressed, so the gate can skip review bodies already responded to.
  //
  // NOTE(review): the marker regex in the original was garbled to a bare
  // `//g` (an empty pattern that also reads as a line comment — a syntax
  // error, and it would only ever capture undefined/NaN).  The pattern
  // below is a best-effort reconstruction matching forms like
  // "addressed-review: 12345" / "Addressed review #12345" — TODO confirm
  // the exact marker format against whatever posts these comments.
  const marker = /addressed[-\s]review:?\s*#?(\d+)/gi
  const ids = new Set()
  for (const c of comments) {
    for (const m of (c.body || "").matchAll(marker)) {
      ids.add(Number(m[1]))
    }
  }
  return ids
}
+
function isBot(login) {
  // Missing logins and GitHub App accounts ("foo[bot]") both count as bots.
  if (login == null || login === "") return true
  return login.includes("[bot]")
}
+
+async function main() {
+ let input = ""
+ for await (const chunk of process.stdin) input += chunk
+
+ const prs = JSON.parse(input)
+ const results = []
+
+ for (const { repo, prNumber, branch } of prs) {
+ let data
+ try {
+ data = ghGraphql(QUERY, { owner: "EdgeApp", repo, number: prNumber })
+ } catch (e) {
+ console.error(
+ `WARNING: Failed to query ${repo}#${prNumber}: ${e.message}`
+ )
+ continue
+ }
+
+ const pr = data.repository.pullRequest
+ const prAuthor = pr.author?.login
+ const lastCommitDate = pr.commits.nodes[0]
+ ? new Date(pr.commits.nodes[0].commit.committedDate)
+ : new Date(0)
+
+ const addressedIds = extractAddressedIds(pr.comments.nodes)
+ const recentComments = []
+
+ for (const thread of pr.reviewThreads.nodes) {
+ if (thread.isResolved) continue
+ for (const c of thread.comments.nodes) {
+ if (new Date(c.createdAt) > lastCommitDate) {
+ recentComments.push({
+ type: "inline",
+ user: c.author?.login,
+ path: c.path,
+ body: c.body?.slice(0, 200)
+ })
+ }
+ }
+ }
+
+ const latestByUser = {}
+ for (const r of pr.reviews.nodes) {
+ const user = r.author?.login
+ if (!user || user === prAuthor || r.state === "PENDING") continue
+ if (isBot(user)) continue
+ const prev = latestByUser[user]
+ if (
+ !prev ||
+ new Date(r.submittedAt) > new Date(prev.submittedAt)
+ ) {
+ latestByUser[user] = r
+ }
+ }
+ for (const [user, r] of Object.entries(latestByUser)) {
+ if (!r.body?.trim()) continue
+ if (addressedIds.has(r.databaseId)) continue
+ if (new Date(r.submittedAt) > lastCommitDate) {
+ recentComments.push({
+ type: "review-body",
+ user,
+ state: r.state,
+ body: r.body.slice(0, 200)
+ })
+ }
+ }
+
+ for (const c of pr.comments.nodes) {
+ const user = c.author?.login
+ if (!user || user === prAuthor || isBot(user)) continue
+ if ((c.body || "").includes("